max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
educative/binarysearch/findhighestnumber.py | monishshah18/python-cp-cheatsheet | 140 | 12787151 | <filename>educative/binarysearch/findhighestnumber.py<gh_stars>100-1000
def find_highest_number(A):
    if len(A) < 3:
        return None
    def condition(value) -> bool:
        # Leftmost index: it is the peak only if it is above its right neighbor.
        # mid never reaches the last index (right starts at len(A) - 1 and the
        # loop keeps mid < right), so only the left edge needs a guard.
        if value == 0:
            return A[0] > A[1]
        leftNeighbor = value - 1
        rightNeighbor = value + 1
        if A[leftNeighbor] < A[value] < A[rightNeighbor]:
            return False
        elif A[leftNeighbor] > A[value] > A[rightNeighbor]:
            return True
        elif A[leftNeighbor] < A[value] and A[value] > A[rightNeighbor]:
            return True
        return False
    left, right = 0, len(A) - 1
    while left < right:
        mid = left + (right - left) // 2
        if condition(mid):
            right = mid
        else:
            left = mid + 1
    return A[left]
# Peak element is "5".
A = [1, 2, 3, 4, 5, 4, 3, 2, 1]
print(find_highest_number(A))
A = [1, 6, 5, 4, 3, 2, 1]
print(find_highest_number(A))
A = [1, 2, 3, 4, 5]
print(find_highest_number(A))
A = [5, 4, 3, 2, 1]
print(find_highest_number(A)) | 4.09375 | 4 |
playbooks/files/rax-maas/plugins/glance_registry_local_check.py | nipsy/rpc-maas | 0 | 12787152 | <reponame>nipsy/rpc-maas
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import ipaddr
from maas_common import generate_local_endpoint
from maas_common import get_openstack_client
from maas_common import metric
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
from requests import exceptions as exc
def check(args):
glance = get_openstack_client('image')
try:
# Remove version from returned endpoint
glance_endpoint = str(glance.get_endpoint().rsplit('/', 2)[0])
local_registry_url = generate_local_endpoint(
glance_endpoint, args.ip, args.port, args.protocol,
'/images'
)
resp = glance.session.get(local_registry_url, timeout=180)
milliseconds = resp.elapsed.total_seconds() * 1000
is_up = resp.status_code == 200
except (exc.ConnectionError, exc.HTTPError, exc.Timeout):
is_up = False
metric_bool('client_success', False, m_name='maas_glance')
except Exception as e:
metric_bool('client_success', False, m_name='maas_glance')
status_err(str(e), m_name='maas_glance')
status_ok(m_name='maas_glance')
metric_bool('client_success', True, m_name='maas_glance')
metric_bool('glance_registry_local_status', is_up, m_name='maas_glance')
# Only send remaining metrics if the API is up
if is_up:
metric('glance_registry_local_response_time', 'double',
'%.3f' % milliseconds, 'ms')
def main(args):
check(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check Glance Registry "
" against local or remote address")
parser.add_argument('ip', type=ipaddr.IPv4Address,
help='Glance Registry IP address')
parser.add_argument('--telegraf-output',
action='store_true',
default=False,
help='Set the output format to telegraf')
parser.add_argument('--protocol',
action='store',
default='http',
help='Protocol for the local glance registry API')
parser.add_argument('--port',
action='store',
default='9191',
help='Port for local glance registry API')
args = parser.parse_args()
with print_output(print_telegraf=args.telegraf_output):
main(args)
| 1.820313 | 2 |
Multi_Page_WebApp/tests/test_db.py | Anthogr/netcdf_editor_app | 8 | 12787153 | <gh_stars>1-10
import sqlite3
import pytest
from climate_simulation_platform.db import (
get_db,
get_file_types,
get_file_type_counts,
steps_seen,
)
def test_get_close_db(app):
with app.app_context():
db = get_db()
assert db is get_db()
with pytest.raises(sqlite3.ProgrammingError) as e:
db.execute("SELECT 1")
assert "closed" in str(e.value)
def test_init_db_command(runner, monkeypatch):
class Recorder(object):
called = False
def fake_init_db():
Recorder.called = True
monkeypatch.setattr("climate_simulation_platform.db.init_db", fake_init_db)
result = runner.invoke(args=["init-db"])
assert "Initialized" in result.output
assert Recorder.called
def test_get_file_type_counts(app):
with app.app_context():
file_type_counts = get_file_type_counts(1)
assert type(file_type_counts) == dict
for key, val in file_type_counts.items():
assert type(key) is str
assert type(val) is int
assert file_type_counts["raw"] == 2
assert file_type_counts["routing"] == 1
assert "fake_file_type" not in file_type_counts.keys()
def test_get_file_types(app):
with app.app_context():
file_types = get_file_types(1)
assert type(file_types) == list
assert "raw" in file_types
assert "routing" in file_types
def test_steps_seen(app):
with app.app_context():
ss = steps_seen(9)
assert all([isinstance(step, str) for step in ss])
| 2.25 | 2 |
qaws/qaws.py | kacirekj/saws | 2 | 12787154 | <reponame>kacirekj/saws
import boto3
from datetime import datetime, timedelta
import time
import sys
import re
from typing import Iterator
help = '''
NAME
qaws -- Query AWS CloudWatch logs
SYNOPSIS
qaws [-g groups...]
[-t starttime | starttime endtime]
[-q query]
DESCRIPTION
-h --help
Get this manual.
-g --groups groups ...
Specify 1 to N logging groups like "/ecs/someservice1". Wildcard * can be used like "*ecs*some*1".
If you specify only the -g flag, it will print all log groups in CloudWatch
-t --time starttime | starttime endtime
Specify a starttime in the past and, optionally, a more recent endtime.
Possible formats for time specification are:
ISO time: "2000-01-01T00:00:00"
Epoch in seconds: "1590314700"
Time relative to Now:
"1h" 1 hour ago
"1h 60m" 2 hours ago
"1h 60m 3600s" 3 hours ago
"3600s 60m 1h" 3 hours ago as well (order doesn't matter)
"3600s 3600s 3600s" 3 hours ago as well (items are repeatable)
"1y 1mo 1w 1d 1h 1m 1s" is possible as well
-q --query query
Query exactly as it is usually written in AWS CloudWatch Insights in Web Console:
fields @timestamp, @message
| filter @message like 'event'
| limit 10"
- It can take a few minutes (~2 minutes) until logs appear in CloudWatch, so fetching logs
with '-t "1m"' may not return any results
- Even if you set '|limit 1' in --query, CloudWatch will still search the entire specified
history (e.g. '-t "10d"'), which can take a lot of time
- When you use the wildcard * in group names, the query takes longer to finish because all the log group names have to be fetched from AWS
EXAMPLES
- Prints all log groups in CloudWatch:
qaws \\
--groups
- Prints all log groups in CloudWatch matching wildcard:
qaws \\
--groups "*service*"
- Basic querying:
qaws \\
--groups "/ecs/myservice0" \\
--time "1h" \\
--query "fields @message"
- Multiple groups specified with one containing wildcard:
qaws \\
--groups "*ecs*service0" "/ecs/myservice1" "/ecs/myservice2" \\
--time "1d 1h 30m" \\
--query "fields @message"
- Query logs in between past 5 and 1 hour with wildcard:
qaws \\
--groups "/ecs/*" \\
--time "5h" "1h" \\
--query "fields @timestamp @message | filter @message like 'event' | limit 5000"
- Query logs in between two ISO dates:
qaws \\
--groups "/ecs/*" \\
--time "2020-05-24T00:00:00" "2020-05-24T12:00:00" \\
--query "fields @message | filter @message like 'event' | limit 5000"
- Combine relative time with ISO date:
qaws \\
--groups "/ecs/*" \\
--time "1y" "2020-05-24T00:00:00" \\
--query "fields @message | filter @message like 'event' | limit 5000"
AUTHORS
<NAME> (<EMAIL>) 2020
IMPLEMENTATION
Python 3.8
'''
class TimeParser:
def __init__(self, today=datetime.now()):
self.today = today
def parse(self, string: str, else_return=None) -> datetime:
if not string:
return else_return
return self._parse_isodatetime(string) or self._parse_relative_time(string) or self._parse_timestamp(string) or else_return
def _parse_timestamp(self, string: str) -> datetime:
if string.isdigit():
return datetime.fromtimestamp(float(string))
def _parse_isodatetime(self, string: str):
if all(char in string for char in ['-', ':']):
return datetime.fromisoformat(string)
def _parse_relative_time(self, string: str):
if any(char in string for char in 'y mo w d h m s'):
return self._get_datetime_from_relative(string)
def _get_num_from_str(self, string: str):
num = ''
for ch in string:
if ch.isdigit():
num += ch
return int(num)
def _get_datetime_from_relative(self, relative: str):
seconds = 0
split = relative.split(' ')
for s in split:
if 'y' in s:
seconds += self._get_num_from_str(s) * 3600 * 24 * 365
elif 'mo' in s:
seconds += self._get_num_from_str(s) * 3600 * 24 * 30
elif 'w' in s:
seconds += self._get_num_from_str(s) * 3600 * 24 * 7
elif 'd' in s:
seconds += self._get_num_from_str(s) * 3600 * 24
elif 'h' in s:
seconds += self._get_num_from_str(s) * 3600
elif 'm' in s:
seconds += self._get_num_from_str(s) * 60
elif 's' in s:
seconds += self._get_num_from_str(s)
else:
raise Exception(f"Can't parse {relative}")
return self.today - timedelta(seconds=seconds)
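# Illustrative example (not part of the original module): with a fixed "today",
# TimeParser resolves the relative formats documented in the help text above,
# e.g. TimeParser(datetime(2020, 5, 24, 12, 0, 0)).parse("1h 30m")
# -> datetime(2020, 5, 24, 10, 30, 0), while ISO strings and all-digit epoch
# seconds are returned as absolute datetimes.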
def get_all_log_groups() -> Iterator[str]:
client = boto3.client('logs')
next_token = None
while True:
try:
if not next_token:
response = client.describe_log_groups(limit=50)
else:
response = client.describe_log_groups(limit=50, nextToken=next_token)
for log_group in response['logGroups']:
yield log_group['logGroupName']
next_token = response['nextToken']
except KeyError as k:
break
def filter_log_groups(log_group_names: list, log_groups: Iterator[str]) -> Iterator[str]:
for log_group in log_groups:
for log_group_name in log_group_names:
g = log_group_name.replace('*', '.*')
g = f'^{g}$'
m = re.search(g, log_group)
if m:
yield m.group(0)
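# Wildcard patterns are translated into anchored regular expressions, e.g. the
# group pattern "*ecs*some*1" from the help text becomes "^.*ecs.*some.*1$"
# before being matched against each log group name.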
def main(argv=None):
argv = sys.argv
if len(argv) < 2:
print(help)
return 0
# Parse input arguments
arg_log_group_names = []
arg_time_range = []
arg_query = []
arg_separator = []
_switch_pointer = []
for idx, item in enumerate(argv):
if item in ['--help', '-h']:
print(help)
return 0
if item in ['--groups', '-g']:
_switch_pointer = arg_log_group_names
continue
if item in ['--time', '-t']:
_switch_pointer = arg_time_range
continue
if item in ['--query', '-q']:
_switch_pointer = arg_query
continue
if item in ['--separator', '-s']:
_switch_pointer = arg_separator
continue
_switch_pointer.append(item)
# Unwildcard Groupnames if needed
if len(arg_log_group_names) > 0:
log_group_names = arg_log_group_names
else:
log_group_names = ['*']
for group_name in log_group_names:
if '*' in group_name:
all_log_groups = get_all_log_groups()
log_group_names = list(set(filter_log_groups(log_group_names, all_log_groups)))
break
log_group_names = sorted(log_group_names)
# Print only groupnames if other arguments missing
if len(arg_query) == 0 or len(arg_time_range) == 0:
print("\n".join(log_group_names))
return 0
# Parse time
timeparser = TimeParser()
arg_time_start = arg_time_range[0]
try:
arg_time_end = arg_time_range[1]
except:
arg_time_end = None
time_start = timeparser.parse(arg_time_start, else_return=timeparser.today)
time_end = timeparser.parse(arg_time_end, else_return=timeparser.today)
# Execute AWS Query
print(f'Querying from {str(time_start)} to {time_end.isoformat()} in log groups:')
print('"' + '" "'.join(log_group_names) + '"')
client = boto3.client('logs')
start_query_response = client.start_query(
logGroupNames=log_group_names,
startTime=int(time_start.timestamp()),
endTime=int(time_end.timestamp()),
queryString=arg_query[0],
)
query_id = start_query_response['queryId']
# Wait for query result
response = None
while response is None or response['status'] == 'Running':
print('Waiting for query to complete ...')
time.sleep(3)
response = client.get_query_results(
queryId=query_id
)
# Print query result
statistics = response["statistics"]
print(f'Records matched {statistics["recordsMatched"]}, Records scanned: {statistics["recordsScanned"]}')
for res in response['results']:
line = []
for r in res:
if '@ptr' not in r['field']:
line.append(r['value'].strip())
line = ', '.join(line)
if len(arg_separator) != 0:
line = arg_separator[0] + line
print(line)
return 0
if __name__ == "__main__":
main()
| 2.671875 | 3 |
scripts/pkl_to_mesh.py | AstitvaSri/SMPL-Segmentation | 6 | 12787155 | import sys
import pickle
import numpy as np
import smplx
import torch
import trimesh
from copy import deepcopy
from psbody.mesh import Mesh
import cv2
import os
import natsort
from tqdm import tqdm
def show(verts = None, faces = None, colors = None):
if torch.is_tensor(verts):
verts = verts.detach().numpy()
if torch.is_tensor(faces):
faces = faces.detach().numpy()
all_meshes = []
if faces is not None:
for i in range(len(verts)):
m = trimesh.Trimesh(verts[i], faces[i])
if colors is not None:
m.visual.vertex_colors = colors[i]
all_meshes.append(m)
else:
for i in range(len(verts)):
m = trimesh.PointCloud(verts[i], colors[i])
all_meshes.append(m)
scene = trimesh.scene.Scene()
for m in all_meshes:
scene.add_geometry(m)
scene.show('gl')
def get_param(path, pamir = True):
with open(path, 'rb') as fi:
d = pickle.load(fi)
if pamir:
scale = d['body_scale']
pose = d['body_pose'][0]
beta = d['betas'][0]
trans = d['global_body_translation']
pose_embedding = d['body_pose_embedding']
return pose_embedding, scale, pose, beta, trans
else:
scale = 1
# print(d.keys())
pose = d['pose']
beta = d['betas'][:99]
# trans = d['trans']
return None, scale, pose, beta, None
if __name__ == '__main__':
src = '/home/groot/PaMIR/our_scans/our_scans_image/mesh_data/'
scans = natsort.natsorted(os.listdir(src))
for scan in tqdm(scans):
scan_smpl_path = src + scan + '/smpl/smpl_param.pkl'
model_folder = '../models'
model = smplx.create(model_folder, create_global_orient = True, create_body_pose = False, create_betas = True, model_type='smpl', gender='male', create_transl = False, create_left_hand_pose= True, create_right_hand_pose = True, create_expression = True, create_jaw_pose = True, create_leye_pose = True, create_reye_pose = True, )
pose_embedding, scale, pose, beta, trans = get_param(scan_smpl_path)
go = torch.tensor(pose[:3]).unsqueeze(0)
pose = torch.tensor(pose[3:]).float().unsqueeze(0)
beta = torch.tensor(beta).float().unsqueeze(0)
output = model(betas=beta, body_pose = pose, global_orient=go, return_verts=True)
vert = output.vertices[0]
vert = vert.detach().numpy()
outdir = src + scan
mesh = Mesh()
vert = vert*scale
vert += trans
mesh.v = vert
mesh.f = model.faces
mesh.write_obj(outdir + '/smpl/smpl_mesh_ordered.obj')
| 2.109375 | 2 |
test/test_online_GMM.py | MarvinLvn/megamix | 0 | 12787156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from scipy import linalg
from numpy.testing import assert_almost_equal
from megamix.online import GaussianMixture
from megamix.online.base import _log_normal_matrix
from megamix.online import dist_matrix
from megamix.utils_testing import checking
from scipy.special import logsumexp
import pytest
import h5py
class TestGaussianMixture_full:
def setup(self):
self.n_components = 5
self.dim = 2
self.n_points = 10
self.file_name = 'test'
def teardown(self):
checking.remove(self.file_name + '.h5')
def test_initialize(self,window):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window)
GM.initialize(points)
checking.verify_covariance(GM.get('cov'),self.n_components,self.dim)
checking.verify_means(GM.get('means'),self.n_components,self.dim)
checking.verify_log_pi(GM.get('log_weights'),self.n_components)
cov_chol = np.empty_like(GM.get('cov'))
for i in range(self.n_components):
cov_chol[i] = linalg.cholesky(GM.get('cov')[i],lower=True)
assert_almost_equal(cov_chol,GM.get('cov_chol'))
assert GM.get('_is_initialized') == True
def test_initialize_cov(self,window,update):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window)
means = np.random.randn(self.n_components,self.dim)
GM.set('means',means)
GM._initialize_cov(points)
predected_cov = GM.get('cov')
assignements = np.zeros((self.n_points,self.n_components))
M = dist_matrix(points,means)
for i in range(self.n_points):
index_min = np.argmin(M[i]) #the cluster number of the ith point is index_min
if (isinstance(index_min,np.int64)):
assignements[i][index_min] = 1
else: #Happens when two points are equally distant from a cluster mean
assignements[i][index_min[0]] = 1
N = np.sum(assignements,axis=0) + 1e-15
N /= self.n_points
S = np.zeros((self.n_components,self.dim,self.dim))
for i in range(self.n_components):
diff = points - means[i]
diff_weighted = diff * assignements[:,i:i+1]
S[i] = np.dot(diff_weighted.T,diff)
S[i].flat[::self.dim+1] += float(GM.get('reg_covar'))
S /= self.n_points
expected_cov = S / N[:,np.newaxis,np.newaxis]
assert_almost_equal(expected_cov,predected_cov)
def test_step_E(self,window):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window)
GM.initialize(points)
log_normal_matrix = _log_normal_matrix(points,GM.get('means'),
GM.get('cov_chol'),'full')
log_product = log_normal_matrix + GM.get('log_weights')[:,np.newaxis].T
expected_log_prob_norm = logsumexp(log_product,axis=1)
expected_log_resp = log_product - expected_log_prob_norm[:,np.newaxis]
predected_log_prob_norm, predected_log_resp = GM._step_E(points)
assert_almost_equal(expected_log_prob_norm,predected_log_prob_norm)
assert_almost_equal(expected_log_resp,predected_log_resp)
def test_step_M(self,window,update):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window,update=update)
GM.initialize(points)
_,log_resp = GM._step_E(points[:GM.get('window'):])
GM._sufficient_statistics(points[:GM.get('window'):],log_resp)
log_weights = np.log(GM.get('N'))
means = GM.get('X') / GM.get('N')[:,np.newaxis]
cov = GM.get('S') / GM.get('N')[:,np.newaxis,np.newaxis]
cov_chol = np.empty_like(cov)
for i in range(self.n_components):
cov_chol[i] = linalg.cholesky(cov[i],lower=True)
GM._step_M()
assert_almost_equal(log_weights,GM.get('log_weights'))
assert_almost_equal(means,GM.get('means'))
assert_almost_equal(cov,GM.get('cov'))
assert_almost_equal(cov_chol,GM.get('cov_chol'))
def test_sufficient_statistics(self,window,update):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window,update=update)
GM.initialize(points)
_,log_resp = GM._step_E(points[:GM.get('window'):])
points_exp = points[:window:]
resp = np.exp(log_resp)
gamma = 1/((GM.get('iter') + window//2)**GM.get('kappa'))
# New sufficient statistics
N = resp.sum(axis=0) + 10 * np.finfo(resp.dtype).eps
N /= window
X = np.dot(resp.T,points_exp)
X /= window
S = np.zeros((self.n_components,self.dim,self.dim))
for i in range(self.n_components):
diff = points_exp - GM.get('means')[i]
diff_weighted = diff * np.sqrt(resp[:,i:i+1])
S[i] = np.dot(diff_weighted.T,diff_weighted)
S /= window
# Sufficient statistics update
expected_N = (1-gamma)*GM.get('N') + gamma*N
expected_X = (1-gamma)*GM.get('X') + gamma*X
expected_S = (1-gamma)*GM.get('S') + gamma*S
expected_S_chol = np.zeros((self.n_components,self.dim,self.dim))
for i in range(self.n_components):
expected_S_chol[i] = linalg.cholesky(expected_S[i],lower=True)
GM._sufficient_statistics(points_exp,log_resp)
assert_almost_equal(expected_N,GM.get('N'))
assert_almost_equal(expected_X,GM.get('X'))
assert_almost_equal(expected_S,GM.get('S'))
def test_score(self,window,update):
points = np.random.randn(self.n_points,self.dim)
points2 = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window,update=update)
with pytest.raises(Exception):
GM.score(points)
GM.initialize(points)
GM.fit(points)
score1 = GM.score(points)
score2 = GM.score(points2)
assert score1 > score2
def test_write_and_read(self,update):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,update=update)
GM.initialize(points)
f = h5py.File(self.file_name + '.h5','w')
grp = f.create_group('init')
GM.write(grp)
f.close()
GM2 = GaussianMixture(self.n_components,update=update)
f = h5py.File(self.file_name + '.h5','r')
grp = f['init']
GM2.read_and_init(grp,points)
f.close()
checking.verify_online_models(GM,GM2)
GM.fit(points)
GM2.fit(points)
checking.verify_online_models(GM,GM2)
def test_predict_log_resp(self,window,update):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window,update=update)
with pytest.raises(Exception):
GM.predict_log_resp(points)
GM.initialize(points)
predected_log_resp = GM.predict_log_resp(points)
_,expected_log_resp = GM._step_E(points)
assert_almost_equal(predected_log_resp,expected_log_resp)
def test_update(self,window):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window,update=True)
GM.initialize(points)
GM.fit(points)
expected_cov_chol = np.zeros((self.n_components,self.dim,self.dim))
for i in range(self.n_components):
expected_cov_chol[i] = linalg.cholesky(GM.get('cov')[i],lower=True)
predected_cov_chol = GM.get('cov_chol')
assert_almost_equal(expected_cov_chol,predected_cov_chol)
def test_fit_save(self,window):
points = np.random.randn(self.n_points,self.dim)
GM = GaussianMixture(self.n_components,window=window)
checking.remove(self.file_name + '.h5')
GM.initialize(points)
GM.fit(points,saving='linear',saving_iter=2,
file_name=self.file_name)
f = h5py.File(self.file_name + '.h5','r')
cpt = 0
for name in f:
cpt += 1
assert cpt == self.n_points//(2*window)
checking.remove(self.file_name + '.h5')
GM.fit(points,saving='log',saving_iter=2,
file_name=self.file_name)
f = h5py.File(self.file_name + '.h5','r')
cpt = 0
for name in f:
cpt += 1
assert cpt == 1 + int(np.log(self.n_points/window)/np.log(2)) | 2.1875 | 2 |
wpoke/fingers/theme/models.py | sonirico/wpoke | 4 | 12787157 | <gh_stars>1-10
from dataclasses import dataclass
from typing import List, AnyStr
@dataclass
class WPThemeModelDisplay:
theme_name: AnyStr = "Theme Name"
theme_uri: AnyStr = "Theme URI"
description: AnyStr = "Description"
author: AnyStr = "Author"
author_uri: AnyStr = "Author URI"
version: AnyStr = "Version"
license: AnyStr = "License"
license_uri: AnyStr = "License URI"
tags: AnyStr = "Tags"
text_domain: AnyStr = "Text Domain"
included_translations: AnyStr = "Included Translations"
template: AnyStr = "Template"
status: AnyStr = "Status"
def __iter__(self):
for k, v in self.__dict__.items():
yield (k, v)
@dataclass
class WPThemeMetadata:
theme_name: AnyStr = None
theme_uri: AnyStr = None
description: AnyStr = None
author: AnyStr = None
author_uri: AnyStr = None
version: AnyStr = None
license: AnyStr = None
license_uri: AnyStr = None
tags: List[AnyStr] = None
text_domain: AnyStr = None
included_translations: AnyStr = None
template: AnyStr = None
status: AnyStr = None
featured_image: AnyStr = None
def set_value_for_key(self, k, v) -> None:
setattr(self, k, v)
def set_featured_image(self, img_link):
self.featured_image = img_link
| 2.328125 | 2 |
Practica02/hill_test.py | Argenis616/cryptography | 0 | 12787158 | <gh_stars>0
import pytest
from utils import CryptographyException
from hill import Hill
from random import randint
alphabet = "ABCDEFGHIJKLMNÑOPQRSTUVWXYZ"
cipher = None
key2 = "EBAY"
def test_init():
size = 8#randint(4, 9)
if size == 4:
with pytest.raises(CryptographyException):
cipher = Hill(alphabet, size, "DBAB")
elif size == 9:
with pytest.raises(CryptographyException):
cipher = Hill(alphabet, size, "DDDABCEFG")
else:
with pytest.raises(CryptographyException):
cipher = Hill(alphabet, size)
def test_known_key():
cipher = Hill(alphabet, 4, key2)
criptotext = cipher.cipher("UN MENSAJE CON Ñ")
assert True
assert criptotext == "PBYSQPJJRWSBCA"
assert cipher.decipher(criptotext) == "UNMENSAJECONÑA"
criptotext = cipher.cipher("UN MENSAJE DE LONGITUD PAR")
assert criptotext == "PBYSQPJJSUAFSBFLTMBVRR"
assert cipher.decipher("UNMENSAJEDELONGITUDPAR")
def test_random_key():
cipher = Hill(alphabet, 4)
c1 = cipher.cipher("UN MENSAJE CON Ñ")
assert cipher.decipher(c1) == "UNMENSAJECONÑA"
c2 = cipher.cipher("UN MENSAJE DE LONGITUD PAR")
assert cipher.decipher(c2) == "UNMENSAJEDELONGITUDPAR"
| 2.59375 | 3 |
utils.py | cartologic/cartoview_story_map | 1 | 12787159 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2017 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import requests
import uuid
import logging
import json
from django.template.defaultfilters import slugify
from geoserver.catalog import Catalog
from geoserver.catalog import FailedRequestError
from geonode import GeoNodeException
from geonode.layers.models import Layer
from geonode.layers.utils import get_valid_name, _clean_string
from geonode.people.models import Profile
from geonode.geoserver.helpers import ogc_server_settings
from django.conf import settings
logger = logging.getLogger(__name__)
def create_layer(name, title, owner_name, geometry_type, attributes=None):
"""
Create an empty layer in GeoServer and register it in GeoNode.
"""
# first validate parameters
if geometry_type not in ('Point', 'LineString', 'Polygon'):
msg = 'geometry must be Point, LineString or Polygon'
logger.error(msg)
raise GeoNodeException(msg)
name = get_valid_name(name)
# we can proceed
print 'Creating the layer in GeoServer'
workspace, datastore = create_gs_layer(name, title, geometry_type, attributes)
print 'Creating the layer in GeoNode'
return create_gn_layer(workspace, datastore, name, title, owner_name)
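# Illustrative call (the owner and field names are assumed, not taken from this
# module): create_layer('field_survey', 'Field Survey', 'admin', 'Point',
# '{"observer": "string", "count": "integer", "visited": "date"}') would create
# an empty point layer whose attributes follow the JSON format documented in
# get_attributes() below.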
def create_gn_layer(workspace, datastore, name, title, owner_name):
"""
Associate a layer in GeoNode for a given layer in GeoServer.
"""
owner = Profile.objects.get(username=owner_name)
layer = Layer.objects.create(
name=name,
workspace=workspace.name,
store=datastore.name,
storeType='dataStore',
# alternate='%s:%s' % (workspace.name, name),
title=title,
owner=owner,
uuid=str(uuid.uuid4()),
bbox_x0=-180,
bbox_x1=180,
bbox_y0=-90,
bbox_y1=90
)
print(layer)
# setting permissions in GeoServer GeoFence data rules with the owner of the layer
perms = {u'users': {u'AnonymousUser': [], owner_name: [u'view_resourcebase', u'download_resourcebase', u'change_resourcebase_metadata', u'change_layer_data', u'change_layer_style', u'change_resourcebase', u'delete_resourcebase', u'change_resourcebase_permissions', u'publish_resourcebase']}, u'groups': {}}
layer.set_permissions(perms)
return layer
def get_attributes(geometry_type, json_attrs=None):
"""
Convert a json representation of attributes to a Python representation.
parameters:
json_attrs
{
"field_str": "string",
"field_int": "integer",
"field_date": "date",
"field_float": "float"
}
geometry_type: a string which can be "Point", "LineString" or "Polygon"
Output:
[
['the_geom', u'com.vividsolutions.jts.geom.Polygon', {'nillable': False}],
['field_str', 'java.lang.String', {'nillable': True}],
['field_int', 'java.lang.Integer', {'nillable': True}],
['field_date', 'java.util.Date', {'nillable': True}],
['field_float', 'java.lang.Float', {'nillable': True}]
]
"""
lattrs = []
gattr = []
gattr.append('the_geom')
gattr.append('com.vividsolutions.jts.geom.%s' % geometry_type)
gattr.append({'nillable': False})
lattrs.append(gattr)
if json_attrs:
jattrs = json.loads(json_attrs)
for jattr in jattrs.items():
lattr = []
attr_name = slugify(jattr[0])
attr_type = jattr[1].lower()
if len(attr_name) == 0:
msg = 'You must provide an attribute name for attribute of type %s' % (attr_type)
logger.error(msg)
raise GeoNodeException(msg)
if attr_type not in ('float', 'date', 'string', 'integer'):
msg = '%s is not a valid type for attribute %s' % (attr_type, attr_name)
logger.error(msg)
raise GeoNodeException(msg)
if attr_type == 'date':
attr_type = 'java.util.%s' % attr_type[:1].upper() + attr_type[1:]
else:
attr_type = 'java.lang.%s' % attr_type[:1].upper() + attr_type[1:]
lattr.append(attr_name)
lattr.append(attr_type)
lattr.append({'nillable': True})
lattrs.append(lattr)
print("lattrs",lattrs)
return lattrs
def get_or_create_datastore(cat, workspace=None, charset="UTF-8"):
"""
Get a PostGIS database store or create it in GeoServer if it does not exist.
"""
# TODO refactor this and geoserver.helpers._create_db_featurestore
db = ogc_server_settings.datastore_db
dsname = db['NAME']
if not ogc_server_settings.DATASTORE:
msg = ("To use the createlayer application you must set ogc_server_settings.datastore_db['ENGINE']"
" to 'django.contrib.gis.db.backends.postgis")
logger.error(msg)
raise GeoNodeException(msg)
try:
ds = cat.get_store(dsname,settings.DEFAULT_WORKSPACE)
assert ds
except Exception as e :
print(e.message)
ds = cat.create_datastore(dsname, workspace=workspace)
print("connnnnn",ds)
ds.connection_parameters.update(
{'validate connections': 'true',
'max connections': '10',
'min connections': '1',
'fetch size': '1000',
'host': db['HOST'],
'port': db['PORT'] if isinstance(
db['PORT'], basestring) else str(db['PORT']) or '5432',
'database': db['NAME'],
'user': db['USER'],
'passwd': db['PASSWORD'],
'dbtype': 'postgis'}
)
cat.save(ds)
# we need to reload the ds as gsconfig-1.0.6 apparently does not populate ds.type
# using create_datastore (TODO fix this in gsconfig)
# commented for geosites
# ds = cat.get_store(dsname, workspace)
print("dss",ds)
return ds
def create_gs_layer(name, title, geometry_type, attributes=None):
"""
Create an empty PostGIS layer in GeoServer with a given name, title,
geometry_type and attributes.
"""
native_name = name
gs_user = ogc_server_settings.credentials[0]
gs_password = ogc_server_settings.credentials[1]
cat = Catalog(ogc_server_settings.rest, gs_user, gs_password, disable_ssl_certificate_validation=True)
# get workspace and store
# workspace = cat.get_default_workspace()
workspace = cat.get_workspace(settings.DEFAULT_WORKSPACE)
print("workspace",workspace)
# get (or create the datastore)
datastore = get_or_create_datastore(cat, workspace)
# check if datastore is of PostGIS type
if datastore.type != 'PostGIS':
msg = ("To use the createlayer application you must use PostGIS")
logger.error(msg)
# raise GeoNodeException(msg)
# check if layer is existing
resources = datastore.get_resources()
for resource in resources:
if resource.name == name:
msg = "There is already a layer named %s in %s" % (name, workspace)
logger.error(msg)
raise GeoNodeException(msg)
attributes = get_attributes(geometry_type, attributes)
attributes_block = "<attributes>"
for spec in attributes:
att_name, binding, opts = spec
nillable = opts.get("nillable", False)
attributes_block += ("<attribute>"
"<name>{name}</name>"
"<binding>{binding}</binding>"
"<nillable>{nillable}</nillable>"
"</attribute>").format(name=att_name, binding=binding, nillable=nillable)
attributes_block += "</attributes>"
# TODO implement others srs and not only EPSG:4326
xml = ("<featureType>"
"<name>{name}</name>"
"<nativeName>{native_name}</nativeName>"
"<title>{title}</title>"
"<srs>EPSG:4326</srs>"
"<latLonBoundingBox><minx>-180</minx><maxx>180</maxx><miny>-90</miny><maxy>90</maxy>"
"<crs>EPSG:4326</crs></latLonBoundingBox>"
"{attributes}"
"</featureType>").format(
name=name.encode('UTF-8', 'strict'), native_name=native_name.encode('UTF-8', 'strict'),
title=title.encode('UTF-8', 'strict'),
attributes=attributes_block)
url = ('%s/workspaces/%s/datastores/%s/featuretypes'
% (ogc_server_settings.rest, workspace.name, datastore.name))
print("**************",url)
headers = {'Content-Type': 'application/xml'}
req = requests.post(url, data=xml, headers=headers, auth=(gs_user, gs_password))
if req.status_code != 201:
logger.error('Request status code was: %s' % req.status_code)
logger.error('Response was: %s' % req.text)
raise GeoNodeException("Layer could not be created in GeoServer")
return workspace, datastore
def update_layer(name,title):
valid_name= _clean_string(name)
try:
layer=Layer.objects.filter(name=valid_name).update(title=title)
except:
pass
| 1.765625 | 2 |
BNNs/KF_Laplace/hessian_operations.py | kw-lee/Bayesian-Neural-Networks | 1 | 12787160 | from __future__ import division
import torch
# from BNNs.base_net import *
def softmax_CE_preact_hessian(last_layer_acts):
side = last_layer_acts.shape[1]
I = torch.eye(side).type(torch.ByteTensor)
# for i != j H = -ai * aj -- Note that these are activations not pre-activations
Hl = - last_layer_acts.unsqueeze(1) * last_layer_acts.unsqueeze(2)
# for i == j H = ai * (1 - ai)
Hl[:, I] = last_layer_acts * (1 - last_layer_acts)
return Hl
def layer_act_hessian_recurse(prev_hessian, prev_weights, layer_pre_acts):
newside = layer_pre_acts.shape[1]
batch_size = layer_pre_acts.shape[0]
I = torch.eye(newside).type(torch.ByteTensor) # .unsqueeze(0).expand([batch_size, -1, -1])
# print(d_act(layer_pre_acts).unsqueeze(1).shape, I.shape)
B = prev_weights.data.new(batch_size, newside, newside).fill_(0)
B[:, I] = (layer_pre_acts > 0).type(B.type()) # d_act(layer_pre_acts)
D = prev_weights.data.new(batch_size, newside, newside).fill_(0) # is just 0 for a piecewise linear
# D[:, I] = dd_act(layer_pre_acts) * act_grads
Hl = torch.bmm(torch.t(prev_weights).unsqueeze(0).expand([batch_size, -1, -1]), prev_hessian)
Hl = torch.bmm(Hl, prev_weights.unsqueeze(0).expand([batch_size, -1, -1]))
Hl = torch.bmm(B, Hl)
Hl = torch.matmul(Hl, B)
Hl = Hl + D
return Hl
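# Written out, the recursion above computes (per sample in the batch)
#   H_prev = B @ (W.T @ H @ W) @ B + D
# where B holds the ReLU derivatives of the layer pre-activations on its
# diagonal and D, the second-derivative term, is zero for piecewise-linear
# activations (as the commented-out lines above indicate).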
def chol_scale_invert_kron_factor(factor, prior_scale, data_scale, upper=False):
scaled_factor = data_scale * factor + prior_scale * torch.eye(factor.shape[0]).type(factor.type())
inv_factor = torch.inverse(scaled_factor)
chol_inv_factor = torch.cholesky(inv_factor, upper=upper)
return chol_inv_factor
def sample_K_laplace_MN(MAP, upper_Qinv, lower_HHinv):
# H = Qi (kron) HHi
# sample isotropic unit variance mtrix normal
Z = MAP.data.new(MAP.size()).normal_(mean=0, std=1)
# AAT = HHi
# A = torch.cholesky(HHinv, upper=False)
# BTB = Qi
# B = torch.cholesky(Qinv, upper=True)
all_mtx_sample = MAP + torch.matmul(torch.matmul(lower_HHinv, Z), upper_Qinv)
weight_mtx_sample = all_mtx_sample[:, :-1]
bias_mtx_sample = all_mtx_sample[:, -1]
return weight_mtx_sample, bias_mtx_sample | 2.140625 | 2 |
ceraon/models/transactions.py | Rdbaker/Mealbound | 1 | 12787161 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Models for transactions."""
import datetime as dt
import uuid
import stripe
from sqlalchemy.dialects.postgresql import JSONB, UUID
from ceraon.database import Column, IDModel, db, reference_col, relationship
class Transaction(IDModel):
"""A transaction made by a user for a meal.
For now, we're using Stripe to process payments in our API. This means
that some of the columns we have here are internal to Stripe.
"""
__tablename__ = 'transaction'
created_at = Column(db.DateTime(timezone=True), nullable=False,
default=dt.datetime.utcnow)
meal_id = reference_col('meal', nullable=False)
meal = relationship('Meal')
payer_id = reference_col('users', nullable=False)
payer = relationship('User', foreign_keys=[payer_id])
payee_id = reference_col('users', nullable=False)
payee = relationship('User', foreign_keys=[payee_id])
transaction_went_through = Column(db.Boolean, nullable=False, default=False)
transaction_paid_out = Column(db.Boolean, nullable=False, default=False)
canceled = Column(db.Boolean, nullable=False, default=False)
refunded = Column(db.Boolean, nullable=False, default=False)
stripe_idempotency_key = Column(UUID(as_uuid=True), nullable=False,
default=uuid.uuid4)
# the cost of the transaction in USD
amount = Column(db.Float(), nullable=False)
# data from Stripe that was returned after the "charge" was created
# NOTE: stripe shows the "amount" of a charge in cents
stripe_payload = Column(JSONB)
@property
def takehome_amount(self):
"""The amount for this transaction minus operational overhead."""
return self.amount - self.operational_overhead_cut
@property
def operational_overhead_cut(self):
"""The amount of money we need to take to run our servers."""
# We take 10% of the transaction, or at least $0.50
return max(0.50, self.amount * 0.1)
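# Worked example: a $20.00 meal gives a $2.00 overhead cut (10%), while a $3.00
# meal hits the $0.50 floor, leaving a takehome_amount of $2.50.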
def charge(self, transaction_token=None):
"""Run the charge for the transaction.
:param transaction_token string: (default: None) the string used to
create a new transaction via stripe, if not using saved payment info
:return bool: indicating whether it was a success or failure
We use stripe, so if we change this later, we should change the low
level implementation details, but the general flow should be the same.
"""
try:
charge_params = {
'amount': int(self.amount * 100),
'currency': 'usd',
'idempotency_key': str(self.stripe_idempotency_key)
}
# see if we're doing a one-time transaction or if we're charging a
# card that stripe has on file
if transaction_token is not None:
charge_params.update(source=transaction_token)
else:
charge_params.update(customer=self.payer.stripe_customer_id)
stripe.Charge.create(**charge_params)
except:
return False
else:
self.update(transaction_went_through=True)
return True
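# Illustrative usage (the objects below are assumed, not from the original
# codebase):
#   txn = Transaction(meal=meal, payer=diner, payee=host, amount=12.0)
#   txn.charge(transaction_token=token_from_checkout)  # one-off Stripe source
#   txn.charge()  # charges the payer's saved stripe_customer_id instead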
def cancel(self):
"""Cancel the charge for the transaction and start the refund process.
We use stripe, so if we change this later, we should change the low
level implementation details, but the general flow should be the same.
"""
self.update(canceled=True)
# TODO: automate this flow later, if we're okay with that.
# for now, we'll do payouts manually
return True
def payer_has_stripe_source(self):
"""Return true if the payer has a `source` on their stripe customer."""
if not self.payer.stripe_customer_id:
return False
else:
customer = stripe.Customer.retrieve(self.payer.stripe_customer_id)
if customer is None or customer.default_source is None:
return False
else:
return True
@staticmethod
def set_stripe_source_on_user(user, token):
"""Set the stripe customer source for the user given a source token.
Since this class is responsible for interfacing with our payments
vendor, it is responsible for setting up the user with a corresponding
Stripe customer.
:param user User: the user to set the stripe_customer_id on
:param token string: the token stripe returned in exchange for payment
info
:return bool: indicating whether it was successful or not
"""
try:
if user.stripe_customer_id is None:
customer = stripe.Customer.create(
email=user.email,
source=token
)
user.stripe_customer_id = customer.id
user.save()
else:
customer = stripe.Customer.retrieve(user.stripe_customer_id)
customer.source = token
customer.save()
except:
return False
else:
return True
| 2.78125 | 3 |
test/test_crypto.py | eXhumer/aws-crt-python | 48 | 12787162 | <gh_stars>10-100
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
from test import NativeResourceTest
from awscrt.crypto import Hash
import unittest
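# The expected byte strings below are the well-known digests of the empty string
# and of "abc" for SHA-256, SHA-1 and MD5 (e.g. SHA-256("abc") starts ba7816bf...),
# so these tests double as a check against reference implementations such as hashlib.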
class TestCredentials(NativeResourceTest):
def test_sha256_empty(self):
h = Hash.sha256_new()
digest = h.digest()
expected = b'\xe3\xb0\xc4\x42\x98\xfc\x1c\x14\x9a\xfb\xf4\xc8\x99\x6f\xb9\x24\x27\xae\x41\xe4\x64\x9b\x93\x4c\xa4\x95\x99\x1b\x78\x52\xb8\x55'
self.assertEqual(expected, digest)
def test_sha256_one_shot(self):
h = Hash.sha256_new()
h.update('abc')
digest = h.digest()
expected = b'\xba\x78\x16\xbf\x8f\x01\xcf\xea\x41\x41\x40\xde\x5d\xae\x22\x23\xb0\x03\x61\xa3\x96\x17\x7a\x9c\xb4\x10\xff\x61\xf2\x00\x15\xad'
self.assertEqual(expected, digest)
def test_sha256_iterated(self):
h = Hash.sha256_new()
h.update('a')
h.update('b')
h.update('c')
digest = h.digest()
expected = b'\xba\x78\x16\xbf\x8f\x01\xcf\xea\x41\x41\x40\xde\x5d\xae\x22\x23\xb0\x03\x61\xa3\x96\x17\x7a\x9c\xb4\x10\xff\x61\xf2\x00\x15\xad'
self.assertEqual(expected, digest)
def test_sha1_empty(self):
h = Hash.sha1_new()
digest = h.digest()
expected = b'\xda\x39\xa3\xee\x5e\x6b\x4b\x0d\x32\x55\xbf\xef\x95\x60\x18\x90\xaf\xd8\x07\x09'
self.assertEqual(expected, digest)
def test_sha1_one_shot(self):
h = Hash.sha1_new()
h.update('abc')
digest = h.digest()
expected = b'\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e\x25\x71\x78\x50\xc2\x6c\x9c\xd0\xd8\x9d'
self.assertEqual(expected, digest)
def test_sha1_iterated(self):
h = Hash.sha1_new()
h.update('a')
h.update('b')
h.update('c')
digest = h.digest()
expected = b'\xa9\x99\x3e\x36\x47\x06\x81\x6a\xba\x3e\x25\x71\x78\x50\xc2\x6c\x9c\xd0\xd8\x9d'
self.assertEqual(expected, digest)
def test_md5_empty(self):
h = Hash.md5_new()
digest = h.digest()
expected = b'\xd4\x1d\x8c\xd9\x8f\x00\xb2\x04\xe9\x80\x09\x98\xec\xf8\x42\x7e'
self.assertEqual(expected, digest)
def test_md5_one_shot(self):
h = Hash.md5_new()
h.update('abc')
digest = h.digest()
expected = b'\x90\x01\x50\x98\x3c\xd2\x4f\xb0\xd6\x96\x3f\x7d\x28\xe1\x7f\x72'
self.assertEqual(expected, digest)
def test_md5_iterated(self):
h = Hash.md5_new()
h.update('a')
h.update('b')
h.update('c')
digest = h.digest()
expected = b'\x90\x01\x50\x98\x3c\xd2\x4f\xb0\xd6\x96\x3f\x7d\x28\xe1\x7f\x72'
self.assertEqual(expected, digest)
if __name__ == '__main__':
unittest.main()
| 2.15625 | 2 |
yzrpc/config/__init__.py | ml444/yz-rpc | 5 | 12787163 | #!/usr/bin/python3.7+
# -*- coding:utf-8 -*-
"""
@auth: cml
@date: 2021/2/24
@desc: ...
"""
from .default_settings import *
| 1.28125 | 1 |
wings/resource.py | KnowledgeCaptureAndDiscovery/wings-client | 0 | 12787164 | import json
from urllib.parse import urlencode
class Resource(object):
def __init__(self, api_client):
self.api_client = api_client
def get_machine(self, resid):
params = {'resid': resid}
resp = self.api_client.session.get(self.api_client.get_server() + '/common/resources/getMachineJSON?' +
urlencode(params))
return resp.json()
def save_machine(self, mid, machine_data):
params = {'resid': mid, 'json': json.dumps(machine_data)}
self.api_client.session.post(
self.api_client.get_server() + '/common/resources/saveMachineJSON', params)
| 2.8125 | 3 |
Dodgeball/Dodgeball2.py | Software-Cat/Python-Mini-Projects | 0 | 12787165 | <gh_stars>0
import pygame
import sys
def vertical_dashed_line(screen, color, startPos, length, width, dashLength):
segments = []
dashes = int(length/dashLength)
startY = startPos[1]
for i in range(dashes):
if i%2 == 0:
segments.append([[startPos[0], startY+i*dashLength], [startPos[0], startY+(i+1)*dashLength]])
for segment in segments:
pygame.draw.line(screen, color, segment[0], segment[1], width)
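# For the court drawn below, vertical_dashed_line(screen, ..., length=510, dashLength=12)
# yields int(510 / 12) = 42 dash slots, of which the 21 even-indexed ones are drawn,
# giving the neutral-zone line its 12-pixel on/off dash pattern.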
def game_over():
pass
def pause():
pass
def draw_stage():
# Ground
pygame.draw.rect(screen, [200, 200, 200], [0, 0, 1105, 697])
pygame.draw.rect(screen, [0, 0, 0], [0, 0, 1105, 697], 2)
# Court
pygame.draw.rect(screen, [230, 230, 230], [42.5, 93.5, 1020, 510])
# Neutral Zone
pygame.draw.rect(screen, [190, 190, 190], [518.5, 93.5, 68, 510], 0)
# Court lines
# Centerline
pygame.draw.line(screen, [0, 0, 0], [552.5, 93.5], [552.5, 603.5])
# Neutral lines
vertical_dashed_line(screen, [0, 0, 0], [518.5, 93.5], 510, 1, 12)
#pygame.draw.line(screen, [0, 0, 0], [518.5, 93.5], [518.5, 603.5])
pygame.draw.line(screen, [0, 0, 0], [586.5, 93.5], [586.5, 603.5])
# Court border accent
pygame.draw.rect(screen, [0, 0, 0], [42.5, 93.5, 1020, 510], 5)
pygame.init()
screen = pygame.display.set_mode([1105, 697])
# Coefficient = 17
running = True
while running:
draw_stage()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.display.flip()
game_over()
| 2.984375 | 3 |
src/core/grasp.py | danbailo/T1-Teoria-Computacao | 0 | 12787166 | import numpy as np
import random
from time import time
random.seed(42)
def semi_greedy_construction(window, number_items, weight_max, values_items, weight_items):
efficiency = np.divide(values_items, weight_items)
items = {}
for i in range(number_items):
items[i] = efficiency[i], values_items[i], weight_items[i]
items = sorted(items.values(), reverse=True)
result_final = []
value = 0
weight = 0
aux = items[:]
while len(items) > 0 and weight < weight_max:
if len(items) >= window: tmp_window = window
else: tmp_window = len(items)
index = random.randint(0,tmp_window-1)
value_item = items[index][1]
weight_item = items[index][2]
if weight_item+weight <= weight_max:
result_final.append(items[index][1])
value += value_item
weight += weight_item
del items[index]
solution = np.zeros(number_items,dtype=np.int16)
for item in values_items:
if item in result_final: solution[values_items.index(item)] = 1
return solution, value, weight
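# The construction above is the semi-greedy (restricted candidate list) step of
# GRASP: items are ranked by value/weight efficiency and each pick is drawn at
# random from the top `window` remaining candidates, so repeated iterations
# produce different feasible starting solutions for the local search below.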
def local_search(solution, values_items, weight_items, value, weight, weight_max):
    length = len(solution)
    # neighbor keeps the best single-item addition found so far as
    # (solution, value, weight), matching the argument order of this function
    neighbor = (solution.copy(), value, weight)
    for i in range(length):
        if solution[i] == 0:
            if weight + weight_items[i] <= weight_max:
                if value + values_items[i] > neighbor[1]:
                    temp = solution.copy()
                    temp[i] = 1
                    neighbor = (temp, value + values_items[i], weight + weight_items[i])
    # no improving addition found: the current value is locally optimal
    if value == neighbor[1]:
        return value
    return local_search(neighbor[0], values_items, weight_items, neighbor[1], neighbor[2], weight_max)
def grasp(max_it, window, number_items, weight_max, values_items, weight_items):
best_solution = 0
for i in range(max_it):
solution, value, weight = semi_greedy_construction(window, number_items, weight_max, values_items, weight_items)
solution = local_search(solution, values_items, weight_items, value, weight, weight_max)
if solution > best_solution: best_solution = solution
return best_solution | 2.765625 | 3 |
download_config.py | ASHIK11ab/swito | 1 | 12787167 | <reponame>ASHIK11ab/swito<filename>download_config.py
import requests
import os
import sys
URL = "https://www.googleapis.com/drive/v3/files"
QUERY_STRING = {"alt":"media"}
FILE_ID = sys.argv[1]
API_KEY = sys.argv[2]
def get_config_file_contents():
""" Reads the lines of the configuration file. """
with open(f"./configuration.py", "r") as file:
lines = file.readlines()
return lines
def save_config_file(contents):
""" Saves the configuration file to the file system. """
with open(f"./configuration.py", "w") as file:
file.writelines(contents)
def fetch_config_file(fileId, api_key):
""" Fetches the configuration file from Google Drive. """
file_url = f"{URL}/{fileId}?key={api_key}"
resp = requests.get(file_url, params=QUERY_STRING)
file_contents = resp.text
save_config_file(file_contents)
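# The request built above is the Drive v3 files.get call with alt=media, i.e.
# https://www.googleapis.com/drive/v3/files/<FILE_ID>?key=<API_KEY>&alt=media,
# which returns the raw file contents instead of the file's metadata.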
def main():
fetch_config_file(FILE_ID, API_KEY)
if __name__ == "__main__":
main() | 3.21875 | 3 |
ckanext-hdx_theme/ckanext/hdx_theme/tests/test_actions/test_api_token_create.py | OCHA-DAP/hdx-ckan | 58 | 12787168 | <filename>ckanext-hdx_theme/ckanext/hdx_theme/tests/test_actions/test_api_token_create.py<gh_stars>10-100
import pytest
from builtins import str
import ckan.model as model
import ckan.plugins.toolkit as tk
import ckan.tests.factories as factories
import ckan.tests.helpers as helpers
@pytest.fixture(scope='module')
def keep_db_tables_on_clean():
model.repo.tables_created_and_initialised = True
@pytest.mark.usefixtures('keep_db_tables_on_clean', 'clean_db')
class TestApiToken(object):
LIMIT = 180 # days
def test_token_expiry_with_integer_params(self):
user = factories.User(name='testuser1')
context = {
u"model": model,
u"user": user[u"name"]
}
# should raise an exception when trying to create a token with expiration period > LIMIT
with pytest.raises(tk.ValidationError):
helpers.call_action(u"api_token_create", context=context,
user=user[u"name"], name=u"token-name",
expires_in=self.LIMIT + 1, unit=24 * 60 * 60)
# there should be no problem creating a token with expiration period <= LIMIT
helpers.call_action(u"api_token_create", context=context, user=user[u"name"], name=u"token-name",
expires_in=self.LIMIT, unit=24 * 60 * 60)
def test_token_expiry_with_str_params(self):
user = factories.User(name='testuser2')
context = {
u"model": model,
u"user": user[u"name"]
}
# should raise an exception when trying to create a token with expiration period > LIMIT
with pytest.raises(tk.ValidationError):
helpers.call_action(u"api_token_create", context=context,
user=user[u"name"], name=u"token-name",
expires_in=str(self.LIMIT + 1), unit=str(24 * 60 * 60))
# there should be no problem creating a token with expiration period <= LIMIT
helpers.call_action(u"api_token_create", context=context, user=user[u"name"], name=u"token-name",
expires_in=str(self.LIMIT), unit=str(24 * 60 * 60))
| 2.015625 | 2 |
cornflow-server/cornflow/shared/authentication.py | ggsdc/corn | 2 | 12787169 | """
"""
# Global imports
from functools import wraps
from cornflow_core.authentication import BaseAuth
from cornflow_core.exceptions import InvalidData, NoPermission
from cornflow_core.models import ViewBaseModel, PermissionViewRoleBaseModel
# Partial imports
from flask import request, g, current_app
# Internal modules imports
from .const import PERMISSION_METHOD_MAP
from ..models import UserModel, PermissionsDAG
class Auth(BaseAuth):
def __init__(self, user_model=UserModel):
super().__init__(user_model)
def authenticate(self):
user = self.get_user_from_header(request.headers)
check = Auth._get_permission_for_request(request, user.id)
g.user = user
return True
@staticmethod
def dag_permission_required(func):
"""
DAG permission decorator
:param func:
:return:
"""
@wraps(func)
def dag_decorator(*args, **kwargs):
if int(current_app.config["OPEN_DEPLOYMENT"]) == 0:
user_id = g.user.id
dag_id = request.json.get("schema", None)
if dag_id is None:
raise InvalidData(
error="The request does not specify a schema to use",
status_code=400,
)
else:
if PermissionsDAG.check_if_has_permissions(user_id, dag_id):
# We have permissions
return func(*args, **kwargs)
else:
raise NoPermission(
error="You do not have permission to use this DAG",
status_code=403,
)
else:
return func(*args, **kwargs)
return dag_decorator
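# Illustrative use of the decorator above (the endpoint class is assumed, not
# taken from this codebase): applied to a resource method, it rejects any
# request whose JSON "schema" the user lacks DAG permission for, unless the
# deployment runs with OPEN_DEPLOYMENT enabled.
#
#   class ExecutionEndpoint(SomeAuthenticatedResource):
#       @Auth.dag_permission_required
#       def post(self, **kwargs):
#           ...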
@staticmethod
def return_user_from_token(token):
"""
Function used for internal testing. Given a token gives back the user_id encoded in it.
:param str token: the given token
:return: the user id code.
:rtype: int
"""
user_id = Auth.decode_token(token)["user_id"]
return user_id
"""
START OF INTERNAL PROTECTED METHODS
"""
@staticmethod
def _get_permission_for_request(req, user_id):
method, url = Auth._get_request_info(req)
user_roles = UserModel.get_one_user(user_id).roles
if user_roles is None or user_roles == {}:
raise NoPermission(
error="You do not have permission to access this endpoint",
status_code=403,
)
action_id = PERMISSION_METHOD_MAP[method]
view_id = ViewBaseModel.query.filter_by(url_rule=url).first().id
for role in user_roles:
has_permission = PermissionViewRoleBaseModel.get_permission(
role_id=role, api_view_id=view_id, action_id=action_id
)
if has_permission:
return True
raise NoPermission(
error="You do not have permission to access this endpoint", status_code=403
)
| 2.1875 | 2 |
81.write_to_a_file.py | gptakhil/Python_Practice_Beginner | 2 | 12787170 | fp = open('./data/Program81.txt', 'w')
fp.write("Hello World 1 \n")
fp.write("Hello world 2 \n")
fp.writelines("Hello World 3 \n")
text = ["Hello World 4 \n", "Hello World 5 \n", "Hello World 6"]
fp.writelines(text)
fp.close() | 2.71875 | 3 |
test/functional/abc_p2p_avalanche_quorum.py | edhoguntur/bitcoin-abc | 2 | 12787171 | #!/usr/bin/env python3
# Copyright (c) 2020-2022 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the quorum detection of avalanche."""
from time import time
from test_framework.avatools import (
create_coinbase_stakes,
get_ava_p2p_interface,
)
from test_framework.key import ECKey, ECPubKey
from test_framework.messages import AvalancheVote, AvalancheVoteError
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet_util import bytes_to_wif
class AvalancheQuorumTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [
['-enableavalanche=1',
'-avacooldown=0',
'-avatimeout=0',
'-avaminquorumstake=100000000',
'-avaminquorumconnectedstakeratio=0.8']
]
def mock_forward(self, delta):
self.mock_time += delta
self.nodes[0].setmocktime(self.mock_time)
def run_test(self):
self.mock_time = int(time())
self.mock_forward(0)
# Create a local node to poll from and a helper to send polls from it
# and assert on the response
node = self.nodes[0]
poll_node = get_ava_p2p_interface(node)
poll_node_pubkey = ECPubKey()
poll_node_pubkey.set(bytes.fromhex(node.getavalanchekey()))
def poll_and_assert_response(expected):
# Send poll for best block
block = int(node.getbestblockhash(), 16)
poll_node.send_poll([block])
# Get response and check that the vote is what we expect
response = poll_node.wait_for_avaresponse()
r = response.response
assert poll_node_pubkey.verify_schnorr(response.sig, r.get_hash())
assert_equal(len(r.votes), 1)
actual = repr(r.votes[0])
expected = repr(AvalancheVote(expected, block))
assert_equal(actual, expected)
# Create peers to poll
num_quorum_peers = 2
coinbase_key = node.get_deterministic_priv_key().key
blocks = node.generate(num_quorum_peers)
peers = []
for i in range(0, num_quorum_peers):
keyHex = "12b004fff7f4b69ef8650e767f18f11ede158148b425660723b9f9a66e61f75" + \
str(i)
k = ECKey()
k.set(bytes.fromhex(keyHex), True)
stakes = create_coinbase_stakes(
node, [blocks[i]], coinbase_key)
proof = node.buildavalancheproof(1, 1, bytes_to_wif(k.get_bytes()),
stakes)
peers.append({'key': k, 'proof': proof, 'stake': stakes})
def addavalanchenode(peer):
pubkey = peer['key'].get_pubkey().get_bytes().hex()
assert node.addavalanchenode(
peer['node'].nodeid, pubkey, peer['proof']) is True
# Start polling. The response should be UNKNOWN because there's no
# score
poll_and_assert_response(AvalancheVoteError.UNKNOWN)
# Create one peer with half the score and add one node
peers[0]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[0])
poll_and_assert_response(AvalancheVoteError.UNKNOWN)
# Create a second peer with the other half and add one node
peers[1]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[1])
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
# Disconnect peer 1's node which drops us below the threshold, but we've
# latched that the quorum is established
self.mock_forward(1)
peers[1]['node'].peer_disconnect()
peers[1]['node'].wait_for_disconnect()
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
# Reconnect node and re-establish quorum
peers[1]['node'] = get_ava_p2p_interface(node)
addavalanchenode(peers[1])
poll_and_assert_response(AvalancheVoteError.ACCEPTED)
if __name__ == '__main__':
AvalancheQuorumTest().main()
| 2.09375 | 2 |
SPP-loopback.py | mwswartwout/Edison_Android_Communications | 0 | 12787172 | <reponame>mwswartwout/Edison_Android_Communications
#!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import os
import sys
import socket
import uuid
import dbus
import dbus.service
import dbus.mainloop.glib
import mraa
pinSound = 0
pinMoisture = 1
pinLight = 2
pinUV = 3
sound = mraa.Aio(pinSound)
moisture = mraa.Aio(pinMoisture)
light = mraa.Aio(pinLight)
uv = mraa.Aio(pinUV)
#button = mraa.Gpio(pinButton)
#encoder1 = mraa.Gpio(pinEncoder1)
#encoder2 = mraa.Gpio(pinEncoder1)
#buzzer = mraa.Gpio(pinBuzzer)
#relay = mraa.Gpio(pinRelay)
#pir = mraa.Gpio(pinPIR)
#servo = mraa.Gpio(pinServo)
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
class Profile(dbus.service.Object):
fd = -1
@dbus.service.method("org.bluez.Profile1",
in_signature="", out_signature="")
def Release(self):
print("Release")
mainloop.quit()
@dbus.service.method("org.bluez.Profile1",
in_signature="", out_signature="")
def Cancel(self):
print("Cancel")
@dbus.service.method("org.bluez.Profile1",
in_signature="oha{sv}", out_signature="")
def NewConnection(self, path, fd, properties):
self.fd = fd.take()
print("NewConnection(%s, %d)" % (path, self.fd))
server_sock = socket.fromfd(self.fd, socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.setblocking(1)
server_sock.send("This is Edison SPP loopback test\nAll data will be loopback\nPlease start:\n")
try:
while True:
data = server_sock.recv(1024)
print("received: %s" % data)
if (data == "start"):
while True:
data = sound.read()
server_sock.send("sound: %d" % data)
print("sound: %d" % data)
data = moisture.read()
server_sock.send("moisture: %d" % data)
print("moisture: %d" % data)
data = light.read()
server_sock.send("light: %d" % data)
print("light: %d" % data)
data = uv.read()
server_sock.send("uv: %d" % data)
print("uv: %d" % data)
server_sock.send("looping back: %s\n" % data)
except IOError:
pass
server_sock.close()
print("all done")
@dbus.service.method("org.bluez.Profile1",
in_signature="o", out_signature="")
def RequestDisconnection(self, path):
print("RequestDisconnection(%s)" % (path))
if (self.fd > 0):
os.close(self.fd)
self.fd = -1
if __name__ == '__main__':
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
manager = dbus.Interface(bus.get_object("org.bluez",
"/org/bluez"), "org.bluez.ProfileManager1")
option_list = [
make_option("-C", "--channel", action="store",
type="int", dest="channel",
default=None),
]
parser = OptionParser(option_list=option_list)
(options, args) = parser.parse_args()
options.uuid = "1101"
options.psm = "3"
options.role = "server"
options.name = "Edison SPP Loopback"
options.service = "spp char loopback"
options.path = "/foo/bar/profile"
options.auto_connect = False
options.record = ""
profile = Profile(bus, options.path)
mainloop = GObject.MainLoop()
opts = {
"AutoConnect" : options.auto_connect,
}
if (options.name):
opts["Name"] = options.name
if (options.role):
opts["Role"] = options.role
if (options.psm is not None):
opts["PSM"] = dbus.UInt16(options.psm)
if (options.channel is not None):
opts["Channel"] = dbus.UInt16(options.channel)
if (options.record):
opts["ServiceRecord"] = options.record
if (options.service):
opts["Service"] = options.service
if not options.uuid:
options.uuid = str(uuid.uuid4())
manager.RegisterProfile(options.path, options.uuid, opts)
mainloop.run()
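# Typical invocation sketch (assumptions: running on the Edison with a BlueZ 5 D-Bus
# service exposing org.bluez.ProfileManager1, and the analog sensors wired as above):
#     python SPP-loopback.py          # register the SPP profile with default options
#     python SPP-loopback.py -C 22    # or pin the profile to a specific RFCOMM channel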
| 2.203125 | 2 |
sdr_availability/__init__.py | enavu/sdr_avail | 0 | 12787173 | from .data_manips import *
from .input_prompts import * | 1.039063 | 1 |
11_Retos Diarios/reto-diario-06/main.py | rikhen/OpenBootcampPublic | 0 | 12787174 | # ----------------------------------------------------------------------------
# OpenBootcamp - Reto Diario 06
# Created By : Rikhen
# version =' 1.0'
# ---------------------------------------------------------------------------
import sys
import calculator as calc
import converter as conv
import validations as valid
print("Introduzca el cálculo deseado (operadores válidos: suma, resta, multiplica, divide)")
entrada = input()
varstrs = entrada.split()
if len(varstrs) != 4:
sys.exit("La entrada no es correcta!")
# Check that every token is a string
for s in varstrs:
if valid.is_string(s):
continue
else:
sys.exit(str(s) + " no es una cadena!")
# Assign the variables
operador = varstrs[0]
conector = varstrs[2]
num1str = varstrs[1]
num2str = varstrs[3]
# Check that the connector is 'y'
if conector != 'y':
sys.exit("El conector no es válido!")
# Check whether the value is included in the dictionary
if valid.is_exists(num1str):
    try:
        # Convert the string into a number
        num1int = conv.convert_to_integer(num1str)
    except Exception as e:
        sys.exit("Ha ocurrido un error: " + str(e))
else:
    sys.exit("El valor " + num1str + " no es válido")
if valid.is_exists(num2str):
try:
num2int = conv.convert_to_integer(num2str)
except Exception as e:
sys.exit("Ha ocurrido un error: " + e)
else:
sys.exit("El valor " + num2str + " no es válido")
# Compute the result in numbers
try:
    resint = calc.calculate(operador, num1int, num2int)
except Exception as e:
    sys.exit("Ha ocurrido un error: " + str(e))
# Convert the result back into a string
try:
    resstr = conv.convert_to_string(abs(resint))
    if resint < 0:
        resstr = "menos " + resstr
except Exception as e:
    sys.exit("Ha ocurrido un error: " + str(e))
# Print the final result
print("Resultado: ", resstr)
| 3.296875 | 3 |
tests/test_map.py | xealits/symbolic-lisp | 0 | 12787175 | import pytest
from sym_lis3 import GlobalEnv
def test_map_basic():
g = GlobalEnv()
assert list(g.eval_str('(map (lambda (x) (* 2 x)) (list 1 2 3))')) == [2, 4, 6]
def test_map_curry():
g = GlobalEnv()
g.eval_str('(define "foo" (lambda (x y) (* x y)))')
assert list(g.eval_str('(map (curry foo 2) (list 1 2 3))')) == [2, 4, 6]
| 2.875 | 3 |
wonderbits/WBLedMatrix.py | BigCircleLaw/wonderguy | 1 | 12787176 | <filename>wonderbits/WBLedMatrix.py
from .WBits import WBits
from .event import Event
def _format_str_type(x):
if isinstance(x, str):
x = str(x).replace('"', '\\"')
x = "\"" + x + "\""
return x
class LedMatrix(WBits):
def __init__(self, index = 1):
WBits.__init__(self)
self.index = index
def set_onboard_rgb(self, rgb):
command = 'ledMatrix{}.set_onboard_rgb({})'.format(self.index, rgb)
self._set_command(command)
def print(self, text):
"""
        Display numbers, characters and other content
        :param text: content to display; may be a string, an integer or a float
"""
text = _format_str_type(text)
args = []
args.append(str(text))
command = 'ledMatrix{}.print({})'.format(self.index, ",".join(args))
self._set_command(command)
def clear(self):
"""
        Clear the content currently being displayed
"""
command = 'ledMatrix{}.clear()'.format(self.index)
self._set_command(command)
def set_scrolling(self, state):
"""
        Control the display mode of the LED matrix module; continuous scrolling is disabled by default
        :param state: control flag: False disables continuous scrolling, True enables it
"""
args = []
args.append(str(state))
command = 'ledMatrix{}.set_scrolling({})'.format(self.index, ",".join(args))
self._set_command(command)
def draw_dot(self, x, y):
"""
        Draw a dot on the matrix. Calling this stops any scrolling in progress; if print is used afterwards, the drawn content is cleared
        :param x: X coordinate: 1~16
        :param y: Y coordinate: 1~8
"""
args = []
args.append(str(x))
args.append(str(y))
command = 'ledMatrix{}.draw_dot({})'.format(self.index, ",".join(args))
self._set_command(command)
def draw_line(self, head_x, head_y, tail_x, tail_y):
"""
        Draw a line on the matrix. Calling this stops any scrolling in progress; if print is used afterwards, the drawn content is cleared
        :param head_x: X coordinate of the start point: 1~16
        :param head_y: Y coordinate of the start point: 1~8
        :param tail_x: X coordinate of the end point: 1~16
        :param tail_y: Y coordinate of the end point: 1~8
"""
args = []
args.append(str(head_x))
args.append(str(head_y))
args.append(str(tail_x))
args.append(str(tail_y))
command = 'ledMatrix{}.draw_line({})'.format(self.index, ",".join(args))
self._set_command(command)
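# A minimal usage sketch, not part of the library (assumptions: a Wonderbits LED matrix
# module is connected and WBits._set_command forwards commands to it; only methods defined
# above are used):
#
#     matrix = LedMatrix(index=1)
#     matrix.print("Hi")           # show a short text
#     matrix.set_scrolling(True)   # enable continuous scrolling
#     matrix.clear()               # wipe the display
#     matrix.draw_dot(3, 4)        # stops scrolling and draws a single dot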
| 2.953125 | 3 |
hostel/vlans/migrations/0001_initial.py | phylocko/hostel | 0 | 12787177 | <gh_stars>0
# Generated by Django 2.1.5 on 2020-10-17 21:32
from django.db import migrations, models
import django.db.models.deletion
import hostel.service.variables
class Migration(migrations.Migration):
initial = True
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Vlan',
fields=[
('vlanid', models.AutoField(db_column='vlanID', primary_key=True, serialize=False)),
('vlannum', models.IntegerField(unique=True, validators=[hostel.service.variables.validate_vlan_id])),
('vname', models.CharField(max_length=20, unique=True, validators=[hostel.service.variables.validate_vlan_name])),
('status', models.CharField(blank=True, max_length=20, null=True)),
('ticket', models.CharField(max_length=20, null=True)),
('comment', models.CharField(blank=True, max_length=2048, null=True)),
('multivlan', models.BooleanField(default=False)),
('is_management', models.BooleanField(default=False)),
('is_local', models.BooleanField(default=True)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='vlan', to='common.Service')),
],
options={
'db_table': 'vlans',
'managed': True,
},
),
]
| 1.742188 | 2 |
tests/conftest.py | lowitea/flake8-fine-pytest | 0 | 12787178 | import os
import ast
import pytest
from flake8.options.manager import OptionManager
from flake8_fine_pytest.checker import FinePytestChecker
def parse_options(allowed_test_directories, allowed_test_arguments_count, allowed_assert_count):
options = OptionManager()
options.allowed_test_directories = allowed_test_directories
options.allowed_test_arguments_count = allowed_test_arguments_count
options.allowed_assert_count = allowed_assert_count
FinePytestChecker.parse_options(options)
@pytest.fixture
def run_validator_for_test_files():
def _run(filename, allowed_test_directories=None, allowed_test_arguments_count=None, allowed_assert_count=None):
test_file_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_files',
filename,
)
with open(test_file_path, 'r') as file_handler:
raw_content = file_handler.read()
tree = ast.parse(raw_content)
checker = FinePytestChecker(tree=tree, filename=test_file_path)
parse_options(allowed_test_directories, allowed_test_arguments_count, allowed_assert_count)
return list(checker.run())
return _run
| 2.34375 | 2 |
fastapi_iam/fixtures.py | jordic/fastapi_iam | 1 | 12787179 | from . import models
from .initialize import initialize_db
from async_asgi_testclient import TestClient
from fastapi.applications import FastAPI
from fastapi_asyncpg import configure_asyncpg
from fastapi_asyncpg import create_pool_test
from fastapi_iam import configure_iam
from pathlib import Path
from pytest_docker_fixtures import images
import asyncpg
import pytest
dir = Path(__file__).parent
images.configure(
"postgresql", "postgres", "11.1", env={"POSTGRES_DB": "test_db"}
)
async def noop(db):
pass
@pytest.fixture
async def pool(pg):
host, port = pg
url = f"postgresql://postgres@{host}:{port}/test_db"
settings = {"db_schema": None}
# apply migrations
conn = await asyncpg.connect(dsn=url)
await initialize_db(settings, conn)
pool = await create_pool_test(url, initialize=noop)
await pool.start()
yield pool
if pool._conn.is_closed():
return
await pool.release()
@pytest.fixture
async def conn(pool):
async with pool.acquire() as db:
yield db
@pytest.fixture
async def theapp(pool):
app = FastAPI()
db = configure_asyncpg(app, "", pool=pool)
settings = {}
iam = configure_iam(settings, fastapi_asyncpg=db)
app.include_router(iam.router, prefix="/auth")
yield iam, app
users_ = [
{
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_active": True,
"is_staff": True,
"is_admin": False,
},
{
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_active": True,
"is_staff": True,
"is_admin": True,
},
{
"email": "<EMAIL>",
"password": "<PASSWORD>",
"is_active": False,
"is_staff": True,
"is_admin": True,
},
]
@pytest.fixture
async def users(theapp):
iam, app = theapp
async with TestClient(app) as client:
for user in users_:
await models.create_user(iam, user.copy())
yield client, iam
| 2.09375 | 2 |
hitcarder/exception.py | Gavin-Yi/BatchHitcarder | 6 | 12787180 | # -*- coding: utf-8 -*-
"""Exceptions used in hitcarder.
Author: Tishacy
"""
class LoginError(Exception):
"""Login Exception"""
pass
class RegexMatchError(Exception):
"""Regex Matching Exception"""
pass
class DecodeError(Exception):
"""JSON Decode Exception"""
pass
| 1.75 | 2 |
youtube_dl/extractor/space.py | zoogaezee/youtubeDL | 0 | 12787181 | <reponame>zoogaezee/youtubeDL
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..utils import RegexNotFoundError, ExtractorError
class SpaceIE(InfoExtractor):
_VALID_URL = r'https?://(?:(?:www|m)\.)?space\.com/\d+-(?P<title>[^/\.\?]*?)-video\.html'
_TEST = {
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.space.com/23373-huge-martian-landforms-detail-revealed-by-european-probe-video.html',
'info_dict': {
'id': '2780937028001',
'ext': 'mp4',
'title': 'Huge Martian Landforms\' Detail Revealed By European Probe | Video',
'description': 'md5:db81cf7f3122f95ed234b631a6ea1e61',
'uploader': 'TechMedia Networks',
},
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title = mobj.group('title')
webpage = self._download_webpage(url, title)
try:
            # Some videos require the playerKey field, which isn't defined in
# the BrightcoveExperience object
brightcove_url = self._og_search_video_url(webpage)
except RegexNotFoundError:
            # Other videos work fine with the info from the object
brightcove_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
if brightcove_url is None:
raise ExtractorError(
'The webpage does not contain a video', expected=True)
return self.url_result(brightcove_url, BrightcoveLegacyIE.ie_key())
| 2.4375 | 2 |
web-backend/main.py | mathiash98/openinframap | 43 | 12787182 | from starlette.responses import PlainTextResponse, RedirectResponse
from starlette.applications import Starlette
from starlette.templating import Jinja2Templates
from starlette.routing import Mount, Route
from starlette.staticfiles import StaticFiles
from starlette.exceptions import HTTPException
from template_functions import (
format_power,
osm_link,
country_name,
format_length,
format_voltage,
format_percent,
)
from config import database, config
from util import cache_for, country_required
from sitemap import sitemap
from data import (
get_countries,
stats_power_line,
get_plant,
get_plant_generator_summary,
get_wikidata,
get_commons_thumbnail,
)
DEBUG = config("DEBUG", cast=bool, default=False)
templates = Jinja2Templates(directory="templates")
templates.env.filters["power"] = format_power
templates.env.filters["distance"] = format_length
templates.env.filters["voltage"] = format_voltage
templates.env.filters["percent"] = format_percent
templates.env.filters["country_name"] = country_name
templates.env.globals["osm_link"] = osm_link
app = Starlette(
debug=DEBUG,
on_startup=[database.connect],
on_shutdown=[database.disconnect],
routes=[
Mount("/static", app=StaticFiles(directory="static"), name="static"),
Route("/sitemap.xml", sitemap),
],
)
@app.route("/")
async def main(request):
# Dummy response - this endpoint is served statically in production from the webpack build
return PlainTextResponse("")
@app.route("/about")
@cache_for(3600)
async def about(request):
return templates.TemplateResponse("about.html", {"request": request})
@app.route("/about/exports")
@cache_for(3600)
async def exports(request):
return templates.TemplateResponse("exports.html", {"request": request})
@app.route("/copyright")
@cache_for(3600)
async def copyright(request):
return templates.TemplateResponse("copyright.html", {"request": request})
@app.route("/stats")
@cache_for(86400)
async def stats(request):
power_lines = await stats_power_line()
return templates.TemplateResponse(
"index.html",
{
"request": request,
"countries": await get_countries(),
"power_lines": power_lines,
},
)
@app.route("/stats/area/{country}")
@country_required
@cache_for(3600)
async def country(request, country):
plant_stats = await database.fetch_one(
query="""SELECT SUM(convert_power(output)) AS output, COUNT(*)
FROM power_plant
WHERE ST_Contains(
(SELECT ST_Transform(geom, 3857) FROM countries.country_eez where gid = :gid),
geometry)
AND tags -> 'construction:power' IS NULL
""",
values={"gid": country["gid"]},
)
plant_source_stats = await database.fetch_all(
query="""SELECT first_semi(source) AS source, sum(convert_power(output)) AS output, count(*)
FROM power_plant
WHERE ST_Contains(
(SELECT ST_Transform(geom, 3857) FROM countries.country_eez WHERE gid = :gid),
geometry)
AND tags -> 'construction:power' IS NULL
GROUP BY first_semi(source)
ORDER BY SUM(convert_power(output)) DESC NULLS LAST""",
values={"gid": country["gid"]},
)
power_lines = await stats_power_line(country["union"])
return templates.TemplateResponse(
"country.html",
{
"request": request,
"country": country["union"],
"plant_stats": plant_stats,
"plant_source_stats": plant_source_stats,
"power_lines": power_lines,
"canonical": request.url_for("country", country=country["union"]),
},
)
@app.route("/stats/area/{country}/plants")
@country_required
@cache_for(3600)
async def plants_country(request, country):
gid = country[0]
plants = await database.fetch_all(
query="""SELECT osm_id, name, tags->'name:en' AS name_en, tags->'wikidata' AS wikidata,
tags->'plant:method' AS method, tags->'operator' AS operator,
convert_power(output) AS output,
source, ST_GeometryType(geometry) AS geom_type
FROM power_plant
WHERE ST_Contains(
(SELECT ST_Transform(geom, 3857) FROM countries.country_eez WHERE gid = :gid),
geometry)
AND tags -> 'construction:power' IS NULL
ORDER BY convert_power(output) DESC NULLS LAST, name ASC NULLS LAST """,
values={"gid": gid},
)
source = None
if "source" in request.query_params:
source = request.query_params["source"].lower()
plants = [
plant for plant in plants if source in plant["source"].lower().split(";")
]
min_output = None
if "min_output" in request.query_params:
try:
min_output = int(request.query_params["min_output"])
plants = [
plant
for plant in plants
if plant["output"] and plant["output"] >= min_output
]
except ValueError:
pass
return templates.TemplateResponse(
"plants_country.html",
{
"request": request,
"plants": plants,
"country": country["union"],
"source": source,
"min_output": min_output,
# Canonical URL for all plants without the source filter, to avoid confusing Google.
"canonical": request.url_for("plants_country", country=country["union"]),
},
)
@app.route("/stats/area/{country}/plants/construction")
@country_required
@cache_for(3600)
async def plants_construction_country(request, country):
gid = country[0]
plants = await database.fetch_all(
query="""SELECT osm_id, name, tags->'name:en' AS name_en, tags->'wikidata' AS wikidata,
tags->'plant:method' AS method, tags->'operator' AS operator,
tags->'start_date' AS start_date,
convert_power(output) AS output,
source, ST_GeometryType(geometry) AS geom_type
FROM power_plant
WHERE ST_Contains(
(SELECT ST_Transform(geom, 3857) FROM countries.country_eez WHERE gid = :gid),
geometry)
AND tags -> 'construction:power' IS NOT NULL
ORDER BY convert_power(output) DESC NULLS LAST, name ASC NULLS LAST """,
values={"gid": gid},
)
return templates.TemplateResponse(
"plants_country.html",
{
"construction": True,
"request": request,
"plants": plants,
"country": country["union"],
},
)
@app.route("/stats/object/plant/{id}")
@cache_for(86400)
async def stats_object(request):
try:
id = int(request.path_params["id"])
except ValueError:
raise HTTPException(400)
res = await database.fetch_one(
"""SELECT country_eez."union" FROM power_plant, countries.country_eez WHERE
ST_Contains(ST_Transform(country_eez.geom, 3857), geometry)
AND power_plant.osm_id = :id""",
values={"id": id},
)
if not res:
raise HTTPException(404)
return RedirectResponse(
request.url_for("plant_detail", country=res["union"], id=id)
)
@app.route("/stats/area/{country}/plants/{id}")
@country_required
@cache_for(3600)
async def plant_detail(request, country):
try:
plant_id = int(request.path_params["id"])
except ValueError:
raise HTTPException(404, "Invalid plant ID")
plant = await get_plant(plant_id, country["gid"])
if plant is None:
raise HTTPException(404, "Nonexistent power plant")
generator_summary = await get_plant_generator_summary(plant_id)
if "wikidata" in plant["tags"]:
wd = await get_wikidata(plant["tags"]["wikidata"])
else:
wd = None
image_data = None
if (
wd
and "P18" in wd["claims"]
and wd["claims"]["P18"][0]["mainsnak"]["datatype"] == "commonsMedia"
):
image_data = await get_commons_thumbnail(
wd["claims"]["P18"][0]["mainsnak"]["datavalue"]["value"], 400
)
ref_tags = []
for k, v in plant["tags"].items():
if k.startswith("ref:") or k in ["repd:id"]:
for split_val in v.split(";"):
ref_tags.append((k, split_val))
return templates.TemplateResponse(
"plant_detail.html",
{
"construction": True,
"plant": plant,
"request": request,
"generator_summary": generator_summary,
"country": country["union"],
"wikidata": wd,
"image_data": image_data,
"ref_tags": ref_tags,
},
)
import wikidata # noqa
| 2.140625 | 2 |
setup.py | hhatto/chatpy | 0 | 12787183 | <reponame>hhatto/chatpy
from setuptools import setup
from pip.req import parse_requirements
from pip.download import PipSession
long_desc = file("README.rst").read()
install_reqs = parse_requirements('requirements.txt', session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]
setup(
name='chatpy',
version='0.1',
packages=['tests', 'chatpy'],
url='https://github.com/aqn/chatpy',
license='MIT',
author='aqn',
author_email='<EMAIL>',
description='Chatwork API for Python',
long_description=long_desc,
install_requires=reqs
)
| 1.640625 | 2 |
boto3/iam.py | yinbiao/lazyaws | 0 | 12787184 | <gh_stars>0
# -*- coding=utf-8 -*-
#!/bin/env python
import boto3
from include.tools import fprint
from policydocument import policy
class IamApi(object):
def __init__(self):
self.iam_client = boto3.client('iam')
def create_user(self, username, path="/"):
if not path.endswith("/"):
path = path + "/"
return self.iam_client.create_user(Path=path, UserName=username)
def create_login_profile(self,username, password, **kwargs):
        '''Create a console login password (login profile) for the user
args:
UserName (string) -- [REQUIRED]
Password (string) -- [REQUIRED]
PasswordResetRequired (boolean) -- Specifies whether the user is required to
set a new password on next sign-in.
returns:
{
'LoginProfile': {
'UserName': 'string',
'CreateDate': datetime(2015, 1, 1),
'PasswordResetRequired': True|False
}
}
'''
return self.iam_client.create_login_profile(UserName=username,
Password=password, **kwargs)
def update_login_profile(self, username, password, **kwargs):
# returns : None
return self.iam_client.update_login_profile(UserName=username, Password=password, **kwargs)
def create_policy(self, policyname, policydocument, **kwargs):
        '''Create a managed IAM policy
Args:
PolicyName: 'string', [REQUIRED]
Path: 'string',
PolicyDocument: 'string', [REQUIRED]
Description: 'string'
Returns:
{
'Policy': {
'PolicyName': 'string',
'PolicyId': 'string',
'Arn': 'string',
'Path': 'string',
'DefaultVersionId': 'string',
'AttachmentCount': 123,
'IsAttachable': True|False,
'Description': 'string',
'CreateDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1)
}
}
'''
return self.iam_client.create_policy(PolicyName=policyname,
PolicyDocument=policydocument,
**kwargs)
def get_user(self, username):
return self.iam_client.get_user(UserName=username)
def get_account_summary(self):
return self.iam_client.get_account_summary()
def get_account_authorization_details(self,*args, **kwargs):
return self.iam_client.get_account_authorization_details(
Filter=args, **kwargs)
def get_credential_report(self):
return self.iam_client.get_credential_report()
def get_policy(self,arn):
return self.iam_client.get_policy(PolicyArn=arn)
def list_policies(self,**kwargs):
return self.iam_client.list_policies(**kwargs)
def list_user_policies(self, username, **kwargs):
return self.iam_client.list_user_policies(UserName=username, **kwargs)
if __name__ == '__main__':
api = IamApi()
#api.create_user('lonay','/admin')
#print api.get_user('lonay')
#print api.get_account_summary()
# fprint( api.get_account_authorization_details("User","Role",
# **{'MaxItems':100}) )
#print api.get_credential_report()
#fprint( api.get_user() )
#fprint( api.list_policies(**{"Scope":"All","OnlyAttached":False}) )
#s3policy = policy.S3Policy(pid="3333")
#c = { "DateGreaterThan" : {
# "aws:CurrentTime" : "2013-12-15T12:00:00Z"
# }}
#print s3policy.S3BucketUser(['aaa','bbb'],"user1","s3:GetBucketTagging",condition=c)
fprint( api.update_login_profile('lonay','lonay821') )
| 2.125 | 2 |
tests/conftest.py | gsvolt/cle_parcel_lookup | 1 | 12787185 | import pytest
from cle_parcel_lookup import create_app
@pytest.fixture
def app():
return create_app()
@pytest.fixture
def client(app):
return app.test_client()
| 1.398438 | 1 |
10 Days of Statistics/Day 9 Multiple Linear Regression.py | ersincebi/hackerrank | 0 | 12787186 | import numpy as np
m,n = [int(i) for i in '2 7'.strip().split(' ')]
data1=[
'0.18 0.89 109.85',
'1.0 0.26 155.72',
'0.92 0.11 137.66',
'0.07 0.37 76.17',
'0.85 0.16 139.75',
'0.99 0.41 162.6',
'0.87 0.47 151.77'
]
X = []
Y = []
for item in data1:
data = item.strip().split(' ')
X.append(data[:m])
Y.append(data[m:])
data2 = [
'0.49 0.18',
'0.57 0.83',
'0.56 0.64',
'0.76 0.18'
]
X_new = []
for item in data2:
X_new.append(item.strip().split(' '))
X = np.array(X,float)
Y = np.array(Y,float)
X_new = np.array(X_new,float)
#center
X_R = X-np.mean(X,axis=0)
Y_R = Y-np.mean(Y)
#calculate beta
beta = np.dot(np.linalg.inv(np.dot(X_R.T,X_R)),np.dot(X_R.T,Y_R))
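# (the line above is the closed-form least-squares fit on the centered data,
#  i.e. the normal equations: beta = (X_R^T X_R)^(-1) X_R^T Y_R)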
#predict
X_new_R = X_new-np.mean(X,axis=0)
Y_new_R = np.dot(X_new_R,beta)
Y_new = Y_new_R + np.mean(Y)
#print
for i in Y_new:
print(round(float(i),2)) | 3 | 3 |
ziggonext/__init__.py | geertsmichael/ziggonext-python | 6 | 12787187 | <reponame>geertsmichael/ziggonext-python<filename>ziggonext/__init__.py<gh_stars>1-10
"""Python client for Ziggo Next."""
from .ziggonext import ZiggoNext
from .models import ZiggoRecordingSingle, ZiggoRecordingShow
from .ziggonextbox import ZiggoNextBox
from .const import ONLINE_RUNNING, ONLINE_STANDBY
from .exceptions import ZiggoNextAuthenticationError, ZiggoNextConnectionError | 1.5625 | 2 |
data/python_templates/material.py | botamochi0x12/Python-Roguelike-Framework | 25 | 12787188 | from components.material import Material
# This Assumes most pieces of armor will have 1 AC as base.
Skin = Material('skin', 'Skin', hardness=0, sharpness=0, potency=0.2, weight=0.1, value=0)
Flesh = Material('flesh', 'Flesh', hardness=0, sharpness=0, potency=0.2, weight=0.15, value=0)
Fur = Material('fur', 'Fur', hardness=0.1, sharpness=0, potency=0.2, weight=1, value=1)
Leather = Material('leather', 'Leather', hardness=0.1, sharpness=0, potency=0.2, weight=1, value=1)
StuddedLeather = Material('studded_leather', 'Studded Leather', hardness=0.2, sharpness=0, potency=0.2, weight=1.3, value=4.5)
Wood = Material('wood', 'Wood', hardness=0.3, sharpness=0.4, potency=0.5, weight=3, value=0.5)
Scale = Material('scale', 'Scale', hardness=0.4, sharpness=0.5, potency=0.4, weight=4.5, value=5)
Bone = Material('bone', 'Bone', hardness=0.5, sharpness=0.5, potency=0.5, weight=5, value=1)
Stone = Material('stone', 'Stone', hardness=0.5, sharpness=0.2, potency=0.1, weight=6, value=0.1)
Silver = Material('silver', 'Silver', hardness=0.5, sharpness=0.5, potency=1, weight=6, value=125)
Gold = Material('gold', 'Gold', hardness=0.5, sharpness=0.5, potency=2, weight=6, value=250)
Chain = Material('chain', 'Chain', hardness=0.6, sharpness=0.3, potency=0.5, weight=5.5, value=7.5)
Bronze = Material('bronze', 'Bronze', hardness=0.6, sharpness=0.8, potency=0.6, weight=7, value=8)
Iron = Material('iron', 'Iron', hardness=0.7, sharpness=1, potency=0.6, weight=5.85, value=20)
Steel = Material('steel', 'Steel', hardness=0.8, sharpness=1.2, potency=0.8, weight=6.5, value=150)
material_templates = {
Skin.uid: Skin,
Flesh.uid: Flesh,
Fur.uid: Fur,
Leather.uid: Leather,
StuddedLeather.uid: StuddedLeather,
Wood.uid: Wood,
Scale.uid: Scale,
Bone.uid: Bone,
Stone.uid: Stone,
Silver.uid: Silver,
Gold.uid: Gold,
Chain.uid: Chain,
Bronze.uid: Bronze,
Iron.uid: Iron,
Steel.uid: Steel
}
| 2.71875 | 3 |
Ngo/FinalProj/accounts/migrations/0013_auto_20201010_1226.py | Devang-25/Donate-Cart-for-NGO-By-Devang-Sharma | 0 | 12787189 | <reponame>Devang-25/Donate-Cart-for-NGO-By-Devang-Sharma
# Generated by Django 3.0.6 on 2020-10-10 06:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_auto_20201010_1225'),
]
operations = [
migrations.RenameField(
model_name='donationdetail',
old_name='requirement',
new_name='req',
),
migrations.RenameField(
model_name='ngorequirementdetail',
old_name='requirement',
new_name='req',
),
]
| 1.703125 | 2 |
sanic_peewee/async_manager.py | daniel4git/sanic-peewee | 3 | 12787190 | <filename>sanic_peewee/async_manager.py
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
@Author: <NAME>
@Date: 01-Apr-2017
@Email: <EMAIL>
# @Last modified by: <NAME>
# @Last modified time: 08-Apr-2017
@License: Apache License Version 2.0
"""
__all__ = ["AsyncManager"]
from peewee_async import Manager, execute
from functools import partial
class AsyncManager(Manager):
"""
async manager from peewee_async.Manager.
here we use it in constructing the aio object in model's partent class
"""
def __init__(self, _model_class, *args, **kwargs):
super(AsyncManager, self).__init__(*args, **kwargs)
self._model_class = _model_class
self.database.allow_sync = False
def _do_fill(self, method, *args, **kwargs):
_class_method = getattr(super(AsyncManager, self), method)
pf = partial(_class_method, self._model_class)
return pf(*args, **kwargs)
def create(self, *args, **kwargs):
"""Create a new object saved to database.
Example:
await <Class>.aio.create(\*args,\*\*kwargs)
"""
return self._do_fill('create', *args, **kwargs)
def get(self, *args, **kwargs):
"""Get the model instance.
Example:
async def my_async_func():
obj1 = await <Class>.aio.get(MyModel, id=1)
obj2 = await <Class>.aio.get(MyModel, MyModel.id == 1)
obj3 = await <Class>.aio.get(MyModel.select().where(MyModel.id == 1))
All will return MyModel instance with id = 1
"""
return self._do_fill('get', *args, **kwargs)
def get_or_create(self, defaults=None, **kwargs):
"""Try to get an object or create it with the specified defaults.
Return 2-tuple containing the model instance and a boolean
indicating whether the instance was created.
"""
return self._do_fill('get_or_create', defaults=defaults, **kwargs)
def create_or_get(self, **kwargs):
"""Try to create new object with specified data. If object already
exists, then try to get it by unique fields.
"""
return self._do_fill('create_or_get', **kwargs)
def execute(self, query):
"""
Parameters:
query (peewee.Query): - 要执行的请求
Return:
object: - 将方法对应偏函数运行,结果看所运行的请求结果是什么
"""
return execute(query)
| 2.828125 | 3 |
python-client/swagger_client/models/vehicle.py | hallelulius/VastHinken | 0 | 12787191 | # coding: utf-8
"""
Reseplaneraren
Provides access to Västtrafik journey planner
OpenAPI spec version: 1.10.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Vehicle(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'lcolor': 'str',
'prod_class': 'str',
'bcolor': 'str',
'direction': 'int',
'name': 'str',
'gid': 'str',
'delay': 'int',
'y': 'float',
'x': 'float'
}
attribute_map = {
'lcolor': 'lcolor',
'prod_class': 'prodClass',
'bcolor': 'bcolor',
'direction': 'direction',
'name': 'name',
'gid': 'gid',
'delay': 'delay',
'y': 'y',
'x': 'x'
}
def __init__(self, lcolor=None, prod_class=None, bcolor=None, direction=None, name=None, gid=None, delay=None, y=None, x=None):
"""
Vehicle - a model defined in Swagger
"""
self._lcolor = None
self._prod_class = None
self._bcolor = None
self._direction = None
self._name = None
self._gid = None
self._delay = None
self._y = None
self._x = None
self.lcolor = lcolor
self.prod_class = prod_class
self.bcolor = bcolor
self.direction = direction
self.name = name
self.gid = gid
self.delay = delay
self.y = y
self.x = x
@property
def lcolor(self):
"""
Gets the lcolor of this Vehicle.
Line color of the journey
:return: The lcolor of this Vehicle.
:rtype: str
"""
return self._lcolor
@lcolor.setter
def lcolor(self, lcolor):
"""
Sets the lcolor of this Vehicle.
Line color of the journey
:param lcolor: The lcolor of this Vehicle.
:type: str
"""
if lcolor is None:
raise ValueError("Invalid value for `lcolor`, must not be `None`")
self._lcolor = lcolor
@property
def prod_class(self):
"""
Gets the prod_class of this Vehicle.
Product class
:return: The prod_class of this Vehicle.
:rtype: str
"""
return self._prod_class
@prod_class.setter
def prod_class(self, prod_class):
"""
Sets the prod_class of this Vehicle.
Product class
:param prod_class: The prod_class of this Vehicle.
:type: str
"""
if prod_class is None:
raise ValueError("Invalid value for `prod_class`, must not be `None`")
allowed_values = ["VAS", "LDT", "REG", "BUS", "BOAT", "TRAM", "TAXI"]
if prod_class not in allowed_values:
raise ValueError(
"Invalid value for `prod_class` ({0}), must be one of {1}"
.format(prod_class, allowed_values)
)
self._prod_class = prod_class
@property
def bcolor(self):
"""
Gets the bcolor of this Vehicle.
Background color of the journey
:return: The bcolor of this Vehicle.
:rtype: str
"""
return self._bcolor
@bcolor.setter
def bcolor(self, bcolor):
"""
Sets the bcolor of this Vehicle.
Background color of the journey
:param bcolor: The bcolor of this Vehicle.
:type: str
"""
if bcolor is None:
raise ValueError("Invalid value for `bcolor`, must not be `None`")
self._bcolor = bcolor
@property
def direction(self):
"""
Gets the direction of this Vehicle.
Direction of the vehicle. This is a value between 0 and 31 which is describing a direction vector
:return: The direction of this Vehicle.
:rtype: int
"""
return self._direction
@direction.setter
def direction(self, direction):
"""
Sets the direction of this Vehicle.
Direction of the vehicle. This is a value between 0 and 31 which is describing a direction vector
:param direction: The direction of this Vehicle.
:type: int
"""
if direction is None:
raise ValueError("Invalid value for `direction`, must not be `None`")
self._direction = direction
@property
def name(self):
"""
Gets the name of this Vehicle.
Journey name
:return: The name of this Vehicle.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Vehicle.
Journey name
:param name: The name of this Vehicle.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
@property
def gid(self):
"""
Gets the gid of this Vehicle.
Service GID
:return: The gid of this Vehicle.
:rtype: str
"""
return self._gid
@gid.setter
def gid(self, gid):
"""
Sets the gid of this Vehicle.
Service GID
:param gid: The gid of this Vehicle.
:type: str
"""
if gid is None:
raise ValueError("Invalid value for `gid`, must not be `None`")
self._gid = gid
@property
def delay(self):
"""
Gets the delay of this Vehicle.
Current delay of the vehicle in minutes, can be negative, zero or positive
:return: The delay of this Vehicle.
:rtype: int
"""
return self._delay
@delay.setter
def delay(self, delay):
"""
Sets the delay of this Vehicle.
Current delay of the vehicle in minutes, can be negative, zero or positive
:param delay: The delay of this Vehicle.
:type: int
"""
if delay is None:
raise ValueError("Invalid value for `delay`, must not be `None`")
self._delay = delay
@property
def y(self):
"""
Gets the y of this Vehicle.
Y coordinate (latitude) of the position in WGS84 * 1000000
:return: The y of this Vehicle.
:rtype: float
"""
return self._y
@y.setter
def y(self, y):
"""
Sets the y of this Vehicle.
Y coordinate (latitude) of the position in WGS84 * 1000000
:param y: The y of this Vehicle.
:type: float
"""
if y is None:
raise ValueError("Invalid value for `y`, must not be `None`")
self._y = y
@property
def x(self):
"""
Gets the x of this Vehicle.
X coordinate (longitude) of the position in WGS84 * 1000000
:return: The x of this Vehicle.
:rtype: float
"""
return self._x
@x.setter
def x(self, x):
"""
Sets the x of this Vehicle.
X coordinate (longitude) of the position in WGS84 * 1000000
:param x: The x of this Vehicle.
:type: float
"""
if x is None:
raise ValueError("Invalid value for `x`, must not be `None`")
self._x = x
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Vehicle):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 1.84375 | 2 |
aziende/admin.py | luca772005/studio | 0 | 12787192 | <filename>aziende/admin.py<gh_stars>0
from django.contrib import admin
from aziende.models import Azienda
# Register your models here.
@admin.register(Azienda)
class AziendaAdmin(admin.ModelAdmin):
pass | 1.414063 | 1 |
002_TestMotor/main.py | DaliSummer/MindstormsEV3_py | 0 | 12787193 | <reponame>DaliSummer/MindstormsEV3_py
#!/usr/bin/env pybricks-micropython
from pybricks.hubs import EV3Brick
from pybricks.ev3devices import Motor
from pybricks.parameters import Port
# Initialize the EV3 Brick.
ev3 = EV3Brick()
# Initialize a motor at port B.
motorB = Motor(Port.B)
# Initialize a motor at port C.
motorC = Motor(Port.C)
# Play a beep sound.
ev3.speaker.beep()
# Run the motor up to 100 degrees per second. To a target angle of 180 degrees.
motorB.run_target(100, 180)
# Play another beep sound.
ev3.speaker.beep(frequency=5000, duration=500)
# Run the motor up to 500 degrees per second. To a target angle of 270 degrees.
motorC.run_target(500, 270)
# Run the motor and continue the program.
motorB.run_target(1000, -500, wait=False)
# Run the motor and wait it to reach the target before continuing the program.
motorC.run_target(1000, -500, wait=True)
| 2.8125 | 3 |
daira/migrations/0009_auto_20200816_1259.py | medram/daira | 1 | 12787194 | <filename>daira/migrations/0009_auto_20200816_1259.py
# Generated by Django 3.0.2 on 2020-08-16 11:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('daira', '0008_auto_20200712_1826'),
]
operations = [
migrations.RemoveField(
model_name='individual',
name='born',
),
migrations.AddField(
model_name='individual',
name='ar_firstname',
field=models.CharField(blank=True, max_length=32, null=True),
),
migrations.AddField(
model_name='individual',
name='ar_lastname',
field=models.CharField(blank=True, max_length=32, null=True),
),
migrations.AddField(
model_name='individual',
name='born_d',
field=models.IntegerField(blank=True, null=True, verbose_name='Day of birth'),
),
migrations.AddField(
model_name='individual',
name='born_m',
field=models.IntegerField(blank=True, null=True, verbose_name='Month of birth'),
),
migrations.AddField(
model_name='individual',
name='born_no_d_m',
field=models.BooleanField(blank=True, default=False, null=True, verbose_name="I don't have day & month of my birthday"),
),
migrations.AddField(
model_name='individual',
name='born_y',
field=models.IntegerField(blank=True, null=True, verbose_name='Year of birth'),
),
migrations.AddField(
model_name='individual',
name='photo_1',
field=models.ImageField(blank=True, null=True, upload_to='individuals/cin_photos', verbose_name='Front face of CIN'),
),
migrations.AddField(
model_name='individual',
name='photo_2',
field=models.ImageField(blank=True, null=True, upload_to='individuals/cin_photos', verbose_name='Back face of CIN'),
),
migrations.AlterField(
model_name='address',
name='mol7aka',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='addresses', to='daira.Mol7aka', verbose_name='Administrative attache'),
),
migrations.AlterField(
model_name='individual',
name='mol7aka',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='individuals', to='daira.Mol7aka', verbose_name='Administrative attache'),
),
migrations.AlterField(
model_name='relationship',
name='mol7aka',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='relationships', to='daira.Mol7aka', verbose_name='Administrative attache'),
),
migrations.AlterField(
model_name='report',
name='mol7aka',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='reports', to='daira.Mol7aka', verbose_name='Administrative attache'),
),
migrations.AlterField(
model_name='street',
name='mol7aka',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='streets', to='daira.Mol7aka', verbose_name='Administrative attache'),
),
]
| 1.640625 | 2 |
signal/__init__.py | indranilsinharoy/iutils | 0 | 12787195 | # signal utils
| 0.992188 | 1 |
dizoo/classic_control/cartpole/config/cartpole_dqfd_config.py | jayyoung0802/DI-engine | 1 | 12787196 | from easydict import EasyDict
cartpole_dqfd_config = dict(
exp_name='cartpole_dqfd_seed0',
env=dict(
collector_env_num=8,
evaluator_env_num=5,
n_evaluator_episode=5,
stop_value=195,
),
policy=dict(
cuda=True,
priority=True,
model=dict(
obs_shape=4,
action_shape=2,
encoder_hidden_size_list=[128, 128, 64],
dueling=True,
),
nstep=3,
discount_factor=0.97,
learn=dict(
batch_size=64,
learning_rate=0.001,
lambda1=1, # n-step return
lambda2=3.0, # supervised loss
            # setting this to 0 (L2 loss = 0) together with expert_replay_buffer_size = 0
            # and lambda1 = 0 recovers the plain one-step PDD DQN
lambda3=0, # L2 regularization
per_train_iter_k=10,
expert_replay_buffer_size=10000, # justify the buffer size of the expert buffer
),
collect=dict(
n_sample=8,
# Users should add their own model path here. Model path should lead to a model.
# Absolute path is recommended.
# In DI-engine, it is ``exp_name/ckpt/ckpt_best.pth.tar``.
model_path='model_path_placeholder',
),
        # note: eval_freq is the number of training iterations between evaluations
eval=dict(evaluator=dict(eval_freq=50, )),
other=dict(
eps=dict(
type='exp',
start=0.95,
end=0.1,
decay=10000,
),
replay_buffer=dict(replay_buffer_size=20000, ),
),
),
)
cartpole_dqfd_config = EasyDict(cartpole_dqfd_config)
main_config = cartpole_dqfd_config
cartpole_dqfd_create_config = dict(
env=dict(
type='cartpole',
import_names=['dizoo.classic_control.cartpole.envs.cartpole_env'],
),
env_manager=dict(type='base'),
policy=dict(type='dqfd'),
)
cartpole_dqfd_create_config = EasyDict(cartpole_dqfd_create_config)
create_config = cartpole_dqfd_create_config
if __name__ == "__main__":
# or you can enter `ding -m serial_dqfd -c cartpole_dqfd_config.py -s 0`
# then input ``cartpole_dqfd_config.py`` upon the instructions.
# The reason we need to input the dqfd config is we have to borrow its ``_get_train_sample`` function
# in the collector part even though the expert model may be generated from other Q learning algos.
from ding.entry.serial_entry_dqfd import serial_pipeline_dqfd
from dizoo.classic_control.cartpole.config import cartpole_dqfd_config, cartpole_dqfd_create_config
expert_main_config = cartpole_dqfd_config
expert_create_config = cartpole_dqfd_create_config
serial_pipeline_dqfd((main_config, create_config), (expert_main_config, expert_create_config), seed=0)
| 1.929688 | 2 |
base/site-packages/wi_cache/wicache.py | B-ROY/TESTGIT | 2 | 12787197 | <gh_stars>1-10
#encoding = utf-8
import sys, os
from django.core.cache import parse_backend_uri
from django.conf import settings
try:
memcache_settings = settings.memcache_settings
except:
mdefault = "memcached://127.0.0.1:11211/"
memcache_settings = {"CACHE_BACKEND": mdefault, "PAGE_CACHE_BACKEND": mdefault}
memcache_settings.update({"NINGX_CACHE_BACKEND": mdefault})
try:
import pylibmc
scheme, host, params = parse_backend_uri(memcache_settings.get("CACHE_BACKEND"))
model_cache = pylibmc.Client([host])
except Exception, e:
import memcache
scheme, host, params = parse_backend_uri(memcache_settings.get("CACHE_BACKEND"))
model_cache = memcache.Client([host])
try:
import pylibmc
    scheme, host, params = parse_backend_uri(memcache_settings.get("NINGX_CACHE_BACKEND"))
nginx_cache = pylibmc.Client([host])
except Exception, e:
import memcache
scheme, host, params = parse_backend_uri(memcache_settings.get("NINGX_CACHE_BACKEND"))
nginx_cache = memcache.Client([host])
try:
import pylibmc
scheme, host, params = parse_backend_uri(memcache_settings.get("PAGE_CACHE_BACKEND"))
page_cache = pylibmc.Client([host])
except Exception, e:
import memcache
scheme, host, params = parse_backend_uri(memcache_settings.get("PAGE_CACHE_BACKEND"))
page_cache = memcache.Client([host]) | 2.140625 | 2 |
apps/reviews/views/__init__.py | Haizza1/RandomCameras-Backend | 1 | 12787198 | <reponame>Haizza1/RandomCameras-Backend
from .lens import LensReviewsViewSet
from .cameras import CamerasReviewsViewSet | 1.03125 | 1 |
src/segmentation_inference.py | emrecanaltinsoy/chromosome-semantic-segmentation | 2 | 12787199 | import argparse
import yaml
import os
from glob import glob
import inspect
import sys
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import time
import numpy as np
import torch
from torch.utils.data import DataLoader
import skimage.io as io
from segmentation_dataset import RawChromosomeDataset as Dataset
from loss import DiceLoss, evals
from models.UNet import UNet
from models.ResUNet import ResUNet
from models.PreactivationResUNet import PreactResUNet
from models.CENet import CE_Net
from models.Segnet import SegNet
from models.AttentionUnet import AttU_Net
from models.FCN import FCN_ResNet101
from models.Unet_nested import UNet_Nested
from models.DeepLabV3 import Deeplabv3_ResNet101
from models.PSPNet import PSPNet
def main(args):
# args.model = "preactivation_resunet"
# args.model_path = "preactivation_resunet-20210416T1703"
# args.weight_num = 1
# args.images = "./datasets/raw_chromosome_data".format(Dataset.name)
# args.batch_size = 2
# args.test_results = False
if args.model == "unet":
model = UNet(
in_channels=Dataset.in_channels,
num_classes=Dataset.num_classes,
init_features=32,
)
net_name = UNet.net_name
elif args.model == "resunet":
model = ResUNet(
in_channels=Dataset.in_channels,
num_classes=Dataset.num_classes,
init_features=32,
)
net_name = "resunet"
elif args.model == "preactivation_resunet":
model = PreactResUNet(
in_channels=Dataset.in_channels,
num_classes=Dataset.num_classes,
init_features=32,
)
net_name = "preactivation_resunet"
elif args.model == "cenet":
model = CE_Net(in_channels=Dataset.in_channels, num_classes=Dataset.num_classes)
net_name = "cenet"
elif args.model == "segnet":
model = SegNet(in_channels=Dataset.in_channels, num_classes=Dataset.num_classes)
net_name = "segnet"
elif args.model == "nested_unet":
model = UNet_Nested(
in_channels=Dataset.in_channels, num_classes=Dataset.num_classes
)
net_name = "nested_unet"
elif args.model == "attention_unet":
model = AttU_Net(
in_channels=Dataset.in_channels, num_classes=Dataset.num_classes
)
net_name = "attention_unet"
elif args.model == "fcn_resnet101":
model = FCN_ResNet101(in_channels=1, num_classes=3)
net_name = "fcn_resnet101"
elif args.model == "deeplabv3_resnet101":
model = Deeplabv3_ResNet101(in_channels=1, num_classes=3)
net_name = "deeplabv3_resnet101"
elif args.model == "pspnet":
model = PSPNet(
num_classes=Dataset.num_classes, pretrained=False, backend="resnet101"
)
net_name = "pspnet"
device = torch.device("cpu" if not torch.cuda.is_available() else args.device)
model.to(device)
weights_dir = "output/{}/{}/weights".format(Dataset.name, args.model_path)
print(weights_dir)
model_name = glob(weights_dir + "/{}-{}*".format(net_name, args.weight_num))[0]
state_dict = torch.load(model_name, map_location=device)
model.load_state_dict(state_dict)
test_dir = "output/{}/{}/test".format(Dataset.name, args.model_path)
model.eval()
dsc = DiceLoss()
evaluations_np = []
total_dsc_loss = []
loader = data_loaders(args)
loaders = {"test": loader}
start = time.time()
print("clock started")
test_img_num = 1
for i, data in enumerate(loaders["test"], 0):
x, y_true = data
x, y_true = x.to(device, dtype=torch.float), y_true.to(
device, dtype=torch.float
)
with torch.set_grad_enabled(False):
y_pred = model(x)
dsc_loss = dsc(y_pred, y_true)
evaluations_ = evals(y_pred, y_true)
evaluations_np += evaluations_
total_dsc_loss.append(dsc_loss.item())
if args.test_results:
y_pred_np = y_pred.detach().cpu().numpy()
x_np = x.detach().cpu().numpy()
for img_num in range(y_pred_np.shape[0]):
for mask_num in range(y_pred_np.shape[1]):
io.imsave(
os.path.join(
test_dir,
"{}_label{}.png".format(test_img_num, mask_num),
),
y_pred_np[img_num, mask_num, :, :],
)
for mask_num in range(x_np.shape[1]):
io.imsave(
os.path.join(test_dir, "%d_image.png" % test_img_num),
x_np[img_num, mask_num, :, :] * 255,
)
test_img_num += 1
end = time.time()
print("{} seconds past".format(end - start))
evaluations_np = np.array(evaluations_np)
with open(
"output/{}/{}/test-eval.npy".format(Dataset.name, args.model_path), "wb"
) as f:
np.save(f, evaluations_np)
mean_dsc_loss = float(np.mean(total_dsc_loss))
mean_DSC = 1 - mean_dsc_loss
metrics = {
"mean_dsc_loss": mean_dsc_loss,
"mean_DSC": mean_DSC,
}
with open(
"output/{}/{}/metrics.yaml".format(Dataset.name, args.model_path), "w"
) as fp:
yaml.dump(metrics, fp)
print(f"mean dsc loss={mean_dsc_loss}")
print(f"mean DSC={mean_DSC}")
def data_loaders(args):
dataset_test = datasets(args)
return DataLoader(
dataset_test,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
)
def datasets(args):
return Dataset(
args,
images_dir=args.images,
subset="test",
image_size=args.image_size,
random_sampling=False,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Semantic segmentation of G-banding chromosome Images"
)
parser.add_argument(
"--model",
type=str,
default="preactivation_resunet",
help="choose model",
)
parser.add_argument(
"--weight-num",
type=int,
default=0,
help="weight number for inference",
)
parser.add_argument(
"--model-path", type=str, default="", help="path to weights file"
)
parser.add_argument(
"--batch-size",
type=int,
default=2,
help="input batch size for training (default: 2)",
)
parser.add_argument(
"--device",
type=str,
default="cuda:0",
help="device for training (default: cuda:0)",
)
parser.add_argument(
"--workers",
type=int,
default=1,
help="number of workers for data loading (default: 1)",
)
parser.add_argument(
"--images",
type=str,
default="./datasets/{}_data/train".format(Dataset.name),
help="root folder with images",
)
parser.add_argument(
"--image-size",
type=int,
default=Dataset.img_size,
help="target input image size (default: 256x256)",
)
parser.add_argument(
"--test-results",
type=bool,
default=False,
help="Do you want to output the test results? (defauld: False)",
)
args = parser.parse_args()
main(args)
| 1.742188 | 2 |
MRI_reconstruction/Homogenizer.py | inbalalo/MRI_rec | 0 | 12787200 | import os
import struct
import numpy as np
import xarray as xr
import netCDF4 as ds
from pathlib import Path
import matplotlib.pyplot as plt
import struct
import itertools
import Homogenizer_GUI
from enum import Enum
from collections import OrderedDict
import pickle
class UserPrefs(Enum):
ScanFoldersPath = 0
CalculateReconstructedImages = 1
CalculateFieldMaps = 2
CalculateInterpolatedFieldMap = 3
SaveReconstructedImages = 4
SaveFieldMaps = 5
SaveInterpolatedFieldMap = 6
ShowReconstructedImages = 7
ShowFieldMaps = 8
ShowInterpolatedFieldMap = 9
class Homogenizer:
def __init__(self):
self.hGUI = None
self.submit_button = "Submit"
self.gamma = 48.52*10**6
self.te_array = []
self.delta_te = 0.0001 #standard initialization
self.dimensions = np.array([128,128]) #standard initialization
self.scan_folders_path = None
self.save_path = None
self.fids_dict = OrderedDict([])
self.reconstructed_image_dict = OrderedDict([])
self.field_map_dict = OrderedDict([])
self.interpolated_field_map = OrderedDict([])
def get_input(self, user_pref: UserPrefs):
return self.hGUI.user_prefs[user_pref.value]
def display_image(self, image_list, abs_values = False):
'''
        Displays the given images. Set abs_values to True to display the absolute values of the images.
'''
for image in image_list:
if abs_values:
image = abs(image)
plt.title("Reconstructed Image")
else:
plt.title("Field Map - B[T] Values as function of location")
plt.xlabel("Location")
plt.ylabel("Location")
plt.imshow(image)
plt.colorbar()
plt.show()
def get_tes(self, folder_path):
'''
Finds the TE value in a specific scan (the information exists in the 'method' file of each scan)
Then creates an array of all TEs
'''
dir_list = os.listdir(folder_path)
for scan_dir in dir_list:
file_path = folder_path + '\\' + scan_dir
if os.path.isdir(file_path):
method_path = self.find_file_by_name(file_path, 'method')
with open(method_path, mode='rb') as file:
method_r = file.read()
f=method_r.find(b'EchoTime=')
te_locked=method_r[f+9:f+12]
te_str=str(te_locked)[2:5]
if (str(te_str).find('n') != -1):
te=int(te_str[0])
else:
te=float(te_str)
self.te_array.append(te*10**-3)
del self.te_array[-1]
self.te_array = np.array(self.te_array)
self.delta_te = self.te_array[1] - self.te_array[0]
def get_dimensions(self, folder_path):
'''
Finds the dimensions of the matrix (the information exists in the 'method' file of each scan)
'''
dir_list = os.listdir(folder_path)
for scan_dir in dir_list:
file_path = folder_path + '\\' + scan_dir
if os.path.isdir(file_path):
method_path = self.find_file_by_name(file_path, 'method')
break
with open(method_path, mode='rb') as file:
method_r = file.read()
f=method_r.find(b'PVM_Matrix=( 2 )\n')
dimension_locked=method_r[f+17:f+24]
arr=np.zeros(2, np.int16)
arr[0]=(str(dimension_locked)[2:5])
arr[0]=int(arr[0])
arr[1]=(str(dimension_locked)[6:9])
arr[1]=int(arr[1])
self.dimensions = arr
pickle.dump(self.dimensions, open("dimensions.dat","wb"))
def find_file_by_name(self, containing_folder, name_string):
'''
        Finds and returns the path of the file with the given name inside the given folder
'''
pickle.dump(containing_folder, open("containing_folder.dat","wb"))
pickle.dump(name_string, open("name_string.dat","wb"))
dir_list = os.listdir(containing_folder)
for file_name in dir_list:
if file_name == name_string:
file_path = containing_folder + '\\' + file_name
return file_path
def save_arrays_to_disk(self, save_path, arrays_dictionary: dict, name_prefix: str):
"""
Converts every numpy array in arrays_dictionary to xarray and save it in the given path as a NetCDF file.
"""
if not os.path.exists(save_path):
os.makedirs(save_path)
for key, array in arrays_dictionary.items():
x_arr = xr.DataArray(array)
file_name = name_prefix + str(key)
x_arr.to_netcdf(f'{save_path}\\{file_name}.nc', mode='w')
def reconstruct_images_from_fids(self, fid_dict):
for name_prefix, fid in fid_dict.items():
self.reconstructed_image_dict[name_prefix] = self.reconstruct_image(fid, self.dimensions)
def reconstruct_image(self, fid_arr, dimensions):
'''
Calculates the K space matrix -> calculates the
reconstructed image and returns it
'''
pickle.dump(fid_arr, open("fid_arr.dat","wb"))
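        # The raw FID vector stores interleaved samples: even indices hold the real parts
        # and odd indices the imaginary parts; de-interleave them into one complex vector.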
real_vals = fid_arr[:-1:2]
imag_vals = fid_arr[1::2]
complex_vals = real_vals + 1j*imag_vals
if (len(fid_arr) == dimensions[0]*dimensions[1]*2):
k_space_scan = np.reshape(complex_vals,(dimensions[0],dimensions[1]))
k_casting = k_space_scan.astype(complex)
img = np.fft.fftshift(np.fft.ifft2(k_casting))
return img
else:
raise IndexError('Fid_arr cannot be reshaped to these dimensions')
def calc_field_maps_from_fids (self, fid_dict, dimension):
''' Gets an ordered dictionary of FID files and calculates dictionary of field maps
by running on pairs of FID files
'''
pickle.dump(fid_dict, open("fid_dict.dat","wb"))
self.reconstruct_images_from_fids(fid_dict)
image_pairs = self.pairwise(self.reconstructed_image_dict.values())
name_index = 0
name_list = list(self.reconstructed_image_dict.keys())
for img1, img2 in image_pairs:
field_map_prefix = name_list[name_index] + name_list[name_index+1]
name_index +=1
self.field_map_dict[field_map_prefix] = self.calc_field_map_from_reconstructed_images(img1,img2)
def calc_field_map_from_reconstructed_images(self, img1,img2):
pickle.dump(img1, open("img1.dat","wb"))
pickle.dump(img2, open("img2.dat","wb"))
phase_map = self.compute_phase(img1,img2)
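        # Relation used on the next line (a note, no new behaviour): the voxel-wise phase
        # difference between the two echoes is delta_phi = 2*pi*gamma*B*delta_te, hence
        # B = delta_phi / (2*pi*gamma*delta_te), assuming self.gamma is expressed in Hz/T
        # and self.delta_te in seconds.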
bmap = phase_map/((2*np.pi*self.gamma*(self.delta_te)))
return bmap
def compute_phase(self, img1,img2):
'''
Gets two reconstructed images and computes one phase image
'''
conj_img2 = np.conj(img2)
if (img1.shape[1] == img2.shape[0]):
multiplic_img1_img2 = conj_img2*img1
phase_map = np.angle(multiplic_img1_img2)
return phase_map
else:
raise IndexError('Size of matrices not suitable for linear multiplication')
def pairwise(self, object_list):
'''
Creates pairs of objects from a list of objects
list_of_fids -> (fid0,fid1), (fid1,fid2), (fid2, fid3), and so forth...
'''
pickle.dump(list(object_list), open("object_list.dat","wb"))
obj1, obj2 = itertools.tee(object_list)
next(obj2, None)
return zip(obj1, obj2)
def interpolate_field_map_from_fids(self, fid_dict):
'''
Gets an ordered dictionary of FID files and calculates one interpolated field map
'''
signals_amount = len(fid_dict)
self.calc_field_maps_from_fids(fid_dict, self.dimensions)
self.interpolate_field_map(list(self.field_map_dict.values()), self.te_array, self.dimensions,signals_amount)
def interpolate_field_map(self, field_maps_list,te_values, dimension, signals_amount):
'''
Calculates one interpolated field map from all the calculated field maps
'''
pickle.dump(field_maps_list, open("field_maps_list.dat","wb"))
pickle.dump(te_values, open("te_values.dat","wb"))
        pickle.dump(signals_amount, open("signals_amount.dat","wb"))
slope=np.zeros((dimension[0],dimension[1]))
value_vec_in_phase_map = np.zeros(len(field_maps_list))
for x in range(dimension[0]-1):
for y in range(dimension[1]-1):
for z in range(signals_amount-1):
value_vec_in_phase_map[z] = field_maps_list[z][x,y]
s,intercept = np.polyfit((te_values[:]),value_vec_in_phase_map,1)
slope[x,y] = (s)
interp_b=slope/self.gamma
self.interpolated_field_map = OrderedDict([('',interp_b)])
def create_fid_dict(self, folder_path):
'''
Creates an ordered dictionary of numpy arrays from fid files
'''
pickle.dump(folder_path, open("folder_path.dat","wb"))
dir_list = os.listdir(folder_path)
for scan_dir in dir_list:
file_path = folder_path + '\\' + scan_dir
if os.path.isdir(file_path):
fid_path = self.find_file_by_name(file_path, 'fid')
if isinstance(fid_path, str):
self.fids_dict[scan_dir] = self.fid_to_nparray(fid_path)
def fid_to_nparray(self, fid_path):
'''
        Opens a binary file and reads it into a numpy array
'''
pickle.dump(fid_path, open("fid_path.dat","wb"))
with open(fid_path, mode='rb') as file: # b is important -> binary
fid_r = file.read()
fid_l = list(struct.unpack("i" * ((len(fid_r) -4) // 4), fid_r[0:-4]))
fid_l.append(struct.unpack("i", fid_r[-4:])[0])
fid_arr = np.array(fid_l)
return fid_arr
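    # Note (assumption, not part of the original code): if the FID file really is
    # a flat stream of native-endian 32-bit integers, the struct-based loop above
    # is equivalent to the shorter:
    #
    #   fid_arr = np.fromfile(fid_path, dtype=np.int32)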
def start(self):
'''
        Triggers the calculations with the inputs given by the user through the GUI.
'''
self.scan_folders_path = self.hGUI.open_main_window()
# Starts job if user had pressed submit:
if self.hGUI.last_button_pressed == self.submit_button:
# Checks if user requested to save any files, and if so pops up a browser to choose path.
if (self.get_input(UserPrefs.SaveReconstructedImages)
or self.get_input(UserPrefs.SaveFieldMaps)
or self.get_input(UserPrefs.SaveInterpolatedFieldMap)
):
self.save_path = self.hGUI.request_save_path()
# Cancels the job if the user had closed the window / pressed "Cancel":
if self.hGUI.last_button_pressed != self.submit_button:
self.start()
return
if self.save_path == self.hGUI.default_folder_expression:
self.save_path = self.scan_folders_path
self.create_fid_dict(self.scan_folders_path)
self.get_dimensions(self.scan_folders_path)
self.get_tes(self.scan_folders_path)
# Checks what calculation the user had requested, and performs them:
if self.get_input(UserPrefs.CalculateReconstructedImages):
self.reconstruct_images_from_fids(self.fids_dict)
else:
if self.get_input(UserPrefs.CalculateFieldMaps):
self.calc_field_maps_from_fids(self.fids_dict, self.dimensions)
else:
self.interpolate_field_map_from_fids(self.fids_dict)
if self.get_input(UserPrefs.SaveInterpolatedFieldMap):
self.save_arrays_to_disk(self.save_path, self.interpolated_field_map,'Interpolated_field_map')
if self.get_input(UserPrefs.ShowInterpolatedFieldMap):
self.display_image(list(self.interpolated_field_map.values()))
if self.get_input(UserPrefs.SaveFieldMaps):
self.save_arrays_to_disk(self.save_path, self.field_map_dict, 'Field_map_')
if self.get_input(UserPrefs.ShowFieldMaps):
self.display_image(list(self.field_map_dict.values()))
if self.get_input(UserPrefs.SaveReconstructedImages):
[real_dict, imaginary_dict] = seperate_complex_values_dict(self.reconstructed_image_dict)
self.save_arrays_to_disk(self.save_path, real_dict, 'Reconstructed_image_real')
self.save_arrays_to_disk(self.save_path, imaginary_dict, 'Reconstructed_image_imaginary')
if self.get_input(UserPrefs.ShowReconstructedImages):
            self.display_image(list(self.reconstructed_image_dict.values()), True)
def seperate_complex_values_dict(dict):
real_dict = OrderedDict([])
imaginary_dict = OrderedDict([])
for name, complexNum in dict.items():
real_dict[name] = complexNum.real
imaginary_dict[name] = complexNum.imag
return [real_dict, imaginary_dict]
if __name__ == "__main__":
homogenizer = Homogenizer()
homogenizer.hGUI = Homogenizer_GUI.Homogenizer_GUI()
homogenizer.start()
| 2.296875 | 2 |
iterables/Exercises/gradepoints.py | WebucatorTraining/classfiles-actionable-python | 2 | 12787201 | def main():
pass # replace this with your code
main() | 1.054688 | 1 |
cocrawler/webserver.py | joye1503/cocrawler | 166 | 12787202 | <filename>cocrawler/webserver.py
import logging
import asyncio
from aiohttp import web
from . import config
LOGGER = logging.getLogger(__name__)
def make_app():
loop = asyncio.get_event_loop()
# TODO switch this to socket.getaddrinfo() -- see https://docs.python.org/3/library/socket.html
serverip = config.read('REST', 'ServerIP')
if serverip is None:
return None
serverport = config.read('REST', 'ServerPort')
increment = False
if isinstance(serverport, str) and serverport.endswith('+'):
increment = True
serverport = serverport[:-1]
app = web.Application()
app.router.add_get('/', frontpage)
app.router.add_get('/api/{name}', api)
# aiohttp 3.0 has AppRunner(). maybe I should switch to it?
# also web.run_app(app, access_log=None) to turn off logging
handler = app.make_handler()
while True:
try:
f = loop.create_server(handler, serverip, serverport)
break
except OSError as e: # address already in use
if increment:
LOGGER.info('OSError starting webserver: %s', repr(e))
serverport += 1
LOGGER.info('incrementing port to %d', serverport)
else:
raise
srv = loop.run_until_complete(f)
LOGGER.info('REST serving on %s', srv.sockets[0].getsockname())
app['cocrawler'] = handler, srv
return app
def close(app):
if app is None:
return
handler, srv = app['cocrawler']
loop = asyncio.get_event_loop()
srv.close()
loop.run_until_complete(srv.wait_closed())
loop.run_until_complete(app.shutdown())
loop.run_until_complete(app.cleanup())
async def frontpage(request):
return web.Response(text='Hello, world!')
async def api(request):
name = request.match_info['name']
data = {'name': name}
return web.json_response(data)
| 2.5625 | 3 |
testsuite/tests/QB02-003__m_files/run_test.py | AdaCore/style_checker | 2 | 12787203 | <reponame>AdaCore/style_checker<gh_stars>1-10
def test_ok(style_checker):
"""Style check test against ok.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'ok.m')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
def test_rcs_rev(style_checker):
"""Style check test against ok-rcs-rev.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'ok-rcs-rev.m')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
def test_iqgen_write_log(style_checker):
"""Style check test against iqgen_write_log.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'iqgen_write_log.m')
style_checker.assertEqual(p.status, 0, p.image)
style_checker.assertRunOutputEmpty(p)
def test_nok_dos(style_checker):
"""Style check test against nok-dos.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'nok-dos.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
nok-dos.m:36: DOS line ending is not allowed
nok-dos.m:36: inconsistent newline: cr+lf [dos] (the previous line used lf [unix])
""")
def test_nok_tab(style_checker):
"""Style check test against nok-tab.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'nok-tab.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
nok-tab.m:36: Indentation must not use Tab characters
nok-tab.m:37: Indentation must not use Tab characters
""")
def test_nok_trailing(style_checker):
"""Style check test against nok-trailing.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'nok-trailing.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
nok-trailing.m:15: Trailing spaces are not allowed
""")
def test_nok_copyright(style_checker):
"""Style check test against nok-copyright.m
"""
style_checker.set_year(2017)
p = style_checker.run_style_checker('whatever', 'nok-copyright.m')
style_checker.assertNotEqual(p.status, 0, p.image)
style_checker.assertRunOutputEqual(p, """\
nok-copyright.m: Copyright notice missing, must occur before line 24
""")
| 2.3125 | 2 |
test/test.py | bobrock/AuthKit | 0 | 12787204 | <filename>test/test.py<gh_stars>0
"""
Very basic tests which so no more than check each of the authentication
methods to ensure that an unprotected page is accessible and that a
protected page triggers the a sign in.
Note: Should the Form and Forward methods return 401 or 200 when they
generate an HTML page for the user to sign in?
"""
import sys
import os
import paste.lint
from authkit.authenticate import middleware, sample_app
from paste.fixture import *
sys.path.insert(0, os.getcwd()+'/examples/docs')
from form import app as form_app
from basic import app as basic_app
from digest import app as digest_app
from forward import app as forward_app
from open_id import app as openid_app
from redirect import app as redirect_app
# Add the paste validation middleware
form_app = paste.lint.middleware(form_app)
basic_app = paste.lint.middleware(basic_app)
digest_app = paste.lint.middleware(digest_app)
forward_app = paste.lint.middleware(forward_app)
openid_app = paste.lint.middleware(openid_app)
redirect_app = paste.lint.middleware(redirect_app)
sys.path.insert(0, os.getcwd()+'/examples/config')
from digest import app as config_app
def assertEqual(a,b):
if a != b:
raise AssertionError('%s != %s'%(a,b))
def assertAllEqual(*args):
if not len(args)>2:
raise Exception("Need two arguments")
a = args[0]
for b in args[1:]:
if a != b:
raise AssertionError('%s != %s'%(a,b))
apps = [
form_app,
basic_app,
digest_app,
forward_app,
openid_app,
redirect_app,
config_app,
]
def test_ok():
for app in apps:
if app == forward_app:
res = TestApp(app).get('')
assertEqual(res.header('content-type'), 'text/plain')
assertEqual(res.full_status, '200 OK')
assert 'You Have Access To This Page.' in res
else:
res = TestApp(app).get('')
assertEqual(res.header('content-type'), 'text/plain; charset=UTF-8')
assertEqual(res.full_status, '200 OK')
assert 'You Have Access To This Page.' in res
def test_intercept():
    # XXX Note: these tests don't cover the inclusion of a username and only test the form
    # method; all the other methods should also be tested for correct behaviour
def sample_app(environ, start_response):
if environ.get('PATH_INFO') == '/403':
start_response('403 Forbidden', [('Content-type', 'text/plain')])
return ['Access denied']
elif environ.get('PATH_INFO') == '/401':
start_response('401 Unauth', [('Content-type', 'text/plain')])
return ['Not Authed']
elif environ.get('PATH_INFO') == '/702':
start_response('702 Doesnt exist', [('Content-type', 'text/plain')])
return ['Access denied']
elif environ.get('PATH_INFO') == '/500':
start_response('500 Error', [('Content-type', 'text/plain')])
return ['Error']
app = middleware(
sample_app,
setup_method='digest',
digest_realm='test',
digest_authenticate_user_data = """
Username1:password1
username2:password2
""",
cookie_signoutpath = '/signout',
setup_intercept = "403, 702",
)
res = TestApp(app).get('/403', status=401)
assertEqual(res.header('content-type'), 'text/plain; charset=utf8')
# XXX Should this keep the original status code or not?
assertEqual(res.full_status, '401 Unauthorized')
assert 'This server could not verify that you are authorized' in res
res = TestApp(app).get('/702', status=401)
assertEqual(res.header('content-type'), 'text/plain; charset=utf8')
# XXX Should this keep the original status code or not?
assertEqual(res.full_status, '401 Unauthorized')
assert 'This server could not verify that you are authorized' in res
res = TestApp(app).get('/500', status=500)
assertEqual(res.header('content-type'), 'text/plain')
assertEqual(res.full_status, '500 Error')
assert 'Error' in res
res = TestApp(app).get('/401', status=401)
assertEqual(res.header('content-type'), 'text/plain')
assertEqual(res.full_status, '401 Unauth')
assert 'Not Authed' in res
def test_fail():
for app in [basic_app, digest_app, config_app]:
res = TestApp(app).get('/private', status=401)
assertEqual(res.header('content-type'),'text/plain; charset=utf8')
assertEqual(res.full_status, '401 Unauthorized')
#raise Exception(res)
assert 'This server could not verify that you are' in res
def test_form_fail():
res = TestApp(form_app).get('/private', status=200)
assertEqual(res.header('content-type'),'text/html; charset=UTF-8')
assertEqual(res.full_status, '200 OK')
assert 'Please Sign In' in res
def test_forward_fail():
res = TestApp(forward_app).get('/private')
assertEqual(res.header('content-type'),'text/html')
# XXX Not sure about this but using a 401 triggers an infinite loop
# of redirects.
assertEqual(res.full_status, '200 Sign in required')
assert 'Please Sign In' in res
def test_openid_fail():
res = TestApp(openid_app).get('/private')
assertEqual(res.header('content-type'),'text/html; charset=UTF-8')
assertEqual(res.full_status, '200 OK')
assert 'Please Sign In' in res
def test_redirect_fail():
res = TestApp(redirect_app).get('/private', status=302)
assertEqual(res.header('Location'),'http://3aims.com')
assertEqual(res.full_status, '302 Found')
def test_users_api_database():
try:
from authkit.users.sqlalchemy_04_driver import UsersFromDatabase, setup_model
except ImportError:
raise Exception("Could not run the SQLAlchemy tests, not installed")
try:
from sqlalchemymanager import SQLAlchemyManager
except ImportError:
raise Exception("Could not run the SQLAlchemy tests, SQLAlchemyManager is not installed")
if os.path.exists("mydb.db"):
os.remove("mydb.db")
app = SQLAlchemyManager(
None,
{'sqlalchemy.url':'sqlite:///mydb.db'},
[setup_model]
)
app.create_all()
connection = app.engine.connect()
session = app.session_maker(bind=connection)
try:
environ = {}
environ['sqlalchemy.session'] = session
environ['sqlalchemy.model'] = app.model
d = UsersFromDatabase(environ)
d.role_create("wiki")
d.role_create("adMin")
d.role_create("editor")
d.group_create("pyLOns")
d.group_create("dJAngo")
d.user_create("jaMEs", "<PASSWORD>", "pyLoNs")
d.user_create("ben", "<PASSWORD>")
d.user_create("Simon", "<PASSWORD>")
d.user_create("ian", "<PASSWORD>")
assertEqual(d.list_roles(),["admin", "editor", "wiki"])
assertEqual(d.list_groups(),["django", "pylons"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
assertEqual(d.user_has_password("<PASSWORD>", "<PASSWORD>"), True)
assertEqual(d.user_has_password("<PASSWORD>", "<PASSWORD>"), False)
d.role_create("test_role")
d.group_create("test_group")
d.user_create("test_user", "password")
assertEqual(d.list_roles(),["admin", "editor", "test_role", "wiki"])
assertEqual(d.list_groups(),["django", "pylons", "test_group"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon', "test_user"])
d.role_delete("test_role")
d.group_delete("test_group")
d.user_delete("test_user")
assertEqual(d.list_roles(),["admin", "editor", "wiki"])
assertEqual(d.list_groups(),["django", "pylons"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
assertEqual(d.user_has_role("james", "admin"), False)
d.user_add_role("james", "admin")
assertEqual(d.user_has_role("james", "admin"), True)
d.user_remove_role("james", "admin")
assertEqual(d.user_has_role("james", "admin"), False)
d.user_add_role("james", "wiki")
d.user_add_role("simon", "wiki")
d.user_add_role("james", "admin")
#d.user_add_role("james", "editor")
d.user_add_role("ben", "editor")
assertEqual(d.user_has_group("james", "pylons"), True)
assertEqual(d.user_has_group("simon", None), True)
assertEqual(d.user_has_group("simon", "django"), False)
d.user_set_group("simon", "dJangO")
assertEqual(d.user_has_group("simon", None), False)
d.user_set_group("bEn", "PyLONS")
assertEqual(d.user_has_group("simon", "django"), True)
assertEqual(d.user_has_group("bEn", "pYlons"), True)
d.user_remove_group("bEn")
assertEqual(d.user_has_group("bEn", "pYlons"), False)
d.user_set_group("bEn", "PyLONS")
assertEqual(d.user_has_group("bEn", "pYlons"), True)
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
d.user_set_username("james", "jim")
assertEqual(d.list_users(),['ben', 'ian', 'jim', 'simon'])
d.user_set_username("jim", "james")
from authkit.users import UsersFromFile, UsersFromString, AuthKitNoSuchUserError, AuthKitNoSuchGroupError,AuthKitNoSuchRoleError
string_data = """jaMEs:passWOrd1:pyLOns wiki adMin
ben:password2:pylons admin editor
simon:<PASSWORD>:<PASSWORD>
ian:paSsword4 wiki
"""
filename = 'test/user_file_data.txt'
s = UsersFromString(string_data)
f = UsersFromFile(filename)
# Test Parsing
assertAllEqual(
s.passwords,
f.passwords,
{
'james':'<PASSWORD>',
'ben':'<PASSWORD>',
'simon':'<PASSWORD>',
'ian':'<PASSWORD>',
},
)
assertAllEqual(
s.roles,
f.roles,
{
'james':['admin', 'wiki'],
'ben':['admin','editor'],
'ian':['wiki'],
'simon':[],
},
)
assertAllEqual(
s.groups,
f.groups,
{
'james':'pylons',
'ben':'pylons',
'ian': None,
'simon':'django',
},
)
assertAllEqual(
s.usernames,
f.usernames,
['ben', 'ian', 'james', 'simon'],
)
# Test list functions
assertAllEqual(
s.list_users(),
f.list_users(),
d.list_users(),
['ben', 'ian', 'james', 'simon'],
)
assertAllEqual(
s.list_roles(),
f.list_roles(),
d.list_roles(),
['admin', 'editor', 'wiki'],
)
assertAllEqual(
s.list_groups(),
f.list_groups(),
d.list_groups(),
['django','pylons'],
)
# Test user has functions
assertAllEqual(
s.user_has_role('jAMes','WiKi'),
f.user_has_role('jAMes','WiKi'),
d.user_has_role('jAMes','WiKi'),
True
)
assertAllEqual(
s.user_has_role('jAMes','editOr'),
f.user_has_role('jAMes','editOr'),
d.user_has_role('jAMes','editOr'),
False
)
assertAllEqual(
s.user_has_group('jAMeS','PyLons'),
f.user_has_group('jAMes','pylOns'),
d.user_has_group('jAMes','pylOns'),
True
)
assertAllEqual(
s.user_has_group('jameS','djaNgo'),
f.user_has_group('JAMes','djAngo'),
d.user_has_group('JAMes','djAngo'),
False
)
assertAllEqual(
s.user_has_password('<PASSWORD>','<PASSWORD>'),
f.user_has_password('j<PASSWORD>','<PASSWORD>'),
d.user_has_password('<PASSWORD>','<PASSWORD>'),
True
)
assertAllEqual(
s.user_has_password('<PASSWORD>S','<PASSWORD>'),
f.user_has_password('<PASSWORD>','<PASSWORD>'),
d.user_has_password('<PASSWORD>','<PASSWORD>'),
False
)
# Existence Methods
assertAllEqual(
s.user_exists('jAMeS'),
f.user_exists('jAMes'),
d.user_exists('jAMes'),
True
)
assertAllEqual(
s.user_exists('nobody'),
f.user_exists('nobody'),
d.user_exists('nobody'),
False
)
# Existence Methods
assertAllEqual(
s.role_exists('wiKi'),
f.role_exists('Wiki'),
d.role_exists('Wiki'),
True
)
assertAllEqual(
s.role_exists('norole'),
f.role_exists('norole'),
d.role_exists('norole'),
False
)
assertAllEqual(
s.group_exists('pyLons'),
f.group_exists('PYlons'),
d.group_exists('PYlons'),
True
)
assertAllEqual(
s.group_exists('nogroup'),
f.group_exists('nogroup'),
d.group_exists('nogroup'),
False
)
# User Methods
assertAllEqual(
s.user('James'),
f.user('James'),
d.user('James'),
{
'username': 'james',
'group': 'pylons',
'password': '<PASSWORD>',
'roles': ['admin','wiki'],
}
)
# Test all user methods raise:
for plugin in [s,f,d]:
for func in [
'user',
'user_roles',
'user_group',
'user_password',
]:
try:
getattr(plugin, func)('nouser')
                except AuthKitNoSuchUserError as e:
pass
else:
raise AssertionError("Failed to throw a no user error")
for plugin in [s,f,d]:
for func in [
'user_has_password',
'user_has_role',
'user_has_group',
]:
try:
getattr(plugin, func)('nouser','somevar')
                except AuthKitNoSuchUserError as e:
pass
else:
raise AssertionError("Failed to throw a no user error")
assertAllEqual(
s.user_roles('James'),
f.user_roles('James'),
d.user_roles('James'),
['admin','wiki']
)
assertAllEqual(
s.user_group('James'),
f.user_group('James'),
d.user_group('James'),
'pylons'
)
assertAllEqual(
s.user_password('James'),
f.user_password('<PASSWORD>'),
d.user_password('<PASSWORD>'),
'<PASSWORD>'
)
session.flush()
session.commit()
finally:
session.close()
connection.close()
def test_users_model_api_database():
sys.path.insert(0, os.getcwd()+'/examples/user/database-model')
try:
from authkit.users.sqlalchemy_driver import UsersFromDatabase
except ImportError:
raise Exception("Could not run the SQLAlchemy tests, not installed")
if os.path.exists("test.db"):
os.remove("test.db")
import model as test_model
# Setup SQLAlchemy database engine
from sqlalchemy import engine_from_config
engine = engine_from_config({'sqlalchemy.url':'sqlite:///test.db'}, 'sqlalchemy.')
test_model.init_model(engine)
test_model.engine = engine
d = UsersFromDatabase(test_model)
test_model.meta.metadata.create_all(test_model.engine)
d.role_create("wiki")
d.role_create("adMin")
d.role_create("editor")
d.group_create("pyLOns")
d.group_create("dJAngo")
d.user_create("jaMEs", "<PASSWORD>", "pyLoNs")
d.user_create("ben", "<PASSWORD>")
d.user_create("Simon", "<PASSWORD>")
d.user_create("ian", "<PASSWORD>")
assertEqual(d.list_roles(),["admin", "editor", "wiki"])
assertEqual(d.list_groups(),["django", "pylons"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
assertEqual(d.user_has_password("james", "<PASSWORD>"), True)
assertEqual(d.user_has_password("<PASSWORD>", "<PASSWORD>"), False)
d.role_create("test_role")
d.group_create("test_group")
d.user_create("test_user", "password")
assertEqual(d.list_roles(),["admin", "editor", "test_role", "wiki"])
assertEqual(d.list_groups(),["django", "pylons", "test_group"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon', "test_user"])
d.role_delete("test_role")
d.group_delete("test_group")
d.user_delete("test_user")
assertEqual(d.list_roles(),["admin", "editor", "wiki"])
assertEqual(d.list_groups(),["django", "pylons"])
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
assertEqual(d.user_has_role("james", "admin"), False)
d.user_add_role("james", "admin")
assertEqual(d.user_has_role("james", "admin"), True)
d.user_remove_role("james", "admin")
assertEqual(d.user_has_role("james", "admin"), False)
d.user_add_role("james", "wiki")
d.user_add_role("simon", "wiki")
d.user_add_role("james", "admin")
#d.user_add_role("james", "editor")
d.user_add_role("ben", "editor")
assertEqual(d.user_has_group("james", "pylons"), True)
assertEqual(d.user_has_group("simon", None), True)
assertEqual(d.user_has_group("simon", "django"), False)
d.user_set_group("simon", "dJangO")
assertEqual(d.user_has_group("simon", None), False)
d.user_set_group("bEn", "PyLONS")
assertEqual(d.user_has_group("simon", "django"), True)
assertEqual(d.user_has_group("bEn", "pYlons"), True)
d.user_remove_group("bEn")
assertEqual(d.user_has_group("bEn", "pYlons"), False)
d.user_set_group("bEn", "PyLONS")
assertEqual(d.user_has_group("bEn", "pYlons"), True)
assertEqual(d.list_users(),['ben', 'ian', 'james', 'simon'])
d.user_set_username("james", "jim")
assertEqual(d.list_users(),['ben', 'ian', 'jim', 'simon'])
d.user_set_username("jim", "james")
from authkit.users import UsersFromFile, UsersFromString, AuthKitNoSuchUserError, AuthKitNoSuchGroupError,AuthKitNoSuchRoleError
string_data = """jaMEs:passWOrd1:pyLOns wiki adMin
ben:password2:pylons admin editor
simon:password3:d<PASSWORD>
ian:paSsword4 wiki
"""
filename = 'test/user_file_data.txt'
s = UsersFromString(string_data)
f = UsersFromFile(filename)
# Test Parsing
assertAllEqual(
s.passwords,
f.passwords,
{
'james':'<PASSWORD>',
'ben':'<PASSWORD>',
'simon':'<PASSWORD>',
'ian':'<PASSWORD>',
},
)
assertAllEqual(
s.roles,
f.roles,
{
'james':['admin', 'wiki'],
'ben':['admin','editor'],
'ian':['wiki'],
'simon':[],
},
)
assertAllEqual(
s.groups,
f.groups,
{
'james':'pylons',
'ben':'pylons',
'ian': None,
'simon':'django',
},
)
assertAllEqual(
s.usernames,
f.usernames,
['ben', 'ian', 'james', 'simon'],
)
# Test list functions
assertAllEqual(
s.list_users(),
f.list_users(),
d.list_users(),
['ben', 'ian', 'james', 'simon'],
)
assertAllEqual(
s.list_roles(),
f.list_roles(),
d.list_roles(),
['admin', 'editor', 'wiki'],
)
assertAllEqual(
s.list_groups(),
f.list_groups(),
d.list_groups(),
['django','pylons'],
)
# Test user has functions
assertAllEqual(
s.user_has_role('jAMes','WiKi'),
f.user_has_role('jAMes','WiKi'),
d.user_has_role('jAMes','WiKi'),
True
)
assertAllEqual(
s.user_has_role('jAMes','editOr'),
f.user_has_role('jAMes','editOr'),
d.user_has_role('jAMes','editOr'),
False
)
assertAllEqual(
s.user_has_group('jAMeS','PyLons'),
f.user_has_group('jAMes','pylOns'),
d.user_has_group('jAMes','pylOns'),
True
)
assertAllEqual(
s.user_has_group('jameS','djaNgo'),
f.user_has_group('JAMes','djAngo'),
d.user_has_group('JAMes','djAngo'),
False
)
assertAllEqual(
s.user_has_password('<PASSWORD>','<PASSWORD>'),
f.user_has_password('<PASSWORD>','<PASSWORD>'),
d.user_has_password('<PASSWORD>','<PASSWORD>'),
True
)
assertAllEqual(
s.user_has_password('<PASSWORD>','<PASSWORD>'),
f.user_has_password('<PASSWORD>','<PASSWORD>'),
d.user_has_password('<PASSWORD>','<PASSWORD>'),
False
)
# Existence Methods
assertAllEqual(
s.user_exists('jAMeS'),
f.user_exists('jAMes'),
d.user_exists('jAMes'),
True
)
assertAllEqual(
s.user_exists('nobody'),
f.user_exists('nobody'),
d.user_exists('nobody'),
False
)
# Existence Methods
assertAllEqual(
s.role_exists('wiKi'),
f.role_exists('Wiki'),
d.role_exists('Wiki'),
True
)
assertAllEqual(
s.role_exists('norole'),
f.role_exists('norole'),
d.role_exists('norole'),
False
)
assertAllEqual(
s.group_exists('pyLons'),
f.group_exists('PYlons'),
d.group_exists('PYlons'),
True
)
assertAllEqual(
s.group_exists('nogroup'),
f.group_exists('nogroup'),
d.group_exists('nogroup'),
False
)
# User Methods
assertAllEqual(
s.user('James'),
f.user('James'),
d.user('James'),
{
'username': 'james',
'group': 'pylons',
'password': '<PASSWORD>',
'roles': ['admin','wiki'],
}
)
# Test all user methods raise:
for plugin in [s,f,d]:
for func in [
'user',
'user_roles',
'user_group',
'user_password',
]:
try:
getattr(plugin, func)('nouser')
                except AuthKitNoSuchUserError as e:
pass
else:
raise AssertionError("Failed to throw a no user error")
for plugin in [s,f,d]:
for func in [
'user_has_password',
'user_has_role',
'user_has_group',
]:
try:
getattr(plugin, func)('nouser','somevar')
                except AuthKitNoSuchUserError as e:
pass
else:
raise AssertionError("Failed to throw a no user error")
assertAllEqual(
s.user_roles('James'),
f.user_roles('James'),
d.user_roles('James'),
['admin','wiki']
)
assertAllEqual(
s.user_group('James'),
f.user_group('James'),
d.user_group('James'),
'pylons'
)
assertAllEqual(
s.user_password('<PASSWORD>'),
f.user_password('James'),
d.user_password('<PASSWORD>'),
'<PASSWORD>'
)
| 2.8125 | 3 |
Physics250-ME22/diskfluxMagnitude.py | illusion173/Physics250 | 0 | 12787205 | import numpy as np
import math
Esubo = 8.854 * pow(10,-12)
k = 8.988 * pow(10,9)
def fluxDisk():
radius = float(input("Radius: "))
radius = radius /1000
electricField = float(input("Electric Field: "))
electricField = (electricField*pow(10,3))
theta = float(input("Theta: "))
actualTheta = 90-theta
flux = math.cos(math.radians(actualTheta))*electricField*pow(radius,2) * math.pi
print(flux)
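# The flux computed above follows Phi = E * (pi * r^2) * cos(90deg - theta),
# i.e. field magnitude times disk area times the cosine of the angle between
# the field and the disk normal. Worked example (made-up inputs): entering
# Radius = 20 (mm), Electric Field = 1.5 (kN/C) and Theta = 30 (deg) gives
# flux = 1500 * pi * 0.02**2 * cos(60deg) ~= 0.94 N*m^2/C.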
fluxDisk() | 3.5 | 4 |
office365/sharepoint/sites/usage_info.py | theodoriss/Office365-REST-Python-Client | 544 | 12787206 | <reponame>theodoriss/Office365-REST-Python-Client
from office365.runtime.client_value import ClientValue
class UsageInfo(ClientValue):
"""
Provides fields used to access information regarding site collection usage.
"""
def __init__(self, bandwidth=None, discussion_storage=None, visits=None):
"""
:param long bandwidth: Contains the cumulative bandwidth used by the site collection on the previous day or
on the last day that log files were processed, which is tracked by usage analysis code.
:param long discussion_storage: Contains the amount of storage, identified in bytes,
used by Web discussion data in the site collection.
:param long visits: Contains the cumulative number of visits to the site collection,
which is tracked by the usage analysis code.
"""
super(UsageInfo, self).__init__()
self.Bandwidth = bandwidth
self.DiscussionStorage = discussion_storage
self.Visits = visits
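    # Minimal usage sketch (values are made up for illustration):
    #
    #   info = UsageInfo(bandwidth=1024, discussion_storage=2048, visits=15)
    #   print(info.Bandwidth, info.DiscussionStorage, info.Visits)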
| 2.71875 | 3 |
Python_Exercise/exercise_seven.py | kindyluv/My_Personal_Python_Exercises | 0 | 12787207 | <reponame>kindyluv/My_Personal_Python_Exercises<filename>Python_Exercise/exercise_seven.py
import operator
sign_operator = {
"**": operator.pow,
"/": operator.truediv,
"-": operator.sub,
"+": operator.add,
"*": operator.mul,
"%": operator.mod,
}
is_a_good_credit = True
price = int(input("Enter number: "))
percent = input("Enter operator sign: ")
user_percent = input("Enter percentage: ")
for percent in sign_operator.keys():
if is_a_good_credit:
answer = percent, user_percent, price
print(answer)
elif not is_a_good_credit:
answer = percent, user_percent, price
print(answer)
else:
print("Null")
| 4.125 | 4 |
python/paddle_serving_client/metric/auc.py | loveululu/Serving | 789 | 12787208 | <filename>python/paddle_serving_client/metric/auc.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing, doc-string-with-all-args, doc-string-with-returns
def tied_rank(x):
"""
Computes the tied rank of elements in x.
This function computes the tied rank of elements in x.
Parameters
----------
x : list of numbers, numpy array
Returns
-------
score : list of numbers
        The tied rank of each element in x
"""
sorted_x = sorted(zip(x, range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank + 1 + i) / 2.0
last_rank = i
if i == len(sorted_x) - 1:
for j in range(last_rank, i + 1):
r[sorted_x[j][1]] = float(last_rank + i + 2) / 2.0
return r
def auc(actual, posterior):
"""
    Computes the area under the receiver operating characteristic curve (AUC)
This function computes the AUC error metric for binary classification.
Parameters
----------
actual : list of binary numbers, numpy array
The ground truth value
posterior : same type as actual
Defines a ranking on the binary numbers, from most likely to
be positive to least likely to be positive.
Returns
-------
score : double
        The area under the ROC curve for the given labels and scores
"""
r = tied_rank(posterior)
num_positive = len([0 for x in actual if x == 1])
num_negative = len(actual) - num_positive
sum_positive = sum([r[i] for i in range(len(r)) if actual[i] == 1])
auc = ((sum_positive - num_positive * (num_positive + 1) / 2.0) /
(num_negative * num_positive))
return auc
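

if __name__ == "__main__":
    # Small self-check with made-up labels/scores (illustrative only): a
    # perfect ranking gives AUC = 1.0 and a fully reversed ranking gives 0.0.
    labels = [0, 0, 1, 1]
    scores = [0.1, 0.2, 0.8, 0.9]
    assert auc(labels, scores) == 1.0
    assert auc(labels, scores[::-1]) == 0.0
    print("tied ranks:", tied_rank(scores))  # -> [1.0, 2.0, 3.0, 4.0]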
| 2.578125 | 3 |
convlstm/package/convlstm.py | AlbertoCenzato/pytorch_model_zoo | 11 | 12787209 | <filename>convlstm/package/convlstm.py
from typing import Optional, Union, Tuple, List
from enum import Enum
import torch
import torch.nn as nn
from torch import Tensor
# typedefs
HiddenState = Tuple[Tensor, Tensor]
HiddenStateStacked = Tuple[List[Tensor], List[Tensor]]
class ConvLSTMCell(nn.Module):
def __init__(self, input_size: Tuple[int, int], input_dim: int, hidden_dim: int,
kernel_size: Tuple[int, int], bias: bool, hidden_activation=torch.sigmoid,
output_activation=torch.tanh):
"""
Initialize ConvLSTM cell.
Args:
@input_size: Height and width of input tensor as (height, width).
@input_dim: Number of channels of input tensor.
@hidden_dim: Number of channels of hidden state.
@kernel_size: Size of the convolutional kernel.
@bias: Whether or not to add the bias.
"""
super(ConvLSTMCell, self).__init__()
self.height, self.width = input_size
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.padding = kernel_size[0] // 2, kernel_size[1] // 2
self.bias = bias
self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
out_channels=4 * self.hidden_dim,
kernel_size=self.kernel_size,
padding=self.padding,
bias=self.bias)
self.output_activation = output_activation
self.hidden_activation = hidden_activation
def forward(self, input: Tensor, hx: HiddenState=None) -> HiddenState:
"""
Inputs: input, (h_0, c_0)
- **input** of shape `(batch, input_dim, height, width)`:
tensor containing input features
- **h_0** of shape `(batch, hidden_dim, height, width)`:
tensor containing the initial hidden state for each element in the batch.
- **c_0** of shape `(batch, hidden_dim, height, width)`:
tensor containing the initial cell state for each element in the batch.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: h_1, c_1
- **h_1** of shape `(batch, hidden_dim, height, width)`:
tensor containing the next hidden state for each element in the batch
- **c_1** of shape `(batch, hidden_dim, height, width)`:
tensor containing the next cell state for each element in the batch
"""
if not hx:
hx = self.init_hidden(input.size(0))
old_h, old_cell = hx
combined = torch.cat([input, old_h], dim=1) # concatenate along channel axis
gates_activations = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = gates_activations.chunk(4, dim=1)
i = self.hidden_activation(cc_i) # torch.sigmoid(cc_i)
f = self.hidden_activation(cc_f) # torch.sigmoid(cc_f)
o = self.hidden_activation(cc_o) # torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * old_cell + i * g
h_next = o * self.output_activation(c_next) # torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size: int) -> HiddenState:
dtype = self.conv.weight.dtype
device = self.conv.weight.device
shape = (batch_size, self.hidden_dim, self.height, self.width)
h = torch.zeros(shape, dtype=dtype).to(device)
return (h, h)
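# Illustrative usage of a single cell (shapes below are made up):
#
#   cell = ConvLSTMCell(input_size=(16, 16), input_dim=3, hidden_dim=8,
#                       kernel_size=(3, 3), bias=True)
#   x = torch.randn(2, 3, 16, 16)     # (batch, channels, height, width)
#   h, c = cell(x)                    # each of shape (2, 8, 16, 16)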
#class ConvLSTMParams():
#
# def __init__(self, input_size: Tuple[int, int], input_dim: int, hidden_dim: int,
# kernel_size: Tuple[int, int], num_layers: int,
# batch_first: bool=False, bias: bool=True, mode: str='sequence'):
# self.input_size = input_size
# self.input_dim = input_dim
# self.hidden_dim = hidden_dim
# self.kernel_size = kernel_size
# self.num_layers = num_layers
# self.batch_first = batch_first
# self.bias = bias
# self.mode = mode
# self.model = ModelType.CONVLSTM
class ConvLSTM(nn.Module):
"""
2D convolutional LSTM model.
Parameters
----------
input_size: (int, int)
Height and width of input tensor as (height, width).
input_dim: int
Number of channels of each hidden state
hidden_dim: list of int
Number of channels of hidden state.
kernel_size: list of (int, int)
Size of each convolutional kernel.
num_layers: int
number of convolutional LSTM layers
batch_first: bool (default False)
input tensor order: (batch_size, sequence_len, channels, height,
width) if batch_first == True, (sequence_len, batch_size, channels,
height, width) otherwise
bias: bool (default True)
Whether or not to add the bias.
    mode: either 'sequence' or 'step-by-step' (default 'sequence')
        if 'sequence', forward() accepts an input tensor of shape
        (batch_size, sequence_len, channels, height, width) and outputs a
        tensor of the same shape;
        if 'step-by-step', the model processes one sequence element at a
        time, therefore forward() accepts an input tensor of shape
        (batch_size, channels, height, width) and outputs a tensor of the
        same shape.
        When using 'step-by-step' mode you should take care of feeding
        forward() with the output of init_hidden() when processing the
        first element of the sequence
"""
SEQUENCE = 'sequence'
STEP_BY_STEP = 'step-by-step'
def __init__(self, input_size: Tuple[int, int], input_dim: int, hidden_dim: List[int],
kernel_size: List[Tuple[int, int]], num_layers: int, batch_first: bool=False,
bias: bool=True, mode: str='sequence'):
super(ConvLSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.height, self.width = input_size
self.mode = mode
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.kernel_size = kernel_size
self.num_layers = num_layers
self.batch_first = batch_first
self.bias = bias
cell_list = []
dims = [input_dim, *hidden_dim]
for i in range(num_layers):
layer = ConvLSTMCell(input_size=(self.height, self.width),
input_dim=dims[i],
hidden_dim=dims[i+1],
kernel_size=self.kernel_size[i],
bias=self.bias)
cell_list.append(layer)
self.cell_list = nn.ModuleList(cell_list)
self.set_mode(mode)
def set_mode(self, mode: str) -> str:
old_mode = self.mode
self.mode = mode
if mode == ConvLSTM.SEQUENCE:
self.forward = self._forward_sequence
elif mode == ConvLSTM.STEP_BY_STEP:
self.forward = self._forward_item
else:
            raise ValueError("Parameter 'mode' can only be either 'sequence' or 'step-by-step'.")
return old_mode
def _forward_sequence(self, input: Tensor, hidden_state: HiddenStateStacked=None) \
-> Tuple[Tensor, HiddenStateStacked]:
"""
Inputs: input, (h_0, c_0)
- **input** either of shape `(seq_len, batch, input_dim, height, width)`
or `(batch, seq_len, channels, height, width)`: tensor containing
the features of the input sequence.
- **h_0** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the initial
hidden state for each element in the batch and for each layer in the model.
- **c_0** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the initial
cell state for each element in the batch and for each layer in the model.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: output, (h_n, c_n)
- **output** of shape `(batch, seq_len, channels, height, width)`:
tensor containing the output features `(h_t)` from the last layer
of the ConvLSTM, for each t.
- **h_n** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the hidden
state for `t = seq_len`.
- **c_n** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the cell
state for `t = seq_len`.
"""
# (b, t, c, h, w) -> (t, b, c, h, w)
input_seq = input.transpose(0, 1) if self.batch_first else input
if hidden_state is None:
hidden_state = self.init_hidden(batch_size=input_seq.size(1))
seq_len = input_seq.size(0)
h_0, c_0 = hidden_state
h_n_list, c_n_list = [], []
prev_layer_output = list(torch.unbind(input_seq)) # [tensor.squeeze(1) for tensor in input_seq.split(1, dim=1)]
for l, cell in enumerate(self.cell_list):
state = (h_0[l], c_0[l])
for t in range(seq_len):
state = cell(prev_layer_output[t], state)
prev_layer_output[t] = state[0]
h_n_list.append(state[0])
c_n_list.append(state[1])
output = torch.stack(prev_layer_output, dim=1)
        # torch.stack(..., dim=1) above yields (batch, seq_len, ...); transpose
        # back to (seq_len, batch, ...) only when the module is not batch-first.
        if self.batch_first:
            return output, (h_n_list, c_n_list)
        return output.transpose(0, 1), (h_n_list, c_n_list)
def _forward_item(self, input: Tensor, hidden_state: HiddenStateStacked) \
-> Tuple[Tensor, HiddenStateStacked]:
"""
Inputs: input, (h_0, c_0)
- **input** of shape `(batch, input_dim, height, width)`:
tensor containing the input image.
- **h_0** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing
the initial hidden state for each element in the batch and for
each layer in the model.
- **c_0** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the initial
cell state for each element in the batch and for each layer in the model.
If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
Outputs: output, (h_n, c_n)
- **output** of shape `(batch, channels, height, width)`:
tensor containing the output features `(h_t)` from the last
layer of the LSTM.
- **h_n** list of size num_layers that contains tensors of shape
`(batch, channels, height, width)`: tensor containing the hidden
state for each layer.
- **c_n** list of size num_layers that contains tensors of shape
(batch, channels, height, width): tensor containing the cell state
"""
if hidden_state is None:
hidden_state = self.init_hidden(batch_size=input.size(0))
output = input
h_0, c_0 = hidden_state
h_n_list, c_n_list = [], []
for l, cell in enumerate(self.cell_list):
h_n, c_n = cell(output, (h_0[l], c_0[l]))
output = h_n
h_n_list.append(h_n)
c_n_list.append(c_n)
return output, (h_n_list, c_n_list)
def init_hidden(self, batch_size: int) -> HiddenStateStacked:
h_0, c_0 = [], []
for cell in self.cell_list:
h, c = cell.init_hidden(batch_size)
h_0.append(h)
c_0.append(c)
return (h_0, c_0) # NOTE: using a list to allow hidden states of different sizes
@staticmethod
def _check_kernel_size_consistency(kernel_size: Tuple[int, int]):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and
all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
    def _extend_for_multilayer(param, num_layers: int) -> list:
if not isinstance(param, list):
param = [param] * num_layers
return param | 2.78125 | 3 |
build/lib/tunning/cross_validation.py | Breno-st/ds_utils | 0 | 12787210 | """Optimization
* :function:`.single_nested_cvrs`
* :function:`.dual_nested_cvrs`
* :function:`.single_cv`
* :function:`.chi2_test`
"""
# data wrangling
import numpy as np
import pandas as pd
from itertools import product
from scipy import stats
# validation
from sklearn.metrics import balanced_accuracy_score, accuracy_score, f1_score, roc_auc_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
# from scipy import stats
# from pandas import *
# # Store sample sizes and number of errors
# n1 = 1000 # samples
# m1 = 300 # errors
# n2 = 1000 # samples
# m2 = 360 # errors
# # Store errors and correct classifications in a 2x2 table
# perf = DataFrame([[m1, m2], [n1-m1, n2-m2]], index=["Error", "Correct"])
# perf.columns = ["S_1", "S_2"]
# print(perf)
# ##### Chi-2 test for equality of error rates
# pvalue = stats.chi2_contingency(perf)[1]
# print("p-value = ", '{0:.6f}'.format(pvalue))
# ##### Fisher test for equality of error rates
# pvalue = stats.fisher_exact(perf)[1]
# print("p-value = ", '{0:.6f}'.format(pvalue))
# import pandas as pd
# res = pd.read_csv("Crossval.csv", index_col=0)
# print(res)
# """
# algo1 algo2
# 1 75.05 78.08
# 2 74.24 79.77
# 3 76.20 79.61
# 4 81.35 88.39
# 5 80.96 88.27
# 6 84.22 76.20
# 7 77.68 88.04
# 8 82.10 87.50
# 9 81.35 84.37
# 10 81.80 84.04
# """
# ##### t-student test for equality of error rates
# pvalue = stats.ttest_rel(res['algo2'], res['algo1'])[1]
# print("p-value = ", '{0:.6f}'.format(pvalue))
def nested_single_cv(x_t, y_t, L, grid, k_ext, k_int):
"""
    Summary: Helps select a hyper-parameter set for a given model before
    -------- comparing it against other hyper-parameterized models.
Input: - x_t: features train (numpy.arrays)
------ - y_t: labels train (numpy.arrays)
- L: learning algorithm (class method .predict())
- grid: keys as a parameter name; values as the array of the parameter' values (dict)
- K_ext: number of external folds (integer)
- K_int: number of internal folds (integer)
Output: - inner_result_frame: index: [k_ext], columns: [hp_set], values: [v_bcr_(k_int_mean)]
------- - outter_result_frame: index: [k_ext, hp_hat], columns:[t_bcr, v_bcr], values:[t_bcr, v_bcr]
Example: model1= BaggingTrees
-------- grid1 = {'epochs':[1]
, 'n_trees':[100]
, 'criterion': ['entropy']
, 'min_samples_leaf':[0.06] #
, 'max_depth':[3]
, 'min_samples_split':[0.03] #
, 'max_leaf_nodes':[200]
}
K_int, K_ext = 4, 10
outter, inner = nested_single_cv(x_t, y_t, model1, grid1, K_ext, K_int)
outter.groupby('hp_hat').agg({'t_bcr': ['count', 'mean', 'std']
, 'v_bcr': ['mean', 'std']}).reset_index('hp_hat')
"""
hp_set = [v for v in product(*grid.values())]
inner_results = pd.DataFrame(columns = hp_set)
outter_results = pd.DataFrame(columns = ['hp_hat'
, 't_bcr'
, 'v_bcr'
])
# frame pointer
i = 0
# partionate "training rows" into "K_ext" sets
K_ext_folds = KFold(n_splits = k_ext, shuffle=False).split(x_t) # (markers t_i, v_i)
for t_ext_fold, v_ext_fold in K_ext_folds:
# sectioning "train set" between "S_k" into "ext_fold" sets
x_S_k = x_t[t_ext_fold] # training x
y_S_k = y_t[t_ext_fold] # training y
x_ext_fold = x_t[v_ext_fold] # test x
y_ext_fold = y_t[v_ext_fold] # test y
# get hp_hat in the inner loop
hp_dic = {}
for idx, hp in enumerate(hp_set):
hp_dic[idx]=[]
# partionate "S_k training rows" into "K_int" sets
K_int_folds = KFold(n_splits = k_int, shuffle=False).split(x_S_k)
for t_int_fold, v_int_fold in K_int_folds:
# sectioning "S_k" between "Ss_k" into "int_fold" sets
x_Ss_k = x_S_k[t_int_fold] # training x
y_Ss_k = y_S_k[t_int_fold] # training y
x_int_fold = x_S_k[v_int_fold] # test x
y_int_fold = y_S_k[v_int_fold] # test y
                # must scale after partitioning, for a training-specific normalization
min_max_scaler = MinMaxScaler(feature_range=(0, 1))
X_t = min_max_scaler.fit_transform(x_Ss_k)
X_v = min_max_scaler.fit_transform(x_int_fold)
Y_t = y_Ss_k
Y_v = y_int_fold
# Loading and fitting model
model = L(hp)
model.fit(X_t, Y_t)
# prediction
Y_v_predicted = model.predict(X_v)
# validation
v_bcr = balanced_accuracy_score(Y_v, Y_v_predicted)
# append all
hp_dic[idx].append(v_bcr)
        # Averages the k_int iterations for each hp in hp_set and stores it
inner_results.loc[i] = [sum(arr) / len(arr) for arr in hp_dic.values()]
# avg all hp predictions scores to define hp_hat (the highest) # use t-test?
ixd_max= max([(k,np.mean(v)) for k,v in hp_dic.items()],key=lambda item:item[1])[0]
hp_hat = hp_set[ixd_max]
        # must scale after partitioning, for a training-specific normalization
min_max_scaler = MinMaxScaler(feature_range=(0, 1))
X_t = min_max_scaler.fit_transform(x_S_k)
X_v = min_max_scaler.fit_transform(x_ext_fold)
Y_t = y_S_k
Y_v = y_ext_fold
# Loading and fitting model
model = L(hp)
model.fit(X_t, Y_t)
# prediction
Y_v_predicted = model.predict(X_v)
# training metrics
t_acc = model.acc
t_bcr = model.bcr
t_f1 = model.f1
t_auc = model.auc
# validation metrics
v_acc = accuracy_score(Y_v, Y_v_predicted)
v_bcr = balanced_accuracy_score(Y_v, Y_v_predicted)
v_f1 = f1_score(Y_v, Y_v_predicted, average='macro')
v_auc = roc_auc_score(Y_v, Y_v_predicted, average='macro')
outter_results.loc[i] = [hp_hat
, t_bcr
, v_bcr]
i += 1
return outter_results, inner_results
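# Sketch (assumes two outer-loop result frames `outter1`/`outter2` obtained from
# nested_single_cv with the same folds) of how the validation scores could be
# compared with a paired t-test, mirroring the commented example at the top of
# this module:
#
#   pvalue = stats.ttest_rel(outter2['v_bcr'], outter1['v_bcr'])[1]
#   print("p-value = ", '{0:.6f}'.format(pvalue))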
| 2.640625 | 3 |
geoana/kernels/__init__.py | simpeg/geoana | 11 | 12787211 | <filename>geoana/kernels/__init__.py
"""This module contains kernals for (semi-)analytic geophysical responses
"""
from geoana.kernels.tranverse_electric_reflections import rTE_forward, rTE_gradient
| 1.007813 | 1 |
Desafio 105.py | MoomenEltelbany/PythonDesafios | 0 | 12787212 | <reponame>MoomenEltelbany/PythonDesafios<filename>Desafio 105.py
def notas(*n, sit=False):
d = dict()
d['total'] = len(n)
d['maior'] = max(n)
d['menor'] = min(n)
d['media'] = sum(n) / len(n)
if sit:
if d['media'] >= 7:
d['sitauaca'] = 'Boa'
elif d['media'] >= 5:
d['sitauaca'] = 'Razoavel'
else:
d['sitauaca'] = 'RUIM DPPPP'
return d
resp = notas(5.5, 9, 10)
print(resp) | 3.21875 | 3 |
avaxpython/network/ip.py | jgeofil/avax-python | 25 | 12787213 | <gh_stars>10-100
# avax-python : Python tools for the exploration of the Avalanche AVAX network.
#
# Find tutorials and use cases at https://crypto.bi
"""
Copyright (C) 2021 - crypto.bi
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
---
Help support this Open Source project!
Donations address: X-avax1qr6yzjykcjmeflztsgv6y88dl0xnlel3chs3r4
Thank you!
"""
# --#--#--
import requests
import json
import socket
IP_API_ENDPOINT1 = "https://api.myip.com/"
IP_API_ENDPOINT2 = "https://api.ipify.org/?format=json"
class ExternalIP:
"""Helper class to standardize IP. In case the external API gets changed."""
def __init__(self, ipaddr, country=None, country_code=None) -> None:
self.ip = ipaddr
self.country = country
self.country_code = country_code
def __repr__(self):
dts = {
"ip": self.ip,
"country": self.country,
"country_code": self.country_code
}
return json.dumps(dts)
def get_external_ip():
"""Requests our IP from an external IP API service."""
try:
resp = requests.get(IP_API_ENDPOINT2)
json = resp.json()
return ExternalIP(json["ip"])
except:
try:
resp = requests.get(IP_API_ENDPOINT1)
json = resp.json()
return ExternalIP(json["ip"], country = json["country"], country_code=json["cc"])
except:
return None
def get_internal_ip():
"""Gets our local IP. May be non-routable."""
hostname = socket.gethostname()
local_ip = socket.gethostbyname(hostname)
return ExternalIP(local_ip)
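# Example usage (requires network access; the printed values will vary):
#
#   ext = get_external_ip()
#   if ext is not None:
#       print(ext)                   # JSON repr with ip/country fields
#   print(get_internal_ip().ip)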
| 1.984375 | 2 |
gifi/main.py | kokosing/git-gifi | 9 | 12787214 | import sys
import logging
from gifi.utils import git_utils
from gifi.command import Command, AggregatedCommand, UnknownCommandException, CommandException
import gifi.epic
import gifi.feature
import pkg_resources
import gifi.queue
import gifi.git_hub
logging.basicConfig(filename='/tmp/gifi.log', level=logging.DEBUG)
command = AggregatedCommand('gifi', 'Git and github enhancements to git.', [
gifi.epic.command,
gifi.feature.command,
gifi.queue.command,
gifi.git_hub.command,
Command('version', 'Show version number.', lambda: pkg_resources.require("git-gifi")[0].version)
])
class HelpGenerator(object):
def __init__(self, main):
self.main = main
def __call__(self):
help = str(self.main)
help += '\nUsage:\n\t%s command [command arguments]\n\nCommands:\n' % self.main.name
# it does not have to be recursive as there are only two levels
for command in self.main.nested_commands():
help += str(command)
if len(command.nested_commands()) != 0:
help += ' See below subcommands:\n'
for subcommand in command.nested_commands():
help += '\t%s\n' % str(subcommand)
else:
help += '\n'
return help
command.add_command(Command('help', 'Display this window.', HelpGenerator(command)))
class AliasesInstaller(object):
def __init__(self, main):
self.main = main
def __call__(self, config_level='global'):
repo = git_utils.get_repo()
config_writer = repo.config_writer(config_level)
# it does not have to be recursive as there are only two levels
for command in self.main.nested_commands():
if len(command.nested_commands()) != 0:
for subcommand in command.nested_commands():
alias = '%s-%s' % (command.name, subcommand.name)
value = '"!%s %s %s"' % (sys.argv[0], command.name, subcommand.name)
config_writer.set_value('alias', alias, value)
config_writer.release()
command.add_command(Command('install', 'Install gifi as a bunch of git aliases.', AliasesInstaller(command)))
def main():
args = list(sys.argv)
args.pop(0)
_main(args)
def _main(args):
if len(args) == 0:
args.append('help')
try:
result = command(*args)
if result is not None:
print(result)
except UnknownCommandException:
print("Wrong command, try 'help'.")
except CommandException as e:
print("ERROR: ", e)
| 2.515625 | 3 |
altair/vegalite/v2/examples/bar_chart_with_highlight.py | mtzl/altair | 1 | 12787215 | <filename>altair/vegalite/v2/examples/bar_chart_with_highlight.py
"""
Bar Chart with Highlight
------------------------
This example shows a Bar chart that highlights values beyond a threshold.
"""
import altair as alt
import pandas as pd
data = pd.DataFrame({"Day": range(1, 16),
"Value": [54.8, 112.1, 63.6, 37.6, 79.7, 137.9, 120.1, 103.3,
394.8, 199.5, 72.3, 51.1, 112.0, 174.5, 130.5]})
data2 = pd.DataFrame([{"ThresholdValue": 300, "Threshold": "hazardous"}])
bar1 = alt.Chart(data).mark_bar().encode(
x='Day:O',
y='Value:Q'
)
bar2 = alt.Chart(data).mark_bar(color="#e45755").encode(
x='Day:O',
y='baseline:Q',
y2='Value:Q'
).transform_filter(
"datum.Value >= 300"
).transform_calculate(
"baseline", "300"
)
rule = alt.Chart(data2).mark_rule().encode(
y='ThresholdValue:Q'
)
text = alt.Chart(data2).mark_text(
align='left', dx=215, dy=-5
).encode(
alt.Y('ThresholdValue:Q', axis=alt.Axis(title='PM2.5 Value')),
text=alt.value('hazardous')
)
bar1 + text + bar2 + rule
| 3.53125 | 4 |
core/core_menu.py | g0d0/green | 6 | 12787216 | import subprocess,sys,random
import time  # used by navega_green's sleep calls below
import GreenLib
#Mensagens
sys.path.insert(0, 'messages')
import msg_control
import msg_logo
sys.path.insert(0, 'core')
import core_cripto
import core_recon
import core_menu
import core_reverseshell  # referenced by navega_green's "reverseshell" branch (assumed to live in 'core')
#--
# Função Menu principal
#--
def main_menu():
subprocess.run(["clear"])
logos = [msg_logo.msg_logo1,msg_logo.msg_logo2,msg_logo.msg_logo3]
print(random.choice(logos))
print(msg_control.msg_menu)
choice = input(" >> ")
exec_menu(choice)
return
def cripto_main_menu():
subprocess.run(["clear"])
logos = [msg_logo.msg_logo1,msg_logo.msg_logo2,msg_logo.msg_logo3]
print(random.choice(logos))
print(msg_control.msg_cripto)
choice = input(" >> ")
exec_menu(choice)
return
def recon_main_menu():
subprocess.run(["clear"])
logos = [msg_logo.msg_logo1,msg_logo.msg_logo2,msg_logo.msg_logo3]
print(random.choice(logos))
print(msg_control.msg_cripto)
choice = input(" >> ")
exec_menu(choice)
return
#--
# Execute-menu function
#--
def exec_menu(choice):
subprocess.run(["clear"])
# TODO
    # Lower-case the choice
    # Commented out because of strings containing encrypted hashes with upper-case letters
#ch = choice.lower()
ch = choice
    # Filter the input (whitespace, digits-only and empty strings)
retorno_filtro = filtra_entrada(ch)
    # If anything is wrong, redirect to the main page
if (retorno_filtro != True):
exec_menu('main_menu')
else:
try:
            # Build a list from what was typed
entrada_de_dados = [str(x) for x in ch.split()]
            # If there is a single token, the choice goes to menu_actions
            # to be dispatched to a function
if(len(entrada_de_dados) == 1):
GreenLib.menu_actions[ch]()
            # Inputs shorter than 1 or longer than 3 tokens are treated as errors,
            # since for now the maximum is 3 (e.g. cripto reverse "54321").
            # If longer commands are ever needed this will change.
elif(len(entrada_de_dados) <= 1) or (len(entrada_de_dados) > 3):
exec_menu('main_menu')
            # With exactly 3 arguments, forward them to navega_green
elif(len(entrada_de_dados) == 3):
navega_green(entrada_de_dados[0],entrada_de_dados[1],entrada_de_dados[2])
else:
                #
                # CALL THE MENU WITH MULTIPLE CHOICES USING THE LIST BUILT ABOVE
                #
navega_green(entrada_de_dados[0],entrada_de_dados[1],entrada_de_dados[2])
        # On any error, print a warning message.
except KeyError:
print ("Invalid selection, please try again.\n")
exec_menu('main_menu')
return
# Exit program
def exit():
subprocess.run(["clear"])
print("O "+sys.argv[0]+" foi finalizado com segurança!")
sys.exit()
# Back Menu
# TODO: implement it
def back():
GreenLib.exec_menu('main_menu')
def espera():
print("Aperte um botao para continua...")
go = input(">")
exec_menu('main_menu')
def filtra_entrada(ch):
    # Check whether it contains only digits
if(ch.isdigit() == True):
return False
    # Check whether it is only whitespace
elif(ch.isspace() == True):
return False
    # Check whether it is empty
elif(ch == ''):
return False
else:
return True
# Navigate
def navega_green(escolha,sub_escolha,key):
if (escolha == "search") or (escolha.upper() == "SEARCH"):
print("Função search")
time.sleep(4)
elif (escolha == "set") or (escolha.upper() == "SET"):
print("Função set")
time.sleep(4)
elif (escolha == "cripto") or (escolha.upper() == "CRIPTO"):
teste_cripto = core_cripto.Cripto(sub_escolha,key)
teste_cripto.greenCripto(sub_escolha,key)
espera()
elif (escolha == "greenrecon") or (escolha.upper() == "GREENRECON"):
teste_cripto = core_recon.Recon(sub_escolha,key)
teste_cripto.greenScan()
espera()
elif (escolha == "reverseshell") or (escolha.upper() == "REVERSESHELL"):
print("Sub Escolha:"+sub_escolha)
print("Key:"+key)
server = core_reverseshell.MultiServer()
server.print_help()
server.start_turtle()
else:
GreenLib.menu_actions['main_menu']()
| 2.765625 | 3 |
tests/opensocial_tests/__init__.py | chrischabot/opensocial-python-client | 0 | 12787217 | <reponame>chrischabot/opensocial-python-client
import sys
import logging
sys.path.insert(0, sys.path[0] + '/../src')
logging.basicConfig(level=logging.INFO) | 1.664063 | 2 |
code/composite_example_ERA5.py | kuchaale/wcd_2021 | 0 | 12787218 | <filename>code/composite_example_ERA5.py
import cdstoolbox as ct
@ct.application(title='Calculate composite mean')
@ct.output.download()
def download_application():
variable = 'total_column_ozone'
data_ls = []
year_ls = [1981, 1982, 1982, 1982, 1983, 1983, 1984, 1984, 1984, 1985, 1985,
1986, 1988, 1988, 1988, 1989, 1990, 1991, 1993, 1994, 1994, 1995,
1996, 1996, 1998, 1998, 1999, 2000, 2001, 2002, 2002, 2003, 2003,
2005, 2006, 2006, 2006, 2007, 2008, 2008, 2009, 2009, 2010]
month_ls = [ 2, 1, 2, 12, 2, 3, 2, 3, 12, 1, 2, 1, 1, 2, 3, 2, 1,
2, 4, 1, 2, 12, 1, 3, 1, 3, 3, 2, 1, 2, 12, 2, 3, 1,
1, 1, 2, 1, 1, 12, 1, 2, 2]
day_ls = [21, 21, 20, 8, 14, 11, 1, 13, 19, 10, 24, 30, 21, 11, 20, 14, 16,
15, 3, 11, 17, 11, 21, 14, 15, 13, 16, 21, 24, 6, 6, 8, 3, 31,
3, 30, 20, 2, 21, 18, 22, 21, 13]
time_ls = [f'{i:02d}:00' for i in range(1,24)]
for year, month, day in zip(year_ls, month_ls, day_ls):
data_may_08 = ct.catalogue.retrieve(
'reanalysis-era5-single-levels',
{
'variable': variable,
'product_type': 'reanalysis',
'year': [
str(year)
],
'month': [
f'{month:02d}'
],
'day': [
f'{day:02d}'
],
'time': time_ls,
}
)
data_ls.append(data_may_08)
data = ct.cube.concat(data_ls, dim='time')
return data | 2.671875 | 3 |
p4cw/p4cw.py | nurpax/p4cw | 1 | 12787219 | <reponame>nurpax/p4cw
#!/usr/bin/env python3
import argparse
import os
import subprocess
import tempfile
import sys
import re
from typing import List, Optional, Tuple
def friendlify(spec: List[str]) -> Tuple[Optional[str], List[str]]:
if len(spec) < 1:
return None, spec
if spec[0].startswith('# A Perforce Client Specification.'):
spec_out = []
client_spec_name = None
state = 'spec'
for line in spec:
if state == 'spec':
if m := re.match(r'^Client:\s+(.*)$', line):
client_spec_name = m[1]
spec_out.append(line)
continue
if re.match(r'^View:\s*$', line):
state = 'view'
spec_out.append(line)
continue
# Pass everything else through as-is.
spec_out.append(line)
elif state == 'view':
if m := re.match(r'^\t//([a-zA-Z0-9_-]+)/(.*) //([a-zA-Z0-9_-]+)/(.*)$', line):
# friendlify client spec mapping
depot = m[1]
depot_path = m[2]
client_name = m[3]
client_path = m[4]
if (depot_path == client_path) and (client_name == client_spec_name):
spec_out.append(f'\t//{depot}/{depot_path}')
else:
spec_out.append(line)
continue
else:
state = 'spec'
spec_out.append(line)
continue
return 'clientspec', spec_out
return None, spec
def unfriendlify(spec: List[str], spec_type: Optional[str]) -> List[str]:
if spec_type is None or len(spec_type) < 1:
return spec
assert spec_type in ['clientspec']
if spec_type == 'clientspec':
spec_out = []
client_spec_name = None
state = 'spec'
for line in spec:
if state == 'spec':
if m := re.match(r'^Client:\s+(.*)$', line):
client_spec_name = m[1]
elif re.match(r'^View:\s*$', line):
state = 'view'
# Pass everything else through as-is.
spec_out.append(line)
elif state == 'view':
# TODO exclusions prefixed with `-` not supported yet.
if m := re.match(r'^\t//([a-zA-Z0-9_-]+)/(.*)$', line):
depot = m[1]
depot_path = m[2]
# Add client path mapping back.
spec_out.append(f'\t//{depot}/{depot_path} //{client_spec_name}/{depot_path}')
elif m := re.match(r'^\t//.*$', line):
# Pass other types of client spec mappings as-is.
spec_out.append(line)
else:
state = 'spec'
spec_out.append(line)
return spec_out
assert False, 'unknown spec type'
def main():
parser = argparse.ArgumentParser(
description='Wrapper for Perforce `p4 client` editor (See https://github.com/nurpax/p4cw.)'
)
parser.add_argument('spec', metavar='FILE', nargs=1, help='input file')
parser.add_argument('--stdout', action='store_true', help='Print edited spec on stdout for debugging')
args = parser.parse_args()
real_editor = os.environ.get('EDITOR')
if real_editor is None or real_editor == '':
print ('EDITOR must be set')
sys.exit(1)
# Read input spec from p4 (this can be a client spec, submit spec, or similar.)
with open(args.spec[0], 'rt') as f:
spec_in = [line.rstrip(' \r\n') for line in f.readlines()]
fd, path = tempfile.mkstemp()
try:
with os.fdopen(fd, 'w') as tmp:
spec_type, lines = friendlify(spec_in)
tmp.write('\n'.join(lines) + '\n')
# Spawn editor for the "friendlier" version of the input spec for the user
# to edit.
subprocess.run([real_editor] + [path], check=True)
with open(path, 'rt') as tmp:
# Transform friendly format spec back to something p4 can understand
# and save to disk.
spec_out = unfriendlify([line.rstrip(' \r\n') for line in tmp.readlines()], spec_type)
if args.stdout:
print ('\n'.join(spec_out) + '\n')
else:
with open(args.spec[0], 'wt') as fout:
fout.write('\n'.join(spec_out) + '\n')
finally:
os.remove(path)
if __name__ == "__main__":
main()
| 2.546875 | 3 |
others/DDCC/2020/b.py | fumiyanll23/AtCoder | 0 | 12787220 | def main():
# input
N = int(input())
As = list(map(int, input().split()))
# compute
diffs = [0] * (N-1)
r, s = 0, sum(As)
for i in range(N-1):
r += As[i]
s -= As[i]
diffs[i] = abs(s-r)
# output
print(min(diffs))
if __name__ == '__main__':
main()
| 2.96875 | 3 |
michelanglo_protein/generate/protParam_mod.py | matteoferla/protein-module-for-VENUS | 1 | 12787221 | from Bio.SeqUtils import ProtParam, ProtParamData
from warnings import warn
# mod for DIWV
def mod(sequence):
"""
This is a not implemented function. It is a fix for ProtParam.ProteinAnalysis().protein_scale and the DIWV scale.
As the latter requires knowldge of the preceeding amino acid it will fail.
>>> p = ProtParam.ProteinAnalysis(sequence)
>>> p.protein_scale(ProtParamData.DIWV, window=9, edge=.4)
hashtag epicfail.
So this is the repalacement.
:param sequence: sequence to score
:type sequence: str
:return: DIWV score.
:rtype: list[int]
"""
p = ProtParam.ProteinAnalysis(sequence)
param_dict = ProtParamData.DIWV
window = 9
edge = 0.4
weights = p._weight_list(window, edge)
sum_of_weights = sum(weights) * 2 + 1
scores = []
for i in range(p.length - window):
subsequence = p.sequence[i:i + window]
score = 0.0
for j in range(window // 2):
try:
front = param_dict[subsequence[j]][subsequence[j + 1]]
back = param_dict[subsequence[window - j]][subsequence[window - j + 1]]
score += weights[j] * front + weights[j] * back
except KeyError:
warn(f'warning: {subsequence[j]} or {subsequence[window - j - 1]} is not a standard amino acid.')
middle = subsequence[window // 2]
if middle in param_dict:
score += param_dict[middle]
else:
warn(f'warning: {middle} is not a standard amino acid.')
scores.append(score / sum_of_weights)
return scores | 2.4375 | 2 |
read_2600.py | john-stephens/atari_2600_reader | 0 | 12787222 |
from __future__ import print_function
import smbus
import time
import struct
import argparse
import sys
import math
# Argument definition and handling
parser = argparse.ArgumentParser(description="Read an Atari 2600 cartridge via I2C")
parser.add_argument("-s", dest="rom_size", metavar="size", type=int, required=True, choices=[2, 4, 8, 16], help="ROM size in kb (2, 4, 8, 16)")
parser.add_argument("-o", dest="output_file", metavar="filename", required=True, help="ROM output file")
parser.add_argument("-b", dest="rom_bank", metavar="type", default="auto", choices=["auto", "F8", "F6"], help="ROM bank switching method (auto, F8, F6) [default: F8]")
parser.add_argument("--rom-delay", metavar="delay", type=float, default=0.2, help="ROM delay in seconds between setting the address and reading a byte [default=0.2]")
parser.add_argument("--retries", metavar="num", type=int, default=3, help="Number of retried when an I/O error is received during reading [default: 3]")
parser.add_argument("--i2c-bus", metavar="num", type=int, default=1, help="The I2C bus to read from (0=/dev/i2c-0, 1=/dev/i2c-1) [default: 1]")
parser.add_argument("--write-bus1", metavar="addr", default="0x20", help="The I2C bus address to use to write the first 8 bytes of the ROM address [default: 0x20]")
parser.add_argument("--write-bank1", metavar="num", type=int, default=0, choices=[0, 1, 2], help="The MCP23017 or MCP23008 bank to use to write the first 8 bytes of the ROM address (0=MCP23017 Bank A, 1=MCP23017 Bank B, 2=MCP23008) [default: 0]")
parser.add_argument("--write-bus2", metavar="addr", default="0x20", help="The I2C bus address to use to write the last 5 bytes of the ROM address [default: 0x20]")
parser.add_argument("--write-bank2", metavar="num", type=int, default=1, choices=[0, 1, 2], help="The MCP23017 or MCP23008 bank to use to write the last 5 bytes of the ROM address (0=MCP23017 Bank A, 1=MCP23017 Bank B, 2=MCP23008) [default: 1]")
parser.add_argument("--read-bus", metavar="addr", default="0x24", help="The I2C bus address to use to read the ROM data [default: 0x24]")
parser.add_argument("--read-bank", metavar="num", type=int, default=0, choices=[0, 1, 2], help="The MCP23017 or MCP23008 bank to use to read the ROM data (0=MCP23017 Bank A, 1=MCP23017 Bank B, 2=MCP23008) [default: 0]")
args = parser.parse_args()
# Output settings
OUTPUT_FILE = args.output_file
# ROM settings
ROM_SIZE = args.rom_size * 1024
ROM_OFFSET = 0x1000
ROM_MAX_BANK = 4096
ROM_BANK = args.rom_bank
ROM_F8_BANKS = [ 0x1FF8, 0x1FF9 ]
ROM_F6_BANKS = [ 0x1FF6, 0x1FF7, 0x1FF8, 0x1FF9 ]
ROM_DELAY = args.rom_delay
MAX_RETRIES = args.retries
RETRY_DELAY = 5
# I2C bus settings
I2C_BUS = args.i2c_bus
# The 2600 has 13 address pins, so we need to spread these over two banks
# with the first 8 bits on the first bank and the remaining 5 on the second.
ADDR_WRITE_BUS1 = int(args.write_bus1, 0)
ADDR_WRITE_BANK1 = args.write_bank1
ADDR_WRITE_BUS2 = int(args.write_bus2, 0)
ADDR_WRITE_BANK2 = args.write_bank2
# The 2600 has 8 data pins, so we can use a single bank for that
ADDR_READ_BUS = int(args.read_bus, 0)
ADDR_READ_BANK = args.read_bank
# I2C Register Constants for MCP23017 and MCP23008
#
# Taken from the following datasheets:
# MCP23017: http://ww1.microchip.com/downloads/en/DeviceDoc/20001952C.pdf (table 3-3)
# MCP23008: http://ww1.microchip.com/downloads/en/DeviceDoc/21919e.pdf (table 1-3)
I2C_REG_IODIR = [ 0x00, 0x01, 0x00 ]
I2C_REG_GPIO = [ 0x12, 0x13, 0x09 ]
I2C_IODIR_PORT_READ = 0xFF
I2C_IODIR_PORT_WRITE = 0x00
# Configure the MCP23017/MCP23008 chips for reading and writing
def configBus(bus):
# Write bus
print("Configuring bus 0x{0:02x}, bank {1} for writing (reg: 0x{2:02x})" . format(ADDR_WRITE_BUS1, ADDR_WRITE_BANK1, I2C_REG_IODIR[ ADDR_WRITE_BANK1 ]))
bus.write_byte_data(ADDR_WRITE_BUS1, I2C_REG_IODIR[ ADDR_WRITE_BANK1 ], I2C_IODIR_PORT_WRITE)
print("Configuring bus 0x{0:02x}, bank {1} for writing (reg: 0x{2:02x})" . format(ADDR_WRITE_BUS2, ADDR_WRITE_BANK2, I2C_REG_IODIR[ ADDR_WRITE_BANK2 ]))
bus.write_byte_data(ADDR_WRITE_BUS2, I2C_REG_IODIR[ ADDR_WRITE_BANK2 ], I2C_IODIR_PORT_WRITE)
# Read bus
print("Configuring bus 0x{0:02x}, bank {1} for reading (reg: 0x{2:02x})" . format(ADDR_READ_BUS, ADDR_READ_BANK, I2C_REG_IODIR[ ADDR_READ_BANK ]))
bus.write_byte_data(ADDR_READ_BUS, I2C_REG_IODIR[ ADDR_READ_BANK ], I2C_IODIR_PORT_READ)
def realAddress(address):
return ( ( address - ROM_OFFSET ) % ROM_MAX_BANK ) + ROM_OFFSET
def bankNumber(address):
return int(math.floor( ( address - ROM_OFFSET ) / ROM_MAX_BANK ))
# Perform bank switching to correct the bank before reading, if needed
def bankSwitch(bus, address, rom_bank):
real_address = realAddress(address)
bank_number = bankNumber(address)
if rom_bank == "F8" and ( real_address == ROM_OFFSET or ( real_address - 1 ) in ROM_F8_BANKS ):
print("\nBank switch! {0:x} {1:x}" . format(address, ROM_F8_BANKS[ bank_number ]))
setAddress(bus, ROM_F8_BANKS[ bank_number ])
elif rom_bank == "F6" and ( real_address == ROM_OFFSET or ( real_address - 1 ) in ROM_F6_BANKS ):
print("\nBank switch! {0:x} {1:x}" . format(address, ROM_F6_BANKS[ bank_number ]))
setAddress(bus, ROM_F6_BANKS[ bank_number ])
# Set the address to read from the cartridge
def setAddress(bus, address):
bus.write_byte_data(ADDR_WRITE_BUS1, I2C_REG_GPIO[ ADDR_WRITE_BANK1 ], address & 0xFF)
bus.write_byte_data(ADDR_WRITE_BUS2, I2C_REG_GPIO[ ADDR_WRITE_BANK2 ], address >> 8)
# time.sleep(ROM_DELAY)
# Read a byte from the cartridge
def readByte(bus, retry=0):
try:
return bus.read_byte_data(ADDR_READ_BUS, I2C_REG_GPIO[ ADDR_READ_BANK ])
except:
if retry < MAX_RETRIES:
print("\nRetry delay!")
time.sleep(RETRY_DELAY)
return readByte(bus, retry + 1)
else:
raise
def readByteFast(bus, retry=0):
last_byte = None
byte_count = 0
while byte_count < 10:
byte = readByte(bus, retry)
if byte == last_byte:
byte_count += 1
else:
if last_byte != None:
print("Mismatch {0:x} {1:x}" . format(last_byte, byte))
time.sleep(ROM_DELAY)
last_byte = byte
byte_count = 0
return byte
# Check the ROM for basic errors
def checkRom(bus):
print("Checking ROM...")
bytes = []
for x in range(0, 16):
setAddress(bus, x + ROM_OFFSET)
byte = readByte(bus)
bytes.append(byte)
if checkRomZeros(bytes) and checkRomDuplicate(bytes):
print("ROM checks passed")
return True
return False
# Check the ROM for all zeros
def checkRomZeros(bytes):
if bytes.count(0) == len(bytes):
print("Error: all zeros returned, is cartridge inserted?")
return False
return True
# Check the ROM for pairs of bytes with duplicate values
def checkRomDuplicate(bytes):
num_bytes = len(bytes)
count = 0
for x in range(0, num_bytes/2):
if bytes[x * 2] == bytes[x * 2 + 1]:
count += 1
if count == num_bytes/2:
print("Error: duplicate bytes returned, wiring issue?")
return False
return True
# Test code to validate the address line wiring, moves from the first
# address pin to the last with a 30 second delay
#
#bit = 1
#
#for x in range(0, 13):
# setAddress(bit)
# bit = bit << 1
# time.sleep(30)
bus = smbus.SMBus(I2C_BUS)
configBus(bus)
if checkRom(bus):
# Set the default ROM bank method
if ROM_SIZE == 8192 and ROM_BANK == "auto":
ROM_BANK = "F8"
if ROM_SIZE == 16384 and ROM_BANK == "auto":
ROM_BANK = "F6"
if ROM_BANK == "auto":
ROM_BANK = None
file = open(OUTPUT_FILE, "wb")
for x in range(0, ROM_SIZE):
bankSwitch(bus, x + ROM_OFFSET, ROM_BANK)
setAddress(bus, realAddress(x + ROM_OFFSET))
byte = readByteFast(bus)
file.write(struct.pack('B', byte))
sys.stdout.write("\rRead {0} of {1} bytes" . format(x + 1, ROM_SIZE));
sys.stdout.flush()
file.close()
print("\nDone!")
bus.close();
| 3.28125 | 3 |
storage/bucket.py | FabianoBFCarvalho/api-python | 0 | 12787223 | <reponame>FabianoBFCarvalho/api-python
import os
import cloudstorage
from google.appengine.api import app_identity
import webapp2
import googleapiclient.http
import base64
storage = googleapiclient.discovery.build('storage', 'v1')
class Bucket(webapp2.RequestHandler):
def get(self):
bucket_name = os.environ.get(
'BUCKET_NAME', app_identity.get_default_gcs_bucket_name())
print('--------------------------------------------------------------')
print(bucket_name)
print('--------------------------------------------------------------')
return bucket_name
def create_file(self, file):
"""
save file in storage
:param file:
:return: path_url
"""
# new_file = base64.b64decode(file[22:])
# new_file = base64.rpartition(',')[2]
print("*******************************************************")
print(file)
# print(new_file)
path_url = '/' + self.get() + '/image'
storage_file = cloudstorage.open(path_url, 'w', content_type='image/jpeg')
storage_file.write(file)
storage_file.close()
print('@@@@@@@@@@@@@@@@@@@ FINISH @@@@@@@@@@@@@@@@@@@@')
return path_url
| 2.890625 | 3 |
olypy/formatters.py | akhand2222/olypy | 0 | 12787224 | <reponame>akhand2222/olypy
'''
Helper functions for reading/writing Olympia lib files
'''
import sys
from functools import partial
from collections import OrderedDict
def fixed(count, char, key, array):
if not array or len(array) == 0:
return
if len(array) % count != 0:
raise ValueError(key + ' array is of incorrect length')
print(key, end='')
print(char, end='')
while True:
chunk = array[:count]
print(' '.join([str(i) for i in chunk]), end='')
array = array[count:]
if array:
print(' \\\n\t', end='')
else:
break
print()
def print_list(array, first, rest):
if len(array) > first:
chunk = array[:first]
array = array[first:]
print(' '.join([str(i) for i in chunk]), end=' ') # yes, we want the trailing space
if array:
print('\\\n\t', end='')
while len(array) > rest:
chunk = array[:rest]
array = array[rest:]
print(' '.join([str(i) for i in chunk]), end=' ') # yes, we want the trailing space
if array:
print('\\\n\t', end='')
print(' '.join([str(i) for i in array]), end=' \n') # yes, we want the trailing space
def default_print(key, array):
if not array or len(array) == 0:
return
if key != 'firstline':
print(key, end=' ')
print(' '.join([str(i) for i in array]))
def boxlist_print(key, array):
if not array or len(array) == 0:
return
print(key, end='')
if key[-1] != '\t':
print(end=' ')
print_list(array, 9, 11)
return
def boxlist_print_pd(key, array):
if len(array) < 4 or len(array) > 6:
raise ValueError('array must be of length 4 to 6')
boxlist_print(key, array)
return
def boxlist_print_tab(key, array):
boxlist_print(key + '\t', array)
def known_print(key, array):
if len(array) == 0:
return
print(key, end=' ')
print_list(array, 10, 11)
def admit_print(key, arrayarray):
if key != ' am':
raise ValueError
for array in arrayarray:
if len(array) < 2:
raise ValueError
chunk = array[:2]
array = array[2:]
print(key, ' '.join([str(i) for i in chunk]), end=' ')
if len(array):
print_list(array, 7, 11)
else:
print()
return
grand_format = OrderedDict([
('firstline', 1),
('na', 1),
('il', partial(fixed, 2, '\t')),
('tl', partial(fixed, 8, '\t')),
('an', boxlist_print),
('ad', boxlist_print),
('ah', boxlist_print),
('LI', OrderedDict([('wh', 1),
('hl', boxlist_print)])),
('CH', OrderedDict([('ni', 1), ('lo', 1), ('pl', 1), ('he', 1), ('si', 1), ('lk', 1), ('lr', 1),
('sl', partial(fixed, 5, '\t')),
('pr', 1), ('mo', 1), ('bh', 1), ('gu', 1), ('tf', 1), ('bp', 1), ('ra', 1),
('at', 1), ('df', 1), ('mi', 1), ('po', 1),
('ct', boxlist_print),
('dt', partial(fixed, 3, ' '))])),
('CM', OrderedDict([('im', 1), ('ma', 1), ('ca', 1), ('as', 1), ('hm', 1), ('qc', 1), ('rb', 1),
('hs', 1), ('cm', 1), ('pr', 1), ('kw', 1), ('dg', 1), ('sr', 1), ('bf', 1),
('vp', 1), ('pl', 1), ('pc', 1), ('ar', 1), ('ot', 1),
('vi', known_print)])),
('LO', OrderedDict([('pd', boxlist_print_pd),
('hi', 1), ('sh', 1), ('ba', 1), ('dg', 1), ('sl', 1), ('lc', 1)])),
('SL', OrderedDict([('te', boxlist_print),
('da', 1), ('de', 1), ('ca', 1), ('bm', 1), ('er', 1), ('eg', 1), ('mo', 1),
('gr', 1), ('sd', 1), ('cl', 1), ('sh', 1), ('mc', 1), ('op', 1), ('lo', 1),
('cp', 1), ('uf', 1), ('sf', 1), ('ql', 1), ('td', 1),
('nc', boxlist_print),
('lt', boxlist_print),
('lf', boxlist_print),
('bs', boxlist_print),
('lw', 1), ('lp', 1)])),
('IT', OrderedDict([('pl', 1), ('wt', 1), ('lc', 1), ('rc', 1), ('fc', 1), ('mu', 1), ('pr', 1),
('an', 1), ('at', 1), ('df', 1), ('mi', 1), ('bp', 1), ('ca', 1), ('un', 1)])),
('IM', OrderedDict([('au', 1), ('cl', 1), ('cr', 1), ('cc', 1), ('uk', 1), ('qc', 1), ('ab', 1),
('db', 1), ('mb', 1), ('ba', 1), ('rd', 1), ('tn', 1), ('oc', 1), ('ti', 1),
('rc', 1), ('pc', 1), ('ct', 1), ('lo', 1), ('mu', 1), ('ms', 1)])),
('PL', OrderedDict([('fn', 1), ('em', 1), ('ve', 1), ('pw', 1), ('np', 1), ('fs', 1), ('ft', 1),
('fo', 1), ('nt', 1), ('tf', 1), ('sl', 1), ('sb', 1), ('so', 1), ('dr', 1),
('ci', 1), ('bm', 1), ('lt', 1),
('kn', known_print),
('un', boxlist_print),
('uf', boxlist_print),
('am', admit_print)])),
('SK', OrderedDict([('tl', 1), ('rs', 1),
('of', boxlist_print),
('re', boxlist_print),
('rq', partial(fixed, 3, '\t')),
('pr', 1), ('np', 1), ('ne', 1)])),
('GA', OrderedDict([('tl', 1), ('nj', 1), ('nu', 1), ('sk', 1), ('rh', 1)])),
('MI', OrderedDict([('sb', 1), ('di', 1), ('mc', 1), ('md', 1), ('ss', 1), ('ca', 1), ('gc', 1),
('mh', 1), ('co', 1), ('ov', 1), ('ol', 1), ('bs', 1), ('sn', 1), ('ds', 1),
('nm', known_print)])),
('CO', OrderedDict([('li', 1),
('ar', partial(fixed, 8, ' ')),
('cs', 1), ('wa', 1), ('st', 1), ('us', 1), ('ue', 1), ('de', 1), ('po', 1),
('pr', 1), ('if', 1)])),
])
# these are things which are strings and not lists
# TODO: put this info into the table above
first_level_strings = set(('na',))
second_level_strings = set(('ds', 'li', 'pl', 'sn', 'fn', 'pw', 'em', 've'))
def print_one_thing(datum):
key1s_seen = set()
for key1, value1 in grand_format.items():
if datum.get(key1) is None:
continue
key1s_seen.add(key1)
if value1 == 1:
default_print(key1, datum.get(key1))
elif callable(value1):
value1(key1, datum.get(key1))
elif isinstance(value1, dict):
print(key1)
key2s_seen = set()
for key2, value2 in value1.items():
if datum[key1].get(key2) is None:
continue
key2s_seen.add(key2)
if value2 == 1:
default_print(' '+key2, datum[key1].get(key2))
elif callable(value2):
value2(' '+key2, datum[key1].get(key2))
else:
raise ValueError('unknown format value for subtype')
if datum[key1].keys() != key2s_seen:
extras = set(datum[key1].keys()).difference(key2s_seen)
raise KeyError('saw extra key2s, key1={} extras={} datum={}'.format(key1, extras, datum))
else:
raise ValueError('unknown format value for type')
print()
if datum.keys() != key1s_seen:
extras = set(datum.keys()).difference(key1s_seen)
raise KeyError('saw extra key1s, extras={} datum={}'.format(extras, datum))
def read_oly_file(f, verbose=False):
'''
Unlike io.c, we gut it out :-) basically we don't know the details about this file
format, we just GO GO GO. The copylib test & exceptions while printing make sure
we did the right thing here.
'''
data = {}
prev = ''
box = ''
subbox = ''
if isinstance(f, str):
f = open(f, 'r')
for line in f:
untrimmed_line = line
line = line.rstrip()
if line.startswith('#'):
continue
if prev:
line = prev + line
prev = ''
new = line.rstrip(' \\') # trailing \ and associated whitespace
if new != line:
prev = new
continue
else:
line = new
if line == '':
box = ''
subbox = ''
continue
pieces = line.split()
what = pieces.pop(0)
if not box:
if line.startswith('\t') or line.startswith(' '):
raise ValueError('line cannot start with whitespace')
if what.isdigit():
box = what
data[box] = {}
data[box]['firstline'] = [' '.join([what] + pieces)]
continue
else:
raise ValueError('unknown first line')
if len(what) == 2 and what.isupper():
subbox = what
data[box] = data.get(box, {})
data[box][subbox] = data[box].get(subbox, {})
continue
if not(line.startswith('\t') or line.startswith(' ')):
if data[box].get(what) is not None:
raise ValueError('saw a non-continuation for an existing item')
if what in first_level_strings:
data[box][what] = [untrimmed_line[3:].rstrip('\n')]
else:
data[box][what] = pieces
subbox = ''
else:
if what != 'am' and data[box].get(subbox).get(what) is not None:
raise ValueError
if what == 'am':
am = data[box].get(subbox, {}).get('am', [])
am.append(pieces) # list of lists
data[box][subbox]['am'] = am
elif what in second_level_strings:
data[box][subbox][what] = [untrimmed_line[4:].rstrip('\n')]
else:
data[box][subbox][what] = pieces
if verbose:
print('read', len(data), verbose, 'boxes.', file=sys.stderr)
return data
| 2.796875 | 3 |
app/main.py | JevinJ/SubredditStats | 1 | 12787225 | <filename>app/main.py
import argparse
from collections import defaultdict
from datetime import datetime
import praw
from unidecode import unidecode
import fileio as fio
import constants
class SubredditStats:
def __init__(self, **kwargs):
'''
:param bot_name: The name/site of the bot as defined in praw.ini
:param target: Target subreddit(s), eg: 'pcgaming' or 'pcgaming+gaming'
:param csv_period: Time period in days to generate a full CSV file of results.
'''
print(f'Running with commands --name({kwargs["bot_name"]}), --target({kwargs["target"]}),'
f' and generating a new csv every {kwargs["csv_period"]} day(s)')
self.reddit = praw.Reddit(kwargs['bot_name'])
self.target = self.reddit.subreddit(kwargs['target'])
self.target_foldername = kwargs['target'].replace('+', '_')
fio.create_directory_path(f'output/{self.target_foldername}/completedcsv')
self.csv_period = kwargs['csv_period']
self.word_frequency = defaultdict(int)
self.user_filter = set(filter(None, fio.load_file('', 'user_filter.txt')))
self.word_filter = set(filter(None, fio.load_file('', 'word_filter.txt')))
self.last_csv_generation = datetime.now()
self.last_dataset_save = datetime.now()
self.main()
def filter_text(self, text, min_length=1, max_length=16):
'''
Text filtering, filters punctuation, converts non-ascii, undesired, very short/long words,
or anything in word_filter.txt
:param text: The full text body of a comment or submission title.
:param min_length: The minimum length of a single word in text, anything less is filtered out.
:param max_length: The maximum length of a single word in text, anything more is filtered out.
:return: A list of words that have been filtered.
'''
result = []
for word in unidecode(text.lower()).split(' '):
if len(word) > min_length and len(word) < max_length:
if word.isalpha() and word not in self.word_filter:
result.append(''.join(word.translate(constants.PUNCTUATION_TRANS)))
return result
def update_dataset(self, text):
'''
Increment the values in the word frequency dict for each word in text.
:param text: A list of words that have been filtered/parsed and are ready to be included into word_frequency.
'''
text = self.filter_text(text)
for word in text:
self.word_frequency[word] += 1
def main(self):
while True:
for comment in self.target.stream.comments(pause_after=-1):
if comment is None:
break
if str(comment.author).lower() not in self.user_filter:
self.update_dataset(comment.body)
for submission in self.target.stream.submissions(pause_after=-1):
if submission is None:
break
if str(submission.author).lower() not in self.user_filter:
self.update_dataset(submission.title)
time_now = datetime.now()
#If an hour has passed since start-up/last save, save the word frequencies into a file and wipe it.
if (time_now - self.last_dataset_save).seconds // 3600 >= 1:
pickle_filename = f'{self.last_dataset_save.strftime(constants.FILE_TIMESTAMP_FORMAT)}.pickle'
fio.save_pickle(self.word_frequency, f'output/{self.target_foldername}', pickle_filename)
self.word_frequency = defaultdict(int)
self.last_dataset_save = datetime.now()
#If self.csv_period days have passed since start-up/last save, generate a csv and wipe saved pickle files.
if (time_now - self.last_csv_generation).seconds // 86400 >= 1:
csv_filename = f'{self.last_csv_generation.strftime(constants.FILE_TIMESTAMP_FORMAT)}.csv'
fio.generate_csv(f'output/{self.target_foldername}',
f'output/{self.target_foldername}/completedcsv',
csv_filename)
self.last_csv_generation = datetime.now()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('bot_name', type=str, metavar='BOT_NAME',
help='The name/site of the bot as defined in praw.ini')
parser.add_argument('target', type=str, metavar='SUB_NAME',
help='Subreddit(s) to target, "+" to target multiple subs, eg: pcgaming or pcgaming+gaming')
parser.add_argument('--interval', type=lambda i: abs(int(i)) or 1, metavar='CSV_PERIOD', dest='csv_period', default=1,
help='Time interval in days to compile a new csv containing word frequencies,' + \
' may not be less than 1. default=1')
SubredditStats(**vars(parser.parse_args()))
| 2.71875 | 3 |
data/basic_deterministic.py | RandalJBarnes/OnekaPy | 0 | 12787226 | <filename>data/basic_deterministic.py
PROJECTNAME = 'Basic deterministic example'
TARGET = 0
NPATHS = 100
DURATION = 10*365.25
NREALIZATIONS = 1
BASE = 0.0
C_DIST = (35.0, 50.0, 75.0)
P_DIST = (0.25)
T_DIST = (20.0, 25.0)
BUFFER = 100
SPACING = 2
UMBRA = 10
SMOOTH = 4
CONFINED = True
TOL = 1
MAXSTEP = 20
WELLS = [
(2250, 2250, 0.25, (600, 750, 900)),
(1750, 2750, 0.25, (600, 750, 900))
]
OBSERVATIONS = [
(1000, 1000, 100, 2),
(1000, 1500, 105, 2),
(1000, 2000, 110, 2),
(1000, 2500, 115, 2),
(1000, 3000, 120, 2),
(1500, 1000, 95, 2),
(1500, 1500, 100, 2),
(1500, 2000, 105, 2),
(1500, 2500, 110, 2),
(1500, 3000, 115, 2),
(2000, 1000, 90, 2),
(2000, 1500, 95, 2),
(2000, 2000, 100, 2),
(2000, 2500, 105, 2),
(2000, 3000, 110, 2),
(2500, 1000, 85, 2),
(2500, 1500, 90, 2),
(2500, 2000, 95, 2),
(2500, 2500, 100, 2),
(2500, 3000, 105, 2),
(3000, 1000, 80, 2),
(3000, 1500, 85, 2),
(3000, 2000, 90, 2),
(3000, 2500, 95, 2),
(3000, 3000, 100, 2)
]
| 1.695313 | 2 |
ctr-in-action/deepctr/__init__.py | wdxtub/compute-ad-note | 21 | 12787227 | <gh_stars>10-100
from . import layers
from . import models
from .utils import check_version
__version__ = '0.4.1'
check_version(__version__)
| 1.09375 | 1 |
replay.py | fahdfareed/IoT-security-Implication | 0 | 12787228 | import json
f = open('removed_duplicates_sorted_2.json')
data = json.load(f)
f.close()
ones = []
for i in data["ones"]:
if "ali" in i["comment"]:
i["scam"] = 1
ones.append(i)
else:
ones.append(i)
data["ones"] = ones
with open('removed_duplicates_sorted_2.json', 'w') as outfile:
json.dump(data, outfile)
| 2.734375 | 3 |
fython/lex/mark_ropx.py | nicolasessisbreton/fython | 41 | 12787229 | <filename>fython/lex/mark_ropx.py
from ..config import *
from ..lexem import *
follower = [l.commax, l.rparx, l.rketx]
def mark_ropx(module):
lexem = module.lexem
for i in range(1, len(lexem)):
x = lexem[i]
t = x.type
v = x.value.value
if t == l.opx:
if v == ':':
n = lexem[i+1].type
if n in follower:
x.type = l.ropx
x.value = v
x.value = ROpX(x)
| 2.5 | 2 |
steem/steem.py | Netherdrake/steem-python | 24 | 12787230 | <filename>steem/steem.py
from .commit import Commit
from .steemd import Steemd
class Steem:
""" Connect to the Steem network.
Args:
nodes (list): A list of Steem HTTP RPC nodes to connect to. If not provided, official Steemit nodes will be used.
debug (bool): Elevate logging level to `logging.DEBUG`. Defaults to `logging.INFO`.
no_broadcast (bool): If set to ``True``, committal actions like sending funds will have no effect (simulation only).
Optional Arguments (kwargs):
Args:
keys (list): A list of wif keys. If provided, the Wallet will use these keys rather than the
ones found in BIP38 encrypted wallet.
unsigned (bool): (Defaults to False) Use this for offline signing.
expiration (int): (Defualts to 60) Size of window in seconds that the transaction
needs to be broadcasted in, before it expires.
Returns:
Steemd class instance. It can be used to execute commands against steem node.
Example:
If you would like to override the official Steemit nodes (default), you can pass your own.
When currently used node goes offline, ``Steemd`` will automatically fail-over to the next available node.
.. code-block:: python
nodes = [
'https://steemd.yournode1.com',
'https://steemd.yournode2.com',
]
s = Steemd(nodes)
"""
def __init__(self, nodes=None, no_broadcast=False, **kwargs):
self.steemd = Steemd(
nodes=nodes,
**kwargs
)
self.commit = Commit(
steemd_instance=self.steemd,
no_broadcast=no_broadcast,
**kwargs
)
def __getattr__(self, item):
""" Bind .commit, .steemd methods here as a convenience. """
if hasattr(self.steemd, item):
return getattr(self.steemd, item)
if hasattr(self.commit, item):
return getattr(self.commit, item)
raise AttributeError('Steem has no attribute "%s"' % item)
if __name__ == '__main__':
s = Steem()
print(s.get_account_count())
| 2.578125 | 3 |
feedzero/ingest/tests/test_models.py | dammitjim/badfeed | 0 | 12787231 | <gh_stars>0
import pytest
from feedzero.ingest.models import IngestLog
@pytest.mark.django_db
class TestIngestLog:
@pytest.mark.parametrize("state", [state[0] for state in IngestLog.STATE_CHOICES])
def test_can_create_different_log_states(self, feed, state):
"""Should be able to create each potential state."""
log = IngestLog.objects.create(state=state, feed=feed)
assert log.state == state
def test_feed_deletion_cascades(self, feed, ingest_log_factory):
"""Should delete corresponding logs if the related feed is deleted."""
log1 = ingest_log_factory(feed=feed)
log2 = ingest_log_factory(feed=feed)
feed.delete()
assert len(IngestLog.objects.filter(pk__in=[log1.pk, log2.pk])) == 0
| 2.28125 | 2 |
gmail_sender/__main__.py | awiseman/gmail-sender | 0 | 12787232 | <filename>gmail_sender/__main__.py<gh_stars>0
from __future__ import print_function
import argparse
import base64
import mimetypes
import os.path
import pickle
import sys
from email.mime.audio import MIMEAudio
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from google.auth.transport.requests import Request
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/gmail.send']
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("subject", type=str, help="message subject")
parser.add_argument("sender", type=str, help="email to send message from")
parser.add_argument("recipient", type=str, nargs="+", help="addressee(s) of the message")
parser.add_argument("-m", "--message", type=str, help="message to send")
parser.add_argument("-M", "--message-file", type=str, help="message to send from a file")
parser.add_argument("-a", "--attach", type=str, help="path to file to attach")
parser.add_argument("-c", "--content-id", type=str, default="<image>",
help="content id to use for attachment")
parser.add_argument("-i", "--inline", help="inline the attachment", action="store_true")
parser.add_argument("-d", "--dry-run", help="don't actually send the email", action="store_true")
parser.add_argument("--html", help="treat message as html", action="store_true")
args = parser.parse_args()
if args.message and args.message_file:
print("-m/--message and -M/--message-file are mutually exclusive")
sys.exit(2)
return args
def get_message_body(args):
if args.message:
return args.message
elif args.message_file:
with open(args.message_file, "r") as f:
return f.read()
else:
return ""
def create_message(args, recipient):
"""Create a message for an email.
Args:
args.sender: Email address of the sender.
args.to: Email address of the receiver.
args.subject: The subject of the email message.
Returns:
An object containing a base64url encoded email object.
"""
subtype = 'html' if args.html else 'us-ascii'
message_text = get_message_body(args)
message = MIMEMultipart() if args.attach else MIMEText(message_text, subtype)
if args.attach:
msg = MIMEText(message_text, subtype)
message.attach(msg)
attachment = prepare_attachment(args)
message.attach(attachment)
message['to'] = recipient
message['from'] = args.sender
message['subject'] = args.subject
return {'raw': bytes.decode(base64.urlsafe_b64encode(message.as_string().encode()))}
def prepare_attachment(args):
file_to_attach = args.attach
content_type, encoding = mimetypes.guess_type(file_to_attach)
if content_type is None or encoding is not None:
content_type = 'application/octet-stream'
main_type, sub_type = content_type.split('/', 1)
if main_type == 'text':
fp = open(file_to_attach, 'rb')
msg = MIMEText(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'image':
fp = open(file_to_attach, 'rb')
msg = MIMEImage(fp.read(), _subtype=sub_type)
fp.close()
elif main_type == 'audio':
fp = open(file_to_attach, 'rb')
msg = MIMEAudio(fp.read(), _subtype=sub_type)
fp.close()
else:
fp = open(file_to_attach, 'rb')
msg = MIMEBase(main_type, sub_type)
msg.set_payload(fp.read())
fp.close()
filename = os.path.basename(file_to_attach)
disposition = 'inline' if args.inline else 'attachment'
msg.add_header('Content-Disposition', disposition, filename=filename)
if args.inline:
msg.add_header('Content-ID', args.content_id)
return msg
def send_message(service, user_id, message):
"""Send an email message.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
message: Message to be sent.
Returns:
Sent Message.
"""
message = service.users().messages().send(userId=user_id, body=message).execute()
print('Message Id: {}'.format(message['id']))
return message
def main():
args = parse_arguments()
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
creds = flow.run_local_server()
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('gmail', 'v1', credentials=creds)
# Call the Gmail API
for recipient in args.recipient:
message = create_message(args, recipient)
if args.dry_run:
print(message)
else:
send_message(service, 'me', message)
if __name__ == '__main__':
main()
| 2.734375 | 3 |
try_local.py | Fredy/GCP_Things | 0 | 12787233 | <gh_stars>0
#!/usr/bin/env python
from flask import Flask, request
from main import image_borders
if __name__ == "__main__":
app = Flask(__name__)
@app.route('/')
def index():
return image_borders(request)
app.run('127.0.0.1', 8002, debug=True)
| 2.46875 | 2 |
system_query/__init__.py | vatai/system-query | 7 | 12787234 | """Initialization of system_query package."""
__all__ = [
'query_all', 'query_cpu', 'query_gpus', 'query_ram', 'query_software', 'query_and_export']
from .all_info import query_all
from .cpu_info import query_cpu
from .gpu_info import query_gpus
# from .host_info import query_host
# from .os_info import query_os
from .ram_info import query_ram
from .software_info import query_software
# from .swap_info import query_swap
from .query import query_and_export
| 1.539063 | 2 |
tests/test_scottbrian_paratools/test_smart_event.py | ScottBrian/scottbrian_paratools | 0 | 12787235 | <filename>tests/test_scottbrian_paratools/test_smart_event.py<gh_stars>0
"""test_smart_event.py module."""
###############################################################################
# Standard Library
###############################################################################
from enum import Enum
import logging
import time
from typing import Any, cast, Dict, Final, List, Optional, Union
import threading
###############################################################################
# Third Party
###############################################################################
import pytest
###############################################################################
# Local
###############################################################################
from .conftest import Cmds, ThreadPairDesc, ThreadPairDescs, ExpLogMsgs
from scottbrian_paratools.smart_event import (
SmartEvent,
WUCond,
SmartEventConflictDeadlockDetected,
SmartEventInconsistentFlagSettings,
SmartEventRemoteThreadNotAlive,
SmartEventWaitDeadlockDetected,
SmartEventWaitUntilTimeout)
from scottbrian_paratools.thread_pair import (
ThreadPair,
ThreadPairAlreadyPairedWithRemote,
ThreadPairDetectedOpFromForeignThread,
ThreadPairErrorInRegistry,
ThreadPairIncorrectNameSpecified,
ThreadPairNameAlreadyInUse,
ThreadPairNotPaired,
ThreadPairPairWithSelfNotAllowed,
ThreadPairPairWithTimedOut,
ThreadPairRemotePairedWithOther)
logger = logging.getLogger(__name__)
logger.debug('about to start the tests')
###############################################################################
# SmartEvent test exceptions
###############################################################################
class ErrorTstSmartEvent(Exception):
"""Base class for exception in this module."""
pass
class IncorrectActionSpecified(ErrorTstSmartEvent):
"""IncorrectActionSpecified exception class."""
pass
class UnrecognizedMessageType(ErrorTstSmartEvent):
"""UnrecognizedMessageType exception class."""
pass
class UnrecognizedCmd(ErrorTstSmartEvent):
"""UnrecognizedCmd exception class."""
pass
###############################################################################
# Cmd Constants
###############################################################################
Cmd = Enum('Cmd', 'Wait Wait_TOT Wait_TOF Wait_Clear Resume Sync Exit '
'Next_Action')
###############################################################################
# Action
###############################################################################
Action = Enum('Action',
'MainWait '
'MainSync MainSync_TOT MainSync_TOF '
'MainResume MainResume_TOT MainResume_TOF '
'ThreadWait ThreadWait_TOT ThreadWait_TOF '
'ThreadResume ')
###############################################################################
# action_arg fixtures
###############################################################################
action_arg_list = [Action.MainWait,
Action.MainSync,
Action.MainSync_TOT,
Action.MainSync_TOF,
Action.MainResume,
Action.MainResume_TOT,
Action.MainResume_TOF,
Action.ThreadWait,
Action.ThreadWait_TOT,
Action.ThreadWait_TOF,
Action.ThreadResume]
action_arg_list1 = [Action.MainWait
# Action.MainResume,
# Action.MainResume_TOT,
# Action.MainResume_TOF,
# Action.ThreadWait,
# Action.ThreadWait_TOT,
# Action.ThreadWait_TOF,
# Action.ThreadResume
]
action_arg_list2 = [ # Action.MainWait,
# Action.MainResume,
# Action.MainResume_TOT,
Action.MainResume_TOF
# Action.ThreadWait,
# Action.ThreadWait_TOT,
# Action.ThreadWait_TOF,
# Action.ThreadResume
]
@pytest.fixture(params=action_arg_list) # type: ignore
def action_arg1(request: Any) -> Any:
    """Using different actions.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return request.param
@pytest.fixture(params=action_arg_list) # type: ignore
def action_arg2(request: Any) -> Any:
    """Using different actions.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return request.param
###############################################################################
# timeout_arg fixtures
###############################################################################
timeout_arg_list = [None, 'TO_False', 'TO_True']
@pytest.fixture(params=timeout_arg_list) # type: ignore
def timeout_arg1(request: Any) -> Any:
    """Using different timeout settings.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return request.param
@pytest.fixture(params=timeout_arg_list) # type: ignore
def timeout_arg2(request: Any) -> Any:
    """Using different timeout settings.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return request.param
###############################################################################
# code fixtures
###############################################################################
code_arg_list = [None, 42]
@pytest.fixture(params=code_arg_list) # type: ignore
def code_arg1(request: Any) -> Any:
"""Using different codes.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(int, request.param)
@pytest.fixture(params=code_arg_list) # type: ignore
def code_arg2(request: Any) -> Any:
"""Using different codes.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(int, request.param)
###############################################################################
# log_msg fixtures
###############################################################################
log_msg_arg_list = [None, 'log msg1']
@pytest.fixture(params=log_msg_arg_list) # type: ignore
def log_msg_arg1(request: Any) -> Any:
"""Using different log messages.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(int, request.param)
@pytest.fixture(params=log_msg_arg_list) # type: ignore
def log_msg_arg2(request: Any) -> Any:
"""Using different log messages.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(int, request.param)
###############################################################################
# log_enabled fixtures
###############################################################################
log_enabled_list = [True, False]
@pytest.fixture(params=log_enabled_list) # type: ignore
def log_enabled_arg(request: Any) -> bool:
    """Using different log enabled settings.
Args:
request: special fixture that returns the fixture params
Returns:
The params values are returned one at a time
"""
return cast(bool, request.param)
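###############################################################################
# example: consuming the parametrized fixtures (illustrative sketch only)
###############################################################################
# The sketch below shows how the fixtures above are typically consumed: pytest
# invokes a test once for every combination of the requested fixture params.
# The helper name is hypothetical and, because it does not start with 'test_',
# it is never collected or executed by pytest.
def _example_fixture_usage(action_arg1: Any,
                           timeout_arg1: Any,
                           code_arg1: Any,
                           log_msg_arg1: Any) -> None:
    """Sketch of a test signature that requests the fixtures above."""
    logger.debug(f'action={action_arg1}, timeout={timeout_arg1}, '
                 f'code={code_arg1}, log_msg={log_msg_arg1}')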
###############################################################################
# TestSmartEventBasic class to test SmartEvent methods
###############################################################################
###############################################################################
# SmartEventDesc class
###############################################################################
class SmartEventDesc(ThreadPairDesc):
"""Describes a SmartEvent with name and thread to verify."""
def __init__(self,
name: Optional[str] = '',
s_event: Optional[SmartEvent] = None,
thread: Optional[threading.Thread] = None, # type: ignore
state: Optional[int] = 0, # 0 is unknown
paired_with: Optional[Any] = None) -> None:
"""Initialize the SmartEventDesc.
Args:
name: name of the SmartEvent
s_event: the SmartEvent being tracked by this desc
thread: the thread associated with this SmartEvent
state: describes whether the SmartEvent is alive and registered
            paired_with: names the SmartEvent paired with this one, if any
"""
ThreadPairDesc.__init__(self,
name=name,
thread_pair=s_event,
thread=thread,
state=state,
paired_with=paired_with)
def verify_state(self) -> None:
"""Verify the state of the SmartEvent."""
ThreadPairDesc.verify_state(self)
self.verify_smart_event_desc()
if self.paired_with is not None:
self.paired_with.verify_smart_event_desc()
###########################################################################
    # verify_smart_event_desc
###########################################################################
def verify_smart_event_desc(self) -> None:
"""Verify the SmartEvent object is initialized correctly."""
assert isinstance(self.thread_pair, SmartEvent)
assert isinstance(self.thread_pair.event, threading.Event)
# assert isinstance(self.thread, threading.Thread)
assert self.thread_pair.name == self.name
assert self.thread_pair.thread is self.thread
assert not self.thread_pair.wait_wait
assert not self.thread_pair.wait_timeout_specified
assert not self.thread_pair.deadlock
assert not self.thread_pair.conflict
assert self.thread_pair.code is None
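###############################################################################
# example: building and verifying a SmartEventDesc (illustrative sketch only)
###############################################################################
# A minimal sketch of the pattern used throughout these tests: create a
# SmartEvent, wrap it in a SmartEventDesc together with the owning thread,
# register the desc, and verify the tracked state.  The name 'example_alpha'
# is hypothetical and this helper is never called by the tests.
def _example_smart_event_desc_usage() -> None:
    """Sketch of typical SmartEventDesc usage (not called by any test)."""
    descs = ThreadPairDescs()
    s_event = SmartEvent(name='example_alpha')
    desc = SmartEventDesc(name='example_alpha',
                          s_event=s_event,
                          thread=threading.current_thread())
    descs.add_desc(desc)  # registers the desc and verifies the registry
    desc.verify_state()  # checks both ThreadPairDesc and SmartEvent fields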
# class SmartEventDescs:
# """Contains a collection of SmartEventDesc items."""
#
# ###########################################################################
# # __init__
# ###########################################################################
# def __init__(self):
# """Initialize object."""
# self._descs_lock = threading.RLock()
# self.descs: Dict[str, SmartEventDesc] = {}
#
# ###########################################################################
# # add_desc
# ###########################################################################
# def add_desc(self,
# desc: SmartEventDesc,
# verify: bool = True) -> None:
# """Add desc to collection.
#
# Args:
# desc: the desc to add
# verify: specify False when verification should not be done
#
# """
# with self._descs_lock:
# self.cleanup_registry()
# desc.state = SmartEventDesc.STATE_ALIVE_REGISTERED
# self.descs[desc.name] = desc
# if verify:
# self.verify_registry()
#
# ###########################################################################
# # thread_end
# ###########################################################################
# def thread_end(self,
# name: str) -> None:
# """Update SmartEventDescs to show a thread ended.
#
# Args:
# name: name of SmartEvent for desc to be updated
#
# """
# with self._descs_lock:
# # Note that this action does not cause registry cleanup
# # make sure thread is not alive
# assert not self.descs[name].s_event.thread.is_alive()
#
# # make sure we are transitioning correctly
# assert (self.descs[name].state
# == SmartEventDesc.STATE_ALIVE_REGISTERED)
# self.descs[name].state = SmartEventDesc.STATE_NOT_ALIVE_REGISTERED
#
# ###################################################################
# # verify the registry
# ###################################################################
# self.verify_registry()
#
# ###########################################################################
# # cleanup
# ###########################################################################
# def cleanup(self) -> None:
# """Perform cleanup for SmartEventDescs."""
# # Cleanup applies to all of the descs and is done
# # when first thing when a new SmartEvent is instantiated and
# # registered, or when a pair_with is done. This action is called
# # here for the other cases that trigger cleanup, such as
# # getting a SmartEventRemoteThreadNotAlive error.
# with self._descs_lock:
# self.cleanup_registry()
#
# ###################################################################
# # verify the registry
# ###################################################################
# self.verify_registry()
#
# ###########################################################################
# # paired
# ###########################################################################
# def paired(self,
# name1: Optional[str] = '',
# name2: Optional[str] = '',
# verify: bool = True) -> None:
# """Update SmartEventDescs to show paired status.
#
# Args:
# name1: name of SmartEvent for desc that is paired with name2
# name2: name of SmartEvent for desc that is paired with name1, or
# null if name1 became unpaired
# verify: specify False when verification should not be done
#
# """
# with self._descs_lock:
# self.cleanup_registry()
# # make sure we can allow the pair
# assert self.descs[name1].s_event.thread.is_alive()
# assert (self.descs[name1].state
# == SmartEventDesc.STATE_ALIVE_REGISTERED)
# assert name1 in SmartEvent._registry
# assert name1 in self.descs
#
# # note that name2 will normally be the SmartEventDesc
# # that we are pairing with, but it could be None in the case
# # where we are doing a second or subsequent pairing but the
# # remote fails to to do the pair, which means we lose the
# # residual name2 SmartEventDesc
# if name2:
# assert name2 in SmartEvent._registry
# assert self.descs[name2].s_event.thread.is_alive()
# assert (self.descs[name2].state
# == SmartEventDesc.STATE_ALIVE_REGISTERED)
# assert name2 in SmartEvent._registry
# assert name2 in self.descs
# self.descs[name1].paired_with = self.descs[name2]
# self.descs[name2].paired_with = self.descs[name1]
# else:
# self.descs[name1].paired_with = None
#
# ###################################################################
# # verify the registry
# ###################################################################
# if verify:
# self.verify_registry()
#
# ###########################################################################
# # verify_registry
# ###########################################################################
# def verify_registry(self):
# """Verify the registry."""
# with self._descs_lock:
# num_registered = 0
# for key, item in self.descs.items():
# if (item.state == SmartEventDesc.STATE_ALIVE_REGISTERED
# or item.state
# == SmartEventDesc.STATE_NOT_ALIVE_REGISTERED):
# num_registered += 1
# item.verify_state()
#
# assert len(SmartEvent._registry) == num_registered
#
# ###########################################################################
# # cleanup_registry
# ###########################################################################
# def cleanup_registry(self):
# """Cleanup the registry."""
# for key, item in self.descs.items():
# if item.state == SmartEventDesc.STATE_NOT_ALIVE_REGISTERED:
# assert not item.s_event.thread.is_alive()
# item.state = SmartEventDesc.STATE_NOT_ALIVE_UNREGISTERED
###############################################################################
# outer_f1
###############################################################################
def outer_f1(cmds: Cmds,
descs: ThreadPairDescs,
) -> None:
"""Outer function to test SmartEvent.
Args:
cmds: Cmds object to tell alpha when to go
descs: tracks set of SmartEventDesc items
"""
logger.debug('outer_f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event))
# tell alpha OK to verify (i.e., beta_smart_event set with s_event)
cmds.queue_cmd('alpha', 'go')
s_event.pair_with(remote_name='alpha')
assert s_event.sync(log_msg='outer beta sync point 1')
assert s_event.wait(log_msg='outer f1 wait 12')
assert s_event.sync(log_msg='outer beta sync point 2')
assert s_event.resume(log_msg='outer f1 resume 23')
assert s_event.sync(log_msg='outer beta sync point 3')
logger.debug('outer f1 exiting')
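###############################################################################
# example: driving outer_f1 from the alpha thread (illustrative sketch only)
###############################################################################
# A minimal sketch of the alpha (main thread) side that pairs with outer_f1
# above.  Since beta waits on event 12 and resumes event 23, alpha resumes 12
# and waits on 23, with matching sync points around each step.  This helper is
# an example only and is not called by the tests in this module.
def _example_drive_outer_f1() -> None:
    """Sketch of the alpha side that pairs with outer_f1 (not called)."""
    cmds = Cmds()
    descs = ThreadPairDescs()
    beta_thread = threading.Thread(target=outer_f1, args=(cmds, descs))
    smart_event = SmartEvent(name='alpha')
    descs.add_desc(SmartEventDesc(name='alpha',
                                  s_event=smart_event,
                                  thread=threading.current_thread()))
    beta_thread.start()
    cmds.get_cmd('alpha')  # wait until beta has created its SmartEvent
    smart_event.pair_with(remote_name='beta')
    descs.paired('alpha', 'beta')
    assert smart_event.sync(log_msg='outer alpha sync point 1')
    assert smart_event.resume(log_msg='outer alpha resume 12')
    assert smart_event.sync(log_msg='outer alpha sync point 2')
    assert smart_event.wait(log_msg='outer alpha wait 23')
    assert smart_event.sync(log_msg='outer alpha sync point 3')
    beta_thread.join()
    descs.thread_end('beta')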
###############################################################################
# OuterThreadApp class
###############################################################################
class OuterThreadApp(threading.Thread):
"""Outer thread app for test."""
def __init__(self,
cmds: Cmds,
descs: ThreadPairDescs
) -> None:
"""Initialize the object.
Args:
cmds: used to tell alpha to go
descs: tracks set of ThreadPairDescs items
"""
super().__init__()
self.cmds = cmds
self.descs = descs
self.s_event = SmartEvent(name='beta', thread=self)
def run(self) -> None:
"""Run the test."""
print('beta run started')
# normally, the add_desc is done just after the instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
self.descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
self.cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha')
self.descs.paired('alpha', 'beta')
assert self.s_event.sync(log_msg='outer beta sync point 1')
assert self.s_event.wait(log_msg='outer f1 wait 12')
assert self.s_event.sync(log_msg='outer beta sync point 2')
assert self.s_event.resume(log_msg='outer f1 resume 23')
assert self.s_event.sync(log_msg='outer beta sync point 3')
logger.debug('beta run exiting')
###############################################################################
# OuterThreadEventApp class
###############################################################################
class OuterThreadEventApp(threading.Thread, SmartEvent):
"""Outer thread event app for test."""
def __init__(self,
cmds: Cmds,
descs: ThreadPairDescs) -> None:
"""Initialize the object.
Args:
cmds: used to send cmds between threads
descs: tracks set of SmartEventDesc items
"""
threading.Thread.__init__(self)
SmartEvent.__init__(self, name='beta', thread=self)
self.cmds = cmds
self.descs = descs
def run(self):
"""Run the test."""
print('beta run started')
# normally, the add_desc is done just after the instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
self.descs.add_desc(SmartEventDesc(name='beta',
s_event=self,
thread=self))
self.cmds.queue_cmd('alpha')
self.pair_with(remote_name='alpha', timeout=3)
self.descs.paired('alpha', 'beta')
assert self.sync(log_msg='outer beta sync point 1')
assert self.wait(log_msg='outer f1 wait 12')
assert self.sync(log_msg='outer beta sync point 2')
assert self.resume(log_msg='outer f1 resume 23')
assert self.sync(log_msg='outer beta sync point 3')
logger.debug('beta run exiting')
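###############################################################################
# example: driving OuterThreadEventApp from alpha (illustrative sketch only)
###############################################################################
# A minimal sketch of the alpha side that starts OuterThreadEventApp and pairs
# with it.  The resume/wait order mirrors the run() method above; beta records
# its own desc and pairing once run() begins.  This helper is hypothetical and
# is not called by the tests in this module.
def _example_drive_outer_thread_event_app() -> None:
    """Sketch of the alpha side for OuterThreadEventApp (not called)."""
    cmds = Cmds()
    descs = ThreadPairDescs()
    smart_event = SmartEvent(name='alpha')
    descs.add_desc(SmartEventDesc(name='alpha',
                                  s_event=smart_event,
                                  thread=threading.current_thread()))
    thread_app = OuterThreadEventApp(cmds=cmds, descs=descs)
    thread_app.start()
    cmds.get_cmd('alpha')  # wait until beta has registered its desc
    smart_event.pair_with(remote_name='beta')
    assert smart_event.sync(log_msg='outer alpha sync point 1')
    assert smart_event.resume(log_msg='outer alpha resume 12')
    assert smart_event.sync(log_msg='outer alpha sync point 2')
    assert smart_event.wait(log_msg='outer alpha wait 23')
    assert smart_event.sync(log_msg='outer alpha sync point 3')
    thread_app.join()
    descs.thread_end('beta')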
###############################################################################
# TestSmartEventBasic class
###############################################################################
class TestSmartEventBasic:
"""Test class for SmartEvent basic tests."""
###########################################################################
# repr for SmartEvent
###########################################################################
    def test_smart_event_repr(self,
                              thread_exc: Any) -> None:
        """Test SmartEvent repr.
Args:
thread_exc: captures thread exceptions
"""
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
expected_repr_str = 'SmartEvent(name="alpha")'
assert repr(smart_event) == expected_repr_str
smart_event2 = SmartEvent(name="AlphaDog")
descs.add_desc(SmartEventDesc(name='AlphaDog',
s_event=smart_event2,
thread=threading.current_thread()))
expected_repr_str = 'SmartEvent(name="AlphaDog")'
assert repr(smart_event2) == expected_repr_str
def f1():
s_event = SmartEvent(name='beta1')
descs.add_desc(SmartEventDesc(name='beta1',
s_event=s_event,
thread=threading.current_thread()))
f1_expected_repr_str = 'SmartEvent(name="beta1")'
assert repr(s_event) == f1_expected_repr_str
cmds.queue_cmd('alpha', 'go')
cmds.get_cmd('beta1')
def f2():
s_event = SmartEvent(name='beta2')
descs.add_desc(SmartEventDesc(name='beta2',
s_event=s_event,
thread=threading.current_thread()))
f1_expected_repr_str = 'SmartEvent(name="beta2")'
assert repr(s_event) == f1_expected_repr_str
cmds.queue_cmd('alpha', 'go')
cmds.get_cmd('beta2')
cmds = Cmds()
a_thread1 = threading.Thread(target=f1)
a_thread1.start()
cmds.get_cmd('alpha')
a_thread2 = threading.Thread(target=f2)
a_thread2.start()
cmds.get_cmd('alpha')
cmds.queue_cmd('beta1', 'go')
a_thread1.join()
descs.thread_end('beta1')
cmds.queue_cmd('beta2', 'go')
a_thread2.join()
descs.thread_end('beta2')
###########################################################################
# test_smart_event_instantiate_with_errors
###########################################################################
def test_smart_event_instantiate_with_errors(self) -> None:
"""Test register_thread alpha first."""
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
# not OK to instantiate a new smart_event with same name
with pytest.raises(ThreadPairNameAlreadyInUse):
_ = SmartEvent(name='alpha')
with pytest.raises(ThreadPairIncorrectNameSpecified):
_ = SmartEvent(name=42) # type: ignore
# try wait, resume, and pause_until without having been paired
with pytest.raises(ThreadPairNotPaired):
smart_event.wait()
with pytest.raises(ThreadPairNotPaired):
smart_event.resume()
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting)
# try to pair with unknown remote
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event.pair_with(remote_name='beta', timeout=0.1)
# try to pair with bad name
with pytest.raises(ThreadPairIncorrectNameSpecified):
smart_event.pair_with(remote_name=3) # type: ignore
# make sure everything still the same
descs.verify_registry()
###########################################################################
# test_smart_event_pairing_with_errors
###########################################################################
def test_smart_event_pairing_with_errors(self) -> None:
"""Test register_thread during instantiation."""
def f1(name: str) -> None:
"""Func to test instantiate SmartEvent.
Args:
name: name to use for s_event
"""
logger.debug(f'{name} f1 entered')
s_event = SmartEvent(name=name)
descs.add_desc(SmartEventDesc(name=name,
s_event=s_event))
cmds.queue_cmd('alpha', 'go')
# not OK to pair with self
with pytest.raises(ThreadPairPairWithSelfNotAllowed):
s_event.pair_with(remote_name=name)
s_event.pair_with(remote_name='alpha')
# not OK to pair with remote a second time
with pytest.raises(ThreadPairAlreadyPairedWithRemote):
s_event.pair_with(remote_name='alpha')
s_event.sync(timeout=3,
log_msg=f'{name} f1 sync point 1')
logger.debug(f'{name} f1 exiting')
cmds = Cmds()
descs = ThreadPairDescs()
beta_t = threading.Thread(target=f1, args=('beta',))
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_t.start()
# not OK to pair with self
with pytest.raises(ThreadPairPairWithSelfNotAllowed):
smart_event.pair_with(remote_name='alpha')
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
# not OK to pair with remote a second time
with pytest.raises(ThreadPairAlreadyPairedWithRemote):
smart_event.pair_with(remote_name='beta')
smart_event.sync(log_msg='alpha sync point 1')
beta_t.join()
descs.thread_end(name='beta')
# at this point, f1 has ended. But the registry will not have changed,
# so everything will still show paired for both the alpha and beta
# SmartEvents. The alpha SmartEvent will detect that beta is no longer
# alive when a request is attempted.
descs.verify_registry()
#######################################################################
# second case - f1 with same name beta
#######################################################################
beta_t2 = threading.Thread(target=f1, args=('beta',))
beta_t2.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event.sync(log_msg='alpha sync point 1 again')
beta_t2.join()
descs.thread_end(name='beta')
# at this point, f1 has ended. But the registry will not have changed,
# so everything will still show paired for both the alpha and beta
# SmartEvents. The alpha SmartEvent will detect that beta is no longer
# alive when a request is attempted.
descs.verify_registry()
#######################################################################
# third case, use different name for f1. Should clean up old beta
# from the registry.
#######################################################################
with pytest.raises(ThreadPairNameAlreadyInUse):
smart_event = SmartEvent(name='alpha')  # expected to fail - name in use
beta_t3 = threading.Thread(target=f1, args=('charlie',))
beta_t3.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='charlie')
descs.paired('alpha', 'charlie')
assert 'beta' not in SmartEvent._registry
smart_event.sync(log_msg='alpha sync point 1 again')
beta_t3.join()
descs.thread_end(name='charlie')
# at this point, f1 has ended. But the registry will not have changed,
# so everything will still show paired for both the alpha and charlie
# SmartEvents. The alpha SmartEvent will detect that charlie is no longer
# alive when a request is attempted.
# change name in SmartEvent, then register a new entry to force the
# ThreadPairErrorInRegistry error
smart_event.remote.name = 'bad_name'
with pytest.raises(ThreadPairErrorInRegistry):
_ = SmartEvent(name='alpha2')
# restore the good name to allow verify_registry to succeed
smart_event.remote.name = 'charlie'
descs.verify_registry()
###########################################################################
# test_smart_event_pairing_with_multiple_threads
###########################################################################
def test_smart_event_pairing_with_multiple_threads(self) -> None:
"""Test register_thread during instantiation."""
def f1(name: str) -> None:
"""Func to test instantiate SmartEvent.
Args:
name: name to use for s_event
"""
logger.debug(f'{name} f1 entered')
s_event = SmartEvent(name=name)
descs.add_desc(SmartEventDesc(name=name,
s_event=s_event))
# not OK to pair with self
with pytest.raises(ThreadPairPairWithSelfNotAllowed):
s_event.pair_with(remote_name=name)
cmds.queue_cmd('alpha', 'go')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
# alpha needs to wait until we are officially paired to avoid
# timing issue when pairing with charlie
cmds.queue_cmd('alpha')
# not OK to pair with remote a second time
with pytest.raises(ThreadPairAlreadyPairedWithRemote):
s_event.pair_with(remote_name='alpha')
cmds.queue_cmd('alpha', 'go')
s_event.sync(log_msg=f'{name} f1 sync point 1')
logger.debug(f'{name} f1 exiting')
def f2(name: str) -> None:
"""Func to test instantiate SmartEvent.
Args:
name: name to use for s_event
"""
logger.debug(f'{name} f2 entered')
s_event = SmartEvent(name=name)
descs.add_desc(SmartEventDesc(name=name,
s_event=s_event))
# not OK to pair with self
with pytest.raises(ThreadPairPairWithSelfNotAllowed):
s_event.pair_with(remote_name=name)
with pytest.raises(ThreadPairPairWithTimedOut):
s_event.pair_with(remote_name='alpha', timeout=1)
s_event.pair_with(remote_name='alpha2')
descs.paired('alpha2', 'charlie')
# not OK to pair with remote a second time
with pytest.raises(ThreadPairAlreadyPairedWithRemote):
s_event.pair_with(remote_name='alpha2')
cmds.queue_cmd('alpha', 'go')
s_event.sync(log_msg=f'{name} f1 sync point 1')
logger.debug(f'{name} f2 exiting')
#######################################################################
# mainline
#######################################################################
descs = ThreadPairDescs()
cmds = Cmds()
beta_t = threading.Thread(target=f1, args=('beta',))
charlie_t = threading.Thread(target=f2, args=('charlie',))
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
beta_t.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
#######################################################################
# pair with charlie
#######################################################################
cmds.get_cmd('alpha')
smart_event2 = SmartEvent(name='alpha2')
descs.add_desc(SmartEventDesc(name='alpha2',
s_event=smart_event2))
charlie_t.start()
smart_event2.pair_with(remote_name='charlie')
cmds.get_cmd('alpha')
smart_event.sync(log_msg='alpha sync point 1')
beta_t.join()
descs.thread_end(name='beta')
smart_event2.sync(log_msg='alpha sync point 2')
charlie_t.join()
descs.thread_end(name='charlie')
# at this point, f1 and f2 have ended. But the registry will not have
# changed, so everything will still show paired for all of the
# SmartEvents. Any SmartEvent request will detect that
# its pair is no longer active and will trigger cleanup to
# remove any entries that are not alive from the registry. The SmartEvent
# objects for threads that are not alive remain pointed to by the alive
# entries so that they may still report SmartEventRemoteThreadNotAlive.
descs.verify_registry()
# cause cleanup via a sync request
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.sync(log_msg='mainline sync point 3')
descs.cleanup()
# try to pair with old beta - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event.pair_with(remote_name='beta', timeout=1)
# the pair_with sets smart_event.remote to None before trying the
# pair_with, and leaves it None when the pair_with fails
descs.paired('alpha')
# try to pair with old charlie - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event.pair_with(remote_name='charlie', timeout=1)
# try to pair with nobody - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event.pair_with(remote_name='nobody', timeout=1)
# try to pair with old beta - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event2.pair_with(remote_name='beta', timeout=1)
# the pair_with sets smart_event.remote to None before trying the
# pair_with, and leaves it None when the pair_with fails
descs.paired('alpha2')
# try to pair with old charlie - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event2.pair_with(remote_name='charlie', timeout=1)
# try to pair with nobody - should timeout
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event2.pair_with(remote_name='nobody', timeout=1)
descs.verify_registry()
###########################################################################
# test_smart_event_remote_pair_with_other_error
###########################################################################
def test_smart_event_remote_pair_with_other_error(self) -> None:
"""Test pair_with error case."""
def f1() -> None:
"""Func to test pair_with SmartEvent."""
logger.debug('beta f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event))
cmds.queue_cmd('alpha', 'go')
with pytest.raises(ThreadPairRemotePairedWithOther):
s_event.pair_with(remote_name='alpha')
cmds.get_cmd('beta')
logger.debug('beta f1 exiting')
def f2() -> None:
"""Func to test pair_with SmartEvent."""
logger.debug('charlie f2 entered')
s_event = SmartEvent(name='charlie')
descs.add_desc(SmartEventDesc(name='charlie',
s_event=s_event),
verify=False)
cmds.queue_cmd('alpha', 'go')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'charlie', verify=False)
cmds.queue_cmd('alpha', 'go')
s_event.sync(log_msg='charlie f1 sync point 1')
logger.debug('charlie f2 exiting')
#######################################################################
# mainline
#######################################################################
descs = ThreadPairDescs()
cmds = Cmds()
beta_t = threading.Thread(target=f1)
charlie_t = threading.Thread(target=f2)
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
beta_t.start()
cmds.get_cmd('alpha')
beta_se = SmartEvent._registry['beta']
# make sure beta has alpha as target of pair_with
while beta_se.remote is None:
time.sleep(1)
#######################################################################
# pair with charlie
#######################################################################
charlie_t.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='charlie')
cmds.get_cmd('alpha')
cmds.queue_cmd('beta')
# wait for beta to raise ThreadPairRemotePairedWithOther and end
beta_t.join()
descs.thread_end(name='beta')
# sync up with charlie to allow charlie to exit
smart_event.sync(log_msg='alpha sync point 1')
charlie_t.join()
descs.thread_end(name='charlie')
# cause cleanup via a sync request
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.sync(log_msg='mainline sync point 3')
descs.cleanup()
###########################################################################
# test_smart_event_pairing_cleanup
###########################################################################
def test_smart_event_pairing_cleanup(self) -> None:
"""Test register_thread during instantiation."""
def f1(name: str, remote_name: str, idx: int) -> None:
"""Func to test instantiate SmartEvent.
Args:
name: name to use for s_event
remote_name: name to pair with
idx: index into beta_smart_events
"""
logger.debug(f'{name} f1 entered, remote {remote_name}, idx {idx}')
s_event = SmartEvent(name=name)
descs.add_desc(SmartEventDesc(name=name,
s_event=s_event))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name=remote_name,
log_msg=f'f1 {name} pair with {remote_name} '
f'for idx {idx}')
s_event.sync(log_msg=f'{name} f1 sync point 1')
assert s_event.sync(timeout=3,
log_msg=f'{name} f1 sync point 2')
logger.debug(f'{name} f1 exiting')
#######################################################################
# mainline start
#######################################################################
cmds = Cmds()
descs = ThreadPairDescs()
#######################################################################
# create 4 beta threads
#######################################################################
beta_t0 = threading.Thread(target=f1, args=('beta0', 'alpha0', 0))
beta_t1 = threading.Thread(target=f1, args=('beta1', 'alpha1', 1))
beta_t2 = threading.Thread(target=f1, args=('beta2', 'alpha2', 2))
beta_t3 = threading.Thread(target=f1, args=('beta3', 'alpha3', 3))
#######################################################################
# create alpha0 SmartEvent and desc, and verify
#######################################################################
smart_event0 = SmartEvent(name='alpha0')
descs.add_desc(SmartEventDesc(name='alpha0',
s_event=smart_event0))
#######################################################################
# create alpha1 SmartEvent and desc, and verify
#######################################################################
smart_event1 = SmartEvent(name='alpha1')
descs.add_desc(SmartEventDesc(name='alpha1',
s_event=smart_event1))
#######################################################################
# create alpha2 SmartEvent and desc, and verify
#######################################################################
smart_event2 = SmartEvent(name='alpha2')
descs.add_desc(SmartEventDesc(name='alpha2',
s_event=smart_event2))
#######################################################################
# create alpha3 SmartEvent and desc, and verify
#######################################################################
smart_event3 = SmartEvent(name='alpha3')
descs.add_desc(SmartEventDesc(name='alpha3',
s_event=smart_event3))
#######################################################################
# start beta0 thread, and verify
#######################################################################
beta_t0.start()
cmds.get_cmd('alpha')
smart_event0.pair_with(remote_name='beta0')
smart_event0.sync(log_msg='alpha0 sync point 1')
descs.paired('alpha0', 'beta0')
#######################################################################
# start beta1 thread, and verify
#######################################################################
beta_t1.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta1')
smart_event1.sync(log_msg='alpha1 sync point 1')
descs.paired('alpha1', 'beta1')
#######################################################################
# start beta2 thread, and verify
#######################################################################
beta_t2.start()
cmds.get_cmd('alpha')
smart_event2.pair_with(remote_name='beta2')
smart_event2.sync(log_msg='alpha2 sync point 1')
descs.paired('alpha2', 'beta2')
#######################################################################
# start beta3 thread, and verify
#######################################################################
beta_t3.start()
cmds.get_cmd('alpha')
smart_event3.pair_with(remote_name='beta3')
smart_event3.sync(log_msg='alpha3 sync point 1')
descs.paired('alpha3', 'beta3')
#######################################################################
# let beta0 finish
#######################################################################
smart_event0.sync(log_msg='alpha0 sync point 1')
beta_t0.join()
descs.thread_end(name='beta0')
#######################################################################
# replace old beta0 with new beta0 - should clean up old beta0 in registry
#######################################################################
beta_t0 = threading.Thread(target=f1, args=('beta0', 'alpha0', 0))
beta_t0.start()
cmds.get_cmd('alpha')
smart_event0.pair_with(remote_name='beta0')
smart_event0.sync(log_msg='alpha0 sync point 1')
descs.paired('alpha0', 'beta0')
#######################################################################
# let beta1 and beta3 finish
#######################################################################
smart_event1.sync(log_msg='alpha1 sync point 2')
beta_t1.join()
descs.thread_end(name='beta1')
smart_event3.sync(log_msg='alpha3 sync point 3')
beta_t3.join()
descs.thread_end(name='beta3')
#######################################################################
# replace old beta1 with new beta1 - should clean up old beta1 and beta3
#######################################################################
beta_t1 = threading.Thread(target=f1, args=('beta1', 'alpha1', 1))
beta_t1.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta1')
smart_event1.sync(log_msg='alpha1 sync point 1')
descs.paired('alpha1', 'beta1')
# should get SmartEventRemoteThreadNotAlive for beta3
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event3.sync(log_msg='mainline sync point 4')
# should still be the same
descs.verify_registry()
#######################################################################
# get a new beta3 going
#######################################################################
beta_t3 = threading.Thread(target=f1, args=('beta3', 'alpha3', 3))
beta_t3.start()
cmds.get_cmd('alpha')
smart_event3.pair_with(remote_name='beta3')
smart_event3.sync(log_msg='alpha3 sync point 1')
descs.paired('alpha3', 'beta3')
#######################################################################
# let beta1 and beta2 finish
#######################################################################
smart_event1.sync(log_msg='alpha1 sync point 5')
beta_t1.join()
descs.thread_end(name='beta1')
smart_event2.sync(log_msg='alpha2 sync point 6')
beta_t2.join()
descs.thread_end(name='beta2')
#######################################################################
# trigger cleanup for beta1 and beta2
#######################################################################
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.sync(log_msg='alpha2 sync point 7')
descs.cleanup()
#######################################################################
# should get SmartEventRemoteThreadNotAlive for beta1 and beta2
#######################################################################
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.sync(log_msg='alpha1 sync point 8')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.sync(log_msg='alpha 2 sync point 9')
descs.verify_registry()
#######################################################################
# get a new beta2 going
#######################################################################
beta_t2 = threading.Thread(target=f1, args=('beta2', 'alpha2', 2))
beta_t2.start()
cmds.get_cmd('alpha')
smart_event2.pair_with(remote_name='beta2')
smart_event2.sync(log_msg='alpha2 sync point 1')
descs.paired('alpha2', 'beta2')
smart_event2.sync(log_msg='alpha2 sync point 2')
beta_t2.join()
descs.thread_end(name='beta2')
#######################################################################
# let beta0 complete
#######################################################################
smart_event0.sync(log_msg='alpha0 sync point 2')
beta_t0.join()
descs.thread_end(name='beta0')
#######################################################################
# let beta3 complete
#######################################################################
smart_event3.sync(log_msg='alpha3 sync point 2')
beta_t3.join()
descs.thread_end(name='beta3')
###########################################################################
# test_smart_event_foreign_op_detection
###########################################################################
def test_smart_event_foreign_op_detection(self) -> None:
"""Test register_thread with f1."""
#######################################################################
# mainline and f1 - mainline pairs with beta
#######################################################################
logger.debug('start test 1')
def f1():
print('beta f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event))
my_c_thread = threading.current_thread()
assert s_event.thread is my_c_thread
assert s_event.thread is threading.current_thread()
s_event.pair_with(remote_name='alpha')
s_event.sync(log_msg='f1 beta sync point 1')
logger.debug('f1 beta about to enter cmd loop')
while True:
beta_cmd = cmds.get_cmd('beta')
if beta_cmd == Cmd.Exit:
break
logger.debug(f'thread_func1 received cmd: {beta_cmd}')
if beta_cmd == Cmd.Wait:
assert s_event.wait()
elif beta_cmd == Cmd.Resume:
with pytest.raises(SmartEventWaitUntilTimeout):
s_event.pause_until(WUCond.RemoteWaiting,
timeout=0.002)
with pytest.raises(SmartEventWaitUntilTimeout):
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.01)
with pytest.raises(SmartEventWaitUntilTimeout):
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.02)
s_event.sync(log_msg='f1 beta sync point 2')
s_event.pause_until(WUCond.RemoteWaiting)
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.001)
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.01)
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.02)
s_event.pause_until(WUCond.RemoteWaiting, timeout=-0.02)
s_event.pause_until(WUCond.RemoteWaiting, timeout=-1)
s_event.pause_until(WUCond.RemoteWaiting, timeout=0)
s_event.resume()
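# foreign1 issues SmartEvent requests from a thread that is neither the
# owner of the SmartEvent nor its paired remote; every request is expected
# to raise ThreadPairDetectedOpFromForeignThread.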
def foreign1(s_event):
logger.debug('foreign1 entered')
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.resume()
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.pair_with(remote_name='beta')
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.pair_with(remote_name='beta', timeout=1)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.02)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.pause_until(WUCond.RemoteWaiting, timeout=0.02)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.pause_until(WUCond.RemoteWaiting)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.wait()
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
s_event.sync()
logger.debug('foreign1 exiting')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event1 = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event1))
alpha_t = threading.current_thread()
my_f1_thread = threading.Thread(target=f1)
my_foreign1_thread = threading.Thread(target=foreign1,
args=(smart_event1,))
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=-0.002)
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0)
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0.002)
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0.2)
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting)
logger.debug('mainline about to start beta thread')
my_f1_thread.start()
smart_event1.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event1.sync(log_msg='mainline sync point 1')
cmds.queue_cmd('beta', Cmd.Wait)
my_foreign1_thread.start() # attempt to resume beta (should fail)
my_foreign1_thread.join()
logger.debug('about to pause_until RemoteWaiting')
smart_event1.pause_until(WUCond.RemoteWaiting)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0.001)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0.01)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0.02)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=-0.02)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=-1)
smart_event1.pause_until(WUCond.RemoteWaiting, timeout=0)
smart_event1.resume()
cmds.queue_cmd('beta', Cmd.Resume)
smart_event1.sync(log_msg='mainline sync point 2')
assert smart_event1.wait()
cmds.queue_cmd('beta', Cmd.Exit)
my_f1_thread.join()
descs.thread_end(name='beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.resume()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.pause_until(WUCond.RemoteWaiting)
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.sync(log_msg='mainline sync point 3')
assert smart_event1.thread is alpha_t
###########################################################################
# test_smart_event_outer_thread_f1
###########################################################################
def test_smart_event_outer_thread_f1(self) -> None:
"""Test simple sequence with outer thread f1."""
#######################################################################
# mainline
#######################################################################
logger.debug('mainline starting')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
f1_thread = threading.Thread(target=outer_f1, args=(cmds, descs))
f1_thread.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event.sync(log_msg='mainline sync point 1')
smart_event.resume(log_msg='alpha resume 12')
smart_event.sync(log_msg='mainline sync point 2')
smart_event.wait(log_msg='alpha wait 23')
smart_event.sync(log_msg='mainline sync point 3')
f1_thread.join()
descs.thread_end(name='beta')
logger.debug('mainline exiting')
###########################################################################
# test_smart_event_outer_thread_app
###########################################################################
def test_smart_event_outer_thread_app(self) -> None:
"""Test simple sequence with outer thread app."""
#######################################################################
# mainline
#######################################################################
logger.debug('mainline starting')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
thread_app = OuterThreadApp(cmds=cmds, descs=descs)
thread_app.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta', timeout=3)
smart_event.sync(log_msg='mainline sync point 1')
smart_event.resume(log_msg='alpha resume 12')
smart_event.sync(log_msg='mainline sync point 2')
smart_event.wait(log_msg='alpha wait 23')
smart_event.sync(log_msg='mainline sync point 3')
thread_app.join()
descs.thread_end(name='beta')
logger.debug('mainline exiting')
###########################################################################
# test_smart_event_outer_thread_event_app
###########################################################################
def test_smart_event_outer_thread_event_app(self) -> None:
"""Test simple sequence with outer thread event app."""
#######################################################################
# mainline
#######################################################################
logger.debug('mainline starting')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
thread_event_app = OuterThreadEventApp(cmds=cmds, descs=descs)
thread_event_app.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta', timeout=3)
smart_event.sync(log_msg='mainline sync point 1')
smart_event.resume(log_msg='alpha resume 12')
smart_event.sync(log_msg='mainline sync point 2')
smart_event.wait(log_msg='alpha wait 23')
smart_event.sync(log_msg='mainline sync point 3')
thread_event_app.join()
descs.thread_end(name='beta')
logger.debug('mainline exiting')
###########################################################################
# test_smart_event_wait_deadlock_detection
###########################################################################
def test_smart_event_wait_deadlock_detection(self) -> None:
"""Test deadlock detection with f1."""
#######################################################################
# f1
#######################################################################
def f1(ml_thread):
logger.debug('beta f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event))
my_c_thread = threading.current_thread()
cmds.get_cmd('beta')
s_event.pair_with(remote_name='alpha')
assert s_event.remote.thread is ml_thread
assert s_event.remote.thread is alpha_t
assert s_event.thread is my_c_thread
assert s_event.thread is threading.current_thread()
s_event.sync(log_msg='beta f1 thread sync point 1')
with pytest.raises(SmartEventWaitDeadlockDetected):
s_event.wait()
s_event.sync(log_msg='beta f1 thread sync point 2')
s_event.wait() # clear the resume that comes after the deadlock
s_event.sync(log_msg='beta f1 thread sync point 3')
s_event.pause_until(WUCond.RemoteWaiting, timeout=2)
with pytest.raises(SmartEventWaitDeadlockDetected):
s_event.wait()
s_event.sync(log_msg='beta f1 thread sync point 4')
s_event.resume()
#######################################################################
# mainline start
#######################################################################
cmds = Cmds()
descs = ThreadPairDescs()
alpha_t = threading.current_thread()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
my_f1_thread = threading.Thread(target=f1, args=(alpha_t,))
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting, timeout=-0.002)
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting, timeout=0)
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting, timeout=0.002)
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting, timeout=0.2)
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting)
my_f1_thread.start()
with pytest.raises(ThreadPairNotPaired):
smart_event.pause_until(WUCond.RemoteWaiting)
# tell f1 to proceed to pair_with
cmds.queue_cmd('beta', Cmd.Exit)
smart_event.pair_with(remote_name='beta', timeout=3)
descs.paired('alpha', 'beta')
smart_event.sync(log_msg='mainline sync point 1')
with pytest.raises(SmartEventWaitDeadlockDetected):
smart_event.wait()
smart_event.sync(log_msg='mainline sync point 2')
smart_event.resume()
smart_event.sync(log_msg='mainline sync point 3')
with pytest.raises(SmartEventWaitDeadlockDetected):
smart_event.wait()
smart_event.sync(log_msg='mainline sync point 4')
assert smart_event.wait() # clear resume
my_f1_thread.join()
descs.thread_end(name='beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.resume()
descs.cleanup()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.sync(log_msg='mainline sync point 5')
assert smart_event.thread is alpha_t
assert smart_event.remote.thread is my_f1_thread
###########################################################################
# test_smart_event_inner_thread_app
###########################################################################
def test_smart_event_inner_thread_app(self) -> None:
"""Test SmartEvent with thread_app."""
#######################################################################
# ThreadApp
#######################################################################
class MyThread(threading.Thread):
"""MyThread class to test SmartEvent."""
def __init__(self,
alpha_smart_event: SmartEvent,
alpha_thread: threading.Thread
) -> None:
"""Initialize the object.
Args:
alpha_smart_event: alpha SmartEvent to use for verification
alpha_thread: alpha thread to use for verification
"""
super().__init__()
self.s_event = SmartEvent(name='beta', thread=self)
self.alpha_s_event = alpha_smart_event
self.alpha_thread = alpha_thread
def run(self):
"""Run the tests."""
logger.debug('run started')
# normally, the add_desc is done just after the
# instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
assert self.s_event.remote is self.alpha_s_event
assert (self.s_event.remote.thread
is self.alpha_thread)
assert self.s_event.remote.thread is alpha_t
assert self.s_event.thread is self
my_run_thread = threading.current_thread()
assert self.s_event.thread is my_run_thread
assert self.s_event.thread is threading.current_thread()
with pytest.raises(SmartEventWaitUntilTimeout):
self.s_event.pause_until(WUCond.RemoteResume,
timeout=0.009)
self.s_event.sync(log_msg='beta run sync point 1')
self.s_event.pause_until(WUCond.RemoteResume, timeout=5)
self.s_event.pause_until(WUCond.RemoteResume)
assert self.s_event.wait(log_msg='beta run wait 12')
self.s_event.sync(log_msg='beta run sync point 2')
self.s_event.sync(log_msg='beta run sync point 3')
self.s_event.resume()
self.s_event.sync(log_msg='beta run sync point 4')
logger.debug('beta run exiting 45')
#######################################################################
# mainline starts
#######################################################################
cmds = Cmds()
descs = ThreadPairDescs()
alpha_t = threading.current_thread()
smart_event1 = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event1))
my_taa_thread = MyThread(smart_event1, alpha_t)
my_taa_thread.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta')
smart_event1.sync(log_msg='mainline sync point 1')
assert smart_event1.resume(log_msg='mainline resume 12')
smart_event1.sync(log_msg='mainline sync point 2')
with pytest.raises(SmartEventWaitUntilTimeout):
smart_event1.pause_until(WUCond.RemoteResume, timeout=0.009)
smart_event1.sync(log_msg='mainline sync point 3')
smart_event1.pause_until(WUCond.RemoteResume, timeout=5)
smart_event1.pause_until(WUCond.RemoteResume)
assert smart_event1.wait(log_msg='mainline wait 34')
smart_event1.sync(log_msg='mainline sync point 4')
my_taa_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.resume()
descs.cleanup()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.pause_until(WUCond.RemoteWaiting)
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.pause_until(WUCond.RemoteResume)
with pytest.raises(ThreadPairPairWithTimedOut):
smart_event1.pair_with(remote_name='beta', timeout=1)
descs.paired('alpha')
with pytest.raises(ThreadPairNotPaired):
smart_event1.wait()
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteWaiting)
with pytest.raises(ThreadPairNotPaired):
smart_event1.pause_until(WUCond.RemoteResume)
assert smart_event1.thread is alpha_t
assert smart_event1.remote is None
descs.verify_registry()
###########################################################################
# test_smart_event_inner_thread_app2
###########################################################################
def test_smart_event_inner_thread_app2(self) -> None:
"""Test SmartEvent with thread_app."""
#######################################################################
# mainline and ThreadApp - mainline provide beta SmartEvent
#######################################################################
class MyThread2(threading.Thread):
def __init__(self,
s_event: SmartEvent,
alpha_t1: threading.Thread):
super().__init__()
self.s_event = s_event
# not really a good idea to set the thread - this test case
# may not be realistic - need to consider whether the idea
# of passing in a pre-instantiated SmartEvent (which gets
# its thread set during instantiation) is something we want
# to support given that we have to change the thread
self.s_event.thread = self
self.alpha_t1 = alpha_t1
def run(self):
print('run started')
# normally, the add_desc is done just after the
# instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha2')
assert self.s_event.remote.thread is self.alpha_t1
assert self.s_event.remote.thread is alpha_t
assert self.s_event.thread is self
my_run_thread = threading.current_thread()
assert self.s_event.thread is my_run_thread
assert self.s_event.thread is threading.current_thread()
with pytest.raises(SmartEventWaitDeadlockDetected):
self.s_event.wait()
assert self.s_event.wait()
self.s_event.pause_until(WUCond.RemoteWaiting)
self.s_event.pause_until(WUCond.RemoteWaiting, timeout=2)
self.s_event.resume()
cmds = Cmds()
descs = ThreadPairDescs()
smart_event2 = SmartEvent(name='alpha2')
descs.add_desc(SmartEventDesc(name='alpha2',
s_event=smart_event2))
smart_event3 = SmartEvent(name='beta')
alpha_t = threading.current_thread()
my_tab_thread = MyThread2(smart_event3, alpha_t)
my_tab_thread.start()
cmds.get_cmd('alpha')
smart_event2.pair_with(remote_name='beta')
descs.paired('alpha2', 'beta')
smart_event2.pause_until(WUCond.RemoteWaiting)
with pytest.raises(SmartEventWaitDeadlockDetected):
smart_event2.wait()
smart_event2.resume()
assert smart_event2.wait()
my_tab_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.resume()
descs.cleanup()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.pause_until(WUCond.RemoteWaiting)
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event2.pause_until(WUCond.RemoteResume)
assert smart_event2.thread is alpha_t
assert smart_event2.remote.thread is my_tab_thread
descs.verify_registry()
###########################################################################
# test_smart_event_inner_thread_event_app
###########################################################################
def test_smart_event_inner_thread_event_app(self) -> None:
"""Test SmartEvent with thread_event_app."""
#######################################################################
# mainline and ThreadEventApp - mainline sets alpha and beta
#######################################################################
class MyThreadEvent1(threading.Thread, SmartEvent):
def __init__(self,
alpha_t1: threading.Thread):
threading.Thread.__init__(self)
SmartEvent.__init__(self, name='beta', thread=self)
self.alpha_t1 = alpha_t1
def run(self):
logger.debug('run started')
# normally, the add_desc is done just after the
# instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
descs.add_desc(SmartEventDesc(name='beta',
s_event=self,
thread=self))
cmds.queue_cmd('alpha')
self.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
assert self.remote.thread is self.alpha_t1
assert self.remote.thread is alpha_t
assert self.thread is self
my_run_thread = threading.current_thread()
assert self.thread is my_run_thread
assert self.thread is threading.current_thread()
assert self.wait()
self.pause_until(WUCond.RemoteWaiting, timeout=2)
with pytest.raises(SmartEventWaitDeadlockDetected):
self.wait()
self.resume()
logger.debug('run exiting')
cmds = Cmds()
descs = ThreadPairDescs()
alpha_t = threading.current_thread()
my_te1_thread = MyThreadEvent1(alpha_t)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
my_te1_thread.pause_until(WUCond.RemoteWaiting,
timeout=0.005)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
my_te1_thread.wait(timeout=0.005)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
my_te1_thread.resume(timeout=0.005)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
my_te1_thread.sync(timeout=0.005)
with pytest.raises(ThreadPairDetectedOpFromForeignThread):
my_te1_thread.pair_with(remote_name='alpha', timeout=0.5)
assert my_te1_thread.remote is None
assert my_te1_thread.thread is my_te1_thread
my_te1_thread.start()
cmds.get_cmd('alpha')
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
with pytest.raises(ThreadPairNotPaired):
smart_event.sync()
with pytest.raises(ThreadPairNotPaired):
smart_event.wait()
with pytest.raises(ThreadPairNotPaired):
smart_event.resume()
smart_event.pair_with(remote_name='beta')
smart_event.resume()
with pytest.raises(SmartEventWaitDeadlockDetected):
smart_event.wait()
assert smart_event.wait()
my_te1_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.resume()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.pause_until(WUCond.RemoteWaiting)
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.pause_until(WUCond.RemoteResume)
assert my_te1_thread.remote is not None
assert my_te1_thread.remote.thread is not None
assert my_te1_thread.remote.thread is alpha_t
assert my_te1_thread.thread is my_te1_thread
###########################################################################
# test_smart_event_inner_thread_event_app2
###########################################################################
def test_smart_event_inner_thread_event_app2(self) -> None:
"""Test SmartEvent with thread_event_app."""
#######################################################################
# mainline and ThreadApp - mainline sets alpha thread_app sets beta
#######################################################################
class MyThreadEvent2(threading.Thread, SmartEvent):
def __init__(self,
alpha_t1: threading.Thread):
threading.Thread.__init__(self)
SmartEvent.__init__(self, name='beta', thread=self)
self.alpha_t1 = alpha_t1
def run(self):
logger.debug('run started')
assert self.remote is None
assert self.thread is self
my_run_thread = threading.current_thread()
assert self.thread is my_run_thread
assert self.thread is threading.current_thread()
# normally, the add_desc is done just after the
# instantiation, but
# in this case the thread is not made alive until now, and the
# add_desc checks that the thread is alive
descs.add_desc(SmartEventDesc(name='beta',
s_event=self,
thread=self))
cmds.queue_cmd('alpha')
self.pair_with(remote_name='alpha')
assert self.remote.thread is self.alpha_t1
assert self.remote.thread is alpha_t
descs.paired('alpha', 'beta')
with pytest.raises(SmartEventWaitDeadlockDetected):
self.wait()
assert self.wait()
self.resume()
logger.debug('run exiting')
cmds = Cmds()
descs = ThreadPairDescs()
alpha_t = threading.current_thread()
my_te2_thread = MyThreadEvent2(alpha_t)
my_te2_thread.start()
cmds.get_cmd('alpha')
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event))
smart_event.pair_with(remote_name='beta')
smart_event.pause_until(WUCond.RemoteWaiting, timeout=2)
with pytest.raises(SmartEventWaitDeadlockDetected):
smart_event.wait()
assert smart_event.resume()
assert smart_event.wait()
my_te2_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.resume()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.pause_until(WUCond.RemoteWaiting, timeout=2)
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event.pause_until(WUCond.RemoteResume, timeout=2)
assert smart_event.thread is alpha_t
assert smart_event.remote.thread is my_te2_thread
###########################################################################
# test_smart_event_two_f_threads
###########################################################################
def test_smart_event_two_f_threads(self) -> None:
"""Test register_thread with thread_event_app."""
#######################################################################
# two threads - mainline sets alpha and beta
#######################################################################
def fa1():
logger.debug('fa1 entered')
my_fa_thread = threading.current_thread()
s_event = SmartEvent(name='fa1')
descs.add_desc(SmartEventDesc(name='fa1',
s_event=s_event,
thread=my_fa_thread))
assert s_event.thread is my_fa_thread
s_event.pair_with(remote_name='fb1')
descs.paired('fa1', 'fb1')
logger.debug('fa1 about to wait')
s_event.wait()
logger.debug('fa1 back from wait')
s_event.pause_until(WUCond.RemoteWaiting, timeout=2)
s_event.resume()
def fb1():
logger.debug('fb1 entered')
my_fb_thread = threading.current_thread()
s_event = SmartEvent(name='fb1')
descs.add_desc(SmartEventDesc(name='fb1',
s_event=s_event,
thread=my_fb_thread))
assert s_event.thread is my_fb_thread
s_event.pair_with(remote_name='fa1')
logger.debug('fb1 about to resume')
s_event.resume()
s_event.wait()
# tell mainline we are out of the wait - OK to do descs fa1 end
cmds.queue_cmd('alpha')
# wait for mainline to give the go-ahead after doing descs fa1 end
cmds.get_cmd('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
s_event.resume()
descs.cleanup()
with pytest.raises(SmartEventRemoteThreadNotAlive):
s_event.wait()
with pytest.raises(SmartEventRemoteThreadNotAlive):
s_event.pause_until(WUCond.RemoteWaiting)
#######################################################################
# mainline
#######################################################################
cmds = Cmds()
descs = ThreadPairDescs()
fa1_thread = threading.Thread(target=fa1)
fb1_thread = threading.Thread(target=fb1)
logger.debug('starting fa1_thread')
fa1_thread.start()
logger.debug('starting fb1_thread')
fb1_thread.start()
fa1_thread.join()
cmds.get_cmd('alpha')
descs.thread_end('fa1')
cmds.queue_cmd('beta', 'go')
fb1_thread.join()
descs.thread_end('fb1')
###########################################################################
# test_smart_event_two_f_threads2
###########################################################################
def test_smart_event_two_f_threads2(self) -> None:
"""Test register_thread with thread_event_app."""
#######################################################################
# two threads - fa2 and fb2 set their own threads
#######################################################################
def fa2():
logger.debug('fa2 entered')
s_event = SmartEvent(name='fa2')
my_fa_thread = threading.current_thread()
assert s_event.thread is my_fa_thread
descs.add_desc(SmartEventDesc(name='fa2',
s_event=s_event,
thread=my_fa_thread))
s_event.pair_with(remote_name='fb2')
cmds.get_cmd('beta')
logger.debug('fa2 about to deadlock')
with pytest.raises(SmartEventWaitDeadlockDetected):
logger.debug('fa2 about to wait')
s_event.wait()
logger.debug('fa2 back from wait')
logger.debug('fa2 about to pause_until')
s_event.pause_until(WUCond.RemoteWaiting, timeout=2)
logger.debug('fa2 about to resume')
s_event.resume()
s_event.wait()
logger.debug('fa2 exiting')
def fb2():
logger.debug('fb2 entered')
s_event = SmartEvent(name='fb2')
my_fb_thread = threading.current_thread()
descs.add_desc(SmartEventDesc(name='fb2',
s_event=s_event,
thread=my_fb_thread))
assert s_event.thread is my_fb_thread
s_event.pair_with(remote_name='fa2')
descs.paired('fa2', 'fb2')
cmds.queue_cmd('beta')
logger.debug('fb2 about to deadlock')
with pytest.raises(SmartEventWaitDeadlockDetected):
logger.debug('fb2 about to wait')
s_event.wait()
logger.debug('fb2 back from wait')
logger.debug('fb2 about to pause_until')
logger.debug('fb2 about to wait')
s_event.wait()
s_event.resume()
# tell mainline we are out of the wait - OK to do descs fa2 end
cmds.queue_cmd('alpha')
# wait for mainline to give the go-ahead after doing descs fa2 end
cmds.get_cmd('beta')
logger.debug('fb2 about to try resume for '
             'SmartEventRemoteThreadNotAlive')
with pytest.raises(SmartEventRemoteThreadNotAlive):
s_event.resume()
descs.cleanup()
logger.debug('fb2 about to try wait for '
             'SmartEventRemoteThreadNotAlive')
with pytest.raises(SmartEventRemoteThreadNotAlive):
s_event.wait()
logger.debug('fb2 exiting')
cmds = Cmds()
descs = ThreadPairDescs()
fa2_thread = threading.Thread(target=fa2)
fb2_thread = threading.Thread(target=fb2)
fa2_thread.start()
fb2_thread.start()
fa2_thread.join()
cmds.get_cmd('alpha')
descs.thread_end('fa2')
cmds.queue_cmd('beta', 'go')
fb2_thread.join()
descs.thread_end('fb2')
###############################################################################
# TestResumeExc Class
###############################################################################
class TestResumeExc:
"""Test SmartEvent resume() exceptions."""
###########################################################################
# test_smart_event_resume_exc_f1
###########################################################################
def test_smart_event_resume_exc_f1(self) -> None:
"""Test register_thread with f1."""
def f1():
logger.debug('f1 beta entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
s_event.sync(log_msg='f1 beta sync point 1')
cmds.queue_cmd('alpha', 'go')
cmds.get_cmd('beta')
s_event.sync(log_msg='f1 beta sync point 2')
s_event.resume(log_msg='f1 beta resume 3')
s_event.sync(log_msg='f1 beta sync point 4')
logger.debug('f1 beta exiting 5')
logger.debug('mainline entered')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event1 = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event1,
thread=threading.current_thread()))
f1_thread = threading.Thread(target=f1)
f1_thread.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta')
assert smart_event1.sync(log_msg='mainline sync point 1')
cmds.get_cmd('alpha')
smart_event1.remote.deadlock = True
smart_event1.remote.conflict = True
with pytest.raises(SmartEventInconsistentFlagSettings):
smart_event1.resume(log_msg='alpha error resume 1a')
smart_event1.remote.deadlock = False
smart_event1.remote.conflict = False
smart_event1.remote.wait_wait = True
smart_event1.remote.sync_wait = True
with pytest.raises(SmartEventInconsistentFlagSettings):
smart_event1.resume(log_msg='alpha error resume 1b')
smart_event1.remote.wait_wait = False
smart_event1.remote.sync_wait = False
smart_event1.remote.deadlock = True
with pytest.raises(SmartEventInconsistentFlagSettings):
smart_event1.resume(log_msg='alpha error resume 1c')
smart_event1.remote.deadlock = False
smart_event1.remote.conflict = True
with pytest.raises(SmartEventInconsistentFlagSettings):
smart_event1.resume(log_msg='alpha error resume 1d')
smart_event1.remote.conflict = False
cmds.queue_cmd('beta', 'go')
smart_event1.sync(log_msg='mainline sync point 2')
smart_event1.wait(log_msg='mainline wait 3')
smart_event1.sync(log_msg='mainline sync point 4')
f1_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.resume(log_msg='mainline sync point 5')
descs.cleanup()
logger.debug('mainline exiting')
###############################################################################
# TestSync Class
###############################################################################
class TestSync:
"""Test SmartEvent sync function."""
###########################################################################
# test_smart_event_sync_f1
###########################################################################
def test_smart_event_sync_f1(self) -> None:
"""Test register_thread with f1."""
def f1():
logger.debug('f1 beta entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
s_event.sync(log_msg='f1 beta sync point 1')
s_event.wait()
s_event.sync(log_msg='f1 beta sync point 2')
s_event.resume()
s_event.sync(log_msg='f1 beta sync point 3')
s_event.sync(log_msg='f1 beta sync point 4')
s_event.wait()
logger.debug('f1 beta exiting')
logger.debug('mainline entered')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event1 = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event1,
thread=threading.current_thread()))
f1_thread = threading.Thread(target=f1)
f1_thread.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event1.sync(log_msg='mainline sync point 1')
smart_event1.resume()
smart_event1.sync(log_msg='mainline sync point 2')
smart_event1.wait()
smart_event1.sync(log_msg='mainline sync point 3')
smart_event1.resume()
smart_event1.sync(log_msg='mainline sync point 4')
f1_thread.join()
descs.thread_end('beta')
logger.debug('mainline exiting')
###########################################################################
# test_smart_event_sync_exc
###########################################################################
def test_smart_event_sync_exc(self,
thread_exc: Any) -> None:
"""Test register_thread with f1.
Args:
thread_exc: capture thread exceptions
"""
def f1():
logger.debug('f1 beta entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
assert s_event.sync(log_msg='f1 beta sync point 1')
with pytest.raises(SmartEventConflictDeadlockDetected):
s_event.wait(log_msg='f1 beta wait 2')
assert s_event.sync(log_msg='f1 beta sync point 3')
s_event.resume(log_msg='f1 beta resume 4')
assert s_event.sync(log_msg='f1 beta sync point 5')
assert s_event.wait(log_msg='f1 beta wait 6')
s_event.pause_until(WUCond.RemoteWaiting)
s_event.resume()
assert s_event.sync(log_msg='f1 beta sync point 8')
# When one thread issues a sync request, and the other issues a
# wait request, a conflict deadlock is recognized. The
# process of conflict detection is that one side recognizes the
# conflict, sets a flag to tell the other side that the conflict
# exists, and then raises the SmartEventConflictDeadlockDetected error.
# The other side, upon seeing the conflict flag set, will also
# raise the SmartEventConflictDeadlockDetected error.
# We want to ensure that sync code that detects the conflict is
# exercised here which requires setting certain flags in a way
# that coaxes each side into behaving such that the sync
# detection code will run. We will do this as follows:
# make sure alpha is in sync code now looping in phase 1
while not s_event.remote.sync_wait:
time.sleep(.1)
# make alpha think it is in sync phase 2 and continue looping
# until beta sets sync_cleanup from True back to False
with s_event.status.status_lock:
s_event.remote.sync_wait = False
s_event.status.sync_cleanup = True
# pre-resume to set beta event and set alpha wait_wait to get beta
# thinking alpha is resumed and waiting and will eventually
            # leave (i.e., get beta to think that alpha is not in a sync
# deadlock)
s_event.resume()
s_event.remote.wait_wait = True
# Now issue the wait. There is no way to prove that alpha saw
# the deadlock first, but we will see later whether the code
# coverage will show that the sync detection code ran.
with pytest.raises(SmartEventConflictDeadlockDetected):
s_event.wait(log_msg='f1 beta wait 89')
s_event.status.sync_cleanup = False
assert s_event.sync(log_msg='f1 beta sync point 9')
logger.debug('f1 beta exiting 10')
logger.debug('mainline entered')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event1 = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event1,
thread=threading.current_thread()))
f1_thread = threading.Thread(target=f1)
f1_thread.start()
cmds.get_cmd('alpha')
smart_event1.pair_with(remote_name='beta')
assert smart_event1.sync(log_msg='mainline sync point 1')
# See the comments in f1 regarding the detection and handling of a
        # conflict deadlock. We need to force the code in the following
# scenario to behave such that beta will be the side that detects
# the conflict. This will be done as follows:
# make sure beta is looping in wait code
smart_event1.pause_until(WUCond.RemoteWaiting)
        # set remote.wait_wait to False to trick alpha in the following
        # sync request into thinking beta is NOT in wait request code so
        # that alpha does not detect the conflict.
smart_event1.remote.wait_wait = False
# Issue the sync request. If all goes well, beta will see the conflict
# first, set the conflict flag and then raise the
# SmartEventConflictDeadlockDetected error. We can't prove that it worked out
# that way, but the coverage report will tell us whether the
# detection code in wait ran.
with pytest.raises(SmartEventConflictDeadlockDetected):
smart_event1.sync(log_msg='mainline sync point 2')
assert smart_event1.sync(log_msg='mainline sync point 3')
assert smart_event1.wait(log_msg='mainline wait 4')
assert smart_event1.sync(log_msg='mainline sync point 5')
smart_event1.resume(log_msg='mainline resume 6')
assert not smart_event1.sync(log_msg='mainline sync point 7',
timeout=0.5)
assert smart_event1.wait()
assert smart_event1.sync(log_msg='mainline sync point 8')
# thread will ensure we see conflict first
with pytest.raises(SmartEventConflictDeadlockDetected):
smart_event1.sync(log_msg='mainline sync point 10')
logger.debug('mainline about to issue wait to clear trick pre-resume')
smart_event1.wait() # clear the trick pre-resume from beta
assert smart_event1.sync(log_msg='mainline sync point 9')
f1_thread.join()
descs.thread_end('beta')
with pytest.raises(SmartEventRemoteThreadNotAlive):
smart_event1.sync(log_msg='mainline sync point 10')
descs.cleanup()
logger.debug('mainline exiting 9')
###############################################################################
# TestWaitClear Class
###############################################################################
class TestWaitClear:
"""Test SmartEvent clearing of event set flag."""
###########################################################################
# test_smart_event_f1_clear
###########################################################################
def test_smart_event_f1_clear(self) -> None:
"""Test smart event timeout with f1 thread."""
def f1():
logger.debug('f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
cmds.start_clock(iter=1)
assert s_event.wait()
assert 2 <= cmds.duration() <= 3
assert not s_event.remote.event.is_set()
cmds.start_clock(iter=2)
assert s_event.wait()
assert 2 <= cmds.duration() <= 3
assert not s_event.remote.event.is_set()
cmds.pause(2, iter=3)
s_event.resume()
cmds.pause(2, iter=4)
s_event.resume()
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_thread = threading.Thread(target=f1)
beta_thread.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
cmds.pause(2, iter=1)
smart_event.resume()
cmds.pause(2, iter=2)
smart_event.resume()
cmds.start_clock(iter=3)
assert smart_event.wait()
assert 2 <= cmds.duration() <= 3
assert not smart_event.remote.event.is_set()
cmds.start_clock(iter=4)
assert smart_event.wait()
assert 2 <= cmds.duration() <= 3
assert not smart_event.remote.event.is_set()
beta_thread.join()
descs.thread_end('beta')
###########################################################################
# test_smart_event_thread_app_clear
###########################################################################
def test_smart_event_thread_app_clear(self) -> None:
"""Test smart event timeout with thread_app thread."""
class MyThread(threading.Thread):
def __init__(self) -> None:
super().__init__()
self.s_event = SmartEvent(name='beta', thread=self)
def run(self):
logger.debug('ThreadApp run entered')
# s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha')
assert not self.s_event.remote.event.is_set()
assert not self.s_event.event.is_set()
self.s_event.sync(log_msg='beta run sync point 1')
cmds.start_clock(iter=1)
assert self.s_event.wait(log_msg='beta run wait 12')
assert 2 <= cmds.duration() <= 3
assert not self.s_event.remote.event.is_set()
assert not self.s_event.event.is_set()
self.s_event.sync(log_msg='beta run sync point 2')
cmds.start_clock(iter=2)
assert self.s_event.wait(log_msg='beta run wait 23')
assert 2 <= cmds.duration() <= 3
assert not self.s_event.remote.event.is_set()
assert not self.s_event.event.is_set()
self.s_event.sync(log_msg='beta run sync point 3')
cmds.pause(2, iter=3)
self.s_event.resume(log_msg='beta run resume 34')
self.s_event.sync(log_msg='beta run sync point 4')
cmds.pause(2, iter=4)
self.s_event.resume(log_msg='beta run resume 45')
self.s_event.sync(log_msg='beta run sync point 5')
logger.debug('beta run exiting 910')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
thread_app = MyThread()
thread_app.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event.sync(log_msg='mainline sync point 1')
cmds.pause(2, iter=1)
smart_event.resume(log_msg='mainline resume 12')
smart_event.sync(log_msg='mainline sync point 2')
cmds.pause(2, iter=2)
smart_event.resume(log_msg='mainline resume 23')
smart_event.sync(log_msg='mainline sync point 3')
cmds.start_clock(iter=3)
assert smart_event.wait(log_msg='mainline wait 34')
assert 2 <= cmds.duration() <= 3
assert not smart_event.event.is_set()
assert not smart_event.remote.event.is_set()
smart_event.sync(log_msg='mainline sync point 4')
cmds.start_clock(iter=4)
assert smart_event.wait(log_msg='mainline sync point 45')
assert 2 <= cmds.duration() <= 3
assert not smart_event.event.is_set()
assert not smart_event.remote.event.is_set()
smart_event.sync(log_msg='mainline sync point 5')
thread_app.join()
descs.thread_end('beta')
###############################################################################
# TestSmartEventTimeout Class
###############################################################################
class TestSmartEventTimeout:
"""Test SmartEvent timeout cases."""
###########################################################################
# test_smart_event_f1_wait_time_out
###########################################################################
def test_smart_event_f1_wait_time_out(self) -> None:
"""Test smart event wait timeout with f1 thread."""
def f1():
logger.debug('f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
s_event.sync(log_msg='f1 beta sync point 1')
assert s_event.wait(timeout=2)
s_event.sync(log_msg='f1 beta sync point 2')
s_time = time.time()
assert not s_event.wait(timeout=0.5)
assert 0.5 <= time.time() - s_time <= 0.75
s_event.sync(log_msg='f1 beta sync point 3')
s_event.pause_until(WUCond.RemoteWaiting)
s_event.resume(log_msg='f1 beta resume 34')
s_event.sync(log_msg='f1 beta sync point 4')
s_event.sync(log_msg='f1 beta sync point 5')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_thread = threading.Thread(target=f1)
beta_thread.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
smart_event.pause_until(WUCond.ThreadsReady)
smart_event.sync(log_msg='mainline sync point 1')
smart_event.pause_until(WUCond.RemoteWaiting)
smart_event.resume(log_msg='mainline resume 12')
smart_event.sync(log_msg='mainline sync point 2')
smart_event.sync(log_msg='mainline sync point 3')
assert smart_event.wait(timeout=2)
smart_event.sync(log_msg='mainline sync point 4')
start_time = time.time()
assert not smart_event.wait(timeout=0.75)
assert 0.75 <= time.time() - start_time <= 1
smart_event.sync(log_msg='mainline sync point 5')
beta_thread.join()
descs.thread_end('beta')
###########################################################################
# test_smart_event_f1_resume_time_out
###########################################################################
def test_smart_event_f1_resume_time_out(self) -> None:
"""Test smart event wait timeout with f1 thread."""
def f1() -> None:
"""The remote thread for requests."""
logger.debug('f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
# s_event.sync(log_msg='f1 beta sync point 1')
# the first resume will set the flag ON and the flag will stay ON
# since there is no matching wait
assert not s_event.event.is_set()
assert s_event.resume(timeout=2)
assert s_event.event.is_set()
# this second resume will timeout waiting for the flag to go OFF
cmds.start_clock(iter=1)
assert not s_event.resume(timeout=0.5)
assert 0.5 <= cmds.duration() <= 0.75
assert s_event.event.is_set()
s_event.sync(log_msg='f1 beta sync point 1')
s_event.sync(log_msg='f1 beta sync point 2')
# this first resume will complete within the timeout
s_event.remote.wait_wait = True # simulate waiting
s_event.remote.deadlock = True # simulate deadlock
cmds.start_clock(iter=2)
assert s_event.resume(timeout=1)
assert 0.5 <= cmds.duration() <= 0.75
# s_event.sync(log_msg='f1 beta sync point 3')
s_event.sync(log_msg='f1 beta sync point 4')
# this resume will timeout
s_event.remote.wait_wait = True # simulate waiting
s_event.remote.deadlock = True # simulate deadlock
cmds.start_clock(iter=3)
assert not s_event.resume(timeout=0.5)
assert 0.5 <= cmds.duration() <= 0.75
s_event.sync(log_msg='f1 beta sync point 5')
s_event.sync(log_msg='f1 beta sync point 6')
# this wait will clear the flag - use timeout to prevent f1 beta
# sync from raising SmartEventConflictDeadlockDetected
assert s_event.wait(log_msg='f1 beta wait 67',
timeout=1)
s_event.sync(log_msg='f1 beta sync point 7')
cmds.pause(0.5, iter=5) # we purposely skipped 4
# clear the deadlock within the resume timeout to allow mainline
# resume to complete
s_event.deadlock = False
s_event.wait_wait = False
s_event.sync(log_msg='f1 beta sync point 8')
cmds.pause(0.75, iter=6)
# clear the deadlock after resume timeout to cause ml to timeout
s_event.deadlock = False
s_event.wait_wait = False
s_event.sync(log_msg='f1 beta sync point 9')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_thread = threading.Thread(target=f1)
beta_thread.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event.pause_until(WUCond.ThreadsReady)
smart_event.sync(log_msg='mainline sync point 1')
# this wait will clear the flag - use timeout to prevent sync
# from raising SmartEventConflictDeadlockDetected
assert smart_event.remote.event.is_set()
assert smart_event.wait(log_msg='mainline wait 12',
timeout=1)
assert not smart_event.wait_timeout_specified
smart_event.sync(log_msg='mainline sync point 2')
cmds.pause(0.5, iter=2) # we purposely skipped iter=1
# clear the deadlock within resume timeout to allow f1 resume to
# complete
smart_event.deadlock = False
smart_event.wait_wait = False
# smart_event.sync(log_msg='mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 4')
cmds.pause(0.75, iter=3)
# clear the deadlock after the resume timeout to cause f1 to timeout
smart_event.deadlock = False
smart_event.wait_wait = False
smart_event.sync(log_msg='mainline sync point 5')
# the first resume will set the flag ON and the flag will stay ON
# since there is no matching wait
assert smart_event.resume(timeout=2)
# this second resume will timeout waiting for the flag to go OFF
cmds.start_clock(iter=4)
assert not smart_event.resume(timeout=0.3)
assert 0.3 <= cmds.duration() <= 0.6
smart_event.sync(log_msg='mainline sync point 6')
smart_event.sync(log_msg='mainline sync point 7')
# this first resume will complete within the timeout
smart_event.remote.wait_wait = True # simulate waiting
smart_event.remote.deadlock = True # simulate deadlock
cmds.start_clock(iter=5)
assert smart_event.resume(timeout=1)
assert 0.5 <= cmds.duration() <= 0.75
smart_event.sync(log_msg='mainline sync point 8')
# this resume will timeout
smart_event.remote.wait_wait = True # simulate waiting
smart_event.remote.deadlock = True # simulate deadlock
cmds.start_clock(iter=6)
assert not smart_event.resume(timeout=0.5)
assert 0.5 <= cmds.duration() <= 0.75
smart_event.sync(log_msg='mainline sync point 9')
beta_thread.join()
descs.thread_end('beta')
###########################################################################
# test_smart_event_thread_app_time_out
###########################################################################
def test_smart_event_thread_app_time_out(self) -> None:
"""Test smart event timeout with thread_app thread."""
class MyThread(threading.Thread):
def __init__(self):
super().__init__()
self.s_event = SmartEvent(name='beta', thread=self)
def run(self):
logger.debug('ThreadApp run entered')
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
cmds.start_clock(iter=1)
assert not self.s_event.wait(timeout=2)
assert 2 <= cmds.duration() < 3
assert self.s_event.sync(log_msg='beta sync point 1')
assert self.s_event.sync(log_msg='beta sync point 2')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
thread_app = MyThread()
thread_app.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
assert smart_event.sync(log_msg='alpha sync point 1')
cmds.start_clock(iter=2)
assert not smart_event.wait(timeout=2)
assert 2 <= cmds.duration() < 3
assert smart_event.sync(log_msg='alpha sync point 2')
thread_app.join()
descs.thread_end('beta')
###############################################################################
# TestSmartEventCode Class
###############################################################################
class TestSmartEventCode:
"""Test SmartEvent resume codes."""
###########################################################################
# test_smart_event_f1_event_code
###########################################################################
def test_smart_event_f1_event_code(self) -> None:
"""Test smart event code with f1 thread."""
def f1():
logger.debug('f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
s_event.pair_with(remote_name='alpha')
assert not s_event.remote.code
assert not s_event.code
assert not s_event.get_code()
s_event.sync(log_msg='beta sync point 1')
assert s_event.wait(timeout=2)
assert not s_event.remote.code
assert s_event.code == 42
assert 42 == s_event.get_code()
s_event.sync(log_msg='beta sync point 2')
s_event.resume(code='forty-two')
assert s_event.remote.code == 'forty-two'
assert s_event.code == 42
assert 42 == s_event.get_code()
s_event.sync(log_msg='beta sync point 3')
assert s_event.remote.code == 'forty-two'
assert s_event.code == 42
assert 42 == s_event.get_code()
assert not s_event.wait(timeout=.5)
assert s_event.remote.code == 'forty-two'
assert s_event.code == 42
assert 42 == s_event.get_code()
s_event.sync(log_msg='beta sync point 4')
s_event.sync(log_msg='beta sync point 5')
assert s_event.remote.code == 'forty-two'
assert s_event.code == 'twenty one'
assert 'twenty one' == s_event.get_code()
assert s_event.remote.event.is_set()
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_thread = threading.Thread(target=f1)
beta_thread.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
smart_event.sync(log_msg='mainline sync point 1')
assert not smart_event.get_code()
assert not smart_event.code
assert not smart_event.remote.code
smart_event.resume(code=42)
assert not smart_event.get_code()
assert not smart_event.code
assert smart_event.remote.code == 42
smart_event.sync(log_msg='mainline sync point 2')
assert smart_event.wait()
assert smart_event.get_code() == 'forty-two'
assert smart_event.code == 'forty-two'
assert smart_event.remote.code == 42
smart_event.sync(log_msg='mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 4')
smart_event.resume(code='twenty one')
smart_event.sync(log_msg='mainline sync point 5')
beta_thread.join()
smart_event.code = None
smart_event.remote.code = None
descs.thread_end('beta')
###########################################################################
# test_smart_event_thread_app_event_code
###########################################################################
def test_smart_event_thread_app_event_code(self) -> None:
"""Test smart event code with thread_app thread."""
class MyThread(threading.Thread):
def __init__(self):
super().__init__()
self.s_event = SmartEvent(name='beta', thread=self)
def run(self):
logger.debug('ThreadApp run entered')
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
assert self.s_event.get_code() is None
assert not self.s_event.wait(timeout=2, log_msg='beta wait 1')
self.s_event.sync(log_msg='beta sync point 2')
self.s_event.sync(log_msg='beta sync point 3')
assert self.s_event.remote.event.is_set()
assert self.s_event.code == 42
assert self.s_event.get_code() == 42
self.s_event.resume(log_msg='beta resume 4',
code='forty-two')
cmds = Cmds()
descs = ThreadPairDescs()
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
thread_app = MyThread()
thread_app.start()
cmds.get_cmd('alpha')
smart_event.pair_with(remote_name='beta')
smart_event.pause_until(WUCond.ThreadsReady)
smart_event.sync(log_msg='mainline sync point 2')
smart_event.resume(code=42)
smart_event.sync(log_msg='mainline sync point 3')
assert smart_event.wait(log_msg='mainline wait 4')
assert smart_event.get_code() == 'forty-two'
thread_app.join()
smart_event.code = None
smart_event.remote.code = None
descs.thread_end('beta')
###########################################################################
# test_smart_event_thread_event_app_event_code
###########################################################################
def test_smart_event_thread_event_app_event_code(self) -> None:
"""Test smart event code with thread_event_app thread."""
class MyThread(threading.Thread, SmartEvent):
def __init__(self) -> None:
threading.Thread.__init__(self)
SmartEvent.__init__(self, name='beta', thread=self)
def run(self):
logger.debug('ThreadApp run entered')
descs.add_desc(SmartEventDesc(name='beta',
s_event=self,
thread=self))
cmds.queue_cmd('alpha')
self.pair_with(remote_name='alpha')
assert not self.remote.code
assert not self.code
assert not self.get_code()
self.sync(log_msg='beta sync point 1')
assert not self.wait(timeout=0.5)
assert not self.remote.code
assert not self.code
assert not self.get_code()
self.sync(log_msg='beta sync point 2')
self.sync(log_msg='beta sync point 3')
assert not self.remote.code
assert self.code == 42
assert self.get_code() == 42
self.resume(code='forty-two')
assert self.remote.code == 'forty-two'
assert self.code == 42
assert self.get_code() == 42
self.sync(log_msg='beta sync point 4')
self.sync(log_msg='beta sync point 5')
assert self.remote.code == 'forty-two'
assert self.code == 42
assert self.get_code() == 42
assert self.wait(timeout=0.5, log_msg='beta wait 56')
assert self.remote.code == 'forty-two'
assert self.code == 42
assert self.get_code() == 42
self.sync(log_msg='beta sync point 6')
cmds = Cmds()
descs = ThreadPairDescs()
thread_event_app = MyThread()
thread_event_app.start()
cmds.get_cmd('alpha')
time.sleep(2) # make beta loop in pair_with
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
smart_event.pair_with(remote_name='beta')
descs.paired('alpha', 'beta')
assert not smart_event.code
assert not smart_event.remote.code
assert not smart_event.get_code()
smart_event.sync(log_msg='mainline sync point 1')
smart_event.sync(log_msg='mainline sync point 2')
assert not smart_event.code
assert not smart_event.remote.code
assert not smart_event.get_code()
smart_event.resume(code=42, log_msg='mainline resume for beta 56')
assert not smart_event.code
assert smart_event.remote.code == 42
assert not smart_event.get_code()
smart_event.sync(log_msg='mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 4')
assert smart_event.code == 'forty-two'
assert smart_event.remote.code == 42
assert smart_event.get_code() == 'forty-two'
assert smart_event.wait()
assert smart_event.code == 'forty-two'
assert smart_event.remote.code == 42
assert smart_event.get_code() == 'forty-two'
smart_event.sync(log_msg='mainline sync point 5')
assert smart_event.code == 'forty-two'
assert smart_event.remote.code == 42
assert smart_event.get_code() == 'forty-two'
smart_event.sync(log_msg='mainline sync point 6')
thread_event_app.join()
smart_event.code = None
smart_event.remote.code = None
descs.thread_end('beta')
###############################################################################
# TestSmartEventLogger Class
###############################################################################
class TestSmartEventLogger:
"""Test log messages."""
###########################################################################
# test_smart_event_f1_event_logger
###########################################################################
def test_smart_event_f1_event_logger(self,
caplog,
log_enabled_arg) -> None:
"""Test smart event logger with f1 thread.
Args:
caplog: fixture to capture log messages
log_enabled_arg: fixture to indicate whether log is enabled
"""
def f1():
exp_log_msgs.add_msg('f1 entered')
logger.debug('f1 entered')
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha')
exp_log_msgs.add_beta_pair_with_msg('beta pair_with alpha 1',
['beta', 'alpha'])
s_event.pair_with(remote_name='alpha',
log_msg='beta pair_with alpha 1')
descs.paired('alpha', 'beta')
exp_log_msgs.add_beta_sync_msg('beta sync point 1')
s_event.sync(log_msg='beta sync point 1')
exp_log_msgs.add_beta_wait_msg('wait for mainline to post 12')
assert s_event.wait(log_msg='wait for mainline to post 12')
exp_log_msgs.add_beta_sync_msg('beta sync point 2')
s_event.sync(log_msg='beta sync point 2')
exp_log_msgs.add_beta_resume_msg('post mainline 23')
s_event.resume(log_msg='post mainline 23')
exp_log_msgs.add_beta_sync_msg('beta sync point 3')
s_event.sync(log_msg='beta sync point 3')
exp_log_msgs.add_beta_sync_msg('beta sync point 4')
s_event.sync(log_msg='beta sync point 4')
cmds = Cmds()
descs = ThreadPairDescs()
if log_enabled_arg:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
alpha_call_seq = ('test_smart_event.py::TestSmartEventLogger.'
'test_smart_event_f1_event_logger')
beta_call_seq = ('test_smart_event.py::f1')
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline started'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
beta_thread = threading.Thread(target=f1)
beta_thread.start()
cmds.get_cmd('alpha')
exp_log_msgs.add_alpha_pair_with_msg('alpha pair_with beta 1',
['alpha', 'beta'])
smart_event.pair_with(remote_name='beta',
log_msg='alpha pair_with beta 1')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 1')
smart_event.sync(log_msg='mainline sync point 1')
smart_event.pause_until(WUCond.RemoteWaiting)
exp_log_msgs.add_alpha_resume_msg('post beta 12')
smart_event.resume(log_msg='post beta 12')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 2')
smart_event.sync(log_msg='mainline sync point 2')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 3')
exp_log_msgs.add_alpha_wait_msg('wait for pre-post 23')
assert smart_event.wait(log_msg='wait for pre-post 23')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 4')
smart_event.sync(log_msg='mainline sync point 4')
beta_thread.join()
descs.thread_end('beta')
exp_log_msgs.add_msg('mainline all tests complete')
logger.debug('mainline all tests complete')
exp_log_msgs.verify_log_msgs(caplog=caplog,
log_enabled_tf=log_enabled_arg)
# restore root to debug
logging.getLogger().setLevel(logging.DEBUG)
###########################################################################
# test_smart_event_thread_app_event_logger
###########################################################################
def test_smart_event_thread_app_event_logger(self,
caplog,
log_enabled_arg) -> None:
"""Test smart event logger with thread_app thread.
Args:
caplog: fixture to capture log messages
log_enabled_arg: fixture to indicate whether log is enabled
"""
class MyThread(threading.Thread):
def __init__(self,
exp_log_msgs1: ExpLogMsgs):
super().__init__()
self.s_event = SmartEvent(name='beta', thread=self)
self.exp_log_msgs = exp_log_msgs1
def run(self):
l_msg = 'ThreadApp run entered'
self.exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
descs.add_desc(SmartEventDesc(name='beta',
s_event=self.s_event,
thread=self))
cmds.queue_cmd('alpha')
self.exp_log_msgs.add_beta_pair_with_msg('beta pair alpha 2',
['beta', 'alpha'])
self.s_event.pair_with(remote_name='alpha',
log_msg='beta pair alpha 2')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 1')
self.s_event.sync(log_msg='beta sync point 1')
self.exp_log_msgs.add_beta_wait_msg('wait 12')
assert self.s_event.wait(log_msg='wait 12')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 2')
self.s_event.sync(log_msg='beta sync point 2')
self.s_event.pause_until(WUCond.RemoteWaiting)
self.exp_log_msgs.add_beta_resume_msg('post mainline 34',
True, 'forty-two')
self.s_event.resume(code='forty-two',
log_msg='post mainline 34')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 3')
self.s_event.sync(log_msg='beta sync point 3')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 4')
self.s_event.sync(log_msg='beta sync point 4')
cmds = Cmds()
descs = ThreadPairDescs()
if log_enabled_arg:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
alpha_call_seq = ('test_smart_event.py::TestSmartEventLogger.'
'test_smart_event_thread_app_event_logger')
beta_call_seq = 'test_smart_event.py::MyThread.run'
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline starting'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
thread_app = MyThread(exp_log_msgs)
thread_app.start()
cmds.get_cmd('alpha')
exp_log_msgs.add_alpha_pair_with_msg('alpha pair beta 2',
['alpha', 'beta'])
smart_event.pair_with(remote_name='beta',
log_msg='alpha pair beta 2')
descs.paired('alpha', 'beta')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 1')
smart_event.sync(log_msg='mainline sync point 1')
smart_event.pause_until(WUCond.RemoteWaiting)
exp_log_msgs.add_alpha_resume_msg(
f'post thread {smart_event.remote.name} 23', True, 42)
smart_event.resume(log_msg=f'post thread {smart_event.remote.name} 23',
code=42)
exp_log_msgs.add_alpha_sync_msg('mainline sync point 2')
smart_event.sync(log_msg='mainline sync point 2')
exp_log_msgs.add_alpha_wait_msg('wait for post from thread 34')
assert smart_event.wait(log_msg='wait for post from thread 34')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 3')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 4')
smart_event.sync(log_msg='mainline sync point 4')
thread_app.join()
smart_event.code = None
smart_event.remote.code = None
descs.thread_end('beta')
l_msg = 'mainline all tests complete'
exp_log_msgs.add_msg(l_msg)
logger.debug('mainline all tests complete')
exp_log_msgs.verify_log_msgs(caplog=caplog,
log_enabled_tf=log_enabled_arg)
# restore root to debug
logging.getLogger().setLevel(logging.DEBUG)
###########################################################################
# test_smart_event_thread_event_app_event_logger
###########################################################################
def test_smart_event_thread_event_app_event_logger(self,
caplog,
log_enabled_arg
) -> None:
"""Test smart event logger with thread_event_app thread.
Args:
caplog: fixture to capture log messages
log_enabled_arg: fixture to indicate whether log is enabled
"""
class MyThread(threading.Thread, SmartEvent):
def __init__(self,
exp_log_msgs1: ExpLogMsgs):
threading.Thread.__init__(self)
SmartEvent.__init__(self, name='beta', thread=self)
self.exp_log_msgs = exp_log_msgs1
def run(self):
self.exp_log_msgs.add_msg('ThreadApp run entered')
logger.debug('ThreadApp run entered')
descs.add_desc(SmartEventDesc(name='beta',
s_event=self,
thread=self))
cmds.queue_cmd('alpha')
self.exp_log_msgs.add_beta_pair_with_msg('beta to alpha 3',
['beta', 'alpha'])
self.pair_with(remote_name='alpha',
log_msg='beta to alpha 3')
descs.paired('alpha', 'beta')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 1')
self.sync(log_msg='beta sync point 1')
self.exp_log_msgs.add_beta_wait_msg(
'wait for mainline to post 12')
assert self.wait(log_msg='wait for mainline to post 12')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 2')
self.sync(log_msg='beta sync point 2')
self.pause_until(WUCond.RemoteWaiting)
self.exp_log_msgs.add_beta_resume_msg('post mainline 23')
self.resume(log_msg='post mainline 23')
self.exp_log_msgs.add_beta_sync_msg('beta sync point 3')
self.sync(log_msg='beta sync point 3')
cmds = Cmds()
descs = ThreadPairDescs()
if log_enabled_arg:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
alpha_call_seq = ('test_smart_event.py::TestSmartEventLogger.'
'test_smart_event_thread_event_app_event_logger')
beta_call_seq = 'test_smart_event.py::MyThread.run'
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline starting'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
thread_event_app = MyThread(exp_log_msgs1=exp_log_msgs)
thread_event_app.start()
cmds.get_cmd('alpha')
exp_log_msgs.add_alpha_pair_with_msg('alpha to beta 3',
['alpha', 'beta'])
smart_event.pair_with(remote_name='beta',
log_msg='alpha to beta 3')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 1')
smart_event.sync(log_msg='mainline sync point 1')
smart_event.pause_until(WUCond.RemoteWaiting)
exp_log_msgs.add_alpha_resume_msg(
f'post thread {thread_event_app.name} 12')
smart_event.resume(log_msg=f'post thread '
f'{thread_event_app.name} 12')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 2')
smart_event.sync(log_msg='mainline sync point 2')
exp_log_msgs.add_alpha_wait_msg('wait for post from thread 23')
assert smart_event.wait(log_msg='wait for post from thread 23')
exp_log_msgs.add_alpha_sync_msg('mainline sync point 3')
smart_event.sync(log_msg='mainline sync point 3')
thread_event_app.join()
descs.thread_end('beta')
exp_log_msgs.add_msg('mainline all tests complete')
logger.debug('mainline all tests complete')
exp_log_msgs.verify_log_msgs(caplog=caplog,
log_enabled_tf=log_enabled_arg)
# restore root to debug
logging.getLogger().setLevel(logging.DEBUG)
###############################################################################
# TestCombos Class
###############################################################################
class TestCombos:
"""Test various combinations of SmartEvent."""
###########################################################################
    # test_smart_event_f1_combos
###########################################################################
def test_smart_event_f1_combos(self,
action_arg1: Any,
code_arg1: Any,
log_msg_arg1: Any,
action_arg2: Any,
caplog: Any,
thread_exc: Any) -> None:
"""Test the SmartEvent with f1 combos.
Args:
action_arg1: first action
code_arg1: whether to set and recv a code
log_msg_arg1: whether to specify a log message
action_arg2: second action
caplog: fixture to capture log messages
thread_exc: intercepts thread exceptions
"""
alpha_call_seq = ('test_smart_event.py::TestCombos.action_loop')
beta_call_seq = ('test_smart_event.py::thread_func1')
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline entered'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds = Cmds()
descs = ThreadPairDescs()
cmds.l_msg = log_msg_arg1
cmds.r_code = code_arg1
f1_thread = threading.Thread(target=thread_func1,
args=(cmds,
descs,
exp_log_msgs))
l_msg = 'mainline about to start thread_func1'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
f1_thread.start()
self.action_loop(action1=action_arg1,
action2=action_arg2,
cmds=cmds,
descs=descs,
exp_log_msgs=exp_log_msgs,
thread_exc1=thread_exc)
l_msg = 'main completed all actions'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Exit)
f1_thread.join()
descs.thread_end('beta')
if log_msg_arg1:
exp_log_msgs.verify_log_msgs(caplog=caplog, log_enabled_tf=True)
###########################################################################
    # test_smart_event_f1_f2_combos
###########################################################################
def test_smart_event_f1_f2_combos(self,
action_arg1: Any,
code_arg1: Any,
log_msg_arg1: Any,
action_arg2: Any,
caplog: Any,
thread_exc: Any) -> None:
"""Test the SmartEvent with f1 anf f2 combos.
Args:
action_arg1: first action
code_arg1: whether to set and recv a code
log_msg_arg1: whether to specify a log message
action_arg2: second action
caplog: fixture to capture log messages
thread_exc: intercepts thread exceptions
"""
alpha_call_seq = ('test_smart_event.py::TestCombos.action_loop')
beta_call_seq = ('test_smart_event.py::thread_func1')
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline entered'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds = Cmds()
descs = ThreadPairDescs()
cmds.l_msg = log_msg_arg1
cmds.r_code = code_arg1
f1_thread = threading.Thread(target=thread_func1,
args=(cmds,
descs,
exp_log_msgs))
f2_thread = threading.Thread(target=self.action_loop,
args=(action_arg1,
action_arg2,
cmds,
descs,
exp_log_msgs,
thread_exc))
l_msg = 'mainline about to start thread_func1'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
f1_thread.start()
f2_thread.start()
l_msg = 'main completed all actions'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
f2_thread.join()
descs.thread_end('alpha')
cmds.queue_cmd('beta', Cmd.Exit)
f1_thread.join()
descs.thread_end('beta')
if log_msg_arg1:
exp_log_msgs.verify_log_msgs(caplog=caplog, log_enabled_tf=True)
###########################################################################
    # test_smart_event_thread_app_combos
###########################################################################
def test_smart_event_thread_app_combos(self,
action_arg1: Any,
code_arg1: Any,
log_msg_arg1: Any,
action_arg2: Any,
caplog: Any,
thread_exc: Any) -> None:
"""Test the SmartEvent with ThreadApp combos.
Args:
action_arg1: first action
code_arg1: whether to set and recv a code
log_msg_arg1: whether to specify a log message
action_arg2: second action
caplog: fixture to capture log messages
thread_exc: intercepts thread exceptions
"""
class SmartEventApp(threading.Thread):
"""SmartEventApp class with thread."""
def __init__(self,
cmds: Cmds,
exp_log_msgs: ExpLogMsgs
) -> None:
"""Initialize the object.
Args:
cmds: commands for beta to do
exp_log_msgs: container for expected log messages
"""
super().__init__()
self.smart_event = SmartEvent(name='beta', thread=self)
self.cmds = cmds
self.exp_log_msgs = exp_log_msgs
def run(self):
"""Thread to send and receive messages."""
l_msg = 'SmartEventApp run started'
self.exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
thread_func1(
cmds=self.cmds,
descs=descs,
exp_log_msgs=self.exp_log_msgs,
s_event=self.smart_event)
l_msg = 'SmartEventApp run exiting'
self.exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
alpha_call_seq = ('test_smart_event.py::TestCombos.action_loop')
beta_call_seq = ('test_smart_event.py::thread_func1')
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline entered'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds = Cmds()
descs = ThreadPairDescs()
cmds.l_msg = log_msg_arg1
cmds.r_code = code_arg1
f1_thread = SmartEventApp(cmds,
exp_log_msgs)
l_msg = 'mainline about to start SmartEventApp'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
f1_thread.start()
self.action_loop(action1=action_arg1,
action2=action_arg2,
cmds=cmds,
descs=descs,
exp_log_msgs=exp_log_msgs,
thread_exc1=thread_exc)
l_msg = 'main completed all actions'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Exit)
f1_thread.join()
descs.thread_end('beta')
if log_msg_arg1:
exp_log_msgs.verify_log_msgs(caplog=caplog, log_enabled_tf=True)
###########################################################################
    # test_smart_event_thread_event_app_combos
###########################################################################
def test_smart_event_thread_event_app_combos(self,
action_arg1: Any,
code_arg1: Any,
log_msg_arg1: Any,
action_arg2: Any,
caplog: Any,
thread_exc: Any) -> None:
"""Test the SmartEvent with ThreadApp combos.
Args:
action_arg1: first action
code_arg1: whether to set and recv a code
log_msg_arg1: whether to specify a log message
action_arg2: second action
caplog: fixture to capture log messages
thread_exc: intercepts thread exceptions
"""
class SmartEventApp(threading.Thread, SmartEvent):
"""SmartEventApp class with thread and event."""
def __init__(self,
cmds: Cmds,
exp_log_msgs: ExpLogMsgs
) -> None:
"""Initialize the object.
Args:
cmds: commands for beta to do
exp_log_msgs: container for expected log messages
"""
threading.Thread.__init__(self)
SmartEvent.__init__(self,
name='beta',
thread=self)
self.cmds = cmds
self.exp_log_msgs = exp_log_msgs
def run(self):
"""Thread to send and receive messages."""
l_msg = 'SmartEventApp run started'
self.exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
thread_func1(
cmds=self.cmds,
descs=descs,
exp_log_msgs=self.exp_log_msgs,
s_event=self)
l_msg = 'SmartEventApp run exiting'
self.exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
alpha_call_seq = ('test_smart_event.py::TestCombos.action_loop')
beta_call_seq = ('test_smart_event.py::thread_func1')
exp_log_msgs = ExpLogMsgs(alpha_call_seq, beta_call_seq)
l_msg = 'mainline entered'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds = Cmds()
descs = ThreadPairDescs()
cmds.l_msg = log_msg_arg1
cmds.r_code = code_arg1
f1_thread = SmartEventApp(cmds,
exp_log_msgs)
l_msg = 'mainline about to start SmartEventApp'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
f1_thread.start()
self.action_loop(action1=action_arg1,
action2=action_arg2,
cmds=cmds,
descs=descs,
exp_log_msgs=exp_log_msgs,
thread_exc1=thread_exc)
l_msg = 'main completed all actions'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Exit)
f1_thread.join()
descs.thread_end('beta')
if log_msg_arg1:
exp_log_msgs.verify_log_msgs(caplog=caplog, log_enabled_tf=True)
###########################################################################
# action loop
###########################################################################
def action_loop(self,
action1: Any,
action2: Any,
cmds: Cmds,
descs: ThreadPairDescs,
exp_log_msgs: Any,
thread_exc1: Any
) -> None:
"""Actions to perform with the thread.
Args:
action1: first smart event request to do
action2: second smart event request to do
cmds: contains cmd queues and other test args
descs: tracking and verification for registry
exp_log_msgs: container for expected log messages
thread_exc1: contains any uncaptured errors from thread
Raises:
IncorrectActionSpecified: The Action is not recognized
            UnrecognizedCmd: beta sent mainline an unrecognized command
"""
cmds.get_cmd('alpha') # go1
smart_event = SmartEvent(name='alpha')
descs.add_desc(SmartEventDesc(name='alpha',
s_event=smart_event,
thread=threading.current_thread()))
cmds.queue_cmd('beta') # go2
smart_event.pair_with(remote_name='beta')
cmds.get_cmd('alpha') # go3
actions = []
actions.append(action1)
actions.append(action2)
for action in actions:
if action == Action.MainWait:
l_msg = 'main starting Action.MainWait'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Resume)
assert smart_event.wait()
if cmds.r_code:
assert smart_event.code == cmds.r_code
assert cmds.r_code == smart_event.get_code()
elif action == Action.MainSync:
l_msg = 'main starting Action.MainSync'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Sync)
if cmds.l_msg:
exp_log_msgs.add_alpha_sync_msg(cmds.l_msg, True)
assert smart_event.sync(log_msg=cmds.l_msg)
else:
assert smart_event.sync()
elif action == Action.MainSync_TOT:
l_msg = 'main starting Action.MainSync_TOT'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Sync)
if cmds.l_msg:
exp_log_msgs.add_alpha_sync_msg(cmds.l_msg, True)
assert smart_event.sync(timeout=5,
log_msg=cmds.l_msg)
else:
assert smart_event.sync(timeout=5)
elif action == Action.MainSync_TOF:
l_msg = 'main starting Action.MainSync_TOF'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
l_msg = r'alpha timeout of a sync\(\) request.'
exp_log_msgs.add_msg(l_msg)
if cmds.l_msg:
exp_log_msgs.add_alpha_sync_msg(cmds.l_msg, False)
assert not smart_event.sync(timeout=0.3,
log_msg=cmds.l_msg)
else:
assert not smart_event.sync(timeout=0.3)
# for this case, we did not tell beta to do anything, so
# we need to tell ourselves to go to next action.
# Note that we could use a continue, but we also want
# to check for thread exception which is what we do
# at the bottom
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif action == Action.MainResume:
l_msg = 'main starting Action.MainResume'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.r_code:
assert smart_event.resume(code=cmds.r_code)
assert smart_event.remote.code == cmds.r_code
else:
assert smart_event.resume()
assert not smart_event.remote.code
assert smart_event.event.is_set()
cmds.queue_cmd('beta', Cmd.Wait)
elif action == Action.MainResume_TOT:
l_msg = 'main starting Action.MainResume_TOT'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.r_code:
assert smart_event.resume(code=cmds.r_code, timeout=0.5)
assert smart_event.remote.code == cmds.r_code
else:
assert smart_event.resume(timeout=0.5)
assert not smart_event.remote.code
assert smart_event.event.is_set()
cmds.queue_cmd('beta', Cmd.Wait)
elif action == Action.MainResume_TOF:
l_msg = 'main starting Action.MainResume_TOF'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
l_msg = (f'{smart_event.name} timeout '
r'of a resume\(\) request with '
r'self.event.is_set\(\) = True and '
'self.remote.deadlock = False')
exp_log_msgs.add_msg(l_msg)
assert not smart_event.event.is_set()
# pre-resume to set flag
if cmds.r_code:
assert smart_event.resume(code=cmds.r_code)
assert smart_event.remote.code == cmds.r_code
else:
assert smart_event.resume()
assert not smart_event.remote.code
assert smart_event.event.is_set()
if cmds.r_code:
start_time = time.time()
assert not smart_event.resume(code=cmds.r_code,
timeout=0.3)
assert 0.3 <= (time.time() - start_time) <= 0.5
assert smart_event.remote.code == cmds.r_code
else:
start_time = time.time()
assert not smart_event.resume(timeout=0.5)
assert 0.5 <= (time.time() - start_time) <= 0.75
assert not smart_event.remote.code
assert smart_event.event.is_set()
# tell thread to clear wait
cmds.queue_cmd('beta', Cmd.Wait_Clear)
elif action == Action.ThreadWait:
l_msg = 'main starting Action.ThreadWait'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Wait)
smart_event.pause_until(WUCond.RemoteWaiting)
if cmds.r_code:
smart_event.resume(code=cmds.r_code)
assert smart_event.remote.code == cmds.r_code
else:
smart_event.resume()
elif action == Action.ThreadWait_TOT:
l_msg = 'main starting Action.ThreadWait_TOT'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Wait_TOT)
smart_event.pause_until(WUCond.RemoteWaiting)
# time.sleep(0.3)
if cmds.r_code:
smart_event.resume(code=cmds.r_code)
assert smart_event.remote.code == cmds.r_code
else:
smart_event.resume()
elif action == Action.ThreadWait_TOF:
l_msg = 'main starting Action.ThreadWait_TOF'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Wait_TOF)
smart_event.pause_until(WUCond.RemoteWaiting)
elif action == Action.ThreadResume:
l_msg = 'main starting Action.ThreadResume'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
cmds.queue_cmd('beta', Cmd.Resume)
smart_event.pause_until(WUCond.RemoteResume)
assert smart_event.wait()
if cmds.r_code:
assert smart_event.code == cmds.r_code
assert cmds.r_code == smart_event.get_code()
else:
raise IncorrectActionSpecified('The Action is not recognized')
while True:
thread_exc1.raise_exc_if_one() # detect thread error
alpha_cmd = cmds.get_cmd('alpha')
if alpha_cmd == Cmd.Next_Action:
break
else:
raise UnrecognizedCmd
# clear the codes to allow verify registry to work
smart_event.code = None
smart_event.remote.code = None
###############################################################################
# thread_func1
###############################################################################
def thread_func1(cmds: Cmds,
descs: ThreadPairDescs,
exp_log_msgs: Any,
s_event: Optional[SmartEvent] = None,
) -> None:
"""Thread to test SmartEvent scenarios.
Args:
cmds: commands to do
descs: used to verify registry and SmartEvent status
exp_log_msgs: expected log messages
s_event: instance of SmartEvent
Raises:
UnrecognizedCmd: Thread received an unrecognized command
"""
l_msg = 'thread_func1 beta started'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if s_event is None:
s_event = SmartEvent(name='beta')
descs.add_desc(SmartEventDesc(name='beta',
s_event=s_event,
thread=threading.current_thread()))
cmds.queue_cmd('alpha', 'go1')
cmds.get_cmd('beta') # go2
s_event.pair_with(remote_name='alpha')
descs.paired('alpha', 'beta')
cmds.queue_cmd('alpha', 'go3')
while True:
beta_cmd = cmds.get_cmd('beta')
if beta_cmd == Cmd.Exit:
break
l_msg = f'thread_func1 received cmd: {beta_cmd}'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if beta_cmd == Cmd.Wait:
l_msg = 'thread_func1 doing Wait'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.l_msg:
exp_log_msgs.add_beta_wait_msg(cmds.l_msg, True)
assert s_event.wait(log_msg=cmds.l_msg)
else:
assert s_event.wait()
if cmds.r_code:
assert s_event.code == cmds.r_code
assert cmds.r_code == s_event.get_code()
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif beta_cmd == Cmd.Wait_TOT:
l_msg = 'thread_func1 doing Wait_TOT'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.l_msg:
exp_log_msgs.add_beta_wait_msg(cmds.l_msg, True)
assert s_event.wait(log_msg=cmds.l_msg)
else:
assert s_event.wait()
if cmds.r_code:
assert s_event.code == cmds.r_code
assert cmds.r_code == s_event.get_code()
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif beta_cmd == Cmd.Wait_TOF:
l_msg = 'thread_func1 doing Wait_TOF'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
l_msg = (f'{s_event.name} timeout of a '
r'wait\(\) request with '
'self.wait_wait = True and '
'self.sync_wait = False')
exp_log_msgs.add_msg(l_msg)
start_time = time.time()
if cmds.l_msg:
exp_log_msgs.add_beta_wait_msg(cmds.l_msg, False)
assert not s_event.wait(timeout=0.5,
log_msg=cmds.l_msg)
else:
assert not s_event.wait(timeout=0.5)
assert 0.5 < (time.time() - start_time) < 0.75
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif beta_cmd == Cmd.Wait_Clear:
l_msg = 'thread_func1 doing Wait_Clear'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.l_msg:
exp_log_msgs.add_beta_wait_msg(cmds.l_msg, True)
assert s_event.wait(log_msg=cmds.l_msg)
else:
assert s_event.wait()
if cmds.r_code:
assert s_event.code == cmds.r_code
assert cmds.r_code == s_event.get_code()
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif beta_cmd == Cmd.Sync:
l_msg = 'thread_func1 beta doing Sync'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.l_msg:
exp_log_msgs.add_beta_sync_msg(cmds.l_msg, True)
assert s_event.sync(log_msg=cmds.l_msg)
else:
assert s_event.sync()
cmds.queue_cmd('alpha', Cmd.Next_Action)
elif beta_cmd == Cmd.Resume:
l_msg = 'thread_func1 beta doing Resume'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
if cmds.r_code:
if cmds.l_msg:
exp_log_msgs.add_beta_resume_msg(cmds.l_msg,
True,
cmds.r_code)
assert s_event.resume(code=cmds.r_code,
log_msg=cmds.l_msg)
else:
assert s_event.resume(code=cmds.r_code)
assert s_event.remote.code == cmds.r_code
else:
if cmds.l_msg:
exp_log_msgs.add_beta_resume_msg(cmds.l_msg, True)
assert s_event.resume(log_msg=cmds.l_msg)
else:
assert s_event.resume()
cmds.queue_cmd('alpha', Cmd.Next_Action)
else:
raise UnrecognizedCmd('Thread received an unrecognized cmd')
l_msg = 'thread_func1 beta exiting'
exp_log_msgs.add_msg(l_msg)
logger.debug(l_msg)
| 2.0625 | 2 |
xena_gdc_etl/gdc.py | ayan-b/xena-GDC-ETL | 5 | 12787236 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module provides basic and minimum necessary functions for carrying out
data query and download for Xena GDC ETL pipelines.
"""
# Ensure Python 2 and 3 compatibility
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import warnings
import pandas as pd
import requests
from .utils import mkdir_p, reduce_json_array, get_json_objects, get_to_drops
GDC_API_BASE = 'https://api.gdc.cancer.gov'
_SUPPORTED_FILE_TYPES = {
'txt',
'vcf',
'bam',
'tsv',
'xml',
'maf',
'xlsx',
'tar',
'gz',
'md5',
'xls',
}
_SUPPORTED_DATASETS = [
{'data_type': 'Copy Number Segment'},
{'data_type': 'Masked Copy Number Segment'},
{'data_type': 'Isoform Expression Quantification'},
{'data_type': 'miRNA Expression Quantification'},
{'data_type': 'Methylation Beta Value'},
{'analysis.workflow_type': 'HTSeq - Counts'},
{'analysis.workflow_type': 'HTSeq - FPKM'},
{'analysis.workflow_type': 'HTSeq - FPKM-UQ'},
{'analysis.workflow_type': 'MuSE Variant Aggregation and Masking'},
{'analysis.workflow_type': 'MuTect2 Variant Aggregation and Masking'},
{
'analysis.workflow_type':
'SomaticSniper Variant Aggregation and Masking'
},
{'analysis.workflow_type': 'VarScan2 Variant Aggregation and Masking'},
{'data_type': 'Biospecimen Supplement'},
{'data_type': 'Clinical Supplement'},
]
# https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/tcga-study-abbreviations
TCGA_STUDY_ABBR = {
'LAML': 'Acute Myeloid Leukemia',
'ACC': 'Adrenocortical carcinoma',
'BLCA': 'Bladder Urothelial Carcinoma',
'LGG': 'Brain Lower Grade Glioma',
'BRCA': 'Breast invasive carcinoma',
'CESC': (
'Cervical squamous cell carcinoma and endocervical adenocarcinoma'
),
'CHOL': 'Cholangiocarcinoma',
'LCML': 'Chronic Myelogenous Leukemia',
'COAD': 'Colon adenocarcinoma',
'CNTL': 'Controls',
'ESCA': 'Esophageal carcinoma',
'FPPP': 'FFPE Pilot Phase II',
'GBM': 'Glioblastoma multiforme',
'HNSC': 'Head and Neck squamous cell carcinoma',
'KICH': 'Kidney Chromophobe',
'KIRC': 'Kidney renal clear cell carcinoma',
'KIRP': 'Kidney renal papillary cell carcinoma',
'LIHC': 'Liver hepatocellular carcinoma',
'LUAD': 'Lung adenocarcinoma',
'LUSC': 'Lung squamous cell carcinoma',
'DLBC': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma',
'MESO': 'Mesothelioma',
'MISC': 'Miscellaneous',
'OV': 'Ovarian serous cystadenocarcinoma',
'PAAD': 'Pancreatic adenocarcinoma',
'PCPG': 'Pheochromocytoma and Paraganglioma',
'PRAD': 'Prostate adenocarcinoma',
'READ': 'Rectum adenocarcinoma',
'SARC': 'Sarcoma',
'SKCM': 'Skin Cutaneous Melanoma',
'STAD': 'Stomach adenocarcinoma',
'TGCT': 'Testicular Germ Cell Tumors',
'THYM': 'Thymoma',
'THCA': 'Thyroid carcinoma',
'UCS': 'Uterine Carcinosarcoma',
'UCEC': 'Uterine Corpus Endometrial Carcinoma',
'UVM': 'Uveal Melanoma',
}
def simple_and_filter(in_dict={}, exclude_dict={}):
"""Make a simple GDC API compatible query filter from a dict, in which
individual conditions are joint by the "and" logic.
In the return filter, individual conditions in the ``in_dict`` and
``exclude_dict`` will be joint by the "and" operator, meaning a hit has to
match all conditions. Here, a condition can use either a "in" operator
(specified in the ``in_dict``) or a "exclude" operator (specified in the
``exclude_dict``).
See details at
https://docs.gdc.cancer.gov/API/Users_Guide/Search_and_Retrieval/#filters-specifying-the-query
Args:
in_dict (dict): A dict describing query conditions with the "in"
operator. Each (key, value) pair represents for one condition. The
"key" is the 'field' operand. Operator between "key" and "value"
is "in".
exclude_dict (dict): A dict describing query conditions with the
"exclude" operator. Each (key, value) pair represents for one
condition. The "key" is the 'field' operand. Operator between
"key" and "value" is "exclude_dict".
Returns:
dict: A dict of filter conforming to GDC API's format. It should then
be converted to JSON format and used in the following http request.
"""
if not in_dict and not exclude_dict:
return in_dict
operation_list = []
for key in in_dict:
value = in_dict[key]
if not isinstance(value, list):
value = [value]
operation_list.append(
{"op": "in", "content": {"field": key, "value": value}}
)
for key in exclude_dict:
value = exclude_dict[key]
if not isinstance(value, list):
value = [value]
operation_list.append(
{"op": "exclude", "content": {"field": key, "value": value}}
)
return {"op": "and", "content": operation_list}
def search(
endpoint,
in_filter={},
exclude_filter={},
fields=[],
expand=[],
typ='dataframe',
method='GET',
):
"""Search one GDC endpoints and return searching results in a pandas
DataFrame if possible.
When searching results cannot be safely converted to a pandas DataFrame,
results will be returned in the JSON format as it is returned from GDC
API.
Args:
endpoint (str): One string of GDC API supported endpoint. See:
https://docs.gdc.cancer.gov/API/Users_Guide/Getting_Started/#api-endpoints
        in_filter (dict, optional): A dict of query conditions which will be
            used to perform the query. Each (key, value) pair represents one
            condition. It will be passed to ``simple_and_filter`` for making
            a query filter compatible with the GDC API. Please check the
            ``simple_and_filter`` function for details.
        exclude_filter (dict, optional): An optional dict of query conditions
            which will be used to perform the query. Each (key, value) pair
            represents one condition. It will be passed to
            ``simple_and_filter`` for making a query filter compatible with
            the GDC API. Please check the ``simple_and_filter`` function for
            details.
fields (list or str, optional): One or more fields to be queried. Each
field will be used as a column name in the returned DataFrame. It
can be a comma separated string or a list of field strings or a
combination of both.
expand (list or str, optional): One or more field groups to be
queried. It can be a comma separated string or a list of field
strings or a combination of both.
typ (str): type of search result to return (JSON or dataframe).
Defaults to 'dataframe'.
method (str): HTTP method for the search. Defaults to 'GET'.
..
Returns:
pandas.core.frame.DataFrame or str: A search result in form of a
pandas DataFrame or a JSON formatted string, depending on the
value of ``typ`` and the DataFrame convertibility of JSON.
"""
try:
assert typ.lower() in ['json', 'dataframe']
except (AttributeError, AssertionError):
raise ValueError(
'typ should be a string of either JSON or dataframe, '
'not {}'.format(typ)
)
filters = simple_and_filter(in_dict=in_filter, exclude_dict=exclude_filter)
if isinstance(fields, str):
fields = [fields]
if isinstance(expand, str):
expand = [expand]
payload = {'size': 1}
if filters:
payload['filters'] = json.dumps(filters)
if fields:
payload['fields'] = ','.join(fields)
if expand:
payload['expand'] = ','.join(expand)
url = '{}/{}'.format(GDC_API_BASE, endpoint)
if method.upper() == 'POST':
response = requests.post(url, data=payload)
elif method.upper() == 'GET':
response = requests.get(url, params=payload)
else:
raise ValueError(
'Invalid method: {}\n method must be either "GET" '
'or "POST".'.format(method)
)
try:
payload['size'] = response.json()['data']['pagination']['total']
except KeyError:
payload.pop('size')
response = requests.get(url, params=payload)
if typ.lower() == 'json':
return response.json()
else:
warnings.warn(
            'Failed to get a table of results; returning JSON. '
'Please check the result carefully.',
stacklevel=2,
)
return response.json()
if method.upper() == 'POST':
response = requests.post(url, data=payload)
else:
response = requests.get(url, params=payload)
if response.status_code == 200:
results = response.json()['data']['hits']
if typ.lower() == 'json':
return results
try:
return pd.io.json.json_normalize(reduce_json_array(results))
except Exception:
warnings.warn(
'Fail to convert searching results into table. '
'JSON will be returned.',
stacklevel=2,
)
return results
else:
warnings.warn(
'Searching failed with HTTP status code: '
'{}'.format(response.status_code),
stacklevel=2,
)
return None
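# Illustrative usage sketch (the filter and field names here are just examples):
#     df = search('files', in_filter={'access': 'open'},
#                 fields=['file_id', 'file_name', 'file_size'])
# returns a pandas DataFrame of matching hits, or raw JSON when typ='json' or
# when the hits cannot be flattened into a table.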
def get_ext(file_name):
"""Get all extensions supported by this module in the file name.
Supported extensions are defined in the constant "_SUPPORTED_FILE_TYPES".
Multiple extensions will be separated by ".".
Args:
file_name (str): The filename will be split by "." and checked from
left to right. Extensions will be kept starting from the first
(and left most) supported extension.
Returns:
str: A string of extensions joint by ".".
"""
# https://github.com/broadinstitute/gdctools/blob/master/gdctools/lib/meta.py
name_list = file_name.split('.')
for i in range(len(name_list)):
if name_list[i] in _SUPPORTED_FILE_TYPES:
break
return '.'.join(name_list[i:])
def download(uuids, download_dir='.', chunk_size=4096):
"""Download GDC's open access data according to UUID(s).
Args:
uuids (str, list or dict): A single UUID (str), a list of UUIDs (list)
or a dict whose keys are UUIDs for target file(s). If "uuids" is
str or list, downloaded file(s) will be renamed to
"UUID.extension" where "extension" is extracted by "get_ext()"
from the original filename. Renamed file(s) will be saved at
"download_dir". If "uuids" is a dict, the argument "download_dir"
will be ignored; values of dict will be paths for saving
corresponding downloaded files.
download_dir (str, optional): The directory for saving downloaded
file(s) when "uuids" is str or list. It will be ignored if "uuids"
is dict. Defaults to ".".
chunk_size (int, optional): The chunk size is the number of bytes it
should read into memory, when the response is got with
"stream=True". Check the documentation of "requests" module for
details. Defaults to 4096.
Returns:
list: a list of paths for downloaded files.
"""
if isinstance(uuids, str):
uuids = {uuids: None}
elif isinstance(uuids, list):
uuids = {uuid: None for uuid in uuids}
elif not isinstance(uuids, dict):
raise TypeError(
'uuids is a {}; it should be a string, a list or a '
'dict'.format(type(uuids))
)
total = len(uuids)
count = 0
download_list = []
data_endpt = '{}/data/'.format(GDC_API_BASE)
for uuid in uuids:
count += 1
response = requests.get(data_endpt + uuid, stream=True)
if response.status_code == 200:
file_size = int(response.headers['Content-Length'])
if uuids[uuid] is None:
content_disp = response.headers['Content-Disposition']
ori_name = content_disp[content_disp.find('filename=') + 9 :] # noqa: E203, E501
new_filename = uuid + '.' + get_ext(ori_name)
path = os.path.join(
os.path.abspath(download_dir), new_filename
)
else:
path = os.path.abspath(uuids[uuid])
status = '\r[{:d}/{:d}] Download to "{}": {:4.0%}'
mkdir_p(os.path.dirname(path))
with open(path, 'wb') as f:
downloaded = 0
print(status.format(count, total, path, 0), end='')
sys.stdout.flush()
for chunk in response.iter_content(chunk_size):
f.write(chunk)
downloaded = downloaded + chunk_size
print(
status.format(
count, total, path, min(1, downloaded / file_size)
),
end='',
)
sys.stdout.flush()
download_list.append(path)
else:
print('\rFail to download file {}.'.format(uuid))
print('')
return download_list
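# Illustrative usage sketch (the UUIDs are placeholders, not real GDC IDs):
#     download(['<uuid-1>', '<uuid-2>'], download_dir='gdc_data')
#     download({'<uuid-1>': 'data/expression.tsv.gz'})
# The first form saves files as "<UUID>.<ext>" under download_dir; the second
# saves each file to the exact path given as the dict value.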
def get_project_info(projects=None):
"""Get info for project(s) of interest through GDC API.
Args:
projects (list or str): one (str) or a list of GDC "project_id"(s),
whose info will be returned. If None, projects will not be
filtered, i.e. info for all GDC projects will be returned.
Defaults to None.
Returns:
pandas.core.frame.DataFrame: A DataFrame of project info including
"project ID", "project name", "primary site" and "program name".
"""
in_filter = {}
if projects is not None:
if isinstance(projects, list):
in_filter = {'projects.project_id': projects}
else:
in_filter = {'projects.project_id': [projects]}
project_df = search(
'projects',
in_filter=in_filter,
fields=['name', 'primary_site', 'project_id', 'program.name'],
)
return project_df.set_index('id')
def get_samples_clinical(projects=None):
"""Get info for all samples of ``projects`` and clinical info for all
cases of ``projects`` through GDC API.
Args:
projects (list or str): one (str) or a list of GDC "project_id"(s),
whose info will be returned. If None, projects will not be
filtered, i.e. info for all GDC projects will be returned.
Defaults to None.
Returns:
pandas.core.frame.DataFrame: A DataFrame organized by samples, having
info for all samples of ``projects``, as well as corresponding
clinical info.
"""
in_filter = {}
if projects is not None:
if isinstance(projects, list):
in_filter = {'project.project_id': projects}
else:
in_filter = {'project.project_id': [projects]}
fields = [
'case_id',
'created_datetime',
'disease_type',
'id',
'primary_site',
'state',
'submitter_id',
'updated_datetime',
]
expand = [
'demographic',
'diagnoses',
'exposures',
'family_histories',
'project',
'samples',
'tissue_source_site',
]
res = search(
'cases', in_filter=in_filter, fields=fields, expand=expand, typ='json'
)
to_drops = set()
for ele in res:
to_drops |= set(get_to_drops(ele))
print("Dropping columns {} for {} projects".format(to_drops, projects))
reduced_no_samples_json = reduce_json_array(
[{k: v for k, v in d.items() if k != 'samples'} for d in res]
)
cases_df = pd.io.json.json_normalize(reduced_no_samples_json)
# In the list of reduced json, "samples" fields for each case are not
# consistently ``list`` (if there is only 1 sample for the case, it will
# be reduced into "naked" ``dict``). Therefore, it cannot be normalized
# correctly with ``record_path `` "samples". Use the raw json instead.
    # Besides, there are cases (34 as of 12/11/2017) which don't have any
    # samples and thus don't have the key "samples". Ignore them.
# for r in res:
# r.setdefault('samples', [{}])
# samples_json.append(r)
samples_df = pd.io.json.json_normalize(
[r for r in res if 'samples' in r],
'samples',
'id',
record_prefix='samples.',
)
merged_df = pd.merge(cases_df, samples_df, how='inner', on='id')
merged_df.drop(list(to_drops), axis=1, inplace=True)
return merged_df
def gdc_check_new(new_file_uuids):
"""
    This function helps check a list of GDC's updated files and summarize
impacted project(s), data_type(s) and analysis.workflow_type(s).
"""
df_list = []
for uuids in (
new_file_uuids[i : i + 20000] # noqa: E203
for i in range(0, len(new_file_uuids), 20000)
):
df = search(
'files',
in_filter={'access': 'open', 'file_id': uuids},
fields=[
'cases.project.project_id',
'data_type',
'analysis.workflow_type',
],
method='POST',
)
try:
df['cases'] = df['cases'].map(
lambda c: ', '.join({p['project']['project_id'] for p in c})
)
except: # noqa: E722
pass
df_list.append(df)
df = pd.concat(df_list, axis=0)
try:
df = df.drop('id', axis=1)
except KeyError:
pass
try:
df = df.drop_duplicates()
except: # noqa: E722
pass
df.to_csv(sys.stdout, sep='\t', index=False)
def map_two_fields(endpoint, input_field, output_field, input_values=[]):
"""This function helps map values from ``input_field`` of certain
``endpoint`` to values from ``output_field`` of the same ``endpoint``. It
returns a dict whose keys are values from ``input_field`` of ``endpoint``
and values are values from ``output_field`` of ``endpoint``. It can also
accept a list of values from ``input_field`` to filter the return dict.
Args:
endpoint (str): One string of GDC API supported endpoint. This
function only does mapping for two fields from the same endpoint.
For available endpoints, see:
https://docs.gdc.cancer.gov/API/Users_Guide/Getting_Started/#api-endpoints
input_field (str): One valid field of the ``endpoint``. Values from
this field will be used as keys of the return dict.
``input_values``, if provided, are values on this field.
        output_field (str): One valid field of the ``endpoint``.
        input_values (list, optional): query values on ``input_field`` which
            need to be mapped. It helps limit/filter the returned dict.
Returns:
dict: A dict whose keys are ``input_values`` if it's not empty or all
possible values from ``input_field`` of ``endpoint``. Values of return
dict are values from ``output_field`` of ``endpoint``.
"""
raw_results = search(
endpoint=endpoint,
in_filter={input_field: input_values} if input_values else {},
fields=[input_field, output_field],
typ="json",
method='POST',
)
# Split input_field and output_field into shared_path, input_specific_path
# and output_specific_path
input_keys = input_field.split('.')
output_keys = output_field.split('.')
for i in range(min([len(input_keys), len(output_keys)])):
if input_keys[i] != output_keys[i]:
break
shared_path = '.'.join(input_keys[:i])
input_sub_path = '.'.join(input_keys[i:])
output_sub_path = '.'.join(output_keys[i:])
# Get the list of dicts by shared_path
if shared_path:
shared_objs = get_json_objects(raw_results, shared_path)
else:
if isinstance(raw_results, list):
shared_objs = raw_results
else:
shared_objs = [raw_results]
while shared_objs and isinstance(shared_objs[0], list):
shared_objs = [obj for objs in shared_objs for obj in objs]
# For shared_objects, get the list of values by input_specific_path and
# output_specific_path
map = {}
for shared_obj in shared_objs:
input_found = get_json_objects(shared_obj, input_sub_path)
output_found = get_json_objects(shared_obj, output_sub_path)
for v in input_found:
if input_values and v not in input_values:
continue
while output_found and isinstance(output_found[0], list):
output_found = [obj for objs in output_found for obj in objs]
if v in map:
map[v] |= set(output_found)
else:
map[v] = set(output_found)
# Fill in failed input_values
for v in input_values:
if v not in map:
map[v] = set()
return {k: list(map[k]) for k in map}
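# Illustrative usage sketch (the UUIDs are placeholders): map file UUIDs to the
# submitter IDs of the cases they belong to, e.g.
#     map_two_fields('files', 'file_id', 'cases.submitter_id',
#                    input_values=['<uuid-1>', '<uuid-2>'])
# which returns a dict such as {'<uuid-1>': ['TCGA-..-....'], '<uuid-2>': [...]}.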
| 2.25 | 2 |
nexpose/nexpose.py | liberza/python-nexpose | 0 | 12787237 | #!/usr/bin/python3
import defusedxml.ElementTree as ET
import urllib.request
import urllib.parse
import sys
import ssl
__author__ = '<NAME> <<EMAIL>>'
class NexposeException(Exception):
'''Raise this exception when the Nexpose API returns errors.'''
pass
class Nexpose:
'''
Nexpose API wrapper.
'''
def __init__(self, hostname, port):
self.hostname = hostname
self.port = port
self.url = 'https://%s:%s/api/1.1/xml' % (self.hostname, self.port)
self.session_id = None
# Often the Nexpose Console is run with a self-signed cert.
# We allow for that here.
self.ctx = ssl.create_default_context()
self.ctx.check_hostname = False
self.ctx.verify_mode = ssl.CERT_NONE
def api_request(self, xml_string):
'''Send an API request and return the response\'s root XML element.'''
# Encode the xml so that urllib will accept it.
post_data = (xml_string).encode('utf-8')
# Prepare the request.
request = urllib.request.Request(self.url)
request.add_header("Content-type", "text/xml")
# Get a response.
response = urllib.request.urlopen(request,
post_data,
context=self.ctx).read()
xml_response = ET.fromstring(response)
# Check for errors and return response.
        if xml_response.attrib.get('success') != '0':
return xml_response
else:
raise NexposeException(response)
def login(self, username, password):
'''Send a LoginRequest and capture the returned session-id.'''
xml_string = '<LoginRequest user-id=\"%s\" password=\"%s\" />'\
% (username, password)
xml_response = self.api_request(xml_string)
self.session_id = xml_response.attrib.get('session-id')
return xml_response
def logout(self):
'''Send a LogoutRequest.'''
xml_string = "<LogoutRequest session-id=\"%s\" />" % (self.session_id)
xml_response = self.api_request(xml_string)
return xml_response
def get_sites(self):
'''Return a list of dicts containing site information.'''
xml_string = '<SiteListingRequest session-id=\"%s\">\
</SiteListingRequest>' % self.session_id
xml_response = self.api_request(xml_string)
site_list = []
for SiteSummary in xml_response.iter('SiteSummary'):
site = {}
site['id'] = SiteSummary.get('id')
site['name'] = SiteSummary.get('name')
site['description'] = SiteSummary.get('description')
site['riskfactor'] = SiteSummary.get('riskfactor')
site['riskscore'] = SiteSummary.get('riskscore')
site_list.append(site)
return site_list
def get_site_hosts(self, site_id):
'''Return list of ranges and hostnames associated with a site.'''
xml_string = '<SiteConfigRequest session-id=\"%s\" site-id=\"%s\">\
</SiteConfigRequest>' % (self.session_id, site_id)
xml_response = self.api_request(xml_string)
host_list = []
site = xml_response.find('Site')
hosts = site.find('Hosts')
for host in hosts.getchildren():
if host.tag == 'range':
if host.attrib.get('to') is None:
host_list.append({'range' : host.attrib.get('from')})
else:
host_list.append({'range' : ('%s-%s' % \
(host.attrib.get('from'), host.attrib.get('to')))})
elif host.tag == 'host':
host_list.append({'host' : host.text})
return host_list
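    # The returned list mixes range and host entries, e.g. (illustrative values):
    #     [{'range': '10.0.0.1-10.0.0.254'}, {'host': 'scanner.example.local'}]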
def get_site_scan_config(self, site_id):
'''Return a dict of configuration info for a site.'''
xml_string = '<SiteConfigRequest session-id=\"%s\" site-id=\"%s\">\
</SiteConfigRequest>' % (self.session_id, site_id)
xml_response = self.api_request(xml_string)
site = xml_response.find('Site')
scan_config = site.find('ScanConfig')
config = {}
config['template_id'] = scan_config.attrib.get('templateID')
config['name'] = scan_config.attrib.get('name')
config['id'] = scan_config.attrib.get('configID')
config['engine_id'] = scan_config.attrib.get('engineID')
config['config_version'] = scan_config.attrib.get('configVersion')
return config
def get_scan_summary_attributes(self, scan_id, engine_id):
'''
Send a ScanStatisticsRequest and return the ScanSummary
attributes as a dict.
'''
xml_string = '<ScanStatisticsRequest session-id = \"%s\" \
engine-id = \"%s\" scan-id = \"%s\">\
</ScanStatisticsRequest>' % \
(self.session_id, engine_id, scan_id)
xml_response = self.api_request(xml_string)
scan_summary = xml_response.find('ScanSummary')
scan_summary_attributes = {}
for key in scan_summary.attrib:
scan_summary_attributes[key] = scan_summary.attrib[key]
return scan_summary_attributes
def scan_site(self, site_id):
'''Send SiteScanRequest and return dict of scan id and engine id.'''
xml_string = '<SiteScanRequest session-id = \"%s\" site-id=\"%s\">\
</SiteScanRequest>' % (self.session_id, site_id)
xml_response = self.api_request(xml_string)
scan = xml_response.find('Scan')
scan_id = scan.attrib.get('scan-id')
engine_id = scan.attrib.get('engine-id')
return {'scan_id' : scan_id, 'engine_id' : engine_id}
def get_site_devices(self, site_id):
        '''Print the raw device listing XML for a site.'''
xml_string = '<SiteDeviceListingRequest session-id = \"%s\" \
site-id = \"%s\"></SiteDeviceListingRequest>' % \
(self.session_id, site_id)
xml_response = self.api_request(xml_string)
print(ET.tostring(xml_response, encoding='ascii', method='xml'))
def scan_site_hosts(self, site_id, host_list):
'''
Send SiteDevicesScanRequest and return dict of scan id and engine
id. host_list is a list of ranges or hostnames as get_site_hosts()
would return.
'''
hosts_string = ''
for host in host_list:
ip_range = host.get('range')
if ip_range is not None:
split_ip_range = ip_range.split('-')
if len(split_ip_range) == 1:
hosts_string += ('<range from=\"%s\"/>' % \
str(split_ip_range[0]))
elif len(split_ip_range) == 2:
hosts_string += ('<range from=\"%s\" to=\"%s\"/>' % \
(split_ip_range[0],
split_ip_range[1]))
else:
raise Exception('Invalid IP range: %s' % ip_range)
else:
hostname = host.get('host')
hostname = hostname.replace("'","")
hosts_string += ('<host>%s</host>' % hostname)
xml_string = '<SiteDevicesScanRequest session-id=\"%s\" \
site-id=\"%s\"><Devices></Devices><Hosts>%s</Hosts>\
</SiteDevicesScanRequest>' % (self.session_id,
site_id,
hosts_string)
xml_response = self.api_request(xml_string)
scan = xml_response.find('Scan')
scan_id = scan.attrib.get('scan-id')
engine_id = scan.attrib.get('engine-id')
return {'scan_id': scan_id, 'engine_id' : engine_id}
if __name__ == '__main__':
# Usage: ./nexpose.py hostname port username password
try:
nexpose = Nexpose(sys.argv[1], sys.argv[2])
nexpose.login(sys.argv[3], sys.argv[4])
print(nexpose.get_site_scan_config('1'))
except urllib.error.URLError as e:
print("URLError: Perhaps you entered the wrong URL or port? %s" % e)
exit()
try:
nexpose.logout()
except:
print('Tried to logout when we weren\'t signed in.')
pass
| 2.515625 | 3 |
2020/day02.py | kyz/adventofcode | 0 | 12787238 | import re, collections
def count_valid(passwords, valid):
count = 0
m = re.compile(r"(\d+)-(\d+) (.): (.+)")
for p in passwords:
n1, n2, c, password = m.match(p).groups()
if valid(int(n1), int(n2), c, password):
count += 1
return count
def policy1(lo, hi, c, password):
letterfreq = collections.Counter(password)
return lo <= letterfreq[c] <= hi
def policy2(p1, p2, c, password):
return (password[p1-1] == c) ^ (password[p2-1] == c)
with open("day02.txt", "r") as fh:
passwords = [line.strip() for line in fh.readlines()]
print("2020 day 02 part 1: %d" % count_valid(passwords, policy1))
print("2020 day 02 part 2: %d" % count_valid(passwords, policy2))
| 3.65625 | 4 |
tests/extension/__init__.py | OriolAbril/sphinx-codeautolink | 0 | 12787239 | """
Basic extension tests.
The tests are structured as .txt files, parsed and executed here.
The structure of the file is::
expected autolink texts, one per line (their count gives the expected number of links)
# split
lines to add to the default conf.py
# split
index.rst content
"""
import re
import sys
import pytest
from pathlib import Path
from bs4 import BeautifulSoup
from sphinx.cmd.build import main as sphinx_main
# Insert test package root to path for all tests
sys.path.insert(0, str(Path(__file__).parent / "src"))
default_conf = """
extensions = [
"sphinx.ext.autodoc",
"sphinx_codeautolink",
]
autodoc_default_options = {
"members": True,
"undoc-members": True,
}
"""
txt_tests = list(Path(__file__).parent.glob('*.txt'))
any_whitespace = re.compile(r'\s*')
xfails = {
'ref_fluent_attrs.txt': sys.version_info < (3, 8),
'ref_fluent_call.txt': sys.version_info < (3, 8),
'ref_import_from_complex.txt': sys.version_info < (3, 8),
}
@pytest.mark.parametrize('file', txt_tests)
def test_extension(file: Path, tmp_path: Path):
if xfails.get(file.name, False):
pytest.xfail('Expected to fail.')
links, conf, index = file.read_text('utf-8').split('# split')
links = links.strip().split('\n')
if len(links) == 1 and not links[0]:
links = []
src_dir = tmp_path / 'src'
src_dir.mkdir()
(src_dir / 'conf.py').write_text(default_conf + conf, 'utf-8')
(src_dir / 'index.rst').write_text(index, 'utf-8')
build_dir = tmp_path / 'build'
sphinx_main(['-M', 'html', str(src_dir), str(build_dir)])
index_html = build_dir / 'html' / 'index.html'
text = index_html.read_text('utf-8')
soup = BeautifulSoup(text, 'html.parser')
blocks = list(soup.find_all('a', attrs={'class': 'sphinx-codeautolink-a'}))
assert len(blocks) == len(links)
for block, link in zip(blocks, links):
assert any_whitespace.sub('', ''.join(block.strings)) == link
| 2.625 | 3 |
nrpylatex/__init__.py | zachetienne/nrpylatex | 4 | 12787240 | from nrpylatex.parse_latex import Lexer, Parser, Tensor
from nrpylatex.parse_latex import ParseError, TensorError, OverrideWarning
from nrpylatex.parse_latex import parse_latex
__version__ = "1.0.8"
| 1.265625 | 1 |
train.py | AbcEric/AlphaZero_Gomoku | 0 | 12787241 | <reponame>AbcEric/AlphaZero_Gomoku
# -*- coding: utf-8 -*-
"""
An implementation of the training pipeline of AlphaZero for Gomoku (five-in-a-row)
@author: <NAME>
References:
    https://zhuanlan.zhihu.com/p/32089487 (write-up)
    https://link.zhihu.com/?target=https%3A//github.com/junxiaosong/AlphaZero_Gomoku (code)
For 4-in-a-row on a 6x6 board, roughly 500~1000 self-play games (about 2 hours) are enough to train a fairly reliable AI.
For 5-in-a-row on an 8x8 board, it takes roughly 2000~3000 self-play games (about 2 days).
MCTS (Monte Carlo Tree Search) is a family of tree-search algorithms that can handle huge search spaces fairly effectively.
It requires the game to be zero-sum (a winner can be decided), fully observable (information is public), deterministic, sequential, and discrete.
AlphaGo is also based on MCTS but adds many optimizations (see http://www.algorithmdog.com/alphago-zero-notes): it replaces the classic UCB rule,
uses the policy network output in place of the parent visit count, uses the child visit count as the denominator to guarantee exploration,
changes the Q value to the mean over all leaf values obtained from the fast rollout network,
and upgrades the network from the earlier CNN to the newer ResNet.
Notes:
After 300 training batches: loss about 4.5, entropy about 3.8, explain_var about 0.04
After 900 training batches: loss about 3.9~4.2, entropy about 3.3, explain_var negative? (once the -1 draws are removed, explain_var rises immediately?)
Optimizations:
1. Use a .yaml config file for the logging module: easier to toggle console output, configure training parameters, and generally more convenient; (OK)
2. Use human SGF game records, which cuts training-data generation time by an order of magnitude and improves data quality; extended to the standard 15x15 board; (OK)
3. During evaluation use a larger n_playout for the opponent and record the game for later replay; add the @print_time decorator to log running time. An AI model used as the opponent answers identical positions identically, with no exploration in move selection; (OK)
Q:
To improve:
1. Multi-processing: one process could handle both self-play and training while four other processes only do self-play; feeding data should get its own process (stored and read through a Queue) so the GPU does not keep stopping to wait. Probably unnecessary; try it on Google Colab instead.
2. On why the GPU is not faster than the CPU: first, a large part of AlphaZero training has to run on the CPU anyway, and frequently moving data between CPU and GPU carries its own overhead (DeepMind used many TPUs).
Second, the board is small and the network used here is shallow, so the gain from running the forward pass on the GPU is probably cancelled out by the extra data-transfer cost.
3. Record the evaluation moves: since they come from 1000 MCTS playouts, these higher-quality moves could also be used for training; the main bottleneck is generating training data. (Make sure training data are not duplicated!)
4. Display results continuously in one figure: avoid redrawing each time, and record the move count.
5. The recursive MCTS implementation is slow; could it be rewritten with a queue to improve efficiency? Study MCTS further.
"""
from __future__ import print_function
from mytoolkit import print_time, load_config
import logging.config
# logging设置只能执行一次,要确保最先执行,在其它包含文件之前,否则包含文件会WARNING及以上才会记录。
# The logging configuration may only be applied once; make sure it runs first,
# before the other project imports, otherwise those modules would only record WARNING and above.
logging.config.dictConfig(load_config('./conf/train_config.yaml')['train_logging'])
# Use the current module name so that log records are easy to locate.
import random, os
import numpy as np
from collections import defaultdict, deque
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
# from policy_value_net import PolicyValueNet # Theano and Lasagne
# from policy_value_net_pytorch import PolicyValueNet # Pytorch
# from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
from policy_value_net_keras import PolicyValueNet # Keras
from queue import Queue
import threading
from subprocess import *
from mcts_yinxin import monitor_yixin_response, YixinPlayer
class TrainPipeline():
def __init__(self, conf, init_model=None, best_model="best_policy.model"):
# params of the board and the game
        self.board_width = conf['board_width']    # board size, e.g. 6x6
        self.board_height = conf['board_height']
        self.n_in_row = conf['n_in_row']   # e.g. 4 in a row on a 6x6 board
self.board = Board(width=self.board_width,
height=self.board_height,
n_in_row=self.n_in_row)
self.game = Game(self.board)
# training params
self.learn_rate = conf['learn_rate']
        self.lr_multiplier = conf['lr_multiplier']     # used to adjust the learning rate adaptively
        self.temp = conf['temp']  # temperature parameter in (0, 1]; controls the exploration level (whether to try new moves)
        self.n_playout = conf['n_playout']  # by default the AI runs only 400 simulations per move
self.c_puct = conf['c_puct'] # ?
self.buffer_size = conf['buffer_size']
self.batch_size = conf['batch_size'] # mini-batch size for training
        self.data_buffer = deque(maxlen=self.buffer_size)      # data_buffer is a deque with maxlen set; once full, new samples push out the oldest ones
self.play_batch_size = conf['play_batch_size']
self.epochs = conf['epochs'] # num of train_steps for each update
        self.kl_targ = conf['kl_targ']  # KL divergence target
        self.check_freq = conf['check_freq']    # evaluate performance after every check_freq (e.g. 50) training batches
        self.game_batch_num = conf['game_batch_num']   # total number of training batches
        self.best_win_ratio = 0.2   # the current best win ratio, starting from 0
        self.pure_mcts_playout_num = conf['pure_mcts_playout_num']     # pure MCTS opponent: start from 1000 MC playouts per move
if init_model:
# start training from an initial policy-value net
self.policy_value_net = PolicyValueNet(self.board_width,
self.board_height,
model_file=init_model)
else:
# start training from a new policy-value net
self.policy_value_net = PolicyValueNet(self.board_width,
self.board_height)
        # use our own trained MCTS player as the self-play opponent; note is_selfplay = 1 !!!
self.mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
c_puct=self.c_puct,
n_playout=self.n_playout,
is_selfplay=1)
def show_mcts(self):
_logger.info("Game info: %d*%d*%d" % (self.board_width, self.board_height, self.n_in_row))
_logger.info("pure_mcts_playout_num= %d" % self.pure_mcts_playout_num)
_logger.info("best_win_ration= %4.3f" % self.best_win_ratio)
_logger.info("play_batch_size= %d" % self.play_batch_size)
_logger.info("buffer_size= %d" % self.buffer_size)
_logger.info("batch_size= %d" % self.batch_size)
_logger.info("learn_rate= %4.3f*%4.3f" % (self.learn_rate, self.lr_multiplier))
def get_equi_data(self, play_data):
        # data augmentation: each position has 8 equivalent forms (rotations and mirror flips) with the same outcome
extend_data = []
for state, mcts_porb, winner in play_data:
for i in [1, 2, 3, 4]:
# rotate counterclockwise
equi_state = np.array([np.rot90(s, i) for s in state])
equi_mcts_prob = np.rot90(np.flipud(
mcts_porb.reshape(self.board_height, self.board_width)), i)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
# flip horizontally
equi_state = np.array([np.fliplr(s) for s in equi_state])
equi_mcts_prob = np.fliplr(equi_mcts_prob)
extend_data.append((equi_state,
np.flipud(equi_mcts_prob).flatten(),
winner))
return extend_data
def collect_selfplay_data(self, n_games=1, train_data=[]):
        # generate training data: three possible sources
        epi_len = 0
        for i in range(n_games):    # several games can be generated at once; train_data must then hold several games too, or switch to a generator!
            if len(train_data) != 0:
                # use pre-generated game records:
                winner, play_data = self.game.start_from_train_data(self.mcts_player, train_data[i], is_shown=0)
            else:
                # generated by the model playing against itself:
                winner, play_data = self.game.start_self_play(self.mcts_player, temp=self.temp)
            play_data = list(play_data)[:]
            # total number of moves in this game; each step in play_data holds 4 board planes + the move-probability matrix + the winner (1, -1)
            epi_len += len(play_data)
            # data augmentation:
            play_data = self.get_equi_data(play_data)       # total number of samples becomes 8x:
            self.data_buffer.extend(play_data)      # data_buffer grows accordingly:
self.episode_len = epi_len
return
def policy_update(self):
        # update the policy-value net and check how well it predicts
        # input data: (state, mcts_prob, winner_z)
        # randomly sample batch_size items from the buffer, so steps of the same game may be spread
        # over different mini-batches until they are pushed out by newer data.
        # random sampling also keeps the 8 augmented copies of each step from ending up together.
mini_batch = random.sample(self.data_buffer, self.batch_size)
        state_batch = [data[0] for data in mini_batch]          # i.e. the first column of data_buffer
mcts_probs_batch = [data[1] for data in mini_batch]
winner_batch = [data[2] for data in mini_batch]
old_probs, old_v = self.policy_value_net.policy_value(state_batch)
for i in range(self.epochs):
loss, entropy = self.policy_value_net.train_step(
state_batch,
mcts_probs_batch,
winner_batch,
self.learn_rate*self.lr_multiplier)
new_probs, new_v = self.policy_value_net.policy_value(state_batch)
kl = np.mean(np.sum(old_probs * (
np.log(old_probs + 1e-10) - np.log(new_probs + 1e-10)),
axis=1)
)
if kl > self.kl_targ * 4: # early stopping if D_KL diverges badly: >0.08
_logger.info("early stopping loop(kl=%4.3f is too large) ..." % kl)
break
# _logger.info("%d-%d: policy_value_net loss=%4.3f" % (self.epochs, i+1, loss))
        # dynamically adjust the learning rate
# if kl > self.kl_targ * 2 and self.lr_multiplier > 0.1:
if kl > self.kl_targ * 2 and self.lr_multiplier > 0.05:
self.lr_multiplier /= 1.5
elif kl < self.kl_targ / 2 and self.lr_multiplier < 10:
self.lr_multiplier *= 1.5
explained_var_old = (1 -
np.var(np.array(winner_batch) - old_v.flatten()) /
np.var(np.array(winner_batch)))
explained_var_new = (1 -
np.var(np.array(winner_batch) - new_v.flatten()) /
np.var(np.array(winner_batch)))
        # explained_var reflects how well the value head predicts; ideally it rises gradually within 0~1:
_logger.info("kl=%4.3f, lr_multiplier=%4.3f, loss=%4.3f, "
"entropy=%4.3f, explained_var_old=%4.3f, explained_var_new=%4.3f"
% (kl, self.lr_multiplier, loss, entropy, explained_var_old, explained_var_new))
return loss, entropy
@print_time
def policy_evaluate(self, n_games=10, player=0):
"""
Evaluate the trained policy by playing against the pure MCTS player
Note: this is only for monitoring the progress of training
        player: 0 (default) uses the pure MCTS player as the opponent, 1 uses Yixin
        n_playout: applies to the pure MCTS player, used as a baseline (very slow when > 2000)
        Note: using our own model as the training opponent is problematic! It answers identical positions identically,
        keeps looping, and shows no improvement .......
"""
current_mcts_player = MCTSPlayer(self.policy_value_net.policy_value_fn,
c_puct=self.c_puct,
n_playout=self.n_playout)
if player == 1:
_logger.info("采用Yinxin进行测评 ...")
qResponses = Queue()
p = Popen('C:/Program Files/Yixin/engine.exe', stdin=PIPE, stdout=PIPE)
            # Note: pass get_answer, not get_answer(), otherwise this would block until get_answer finishes.
            # child_thread = threading.Thread(target=yixin.get_answer, args=(qRequests, qResponses))
            child_thread = threading.Thread(target=monitor_yixin_response, args=(p, qResponses))
            # The program exits once the main thread finishes, whether or not child threads are done.
child_thread.setDaemon(True)
child_thread.start()
pure_mcts_player = YixinPlayer(p, qResponses, timeout=5)
else:
_logger.info("采用MCTS模拟进行测评,pure_mcts_playout为: %d" % self.pure_mcts_playout_num)
pure_mcts_player = MCTS_Pure(c_puct=5, n_playout=self.pure_mcts_playout_num)
win_cnt = defaultdict(int)
_logger.debug("开始对战,测试训练模型的水平 ... ")
timeout = 2
for i in range(n_games):
if player == 1:
if i % 2 == 0:
timeout = timeout * 4
_logger.debug("%d-%d: Yixin timeout = %d" % (n_games, i+1, timeout))
pure_mcts_player = YixinPlayer(p, qResponses, timeout=timeout)
                # pure_mcts_player.first = True # start a new game:
winner = self.game.start_play(current_mcts_player,
pure_mcts_player,
                                          start_player=i % 2,  # alternate who moves first:
is_shown=0)
win_cnt[winner] += 1
_logger.info("%d-%d : winner is player%d (start from player%d)" % (n_games, i+1, winner, i%2+1))
win_ratio = 1.0*(win_cnt[1] + 0.5*win_cnt[-1]) / n_games
_logger.info("Win: %d, Lose: %d, Tie: %d" % (win_cnt[1], win_cnt[2], win_cnt[-1]))
return win_ratio
def run(self):
"""run the training pipeline"""
try:
            # 1. Use human SGF game records: some games were lost on time, some by the first player's double-three (forbidden-move loss)
            # _logger.info("Using data generated from human SGF game records ...")
            # train_data = gen_moves_from_sgf(conf["sgf_dir"])
            # 2. Use game data generated by playing against Yixin:
            # _logger.info("Using the steps.log data generated from games against Yixin ...")
            # train_data = gen_moves_from_yixin("./logs/steps.log")
            # random.shuffle(train_data)
            # temporarily drop drawn games:
            # train_data_without_tie = [[item[0], item[1]] for item in train_data if item[0] != -1]
            # include draws as well:
            # train_data_without_tie = [[0 if item[0] == -1 else item[0], item[1]] for item in train_data]
# batch_num = int(len(train_data_without_tie)/self.play_batch_size)
# episode_len = [len(x[1]) for x in train_data_without_tie]
# _logger.info("len = %d, avg_epi_len = %d, X_train[0] = %s" %
# (len(train_data_without_tie), int(sum(episode_len)/batch_num), train_data_without_tie[0]))
            # 3. Generate training data from self-play; very slow when the board is large!
            train_data = []
            _logger.info("Generating training data through self-play ... ")
batch_num = self.game_batch_num
# for i in range(self.game_batch_num):
for i in range(batch_num):
                # 3-1. Generate training data: several games can be produced at once, play_batch_size is usually 1
                # self.collect_selfplay_data(n_games=self.play_batch_size,
                #           train_data=train_data_without_tie[i*self.play_batch_size:(i+1)*self.play_batch_size])
                # adjust accordingly when using pre-generated data:
                self.collect_selfplay_data(n_games=self.play_batch_size, train_data=[])
_logger.info("batch %d-%d, episode_len=%02d: " % (batch_num, i+1, self.episode_len))
                # data_buffer holds the augmented samples and keeps growing up to 10000; each update randomly samples 512 of them for training.
                # doesn't that mean earlier samples may be selected multiple times?
if len(self.data_buffer) > self.batch_size:
                    # 3-2. Policy update: adjust the model parameters from the training data so the loss decreases. Slow when the board is large!
loss, entropy = self.policy_update()
                # 3-3. Periodically evaluate the model and save its parameters (parameters only, so the file stays small!)
if (i+1) % self.check_freq == 0:
_logger.info("current self-play batch: %d" % (i+1))
                    # save the current model first:
                    self.policy_value_net.save_model('./current_policy.model')
                    _logger.info("Saving the current model to current_policy.model")
                    # play 10 games by default against the MCTS player, half going first and half going second.
win_ratio = self.policy_evaluate(n_games=2, player=1)
                    # after evaluation, decide whether to save as the best_policy model:
if win_ratio > self.best_win_ratio:
self.policy_value_net.save_model('./best_policy.model')
_logger.info("New best policy(%3.2f>%3.2f), save to best_plicy.model!" % (win_ratio, self.best_win_ratio))
self.best_win_ratio = win_ratio
                    # once pure MCTS is completely beaten by our trained model, the pure MCTS AI is upgraded to 2000 simulations
                    # per move, and so on, growing stronger, while our trained AlphaZero model always uses only 400 simulations per move
if self.best_win_ratio == 1.0 and self.pure_mcts_playout_num < 5000:
self.pure_mcts_playout_num += 1000
                        self.best_win_ratio = 0.0      # start over from 0
                        _logger.info("pure_mcts_playout increased to %d" % self.pure_mcts_playout_num)
            # save the current model when training ends:
            self.policy_value_net.save_model('./current_policy.model')
            _logger.info("Training finished, saving the current model to current_policy.model")
except KeyboardInterrupt:
_logger.error('quit')
def content_to_order(sequence):
    # convert game-record letters to integer board indices
LETTER_NUM = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o']
BIG_LETTER_NUM = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O']
NUM_LIST = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    # lookup tables for letter -> position on the board
seq_lookup = dict(zip(LETTER_NUM, NUM_LIST))
num2char_lookup = dict(zip(NUM_LIST, BIG_LETTER_NUM))
seq_list = sequence.split(';')
seq_list = [item[2:4] for item in seq_list]
seq_num_list = [seq_lookup[item[0]]*15+seq_lookup[item[1]] for item in seq_list]
return seq_list, seq_num_list
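# Illustrative example (assuming a record fragment such as 'B[hh];W[ii]'):
#     content_to_order('B[hh];W[ii]') -> (['hh', 'ii'], [112, 128])
# since 'h' maps to 7 and 'i' maps to 8, giving 7*15+7 = 112 and 8*15+8 = 128.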
def gen_moves_from_sgf(sgf_path, refresh=False):
sgf_filelist = os.listdir(sgf_path)
sgf_filelist = [item for item in sgf_filelist if item.endswith('.sgf') and os.path.isfile(os.path.join(sgf_path, item))]
result = []
    if not refresh:     # no need to regenerate
try:
f = open("human_gomoku.txt", "r")
while 1:
p = f.readline().strip()
if len(p) > 0:
onegame = eval(p)
# print(one, type(one))
result.append(onegame)
else:
break
f.close()
# print("result=", result[-1])
return result
except Exception as e:
_logger.error("human_gomoku.txt doesn't exist: %s" % str(e))
_logger.info("generate from %s" % sgf_path)
fw = open("human_gomoku.txt", "w")
for file_name in sgf_filelist:
with open(os.path.join(sgf_path, file_name)) as f:
            p = f.read()  # the file holds a single line
sequence = p[p.index('SZ[15]')+7:-3]
seq_num_list = []
try:
seq_list, seq_num_list = content_to_order(sequence)
                # check for duplicate moves:
                if len(seq_num_list) != len(list(set(seq_num_list))):
                    _logger.warning("%s: duplicate moves - %s" % (file_name, seq_num_list))
continue
# _logger.debug("seq_list=%s, seq_num=%s" % (seq_list, seq_num_list))
except Exception as e:
_logger.error('file=%s, error:%s' % (file_name, str(e)))
exit(0)
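            # the SGF file name marks the result: "黑胜" means black (player 1) won, "白胜" means white (player 2) won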
if "黑胜" in file_name:
winner = 1
if len(seq_num_list) % 2 != 1:
_logger.warning("%s: the winner 1 maybe wrong. " % file_name)
elif "白胜" in file_name:
winner = 2
if len(seq_num_list) % 2 != 0:
_logger.warning("%s: the winner 2 maybe wrong. " % file_name)
else:
winner = -1
            # do we need copy.deepcopy(xxx)? The variables were already re-assigned above!
result.append([winner, seq_num_list])
fw.write(str([winner, seq_num_list]))
fw.write("\n")
# return {'winner': winner, 'seq_list': seq_list, 'seq_num_list': seq_num_list, 'file_name':file_name}
fw.close()
return result
def gen_moves_from_yixin(yixin_file, refresh=False):
fp = open(yixin_file, "r")
result = []
while 1:
line = fp.readline().strip()
# {'steps': [112, 97, 125, 128, 156, 98, 143, 113, 99, 129, 81, 130, 131, 114, 146, 100, 86, 83, 68, 145, 161,
# 115, 85, 160], 'winner': 2, 'start_player': 2}
if len(line) > 0:
step = eval(line)["steps"]
winner = eval(line)["winner"]
# 平局暂时不添加:
if winner != -1:
result.append(step)
else:
break
    # remove duplicate games:
no_dup = np.unique(result)
result = list(map(lambda x: [2 - len(x) % 2, x], no_dup))
_logger.info("Yixin steps.log totol_len = %d" % len(result))
return result
if __name__ == '__main__':
_logger.info("Training is begining ...")
conf = load_config('./conf/train_config.yaml')
training_pipeline = TrainPipeline(conf, init_model="current_policy.model")
    # training_pipeline = TrainPipeline(conf, init_model=None)  # first-time training
    training_pipeline.show_mcts()      # show configuration info
    # model evaluation:
    # 1. Use pure MCTS as the baseline: pure_mcts_playout_num needs to be at least 3000, very slow!
# training_pipeline.pure_mcts_playout_num = 2000
# training_pipeline.policy_evaluate(n_games=2, player=0)
    # 2. Play against Yixin: can also be used to generate game records (note: results may be completely identical)
# training_pipeline.policy_evaluate(n_games=10, player=1)
    training_pipeline.run()     # start training
| 2.40625 | 2 |
tests/mail/test_sendtestemail.py | bpeschier/django | 0 | 12787242 | from __future__ import unicode_literals
from django.core import mail
from django.core.management import call_command
from django.test import SimpleTestCase
class SendTestEmailManagementCommand(SimpleTestCase):
"""
Test the sending of a test email using the `sendtestemail` command.
"""
def test_send_test_email(self):
"""
The mail is sent with the correct subject and recipient.
"""
recipient = "<EMAIL>"
call_command("sendtestemail", recipient)
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(mail_message.recipients(), [recipient])
def test_send_test_email_with_multiple_addresses(self):
"""
The mail may be sent with multiple recipients.
"""
recipients = ["<EMAIL>", "<EMAIL>"]
call_command("sendtestemail", recipients[0], recipients[1])
self.assertEqual(len(mail.outbox), 1)
mail_message = mail.outbox[0]
self.assertEqual(mail_message.subject[0:15], 'Test email from')
self.assertEqual(mail_message.recipients(), recipients)
| 2.625 | 3 |
src/engine/datastore/models/section.py | thomasmauerhofer/search-engine | 0 | 12787243 | #!/usr/bin/env python3
# encoding: utf-8
import pprint
from enum import Enum
from engine.datastore.models.paper_structure import PaperStructure
from engine.datastore.models.text import Text
from engine.preprocessing.text_processor import TextProcessor
from engine.utils.objects.word_hist import WordHist
class Section(PaperStructure):
def __init__(self, data):
self.heading_raw = data.get('heading_raw')
self.heading_proceed = data.get('heading_proceed') if 'heading_proceed' in data else \
TextProcessor.proceed_string(data.get('heading_raw'))
self.section_type = SectionType[data.get('section_type')]
self.imrad_types = [IMRaDType[imrad_type] for imrad_type in data.get('imrad_types')] if 'imrad_types' in data else []
self.text = [Text(text) for text in data.get('text')] if 'text' in data else []
self.subsections = [Section(subsection) for subsection in data.get('subsections')] if 'subsections' in data else []
self.word_hist = WordHist(data.get('word_hist')) if "word_hist" in data else WordHist()
def __str__(self):
pp = pprint.PrettyPrinter(indent=4)
return pp.pformat(self.to_dict())
def to_dict(self):
data = {'section_type': self.section_type.name, 'heading_raw': self.heading_raw, 'heading_proceed': self.heading_proceed,
'text': [], 'subsections': [], 'imrad_types': [], 'word_hist': self.word_hist}
for text in self.text:
data['text'].append(text.to_dict())
for subsection in self.subsections:
data['subsections'].append(subsection.to_dict())
for imrad_type in self.imrad_types:
data['imrad_types'].append(imrad_type.name)
return data
def get_combined_word_hist(self):
if not self.word_hist:
for word in self.heading_proceed.split():
self.word_hist[word] = self.word_hist[word] + 1 if word in self.word_hist else 1
for text in self.text:
for word in text.text_proceed.split():
self.word_hist[word] = self.word_hist[word] + 1 if word in self.word_hist else 1
ret = WordHist(self.word_hist.copy())
for subsection in self.subsections:
ret.append(subsection.get_combined_word_hist())
return ret
def add_text_object(self, text_type, text_raw):
if len(self.subsections):
self.subsections[-1].add_text_object(text_type, text_raw)
else:
self.text.append(Text({"text_type": text_type.name, "text_raw": text_raw}))
def add_subsection(self, section_type, heading):
self.subsections.append(Section({'section_type': section_type.name, 'heading_raw': heading}))
def add_to_imrad(self, imrad_type):
if not any(imrad_type is x for x in self.imrad_types) and \
                (not (self.heading_raw.isspace() or self.heading_raw == '')):
self.imrad_types.append(imrad_type)
for subsection in self.subsections:
subsection.add_to_imrad(imrad_type)
def title_exist(self):
return bool(self.heading_proceed)
def text_exist(self):
return any([text for text in self.text if text.text_proceed])
class SectionType(Enum):
ABSTRACT = 1
SECTION = 2
SUBSECTION = 3
SUBSUBSECTION = 4
class IMRaDType(Enum):
ABSTRACT = 0
INTRODUCTION = 1
BACKGROUND = 2
METHODS = 3
RESULTS = 4
DISCUSSION = 5
ACKNOWLEDGE = 6
| 2.5 | 2 |
envdsys/envcontacts/models.py | NOAA-PMEL/envDataSystem | 1 | 12787244 | from django.db import models
# Create your models here.
class Contact(models.Model):
street_address = models.CharField(
max_length=100,
null=True,
blank=True
)
city = models.CharField(
max_length=30,
null=True,
blank=True
)
state = models.CharField(
max_length=30,
null=True,
blank=True
)
postal_code = models.CharField(
max_length=20,
null=True,
blank=True
)
country = models.CharField(
max_length=30,
null=True,
blank=True
)
website = models.URLField(null=True, blank=True)
class Meta():
abstract = True
class Person(Contact):
first_name = models.CharField(max_length=50, null=True, blank=True)
last_name = models.CharField(max_length=50, null=True, blank=True)
organization = models.ForeignKey(
'envcontacts.Organization',
on_delete=models.SET_NULL,
null=True,
blank=True
)
MOBILE = 'M'
HOME = 'H'
WORK = 'W'
OTHER = 'O'
    PHONE_TYPE_CHOICES = (
        (MOBILE, 'Mobile'),
        (HOME, 'Home'),
        (WORK, 'Work'),
        (OTHER, 'Other'),
    )
phone1 = models.CharField(max_length=15, null=True, blank=True)
phone1_type = models.CharField(
max_length=1,
choices=PHONE_TYPE_CHOICES,
default=MOBILE,
)
phone2 = models.CharField(max_length=15, null=True, blank=True)
phone2_type = models.CharField(
max_length=1,
choices=PHONE_TYPE_CHOICES,
default=MOBILE,
)
    EMAIL_TYPE_CHOICES = (
        (HOME, 'Home'),
        (WORK, 'Work'),
        (OTHER, 'Other'),
    )
email1 = models.EmailField(null=True, blank=True)
email1_type = models.CharField(
max_length=1,
choices=EMAIL_TYPE_CHOICES,
default=WORK,
)
email2 = models.EmailField(null=True, blank=True)
email2_type = models.CharField(
max_length=1,
choices=EMAIL_TYPE_CHOICES,
default=WORK,
)
class Meta():
verbose_name_plural='People'
def __str__(self):
name = ''
if self.last_name is not None:
name = self.last_name
if self.first_name is not None:
name += ', ' + self.first_name
elif self.first_name is not None:
name = self.first_name
return name
class Organization(Contact):
name = models.CharField(
max_length=50,
null=True,
blank=True,
help_text='Enter short name for labels and ID',
)
long_name = models.CharField(
max_length=100,
null=True,
blank=True,
help_text='Enter full name of organization',
)
parent_org = models.ForeignKey(
'self',
on_delete=models.SET_NULL,
null=True,
blank=True
)
MOBILE = 'M'
HOME = 'H'
WORK = 'W'
OTHER = 'O'
    PHONE_TYPE_CHOICES = (
        (MOBILE, 'Mobile'),
        (HOME, 'Home'),
        (WORK, 'Work'),
        (OTHER, 'Other'),
    )
phone = models.CharField(max_length=15, null=True, blank=True)
email = models.EmailField(null=True, blank=True)
def __str__(self):
if self.name is not None:
return self.name
elif self.long_name is not None:
return self.long_name
return 'empty'
# class Organization(models.Model):
#
# name = models.CharField(
# max_length=50,
# )
#
# address = models.CharField(
# max_length=100,
# null=True,
# blank=True,
# )
#
# website = models.URLField(null=True, blank=True)
#
# phone = models.CharField(max_length=20, null=True, blank=True)
#
# def __str__(self):
# '''String representation of Organization object. '''
# return self.name
#
#
# # class Manufacturer(Organization):
# # pass
# # # contacts from Person
# #
# # # def __str__(self):
# # # '''String representation of Manufacturer object. '''
# # # return self.name
#
#
# # can we attach this to User?
# class Person(models.Model):
#
# first_name = models.CharField(max_length=20)
# last_name = models.CharField(max_length=20)
#
# email = models.EmailField(null=True, blank=True)
# phone = models.CharField(max_length=20)
#
# class Meta:
# verbose_name_plural = "People"
#
# affiliation = models.ForeignKey(
# 'Organization',
# on_delete=models.SET_NULL,
# null=True,
# blank=True
# )
#
# def __str__(self):
# '''String representation of Person object. '''
# return f'{self.last_name},{self.first_name}'
| 2.25 | 2 |
cats/v2/config.py | AdamBrianBright/cats-python | 2 | 12787245 | <filename>cats/v2/config.py
import asyncio
from dataclasses import dataclass
from typing import Type
from tornado.iostream import StreamClosedError
from cats.errors import CatsError
from cats.v2.handshake import Handshake
__all__ = [
'Config',
]
@dataclass
class Config:
idle_timeout: float | int = 120.0
input_timeout: float | int = 120.0
input_limit: int = 5
debug: bool = False
max_plain_payload: int = 16 * 1024 * 1024
    stream_errors: Type[Exception] | tuple[Type[Exception], ...] = (
asyncio.TimeoutError,
asyncio.CancelledError,
asyncio.InvalidStateError,
StreamClosedError,
)
    ignore_errors: Type[Exception] | tuple[Type[Exception], ...] = (
*stream_errors,
CatsError,
KeyboardInterrupt,
)
handshake: Handshake | None = None
| 2.1875 | 2 |
pred_to_eval/excel.py | FluteXu/ms-project | 0 | 12787246 | import os
import pandas as pd
import sys
sys.path.insert(0, '../')
from LGAIresult import LGAIresult
from utils.common_utils import save_split_txt, load_split_txt, write_excel
def out_excel(ann_list, time_list, pid_list, save_dir):
time_dict = get_dict(time_list)
pid_dict = get_dict(pid_list, '\t')
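    # The Chinese column names are the finding categories produced upstream
    # (气胸 = pneumothorax, 胸腔积液 = pleural effusion, 肋骨骨折 = rib fracture, etc.)
    # and must stay in Chinese so they match the bound 'category' values used below.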
cols = ['patient_id', 'create_time', '气胸', '气胸位置', '胸腔积液', '积液位置','肋骨骨折', '骨折位置', '结节',\
'条索影', '网格影', '实变影', '磨玻璃密度影', '肺大疱', '肺气肿', '胸膜增厚']
excel = []
for ann_dict in ann_list:
cols_dict = get_excel_info_for_bounds(ann_dict['bounds_info'], cols)
cols_dict['patient_id'] = pid_dict[ann_dict['sub_dir'].split('/')[0]]
cols_dict['create_time'] = time_dict[ann_dict['sub_dir']]
current = []
for key, value in cols_dict.items():
current.append(value)
excel.append(current)
out_df = pd.DataFrame(excel, columns=cols)
write_excel(out_df, save_dir, file_name='njjz.xlsx')
def get_dict(list_path, parse_str=' '):
with open(list_path) as f:
lines = f.readlines()
time_dict = {}
for line in lines:
time_dict[line.split(parse_str)[0]] = line.strip().split(parse_str)[1]
return time_dict
def get_excel_info_for_bounds(bounds_info, cols):
cols_dict = {item: 0 for item in cols}
mapping = {"right": "R", "left": "L"}
location = {'胸腔积液': [], '气胸': [], '肋骨骨折': []}
for bound in bounds_info:
category = bound[0]['category']
cols_dict[category] = 1
if category in ['胸腔积液', '气胸', '肋骨骨折']:
location[category].append(bound[0]['location'])
cols_dict['积液位置'] = ''.join(set(location['胸腔积液']))
cols_dict['气胸位置'] = ''.join(set(location['气胸']))
cols_dict['骨折位置'] = ', '.join(set(location['肋骨骨折']))
return cols_dict
if __name__ == "__main__":
root_dir = '/data/shuzhang/tmp_data/njjz_nm/'
sub_dir_list = '/data/shuzhang/tmp_data/njjz_sub_dirs.txt'
time_list = '/data/shuzhang/tmp_data/njjz_sub_dirs_w_dates.txt'
pid_list = '/data/shuzhang/tmp_data/pid.txt'
save_dir = '/data/shuzhang/tmp_data/'
lg_ai = LGAIresult()
lg_ai.init(root_dir, sub_dir_list)
lg_ai.get_ann_list()
#print(lg_ai.ann_list[0]['bounds_info'][0])
out_excel(lg_ai.ann_list, time_list, pid_list, save_dir)
| 2.515625 | 3 |
benchtmpl/workflow/benchmark/loader.py | scailfin/benchmark-templates | 0 | 12787247 | <filename>benchtmpl/workflow/benchmark/loader.py
# This file is part of the Reproducible Open Benchmarks for Data Analysis
# Platform (ROB).
#
# Copyright (C) 2019 NYU.
#
# ROB is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Implementation of the template loader for benchmark workflow templates. A
benchmark template has one additional element in its serialization that contains
the specification of the result schema.
"""
from benchtmpl.workflow.benchmark.base import BenchmarkTemplate
from benchtmpl.workflow.benchmark.schema import BenchmarkResultSchema
from benchtmpl.workflow.template.loader import DefaultTemplateLoader
import benchtmpl.error as err
"""Additional top-level elements of dictionary serialization for benchmark
template handles.
"""
LABEL_RESULTS = 'results'
class BenchmarkTemplateLoader(DefaultTemplateLoader):
"""Implementation of the template loader for benchmark workflow templates.
"""
def from_dict(self, doc, identifier=None, base_dir=None, validate=True):
"""Create an instance of the benchmark template from a dictionary
serialization. Expects a dictionary that contains the three top-level
elements of the template handle plus the 'result' schema.
Parameters
----------
        doc: dict
Dictionary serialization of a workflow template
identifier: string, optional
Unique template identifier. This value will override the value in
the document.
base_dir: string, optional
Optional path to directory on disk that contains static files that
are required to run the represented workflow. This value will
override the value in the document.
validate: bool, optional
Flag indicating if given template parameter declarations are to be
validated against the parameter schema or not.
Returns
-------
benchtmpl.workflow.benchmark.base.BenchmarkTemplate
Raises
------
benchtmpl.error.InvalidTemplateError
benchtmpl.error.UnknownParameterError
"""
# Ensure that the mandatory elements are present
        if LABEL_RESULTS not in doc:
raise err.InvalidTemplateError('missing element \'{}\''.format(LABEL_RESULTS))
# Get handle for workflow template from super class
template = super(BenchmarkTemplateLoader, self).from_dict(
doc=doc,
identifier=identifier,
base_dir=base_dir,
validate=validate
)
# Get schema object from serialization
try:
schema = BenchmarkResultSchema.from_dict(doc[LABEL_RESULTS])
except ValueError as ex:
raise err.InvalidTemplateError(str(ex))
return BenchmarkTemplate(
identifier=template.identifier,
base_dir=template.base_dir,
workflow_spec=template.workflow_spec,
parameters=template.parameters.values(),
schema=schema
)
def to_dict(self, template):
"""Get dictionary serializationfor the wrokflow template.
Parameters
----------
template: benchtmpl.workflow.benchmark.base.BenchmarkTemplate
Expects an instance of a benchmark template handle
Returns
-------
dict
"""
# Add serialization of schema to the serialization of the super class
obj = super(BenchmarkTemplateLoader, self).to_dict(template)
obj[LABEL_RESULTS] = template.schema.to_dict()
return obj
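# Illustrative round trip (assuming ``doc`` is a valid benchmark template document):
#     loader = BenchmarkTemplateLoader()
#     template = loader.from_dict(doc, validate=True)
#     serialized = loader.to_dict(template)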
| 2.34375 | 2 |
deepnet/impute.py | smoitra87/deepnet | 0 | 12787248 | <gh_stars>0
"""Computes partition function for RBM-like models using Annealed Importance Sampling."""
import numpy as np
from deepnet import dbm
from deepnet import util
from deepnet import trainer as tr
from choose_matrix_library import *
import sys
import numpy as np
import pdb
import time
import itertools
import matplotlib.pyplot as plt
from deepnet import visualize
import deepnet
import scipy.io as sio
def LogMeanExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).mean())
def LogSumExp(x):
offset = x.max()
return offset + np.log(np.exp(x-offset).sum())
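# Both helpers shift by the max before exponentiating (the log-sum-exp trick), so,
# for example, LogSumExp(np.array([1000.0, 1000.0])) evaluates to 1000 + log(2)
# instead of overflowing inside np.exp.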
def Display(w, hid_state, input_state, w_var=None, x_axis=None):
w = w.asarray().flatten()
plt.figure(1)
plt.clf()
plt.hist(w, 100)
visualize.display_hidden(hid_state.asarray(), 2, 'activations', prob=True)
# plt.figure(3)
# plt.clf()
# plt.imshow(hid_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
# plt.figure(4)
# plt.clf()
# plt.imshow(input_state.asarray().T, cmap=plt.cm.gray, interpolation='nearest')
#, state.shape[0], state.shape[1], state.shape[0], 3, title='Markov chains')
# plt.tight_layout(pad=0, w_pad=0, h_pad=0)
# plt.figure(5)
# plt.clf()
# plt.suptitle('Variance')
# plt.plot(np.array(x_axis), np.array(w_var))
# plt.draw()
def impute_dbm_ais(model):
"""Run approximate pll using AIS on a DBM """
def impute_rbm_gaussian_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('bernoulli_hidden1')
bern2_hidden_layer = model.GetLayerByName('bernoulli2_hidden1')
gaussian_layer = model.GetLayerByName('gaussian_hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchzeroslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
datasize_squared = cm.CUDAMatrix(np.zeros([batchsize, batchsize]))
datasize_eye = cm.CUDAMatrix(np.eye(batchsize))
datasize_eye2 = cm.CUDAMatrix(np.eye(batchsize))
if hidden_layer:
hidden_bias = hidden_layer.params['bias']
bedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli_hidden1')
w = bedge.params['weight']
if bern2_hidden_layer:
bern2_hidden_bias = bern2_hidden_layer.params['bias']
bedge2 = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'bernoulli2_hidden1')
w2 = bedge2.params['weight']
if 'bias' in input_layer.params:
input_bias = input_layer.params['bias']
if gaussian_layer:
gedge = next(e for e in model.edge if e.node1.name == 'input_layer' \
and e.node2.name == 'gaussian_hidden1')
gw = gedge.params['weight']
input_diag = input_layer.params['diag']
diag_val = input_diag.sum() / (input_layer.dimensions * input_layer.numlabels)
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
batchslice.assign(batchzeroslice)
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
if hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
if bern2_hidden_layer:
# Add the contributions from bernoulli hidden layer
cm.dot(w2.T, input_layer.state, target=bern2_hidden_layer.state)
bern2_hidden_layer.state.add_col_vec(bern2_hidden_bias)
cm.log_1_plus_exp(bern2_hidden_layer.state)
batchslice.add_sums(bern2_hidden_layer.state, axis=0)
if 'bias' in input_layer.params:
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
if gaussian_layer:
# Add contributions from gaussian hidden layer
cm.dot(gw.T, input_layer.state, target=gaussian_layer.state)
cm.dot(gaussian_layer.state.T, gaussian_layer.state, target= datasize_squared)
datasize_squared.mult(datasize_eye, target=datasize_eye2)
datasize_eye2.sum(axis=0, target=batchslice2)
# Add constants from gaussian hidden layer
integration_constant = gaussian_layer.dimensions * np.log(2*np.pi)
integration_constant += input_layer.dimensions * diag_val
batchslice2.add(integration_constant)
batchslice2.mult(0.5)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray() )/ (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
# Undo rehapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
zeroslice.free_device_memory()
onesrow.free_device_memory()
batchslice.free_device_memory()
return pll_cpu, imperr_cpu
def impute_rbm_exact(model):
""" run exact exact pll and imputation error on an rbm """
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layer = model.GetLayerByName('hidden1')
# Get input layer features
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
layer.bar = layer.deriv
zeroslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
onesrow = cm.CUDAMatrix(np.ones([1,\
batchsize]))
batchslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
batchslice2 = cm.CUDAMatrix(np.zeros([1, batchsize]))
hidden_bias = hidden_layer.params['bias']
input_bias = input_layer.params['bias']
edge = model.edge[0]
w = edge.params['weight']
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
dim_offset = dim_idx * numlabels
for label_idx in range(numlabels):
#Assign state value
label_offset = dim_idx * numlabels + label_idx
input_layer.state.set_row_slice(dim_offset, dim_offset + numlabels, \
zeroslice)
input_layer.state.set_row_slice(label_offset, label_offset+1, onesrow)
cm.dot(w.T, input_layer.state, target=hidden_layer.state)
hidden_layer.state.add_col_vec(hidden_bias)
cm.log_1_plus_exp(hidden_layer.state)
hidden_layer.state.sum(axis=0, target=batchslice)
cm.dot(input_bias.T, input_layer.state, target=batchslice2)
batchslice.add_row_vec(batchslice2)
input_layer.foo.set_row_slice(label_offset, label_offset+1, batchslice)
# Apply softmax on log Z_v as energies
input_layer.foo.reshape((numlabels, dimensions * batchsize))
input_layer.foo.apply_softmax()
data.reshape((1, dimensions * batchsize))
# Calculate Imputation Error
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_correct(data, target=input_layer.batchsize_temp)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
  imperr_cpu = (dimensions - input_layer.batchsize_temp.sum(axis=0).asarray()) / (0. + dimensions)
# Calculate Pseudo ll
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
input_layer.foo.get_softmax_cross_entropy(data, target=input_layer.batchsize_temp, \
tiny=input_layer.tiny)
input_layer.batchsize_temp.reshape((dimensions, batchsize))
pll_cpu = - input_layer.batchsize_temp.sum(axis=0).asarray()
  # Undo reshapes
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
  zeroslice.free_device_memory()
  onesrow.free_device_memory()
  batchslice.free_device_memory()
  batchslice2.free_device_memory()
return pll_cpu, imperr_cpu
def impute_mf(model, mf_steps, hidden_mf_steps, **opts):
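  """Estimate pseudo-log-likelihood and imputation error with mean-field inference.

  For each visible dimension its softmax column is replaced by a uniform
  distribution, mean-field updates are run (mf_steps passes, each with
  hidden_mf_steps sweeps over the hidden layers), and the reconstruction is
  scored against the data. If 'blosum90' is present in opts, imputation is
  scored with the BLOSUM90 substitution score instead of exact matches.
  """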
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
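    """Reshape state, foo and data so softmax ops see one column per
    (dimension, example) pair; enter=False restores the original layout."""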
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for dim_idx in range(dimensions):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
# Run MF steps
for mf_idx in range(mf_steps):
for hid_mf_idx in range(hidden_mf_steps):
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
input_layer.state.get_row_slice(offset, offset + numlabels , \
target=input_layer.fooslice)
input_layer.GetData()
input_layer.state.set_row_slice(offset, offset + numlabels , \
input_layer.fooslice)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (dimensions+0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def multicol_mf(model, multicols, **opts):
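  """Like impute_mf, but hides whole blocks of columns at once.

  multicols is an (nBlocks, nCols) matrix of dimension indices: for each block
  the listed columns are replaced by uniform distributions, a single mean-field
  up/down pass is run, and PLL / imputation error are accumulated over those
  columns. If 'blosum90' is present in opts, imputation is scored with the
  BLOSUM90 substitution score instead of exact matches.
  """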
# Initialize stuff
batchsize = model.batchsize
input_layer = model.GetLayerByName('input_layer')
hidden_layers = []
for layer in model.layer:
if not layer.is_input:
hidden_layers.append(layer)
dimensions = input_layer.dimensions
numlabels = input_layer.numlabels
data = input_layer.data
# set up temp data structures
for layer in model.layer:
layer.foo = layer.statesize
input_layer.fooslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.barslice = cm.CUDAMatrix(np.zeros([1, batchsize]))
pll = cm.CUDAMatrix(np.zeros([1, batchsize]))
imputation_err = cm.CUDAMatrix(np.zeros([1, batchsize]))
input_layer.biasslice = cm.CUDAMatrix(np.zeros([input_layer.numlabels,\
batchsize]))
input_layer.biasslice.apply_softmax()
# Get the multicol dimensions
nBlocks, nCols = multicols.shape
# INITIALIZE TO UNIFORM RANDOM for all layers except clamped layers
for layer in model.layer:
layer.state.assign(0)
layer.ApplyActivation()
def reshape_softmax(enter=True):
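    """Reshape state, foo and data so softmax ops see one column per
    (dimension, example) pair; enter=False restores the original layout."""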
if enter:
input_layer.state.reshape((numlabels, dimensions * batchsize))
input_layer.foo.reshape((numlabels, dimensions * batchsize))
data.reshape((1, dimensions * batchsize))
input_layer.batchsize_temp.reshape((1, dimensions * batchsize))
else:
input_layer.state.reshape((numlabels * dimensions, batchsize))
input_layer.foo.reshape((numlabels * dimensions, batchsize))
data.reshape((dimensions, batchsize))
input_layer.batchsize_temp.reshape((dimensions, batchsize))
# RUN Imputation Error
for mult_idx in range(nBlocks):
#-------------------------------------------
# Set state of input variables
input_layer.GetData()
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
offset = dim_idx * numlabels
input_layer.state.set_row_slice(offset, offset + numlabels, \
input_layer.biasslice)
for layer in model.layer:
if not layer.is_input:
layer.state.assign(0)
for layer in hidden_layers:
model.ComputeUp(layer, train=False, compute_input=False, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
model.ComputeUp(input_layer, train=False, compute_input=True, step=0,
maxsteps=0, use_samples=False, neg_phase=False)
# Calculate pll
reshape_softmax(enter=True)
input_layer.state.get_softmax_cross_entropy(data,\
target=input_layer.batchsize_temp, tiny=input_layer.tiny)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
pll.add_sums(input_layer.barslice, axis=0)
# Calculate imputation error
if 'blosum90' in opts:
reshape_softmax(enter=True)
input_layer.state.get_softmax_blosum90(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0)
else:
reshape_softmax(enter=True)
input_layer.state.get_softmax_correct(data, target=input_layer.batchsize_temp)
reshape_softmax(enter=False)
for col_idx in range(nCols):
dim_idx = multicols[mult_idx, col_idx]
input_layer.batchsize_temp.get_row_slice(dim_idx, dim_idx + 1 , \
target=input_layer.barslice)
imputation_err.add_sums(input_layer.barslice, axis=0, mult=-1.)
imputation_err.add(1.)
#--------------------------------------
# free device memory for newly created arrays
pll_cpu = -pll.asarray()
imperr_cpu = imputation_err.asarray()
imperr_cpu /= (nBlocks * nCols +0.)
input_layer.fooslice.free_device_memory()
input_layer.biasslice.free_device_memory()
input_layer.barslice.free_device_memory()
pll.free_device_memory()
imputation_err.free_device_memory()
return pll_cpu, imperr_cpu
def Usage():
  print '%s <model file> <number of Markov chains to run> [number of words (for Replicated Softmax models)]' % __file__
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(description="Run AIS")
parser.add_argument("--model_file", type=str)
parser.add_argument("--train_file", type=str)
parser.add_argument("--infer-method", type=str, default='exact', \
help='mf/gibbs/exact/gaussian_exact')
parser.add_argument("--mf-steps", type=int, default=1)
parser.add_argument("--hidden-mf-steps", type=int, default=1)
parser.add_argument("--outf", type=str, help='Output File')
parser.add_argument("--valid_only", action='store_true', help="only run the validation set")
parser.add_argument("--blosum90", action='store_true', help="Calculate blosum90 scores")
parser.add_argument("--ncols", type=int, help="Number of multiple columns")
parser.add_argument("--multmode", type=str, help="Multicol mode",default='rand')
args = parser.parse_args()
  if not args.outf:
    raise ValueError('Output file not defined')
  if not args.train_file or not args.model_file:
    raise ValueError('Models and data missing')
board = tr.LockGPU()
model_file = args.model_file
train_file = args.train_file
model = dbm.DBM(model_file, train_file)
trainer_pb = util.ReadOperation(train_file)
dataset = os.path.basename(trainer_pb.data_proto_prefix)
# Fix paths
dirname = os.path.split(model.t_op.data_proto_prefix)[1]
import awsutil
deepnet_path = awsutil.get_deepnet_path()
model.t_op.data_proto_prefix = os.path.join(deepnet_path, 'datasets/',\
dirname)
model.t_op.skip_last_piece = False
model.t_op.get_last_piece = True
model.t_op.randomize = False
model.LoadModelOnGPU()
model.SetUpData()
if args.valid_only:
data_types = ['valid']
else:
data_types = ['train', 'valid', 'test']
datagetters = {
'train' : model.GetTrainBatch,
'valid' : model.GetValidationBatch,
'test' : model.GetTestBatch
}
batchsizes = {
'train' : model.train_data_handler.num_batches,
'valid' : model.validation_data_handler.num_batches,
'test' : model.test_data_handler.num_batches
}
opts = {}
cm.CUDAMatrix.init_random(seed=int(time.time()))
  if len(model.layer) > 2 and args.infer_method == 'exact':
    raise ValueError('Cannot use exact inference for DBMs')
from collections import defaultdict
pll_data = defaultdict(list)
imperr_data = defaultdict(list)
for data_type in data_types:
num_batches = batchsizes[data_type]
datagetter = datagetters[data_type]
for batch_idx in range(num_batches):
print("Evalutating batch {}".format(batch_idx+1))
datagetter()
if args.infer_method == 'mf':
if args.blosum90:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps, blosum90=True)
else:
pll, imperr = impute_mf(model, args.mf_steps, args.hidden_mf_steps)
elif args.infer_method == 'multicol':
        ncols = args.ncols
        multicol_file = 'datasets/{0}/multicol/{1}_{2}.mat'.format(dataset, args.multmode, ncols)
        multicols = sio.loadmat(multicol_file)['multicols']
        multicols = np.asarray(multicols, dtype=np.int)
        multicols = multicols - 1  # convert from MATLAB 1-based indexing
if args.blosum90:
pll, imperr = multicol_mf(model, multicols, blosum90=True)
else:
pll, imperr = multicol_mf(model, multicols)
elif args.infer_method == 'exact':
pll, imperr = impute_rbm_exact(model)
elif args.infer_method == 'gaussian_exact':
pll, imperr = impute_rbm_gaussian_exact(model)
else:
raise ValueError("Unknown infer method")
pll, imperr = pll.flatten(), imperr.flatten()
pll_data[data_type].append(pll)
imperr_data[data_type].append(imperr)
pll_data[data_type] = np.concatenate(pll_data[data_type])
imperr_data[data_type] = np.concatenate(imperr_data[data_type])
#-------------------------------------------------------------------
# Print and save the results
  for dtype in pll_data:
pll = pll_data[dtype]
imperr = imperr_data[dtype]
print '%s : Pseudo-LogLikelihood %.5f, std %.5f' % (dtype, pll.mean(), pll.std())
print '%s : Imputation Error %.5f, std %.5f' % (dtype, imperr.mean(), imperr.std())
tr.FreeGPU(board)
import pickle
with open(args.outf,'wb') as fout:
pkldata = { 'pll' : pll_data, 'imperr' : imperr_data }
pickle.dump(pkldata, fout)
| 2.125 | 2 |
rosstat/validators/control/control.py | WoolenSweater/rosstat_flc | 2 | 12787249 | from ..base import AbstractValidator
from .exceptions import PrevPeriodNotImpl
from .inspectors import PeriodInspector, FormulaInspector
class ControlValidator(AbstractValidator):
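    '''Validates a report against the control formulas defined in the schema.'''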
name = 'Проверка контролей'
code = '4'
def __init__(self, schema):
self._schema = schema
self.errors = []
self._template = ('{control_name}; слева {left} {operator} '
'справа {right} разница {delta}')
def __repr__(self):
return '<ControlValidator errors={errors}>'.format(**self.__dict__)
def __fmt_control(self, ctrl, name):
        '''Format the message for a failed control.'''
return self._template.format(control_name=name, **ctrl)
def validate(self, report):
self._check_controls(report)
return not bool(self.errors)
def _check_controls(self, report):
        '''Check the report against all controls.'''
if report.blank:
return
for control in self._schema.controls:
self._check_control(report, control)
def _check_control(self, report, control):
        '''Wrapper for exception handling.'''
try:
if self.__check_period(report, control):
self.__check_control(report, control)
except PrevPeriodNotImpl as ex:
self.error(ex.msg, ex.id, level=0)
def __check_period(self, report, control):
        '''Check that the control period matches the report period.'''
inspector = PeriodInspector(control)
return inspector.check(report)
def __check_control(self, report, control):
        '''Check the report against the control values.'''
inspector = FormulaInspector(control,
formats=self._schema.formats,
catalogs=self._schema.catalogs,
dimension=self._schema.dimension,
skip_warns=self._schema.skip_warns)
for ctrl in inspector.check(report):
message = self.__fmt_control(ctrl, inspector.name)
self.error(message, inspector.id, level=inspector.tip)
| 2.453125 | 2 |
twemoji/parse_list/mk_seqjson.py | ericosur/myqt | 0 | 12787250 | <reponame>ericosur/myqt
#!/usr/bin/env python3
# coding: utf-8
'''
sanitize list.txt and generate seq.json
'''
import re
import json
def main():
    '''Parse list.txt, group codepoint sequences by their first codepoint and write seq.json.'''
fn = 'list.txt'
arr = []
with open(fn, 'rt', encoding='UTF-8') as fh:
arr = fh.readlines()
p = re.compile(r'([0-9a-f]+)')
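    # each line of list.txt holds one or more hex codepoints;
    # sequences are grouped under their first (heading) codepoint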
cp_dict = {}
cnt = 0
for v in arr:
cnt += 1
m = p.findall(v)
if not m:
continue
#print('type(m):{},m:{}'.format(type(m), m))
# if len(m) == 1:
# m.append('null')
k = m[0]
        if k not in cp_dict:
cp_dict[k] = []
cp_dict[k].append(m)
print('unique heading key:', len(cp_dict))
print('total cnt:', cnt)
tot = 0
for ii, v in enumerate(cp_dict):
if cp_dict[v]:
tot += len(cp_dict[v])
else:
tot += 1
print(f'{ii}: {cp_dict[v]}')
print('go through cp_dict, len:', tot)
#print(cp_dict['1f935'])
#print(cp_dict['1fa92'])
out_json_file = '../seq.json'
with open(out_json_file, 'wt', encoding='UTF-8') as ofh:
ofh.write(json.dumps(cp_dict, indent=2, sort_keys=True))
print('output json file:', out_json_file)
if __name__ == '__main__':
main()
| 2.90625 | 3 |