max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
class.py | spar7453/python-tutorial | 0 | 12794651 | class Rectangle: # define parent class
def __init__(self, width, height):
self.width = width
self.height = height
def __del__(self):
# Called when object is about to be destroyed
print(self.__class__.__name__, " is destroyed")
def __repr__(self):
return (f'{self.__class__.__name__}'
f'(width = {self.width!r}, height = {self.height!r})')
def __str__(self):
return (f'width = {self.width} , ' f'height = {self.height}')
def area(self):
return self.width * self.height
class Square(Rectangle):
def __init__(self, width, height):
Rectangle.__init__(self, width, height)
def is_square(self):
return self.width == self.height
def check_rec():
print("rectangle")
rec = Rectangle(1, 2)
print(rec) # call rec.__str__ if __str__ is defined, otherwise call __repr__
print(str(rec)) # equivalent to print(rec)
print(repr(rec)) # if __repr__ is not defined, the default repr shows the class and memory address
# rec destroyed here
check_rec()
print("")
def check_square():
print("square")
square = Square(2, 2)
print(square) # call parent's method
print(str(square)) # call parent's method
print(repr(square)) # call parent's method
print(f"square.is_square() = {square.is_square()}") # child method
print(f"square.area() = {square.area()}") # parent method
# rec destroyed here
check_square()
| 4.25 | 4 |
examples/bayesian_nn.py | xiangze/edward | 1 | 12794652 | <reponame>xiangze/edward
#!/usr/bin/env python
"""Bayesian neural network using variational inference
(see, e.g., Blundell et al. (2015); Kucukelbir et al. (2016)).
Inspired by autograd's Bayesian neural network example.
This example prettifies some of the tensor naming for visualization in
TensorBoard. To view TensorBoard, run `tensorboard --logdir=log`.
References
----------
http://edwardlib.org/tutorials/bayesian-neural-network
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import edward as ed
import numpy as np
import tensorflow as tf
from edward.models import Normal
def build_toy_dataset(N=40, noise_std=0.1):
D = 1
X = np.concatenate([np.linspace(0, 2, num=N // 2),  # np.linspace requires an integer num
np.linspace(6, 8, num=N // 2)])
y = np.cos(X) + np.random.normal(0, noise_std, size=N)
X = (X - 4.0) / 4.0
X = X.reshape((N, D))
return X, y
def neural_network(X):
h = tf.tanh(tf.matmul(X, W_0) + b_0)
h = tf.tanh(tf.matmul(h, W_1) + b_1)
h = tf.matmul(h, W_2) + b_2
return tf.reshape(h, [-1])
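# Note on neural_network(): W_0..W_2 and b_0..b_2 are not parameters of the
# function; they are the Normal random variables defined below under the
# "model" name scope and are resolved from module scope when the graph is built.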
ed.set_seed(42)
N = 40 # number of data points
D = 1 # number of features
# DATA
X_train, y_train = build_toy_dataset(N)
# MODEL
with tf.name_scope("model"):
W_0 = Normal(loc=tf.zeros([D, 10]), scale=tf.ones([D, 10]), name="W_0")
W_1 = Normal(loc=tf.zeros([10, 10]), scale=tf.ones([10, 10]), name="W_1")
W_2 = Normal(loc=tf.zeros([10, 1]), scale=tf.ones([10, 1]), name="W_2")
b_0 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name="b_0")
b_1 = Normal(loc=tf.zeros(10), scale=tf.ones(10), name="b_1")
b_2 = Normal(loc=tf.zeros(1), scale=tf.ones(1), name="b_2")
X = tf.placeholder(tf.float32, [N, D], name="X")
y = Normal(loc=neural_network(X), scale=0.1 * tf.ones(N), name="y")
# INFERENCE
with tf.name_scope("posterior"):
with tf.name_scope("qW_0"):
qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, 10]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([D, 10]), name="scale")))
with tf.name_scope("qW_1"):
qW_1 = Normal(loc=tf.Variable(tf.random_normal([10, 10]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([10, 10]), name="scale")))
with tf.name_scope("qW_2"):
qW_2 = Normal(loc=tf.Variable(tf.random_normal([10, 1]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([10, 1]), name="scale")))
with tf.name_scope("qb_0"):
qb_0 = Normal(loc=tf.Variable(tf.random_normal([10]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([10]), name="scale")))
with tf.name_scope("qb_1"):
qb_1 = Normal(loc=tf.Variable(tf.random_normal([10]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([10]), name="scale")))
with tf.name_scope("qb_2"):
qb_2 = Normal(loc=tf.Variable(tf.random_normal([1]), name="loc"),
scale=tf.nn.softplus(
tf.Variable(tf.random_normal([1]), name="scale")))
inference = ed.KLqp({W_0: qW_0, b_0: qb_0,
W_1: qW_1, b_1: qb_1,
W_2: qW_2, b_2: qb_2}, data={X: X_train, y: y_train})
inference.run(logdir='log')
| 3.265625 | 3 |
django_scripts_tracker/core_tracker.py | Krzysiek555/django-scripts-tracker | 2 | 12794653 | <gh_stars>1-10
import inspect
import os
from functools import wraps
from django_scripts_tracker.dependency_resolver import build_dependencies_dict, print_dependencies, run_scripts
from django_scripts_tracker.git_plugin import has_uncommited_changes
from django_scripts_tracker.models import AppliedManagementScripts
from django_scripts_tracker.settings import COMMANDS_DIRS, TERM_COLORS, CHECK_SCRIPT_GIT_STATUS
from django_scripts_tracker.tracker_commons import is_script_ignored, is_tracked_script, filter_unapplied_scripts
from django_scripts_tracker.utils import get_hash, get_script_path, print_scripts
def mark_script_as_applied(script_path):
hash_ = get_hash(script_path)
AppliedManagementScripts.objects.create(file_path=script_path, file_hash=hash_)
def tracked_script(decorated_func):
""" Decorator which logs management scripts executions """
@wraps(decorated_func)
def wrapper(*args, **kwargs):
result = decorated_func(*args, **kwargs)
script_module = inspect.getmodule(decorated_func)
script_path = get_script_path(script_module)
if CHECK_SCRIPT_GIT_STATUS:
if not has_uncommited_changes(script_path):
mark_script_as_applied(script_path)
else:
mark_script_as_applied(script_path)
return result
wrapper._is_tracked_script = True
return wrapper
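# A minimal usage sketch (hypothetical management command, not part of this
# module): decorating a command's handle() records the script as applied once
# it finishes, subject to the optional git-status check above.
#
# from django.core.management.base import BaseCommand
#
# class Command(BaseCommand):
#     @tracked_script
#     def handle(self, *args, **options):
#         ...  # one-off data migration logic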
def _get_trackable_scripts(cmd_dirs):
""" Returns a list of scripts (list of script paths) that should be tracked for modifications """
script_paths = []
for cmd_dir in cmd_dirs:
for entry in os.listdir(cmd_dir):
if not is_script_ignored(entry):
# script file is not ignored -> check if @tracked_script decorator is present
cmd_dir = os.path.join(*cmd_dir.split('/')) # convert to system-dependent path separators
script_path = os.path.join(cmd_dir, entry)
if is_tracked_script(script_path):
script_paths.append(script_path)
return script_paths
def get_unapplied_scripts():
script_paths = _get_trackable_scripts(COMMANDS_DIRS)
new_scripts, modified_scripts = filter_unapplied_scripts(script_paths)
return (new_scripts, modified_scripts)
def print_new_and_modified_scripts(new_scripts, modified_scripts, show_dependencies=False):
print('{LIGHT_CYAN}Checking management scripts:{NC}\n'
' You have {new_count} new and {mod_count} modified management scripts to be applied.'.format(
new_count=len(new_scripts), mod_count=len(modified_scripts), **TERM_COLORS))
if len(new_scripts):
print('{LIGHT_CYAN}New scripts ({count}):{NC}'.format(count=len(new_scripts), **TERM_COLORS))
print_scripts(new_scripts)
if len(modified_scripts):
print('{LIGHT_CYAN}Modified scripts ({count}):{NC}'.format(count=len(modified_scripts), **TERM_COLORS))
print_scripts(modified_scripts)
if show_dependencies and len(new_scripts + modified_scripts):
scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts)
print_dependencies(scripts_dependencies, prefix=' ')
def check_scripts_signal_handler(sender, **kwargs):
from django.conf import settings
new_scripts, modified_scripts = get_unapplied_scripts()
print_new_and_modified_scripts(new_scripts, modified_scripts)
if hasattr(settings, 'SCRIPTS_TRACKER') and settings.SCRIPTS_TRACKER \
and settings.SCRIPTS_TRACKER.get('auto_run', False):
scripts_dependencies = build_dependencies_dict(new_scripts + modified_scripts)
run_scripts(scripts_dependencies, True)
| 2.1875 | 2 |
christmas.py | MS17-010/python-misc | 0 | 12794654 | #!/usr/bin/env python
""" christmas.py
Prints a christmas tree on the terminal using coloured and blinking characters.
Uses ansi terminal escape sequences.
The '\033[' part is the escape code.
We pass '5;' for the colours other than green to make them blink.
The next part is the colour code and the 'm' ends the sequence.
To reset the colour we pass "\033[0m" after each character.
Python 3 version by antiloquax (2015), based on code from datamungeblog.com.
"""
from random import choice
from random import random
def main():
"""Make the tree and print it."""
# If you change this, use an odd number.
SIZE = 21
print(makeTree(SIZE))
def makeTree(size):
"""Creates the tree string."""
# Probability that a character will be green.
prob_gr = 0.6
# Colour codes.
colours = [31, 33, 34, 35, 36, 37]
# Characters to use for decorations. Experiment with these.
# The chr(169) and chr(174) characters may not work in all terminals
# (extended ASCII, c and r in a circle).
decs = ['@', '&', '*', chr(169), chr(174)]
# Format string for printing blinking characters.
blink_col = "\033[5;{0}m{1}\033[0m"
# String to print a green octothorpe ('#').
leaf = "\033[32m#\033[0m"
# Width of the tree, will grow by 2 each time.
width = 1
# Initialise the tree string, with a star at the top.
tree = "\n{}*\n".format(' ' * (size))
""" Main Loop starts now."""
""" We can't use the normal "format" centering approach:
("{:^nn}".format(string) where "nn" is the width of the line),
with these ansi codes. This is because Python sees the strings as being
more than one character long (15 & 10 for baubles and leaves)."""
# Loop from (size - 1) down to 0, using the counter as the padding size.
for pad in range(size - 1, -1, -1):
# Increase the width of the tree by 2.
width += 2
# Put the characters for the line in "temp".
temp = ""
for j in range(width):
# Make some leaves.
if random() < prob_gr:
temp += leaf
# And also some baubles.
else:
temp += blink_col.format(choice(colours), choice(decs))
# Add that string to the line, with padding.
tree += "{0}{1}\n".format(' ' * pad, temp)
# Add a "trunk" of 2 lines and return.
return tree + "{0}{1}\n".format(' ' * (size - 1), "000") * 2
if __name__ == "__main__":
main()
| 4.3125 | 4 |
zmon_worker_extras/check_plugins/jobs.py | heroldus/zmon-worker | 17 | 12794655 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Zalando-specific function to query DeployCtl job information
"""
from itertools import groupby
from operator import itemgetter
from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial
from zmon_worker_monitor import plugin_manager
class JobsFactory(IFunctionFactoryPlugin):
def __init__(self):
super(JobsFactory, self).__init__()
# fields from dependencies: plugin depends 1 other plugin
self.http_factory = None
def configure(self, conf):
"""
Called after plugin is loaded to pass the [configuration] section in their plugin info file
:param conf: configuration dictionary
"""
return
def create(self, factory_ctx):
"""
Automatically called to create the check function's object
:param factory_ctx: (dict) names available for Function instantiation
:return: an object that implements a check function
"""
# load plugins dependencies and store them locally for efficiency
if not self.http_factory:
self.http_factory = plugin_manager.get_plugin_obj_by_name('http', 'Function')
return propartial(JobsWrapper,
http_wrapper=self.http_factory.create(factory_ctx),
project=factory_ctx['entity'].get('name'))
class JobsWrapper(object):
def __init__(self, http_wrapper, environment, project, **kwargs):
self.url = 'https://deployctl.example.com/jobs/history.json/{}/{}'.format(environment, project)
self.__http = http_wrapper
self.http_wrapper_params = kwargs
self.name = itemgetter('name')
def __request(self):
return self.__http(self.url, **self.http_wrapper_params).json()
def lastruns(self):
start_time = itemgetter('start_seconds_ago')
return dict((job, min(runs, key=start_time)) for (job, runs) in groupby(sorted(self.__request(),
key=self.name), key=self.name))
def history(self):
return dict((job, list(runs)) for (job, runs) in groupby(sorted(self.__request(), key=self.name),
key=self.name))
| 2.140625 | 2 |
prob08/prob8.py | speyejack/EulersProblems | 0 | 12794656 | <reponame>speyejack/EulersProblems
from operator import itemgetter
from functools import reduce
num = """73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450""".replace("\n", "").strip()
search_range = 13
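# The one-liner below slides a window of `search_range` consecutive digits
# across the number string and takes the maximum product of the digits in
# any single window.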
product = max([reduce(lambda x,y:int(x)*int(y),sub) for sub in [num[sub:sub+search_range] for sub in range(len(num)-search_range)]])
print("Greatest product: {}".format(product))
| 1.8125 | 2 |
ossim/disk/views.py | devil-r/Os-simulator | 0 | 12794657 | <filename>ossim/disk/views.py
from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect,HttpResponse
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
import json
from django.http import JsonResponse
# Create your views here.
from . models import DiskSchedAlg
from . utils import cscan,clook, scan,look,sstf,fcfs
def home(request):
algos = DiskSchedAlg.objects.all()
context = {'algos': algos}
return render(request, 'disk/index.html',context = context)
def detail(request,pk):
alg = get_object_or_404(DiskSchedAlg, pk=pk)
context = {'alg':alg,
}
return render(request,'disk/detail.html',context=context)
def demo(request):
return render(request,'disk/disk.html')
@csrf_exempt
def gateway(request):
if request.method == 'POST':
data = request.POST.get('value')
data = json.loads(data)
alg = request.POST.get('algo')
alg=json.loads(alg)
print(data)
if(alg=="CSCAN"):
result = cscan(data)
elif(alg=="CLOOK"):
result = clook(data)
elif(alg=="FCFS"):
result = fcfs(data)
elif(alg=="SCAN"):
result = scan(data)
elif(alg=="LOOK"):
result = look(data)
elif(alg=="SSTF"):
result = sstf(data)
print(result)
return JsonResponse({'output':result})
| 2.15625 | 2 |
Python/6 - kyu/6 kyu - Format a string of names like 'Bart, Lista e Maggie'.py | danielbom/codewars | 0 | 12794658 | <filename>Python/6 - kyu/6 kyu - Format a string of names like 'Bart, Lista e Maggie'.py
# https://www.codewars.com/kata/53368a47e38700bd8300030d/train/python
# My solution
def namelist(names):
return ' & '.join(', '.join(i['name'] for i in names).rsplit(', ', 1))
# Other
def namelist(names):
if not names: return ''
if len(names) == 1:
return names[0]['name']
if len(names) == 2:
return ' & '.join([i['name'] for i in names])
return names[0]['name'] + ', ' + namelist(names[1:])
| 3.6875 | 4 |
beginner_contest/068/B.py | FGtatsuro/myatcoder | 0 | 12794659 | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
exp = 0
while 2 ** (exp + 1) <= n:
exp += 1
print(2 ** exp)
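# Example: for n = 10 the loop stops at exp = 3, printing 8 (the largest
# power of 2 that does not exceed n).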
| 2.984375 | 3 |
src/server/worker/worker_manager/shared/lock_application.py | dpaola2/djangy | 15 | 12794660 | <gh_stars>10-100
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'orm.settings'
from orm.models import *
def lock_application(func):
def lock_and_call(application_name, *args, **kwargs):
lock = LocalApplicationLocks.lock(application_name)
try:
func(application_name, *args, **kwargs)
finally:
LocalApplicationLocks.unlock(lock)
return lock_and_call
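# A minimal usage sketch (hypothetical task function): the decorator acquires
# the per-application lock before the call and releases it afterwards, so
# concurrent operations on the same application_name are serialized.
#
# @lock_application
# def deploy_application(application_name, bundle_version):
#     ...  # runs while holding the LocalApplicationLocks lock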
| 1.984375 | 2 |
sugar/tests/cors.py | acdha/django-sugar | 2 | 12794661 | <filename>sugar/tests/cors.py
from django.conf import settings
from django.test.testcases import TestCase
from django.http import HttpRequest, HttpResponse
from sugar.middleware.cors import CORSMiddleware
class CORSTests(TestCase):
def test_middleware(self):
cors = CORSMiddleware()
request = HttpRequest()
request.path = "/"
response = HttpResponse('["foo"]',
mimetype='application/json')
cors.process_response(request, response)
self.assertEqual(response['access-control-allow-origin'], '*')
def test_non_interference(self):
"CORS Middleware shouldn't touch responses outside of its mimetypes"
cors = CORSMiddleware()
request = HttpRequest()
request.path = "/cicero"
response = HttpResponse('Lorem ipsum dolor sit amet',
mimetype='text/html')
cors.process_response(request, response)
self.assertFalse(response.has_header('access-control-allow-origin'))
def test_custom_settings(self):
"CORS Middleware shouldn't touch responses outside of its mimetypes"
settings.CORS_PATHS = (
('/foo', ('application/json', ), (('Access-Control-Allow-Origin', 'foo.example.com'), )),
('/bar', ('application/json', ), (('Access-Control-Allow-Origin', 'example.com'), )),
('/', ('application/json', ), (('Access-Control-Allow-Origin', '*'), )),
)
cors = CORSMiddleware()
request = HttpRequest()
request.path = "/test"
response = HttpResponse('["foo"]', mimetype='application/json')
cors.process_response(request, response)
self.assertEqual(response['access-control-allow-origin'], '*')
request.path = "/foo/bar/baaz/quux"
cors.process_response(request, response)
self.assertEqual(response['access-control-allow-origin'], 'foo.example.com')
request.path = "/bar/baaz/quux"
cors.process_response(request, response)
self.assertEqual(response['access-control-allow-origin'], 'example.com')
| 2.21875 | 2 |
scripts/PCA.py | SalishSeaCast/SoG_upwelling_EOF_paper | 0 | 12794662 | <filename>scripts/PCA.py
#!/usr/bin/env python
#
# Code module for calculating the PCA matrices of the
# SalishSeaCast surface nitrate and temperature records.
#
# required for the analyses presented in:
#
# <NAME> and <NAME>: Wind-driven upwelling and
# surface nutrient delivery in a semi-enclosed coastal sea,
# Ocean Sci., 2022.
#
# $ cd scripts
# $ python3 PCA.py /path/to/files
import numpy as np
import xarray as xr
import sys
from datetime import datetime, timedelta
from scipy import signal, fft
from tqdm import tqdm
import tools
def regrid(xflat, yflat, valuesflat):
"""Regrid a flattened array with the landpoints removed according to the
corresponding xflat, yflat coordinate arrays. This function assumes a
2D input shape for valuesflat of [space, mode].
"""
shape = (max(yflat)+1, max(xflat)+1, valuesflat.shape[1])
valuesgridded = np.zeros(shape)
for y, x, row in zip(yflat, xflat, valuesflat):
valuesgridded[y, x, :] = row
return valuesgridded
def varimax(A, maxiter=40, tol=1e-5):
"""Calculate the varimax rotation matrix H from the n x p PC loadings matrix A. H is determined
iteratively from the Lagrange multiplier optimization of the varimax criterion.
Adapted from:
<NAME>. (1965) Factor Analysis of Data Matrices. Holt, Rinehart and Winston. New York, USA.
Chapter 18: Analytical Rotations
- Section 18.4: Simultaneous Factor Varimax Solution, Equations 18.4.1-10, pp. 428-429
- Section 18.7: Mathematical Proofs, Equations 18.7.32-54, pp. 437-438
The algorithm described in Section 18.4 has been reformulated to use SVD based on equivalent
definitions for the rotation matrix described in Section 18.7. The eigenvalue matrix is used
to evaluate convergence.
This version of the varimax algorithm is functionally identical to those found in Sci-kit learn,
Matlab, R, and presumably others.
"""
# Initialization
n, p = A.shape
H = np.eye(p)
d = 0
# Iteration
for i in tqdm(range(maxiter), desc='Calculating rotation matrix'):
d_old = d
B = A.dot(H) # -------------------------------------------------------- 18.4.5
beta = B * B * B - B.dot(np.diag(np.diag(B.T.dot(B)))) / n # ---------- 18.4.6
P, Delta, Q_T = np.linalg.svd(A.T.dot(beta)) # ------------------------ 18.7.42
H = P.dot(Q_T) # ------------------------------------------------------ 18.7.45
d = sum(Delta)
# Convergence
if d_old != 0 and d/d_old < 1 + tol: break
return H
def calc_PCA(z):
"""Calculate EOF matrices of n x p data matrix z using SVD
and optional varimax rotation
"""
# Calculate orthogonal PCA matrices
A_prime, sqrtL, E_T = np.linalg.svd(z, full_matrices=False)
A = A_prime.dot(np.diag(sqrtL))
A2 = A * A
var = A2.sum(axis=0) / A2.sum()
E = E_T.T
# Get varimax rotation matrix
R = varimax(A)
# Rotate matrices
B = A.dot(R)
B2 = B * B
var_rot = B2.sum(axis=0) / B2.sum()
U = E.dot(R)
# Sort rotated matrices
isort = var_rot.argsort()[::-1]
# Return xarray-compatible netCDF dict
PCA = {'A': A, 'E': E, 'var': var, 'B': B[:, isort], 'U': U[:, isort], 'var_rot': var_rot[isort]}
return PCA
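# A minimal usage sketch (synthetic data, illustrative only): calc_PCA expects
# an n x p matrix with the column means already removed; 'var' and 'var_rot'
# hold the explained-variance fractions before and after rotation.
#
# z = np.random.randn(500, 40)
# pca = calc_PCA(z - z.mean(axis=0)[None, :])
# print(pca['var_rot'][:3], pca['var_rot'].sum())  # leading modes, total ~1.0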
def build_PCA_files(results_path, subsample=5, cutoff=1235):
"""Call the principal component analysis and varimax rotation
functions and build them to netCDF output
"""
# Load aggregated results file
slc = slice(None, None, subsample)
with xr.open_dataset(results_path + 'MooreMaleyAllenOS2022_modelfields.nc') as ds:
data = {var: ds[var].values for var in ('temperature', 'nitrate')}
coords = {var: ds[var].values[slc] for var in ('x', 'y')}
coords['time'] = tools.formattime(ds.time.values)
landmask = ds.landmask.values
# Calculate seasonal indices
waterpoints = tools.openwaterpoints(landmask)
nitrate = np.median(tools.flatten(data['nitrate'], waterpoints), axis=1)
isegments, iseason = tools.calc_seasonal_indices(coords['time'], nitrate)
coords['time'] = coords['time'][iseason]
# Build flattened, subsampled coordinate arrays
landmask = landmask[slc, slc]
y, x = [range(dim) for dim in landmask.shape]
maskflat = landmask.ravel().astype(bool)
xflat, yflat = [var.ravel()[maskflat] for var in np.meshgrid(x, y)]
# Calculate EOFs
for var in ['temperature', 'nitrate']:
# Subsample and flatten
raw = tools.flatten(data[var][:, slc, slc], landmask)
# Subtract lowpass filter and extract productive season
z = np.vstack([col - tools.lowpass(col, cutoff) for col in raw.T]).T[iseason, :]
# Subtract mean and calculate PCA
PCA = calc_PCA(z - z.mean(axis=0)[None, :])
# Build PCA results as xarray Dataset and save to netCDF
variables = {
'landmask': (['y', 'x'], landmask),
'median': (['y', 'x'], np.median(data[var][iseason, slc, slc], axis=0)),
'A': (['time', 'mode'], PCA['A']),
'B': (['time', 'mode'], PCA['B']),
'E': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['E'])),
'U': (['y', 'x', 'mode'], regrid(xflat, yflat, PCA['U'])),
'var': ('mode', PCA['var']),
'var_rot': ('mode', PCA['var_rot']),
}
xr.Dataset(variables, coords).to_netcdf(results_path + var + '_PCA.nc')
if __name__ == "__main__":
build_PCA_files(sys.argv[1])
| 2.5 | 2 |
gist/manager/gist_manager.py | shafikshaon/daybook | 0 | 12794663 | <reponame>shafikshaon/daybook<gh_stars>0
__author__ = '<NAME>'
from django.db import models
class GistManager(models.Manager):
def get_queryset(self):
return super(GistManager, self).get_queryset().filter(is_delete=False)
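# Usage sketch (hypothetical model): attach as the default manager so ordinary
# queries exclude soft-deleted rows, e.g.
# class Gist(models.Model):
#     is_delete = models.BooleanField(default=False)
#     objects = GistManager()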
| 1.90625 | 2 |
pytorchvideo/models/net.py | kevinmtian/pytorchvideo | 2,391 | 12794664 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from typing import List, Optional
import torch
import torch.nn as nn
from pytorchvideo.layers.utils import set_attributes
from pytorchvideo.models.weight_init import init_net_weights
class Net(nn.Module):
"""
Build a general Net model with a list of blocks for video recognition.
::
Input
↓
Block 1
↓
.
.
.
↓
Block N
↓
The ResNet builder can be found in `create_resnet`.
"""
def __init__(self, *, blocks: nn.ModuleList) -> None:
"""
Args:
blocks (torch.nn.module_list): the list of block modules.
"""
super().__init__()
assert blocks is not None
self.blocks = blocks
init_net_weights(self)
def forward(self, x: torch.Tensor) -> torch.Tensor:
for idx in range(len(self.blocks)):
x = self.blocks[idx](x)
return x
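# A minimal sketch (illustrative blocks, not a real video model): Net simply
# applies its blocks in sequence, so any nn.ModuleList of modules works.
#
# blocks = nn.ModuleList([
#     nn.Conv3d(3, 8, kernel_size=3, padding=1),
#     nn.AdaptiveAvgPool3d(1),
#     nn.Flatten(),
# ])
# net = Net(blocks=blocks)
# out = net(torch.randn(2, 3, 4, 16, 16))  # -> shape (2, 8)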
class DetectionBBoxNetwork(nn.Module):
"""
A general purpose model that handles bounding boxes as part of input.
"""
def __init__(self, model: nn.Module, detection_head: nn.Module):
"""
Args:
model (nn.Module): a model that precedes the head. Ex: stem + stages.
detection_head (nn.Module): a network head. that can take in input bounding boxes
and the outputs from the model.
"""
super().__init__()
self.model = model
self.detection_head = detection_head
def forward(self, x: torch.Tensor, bboxes: torch.Tensor):
"""
Args:
x (torch.tensor): input tensor
bboxes (torch.tensor): associated bounding boxes.
The format is N*5 (Index, X_1,Y_1,X_2,Y_2) if using RoIAlign
and N*6 (Index, x_ctr, y_ctr, width, height, angle_degrees) if
using RoIAlignRotated.
"""
features = self.model(x)
out = self.detection_head(features, bboxes)
return out.view(out.shape[0], -1)
class MultiPathWayWithFuse(nn.Module):
"""
Build multi-pathway block with fusion for video recognition, each of the pathway
contains its own Blocks and Fusion layers across different pathways.
::
Pathway 1 ... Pathway N
↓ ↓
Block 1 Block N
↓⭠ --Fusion----↓
"""
def __init__(
self,
*,
multipathway_blocks: nn.ModuleList,
multipathway_fusion: Optional[nn.Module],
inplace: Optional[bool] = True,
) -> None:
"""
Args:
multipathway_blocks (nn.module_list): list of models from all pathways.
multipathway_fusion (nn.module): fusion model.
inplace (bool): If inplace, directly update the input list without making
a copy.
"""
super().__init__()
set_attributes(self, locals())
def forward(self, x: List[torch.Tensor]) -> torch.Tensor:
assert isinstance(
x, list
), "input for MultiPathWayWithFuse needs to be a list of tensors"
if self.inplace:
x_out = x
else:
x_out = [None] * len(x)
for pathway_idx in range(len(self.multipathway_blocks)):
if self.multipathway_blocks[pathway_idx] is not None:
x_out[pathway_idx] = self.multipathway_blocks[pathway_idx](
x[pathway_idx]
)
if self.multipathway_fusion is not None:
x_out = self.multipathway_fusion(x_out)
return x_out
| 2.90625 | 3 |
excAnalyser/countReps.py | San-B-09/BeFit | 0 | 12794665 | def countReps(phase_list, total_phases):
mx=max(phase_list)
if len(phase_list)<total_phases:
return phase_list
if mx<total_phases//2:
return phase_list
f_occ=phase_list.index(mx)
phase_list.reverse()
l_occ=len(phase_list)-phase_list.index(mx)-1
phase_list.reverse()
if (0 in phase_list[:f_occ]) and (0 in phase_list[l_occ:]):
return [0]
else:
return phase_list | 2.953125 | 3 |
problems/LQ/LQ2005/std.py | jgsu-acm/problems | 1 | 12794666 | # REPEAT (\d) -> for i in range($1)
A = 0
for i in range(2):
A = A + 4
for i in range(5):
for i in range(6):
A = A + 5
A = A + 7
for i in range(6):
A = A + 7
for i in range(4):
A = A + 2
A = A + 7
A = A + 2
for i in range(7):
for i in range(4):
A = A + 8
A = A + 7
A = A + 4
A = A + 5
A = A + 8
for i in range(8):
A = A + 5
for i in range(1):
A = A + 2
for i in range(7):
A = A + 5
A = A + 5
for i in range(2):
for i in range(3):
A = A + 1
A = A + 1
for i in range(5):
A = A + 1
for i in range(9):
for i in range(6):
A = A + 5
A = A + 1
for i in range(6):
A = A + 2
A = A + 8
A = A + 3
for i in range(2):
A = A + 5
for i in range(3):
A = A + 9
for i in range(1):
A = A + 4
for i in range(2):
A = A + 9
for i in range(1):
A = A + 6
A = A + 6
A = A + 4
for i in range(3):
A = A + 7
A = A + 1
for i in range(2):
A = A + 3
for i in range(5):
A = A + 2
A = A + 5
A = A + 2
A = A + 4
A = A + 3
for i in range(4):
A = A + 4
A = A + 3
A = A + 7
for i in range(5):
for i in range(4):
A = A + 5
A = A + 7
for i in range(5):
A = A + 3
for i in range(3):
A = A + 3
A = A + 1
A = A + 8
A = A + 2
for i in range(9):
A = A + 5
for i in range(1):
A = A + 5
A = A + 2
A = A + 8
A = A + 6
for i in range(3):
for i in range(4):
A = A + 9
for i in range(5):
A = A + 2
A = A + 1
for i in range(9):
A = A + 9
A = A + 2
for i in range(1):
A = A + 6
A = A + 8
for i in range(2):
A = A + 9
A = A + 4
A = A + 7
for i in range(2):
for i in range(7):
A = A + 3
A = A + 5
for i in range(3):
A = A + 5
A = A + 3
A = A + 6
A = A + 4
for i in range(9):
A = A + 2
A = A + 8
A = A + 2
A = A + 3
for i in range(2):
for i in range(8):
A = A + 5
A = A + 1
A = A + 6
A = A + 1
A = A + 2
for i in range(6):
for i in range(1):
A = A + 3
for i in range(1):
A = A + 2
for i in range(4):
A = A + 7
A = A + 1
A = A + 8
for i in range(6):
A = A + 5
for i in range(6):
A = A + 3
for i in range(2):
A = A + 2
A = A + 9
A = A + 7
for i in range(9):
A = A + 8
for i in range(9):
A = A + 8
A = A + 9
A = A + 3
A = A + 2
for i in range(6):
A = A + 3
for i in range(9):
A = A + 1
A = A + 9
A = A + 5
for i in range(2):
A = A + 4
A = A + 9
A = A + 8
for i in range(5):
A = A + 6
A = A + 9
A = A + 1
for i in range(1):
A = A + 4
A = A + 2
for i in range(9):
for i in range(3):
A = A + 4
for i in range(7):
A = A + 8
A = A + 3
for i in range(5):
A = A + 9
for i in range(8):
A = A + 9
A = A + 8
for i in range(4):
A = A + 7
A = A + 7
A = A + 3
A = A + 5
for i in range(6):
A = A + 7
for i in range(7):
A = A + 2
A = A + 2
A = A + 1
for i in range(8):
for i in range(1):
for i in range(4):
A = A + 6
A = A + 6
A = A + 2
for i in range(5):
A = A + 4
A = A + 8
A = A + 4
for i in range(1):
A = A + 5
for i in range(7):
A = A + 8
for i in range(6):
A = A + 4
A = A + 4
A = A + 8
for i in range(4):
A = A + 2
for i in range(2):
A = A + 4
for i in range(2):
A = A + 3
for i in range(1):
A = A + 2
A = A + 8
for i in range(2):
A = A + 7
for i in range(8):
A = A + 6
A = A + 1
A = A + 7
for i in range(8):
A = A + 2
for i in range(8):
for i in range(6):
A = A + 1
A = A + 6
for i in range(2):
A = A + 4
A = A + 1
A = A + 7
A = A + 4
for i in range(4):
for i in range(9):
A = A + 2
for i in range(1):
A = A + 2
A = A + 5
for i in range(8):
for i in range(6):
A = A + 3
for i in range(4):
A = A + 1
A = A + 6
A = A + 1
for i in range(7):
A = A + 7
for i in range(7):
A = A + 3
A = A + 9
A = A + 1
A = A + 9
for i in range(3):
A = A + 5
A = A + 5
A = A + 6
A = A + 2
for i in range(1):
A = A + 4
for i in range(2):
A = A + 7
for i in range(1):
A = A + 7
for i in range(4):
A = A + 7
A = A + 2
for i in range(5):
A = A + 9
A = A + 1
A = A + 9
A = A + 5
A = A + 9
for i in range(5):
A = A + 5
for i in range(1):
A = A + 6
for i in range(2):
A = A + 3
A = A + 2
A = A + 6
A = A + 8
A = A + 8
A = A + 7
A = A + 5
A = A + 5
for i in range(2):
A = A + 1
A = A + 7
A = A + 3
for i in range(2):
A = A + 7
A = A + 1
A = A + 4
for i in range(1):
for i in range(7):
for i in range(2):
A = A + 3
A = A + 5
A = A + 2
A = A + 6
A = A + 1
A = A + 2
A = A + 4
A = A + 9
for i in range(1):
A = A + 8
for i in range(8):
for i in range(4):
for i in range(8):
A = A + 4
for i in range(3):
A = A + 1
A = A + 8
for i in range(7):
A = A + 8
for i in range(7):
A = A + 7
A = A + 7
for i in range(7):
A = A + 6
for i in range(5):
A = A + 9
A = A + 3
for i in range(4):
A = A + 5
A = A + 5
A = A + 4
for i in range(9):
for i in range(3):
A = A + 4
A = A + 3
A = A + 6
for i in range(1):
A = A + 3
A = A + 3
A = A + 6
for i in range(6):
A = A + 7
A = A + 7
A = A + 5
A = A + 5
A = A + 1
A = A + 2
A = A + 6
A = A + 6
for i in range(9):
A = A + 6
for i in range(1):
for i in range(2):
A = A + 4
A = A + 7
for i in range(3):
A = A + 6
for i in range(5):
A = A + 3
A = A + 6
for i in range(9):
A = A + 3
A = A + 6
for i in range(5):
A = A + 8
A = A + 8
for i in range(3):
A = A + 7
A = A + 9
A = A + 8
A = A + 3
A = A + 3
A = A + 9
for i in range(6):
A = A + 9
A = A + 1
for i in range(4):
for i in range(1):
A = A + 7
for i in range(9):
A = A + 2
A = A + 9
A = A + 1
A = A + 2
A = A + 8
A = A + 7
A = A + 9
A = A + 6
for i in range(4):
for i in range(2):
A = A + 3
for i in range(3):
A = A + 4
A = A + 4
for i in range(6):
A = A + 6
A = A + 1
A = A + 5
A = A + 8
for i in range(2):
A = A + 6
for i in range(1):
for i in range(2):
A = A + 2
for i in range(3):
A = A + 1
for i in range(1):
A = A + 8
A = A + 7
A = A + 4
A = A + 2
A = A + 8
A = A + 4
for i in range(5):
for i in range(6):
A = A + 8
for i in range(9):
A = A + 5
A = A + 5
for i in range(5):
A = A + 5
for i in range(3):
for i in range(5):
A = A + 4
for i in range(4):
A = A + 6
A = A + 3
for i in range(7):
A = A + 3
A = A + 3
A = A + 1
A = A + 7
A = A + 7
A = A + 6
A = A + 5
A = A + 5
A = A + 6
for i in range(1):
A = A + 9
A = A + 3
for i in range(1):
for i in range(1):
A = A + 1
for i in range(8):
A = A + 5
for i in range(8):
A = A + 6
for i in range(4):
A = A + 9
A = A + 4
for i in range(2):
A = A + 3
A = A + 7
for i in range(5):
A = A + 7
A = A + 5
A = A + 8
A = A + 7
A = A + 8
A = A + 5
for i in range(2):
A = A + 5
A = A + 7
A = A + 8
A = A + 5
A = A + 9
for i in range(2):
for i in range(6):
A = A + 9
A = A + 1
A = A + 8
A = A + 7
A = A + 1
A = A + 5
for i in range(3):
A = A + 3
A = A + 9
A = A + 7
for i in range(3):
A = A + 9
A = A + 1
for i in range(6):
A = A + 1
for i in range(9):
for i in range(7):
A = A + 3
for i in range(5):
A = A + 5
A = A + 8
A = A + 8
A = A + 1
A = A + 2
for i in range(4):
A = A + 6
for i in range(3):
A = A + 3
A = A + 7
for i in range(8):
for i in range(1):
A = A + 7
A = A + 8
A = A + 3
A = A + 1
A = A + 2
A = A + 4
A = A + 7
for i in range(1):
for i in range(1):
for i in range(1):
A = A + 4
A = A + 6
for i in range(1):
A = A + 3
A = A + 9
A = A + 6
for i in range(9):
A = A + 1
A = A + 6
for i in range(5):
A = A + 3
A = A + 9
A = A + 5
A = A + 5
A = A + 7
A = A + 2
for i in range(2):
A = A + 7
A = A + 7
for i in range(7):
for i in range(4):
A = A + 6
A = A + 8
for i in range(6):
A = A + 6
for i in range(2):
A = A + 1
A = A + 7
A = A + 6
A = A + 7
for i in range(4):
for i in range(7):
A = A + 1
for i in range(2):
A = A + 2
A = A + 5
A = A + 8
A = A + 2
A = A + 1
A = A + 4
for i in range(8):
A = A + 5
A = A + 6
for i in range(7):
for i in range(6):
for i in range(9):
A = A + 7
A = A + 8
for i in range(4):
A = A + 6
A = A + 4
A = A + 3
A = A + 6
for i in range(9):
A = A + 3
for i in range(9):
A = A + 2
A = A + 7
A = A + 5
A = A + 2
for i in range(7):
for i in range(8):
for i in range(6):
A = A + 4
A = A + 9
A = A + 5
A = A + 3
A = A + 9
for i in range(4):
for i in range(1):
A = A + 6
A = A + 8
for i in range(1):
A = A + 6
A = A + 4
A = A + 6
for i in range(3):
A = A + 7
for i in range(3):
A = A + 4
A = A + 4
A = A + 2
A = A + 3
A = A + 7
for i in range(5):
A = A + 6
A = A + 5
for i in range(1):
for i in range(8):
A = A + 5
for i in range(3):
A = A + 6
for i in range(9):
A = A + 4
A = A + 3
for i in range(6):
for i in range(2):
A = A + 1
A = A + 5
A = A + 2
A = A + 2
A = A + 7
for i in range(4):
A = A + 7
A = A + 9
A = A + 2
for i in range(8):
A = A + 9
for i in range(9):
for i in range(2):
A = A + 3
A = A + 2
A = A + 1
A = A + 5
for i in range(9):
A = A + 1
A = A + 3
A = A + 9
for i in range(7):
A = A + 2
for i in range(5):
A = A + 9
A = A + 3
for i in range(2):
A = A + 4
for i in range(8):
A = A + 9
for i in range(5):
A = A + 5
A = A + 4
A = A + 2
A = A + 4
for i in range(6):
A = A + 2
for i in range(5):
A = A + 7
A = A + 7
A = A + 8
A = A + 3
for i in range(8):
A = A + 2
A = A + 5
for i in range(1):
A = A + 8
A = A + 5
A = A + 1
A = A + 1
A = A + 5
for i in range(2):
A = A + 6
for i in range(6):
A = A + 9
A = A + 2
A = A + 5
for i in range(4):
A = A + 7
A = A + 1
for i in range(6):
A = A + 8
A = A + 4
for i in range(3):
for i in range(2):
A = A + 1
A = A + 5
for i in range(2):
A = A + 7
for i in range(9):
A = A + 6
A = A + 8
A = A + 9
A = A + 5
for i in range(9):
for i in range(3):
A = A + 7
A = A + 7
A = A + 9
A = A + 7
for i in range(5):
A = A + 7
A = A + 2
A = A + 1
A = A + 8
A = A + 3
A = A + 5
A = A + 1
for i in range(8):
A = A + 4
A = A + 2
A = A + 2
A = A + 8
for i in range(4):
for i in range(4):
A = A + 8
for i in range(7):
A = A + 5
A = A + 2
for i in range(2):
A = A + 6
for i in range(4):
A = A + 8
A = A + 6
A = A + 1
A = A + 3
A = A + 2
A = A + 7
A = A + 4
for i in range(8):
A = A + 2
A = A + 4
for i in range(5):
for i in range(3):
for i in range(6):
A = A + 8
A = A + 1
A = A + 6
A = A + 5
A = A + 9
for i in range(8):
A = A + 7
for i in range(6):
A = A + 4
A = A + 5
for i in range(3):
A = A + 1
for i in range(1):
for i in range(5):
A = A + 6
A = A + 2
for i in range(9):
for i in range(5):
A = A + 9
A = A + 3
for i in range(9):
A = A + 9
A = A + 8
for i in range(8):
for i in range(5):
A = A + 9
A = A + 4
for i in range(9):
A = A + 3
A = A + 4
A = A + 5
for i in range(9):
for i in range(7):
A = A + 5
for i in range(3):
A = A + 7
for i in range(9):
for i in range(6):
A = A + 4
A = A + 6
for i in range(5):
for i in range(6):
A = A + 5
A = A + 3
A = A + 3
A = A + 3
A = A + 5
for i in range(7):
A = A + 5
for i in range(2):
A = A + 5
A = A + 6
for i in range(2):
A = A + 2
A = A + 5
A = A + 3
A = A + 5
A = A + 5
for i in range(4):
A = A + 2
A = A + 1
for i in range(9):
A = A + 9
A = A + 5
A = A + 6
A = A + 2
A = A + 2
A = A + 5
for i in range(9):
A = A + 5
A = A + 4
for i in range(4):
for i in range(4):
A = A + 1
A = A + 2
for i in range(6):
A = A + 9
A = A + 3
for i in range(2):
A = A + 5
A = A + 1
A = A + 1
A = A + 3
A = A + 8
for i in range(7):
A = A + 4
for i in range(6):
A = A + 9
for i in range(5):
A = A + 9
A = A + 8
A = A + 3
A = A + 9
A = A + 4
A = A + 6
for i in range(7):
A = A + 9
for i in range(9):
A = A + 4
A = A + 9
A = A + 1
A = A + 3
for i in range(5):
for i in range(1):
A = A + 4
A = A + 4
for i in range(8):
A = A + 9
A = A + 6
A = A + 2
for i in range(3):
A = A + 4
A = A + 4
for i in range(3):
A = A + 5
A = A + 2
A = A + 8
A = A + 3
A = A + 6
A = A + 4
A = A + 9
A = A + 1
A = A + 9
A = A + 5
A = A + 3
for i in range(3):
A = A + 2
A = A + 5
A = A + 8
A = A + 2
A = A + 5
for i in range(8):
for i in range(2):
A = A + 6
A = A + 7
A = A + 6
A = A + 9
A = A + 2
for i in range(2):
A = A + 3
for i in range(8):
A = A + 7
A = A + 2
A = A + 1
A = A + 4
A = A + 1
A = A + 5
A = A + 2
A = A + 1
for i in range(1):
A = A + 1
for i in range(6):
A = A + 4
A = A + 3
A = A + 3
for i in range(5):
A = A + 3
for i in range(6):
for i in range(1):
A = A + 5
A = A + 7
A = A + 7
A = A + 7
for i in range(5):
A = A + 9
A = A + 7
for i in range(5):
A = A + 9
A = A + 1
A = A + 9
A = A + 8
for i in range(1):
A = A + 2
for i in range(5):
A = A + 8
for i in range(3):
A = A + 2
A = A + 9
A = A + 6
A = A + 3
for i in range(5):
for i in range(6):
A = A + 5
A = A + 5
for i in range(4):
A = A + 5
A = A + 4
for i in range(8):
A = A + 9
A = A + 1
for i in range(8):
A = A + 8
A = A + 1
A = A + 4
for i in range(6):
A = A + 6
for i in range(2):
A = A + 3
A = A + 9
A = A + 6
A = A + 9
for i in range(1):
A = A + 4
for i in range(3):
A = A + 3
A = A + 4
A = A + 2
A = A + 8
for i in range(2):
A = A + 4
A = A + 1
for i in range(9):
A = A + 2
A = A + 9
A = A + 7
for i in range(7):
for i in range(7):
for i in range(5):
A = A + 7
for i in range(5):
A = A + 1
A = A + 1
for i in range(5):
A = A + 6
for i in range(1):
A = A + 4
for i in range(9):
A = A + 4
A = A + 1
for i in range(6):
A = A + 8
A = A + 5
for i in range(1):
A = A + 4
for i in range(5):
A = A + 8
A = A + 7
A = A + 2
for i in range(3):
A = A + 3
for i in range(8):
for i in range(8):
A = A + 4
A = A + 7
for i in range(5):
A = A + 1
for i in range(8):
A = A + 7
A = A + 8
A = A + 4
A = A + 7
A = A + 6
A = A + 9
A = A + 5
for i in range(3):
A = A + 5
for i in range(9):
A = A + 1
A = A + 7
for i in range(1):
A = A + 8
A = A + 4
for i in range(8):
for i in range(7):
A = A + 2
for i in range(4):
A = A + 6
A = A + 6
for i in range(1):
A = A + 7
A = A + 1
for i in range(9):
for i in range(5):
A = A + 6
A = A + 5
for i in range(7):
A = A + 3
A = A + 6
A = A + 8
for i in range(2):
A = A + 7
A = A + 1
A = A + 9
for i in range(3):
for i in range(3):
A = A + 5
print(A)
| 3.265625 | 3 |
setup.py | nuodb/nuodb-aws-quickstart | 2 | 12794667 | from setuptools import setup
import sys
setup(name='nuodbawsquickstart',
version='1.1.0',
description='Script to deploy a multi-region and multi-instance AWS cluster',
url='http://github.com/nuodb/nuodb-aws-quickstart',
author='<NAME>.',
author_email='<EMAIL>',
#data_files=[('nuodbawsquickstart/templates', ['nuodbawsquickstart/templates/init.py'])],
install_requires=["argparse", "boto", "requests"],
license='BSD licence, see LICENSE',
packages=['nuodbawsquickstart'],
scripts=["bin/nuodb_aws_quickstart.py"],
zip_safe=True)
| 1.304688 | 1 |
pacote2/modulo2.py | renzon/novatec | 0 | 12794668 | def para_float(n):
return float(n)
if __name__ == '__main__':
print(para_float(3))
print(__name__)
| 2.515625 | 3 |
airflow-cluster/base-images/airflow/dist/oauth/airflow_oauth/contrib/auth/backends/generic_oauth.py | BIX-Digital/ods-quickstarters | 0 | 12794669 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from importlib import import_module
import flask_login
from flask import url_for, redirect, request
# Need to expose these downstream
# flake8: noqa: F401
# noinspection PyUnresolvedReferences
from flask_login import current_user, logout_user, login_required, login_user
from flask_oauthlib.client import OAuth
from airflow import models, configuration
from airflow.utils.db import provide_session
from airflow.utils.log.logging_mixin import LoggingMixin
import os, ssl
if (os.environ.get('PYTHONHTTPSVERIFY', '') == '0' and
getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
log = LoggingMixin().log
def get_config_param(param):
return str(configuration.conf.get('oauth', param))
def has_config_param(param):
return configuration.conf.has_option('oauth', param)
class OAuthUser(models.User):
def __init__(self, user):
self.user = user
@property
def is_active(self):
"""Required by flask_login"""
return self.user
@property
def is_authenticated(self):
"""Required by flask_login"""
return self.user
@property
def is_anonymous(self):
"""Required by flask_login"""
return False
def get_id(self):
"""Returns the current user id as required by flask_login"""
return self.user.get_id()
def data_profiling(self):
"""Provides access to data profiling tools"""
return self.user.superuser if self.user else False
def is_superuser(self):
"""Access all the things"""
return self.user.superuser if self.user else False
class AuthenticationError(Exception):
pass
class OAuthBackend(object):
def __init__(self):
self.login_manager = flask_login.LoginManager()
self.login_manager.login_view = 'airflow.login'
self.login_manager.login_message = None
self.flask_app = None
self.oauth = None
self.api_rev = None
def init_app(self, flask_app):
self.flask_app = flask_app
self.login_manager.init_app(self.flask_app)
self.oauth = OAuth(self.flask_app).remote_app(
'oauth',
consumer_key=get_config_param('client_id'),
consumer_secret=get_config_param('client_secret'),
base_url=get_config_param('base_url'),
request_token_params={'scope': [
"user:info",
"user:check-access"
]},
request_token_url=None,
access_token_method=get_config_param('access_token_method'),
access_token_url=get_config_param('access_token_url'),
authorize_url=get_config_param('authorize_url'))
self.login_manager.user_loader(self.load_user)
self.flask_app.add_url_rule(get_config_param('oauth_callback_route'),
'oauth_callback',
self.oauth_callback)
def login(self, request):
log.debug('Redirecting user to OAuth login')
scheme = request.environ['HTTP_X_FORWARDED_PROTO'] \
if 'HTTP_X_FORWARDED_PROTO' in request.environ and request.environ['HTTP_X_FORWARDED_PROTO'] \
else request.scheme if request.scheme \
else None
return self.oauth.authorize(callback=url_for(
'oauth_callback',
_scheme=scheme,
_external=True),
state=request.args.get('next') or request.referrer or None)
def get_user_profile_info(self, access_token):
resp = self.oauth.get(
get_config_param("user_info_url"),
token=(access_token, ''))
if not resp or resp.status != 200:
raise AuthenticationError(
'Failed to fetch user profile, status ({0})'.format(
resp.status if resp else 'None'))
return resp.data
def dict_get(self, dic, key):
keys = key.split(".")
value = dic
for k in keys:
value = value[k]
return value
@provide_session
def load_user(self, userid, session=None):
if not userid or userid == 'None':
return None
user = session.query(models.User).filter(
models.User.id == int(userid)).first()
return OAuthUser(user)
def authorize(self, authorized_response, user_info):
"""
Parameters
----------
authorized_response
Authorized response from OAuth client
user_info: dict
User information response from OAuth client
Returns
-------
(bool, bool)
Whether 1. the user is allowed to access airflow and 2. the user
is a superuser
"""
if has_config_param("oauth_permission_backend"):
permission_backend = import_module(get_config_param("oauth_permission_backend"))
return permission_backend.authorize(self.oauth, authorized_response, user_info)
return True, True
@provide_session
def oauth_callback(self, session=None):
log.debug('OAuth callback called')
next_url = request.args.get('state') or url_for('admin.index')
if get_config_param('base_url') in next_url:
next_url = url_for('admin.index')
resp = self.oauth.authorized_response()
try:
if resp is None:
raise AuthenticationError(
'Null response from OAuth service, denying access.'
)
access_token = resp['access_token']
user_info = self.get_user_profile_info(access_token)
username_key = get_config_param("username_key")
email_key = get_config_param("email_key")
username = self.dict_get(user_info, username_key)
email = self.dict_get(user_info, email_key)
authorized, superuser = self.authorize(resp, user_info)
except AuthenticationError:
return redirect(url_for('airflow.noaccess'))
user = session.query(models.User).filter(
models.User.username == username).first()
if not authorized:
if user:
session.delete(user)
session.commit()
return redirect(url_for('airflow.noaccess'))
if not user:
user = models.User(
username=username,
email=email,
superuser=superuser)
user.superuser = superuser
session.merge(user)
session.commit()
login_user(OAuthUser(user))
session.commit()
return redirect(next_url)
login_manager = OAuthBackend()
def login(self, request):
return login_manager.login(request)
| 1.992188 | 2 |
example_main.py | gappleto97/job-splitter | 0 | 12794670 | from logging import getLogger, Formatter
from logging.handlers import RotatingFileHandler
from typing import Type
from src.framework import get_config, make_config_files, run_jobs, _sleeper
from src.zipped_logs import ZippedRotatingFileHandler
if __name__ == '__main__':
# setup section
make_config_files()
config = get_config()
formatter = Formatter(config['logging']['format'] % ())
log = getLogger()
log.setLevel(config.getint('logging', 'level'))
file_name = config['logging']['file_name']
if config.getboolean('logging', 'auto_zip'):
fh_type: Type[RotatingFileHandler] = ZippedRotatingFileHandler
else:
fh_type = RotatingFileHandler
fh = fh_type(
file_name + '.txt',
mode='w',
encoding='utf-8',
maxBytes=config.getfilesize('logging', 'max_file_size'), # type: ignore # pylint: disable=no-member
backupCount=config.getfilesize('logging', 'backup_count'), # type: ignore # pylint: disable=no-member
)
fh.setLevel(config.getint('logging', 'level'))
fh.setFormatter(formatter)
log.addHandler(fh)
# setup done
try:
run_jobs(_sleeper, [(x, ) for x in range(256)])
# optional teardown
# ...
finally:
# required teardown
log.removeHandler(fh)
fh.flush()
fh.close()
| 2.390625 | 2 |
src/model/drcn.py | saeedizadi/EDSR-PyTorch | 0 | 12794671 | <reponame>saeedizadi/EDSR-PyTorch
import os
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.nn
import torchvision.transforms as transforms
import PIL
def make_model(args, parent=False):
return DRCN(args)
class ConvBlock(torch.nn.Module):
def __init__(self, input_size, output_size, kernel_size=4, stride=2, padding=1, bias=True, activation='relu',
norm='batch'):
super(ConvBlock, self).__init__()
self.conv = torch.nn.Conv2d(input_size, output_size, kernel_size, stride, padding, bias=bias)
self.norm = norm
if self.norm == 'batch':
self.bn = torch.nn.BatchNorm2d(output_size)
elif self.norm == 'instance':
self.bn = torch.nn.InstanceNorm2d(output_size)
self.activation = activation
if self.activation == 'relu':
self.act = torch.nn.ReLU(True)
elif self.activation == 'prelu':
self.act = torch.nn.PReLU()
elif self.activation == 'lrelu':
self.act = torch.nn.LeakyReLU(0.2, True)
elif self.activation == 'tanh':
self.act = torch.nn.Tanh()
elif self.activation == 'sigmoid':
self.act = torch.nn.Sigmoid()
def forward(self, x):
if self.norm is not None:
out = self.bn(self.conv(x))
else:
out = self.conv(x)
if self.activation is not None:
return self.act(out)
else:
return out
class DRCN(torch.nn.Module):
def __init__(self, args):
super(DRCN, self).__init__()
n_colors = args.n_colors
n_feats = args.n_feats
num_recursions = 16
self.num_recursions = num_recursions
# embedding layer
self.embedding_layer = nn.Sequential(
ConvBlock(n_colors, n_feats, 3, 1, 1, norm=None),
ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None)
)
# conv block of inference layer
self.conv_block = ConvBlock(n_feats, n_feats, 3, 1, 1, norm=None)
# reconstruction layer
self.reconstruction_layer = nn.Sequential(
ConvBlock(n_feats, n_feats, 3, 1, 1, activation=None, norm=None),
ConvBlock(n_feats, n_colors, 3, 1, 1, activation=None, norm=None)
)
# initial w
self.w_init = torch.ones(self.num_recursions) / self.num_recursions
self.w = Variable(self.w_init.cuda(), requires_grad=True)
def forward(self, x):
# embedding layer
h0 = self.embedding_layer(x)
# recursions
h = [h0]
for d in range(self.num_recursions):
h.append(self.conv_block(h[d]))
y_d_ = []
out_sum = 0
for d in range(self.num_recursions):
y_d_.append(self.reconstruction_layer(h[d+1]))
out_sum += torch.mul(y_d_[d], self.w[d])
out_sum = torch.mul(out_sum, 1.0 / (torch.sum(self.w)))
# skip connection
final_out = torch.add(out_sum, x)
return final_out
| 2.859375 | 3 |
src/nmrezman/phase02/__init__.py | mozzilab/NM_Radiology_AI | 1 | 12794672 | # %%
from . import (
train,
classify,
)
| 1.171875 | 1 |
gym_ext/envs/__init__.py | DaphneAntotsiou/Adversarial-Imitation-Learning-with-Trajectorial-Augmentation-and-Correction | 0 | 12794673 | __author__ = 'DafniAntotsiou'
from gym_ext.envs.inverted_pendulum_ext import InvertedPendulumEnvExt
from gym_ext.envs.HalfCheetah_ext import HalfCheetahEnvExt
from gym.envs.mujoco.mujoco_env import MujocoEnv
import mujoco_py
from mapping.mjviewerext import MjViewerExt as MjViewer
def _get_viewer(self, mode):
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == 'human':
self.viewer = MjViewer(self.sim, self.name if hasattr(self, 'name') else None)
elif mode == 'rgb_array' or mode == 'depth_array':
self.viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
setattr(MujocoEnv, '_get_viewer', _get_viewer)
| 1.984375 | 2 |
data_wrangling/legacy_code/gsd_processor.py | alexmitchell/file_manipulations | 0 | 12794674 | <gh_stars>0
#!/usr/bin/env python3
import os
import numpy as np
import pandas as pd
from time import asctime
from omnipickle_manager import OmnipickleManager
import global_settings as settings
from helpyr import data_loading
from helpyr import logger
#from helpyr import crawler
from helpyr import helpyr_misc as hm
# currently set to handed 2m gsd
# Doing it the quick way....
# Search for: self.gsd_txt_filepaths =
class GSDProcessor:
""" Collects combines the distributed GrainSize.txt files. Updates the omnipickle."""
def __init__(self):
self.root = settings.cart_data_dir
self.pickle_destination = settings.cart_pickles_dir
self.log_filepath = f"{settings.log_dir}/gsd_processor.txt"
# Start up logger
self.logger = logger.Logger(self.log_filepath, default_verbose=True)
self.logger.write(["Begin GSD Processor output", asctime()])
# Start up loader
self.loader = data_loading.DataLoader(
self.pickle_destination, logger=self.logger)
# Reload omnimanager
self.omnimanager = OmnipickleManager(self.logger)
self.omnimanager.restore()
self.logger.write("Updating experiment definitions")
self.omnimanager.update_tree_definitions()
self.gsd_txt_filepaths = []
def run(self):
indent_function = self.logger.run_indented_function
indent_function(self.find_gsd_txt_files,
before_msg="Finding GrainSize files", after_msg="Finished!")
indent_function(self.load_data,
before_msg="Loading and merging data", after_msg="Finished!")
indent_function(self.update_omnipickle,
before_msg="Updating omnipickle", after_msg="Finished!")
indent_function(self.omnimanager.store,
kwargs={'overwrite':{'gsd':True}},
before_msg="Storing omnipickle", after_msg="Finished!")
#print(self.omnimanager.experiments['1B'].periods[35].gsd_picklepath)
def find_gsd_txt_files(self):
# Find all the GrainSize.txt files
self.logger.write("")
#crawler = crawler.Crawler(logger=self.logger)
#crawler.set_root(self.root)
#self.gsd_txt_filepaths = crawler.get_target_files(
# "??-*L-t??-8m_sta-*_GrainSize.txt", verbose_file_list=False)
# example filename: 3B-r87L-t60-8m_sta-2000_GrainSize.txt
#crawler.end()
#gsd_list_path = '/home/alex/feed-timing/code/matlab/supporting-files/2m_2B_final_gsd_list.txt'
gsd_list_paths = [
'/home/alex/feed-timing/code/matlab/supporting-files/8m_final_gsd_list.txt',
'/home/alex/feed-timing/code/matlab/supporting-files/2m_final_gsd_list.txt',
#'/home/alex/feed-timing/data/cart/gsd_from_backups'
]
for gsd_list_path in gsd_list_paths:
with open(gsd_list_path) as f:
self.logger.write(f'Reading from {gsd_list_path}')
self.gsd_txt_filepaths.extend(f.read().splitlines())
def load_data(self):
# Load all the GrainSize.txt files and combine
gsd_txt_kwargs = {
'index_col' : None,
'header' : 0,
'skiprows' : [1],
}
run_data_frames = []
for gsd_filepath in self.gsd_txt_filepaths:
if not os.path.isfile(gsd_filepath):
self.logger.write(f"Missing file {gsd_filepath}")
continue
# Pull apart provided filepath to GrainSize.txt to get run info
gsd_dir, gsd_name = hm.nsplit(gsd_filepath, 1)
gsd_name = gsd_name.split('.', 1)[0]
scan_name, sta_str, _ = gsd_name.split('_')
exp_code, step, period, scan_length = scan_name.split('-')
# Calculate experiment time based on step and period
is_falling = step[0] == 'f'
discharge = int(step[1:-1])
period_time = int(period[1:])
discharge_order = [50, 62, 75, 87, 100]
discharge_index = discharge_order.index(discharge)
n_discharges = len(discharge_order)
calc_time = lambda l, d, t: t + 60*(d + 2*l*(n_discharges-1-d))
exp_time = calc_time(is_falling, discharge_index, period_time)
# Generate name to grain size fraction file
gsf_name = f"{scan_name}_{sta_str}_GrainSizeFractions.txt"
gsf_filepath = os.path.join(gsd_dir, gsf_name)
# Both data files are read exactly the same, do in a loop
run_data = pd.DataFrame()
for filepath in [gsd_filepath, gsf_filepath]:
# Load data and set the index label
data = self.loader.load_txt(filepath,
gsd_txt_kwargs, add_path=False)
#data.index = run_multiindex
run_data = pd.concat([run_data, data], axis=1)
# Add columns that will be later used for a multiindex
index_names = ['exp_code', 'step', 'period', 'sta_str', 'scan_length']
index_vals = [exp_code, step, period, sta_str, scan_length]
var_names = index_names + ['scan_name', 'exp_time']
var_vals = index_vals + [scan_name, exp_time]
for var_name, var_val in zip(var_names, var_vals):
run_data[var_name] = var_val
run_data_frames.append(run_data)
# Add data to combined data
self.all_data = pd.concat(run_data_frames, ignore_index=True)
self.all_data.set_index(index_names, inplace=True)
self.all_data.sort_index(inplace=True)
# Convert size classes from string to float to make consistent with
# sieve data
col_conv = {
'0.5' : 0.5 , '0.71' : 0.71, '1' : 1 , '1.4' : 1.41,
'2' : 2 , '2.8' : 2.83, '4' : 4 , '5.6' : 5.66,
'8' : 8 , '11.3' : 11.2, '16' : 16 , '22.6' : 22.3,
'32' : 32 ,
}
self.all_data.columns = [col_conv[c] if c in col_conv else c \
for c in self.all_data.columns]
def update_omnipickle(self):
# Add gsd data to omnipickle
hm.ensure_dir_exists(settings.cart_pickles_dir)
self.omnimanager.add_gsd_data(settings.cart_pickles_dir, self.all_data)
if __name__ == "__main__":
# Run the script
gsd_processor = GSDProcessor()
gsd_processor.run()
| 2.296875 | 2 |
nabu/postprocessing/data_reader.py | Darleen2019/Nabu-MSSS | 18 | 12794675 | """@file data_reader.py
contains a reader class for data"""
from six.moves import configparser
from nabu.processing.processors import processor_factory
import gzip
import os
class DataReader(object):
"""the data reader class.
a reader for data. Data is not stored in tensorflow format
as was done in data.py. Data is returned in numpy format
and is accessed by indexing instead of looping over all
data. It is currently only used in postprocessing.
"""
def __init__(self, dataconfs, segment_lengths=['full']):
"""DataReader constructor
Args:
dataconfs: the database configuration
segment_lengths: A list containing the desired lengths of segments.
Possibly multiple segment lengths
"""
if len(segment_lengths) > 1:
print(
				'Warning: Not yet implemented __call__ correctly for multiple segments. The returned utt_info does not ' \
				'contain the _part suffix and only 1 processed segment is returned.')
self.segment_lengths = segment_lengths
self.processors = []
self.start_index_set = [0]
self.datafile_lines = []
for dataconf in dataconfs:
# read the processor config
proc_cfg_file = dataconf['processor_config']
if not os.path.isfile(proc_cfg_file):
raise BaseException('%s does not exist' % proc_cfg_file)
parsed_proc_cfg = configparser.ConfigParser()
parsed_proc_cfg.read(proc_cfg_file)
proc_cfg = dict(parsed_proc_cfg.items('processor'))
# create a processor
self.processors.append(processor_factory.factory(proc_cfg['processor'])(proc_cfg, self.segment_lengths))
# get the datafiles lines
datafile = dataconf['datafiles'] # TODO: for the moment expecting only 1 file, but this also makes sense?
if datafile[-3:] == '.gz':
open_fn = gzip.open
else:
open_fn = open
f = open_fn(datafile)
datalines = f.readlines()
self.start_index_set.append(self.start_index_set[-1]+len(datalines))
self.datafile_lines.extend(datalines)
def __call__(self, list_pos):
"""read data from the datafile list
Args:
list_pos: position on the datafile list to read
Returns:
The processed data as a numpy array"""
line = self.datafile_lines[list_pos]
for ind, start_index in enumerate(self.start_index_set):
if start_index > list_pos:
processor = self.processors[ind-1]
break
# split the name and the data line
splitline = line.strip().split(' ')
utt_name = splitline[0]
dataline = ' '.join(splitline[1:])
# process the dataline
processed, utt_info = processor(dataline)
utt_info['utt_name'] = utt_name
# Currently only returning 1 processed!
processed = processed[self.segment_lengths[0]][0]
return processed, utt_info
def get_name_for_pos(self, list_pos):
""" get the name of the utterance for the given position from the datafile list
Args:
list_pos: position on the datafile list to read
Returns:
The name of the utterance"""
line = self.datafile_lines[list_pos]
# split the name and the data line
splitline = line.strip().split(' ')
utt_name = splitline[0]
return utt_name
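
# Illustrative usage (section and option names are assumed, not taken from a real config):
#
#   dataconf = dict(parsed_database_cfg.items('testfeatures'))
#   reader = DataReader([dataconf])
#   features, utt_info = reader(0)          # numpy data for the first utterance
#   utt_name = reader.get_name_for_pos(0)   # just its name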
| 2.890625 | 3 |
tests/unit/models/test_title.py | kmarekspartz/library | 0 | 12794676 | from unittest import TestCase
from library.models.title import Title
from tests.unit.models.test_base import BaseTest
class TestTitle(BaseTest, TestCase):
cls = Title
def test_class_has_isbn(self):
"""
The Title model should have an ISBN.
"""
self.assertTrue(hasattr(self.cls, 'isbn'))
def test_instance_has_isbn(self):
"""
Each Title should have an ISBN.
"""
self.assertTrue(hasattr(self.instance, 'isbn'))
def test_class_has_name(self):
"""
The Title model should have a name.
"""
self.assertTrue(hasattr(self.cls, 'name'))
def test_instance_has_name(self):
"""
Each Title should have a name.
"""
self.assertTrue(hasattr(self.instance, 'name'))
def test_class_has_title_from_isbn(self):
"""
The Title model should have a constructor which takes an ISBN.
"""
self.assertTrue(hasattr(self.cls, 'title_from_isbn'))
| 3.53125 | 4 |
tfn/tools/jobs/keras_job.py | UPEIChemistry/TFN_Layers | 2 | 12794677 | from pathlib import Path
from typing import Tuple
from sacred.run import Run
import tensorflow as tf
from tensorflow.keras.callbacks import ReduceLROnPlateau, TensorBoard
from tensorflow.keras.models import Model
from .job import Job
from ..ingredients import (
get_data_loader,
get_builder,
)
from ..loaders import DataLoader
class KerasJob(Job):
def _main(
self,
run: Run,
seed: int,
fitable: Model = None,
fitable_config: dict = None,
loader_config: dict = None,
):
"""
        Private method containing the actual work completed by the job. Implements a default
        workflow for a basic keras/kerastuner-style job.
:param run: sacred.Run object. See sacred documentation for more details on utility.
:param fitable: Optional tensorflow.keras.Model or kerastuner.Tuner object.
Model-like which contains a fit method.
:param fitable_config: Optional dict. Contains data which can be used to create a new
fitable instance.
:param loader_config: Optional dict. Contains data which can be used to create a new
DataLoader instance.
"""
loader, data = self._load_data(loader_config)
fitable = fitable or self._load_fitable(loader, fitable_config)
fitable = self._fit(run, fitable, data)
if self.exp_config["run_config"]["test"]:
self._test_fitable(run, fitable, data[-1])
if self.exp_config["run_config"]["save_model"]:
self._save_fitable(run, fitable)
return fitable
def _load_data(self, config: dict = None) -> Tuple[DataLoader, Tuple]:
"""
Obtains a loader using ingredients.get_loader and self.exp_config['loader_config']
:param config: Optional dict. config passed to get_data_loader to obtain specific
data_loader class.
:return: Loader object and the data returned by that Loader's get_data method.
"""
config = config or self.exp_config["loader_config"]
loader = get_data_loader(**config)
if self.exp_config["run_config"]["select_few"]:
data = loader.few_examples(**config["load_kwargs"])
else:
data = loader.load_data(**config["load_kwargs"])
return loader, data
def _load_fitable(self, loader: DataLoader, fitable_config: dict = None) -> Model:
"""
Defines and compiles a fitable (keras.model or keras_tuner.tuner) which implements
a 'fit' method. This method calls either get_builder, or get_hyper_factory, depending on
        which type of fitable is being loaded.
:return: Model or Tuner object.
"""
fitable_config = fitable_config or self.exp_config["builder_config"]
conf = dict(
**fitable_config,
max_z=loader.max_z,
num_points=loader.num_points,
mu=loader.mu,
sigma=loader.sigma,
)
builder = get_builder(**conf)
run_config = self.exp_config["run_config"]
compile_kwargs = dict(
loss=run_config["loss"],
loss_weights=run_config["loss_weights"],
optimizer=run_config["optimizer"],
metrics=run_config["metrics"],
run_eagerly=run_config["run_eagerly"],
)
if run_config["use_strategy"]:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
model = builder.get_model()
model.compile(**compile_kwargs)
else:
model = builder.get_model()
model.compile(**compile_kwargs)
return model
def _fit(
self, run: Run, fitable: Model, data: tuple, callbacks: list = None,
) -> Model:
"""
:param run: sacred.Run object. See sacred documentation for details on utility.
:param fitable: tensorflow.keras.Model object.
:param data: tuple. train, validation, and test data in the form (train, val, test),
where train is
the tuple (x_train, y_train).
:param callbacks: Optional list. List of tensorflow.keras.Callback objects to pass to
fitable.fit method.
:return: tensorflow.keras.Model object.
"""
tensorboard_directory = self.exp_config["run_config"]["root_dir"] / "logs"
(x_train, y_train), val, _ = data
callbacks = callbacks or []
if self.exp_config["run_config"]["use_default_callbacks"]:
callbacks.extend(
[
TensorBoard(
**dict(
**self.exp_config["tb_config"],
log_dir=tensorboard_directory,
)
),
ReduceLROnPlateau(**self.exp_config["lr_config"]),
]
)
kwargs = dict(
x=x_train,
y=y_train,
epochs=self.exp_config["run_config"]["epochs"],
batch_size=self.exp_config["run_config"]["batch_size"],
validation_data=val,
class_weight=self.exp_config["run_config"]["class_weight"],
callbacks=callbacks,
verbose=self.exp_config["run_config"]["fit_verbosity"],
)
fitable.fit(**kwargs)
return fitable
def _test_fitable(self, run: Run, fitable: Model, test_data: tuple) -> float:
"""
:param fitable: tensorflow.keras.Model object.
:param test_data: tuple. contains (x_test, y_test).
:return: float. Scalar test_loss value.
"""
if test_data is None:
return 0.0
x_test, y_test = test_data
loss = fitable.evaluate(x=x_test, y=y_test, verbose=0)
print(f"Test split results: {loss}")
return loss
def _save_fitable(self, run: Run, fitable: Model):
"""
:param run: sacred.Run object. see sacred documentation for more details on utility.
:param fitable: tensorflow.keras.Model object.
"""
path = self.exp_config["run_config"]["model_path"]
if self.exp_config["run_config"]["save_verbosity"] > 0:
fitable.summary()
fitable.save(self.exp_config["run_config"]["model_path"])
run.add_artifact(path)
def _new_model_path(self, name: str):
model_path = Path(self.exp_config["run_config"]["model_path"]).parent / name
self.exp_config["run_config"]["model_path"] = model_path
return model_path
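
# Minimal sketch of how a KerasJob is typically driven (the sacred Run object and the
# exp_config contents are assumptions inferred from the signatures above, not verified):
#
#   job = KerasJob(exp_config)                        # or a subclass
#   model = job._main(run, seed=0)                    # load data, build, fit, test, save
#   model = job._main(run, seed=0, fitable=model)     # or reuse an existing fitable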
| 2.296875 | 2 |
src/data_visualiazation.py | RikiTikkiTavi/Dota2-Winner-Prediction | 1 | 12794678 | from sklearn.manifold import TSNE
import pandas as pd
import matplotlib.pyplot as plt
def visualize(data):
data_embedded = TSNE(n_components=2).fit_transform(data)
print(data_embedded)
plt.plot(data_embedded)
plt.show()
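
# Example call (illustrative; numpy import assumed): `data` just needs to be an
# (n_samples, n_features) array-like accepted by TSNE.fit_transform, e.g.
#   visualize(np.random.rand(100, 10))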
| 2.953125 | 3 |
subs2cia/pickers.py | mdVNwyRbm/subs2cia | 53 | 12794679 | from subs2cia.sources import Stream
import pycountry
import logging
def picker(streams: [Stream], target_lang: str = None, forced_stream: int = None):
r"""
Returns streams by priority. Streams which are not part of a container are preferred first,
followed by manually specified stream indices, then streams which match a specified language, finally followed
by the remaining streams.
:param streams: List of Stream objects
:param target_lang: Target language
:param forced_stream:
:return:
"""
for s in streams:
if s.is_standalone():
yield s
if forced_stream is not None:
yield streams[forced_stream]
if target_lang is not None:
target_lang = pycountry.languages.lookup(target_lang)
target_lang = target_lang.alpha_3
for s in streams:
if s.is_standalone():
continue
if s.get_language() == target_lang:
yield s
for s in streams:
if s.is_standalone():
continue
        if s.get_language() == target_lang:
continue
yield s
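
# Illustrative use (Stream construction is elided; see subs2cia.sources):
#
#   for candidate in picker(streams, target_lang='eng'):
#       ...  # try each stream in priority order, stop at the first that works
#
# Because picker() is a generator, lower-priority streams are only considered
# if the caller keeps iterating.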
| 2.671875 | 3 |
setwallpaper/__init__.py | tinaxd/setwallpaper | 0 | 12794680 | from .wallpaper import set_wallpaper
| 1.117188 | 1 |
pypkg/utils.py | movermeyer/pypkg | 0 | 12794681 | <reponame>movermeyer/pypkg
# -*- coding: utf-8 -*-
"""
Common utilities.
"""
def fancy(message):
"""Print message with surrounding ~'s."""
return "~{0}~".format(message)
| 1.953125 | 2 |
compose_logger/log.py | demonkit/toolbox | 0 | 12794682 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
import os
from cloghandler import ConcurrentRotatingFileHandler
import logconf
def compose_logger(name, log_file):
logger = logging.Logger(name)
hdlr = ConcurrentRotatingFileHandler(
        filename=os.path.join(logconf.LOG_FILE_DIR, log_file),  # assumes LOG_FILE_DIR is defined in logconf
maxBytes=logconf.MAX_BYTES, backupCount=logconf.BACK_UP_COUNT)
formatter = logging.Formatter(logconf.VERBOSE_FORMATTER)
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
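
# Example usage (file name is illustrative; the handler writes into the configured log directory):
#   logger = compose_logger("worker", "worker.log")
#   logger.info("started")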
| 2.5 | 2 |
neurolab/optimization/metric/crossent.py | udday2014/HebbianLearning | 6 | 12794683 | <reponame>udday2014/HebbianLearning
import torch.nn as nn
from ..optimization import MetricManager
from neurolab import params as P
# Wrapper around Pytorch CrossEntropyLoss criterion
class CrossEntMetric:
def __init__(self):
self.crossent_loss = nn.CrossEntropyLoss()
def __call__(self, outputs, targets):
if isinstance(outputs, dict): outputs = outputs[P.KEY_CLASS_SCORES]
if isinstance(targets, dict): targets = targets[P.KEY_LABEL_TARGETS]
return self.crossent_loss(outputs, targets)
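
# Quick usage sketch (shapes assumed): raw class scores of shape (N, C) and integer
# targets of shape (N,) behave exactly like nn.CrossEntropyLoss:
#   metric = CrossEntMetric()
#   loss = metric(torch.randn(8, 10), torch.randint(0, 10, (8,)))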
# Criterion manager for cross entropy loss
class CrossEntMetricManager(MetricManager):
def __init__(self, config):
super().__init__(config)
def get_metric(self):
return CrossEntMetric()
def higher_is_better(self):
return False
def get_name(self):
return "cross-entropy" | 2.9375 | 3 |
index_builder.py | ImadEddineBek/Celeb-recognition-browser-extension | 0 | 12794684 | <filename>index_builder.py
import argparse
import random, time, sys
from configparser import ConfigParser
import boto3
from loguru import logger
from tqdm import tqdm_notebook
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import distance
import pickle
from models.mtcnn import MTCNN
from models.inception_resnet_v1 import InceptionResnetV1
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
import numpy as np
import pandas as pd
import os
import pickle
from embeddings import Embedder
from index import Node
from flask import Flask, request, jsonify, Response
from flask_cors import CORS
from waitress import serve
config = ConfigParser()
config.read('config.ini')
FACE_THRESHOLD = config.getfloat('main', 'face_threshold')
METHOD = config.get('main', 'method')
CUDA = config.getboolean('main', 'cuda')
DEBUG_ENV = config.getboolean('main', 'debug')
EMBEDDING_SIZE = 512
BUCKET_NAME = 'info-ret-final-project'
REMOTE_DIRECTORY_NAME = 'data'
# DEBUG_ENV = bool(os.getenv("DEBUG_ENV", False))
# PORT = int(os.getenv("PORT", 5001))
# INDEX_TYPE = os.getenv("INDEX_TYPE", 'celebs.index')
def collate_fn(x):
return x[0]
def downloadDirectoryFroms3(bucketName, remoteDirectoryName):
s3_resource = boto3.resource('s3', aws_access_key_id='<KEY>',
aws_secret_access_key='<KEY>')
bucket = s3_resource.Bucket(bucketName)
for object in bucket.objects.filter(Prefix=remoteDirectoryName):
if not os.path.exists(os.path.dirname(object.key)):
os.makedirs(os.path.dirname(object.key))
bucket.download_file(object.key, object.key)
def build_kd_tree(dataset_folder):
logger.info(dataset_folder.split('/')[-1])
dataset = datasets.ImageFolder(dataset_folder)
logger.info({i: c for c, i in dataset.class_to_idx.items()})
dataset.idx_to_class = {i: c for c, i in dataset.class_to_idx.items()}
loader = DataLoader(dataset, collate_fn=collate_fn, num_workers=4)
embder = Embedder()
R = []
for x, y in loader:
embedding = embder.embed_one(x)
if embedding is not None:
embedding = embedding[0]
R.append((embedding, y))
kdtree = Node(K=EMBEDDING_SIZE).build_kd_tree(R)
with open('indexes/' + dataset_folder.split('/')[-1] + '.idx_to_class', 'wb') as idx_to_class_file:
pickle.dump(dataset.idx_to_class, idx_to_class_file)
with open('indexes/' + dataset_folder.split('/')[-1] + '.index', 'wb') as index_file:
pickle.dump(kdtree, index_file)
with open('indexes/' + dataset_folder.split('/')[-1] + '.data', 'wb') as data_file:
pickle.dump(R, data_file)
def build_indexes():
downloadDirectoryFroms3(BUCKET_NAME, REMOTE_DIRECTORY_NAME)
sub = [os.path.join(REMOTE_DIRECTORY_NAME, o) for o in os.listdir(REMOTE_DIRECTORY_NAME)
if os.path.isdir(os.path.join(REMOTE_DIRECTORY_NAME, o))]
for dataset_folder in sub:
build_kd_tree(dataset_folder)
def get_index(index_type):
if index_type == 'celebs.index':
index_type = 'added_.index'
with open('indexes/' + index_type, 'rb') as index_file:
kdtree = pickle.load(index_file)
with open('indexes/' + index_type.split('.')[0] + '.idx_to_class', 'rb') as idx_to_class_file:
idx_to_class = pickle.load(idx_to_class_file)
with open('indexes/' + index_type.split('.')[0] + '.data', 'rb') as data_file:
data = pickle.load(data_file)
logger.info(idx_to_class)
return kdtree, idx_to_class, data
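
# How the three index artifacts fit together (illustrative; file names assumed):
#   kdtree, idx_to_class, data = get_index('celebs.index')
#   emb = Embedder().embed_one(image)[0]              # 512-d face embedding
#   name = idx_to_class[kdtree.get_nn(emb, 1)[0][1]]  # label of the nearest neighbour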
app = Flask(__name__)
@app.route("/who_brute", methods=["GET"])
def get_brute_force():
embedding = request.args.get('embedding')
embedding = embedding.replace('[', '')
embedding = embedding.replace(']', '')
embedding = np.fromstring(embedding, dtype=float, sep=', ')
closest = 0
dist = np.inf
for emb, y in data:
cur = np.linalg.norm(emb - embedding)
if cur < dist:
dist = cur
closest = y
    # compare the nearest-neighbour distance (not the class index) against the threshold
    if dist > 1 + FACE_THRESHOLD:
        logger.info("Unknown face")
        return "Unknown face. Similar to {}".format(idx_to_class[closest])
logger.info(idx_to_class[closest] + ' ' + str(dist))
return idx_to_class[closest]
@app.route("/who_tree", methods=["GET"])
def get_name():
embedding = request.args.get('embedding')
embedding = embedding.replace('[', '')
embedding = embedding.replace(']', '')
embedding = np.fromstring(embedding, dtype=float, sep=', ')
return idx_to_class[kdtree.get_nn(embedding, 1)[0][1]]
CORS(app)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Index which can be used to get names.')
parser.add_argument('--port', type=int, default=5000, help='port number')
parser.add_argument('--index_type', type=str, default='celebs.index', help='type of index')
# parser.add_argument('--sum', dest='accumulate', action='store_const',
# const=sum, default=max,
# help='sum the integers (default: find the max)')
args = parser.parse_args()
PORT = args.port
INDEX_TYPE = args.index_type
kdtree, idx_to_class, data = get_index(INDEX_TYPE)
# print(PORT, INDEX_TYPE)
if not DEBUG_ENV:
serve(app, host='0.0.0.0', port=PORT)
else:
app.run(debug=True, host='0.0.0.0', port=PORT)
| 1.71875 | 2 |
weborquesta/concerts/admin.py | miguel-rojorev/weborquesta | 0 | 12794685 | from django.contrib import admin
from .models import Concert
# Register your models here.
class ConcertAdmin(admin.ModelAdmin):
readonly_fields = ('created', 'updated')
admin.site.register(Concert, ConcertAdmin) | 1.664063 | 2 |
K-Nearest-Neighbors.py | nightheronry/Basic-ML-Algorithm-Reimplementations | 0 | 12794686 | <reponame>nightheronry/Basic-ML-Algorithm-Reimplementations
from __future__ import division
import numpy as np
import math
from operator import itemgetter
import sys
#This program is just a rough reimplementation of k nearest neighbors in Python. It's not particularly
#optimized in any way, but it does give a sense of how the algorithm chooses the closest
#neighbors to a particular test vector, and then how the output class is determined.
#Distance between 2 points
def distanceBetween(point1,point2):
return math.pow((math.pow(point1[0] - point2[0],2) + math.pow(point1[1] - point2[1],2)),0.5)
#Visualize these data points on an 4x4 xy graph
Xtrain = [[3,1],[3,2],[1,2],[1,3],[4,4],[5,5],[5,7],[7,5],[8,8]]
Ytrain = [2,1,2,1,1,0,0,0,1]
#Hyperparamters
numTrainExamples = len(Xtrain)
numNeighbors = 5
numClasses = 3 #Classes have to be labeled starting from 0...numCLasses - 1
Xtest = [2,1.2]
minDistance = sys.maxint
distanceAndLocation = []
for x in range(0,numTrainExamples):
distance = distanceBetween(Xtrain[x],Xtest)
distanceAndLocation.append([Xtrain[x], distance, Ytrain[x]])
distanceAndLocation = sorted(distanceAndLocation, key=itemgetter(1))
if len(distanceAndLocation) >= numNeighbors:
classCount = np.zeros(numClasses)
for i in range(0,numNeighbors):
temp = distanceAndLocation[i]
classCount[temp[2]] = classCount[temp[2]] + 1
maxCount = 0
index = 0
for i in range(0,len(classCount)):
if (classCount[i] > maxCount):
maxCount = classCount[i]
index = i
print classCount
print index
else:
print 'Number of points less than number of neighbors'
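
# Sanity check (illustrative): distanceBetween([0, 0], [3, 4]) is 5.0, and with
# numNeighbors = 5 the predicted class for Xtest is simply the majority label
# among the five nearest training points.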
| 3.53125 | 4 |
priv/dasherl_router.py | zgbjgg/dasherl | 6 | 12794687 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The code in this py file is separated because there are
# some errors using the interface with the main app.
# imports from erlport
from erlport.erlterms import Atom
from erlport.erlang import call
def render_layout(path):
"""Render a layout from dasherl erlang side."""
    # simply call the erlang module and ask it to render that path
layout = call(Atom("dasherl_router"), Atom("render"), [path])
    # the route may not exist, so validate the response
if layout == 'no_such_layout':
return None
else:
return layout
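
# Illustrative call from the Python side (the path is hypothetical; real calls
# normally come from Erlang via erlport):
#   layout = render_layout("/dashboard")
#   if layout is None:
#       ...  # the route is not registered in dasherl_router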
| 2.1875 | 2 |
DailyProgrammer/DP20130712C.py | DayGitH/Python-Challenges | 2 | 12794688 | <reponame>DayGitH/Python-Challenges
"""
[07/12/13] Challenge #126 [Hard] Not-So-Normal Triangle Search
https://www.reddit.com/r/dailyprogrammer/comments/1i65z6/071213_challenge_126_hard_notsonormal_triangle/
# [](#HardIcon) *(Hard)*: Not-So-Normal Triangle Search
A three-dimensional triangle can be defined with three points in 3D space: one for each corner. One can compute the
[surface-normal](http://en.wikipedia.org/wiki/Normal_(geometry\)) of this triangle by using the three points to compute
the [cross-product](http://en.wikipedia.org/wiki/Cross_product).
You will be given a set of N points, such that N is greater than or equal to 3. Your goal is to find the maximum set of
non-intersecting triangles that can be constructed with these N points (points may be shared between triangles) such
that this set's average surface normal is as close to the given vector's direction as possible.
"Closeness" between the average surface normal and target vector is defined as minimizing for the smallest angle
between the two (as computed through the [dot-product](http://en.wikipedia.org/wiki/Dot_product) ). Though each
triangle has two surface normals (one for each of the two sides), we don't care about which one you choose: just make
sure that when printing the results, you respect the [right-hand rule](http://en.wikipedia.org/wiki/Right-hand_rule)
for consistency. At **minimum**, this set must match the target vector with less than 10 degrees of difference.
*Original author: /u/nint22. This challenge is a little more math-heavy than usual, but don't worry: the math isn't
hard, and Wikipedia has all the formulas you'll need. Triangle-triangle intersection will be the most tricky part!*
# Formal Inputs & Outputs
## Input Description
You will be given an integer N which represents the N-following lines, each being a 3D point in space. Each line has
three [Real-numbers](https://en.wikipedia.org/wiki/Real_number) that are space -delimited. The last line, which will be
line N+1, is the target vector that you are trying to align-with: it is also represented as three space-delimited
Real-numbers.
## Output Description
Find the largest set of triangles whose average surface normals match the target vector direction within at minimum 10
degrees. Print the result as one triangle per line, where a triangle is defined as the three point indices used. If no
set is found, print "No valid result found".
# Sample Inputs & Outputs
## Sample Input
5
0.6652 -0.1405 0.7143
0.2223 0.3001 0.7125
-0.9931 0.9613 0.0669
0.0665 0.6426 -0.4931
-0.1100 -0.3525 0.3548
0.577 -0.577 0.577
## Sample Output
**The author is still working on a solution to generate some results with; first person to post good demo data gets a
+1 gold medal! The following results are "bad"/"faked", and are only examples of "valid output format".**
0 1 2
1 4 2
"""
def main():
pass
if __name__ == "__main__":
main()
| 3.765625 | 4 |
lintcode/python/Q0582_Word_Break_II.py | lisuizhe/algorithm | 2 | 12794689 | class Solution:
"""
@param: s: A string
@param: wordDict: A set of words.
@return: All possible sentences.
"""
def wordBreak(self, s, wordDict):
# write your code here
return self._recursiveWordBreak(s, wordDict, {})
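        # Illustrative example: wordBreak("lintcode", {"de", "ding", "co", "code", "lint"})
        # returns ["lint code", "lint co de"] (order depends on wordDict iteration);
        # memo caches results per suffix of s so each suffix is expanded only once.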
def _recursiveWordBreak(self, s, wordDict, memo):
if s in memo:
return memo[s]
if not s:
return []
res = []
for word in wordDict:
if not word or not s.startswith(word):
continue
if len(word) == len(s):
res.append(word)
else:
resultOfTheRest = self._recursiveWordBreak(s[len(word):], wordDict, memo)
for item in resultOfTheRest:
item = word + ' ' + item
res.append(item)
memo[s] = res
return res | 3.765625 | 4 |
tests/test_ingress.py | tomaslaz/EduNotice | 0 | 12794690 | """
Test ingress.py module
"""
import os
import pandas as pd
from sqlalchemy import create_engine
from edunotice.ingress import (
_update_courses,
_update_labs,
_update_subscriptions,
_update_details,
update_edu_data,
)
from edunotice.constants import (
CONST_TEST_DIR_DATA,
CONST_TEST1_FILENAME,
CONST_TEST2_FILENAME,
SQL_CONNECTION_STRING,
SQL_TEST_DBNAME1,
)
# wrong dataframe
wrong_df = pd.DataFrame(
{
"name": ["Jason", "Molly", "Tina", "Jake", "Amy"],
"year": [2012, 2012, 2013, 2014, 2014],
"reports": [4, 24, 31, 2, 3],
},
index=["Cochice", "Pima", "<NAME>", "Maricopa", "Yuma"],
)
# good data
file_path1 = os.path.join(CONST_TEST_DIR_DATA, CONST_TEST1_FILENAME)
eduhub_df1 = pd.read_csv(file_path1)
ENGINE = create_engine("%s/%s" % (SQL_CONNECTION_STRING, SQL_TEST_DBNAME1))
def test_update_courses():
"""
tests ingress._update_courses routine
"""
# wrong dataframe
success, error, _ = _update_courses(ENGINE, wrong_df)
assert success is False, error
# good data
success, error, course_dict = _update_courses(ENGINE, eduhub_df1)
assert success, error
assert len(course_dict) == 2
success, error, course_dict = _update_courses(ENGINE, eduhub_df1)
assert success, error
assert len(course_dict) == 2
def test_update_labs():
"""
tests ingress._update_labs routine
"""
# getting the courses
success, error, course_dict = _update_courses(ENGINE, eduhub_df1)
assert success, error
assert len(course_dict) == 2
# wrong dataframe
success, error, _ = _update_labs(ENGINE, wrong_df, course_dict)
assert success is False, error
# good data
success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict)
assert success, error
assert len(lab_dict) == 2
success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict)
assert success, error
assert len(lab_dict) == 2
def test_update_subscriptions():
"""
tests ingress._update_subscriptions routine
"""
# wrong dataframe
success, error, _ = _update_subscriptions(ENGINE, wrong_df)
assert success is False, error
# good data
success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1)
assert success, error
assert len(sub_dict) == 2
success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1)
assert success, error
assert len(sub_dict) == 2
def test_update_details_1():
"""
tests ingress._update_details routine
2 new subscriptions
"""
# getting the courses
success, error, course_dict = _update_courses(ENGINE, eduhub_df1)
assert success, error
assert len(course_dict) == 2
# getting the labs
success, error, lab_dict = _update_labs(ENGINE, eduhub_df1, course_dict)
assert success, error
assert len(lab_dict) == 2
# getting the subscriptions
success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df1)
assert success, error
assert len(sub_dict) == 2
# 2 new subscriptions
success, error, new_list, update_list = _update_details(
ENGINE, eduhub_df1, lab_dict, sub_dict
)
assert success, error
assert len(new_list) == 2
assert len(update_list) == 0
def test_update_details_2():
"""
tests ingress._update_details routine
1 update
"""
eduhub_df_local = pd.read_csv(
os.path.join(CONST_TEST_DIR_DATA, CONST_TEST2_FILENAME)
)
# getting the courses
success, error, course_dict = _update_courses(ENGINE, eduhub_df_local)
assert success, error
assert len(course_dict) == 2
# getting the labs
success, error, lab_dict = _update_labs(
ENGINE, eduhub_df_local, course_dict
)
assert success, error
assert len(lab_dict) == 2
# getting the subscriptions
success, error, sub_dict = _update_subscriptions(ENGINE, eduhub_df_local)
assert success, error
assert len(sub_dict) == 3
success, error, new_list, update_list = _update_details(
ENGINE, eduhub_df_local, lab_dict, sub_dict
)
assert success, error
assert len(new_list) == 1
assert len(update_list) == 2
def test_update_edu_data():
"""
tests ingress.update_edu_data routine
"""
# not a dataframe
(
success,
error,
_,
_,
sub_new_list,
sub_update_list
) = update_edu_data(ENGINE, None)
assert success is False, error
# empty dataframe
success, error, _, _, sub_new_list, sub_update_list = update_edu_data(
ENGINE, pd.DataFrame()
)
assert success is False, error
# real data
success, error, _, _, sub_new_list, sub_update_list = update_edu_data(
ENGINE, eduhub_df1
)
assert success, error
assert len(sub_new_list) == 0
assert len(sub_update_list) == 2
| 2.8125 | 3 |
jobya/users/management/commands/setup_user.py | xblzbjs/Jobya | 0 | 12794691 | from django.core.management.base import BaseCommand
from django.db import transaction
from jobya.users.models import User
from jobya.users.tests.factories import UserFactory
class Command(BaseCommand):
help = "Set up users data"
def add_arguments(self, parser):
parser.add_argument(
"total",
nargs="+",
type=int,
help="Indicates the number of users to be created",
)
@transaction.atomic
def handle(self, *args, **options):
total = options["total"][0]
self.stdout.write("Deleting old data...")
# Don't delete superuser
User.objects.filter(is_superuser=False).delete()
self.stdout.write("Creating new data...")
# Create all the users
people = []
for _ in range(total):
person = UserFactory()
people.append(person)
self.stdout.write("Success")
| 2.1875 | 2 |
codes_auto/1662.minimum-numbers-of-function-calls-to-make-target-array.py | smartmark-pro/leetcode_record | 0 | 12794692 | #
# @lc app=leetcode.cn id=1662 lang=python3
#
# [1662] minimum-numbers-of-function-calls-to-make-target-array
#
None
# @lc code=end | 1.34375 | 1 |
backup_tpqemu/thin_provisioning_sg_utils.py | PyLearner/myworks | 0 | 12794693 | import os
import re
import time
import logging
from virttest import data_dir
from virttest import env_process
from avocado.utils import process
from avocado.core import exceptions
from autotest.client.shared import error
from qemu.tests import thin_provisioning
@error.context_aware
def run(test, params, env):
"""
'thin-provisioning' functions test using sg_utils:
1) Boot up the guest with the scsi disk
2) using sg_utils to do some test
3) In guest, check the sha1 value of the guest disk
4) In host, check the sha1 value of the disk image
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def get_excution_time(session, cmd):
"""
This function is used to measure the real execution time of
the command in guest through shell command "time".
:param session: Guest session
:param cmd: Commands to execute
:return: The real execution time
"""
out = session.cmd_output(cmd)
try:
return float(re.search(r"real\s+\dm(.*)s", out).group(1))
except:
exceptions.TestError("Unable to read realtime, cmd output: %s" % out)
def run_sg_utils(disk_name, session):
"""
        This function is used to run some tests on the disk using the sg_utils package.
:param disk_name: The Guest disk name
:param session: Guest Session
:return: None
"""
yesfile = "/home/buf"
cmd = """yes | head -n2048 > {0};"""
cmd += "sg_write_same --in {0} --num=32 --lba=80 {1};"
cmd += "sg_write_same --in /dev/zero --num=96 --lba=0 {1};"
cmd += "sg_write_same -U --in /dev/zero --num=16 --lba=0 {1};"
cmd = cmd.format(yesfile, disk_name)
session.cmd(cmd)
fetch_data_from_file = "sg_write_same --in {:s} --num=65536 --lba=131074 {:s}".format(yesfile, disk_name)
fetch_data_from_file = "(time {:s})".format(fetch_data_from_file)
realtime1 = get_excution_time(session, fetch_data_from_file)
logging.info("The real execution time of the command is:{:f}".format(realtime1))
if params.get("disk_type") == "scsi_debug":
bitmap = thin_provisioning.get_allocation_bitmap()
logging.debug("Block allocation bitmap is: {}".format(bitmap))
else:
output = process.system_output("qemu-img map --output=json {:s}".format(disk_name))
logging.debug("json map: {}".format(output))
time.sleep(0.1)
fetch_data_from_zero_device = "sg_write_same --in /dev/zero --num=65534 --lba=196608 {:s}".format(disk_name)
fetch_data_from_zero_device = "(time {:s})".format(fetch_data_from_zero_device)
realtime2 = get_excution_time(session, fetch_data_from_zero_device)
logging.info("The real execution time of the command is {:f}".format(realtime2))
out3 = session.cmd_output("sg_write_same --in /dev/zero --num=0 --lba=128 {:s}".format(disk_name))
logging.debug(out3)
if re.search(r"bad field in Write same", out3) is None:
raise exceptions.TestFail("sg_write_same command fails. output is {}".format(out3))
if realtime2 > realtime1:
raise exceptions.TestFail("time used is much longger")
thin_provisioning.destroy_vm(env)
if params.get("disk_type") == "scsi_debug":
disk_name = thin_provisioning.get_scsi_disk()[1]
params["image_name_image_test"] = disk_name
else:
disk_name = os.path.join(data_dir.get_data_dir(), params.get("image_name_image_test"))
disk_name = "{:s}.raw".format(disk_name)
params["start_vm"] = "yes"
vm_name = params["main_vm"]
env_process.preprocess_vm(test, params, env, vm_name)
vm = env.get_vm(vm_name)
vm.verify_alive()
timeout = float(params.get("login_timeout", 240))
session = vm.wait_for_login(timeout=timeout)
guest_disk_name = thin_provisioning.get_scsi_disk(session)[1]
run_sg_utils(guest_disk_name, session)
guest_sha1 = session.cmd_output("sha1sum {:s}".format(guest_disk_name)).split()[0]
host_sha1 = process.system_output("sha1sum {:s}".format(disk_name)).split()[0]
if guest_sha1 != host_sha1:
raise exceptions.TestFail("after sg_writesame, image hash value becomes different between guest and host ")
session.close()
if vm:
vm.destroy() | 2.171875 | 2 |
Ago-Dic-2018/Orlando Martinez/Proyecto Ordinario/clientes.py | angelicardz/DAS_Sistemas | 41 | 12794694 | <filename>Ago-Dic-2018/Orlando Martinez/Proyecto Ordinario/clientes.py
from bs4 import BeautifulSoup
import requests
import sqlite3
url="https://randomuser.me/api/"
ids=[]
i=0
db = sqlite3.connect('Taqueria.db')
cursor = db.cursor()
class cliente():
for i in range(0,51):
ids.append(i)
i+=1
urls = 'https://randomuser.me/api/'
request = requests.get(urls)
for urls in request.json()['results']:
nombre=urls['name']['first']
genero=urls['gender']
direccion=urls['location']['street']
celular=urls['cell']
cursor.execute("INSERT INTO Clientes(ID_CLIENTE,NOMBRE_CLIENTE,GENERO,DIRECCION,CELULAR)VALUES(?,?,?,?,?)",(i,nombre,genero,direccion,celular))
db.commit()
| 2.9375 | 3 |
Time Calculator/time_calculator.py | thevirtualbuddy/Python-freecodecamp | 0 | 12794695 |
def add_time(start, duration,day=""):
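    # Hedged examples of the intended behaviour (taken from the freeCodeCamp spec,
    # included here only as illustration):
    #   add_time("3:00 PM", "3:10")               -> "6:10 PM"
    #   add_time("11:43 PM", "24:20", "tueSday")  -> "12:03 AM, Thursday (2 days later)"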
#Splitting and storing the input as a list
start_Gen = start.split() #based on spaces
startTime = start_Gen[0].split(":") #the time based on colon
#start_Gen[1] is holding either AM or PM
#print(startTime) #Holding the starting time supplied
duration=duration.split(":") #based on colon
#print(duration)
startMinutes = int(startTime[0])*60 + int(startTime[1]) #Start time in minutes
#print("Total minutes " +str(startMinutes))
durationMinutes = int(duration[0])*60 + int(duration[1]) #Given duration time in minutes
#print(durationMinutes)
totalDurationMinutes = startMinutes + durationMinutes #Total minutes with us
#print(totalDurationMinutes)
#Getting total time in hours and minutes
hoursTotal = totalDurationMinutes//60
#print(hoursTotal)
x = round((totalDurationMinutes/60)%1,2) * 60
y = round(x)
minsTotal = y
#print(minsTotal)
#print(hoursTotal)
#print(minsTotal)
#Counting days
dayC = ((totalDurationMinutes/60) - (startMinutes/60))/24
#print(dayC)
dayrem = round((dayC)%1,2)
#print(dayrem)
hourrem = dayrem*24
startDay = (int(startTime[0]) + (int(startTime[1]) / 60 ))
#print(startDay)
dayCount = round(dayC)
extrahours = hourrem+startDay
extrahours = round(extrahours)
#print(extrahours)
if extrahours >= 12 and start_Gen[1]=="PM":
dayCount+=1
#Deciding AM or PM
amPMTrack = start_Gen[1]
if extrahours>=12:
if start_Gen[1]=="PM":
amPMTrack = "AM"
else:
amPMTrack="PM"
#print("Days: "+str(dayCount))
hoursTotalFormatted = hoursTotal
hoursTotalFormatted = hoursTotal%12
#print(hoursTotalFormatted)
if hoursTotalFormatted == 0:
hoursTotalFormatted=12
minsTotalformatted = '%02d' % minsTotal
#dayTotal = hoursTotal//12
#print(dayTotal)
if day:
days = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"]
#index holds which day is sent in the argument
ind = 0
for i,myday in enumerate(days):
if myday.lower()==day.lower():
ind=i
break
#print(index)
#print(days[index])
newDay = ""
if dayCount==0:
newDay=day
else:
ind = (ind+1)%7
for i in range(1,dayCount+1):
newDay = days[ind]
i=i+1
ind = (ind+1) % 7
#print(newDay)
if day:
if (dayCount==1):
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} (next day)')
if (dayCount>1):
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay} ({dayCount} days later)')
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}, {newDay}')
else:
if (dayCount==1):
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} (next day)')
if (dayCount>1):
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack} ({dayCount} days later)')
return (f'{hoursTotalFormatted}:{minsTotalformatted} {amPMTrack}') | 4.0625 | 4 |
tests/__init__.py | pyj4104/FuncToWav | 0 | 12794696 | import sys
sys.path.append('~/Func2Wav/FuncToWav/src')
| 1.289063 | 1 |
tests/conftest.py | LiamOSullivan/datacube-ows | 0 | 12794697 | <gh_stars>0
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
import datetime
import time
from unittest.mock import MagicMock
import numpy as np
import pytest
import requests
import xarray
import xarray as xr
from s3fs.core import S3FileSystem
from tests.utils import (MOTO_PORT, MOTO_S3_ENDPOINT_URI, coords, dim1_da,
dummy_da)
def get_boto3_client():
from botocore.session import Session
session = Session()
return session.create_client("s3", endpoint_url=MOTO_S3_ENDPOINT_URI)
@pytest.fixture
def s3_base():
# writable local S3 system
# adapted from https://github.com/dask/s3fs/blob/main/s3fs/tests/test_s3fs.py
import subprocess
proc = subprocess.Popen(["moto_server", "s3", "-p", MOTO_PORT])
timeout = 5
while timeout > 0:
try:
r = requests.get(MOTO_S3_ENDPOINT_URI)
if r.ok:
break
except:
pass
timeout -= 0.1
time.sleep(0.1)
yield
proc.terminate()
proc.wait()
@pytest.fixture()
def s3(s3_base, monkeypatch):
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foo")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "bar")
client = get_boto3_client()
client.create_bucket(Bucket="testbucket")
S3FileSystem.clear_instance_cache()
s3 = S3FileSystem(anon=False, client_kwargs={"endpoint_url": MOTO_S3_ENDPOINT_URI})
s3.invalidate_cache()
yield s3
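
# Anything written through this fixture is served by the local moto server; e.g.
# after s3_config_simple has run, s3.cat("s3://testbucket/simple.json") returns
# b'{"test": 1234}' (illustrative check, not part of the original fixtures).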
@pytest.fixture
def s3_config_simple(s3):
config_uri = "s3://testbucket/simple.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'{"test": 1234}')
@pytest.fixture
def s3_config_nested_1(s3, s3_config_simple):
config_uri = "s3://testbucket/nested_1.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'{"include": "simple.json", "type": "json"}')
@pytest.fixture
def s3_config_nested_2(s3, s3_config_simple):
config_uri = "s3://testbucket/nested_2.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'[{"test": 88888}, {"include": "simple.json", "type": "json"}]')
@pytest.fixture
def s3_config_nested_3(s3, s3_config_simple):
config_uri = "s3://testbucket/nested_3.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'{"test": 2222, "things": [{"test": 22562, "thing": null}, \
{"test": 22563, "thing": {"include": "simple.json", "type": "json"}}, \
{"test": 22564, "thing": {"include": "simple.json", "type": "json"}}]}'
)
@pytest.fixture
def s3_config_nested_4(s3, s3_config_simple, s3_config_nested_3):
config_uri = "s3://testbucket/nested_4.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'{"test": 3222, "things": [{"test": 2572, "thing": null}, \
{"test": 2573, "thing": {"include": "simple.json", "type": "json"}}, \
{"test": 2574, "thing": {"include": "nested_3.json", "type": "json"}}]}'
)
@pytest.fixture
def s3_config_mixed_nested(s3, s3_config_simple):
config_uri = "s3://testbucket/mixed_nested.json"
with s3.open(config_uri, "wb") as f_open:
f_open.write(b'{"test": 9364, \
"subtest": {"test_py": {"include": "tests.cfg.simple.simple", "type": "python"}, \
"test_json": {"include": "tests/cfg/simple.json", "type": "json"}}}'
)
@pytest.fixture
def flask_client(monkeypatch):
monkeypatch.setenv("DEFER_CFG_PARSE", "yes")
from datacube_ows.ogc import app
with app.test_client() as client:
yield client
@pytest.fixture
def minimal_dc():
dc = MagicMock()
nb = MagicMock()
nb.index = ['band1', 'band2', 'band3', 'band4']
nb.__getitem__.return_value = {
"band1": -999,
"band2": -999,
"band3": float("nan"),
"band4": "nan",
}
lmo = MagicMock()
lmo.loc = {
"foo_nativeres": nb,
"foo_nonativeres": nb,
"foo_badnativecrs": nb,
"foo_nativecrs": nb,
"foo_nonativecrs": nb,
"foo": nb,
"bar": nb,
}
dc.list_measurements.return_value = lmo
def product_by_name(s):
if 'lookupfail' in s:
return None
mprod = MagicMock()
flag_def = {
"moo": {"bits": 0},
"floop": {"bits": 1},
"blat": {"bits": 2},
"pow": {"bits": 3},
"zap": {"bits": 4},
"dang": {"bits": 5},
}
mprod.lookup_measurements.return_value = {
"band4": {
"flags_definition": flag_def
}
}
mprod.definition = {"storage": {}}
if 'nonativecrs' in s:
pass
elif 'badnativecrs' in s:
mprod.definition["storage"]["crs"] = "EPSG:9999"
elif 'nativecrs' in s:
mprod.definition["storage"]["crs"] = "EPSG:4326"
else:
pass
if 'nonativeres' in s:
pass
elif 'nativeres' in s:
mprod.definition["storage"]["resolution"] = {
"latitude": 0.001,
"longitude": 0.001,
}
else:
pass
return mprod
dc.index.products.get_by_name = product_by_name
return dc
@pytest.fixture
def minimal_global_cfg():
global_cfg = MagicMock()
global_cfg.keywords = {"global"}
global_cfg.attribution.title = "Global Attribution"
global_cfg.contact_org = None
global_cfg.contact_position = None
global_cfg.abstract = "Global Abstract"
global_cfg.authorities = {
"auth0": "http://test.url/auth0",
"auth1": "http://test.url/auth1",
}
global_cfg.published_CRSs = {
"EPSG:3857": { # Web Mercator
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
"vertical_coord_first": False,
"gml_name": "http://www.opengis.net/def/crs/EPSG/0/3857",
"alias_of": None,
},
"EPSG:4326": { # WGS-84
"geographic": True,
"vertical_coord_first": True,
"horizontal_coord": "longitude",
"vertical_coord": "latitude",
"gml_name": "http://www.opengis.net/def/crs/EPSG/0/4326",
"alias_of": None,
},
"EPSG:3577": {
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
"vertical_coord_first": False,
"gml_name": "http://www.opengis.net/def/crs/EPSG/0/3577",
"alias_of": None,
},
"TEST:CRS": {
"geographic": False,
"horizontal_coord": "horrible_zonts",
"vertical_coord": "vertex_calories",
"vertical_coord_first": False,
"gml_name": "TEST/CRS",
"alias_of": None,
},
"TEST:NATIVE_CRS": {
"geographic": False,
"horizontal_coord": "hortizonal_cults",
"vertical_coord": "verbal_tics",
"vertical_coord_first": False,
"gml_name": "TEST/NATIVE_CRS",
"alias_of": None,
},
}
global_cfg.folder_index = {
"folder.existing_folder": MagicMock(),
}
return global_cfg
@pytest.fixture
def minimal_parent():
parent = MagicMock()
parent.abstract = "Parent Abstract"
parent.keywords = {"global", "parent"}
parent.attribution.title = "Parent Attribution"
return parent
@pytest.fixture
def minimal_layer_cfg():
return {
"title": "The Title",
"abstract": "The Abstract",
"name": "a_layer",
"product_name": "foo",
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
},
"styling": {
"default_style": "band1",
"styles": [
{
"name": "band1",
"title": "Single Band Test Style",
"abstract": "",
"components": {
"red": {"band1": 1.0},
"green": {"band1": 1.0},
"blue": {"band1": 1.0},
},
"scale_range": [0, 1024]
}
]
}
}
@pytest.fixture
def minimal_multiprod_cfg():
return {
"title": "The Title",
"abstract": "The Abstract",
"name": "a_layer",
"multi_product": True,
"product_names": ["foo", "bar"],
"image_processing": {
"extent_mask_func": "datacube_ows.ogc_utils.mask_by_val",
},
"styling": {
"default_style": "band1",
"styles": [
{
"name": "band1",
"title": "Single Band Test Style",
"abstract": "",
"components": {
"red": {"band1": 1.0},
"green": {"band1": 1.0},
"blue": {"band1": 1.0},
},
"scale_range": [0, 1024]
}
]
}
}
@pytest.fixture
def mock_range():
times = [datetime.datetime(2010, 1, 1), datetime.datetime(2010, 1, 2)]
return {
"lat": {
"min": -0.1,
"max": 0.1,
},
"lon": {
"min": -0.1,
"max": 0.1,
},
"times": times,
"start_time": times[0],
"end_time": times[-1],
"time_set": set(times),
"bboxes": {
"EPSG:4326": {"top": 0.1, "bottom": -0.1, "left": -0.1, "right": 0.1, },
"EPSG:3577": {"top": 0.1, "bottom": -0.1, "left": -0.1, "right": 0.1, },
"EPSG:3857": {"top": 0.1, "bottom": -0.1, "left": -0.1, "right": 0.1, },
}
}
@pytest.fixture
def minimal_global_raw_cfg():
return {
"global": {
"title": "Test Title",
"info_url": "https://my.domain.com/about_us",
"allowed_urls": [
"http://localhost",
"http://unsecure.domain.com/odc",
"https://secure.domain.com/ows",
],
"published_CRSs": {
"EPSG:3857": { # Web Mercator
"geographic": False,
"horizontal_coord": "x",
"vertical_coord": "y",
},
"EPSG:4326": { # WGS-84
"geographic": True,
"vertical_coord_first": True
},
},
},
"layers": []
}
@pytest.fixture
def wcs_global_cfg():
return {
"formats": {
# Key is the format name, as used in DescribeCoverage XML
"GeoTIFF": {
"renderer": "datacube_ows.wcs_utils.get_tiff",
# The MIME type of the image, as used in the Http Response.
"mime": "image/geotiff",
# The file extension to add to the filename.
"extension": "tif",
# Whether or not the file format supports multiple time slices.
"multi-time": False
},
"netCDF": {
"renderer": "datacube_ows.wcs_utils.get_netcdf",
"mime": "application/x-netcdf",
"extension": "nc",
"multi-time": True,
}
},
"native_format": "GeoTIFF",
}
@pytest.fixture
def dummy_raw_data():
output = xr.Dataset({
"ir": dummy_da(3, "ir", coords),
"red": dummy_da(5, "red", coords),
"green": dummy_da(7, "green", coords),
"blue": dummy_da(2, "blue", coords),
"uv": dummy_da(-1, "uv", coords),
})
return output
@pytest.fixture
def null_mask():
return dummy_da(True, "mask", coords, dtype=np.bool)
@pytest.fixture
def dummy_raw_calc_data():
dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]
output = xr.Dataset({
"ir": dim1_da("ir", [800, 100, 1000, 600, 200, 1000], dim_coords),
"red": dim1_da("red", [200, 500, 0, 200, 200, 700], dim_coords),
"green": dim1_da("green", [100, 500, 0, 400, 300, 200], dim_coords),
"blue": dim1_da("blue", [200, 500, 1000, 600, 100, 700], dim_coords),
"uv": dim1_da("uv", [400, 600, 900, 200, 400, 100], dim_coords),
"pq": dim1_da("pq", [0b000, 0b001, 0b010, 0b011, 0b100, 0b111], dim_coords,
attrs={
"flags_definition": {
"splodgy": {
"bits": 2,
"values": {
'0': "Splodgeless",
'1': "Splodgy",
},
"description": "All splodgy looking"
},
"ugly": {
"bits": 1,
"values": {
'0': False,
'1': True
},
"description": "Real, real ugly",
},
"impossible": {
"bits": 0,
"values": {
'0': False,
'1': "Woah!"
},
"description": "Won't happen. Can't happen. Might happen.",
},
}
})
})
return output
def dim1_null_mask(coords):
return dim1_da("mask", [True] * len(coords), coords)
@pytest.fixture
def raw_calc_null_mask():
dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]
return dim1_da("mask", [True] * len(dim_coords), dim_coords)
@pytest.fixture
def dummy_col_map_data():
dim_coords = [-2.0, -1.0, 0.0, -1.0, -2.0, -3.0]
output = xr.Dataset({
"pq": dim1_da("pq", [0b01000, 0b11001, 0b01010, 0b10011, 0b00100, 0b10111], dim_coords,
attrs={
"flags_definition": {
"joviality": {
"bits": 3,
"values": {
'0': "Melancholic",
'1': "Joyous",
},
"description": "All splodgy looking"
},
"flavour": {
"bits": 3,
"values": {
'0': "Bland",
'1': "Tasty",
},
"description": "All splodgy looking"
},
"splodgy": {
"bits": 2,
"values": {
'0': "Splodgeless",
'1': "Splodgy",
},
"description": "All splodgy looking"
},
"ugly": {
"bits": 1,
"values": {
'0': False,
'1': True
},
"description": "Real, real ugly",
},
"impossible": {
"bits": 0,
"values": {
'0': False,
'1': "Woah!"
},
"description": "Won't happen. Can't happen. Might happen.",
},
}
})
})
return output
@pytest.fixture
def dummy_raw_ls_data():
output = xr.Dataset({
"red": dummy_da(5, "red", coords, dtype=np.int16),
"green": dummy_da(7, "green", coords, dtype=np.int16),
"blue": dummy_da(2, "blue", coords, dtype=np.int16),
"nir": dummy_da(101, "nir", coords, dtype=np.int16),
"swir1": dummy_da(1051, "swir1", coords, dtype=np.int16),
"swir2": dummy_da(1051, "swir2", coords, dtype=np.int16),
})
return output
@pytest.fixture
def dummy_raw_wo_data():
output = xr.Dataset({
"water": dummy_da(0b101,
"red",
coords,
dtype=np.uint8,
attrs = {
"flags_definition": {
"nodata": {
"bits": 0,
"description": "No data",
"values": {
'0': False,
'1': True
},
},
"noncontiguous": {
"description": "At least one EO band is missing or saturated",
"bits": 1,
"values": {
'0': False,
'1': True
},
},
"low_solar_angle": {
"description": "Low solar incidence angle",
"bits": 2,
"values": {
'0': False,
'1': True
},
},
"terrain_shadow": {
"description": "Terrain shadow",
"bits": 3,
"values": {
'0': False,
'1': True
},
},
"high_slope": {
"description": "High slope",
"bits": 4,
"values": {
'0': False,
'1': True
}
},
"cloud_shadow": {
"description": "Cloud shadow",
"bits": 5,
"values": {
'0': False,
'1': True
},
},
"cloud": {
"description": "Cloudy",
"bits": 6,
"values": {
'0': False,
'1': True
},
},
"water_observed": {
"description": "Classified as water by the decision tree",
"bits": 7,
"values": {
'0': False,
'1': True
},
},
}
})
})
return output
@pytest.fixture
def dummy_raw_fc_data():
output = xr.Dataset({
"bs": dummy_da(546, "bs", coords, dtype=np.int16),
"pv": dummy_da(723, "pv", coords, dtype=np.int16),
"npv": dummy_da(209, "npv", coords, dtype=np.int16),
})
return output
@pytest.fixture
def dummy_raw_fc_plus_wo(dummy_raw_fc_data, dummy_raw_wo_data):
return xarray.combine_by_coords(
[dummy_raw_fc_data, dummy_raw_wo_data],
join="exact")
@pytest.fixture
def configs_for_landsat():
def ndvi(data):
# Calculate NDVI (-1.0 to 1.0)
unscaled = (data["nir"] - data["red"]) / (data["nir"] + data["red"])
# Scale to [-1.0 - 1.0] to [0 - 255]
scaled = ((unscaled + 1.0) * 255 / 2).clip(0, 255)
return scaled
from datacube_ows.styles.api import scalable
@scalable
def scaled_ndvi(data):
# Calculate NDVI (-1.0 to 1.0)
return (data["nir"] - data["red"]) / (data["nir"] + data["red"])
return [
{
"components": {
"red": {"red": 1.0},
"green": {"green": 1.0},
"blue": {"blue": 1.0},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"swir1": 1.0},
"green": {"nir": 1.0},
"blue": {"green": 1.0},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"red": 1.0},
"green": {"red": 1.0},
"blue": {"red": 1.0},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {
"red": 0.333,
"green": 0.333,
"blue": 0.333,
},
"green": {"nir": 1.0},
"blue": {
"swir1": 0.5,
"swir2": 0.5,
},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"red": 1.0},
"green": {},
"blue": {},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"red": 1.0},
"green": {"green": 1.0},
"blue": {"blue": 1.0},
},
"scale_range": (10, 800),
},
{
"components": {
"red": {"red": 1.0},
"green": {"green": 1.0},
"blue": {"blue": 1.0},
},
"scale_range": (1000, 8000),
},
{
"components": {
"red": {"red": 1.0},
"green": {"green": 1.0},
"blue": {"blue": 1.0},
},
"scale_range": (1000, 3000),
},
{
"components": {
"red": {
"swir1": 1.0,
"scale_range": (1500, 3700),
},
"green": {
"nir": 1.0,
"scale_range": (1600, 3200),
},
"blue": {"green": 1.0},
},
"scale_range": (200, 1900),
},
{
"components": {
"red": {"red": 1.0},
"green": ndvi,
"blue": {"blue": 1.0},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"red": 1.0},
"green": {
"function": scaled_ndvi,
"kwargs": {
"scale_from": (0.0, 1.0),
"scale_to": (0, 255)
}
},
"blue": {"blue": 1.0},
},
"scale_range": (50, 3000),
},
{
"components": {
"red": {"red": 1.0},
"green": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {
"band1": "nir",
"band2": "red",
"scale_from": (0.0, 1.0),
"scale_to": (0, 255)
}
},
"blue": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {
"band1": "green",
"band2": "nir",
"scale_from": (0.0, 1.0),
"scale_to": (0, 255)
}
},
},
"scale_range": (50, 3000),
},
{
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"mapped_bands": True,
"kwargs": {"band1": "nir", "band2": "red"},
},
"mpl_ramp": "RdYlGn",
"range": [-1.0, 1.0]
},
{
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {"band1": "nir", "band2": "red"},
},
"mpl_ramp": "ocean_r",
"range": [0.0, 1.0]
},
{
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {"band1": "nir", "band2": "red"},
},
"color_ramp": [
{"value": -1.0, "color": "#0000FF"},
{"value": -0.2, "color": "#005050", },
{"value": -0.1, "color": "#505050", },
{"value": -0.01, "color": "#303030", },
{"value": 0.0, "color": "black", },
{"value": 0.01, "color": "#303000", },
{"value": 0.5, "color": "#707030", },
{"value": 1.0, "color": "#FF9090", },
]
},
{
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {"band1": "nir", "band2": "red"},
},
"color_ramp": [
{
"value": -1.0,
"color": "#000000",
"alpha": 0.0,
},
{
"value": 0.0,
"color": "#000000",
"alpha": 0.0,
},
{
"value": 0.1,
"color": "#000030",
"alpha": 1.0,
},
{
"value": 0.3,
"color": "#703070",
},
{
"value": 0.6,
"color": "#e0e070",
},
{
"value": 1.0,
"color": "#90FF90",
}
]
},
{
"components": {
"red": {"red": 1.0},
"green": {"green": 1.0},
"blue": {"blue": 1.0},
"alpha": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {
"band1": "nir",
"band2": "red",
"scale_from": (0.0, 0.5),
"scale_to": (0, 255)
}
},
},
"scale_range": (50, 3000),
},
]
@pytest.fixture
def configs_for_wofs():
return [
{
"name": "observations",
"title": "Observations",
"abstract": "Observations",
"value_map": {
"water": [
{
"title": "Water",
"abstract": "",
"flags": {"water_observed": True},
"color": "Aqua",
},
{
"title": "Cloud",
"abstract": "",
"flags": {"cloud": True},
"color": "Beige",
},
{
"title": "Terrain",
"abstract": "",
# Flag rules can contain an "or" - they match if either of the conditions hold.
"flags": {"or": {"terrain_shadow": True, "high_slope": True}},
"color": "SlateGray",
},
{
"title": "Cloud Shadow and High Slope",
"abstract": "",
# Flag rules can contain an "and" - they match if all of the conditions hold.
"flags": {"and": {"cloud_shadow": True, "high_slope": True}},
"color": "DarkKhaki",
},
{
"title": "Dry",
"abstract": "",
"flags": {"water_observed": False},
"color": "Brown",
},
]
}
},
{
"name": "observations",
"title": "Observations",
"abstract": "Observations",
"value_map": {
"water": [
# Cloudy Slopes rule needs to come before the Cloud
# and High Slopes rules.
{
"title": "Cloudy Slopes",
"abstract": "",
"flags": {"and": {"cloud": True, "high_slope": True}},
"color": "BurlyWood",
},
# Only matches non-cloudy high-slopes.
{
"title": "High Slopes",
"abstract": "",
"flags": {"high_slope": True},
"color": "Brown",
},
{
"title": "Cloud",
"abstract": "",
"flags": {"cloud": True},
"color": "Beige",
},
{
"title": "Cloud Shadow",
"abstract": "",
"flags": {"cloud_shadow": True},
"color": "SlateGray",
},
{
"title": "Water",
"abstract": "",
"flags": {"water_observed": True},
"color": "Aqua",
},
{
"title": "Dry",
"abstract": "",
"flags": {"water_observed": False},
"color": "SaddleBrown",
},
]
}
},
{
"value_map": {
"water": [
{
# Make noncontiguous data transparent
"title": "",
"abstract": "",
"flags": {
"or": {
"noncontiguous": True,
"nodata": True,
},
},
"alpha": 0.0,
"color": "#ffffff",
},
{
"title": "Cloudy Steep Terrain",
"abstract": "",
"flags": {
"and": {
"high_slope": True,
"cloud": True
}
},
"color": "#f2dcb4",
},
{
"title": "Cloudy Water",
"abstract": "",
"flags": {
"and": {
"water_observed": True,
"cloud": True
}
},
"color": "#bad4f2",
},
{
"title": "Shaded Water",
"abstract": "",
"flags": {
"and": {
"water_observed": True,
"cloud_shadow": True
}
},
"color": "#335277",
},
{
"title": "Cloud",
"abstract": "",
"flags": {"cloud": True},
"color": "#c2c1c0",
},
{
"title": "Cloud Shadow",
"abstract": "",
"flags": {"cloud_shadow": True},
"color": "#4b4b37",
},
{
"title": "Terrain Shadow or Low Sun Angle",
"abstract": "",
"flags": {
"or": {
"terrain_shadow": True,
"low_solar_angle": True
},
},
"color": "#2f2922",
},
{
"title": "Steep Terrain",
"abstract": "",
"flags": {"high_slope": True},
"color": "#776857",
},
{
"title": "Water",
"abstract": "",
"flags": {
"water_observed": True,
},
"color": "#4f81bd",
},
{
"title": "Dry",
"abstract": "",
"flags": {"water_observed": False},
"color": "#96966e",
},
]
},
},
]
@pytest.fixture
def configs_for_combined_fc_wofs():
return [
{
"components": {
"red": {"bs": 1.0},
"green": {"pv": 1.0},
"blue": {"npv": 1.0}},
"scale_range": [0.0, 100.0],
},
{
"components": {
"red": {"bs": 1.0},
"green": {"pv": 1.0},
"blue": {"npv": 1.0}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
"band": "water",
"flags": {
"nodata": False,
"noncontiguous": False,
"terrain_shadow": False,
"low_solar_angle": False,
"high_slope": False,
"cloud_shadow": False,
"cloud": False,
"water_observed": False,
}
}
]
},
{
"components": {
"red": {"bs": 1.0},
"green": {"pv": 1.0},
"blue": {"npv": 1.0}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
"band": "water",
"enum": 1,
}
]
},
{
"components": {
"red": {"bs": 1.0},
"green": {"pv": 1.0},
"blue": {"npv": 1.0}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
"band": "water",
"enum": 1,
"invert": True,
}
]
},
{
"components": {
"red": {"bs": 1.0},
"green": {"pv": 1.0},
"blue": {"npv": 1.0}
},
"scale_range": [0.0, 100.0],
"pq_masks": [
{
# Mask out nodata pixels.
"band": "water",
"enum": 1,
"invert": True,
},
{
# Mask out pixels with low_solar_angle, high_slope
# or cloud shadow.
"band": "water",
"flags": {
"low_solar_angle": False,
"high_slope": False,
"cloud_shadow": False,
}
},
{
# Mask out pixels with cloud AND no water observed
"band": "water",
"flags": {
"cloud": True,
"water_observed": False,
},
"invert": True,
},
]
}
]
@pytest.fixture
def multi_date_cfg():
return {
"index_function": {
"function": "datacube_ows.band_utils.norm_diff",
"kwargs": {"band1": "nir", "band2": "red"},
},
"color_ramp": [
{"value": -1.0, "color": "#0000FF"},
{"value": -0.2, "color": "#005050", },
{"value": -0.1, "color": "#505050", },
{"value": -0.01, "color": "#303030", },
{"value": 0.0, "color": "black", },
{"value": 0.01, "color": "#303000", },
{"value": 0.5, "color": "#707030", },
{"value": 1.0, "color": "#FF9090", },
],
"multi_date": [
{
"allowed_count_range": [2, 2],
"preserve_user_date_order": True,
"aggregator_function": {
"function": "datacube_ows.band_utils.multi_date_delta"
},
"mpl_ramp": "RdYlBu",
"range": [-1.0, 1.0],
}
]
}
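# The ramp above uses datacube_ows.band_utils.norm_diff with band1="nir" and
# band2="red", i.e. a normalised-difference index. As a rough sketch (an assumption
# about what norm_diff computes, not its actual source):
#
#     index = (data["nir"] - data["red"]) / (data["nir"] + data["red"])
#
# The "multi_date" block then aggregates exactly two dates with multi_date_delta and
# colours the result with the matplotlib "RdYlBu" ramp over [-1.0, 1.0].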
xyt_coords = [
("x", [-1.0, -0.5, 0.0, 0.5, 1.0]),
("y", [-1.0, -0.5, 0.0, 0.5, 1.0]),
("time", [
datetime.datetime(2021, 1, 1, 22, 44, 5),
datetime.datetime.now()
])
]
@pytest.fixture
def xyt_dummydata():
return xarray.Dataset({
"red": dummy_da(1400, "red", xyt_coords, dtype="int16"),
"green": dummy_da(700, "green", xyt_coords, dtype="int16"),
"blue": dummy_da(1500, "blue", xyt_coords, dtype="int16"),
"nir": dummy_da(2000, "nir", xyt_coords, dtype="int16"),
}) | 1.804688 | 2 |
grAdapt/sampling/initializer/VerticesForceRandom.py | mkduong-ai/grAdapt | 25 | 12794698 | # python
import warnings
# Third party imports
import numpy as np
# grAdapt
from .base import Initial
from grAdapt.utils.sampling import sample_corner_bounds
class VerticesForceRandom(Initial):
"""
Samples all vertices if n_evals >= 2 ** len(bounds).
Else, a subset of vertices is sampled.
"""
def __init__(self, sampling_method):
"""
Parameters
----------
sampling_method : grAdapt.sampling.equidistributed Object
Sample low discrepancy sequences when initial point method is not feasible
"""
super().__init__(sampling_method)
def sample(self, bounds, n_evals):
"""Returns a numpy array of sampled points.
        Includes the corner points (vertices) of the hypercube/search space.
Parameters
----------
bounds : list of tuples or list of grAdapt.space.datatype.base
Each tuple in the list defines the bounds for the corresponding variable
Example: [(1, 2), (2, 3), (-1, 4)...]
n_evals : int
number of initial points sampled by method
Returns
-------
(self.n_evals, len(self.bounds)) numpy array
"""
super().sample(bounds, n_evals)
if 2 ** len(self.bounds) >= self.n_evals:
# sample corner points first which fits in n_evals
d_tilde = int(np.floor(np.log2(self.n_evals)))
corners_d_tilde = sample_corner_bounds(self.bounds[:d_tilde]) # (2 ** d_tilde, d_tilde) array
n_tilde = 2 ** d_tilde
# sample random fixed corner points
random_binary_array = np.random.randint(2, size=(len(self.bounds),))
remainder_bounds = self.bounds[d_tilde:]
fix_corners = np.zeros((1, len(remainder_bounds)))
for i in range(len(remainder_bounds)):
if random_binary_array[i] == 0:
fix_corners[0][i] = remainder_bounds[i][0]
else:
fix_corners[0][i] = remainder_bounds[i][1]
fix_corners_2d = np.tile(fix_corners, (n_tilde, 1)) # (n, d-d_tilde)
# corner points with fixed rest dimensions
corner_points_fixed = np.hstack((corners_d_tilde, fix_corners_2d))
            # because n_tilde = 2 ** d_tilde <= n_evals, sample the remaining n_evals - n_tilde points
if self.n_evals - n_tilde > 0:
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - n_tilde),
x_history=corner_points_fixed)
return np.vstack((corner_points_fixed, random_points))
else:
return corner_points_fixed
else:
corner_points = sample_corner_bounds(self.bounds)
num_corner_points = corner_points.shape[0]
random_points = self.sampling_method.sample(bounds=self.bounds,
n=(self.n_evals - num_corner_points),
x_history=corner_points)
return np.vstack((corner_points, random_points))
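# Usage sketch (an assumption, not part of the library): VerticesForceRandom only
# relies on the injected sampler exposing .sample(bounds, n, x_history), e.g.
#
#     sampler = SomeEquidistributedSampler()   # hypothetical grAdapt sampler
#     init = VerticesForceRandom(sampling_method=sampler)
#     points = init.sample(bounds=[(0, 1), (-1, 1), (2, 5)], n_evals=10)
#     # points.shape == (10, 3): all 2**3 = 8 vertices first, then 2 sampler points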
| 2.875 | 3 |
onehot.py | Youggls/WordEmbedding | 2 | 12794699 | import numpy as np
import pickle
class onehot:
def __init__(self, sentences):
self.__sentences = sentences
self.__data = {}
self.__count = {}
self.__build()
def __build(self):
self.__word_num = 1
for sentence in self.__sentences:
for word in sentence:
if word in self.__data:
self.__count[word] += 1
else:
self.__count[word] = 1
self.__data[word] = self.__word_num
self.__word_num += 1
def __getitem__(self, word):
if word not in self.__data:
            print('Error! The word is not in its map!')
else:
ret = np.zeros((self.__word_num - 1, 1))
ret[self.__data[word] - 1] = 1
return ret
def get_voca_size(self):
return self.__word_num - 1
def get_word_frequency(self, word):
if word not in self.__data:
            print('Error! The word is not in its map!')
else:
return self.__count[word]
def get_index_of_word(self, word):
if word not in self.__data:
            print('Error! The word is not in its map!')
else:
return self.__data[word] - 1
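# Minimal usage sketch (not part of the original module): build the vocabulary from
# tokenised sentences, then look up one-hot column vectors and word statistics.
if __name__ == '__main__':
    sentences = [['the', 'cat', 'sat'], ['the', 'dog', 'sat']]
    encoder = onehot(sentences)
    print(encoder.get_voca_size()) # 4 unique words
    print(encoder.get_word_frequency('the')) # 2
    print(encoder['cat'].shape) # (4, 1) column vector with a single 1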
| 3.09375 | 3 |
server.py | lqf96/cwng-bknd | 0 | 12794700 | #! /usr/bin/env python2.7
import os
from app import app
# Change working directory
os.chdir(os.path.dirname(__file__))
# Run application
app.run(debug=True)
| 1.835938 | 2 |
tests/test_delete_accessions.py | wallberg-umd/patsy-db | 0 | 12794701 | <filename>tests/test_delete_accessions.py<gh_stars>0
import patsy.database
from patsy.delete_accessions import delete_accessions
from patsy.model import Base
import unittest
from patsy.model import Accession
from .utils import create_test_engine, AccessionBuilder, create_perfect_match
from patsy.perfect_matches import find_perfect_matches
Session = patsy.database.Session
class TestDeleteAccession(unittest.TestCase):
def setUp(self):
create_test_engine()
engine = Session().get_bind()
Base.metadata.create_all(engine)
def test_batch_with_one_accession(self):
session = Session()
accession = AccessionBuilder().set_batch("batch_to_delete").build()
session.add(accession)
session.commit()
accessions = session.query(Accession)
self.assertEqual(1, accessions.count())
delete_accessions(session, "batch_to_delete")
session.commit()
accessions = session.query(Accession)
self.assertEqual(0, accessions.count())
def test_batch_with_two_accessions_in_different_batches(self):
session = Session()
accession1 = AccessionBuilder().set_batch("batch_to_delete").build()
accession2 = AccessionBuilder().set_batch("batch_to_preserve").build()
session.add(accession1)
session.add(accession2)
session.commit()
accessions = session.query(Accession)
self.assertEqual(2, accessions.count())
delete_accessions(session, "batch_to_delete")
session.commit()
accessions = session.query(Accession)
self.assertEqual(1, accessions.count())
self.assertEqual(accession2, accessions.first())
def test_batch_with_accession_with_perfect_match(self):
session = Session()
accession = AccessionBuilder().set_batch("batch_to_delete").build()
restore = create_perfect_match(accession)
session.add(accession)
session.add(restore)
session.commit()
accessions = session.query(Accession)
new_matches_found = find_perfect_matches(session, accessions)
self.assertEqual(1, len(new_matches_found))
self.assertEqual(1, len(accession.perfect_matches))
self.assertEqual(1, len(restore.perfect_matches))
self.assertEqual(accession, restore.perfect_matches[0])
self.assertEqual(restore, accession.perfect_matches[0])
delete_accessions(session, "batch_to_delete")
session.commit()
accessions = session.query(Accession)
self.assertEqual(0, accessions.count())
self.assertEqual([], restore.perfect_matches)
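# To run these tests directly (a sketch; any unittest-compatible runner works):
#
#     python -m unittest tests.test_delete_accessions -v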
| 2.296875 | 2 |
vlpi/data/ICDUtilities.py | daverblair/vlpi | 2 | 12794702 | <reponame>daverblair/vlpi<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 08:42:18 2019
@author: davidblair
"""
from unidecode import unidecode
import pkg_resources
import string
import pickle
import pandas as pd
ICD_PATH = pkg_resources.resource_filename('vlpi', 'data/ICDData/')
class ICDCode:
def __init__(self, code, associated_string,is_terminal, parent_code=None):
"""
Parameters
----------
code : str
ICD10 code string.
associated_string : str
String defining code in codebook.
is_terminal : bool
Indicates whether code is terminal (no children).
        parent_code : ICDCode, optional
            Parent ICDCode object, if any. The default is None.
Raises
------
ValueError
            If attempting to add a child code to a terminal parent code.
Returns
-------
None.
"""
self.code = code
self.associated_string = associated_string
self.parent_code = parent_code
self.child_codes = []
self.is_terminal=is_terminal
self.parent_code = parent_code
if parent_code is not None:
if parent_code.is_terminal==False:
parent_code.child_codes.append(self)
else:
raise ValueError("Attempting to add children to terminal node: child = {}, parent = {}".format(code,parent_code.code))
def __str__(self):
return self.associated_string
def __repr__(self):
return self.code
class ICDUtilities:
def _convertToUnicode(self,byteString):
return unidecode(str(byteString,"ISO-8859-1"))
def _lettersToInt(self,letter,base):
        return list(string.ascii_uppercase).index(letter)*base
def _convertCodeToIntVal(self,code):
intVal=0
for base,letter in enumerate(code[::-1]):
try:
intVal+=int(letter)*10**(base)
except ValueError:
intVal+=int(list(string.ascii_uppercase).index(letter))*10**(base)
return intVal
def _findParentInList(self,code,parentList):
while len(parentList) > 0:
if parentList[-1] in code:
return parentList
else:
parentList.pop()
return parentList
def ReturnCodeObject(self,code):
"""
Returns full code object (not just string) for a given code.
Parameters
----------
code : str
ICD10 code string.
Returns
-------
ICDCode
ICD10 code class for input string.
"""
if code in self.setOfUnusableCodes:
return self.UnusableICDCodes[self.unusableCodeToIndexMap[code.replace('.','')]]
else:
return self.UsableICDCodes[self.usableCodeToIndexMap[code.replace('.','')]]
def DeleteCode(self,del_code):
"""
Removes the ICD code and all children (if exist) from data structure.
Parameters
----------
del_code : str
ICD10 code to delete
Returns
-------
None
"""
all_del_codes=self._deleteCode(del_code)
marker_list_usable= [x.code for x in self.UsableICDCodes]
marker_list_unusable = [x.code for x in self.UnusableICDCodes]
for del_code in all_del_codes:
if del_code in self.setOfUnusableCodes:
self.UnusableICDCodes.pop(marker_list_unusable.index(del_code))
marker_list_unusable.remove(del_code)
else:
self.UsableICDCodes.pop(marker_list_usable.index(del_code))
marker_list_usable.remove(del_code)
self.usableCodeToIndexMap=dict(zip(marker_list_usable,range(len(marker_list_usable))))
self.unusableCodeToIndexMap=dict(zip(marker_list_unusable,range(len(marker_list_unusable))))
self.setOfUnusableCodes=set(self.unusableCodeToIndexMap.keys())
self.setOfUsableCodes=set(self.usableCodeToIndexMap.keys())
def _deleteCode(self,del_code):
del_code_obj = self.ReturnCodeObject(del_code)
parent_code = del_code_obj.parent_code
del_code_list=[del_code]
if parent_code is not None:
parent_code.child_codes.remove(del_code_obj)
if del_code_obj.is_terminal==False:
for child_code in del_code_obj.child_codes:
del_code_list+=self._deleteCode(child_code.code)
return del_code_list
def AssignCodeToChapter(self,code):
"""
Returns the chapter heading for input code
Parameters
----------
code : str
ICD10 code.
Returns
-------
str
ICD10 chapter.
"""
code = code.replace('.','')
currentCode = self.ReturnCodeObject(code)
while currentCode.parent_code is not None:
currentCode = self.ReturnCodeObject(currentCode.parent_code.code)
return str(currentCode)
def ReturnSubsumedTerminalCodes(self,parent_code):
"""
Returns all ICD10 codes that are children of the input code.
Parameters
----------
parent_code : str
            ICD10 string for parent code. Do not include periods (i.e., J101 not J10.1).
Returns
-------
terminal_code_list : list
List of ICD10 codes that are children to parent code.
"""
all_child_codes = self.ReturnCodeObject(parent_code).child_codes
terminal_code_list=[]
for child in all_child_codes:
if child.is_terminal==True:
terminal_code_list+=[child.code]
else:
terminal_code_list+=self.ReturnSubsumedTerminalCodes(child.code)
return terminal_code_list
def __init__(self,useICD10UKBB=False,hierarchyFile=None,chapterFile=None):
"""
Class that manipulates the ICD10 codebook. It stores the codebook as a simple tree (stored as a list called ICDCodes).
        To initialize the class, it expects two flat text files:
1) ICD10_Chapters.txt--chapter heading for all the codes. Manually constructed.
2) ICD10 codes and hierarchy: icd10cm_order_2018.txt, https://www.cms.gov/Medicare/Coding/ICD10/2018-ICD-10-CM-and-GEMs.html
By default, the package ships with 2018 version of ICD10-CM and 2020 version of ICD10 from UK Biobank. You can upgrade to 2019 (or downgrade for that matter) by specifying the path to another ICD10 file. ICD9 codebook could be used instead, but you would need to construct data files that match the structure of the ICD10 files.
Parameters
----------
useICD10UKBB : bool, optional
Specifies class to use the UK Biobank version of ICD10 (not ICD10-CM). The default is False.
hierarchyFile : str, optional
File path to an alternative code hierarchy file. This may (unlikely) work with other encodings but has not been tested. The default is None.
chapterFile : str, optional
File path to an alternative code chapter file (ie main groups of codes). Again, this may work with other encodings but has not been tested. The default is None.
Raises
------
ValueError
ValueError raised if unable to parse some line. Prints out the line of interest.
Returns
-------
None.
"""
if hierarchyFile==None:
if useICD10UKBB:
hierarchyFile=ICD_PATH+'icd10_ukbb.txt'
else:
hierarchyFile=ICD_PATH+'icd10cm_order_2018.txt'
if chapterFile==None:
chapterFile = ICD_PATH+'ICD10_Chapters.txt'
#quick reference, to avoid having to search full tree for codes.
#Full list of linked codes
self.UsableICDCodes=[]
self.usableCodeToIndexMap={}
self.setOfUsableCodes=set()
self.UnusableICDCodes=[]
self.unusableCodeToIndexMap={}
self.setOfUnusableCodes=set()
#first load the chapters
chapter_breakpoints=[]
chapter_list=[]
currentUsableCodeCount = 0
currentUnusableCodeCount = 0
with open(chapterFile,'rb') as f:
f.readline()
for line in f:
line=self._convertToUnicode(line)
line=line.strip('\n').split('\t')
self.UnusableICDCodes+=[ICDCode('Chapter_'+line[0],line[2],False)]
start,stop = line[1].split('-')
chapter_breakpoints+=[self._convertCodeToIntVal(stop[0:3])]
chapter_list+=['Chapter_'+line[0]]
self.unusableCodeToIndexMap['Chapter_'+line[0]]=currentUnusableCodeCount
self.setOfUnusableCodes.add('Chapter_'+line[0])
currentUnusableCodeCount+=1
#now load hierarchy file
with open(hierarchyFile,'rb') as f:
currentParentList = []
for line in f:
line=self._convertToUnicode(line)
parsedLine=[]
parsedLine+=[line[0:6].strip()]
parsedLine+=[line[6:14].strip()]
parsedLine+=[line[14:16].strip()]
parsedLine+=[line[77:].strip()]
currentParentList = self._findParentInList(parsedLine[1],currentParentList)
if len(currentParentList) == 0:
intVal = self._convertCodeToIntVal(parsedLine[1][0:3])
try:
icd_chapter = chapter_list[next(x[0] for x in enumerate(chapter_breakpoints) if intVal <= x[1])]
except StopIteration:
raise ValueError('{}'.format(parsedLine[1]))
currentParentList +=[icd_chapter]
if int(parsedLine[2])==1:
self.UsableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],True,self.ReturnCodeObject(currentParentList[-1]))]
self.usableCodeToIndexMap[parsedLine[1]]=currentUsableCodeCount
self.setOfUsableCodes.add(parsedLine[1])
currentUsableCodeCount+=1
else:
self.UnusableICDCodes+=[ICDCode(parsedLine[1],parsedLine[3],False,self.ReturnCodeObject(currentParentList[-1]))]
self.unusableCodeToIndexMap[parsedLine[1]]=currentUnusableCodeCount
self.setOfUnusableCodes.add(parsedLine[1])
currentUnusableCodeCount+=1
currentParentList+=[parsedLine[1]]
class ICD10TranslationMap:
def _buildTranslationTable(self):
translation_table={'Primary Code':[],'Secondary Code(s)':[],'Relationship':[]}
for code in self.primaryEncoding.UsableICDCodes:
#first check if there is 1:1 mapping between codes
if code.code in self.secondaryEncoding.setOfUsableCodes:
translation_table['Primary Code']+=[code.code]
translation_table['Secondary Code(s)']+=[set([code.code])]
translation_table['Relationship']+=['Direct']
else:
parent = code.parent_code
if parent.code in self.secondaryEncoding.setOfUsableCodes:
translation_table['Primary Code']+=[code.code]
translation_table['Secondary Code(s)']+=[set([parent.code])]
translation_table['Relationship']+=['Parent']
else:
if len(code.child_codes)>0:
child_code_names = [x.code for x in code.child_codes]
allowed_child_codes = set(child_code_names).intersection(self.secondaryEncoding.setOfUsableCodes)
if len(allowed_child_codes)>0:
translation_table['Primary Code']+=[code.code]
translation_table['Secondary Code(s)']+=[allowed_child_codes]
translation_table['Relationship']+=['Child']
translation_table=pd.DataFrame(translation_table)
translation_table.set_index('Primary Code',drop=False,inplace=True)
return translation_table
def __init__(self,primaryEncoding=None,secondaryEncoding=None):
"""
Builds translation map between two ICD Utilities with at least some shared codes by taking advantage of shared hierarchical structure.
If primaryEncoding and secondaryEncoding are unspecified, class creates a map between ICD10-CM and ICD10 (UKBB)
Parameters
----------
primaryEncoding : ICDUtilities, optional
First encoding. The default is None.
secondaryEncoding : ICDUtilities, optional
Second encoding. The default is None.
Returns
-------
None.
"""
if (primaryEncoding is not None) or (secondaryEncoding is not None):
            assert (primaryEncoding is not None) and (secondaryEncoding is not None), "Must specify primary and secondary encoding if providing one or the other."
self.primaryEncoding=primaryEncoding
self.secondaryEncoding=secondaryEncoding
if primaryEncoding is None:
try:
with open(ICD_PATH+'icd10cm_to_ukbb.pth','rb') as f:
self.EncodingCoversionTable = pickle.load(f)
except FileNotFoundError:
self.primaryEncoding = ICDUtilities()
self.secondaryEncoding=ICDUtilities(useICD10UKBB=True)
self.EncodingCoversionTable=self._buildTranslationTable()
self.EncodingCoversionTable.to_pickle(ICD_PATH+'icd10cm_to_ukbb.pth')
else:
self.EncodingCoversionTable=self._buildTranslationTable()
def ReturnConversionSet(self,primaryCode,includeRelationship=False):
"""
Returns set of codes that represent the mapping of the primary code to the new encoding system.
Parameters
----------
        primaryCode : str
            Diagnostic code to be converted.
        includeRelationship : bool, optional
            Specifies whether to return the relationship type in addition to the code. The default is False.
Returns
-------
set
Set of codes aligned to the code of interest.
"""
if includeRelationship:
look_up=['Secondary Code(s)','Relationship']
else:
look_up='Secondary Code(s)'
try:
return self.EncodingCoversionTable.loc[primaryCode][look_up]
except KeyError:
return set([])
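# Usage sketch (assumes the ICD data files packaged with vlpi, as described above):
#
#     codebook = ICDUtilities()                        # ICD10-CM 2018 by default
#     codebook.AssignCodeToChapter("E119")             # -> chapter title string
#     codebook.ReturnSubsumedTerminalCodes("E11")      # -> list of terminal child codes
#
#     cm_to_ukbb = ICD10TranslationMap()               # ICD10-CM -> ICD10 (UKBB) by default
#     cm_to_ukbb.ReturnConversionSet("E119")           # -> set of aligned UK Biobank codes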
| 2.25 | 2 |
server/resources/api/match.py | Saakshaat/umass-match | 0 | 12794703 | import datetime
from operator import attrgetter
from fastapi import APIRouter, HTTPException
from models import user as user_model, match
from resources.crud import read, create, custom
from schemas import user as user_schemas
from schemas.match import Match, FilterParams
from . import session_dep
match_router = APIRouter()
@match_router.post('/user/{user_id}/match/', response_model=user_schemas.UserGet, status_code=201)
async def add_new_match(user_id: int, filter_params: FilterParams, db=session_dep):
user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db)
# check if user has already matched in the past 3 days
if user_data.last_matched_time:
three_days_after_match = user_data.last_matched_time + datetime.timedelta(days=3)
current_time = datetime.datetime.now()
if current_time < three_days_after_match:
next_valid_date = datetime.date(day=three_days_after_match.day, month=three_days_after_match.month,
year=three_days_after_match.year).strftime('%A %d %B %Y')
raise HTTPException(status_code=403,
detail=f"You've already matched within the past 3 days. Wait till {next_valid_date}")
# run matching algorithm
matched_user = custom.match_user(user_id=user_id, filter_params=filter_params, db=db)
# create Match Pydantic models
current_match_data = Match(current_user_id=user_id, other_user_id=matched_user.id,
other_user_name=f"{matched_user.first_name} {matched_user.last_name}")
other_match_data = Match(current_user_id=matched_user.id, other_user_id=user_id,
other_user_name=f"{user_data.first_name} {user_data.last_name}")
# create match objects in the database
current_match = create.create_single_isolated_resource(model=match.Match, data=current_match_data, db=db)
other_match = create.create_single_isolated_resource(model=match.Match, data=other_match_data, db=db)
# update last_matched_time for each user
user_data.last_matched_time = datetime.datetime.now()
matched_user.last_matched_time = datetime.datetime.now()
# commit all changes in the database
db.add(current_match)
db.add(other_match)
db.commit()
db.refresh(user_data)
return matched_user
@match_router.get("/user/{user_id}/match/latest/", response_model=user_schemas.UserGet, status_code=200)
async def get_latest_match(user_id: int, db=session_dep):
user_data = read.read_single_resource(model=user_model.User, identifier='id', value=user_id, db=db)
if user_data is None or user_data.previous_matches == []:
raise HTTPException(status_code=404, detail='No matches made yet!')
latest_match = max(user_data.previous_matches, key=attrgetter('matched_at'))
return read.read_single_resource(model=user_model.User, identifier='id', value=latest_match.other_user_id, db=db)
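# Wiring sketch (an assumption about the surrounding application, not shown in this
# module): the router is expected to be mounted on the FastAPI app elsewhere, e.g.
#
#     from fastapi import FastAPI
#     app = FastAPI()
#     app.include_router(match_router, prefix="/api")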
| 2.359375 | 2 |
server/tools/igblast116/pipeline/tools/changeo/bin/BuildTrees.py | 3rand/benchmarking-platform | 2 | 12794704 | <gh_stars>1-10
#!/usr/bin/env python3
"""
Converts TSV files into IgPhyML input files
"""
# Info
__author__ = "<NAME>"
from changeo import __version__, __date__
# Imports
import os
import random
import subprocess
import multiprocessing as mp
from argparse import ArgumentParser
from collections import OrderedDict
from textwrap import dedent
from time import time
from Bio.Seq import Seq
from functools import partial
# Presto and changeo imports
from presto.Defaults import default_out_args
from presto.IO import printLog, printMessage, printWarning, printError, printDebug
from changeo.Defaults import default_format
from changeo.IO import splitName, getDbFields, getFormatOperators, getOutputHandle, getOutputName
from changeo.Alignment import RegionDefinition
from changeo.Commandline import CommonHelpFormatter, checkArgs, getCommonArgParser, parseCommonArgs
def correctMidCodonStart(scodons, qi, debug):
"""
Find and mask split codons
Arguments:
scodons (list): list of codons in IMGT sequence.
qi (str) : input sequence.
debug (bool) : print debugging statements.
Returns:
tuple: (modified input sequence, modified starting position of IMGT sequence in input sequence).
"""
spos = 0
for i in range(0, len(scodons)):
printDebug("%s %s" % (scodons[i], qi[0:3]), debug)
if scodons[i] != "...":
if scodons[i][0:2] == "..":
scodons[i] = "NN" + scodons[i][2]
                #sometimes IMGT will just cut off first letter if non-match, at which point we'll just want to mask the
#first codon in the IMGT seq, other times it will be legitimately absent from the query, at which point
#we have to shift the frame. This attempts to correct for this by looking at the next codon over in the
#alignment
if scodons[i][2:3] != qi[2:3] or scodons[i + 1] != qi[3:6]:
qi = "NN" + qi
spos = i
break
elif scodons[i][0] == ".":
scodons[i] = "N" + scodons[i][1:3]
if scodons[i][1:3] != qi[1:3] or scodons[i+1] != qi[3:6]:
qi = "N" + qi
spos = i
break
else:
spos = i
break
return qi, spos
def checkFrameShifts(receptor, oqpos, ospos, log, debug):
"""
    Checks whether a frameshift occurred in a sequence
Arguments:
receptor (changeo.Receptor.Receptor): Receptor object.
oqpos (int) : position of interest in input sequence.
ospos (int) : position of interest in IMGT sequence.
log (dict) : log of information for each sequence.
debug (bool) : print debugging statements.
"""
frameshifts = 0
for ins in range(1, 3):
ros = receptor.sequence_input
ris = receptor.sequence_imgt
psite = receptor.v_seq_start - 1 + oqpos*3
pisite = ospos * 3
if (psite + 3 + ins) < len(ros) and (pisite + 3) < len(ris):
#cut out 1 or 2 nucleotides downstream of offending codon
receptor.sequence_input = ros[0:(psite + 3)] + ros[(psite + 3 + ins):]
receptor.sequence_imgt = ris[0:(pisite + 3)] + ris[(pisite + 3):]
# Debug sequence modifications
printDebug(ros, debug)
printDebug(receptor.sequence_input, debug)
printDebug(ris, debug)
printDebug(receptor.sequence_imgt, debug)
printDebug("RUNNING %d" % ins, debug)
mout = maskSplitCodons(receptor, recursive=True)
if mout[1]["PASS"]:
#if debug:
receptor.sequence_input = ros
receptor.sequence_imgt = ris
frameshifts += 1
printDebug("FRAMESHIFT of length %d!" % ins, debug)
log["FAIL"] = "SINGLE FRAME-SHIFTING INSERTION"
break
else:
receptor.sequence_input = ros
receptor.sequence_imgt = ris
return frameshifts
def findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive=False):
"""
Find and mask split codons
Arguments:
receptor (changeo.Receptor.Receptor): Receptor object.
scodons (list): list of codons in IMGT sequence
qcodons (list): list of codons in input sequence
spos (int): starting position of IMGT sequence in input sequence
s_end (int): end of IMGT sequence
qpos (int): starting position of input sequence in IMGT sequence
log (dict): log of information for each sequence
debug (bool): print debugging statements?
recursive (bool): was this function called recursively?
"""
frameshifts = 0
while spos < s_end and qpos < len(qcodons):
if debug:
print(scodons[spos] + "\t" + qcodons[qpos])
if scodons[spos] == "..." and qcodons[qpos] != "...": #if IMGT gap, move forward in imgt
spos += 1
elif scodons[spos] == qcodons[qpos]: # if both are the same, move both forward
spos += 1
qpos += 1
elif qcodons[qpos] == "N": # possible that SEQ-IMGT ends on a bunch of Ns
qpos += 1
spos += 1
else: # if not the same, mask IMGT at that site and scan forward until you find a codon that matches next site
if debug:
print("checking %s at position %d %d" % (scodons[spos], spos, qpos))
ospos=spos
oqpos=qpos
spos += 1
qpos += 1
while spos < s_end and scodons[spos] == "...": #possible next codon is just a gap
spos += 1
while qpos < len(qcodons) and spos < s_end and scodons[spos] != qcodons[qpos]:
printDebug("Checking " + scodons[spos]+ "\t" + qcodons[qpos], debug)
qpos += 1
if qcodons[qpos-1] == scodons[ospos]: #if codon in previous position is equal to original codon, it was preserved
qpos -= 1
spos = ospos
printDebug("But codon was apparently preserved", debug)
if "IN-FRAME" in log:
log["IN-FRAME"] = log["IN-FRAME"] + "," + str(spos)
else:
log["IN-FRAME"] = str(spos)
elif qpos >= len(qcodons) and spos < s_end:
printDebug("FAILING MATCH", debug)
                log["PASS"] = False #if no match for the adjacent codon was found, something's up.
log["FAIL"] = "FAILED_MATCH_QSTRING:"+str(spos)
#figure out if this was due to a frame-shift by repeating this method but with an edited input sequence
if not recursive:
frameshifts += checkFrameShifts(receptor, oqpos, ospos, log, debug)
elif spos >= s_end or qcodons[qpos] != scodons[spos]:
scodons[ospos] = "NNN"
if spos >= s_end:
printDebug("Masked %s at position %d, at end of subject sequence" % (scodons[ospos], ospos), debug)
if "END-MASKED" in log:
log["END-MASKED"] = log["END-MASKED"] + "," + str(spos)
else:
log["END-MASKED"] = str(spos)
else:
printDebug("Masked %s at position %d, but couldn't find upstream match" % (scodons[ospos], ospos), debug)
log["PASS"]=False
log["FAIL"]="FAILED_MATCH:"+str(spos)
elif qcodons[qpos] == scodons[spos]:
printDebug("Masked %s at position %d" % (scodons[ospos], ospos), debug)
scodons[ospos] = "NNN"
if "MASKED" in log:
log["MASKED"] = log["MASKED"] + "," + str(spos)
else:
log["MASKED"] = str(spos)
else:
log["PASS"] = False
log["FAIL"] = "UNKNOWN"
def maskSplitCodons(receptor, recursive=False, mask=True):
"""
    Mask codons split by insertions/deletions relative to the IMGT-gapped sequence.
Arguments:
receptor (changeo.Receptor.Receptor): Receptor object.
recursive (bool) : was this method part of a recursive call?
mask (bool) : mask split codons for use with igphyml?
Returns:
        str: modified IMGT gapped sequence.
        dict: log of sequence information.
"""
debug = False
qi = receptor.sequence_input
si = receptor.sequence_imgt
log = OrderedDict()
log["ID"]=receptor.sequence_id
log["CLONE"]=receptor.clone
log["PASS"] = True
if debug:
print(receptor.sequence_id)
# adjust starting position of query sequence
qi = qi[(receptor.v_seq_start - 1):]
#tally where --- gaps are in IMGT sequence and remove them for now
gaps = []
ndotgaps = []
nsi = ""
for i in range(0,len(si)):
if si[i] == "-":
gaps.append(1)
ndotgaps.append(1)
else:
gaps.append(0)
nsi = nsi + si[i]
if si[i] != ".":
ndotgaps.append(0)
#find any gaps not divisible by three
curgap = 0
for i in ndotgaps:
if i == 1:
curgap += 1
elif i == 0 and curgap != 0:
if curgap % 3 != 0 :
printDebug("Frame-shifting gap detected! Refusing to include sequence.", debug)
log["PASS"] = False
log["FAIL"] = "FRAME-SHIFTING DELETION"
log["SEQ_IN"] = receptor.sequence_input
log["SEQ_IMGT"] = receptor.sequence_imgt
log["SEQ_MASKED"] = receptor.sequence_imgt
return receptor.sequence_imgt, log
else:
curgap = 0
si = nsi
scodons = [si[i:i + 3] for i in range(0, len(si), 3)]
# deal with the fact that it's possible to start mid-codon
qi,spos = correctMidCodonStart(scodons, qi, debug)
qcodons = [qi[i:i + 3] for i in range(0, len(qi), 3)]
frameshifts = 0
s_end = 0 #adjust for the fact that IMGT sequences can end on gaps
for i in range(spos, len(scodons)):
if scodons[i] != "..." and len(scodons[i]) == 3 and scodons[i] != "NNN":
s_end = i
printDebug("%i:%i:%s" % (s_end, len(scodons), scodons[s_end]), debug)
s_end += 1
qpos = 0
if mask:
findAndMask(receptor, scodons, qcodons, spos, s_end, qpos, log, debug, recursive)
if not log["PASS"] and not recursive:
log["FRAMESHIFTS"] = frameshifts
if len(scodons[-1]) != 3:
if scodons[-1] == ".." or scodons[-1] == ".":
scodons[-1] = "..."
else:
scodons[-1] = "NNN"
if "END-MASKED" in log:
log["END-MASKED"] = log["END-MASKED"] + "," + str(len(scodons))
else:
log["END-MASKED"] = str(spos)
concatenated_seq = Seq("")
for i in scodons:
concatenated_seq += i
# add --- gaps back to IMGT sequence
ncon_seq = ""
counter = 0
for i in gaps:
#print(str(i) + ":" + ncon_seq)
if i == 1:
ncon_seq = ncon_seq + "."
elif i == 0:
ncon_seq = ncon_seq + concatenated_seq[counter]
counter += 1
ncon_seq = ncon_seq + concatenated_seq[counter:]
concatenated_seq = ncon_seq
log["SEQ_IN"] = receptor.sequence_input
log["SEQ_IMGT"] = receptor.sequence_imgt
log["SEQ_MASKED"] = concatenated_seq
return concatenated_seq, log
def unAmbigDist(seq1, seq2, fbreak=False):
"""
Calculate the distance between two sequences counting only A,T,C,Gs
Arguments:
seq1 (str): sequence 1
seq2 (str): sequence 2
fbreak (bool): break after first difference found?
Returns:
int: number of ACGT differences.
"""
if len(seq1) != len(seq2):
printError("Sequences are not the same length! %s %s" % (seq1, seq2))
dist = 0
for i in range(0,len(seq1)):
if seq1[i] != "N" and seq1[i] != "-" and seq1[i] != ".":
if seq2[i] != "N" and seq2[i] != "-" and seq2[i] != ".":
if seq1[i] != seq2[i]:
dist += 1
if fbreak:
break
return dist
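# Example (sketch): ambiguous characters (N, -, .) are skipped, so only the final
# position differs below and the distance is 1.
#
#     unAmbigDist("ATGNNA", "ATG--G")          # -> 1
#     unAmbigDist("ATGNNA", "ATG--G", True)    # -> 1 (stops at the first difference)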
def deduplicate(useqs, receptors, log=None, meta_data=None, delim=":"):
"""
Collapses identical sequences
Argument:
useqs (dict): unique sequences within a clone. maps sequence to index in Receptor list.
receptors (dict): receptors within a clone (index is value in useqs dict).
log (collections.OrderedDict): log of sequence errors.
        meta_data (str): Field to append to sequence IDs. Splits identical sequences with different meta_data.
        delim (str): delimiter to use when appending meta_data.
Returns:
list: deduplicated receptors within a clone.
"""
keys = list(useqs.keys())
join = {} # id -> sequence id to join with (least ambiguous chars)
joinseqs = {} # id -> useq to join with (least ambiguous chars)
ambigchar = {} #sequence id -> number ATCG nucleotides
for i in range(0,len(keys)-1):
for j in range(i+1,len(keys)):
ki = keys[i]
kj = keys[j]
if meta_data is None:
ski = keys[i]
skj = keys[j]
else:
ski, cid = keys[i].split(delim)
skj, cid = keys[j].split(delim)
ri = receptors[useqs[ki]]
rj = receptors[useqs[kj]]
dist = unAmbigDist(ski, skj, True)
m_match = True
if meta_data is not None:
matches = 0
for m in meta_data:
if ri.getField(m) == rj.getField(m) and m != "DUPCOUNT":
matches += 1
m_match = (matches == len(meta_data))
if dist == 0 and m_match:
ncounti = ki.count("A") + ki.count("T") + ki.count("G") + ki.count("C")
ncountj = kj.count("A") + kj.count("T") + kj.count("G") + kj.count("C")
ambigchar[useqs[ki]] = ncounti
ambigchar[useqs[kj]] = ncountj
# this algorithm depends on the fact that all sequences are compared pairwise, and all are zero
                # distance from the sequence they will be collapsed to.
if ncountj > ncounti:
nci = 0
if useqs[ki] in join:
nci = ambigchar[join[useqs[ki]]]
if nci < ncountj:
join[useqs[ki]] = useqs[kj]
joinseqs[ki] = kj
else:
ncj = 0
if useqs[kj] in join:
ncj = ambigchar[join[useqs[kj]]]
if ncj < ncounti:
join[useqs[kj]] = useqs[ki]
joinseqs[kj] = ki
# loop through list of joined sequences and collapse
keys = list(useqs.keys())
for k in keys:
if useqs[k] in join:
rfrom = receptors[useqs[k]]
rto = receptors[join[useqs[k]]]
rto.dupcount += rfrom.dupcount
if log is not None:
log[rfrom.sequence_id]["PASS"] = False
log[rfrom.sequence_id]["DUPLICATE"] = True
log[rfrom.sequence_id]["COLLAPSETO"] = joinseqs[k]
log[rfrom.sequence_id]["COLLAPSEFROM"] = k
log[rfrom.sequence_id]["FAIL"] = "Collapsed with " + rto.sequence_id
del useqs[k]
return useqs
def hasPTC(sequence):
"""
    Determines whether a PTC exists in a sequence
Arguments:
sequence (str): IMGT gapped sequence in frame 1.
Returns:
        int: negative if no PTCs, otherwise the position of the first PTC found.
"""
ptcs = ("TAA", "TGA", "TAG", "TRA", "TRG", "TAR", "TGR", "TRR")
for i in range(0, len(sequence), 3):
if sequence[i:(i+3)] in ptcs:
return i
return -1
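# Example (sketch): the scan is codon-wise in frame 1, so the stop codon TAA starting
# at nucleotide offset 3 is reported by that offset.
#
#     hasPTC("ATGTAAGGG")   # -> 3
#     hasPTC("ATGGGGTGC")   # -> -1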
def rmCDR3(sequences, clones):
"""
Remove CDR3 from all sequences and germline of a clone
Arguments:
sequences (list): list of sequences in clones.
clones (list): list of Receptor objects.
"""
for i in range(0,len(sequences)):
imgtar = clones[i].getField("imgtpartlabels")
germline = clones[i].getField("germline_imgt_d_mask")
nseq = []
nimgtar = []
ngermline = []
ncdr3 = 0
#print("imgtarlen: " + str(len(imgtar)))
#print("seqlen: " + str(len(sequences[i])))
#print("germline: " + str(len(germline)))
#if len(germline) < len(sequences[i]):
# print("\n" + str(clones[i].sequence_id))
# print("\n " + str((sequences[i])) )
# print("\n" + str((germline)))
for j in range(0,len(imgtar)):
if imgtar[j] != 108:
nseq.append(sequences[i][j])
if j < len(germline):
ngermline.append(germline[j])
nimgtar.append(imgtar[j])
else:
ncdr3 += 1
clones[i].setField("imgtpartlabels",nimgtar)
clones[i].setField("germline_imgt_d_mask", "".join(ngermline))
sequences[i] = "".join(nseq)
#print("Length: " + str(ncdr3))
def characterizePartitionErrors(sequences, clones, meta_data):
"""
Characterize potential mismatches between IMGT labels within a clone
Arguments:
sequences (list): list of sequences in clones.
clones (list): list of Receptor objects.
meta_data (str): Field to append to sequence IDs. Splits identical sequences with different meta_data.
Returns:
tuple: tuple of length four containing a list of IMGT positions for first sequence in clones,
the germline sequence of the first receptor in clones, the length of the first sequence in clones,
and the number of sequences in clones.
"""
sites = len(sequences[0])
nseqs = len(sequences)
imgtar = clones[0].getField("imgtpartlabels")
germline = clones[0].getField("germline_imgt_d_mask")
    if germline == "":
germline = clones[0].getField("germline_imgt")
correctseqs = False
for seqi in range(0, len(sequences)):
i = sequences[seqi]
if len(i) != sites or len(clones[seqi].getField("imgtpartlabels")) != len(imgtar):
correctseqs = True
if correctseqs:
maxlen = sites
maximgt = len(imgtar)
for j in range(0,len(sequences)):
if len(sequences[j]) > maxlen:
maxlen = len(sequences[j])
if len(clones[j].getField("imgtpartlabels")) > maximgt:
imgtar = clones[j].getField("imgtpartlabels")
maximgt = len(imgtar)
sites = maxlen
for j in range(0,len(sequences)):
cimgt = clones[j].getField("imgtpartlabels")
seqdiff = maxlen - len(sequences[j])
imgtdiff = len(imgtar)-len(cimgt)
sequences[j] = sequences[j] + "N"*(seqdiff)
last = cimgt[-1]
cimgt.extend([last]*(imgtdiff))
clones[j].setField("imgtpartlabels",cimgt)
if meta_data is not None:
meta_data_ar = meta_data[0].split(",")
for c in clones:
if meta_data is not None:
c.setField(meta_data[0],c.getField(meta_data_ar[0]))
for m in range(1,len(meta_data_ar)):
st = c.getField(meta_data[0])+":"+c.getField(meta_data_ar[m])
c.setField(meta_data[0],st)
if len(c.getField("imgtpartlabels")) != len(imgtar):
printError("IMGT assignments are not the same within clone %d!\n" % c.clone,False)
printError(c.getField("imgtpartlabels"),False)
printError("%s\n%d\n" % (imgtar,j),False)
for j in range(0, len(sequences)):
printError("%s\n%s\n" % (sequences[j],clones[j].getField("imgtpartlabels")),False)
printError("ChangeO file needs to be corrected")
for j in range(0,len(imgtar)):
if c.getField("imgtpartlabels")[j] != imgtar[j]:
printError("IMGT assignments are not the same within clone %d!\n" % c.clone, False)
printError(c.getField("imgtpartlabels"), False)
printError("%s\n%d\n" % (imgtar, j))
#Resolve germline if there are differences, e.g. if reconstruction was done before clonal clustering
resolveglines = False
for c in clones:
ngermline = c.getField("germline_imgt_d_mask")
        if ngermline == "":
ngermline = c.getField("germline_imgt")
if ngermline != germline:
resolveglines = True
if resolveglines:
printError("%s %s" % ("Predicted germlines are not the same among sequences in the same clone.",
"Be sure to cluster sequences into clones first and then predict germlines using --cloned"))
if sites > (len(germline)):
seqdiff = sites - len(germline)
germline = germline + "N" * (seqdiff)
if sites % 3 != 0:
printError("number of sites must be divisible by 3! len: %d, clone: %s , id: %s, seq: %s" %(len(sequences[0]),\
clones[0].clone,clones[0].sequence_id,sequences[0]))
return imgtar, germline, sites, nseqs
def outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs, delim, newgerm, conseqs, duplicate, imgt):
"""
Create intermediate sequence alignment and partition files for IgPhyML output
Arguments:
out_dir (str): directory for sequence files.
useqs_f (dict): unique sequences mapped to ids.
meta_data (str): Field to append to sequence IDs. Splits identical sequences with different meta_data.
clones (list) : list of receptor objects.
collapse (bool) : deduplicate sequences.
nseqs (int): number of sequences.
delim (str) : delimiter for extracting metadata from ID.
newgerm (str) : modified germline of clonal lineage.
conseqs (list) : consensus sequences.
duplicate (bool) : duplicate sequence if only one in a clone.
imgt (list) : IMGT numbering of clonal positions .
"""
# bootstrap these data if desired
lg = len(newgerm)
sites = range(0, lg)
transtable = clones[0].sequence_id.maketrans(" ", "_")
outfile = os.path.join(out_dir, "%s.fasta" % clones[0].clone)
with open(outfile, "w") as clonef:
if collapse:
for seq_f, num in useqs_f.items():
seq = seq_f
cid = ""
if meta_data is not None:
seq, cid = seq_f.split(delim)
cid = delim + cid.replace(":", "_")
sid = clones[num].sequence_id.translate(transtable) + cid
clonef.write(">%s\n%s\n" % (sid.replace(":","-"), seq.replace(".", "-")))
if len(useqs_f) == 1 and duplicate:
if meta_data is not None:
if meta_data[0] == "DUPCOUNT":
cid = delim + "0"
sid = clones[num].sequence_id.translate(transtable) + "_1" + cid
clonef.write(">%s\n%s\n" % (sid.replace(":","-"), seq.replace(".", "-")))
else:
for j in range(0, nseqs):
cid = ""
if meta_data is not None:
meta_data_list = []
for m in meta_data:
meta_data_list.append(clones[j].getField(m).replace(":", "_"))
cid = delim + str(delim.join(meta_data_list))
sid = clones[j].sequence_id.translate(transtable) + cid
clonef.write(">%s\n%s\n" % (sid.replace(":","-"), conseqs[j].replace(".", "-")))
if nseqs == 1 and duplicate:
if meta_data is not None:
if meta_data[0] == "DUPCOUNT":
cid = delim + "0"
sid = clones[j].sequence_id.translate(transtable)+"_1" + cid
clonef.write(">%s\n%s\n" % (sid.replace(":","-"), conseqs[j].replace(".", "-")))
germ_id = ["GERM"]
if meta_data is not None:
for i in range(1,len(meta_data)):
germ_id.append("GERM")
clonef.write(">%s_%s\n" % (clones[0].clone,"_".join(germ_id)))
for i in range(0, len(newgerm)):
clonef.write("%s" % newgerm[i].replace(".","-"))
clonef.write("\n")
#output partition file
partfile = os.path.join(out_dir, "%s.part.txt" % clones[0].clone)
with open(partfile, "w") as partf:
partf.write("%d %d\n" % (2, len(newgerm)))
partf.write("FWR:IMGT\n")
partf.write("CDR:IMGT\n")
partf.write("%s\n" % (clones[0].v_call.split("*")[0]))
partf.write("%s\n" % (clones[0].j_call.split("*")[0]))
partf.write(",".join(map(str, imgt)))
partf.write("\n")
def outputIgPhyML(clones, sequences, meta_data=None, collapse=False, ncdr3=False, logs=None,
fail_writer=None, out_dir=None, min_seq=1):
"""
Create intermediate sequence alignment and partition files for IgPhyML output
Arguments:
clones (list): receptor objects within the same clone.
sequences (list): sequences within the same clone (share indexes with clones parameter).
meta_data (str): Field to append to sequence IDs. Splits identical sequences with different meta_data
collapse (bool): if True collapse identical sequences.
ncdr3 (bool): if True remove CDR3
logs (dict): contains log information for each sequence
out_dir (str): directory for output files.
fail_writer (changeo.IO.TSVWriter): failed sequences writer object.
min_seq (int): minimum number of data sequences to include.
Returns:
int: number of clones.
"""
s = ""
delim = "_"
duplicate = True # duplicate sequences in clones with only 1 sequence?
imgtar, germline, sites, nseqs = characterizePartitionErrors(sequences, clones, meta_data)
tallies = []
for i in range(0, sites, 3):
tally = 0
for j in range(0, nseqs):
if sequences[j][i:(i + 3)] != "...":
tally += 1
tallies.append(tally)
newseqs = [] # remove gap only sites from observed data
newgerm = []
imgt = []
for j in range(0, nseqs):
for i in range(0, sites, 3):
if i == 0:
newseqs.append([])
if tallies[i//3] > 0:
newseqs[j].append(sequences[j][i:(i+3)])
lcodon = ""
for i in range(0, sites, 3):
if tallies[i//3] > 0:
newgerm.append(germline[i:(i+3)])
lcodon=germline[i:(i+3)]
imgt.append(imgtar[i])
if len(lcodon) == 2:
newgerm[-1] = newgerm[-1] + "N"
elif len(lcodon) == 1:
newgerm[-1] = newgerm[-1] + "NN"
if ncdr3:
ngerm = []
nimgt = []
for i in range(0, len(newseqs)):
nseq = []
ncdr3 = 0
for j in range(0, len(imgt)):
if imgt[j] != 108:
nseq.append(newseqs[i][j])
if i == 0:
ngerm.append(newgerm[j])
nimgt.append(imgt[j])
else:
ncdr3 += 1
newseqs[i] = nseq
newgerm = ngerm
imgt = nimgt
#print("Length: " + str(ncdr3))
useqs_f = OrderedDict()
conseqs = []
for j in range(0, nseqs):
conseq = "".join([str(seq_rec) for seq_rec in newseqs[j]])
if meta_data is not None:
meta_data_list = []
for m in range(0,len(meta_data)):
if isinstance(clones[j].getField(meta_data[m]), str):
clones[j].setField(meta_data[m],clones[j].getField(meta_data[m]).replace("_", ""))
meta_data_list.append(str(clones[j].getField(meta_data[m])))
conseq_f = "".join([str(seq_rec) for seq_rec in newseqs[j]])+delim+":".join(meta_data_list)
else:
conseq_f = conseq
if conseq_f in useqs_f and collapse:
clones[useqs_f[conseq_f]].dupcount += clones[j].dupcount
logs[clones[j].sequence_id]["PASS"] = False
logs[clones[j].sequence_id]["FAIL"] = "Duplication of " + clones[useqs_f[conseq_f]].sequence_id
logs[clones[j].sequence_id]["DUPLICATE"]=True
if fail_writer is not None:
fail_writer.writeReceptor(clones[j])
else:
useqs_f[conseq_f] = j
conseqs.append(conseq)
if collapse:
useqs_f = deduplicate(useqs_f, clones, logs, meta_data, delim)
if collapse and len(useqs_f) < min_seq:
for seq_f, num in useqs_f.items():
logs[clones[num].sequence_id]["FAIL"] = "Clone too small: " + str(len(useqs_f))
logs[clones[num].sequence_id]["PASS"] = False
return -len(useqs_f)
elif not collapse and len(conseqs) < min_seq:
for j in range(0, nseqs):
logs[clones[j].sequence_id]["FAIL"] = "Clone too small: " + str(len(conseqs))
logs[clones[j].sequence_id]["PASS"] = False
return -len(conseqs)
# Output fasta file of masked, concatenated sequences
outputSeqPartFiles(out_dir, useqs_f, meta_data, clones, collapse, nseqs,
delim, newgerm, conseqs, duplicate, imgt)
if collapse:
return len(useqs_f)
else:
return nseqs
def maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer, mask=True):
"""
Masks codons split by alignment to IMGT reference
Arguments:
r (changeo.Receptor.Receptor): receptor object for a particular sequence.
clones (list): list of receptors.
cloneseqs (list): list of masked clone sequences.
logs (dict): contains log information for each sequence.
fails (dict): counts of various sequence processing failures.
out_args (dict): arguments for output preferences.
fail_writer (changeo.IO.TSVWriter): failed sequences writer object.
Returns:
0: returns 0 if an error occurs or masking fails.
        1: returns 1 if masking succeeds.
"""
if r.clone is None:
printError("Cannot export datasets until sequences are clustered into clones.")
if r.dupcount is None:
r.dupcount = 1
fails["rec_count"] += 1
#printProgress(rec_count, rec_count, 0.05, start_time)
ptcs = hasPTC(r.sequence_imgt)
gptcs = hasPTC(r.getField("germline_imgt_d_mask"))
if gptcs >= 0:
log = OrderedDict()
log["ID"] = r.sequence_id
log["CLONE"] = r.clone
log["SEQ_IN"] = r.sequence_input
log["SEQ_IMGT"] = r.sequence_imgt
logs[r.sequence_id] = log
logs[r.sequence_id]["PASS"] = False
logs[r.sequence_id]["FAIL"] = "Germline PTC"
fails["seq_fail"] += 1
fails["germlineptc"] += 1
return 0
if r.functional and ptcs < 0:
#If IMGT regions are provided, record their positions
rd = RegionDefinition(r.junction_length, amino_acid=False)
regions = rd.getRegions(r.sequence_imgt)
        if regions["cdr3_imgt"] != "" and regions["cdr3_imgt"] is not None:
simgt = regions["fwr1_imgt"] + regions["cdr1_imgt"] + regions["fwr2_imgt"] + regions["cdr2_imgt"] + \
regions["fwr3_imgt"] + regions["cdr3_imgt"] + regions["fwr4_imgt"]
if len(simgt) < len(r.sequence_imgt):
r.fwr4_imgt = r.fwr4_imgt + ("."*(len(r.sequence_imgt) - len(simgt)))
simgt = regions["fwr1_imgt"] + regions["cdr1_imgt"] + regions["fwr2_imgt"] + \
regions["cdr2_imgt"] + regions["fwr3_imgt"] + regions["cdr3_imgt"] + regions["fwr4_imgt"]
imgtpartlabels = [13]*len(regions["fwr1_imgt"]) + [30]*len(regions["cdr1_imgt"]) + [45]*len(regions["fwr2_imgt"]) + \
[60]*len(regions["cdr2_imgt"]) + [80]*len(regions["fwr3_imgt"]) + [108] * len(regions["cdr3_imgt"]) + \
[120] * len(regions["fwr4_imgt"])
r.setField("imgtpartlabels", imgtpartlabels)
if len(r.getField("imgtpartlabels")) != len(r.sequence_imgt) or simgt != r.sequence_imgt:
log = OrderedDict()
log["ID"] = r.sequence_id
log["CLONE"] = r.clone
log["SEQ_IN"] = r.sequence_input
log["SEQ_IMGT"] = r.sequence_imgt
logs[r.sequence_id] = log
logs[r.sequence_id]["PASS"] = False
logs[r.sequence_id]["FAIL"] = "FWR/CDR error"
logs[r.sequence_id]["FWRCDRSEQ"] = simgt
fails["seq_fail"] += 1
fails["region_fail"] += 1
return 0
        elif regions["fwr3_imgt"] != "" and regions["fwr3_imgt"] is not None:
simgt = regions["fwr1_imgt"] + regions["cdr1_imgt"] + regions["fwr2_imgt"] + regions["cdr2_imgt"] + \
regions["fwr3_imgt"]
nseq = r.sequence_imgt[len(simgt):len(r.sequence_imgt)]
if len(simgt) < len(r.sequence_imgt):
simgt = regions["fwr1_imgt"] + regions["cdr1_imgt"] + regions["fwr2_imgt"] + \
regions["cdr2_imgt"] + regions["fwr3_imgt"] + nseq
imgtpartlabels = [13] * len(regions["fwr1_imgt"]) + [30] * len(regions["cdr1_imgt"]) + [45] * len(
regions["fwr2_imgt"]) + \
[60] * len(regions["cdr2_imgt"]) + [80] * len(regions["fwr3_imgt"]) + \
[108] * int(len(nseq))
r.setField("imgtpartlabels", imgtpartlabels)
if len(r.getField("imgtpartlabels")) != len(r.sequence_imgt) or simgt != r.sequence_imgt:
log = OrderedDict()
log["ID"] = r.sequence_id
log["CLONE"] = r.clone
log["SEQ_IN"] = r.sequence_input
log["SEQ_IMGT"] = r.sequence_imgt
logs[r.sequence_id] = log
logs[r.sequence_id]["PASS"] = False
logs[r.sequence_id]["FAIL"] = "FWR/CDR error"
logs[r.sequence_id]["FWRCDRSEQ"] = simgt
fails["seq_fail"] += 1
fails["region_fail"] += 1
return 0
else:
#imgt_warn = "\n! IMGT FWR/CDR sequence columns not detected.\n! Cannot run CDR/FWR partitioned model on this data.\n"
imgtpartlabels = [0] * len(r.sequence_imgt)
r.setField("imgtpartlabels", imgtpartlabels)
mout = maskSplitCodons(r, mask=mask)
mask_seq = mout[0]
ptcs = hasPTC(mask_seq)
if ptcs >= 0:
printWarning("Masked sequence suddenly has a PTC.. %s\n" % r.sequence_id)
mout[1]["PASS"] = False
mout[1]["FAIL"] = "PTC_ADDED_FROM_MASKING"
logs[mout[1]["ID"]] = mout[1]
if mout[1]["PASS"]:
#passreads += r.dupcount
if r.clone in clones:
clones[r.clone].append(r)
cloneseqs[r.clone].append(mask_seq)
else:
clones[r.clone] = [r]
cloneseqs[r.clone] = [mask_seq]
return 1
else:
if out_args["failed"]:
fail_writer.writeReceptor(r)
fails["seq_fail"] += 1
fails["failreads"] += r.dupcount
if mout[1]["FAIL"] == "FRAME-SHIFTING DELETION":
fails["del_fail"] += 1
elif mout[1]["FAIL"] == "SINGLE FRAME-SHIFTING INSERTION":
fails["in_fail"] += 1
else:
fails["other_fail"] += 1
else:
log = OrderedDict()
log["ID"] = r.sequence_id
log["CLONE"] = r.clone
log["PASS"] = False
log["FAIL"] = "NONFUNCTIONAL/PTC"
log["SEQ_IN"] = r.sequence_input
logs[r.sequence_id] = log
if out_args["failed"]:
fail_writer.writeReceptor(r)
fails["seq_fail"] += 1
fails["nf_fail"] += 1
return 0
# Run IgPhyML on outputted data
def runIgPhyML(outfile, igphyml_out, clone_dir, nproc=1, optimization="lr", omega="e,e", kappa="e", motifs="FCH",
hotness="e,e,e,e,e,e", oformat="tab", nohlp=False, asr=-1, clean="none"):
"""
Run IgPhyML on outputted data
Arguments:
outfile (str): Output file name.
igphymlout (str): igphyml output file
nproc (int): Number of threads to parallelize IgPhyML across
optimization (str): Optimize combination of topology (t) branch lengths (l) and parameters (r) in IgPhyML.
omega (str): omega optimization in IgPhyML (--omega)
kappa (str): kappa optimization in IgPhyML (-t)
motifs (str): motifs to use in IgPhyML (--motifs)
hotness (str): motif in IgPhyML (--hotness)
oformat (str): output format for IgPhyML (tab or txt)
nohlp (bool): If True, only estimate GY94 trees and parameters
        asr (float): if non-negative, passed to IgPhyML as --ASRc to enable ancestral sequence reconstruction.
        clean (str): delete intermediate files? (none, all)
"""
osplit = outfile.split(".")
outrep = ".".join(osplit[0:(len(osplit)-1)]) + "_gy.tsv"
gyout = outfile + "_igphyml_stats_gy.txt"
gy_args = ["igphyml", "--repfile", outfile, "-m", "GY", "--run_id", "gy", "--outrep", outrep, "--threads",
str(nproc),"--outname",gyout]
hlp_args = ["igphyml","--repfile", outrep, "-m", "HLP", "--run_id", "hlp", "--threads", str(nproc), "-o",
optimization, "--omega", omega, "-t", kappa, "--motifs", motifs, "--hotness", hotness, "--oformat",
oformat, "--outname", igphyml_out]
if asr >= 0:
hlp_args.append("--ASRc")
hlp_args.append(str(asr))
log = OrderedDict()
log["START"] = "IgPhyML GY94 tree estimation"
printLog(log)
try: #check for igphyml executable
subprocess.check_output(["igphyml"])
except:
printError("igphyml not found :-/")
try: #get GY94 starting topologies
p = subprocess.check_output(gy_args)
except subprocess.CalledProcessError as e:
print(" ".join(gy_args))
print('error>', e.output, '<')
printError("GY94 tree building in IgPhyML failed")
log = OrderedDict()
log["START"] = "IgPhyML HLP analysis"
log["OPTIMIZE"] = optimization
log["TS/TV"] = kappa
log["wFWR,wCDR"] = omega
log["MOTIFS"] = motifs
log["HOTNESS"] = hotness
log["NPROC"] = nproc
printLog(log)
if not nohlp:
try: #estimate HLP parameters/trees
p = subprocess.check_output(hlp_args)
except subprocess.CalledProcessError as e:
print(" ".join(hlp_args))
print('error>', e.output, '<')
printError("HLP tree building failed")
log = OrderedDict()
log["OUTPUT"] = igphyml_out
if oformat == "tab":
igf = open(igphyml_out)
names = igf.readline().split("\t")
vals = igf.readline().split("\t")
for i in range(3,len(names)-1):
log[names[i]] = round(float(vals[i]),2)
printLog(log)
if clean != "none":
log = OrderedDict()
log["START"] = "CLEANING"
log["SCOPE"] = clean
printLog(log)
todelete = open(outrep)
for line in todelete:
line = line.rstrip("\n")
line = line.rstrip("\r")
lsplit = line.split("\t")
if len(lsplit) == 4:
os.remove(lsplit[0])
os.remove(lsplit[1])
os.remove(lsplit[3])
todelete.close()
os.remove(outrep)
os.remove(outfile)
os.remove(gyout)
cilog = outrep + "_igphyml_CIlog.txt_hlp"
if os.path.isfile(cilog):
os.remove(cilog)
if oformat == "tab":
os.rmdir(clone_dir)
else:
printWarning("Using --clean all with --oformat txt will delete all tree file results.\n"
"You'll have to do that yourself.")
log = OrderedDict()
log["END"] = "IgPhyML analysis"
printLog(log)
# Note: Collapse can give misleading dupcount information if some sequences have ambiguous characters at polymorphic sites
def buildTrees(db_file, meta_data=None, target_clones=None, collapse=False, ncdr3=False, nmask=False,
sample_depth=-1, min_seq=1,append=None, igphyml=False, nproc=1, optimization="lr", omega="e,e",
kappa="e", motifs="FCH", hotness="e,e,e,e,e,e", oformat="tab", clean="none", nohlp=False,
asr=-1, format=default_format, out_args=default_out_args):
"""
Masks codons split by alignment to IMGT reference, then produces input files for IgPhyML
Arguments:
db_file (str): input tab-delimited database file.
meta_data (str): Field to append to sequence IDs. Splits identical sequences with different meta_data
target_clones (str): List of clone IDs to analyze.
collapse (bool): if True collapse identical sequences.
ncdr3 (bool): if True remove all CDR3s.
        nmask (bool): if True, do not attempt to mask split codons.
sample_depth (int): depth of subsampling before deduplication
min_seq (int): minimum number of sequences per clone
append (str): column name to append to sequence_id
igphyml (bool): If True, run IgPhyML on outputted data
nproc (int) : Number of threads to parallelize IgPhyML across
optimization (str): Optimize combination of topology (t) branch lengths (l) and parameters (r) in IgPhyML.
omega (str): omega optimization in IgPhyML (--omega)
kappa (str): kappa optimization in IgPhyML (-t)
motifs (str): motifs to use in IgPhyML (--motifs)
hotness (str): motif in IgPhyML (--hotness)
oformat (str): output format for IgPhyML (tab or txt)
clean (str): delete intermediate files? (none, all)
        nohlp (bool): If True, only estimate GY94 trees and parameters.
        asr (float): if non-negative, passed to IgPhyML as --ASRc to enable ancestral sequence reconstruction.
format (str): input and output format.
out_args (dict): arguments for output preferences.
Returns:
dict: dictionary of output pass and fail files.
"""
# Print parameter info
log = OrderedDict()
log["START"] = "BuildTrees"
log["FILE"] = os.path.basename(db_file)
log["COLLAPSE"] = collapse
printLog(log)
# Open output files
out_label = "lineages"
pass_handle = getOutputHandle(db_file,
out_label=out_label,
out_dir=out_args["out_dir"],
out_name= out_args["out_name"],
out_type="tsv")
igphyml_out = None
if igphyml:
igphyml_out = getOutputName(db_file, out_label="igphyml-pass",
out_dir=out_args["out_dir"],
out_name=out_args["out_name"],
out_type=oformat)
dir_name, __ = os.path.split(pass_handle.name)
if out_args["out_name"] is None:
__, clone_name, __ = splitName(db_file)
else:
clone_name = out_args["out_name"]
if dir_name is None:
clone_dir = clone_name
else:
clone_dir = os.path.join(dir_name, clone_name)
if not os.path.exists(clone_dir):
os.makedirs(clone_dir)
# Format options
try:
reader, writer, __ = getFormatOperators(format)
except ValueError:
printError("Invalid format %s." % format)
out_fields = getDbFields(db_file, reader=reader)
# open input file
handle = open(db_file, "r")
records = reader(handle)
fail_handle, fail_writer = None, None
if out_args["failed"]:
fail_handle = getOutputHandle(db_file,
out_label="lineages-fail",
out_dir=out_args["out_dir"],
out_name=out_args["out_name"],
out_type=out_args["out_type"])
fail_writer = writer(fail_handle, fields=out_fields)
cloneseqs = {}
clones = {}
logs = OrderedDict()
fails = {"rec_count":0, "seq_fail":0, "nf_fail":0, "del_fail":0, "in_fail":0, "minseq_fail":0,
"other_fail":0, "region_fail":0, "germlineptc":0, "fdcount":0, "totalreads":0,
"passreads":0, "failreads":0}
# Mask codons split by indels
start_time = time()
printMessage("Correcting frames and indels of sequences", start_time=start_time, width=50)
#subsampling loop
init_clone_sizes = {}
big_enough = []
all_records = []
found_no_funct = False
for r in records:
if r.functional is None:
r.functional = True
if found_no_funct is False:
printWarning("FUNCTIONAL column not found.")
found_no_funct = True
all_records.append(r)
if r.clone in init_clone_sizes:
init_clone_sizes[r.clone] += 1
else:
init_clone_sizes[r.clone] = 1
for r in all_records:
if target_clones is None or r.clone in target_clones:
if init_clone_sizes[r.clone] >= min_seq:
big_enough.append(r)
fails["totalreads"] = len(all_records)
#fails["minseq_fail"] = len(all_records) - len(big_enough)
if len(big_enough) == 0:
printError("\n\nNo sequences found that match specified criteria.",1)
if sample_depth > 0:
random.shuffle(big_enough)
total = 0
for r in big_enough:
if r.functional is None:
r.functional = True
if found_no_funct is False:
printWarning("FUNCTIONAL column not found.")
found_no_funct = True
r.sequence_id = r.sequence_id.replace(",","-") #remove commas from sequence ID
r.sequence_id = r.sequence_id.replace(":","-") #remove colons from sequence ID
r.sequence_id = r.sequence_id.replace(")","-") #remove parentheses from sequence ID
r.sequence_id = r.sequence_id.replace("(","-") #remove parentheses from sequence ID
if(meta_data is not None):
for m in range(0,len(meta_data)):
md = r.getField(meta_data[m])
md = md.replace(",","-") #remove commas from metadata
md = md.replace(":","-") #remove colons from metadata
md = md.replace(")","-") #remove parentheses from metadata
md = md.replace("(","-") #remove parentheses from metadata
r.setField(meta_data[m],md)
if append is not None:
for m in append:
r.sequence_id = r.sequence_id + "_" + r.getField(m)
total += maskCodonsLoop(r, clones, cloneseqs, logs, fails, out_args, fail_writer,
mask = not nmask)
if total == sample_depth:
break
# Start processing clones
clonesizes = {}
pass_count, nclones = 0, 0
printMessage("Processing clones", start_time=start_time, width=50)
for k in clones.keys():
if len(clones[str(k)]) < min_seq:
for j in range(0, len(clones[str(k)])):
logs[clones[str(k)][j].sequence_id]["FAIL"] = "Clone too small: " + str(len(cloneseqs[str(k)]))
logs[clones[str(k)][j].sequence_id]["PASS"] = False
clonesizes[str(k)] = -len(cloneseqs[str(k)])
else:
clonesizes[str(k)] = outputIgPhyML(clones[str(k)], cloneseqs[str(k)], meta_data=meta_data, collapse=collapse,
ncdr3=ncdr3, logs=logs, fail_writer=fail_writer,
out_dir=clone_dir, min_seq=min_seq)
#If clone is too small, size is returned as a negative
if clonesizes[str(k)] > 0:
nclones += 1
pass_count += clonesizes[str(k)]
else:
fails["seq_fail"] -= clonesizes[str(k)]
fails["minseq_fail"] -= clonesizes[str(k)]
fail_count = fails["rec_count"] - pass_count
# End clone processing
printMessage("Done", start_time=start_time, end=True, width=50)
log_handle = None
if out_args["log_file"] is not None:
log_handle = open(out_args["log_file"], "w")
for j in logs.keys():
printLog(logs[j], handle=log_handle)
pass_handle.write(str(nclones)+"\n")
for key in sorted(clonesizes, key=clonesizes.get, reverse=True):
#print(key + "\t" + str(clonesizes[key]))
outfile = os.path.join(clone_dir, "%s.fasta" % key)
partfile = os.path.join(clone_dir, "%s.part.txt" % key)
if clonesizes[key] > 0:
germ_id = ["GERM"]
if meta_data is not None:
for i in range(1, len(meta_data)):
germ_id.append("GERM")
pass_handle.write("%s\t%s\t%s_%s\t%s\n" % (outfile, "N", key,"_".join(germ_id), partfile))
handle.close()
output = {"pass": None, "fail": None}
if pass_handle is not None:
output["pass"] = pass_handle.name
pass_handle.close()
if fail_handle is not None:
output["fail"] = fail_handle.name
fail_handle.close()
if log_handle is not None:
log_handle.close()
#printProgress(rec_count, rec_count, 0.05, start_time)
log = OrderedDict()
log["OUTPUT"] = os.path.basename(pass_handle.name) if pass_handle is not None else None
log["RECORDS"] = fails["totalreads"]
log["INITIAL_FILTER"] = fails["rec_count"]
log["PASS"] = pass_count
log["FAIL"] = fail_count
log["NONFUNCTIONAL"] = fails["nf_fail"]
log["FRAMESHIFT_DEL"] = fails["del_fail"]
log["FRAMESHIFT_INS"] = fails["in_fail"]
log["CLONETOOSMALL"] = fails["minseq_fail"]
log["CDRFWR_ERROR"] = fails["region_fail"]
log["GERMLINE_PTC"] = fails["germlineptc"]
log["OTHER_FAIL"] = fails["other_fail"]
if collapse:
log["DUPLICATE"] = fail_count - fails["seq_fail"]
log["END"] = "BuildTrees"
printLog(log)
#Run IgPhyML on outputted data?
if igphyml:
runIgPhyML(pass_handle.name, igphyml_out=igphyml_out, clone_dir=clone_dir, nproc=nproc,
optimization=optimization, omega=omega, kappa=kappa, motifs=motifs,
hotness=hotness, oformat=oformat, nohlp=nohlp,clean=clean,asr=asr)
return output
def getArgParser():
"""
Defines the ArgumentParser
Returns:
argparse.ArgumentParser: argument parsers.
"""
# Define input and output field help message
fields = dedent(
"""
output files:
<folder>
folder containing fasta and partition files for each clone.
lineages
successfully processed records.
lineages-fail
database records failed processing.
igphyml-pass
parameter estimates and lineage trees from running IgPhyML, if specified
required fields:
sequence_id, sequence, sequence_alignment,
germline_alignment_d_mask or germline_alignment,
v_call, j_call, clone_id, v_sequence_start
""")
# Parent parser
parser_parent = getCommonArgParser(out_file=False, log=True, format=True)
# Define argument parser
parser = ArgumentParser(description=__doc__, epilog=fields,
parents=[parser_parent],
formatter_class=CommonHelpFormatter, add_help=False)
group = parser.add_argument_group("sequence processing arguments")
group.add_argument("--collapse", action="store_true", dest="collapse",
help="""If specified, collapse identical sequences before exporting to fasta.""")
group.add_argument("--ncdr3", action="store_true", dest="ncdr3",
help="""If specified, remove CDR3 from all sequences.""")
group.add_argument("--nmask", action="store_true", dest="nmask",
help="""If specified, do not attempt to mask split codons.""")
group.add_argument("--md", nargs="+", action="store", dest="meta_data",
help="""List of fields to containing metadata to include in output fasta file
sequence headers.""")
group.add_argument("--clones", nargs="+", action="store", dest="target_clones",
help="""List of clone IDs to output, if specified.""")
group.add_argument("--minseq", action="store", dest="min_seq", type=int, default=1,
help="""Minimum number of data sequences. Any clones with fewer than the specified
number of sequences will be excluded.""")
group.add_argument("--sample", action="store", dest="sample_depth", type=int, default=-1,
help="""Depth of reads to be subsampled (before deduplication).""")
group.add_argument("--append", nargs="+", action="store", dest="append",
help="""List of columns to append to sequence ID to ensure uniqueness.""")
igphyml_group = parser.add_argument_group("IgPhyML arguments (see igphyml -h for details)")
igphyml_group.add_argument("--igphyml", action="store_true", dest="igphyml",
help="""Run IgPhyML on output?""")
igphyml_group.add_argument("--nproc", action="store", dest="nproc", type=int, default=1,
help="""Number of threads to parallelize IgPhyML across.""")
igphyml_group.add_argument("--clean", action="store", choices=("none", "all"),
dest="clean", type=str, default="none",
help="""Delete intermediate files?
none: leave all intermediate files; all: delete all intermediate files.""")
igphyml_group.add_argument("--optimize", action="store", dest="optimization", type=str, default="lr",
choices=("n","r","l","lr","tl","tlr"),
help="""Optimize combination of topology (t) branch lengths (l) and parameters (r), or
nothing (n), for IgPhyML.""")
igphyml_group.add_argument("--omega", action="store", dest="omega", type=str, default="e,e",
choices = ("e", "ce", "e,e", "ce,e", "e,ce", "ce,ce"),
help="""Omega parameters to estimate for FWR,CDR respectively:
e = estimate, ce = estimate + confidence interval""")
igphyml_group.add_argument("-t", action="store", dest="kappa", type=str, default="e",
choices=("e", "ce"),
help="""Kappa parameters to estimate:
e = estimate, ce = estimate + confidence interval""")
igphyml_group.add_argument("--motifs", action="store", dest="motifs", type=str,
default="WRC_2:0,GYW_0:1,WA_1:2,TW_0:3,SYC_2:4,GRS_0:5",
help="""Which motifs to estimate mutability.""")
igphyml_group.add_argument("--hotness", action="store", dest="hotness", type=str, default="e,e,e,e,e,e",
help="""Mutability parameters to estimate:
e = estimate, ce = estimate + confidence interval""")
igphyml_group.add_argument("--oformat", action="store", dest="oformat", type=str, default="tab",
choices=("tab", "txt"),
help="""IgPhyML output format.""")
igphyml_group.add_argument("--nohlp", action="store_true", dest="nohlp",
help="""Don't run HLP model?""")
igphyml_group.add_argument("--asr", action="store", dest="asr", type=float, default=-1,
help="""Ancestral sequence reconstruction interval (0-1).""")
return parser
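# Illustrative command line (file name is hypothetical; the -d flag is assumed to come
# from the common Change-O argument parser used above):
#   BuildTrees.py -d clones.tsv --collapse --igphyml --nproc 4 --clean all
# This would mask split codons, write per-clone fasta/partition files, and run IgPhyML.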
if __name__ == "__main__":
"""
Parses command line arguments and calls main
"""
# Parse command line arguments
parser = getArgParser()
checkArgs(parser)
args = parser.parse_args()
args_dict = parseCommonArgs(args)
del args_dict["db_files"]
# Call main for each input file
for f in args.__dict__["db_files"]:
args_dict["db_file"] = f
buildTrees(**args_dict) | 2.109375 | 2 |
58/spiders/wuba_1.py | 16752774499/scrapy_58- | 0 | 12794705 | import scrapy
from wuba.items import WubaItem
from selenium import webdriver
from lxml import etree
from selenium.webdriver.chrome.options import Options  # headless browser options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
from selenium.webdriver import ChromeOptions  # options to evade automation detection
option = ChromeOptions()
option.add_experimental_option('excludeSwitches', ['enable-automation'])
bot = webdriver.Chrome(executable_path='chromedriver.exe', chrome_options=chrome_options, options=option)
class Wuba1Spider(scrapy.Spider):
name = 'wuba_1'
# allowed_domains = ['www.xxx.com']
start_urls = [
'https://xianyang.58.com/job/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d202408-01d1-d86d-1573-f4faec6defb1&ClickID=4']
# Generic URL template used for pagination
url = 'https://xianyang.58.com/job/pn%d/?param7503=1&from=yjz2_zhaopin&utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d302408-01d1-d5f6-40be-8cb0310f3453&ClickID=3'
page_num = 2
def parse(self, response):
li_list = response.xpath('//ul[@id="list_con"]/li')
for li in li_list:
# Instantiate an item
item = WubaItem()
# Parse the job title
name = li.xpath('./div/div/a/span[2]/text()')[0].extract()
# Parse the detail page URL
deta_url = li.xpath('./div/div/a/@href')[0].extract()
deta_url = ''.join(deta_url)
new_url = str(deta_url)
item['new_url'] = new_url
bot.get(new_url)
page = bot.page_source
tree = etree.HTML(page)
# Parse the salary
gongci = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[2]/span[2]//text()')
gongci = ''.join(gongci)
# Parse the education requirement
yueli = tree.xpath('/html/body/div[3]/div[3]/div[1]/div[4]/span[2]//text()')[0]
# /html/body/div[3]/div[3]/div[1]/div[4]/span[2]
# Populate and yield the item
item['gongci'] = gongci
item['yueli'] = yueli
item['name'] = name
yield item
# Pagination
if self.page_num <= 5:
num_url = format(self.url%self.page_num)
self.page_num+=1
yield scrapy.Request(url=num_url,callback=self.parse)
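# To run this spider from the Scrapy project root (project layout assumed from the
# imports above, spider name taken from the `name` attribute):
#   scrapy crawl wuba_1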
| 2.5625 | 3 |
tests/test_enum.py | hellcat17/gladiator | 0 | 12794706 | <gh_stars>0
"""Test enum definition parsing."""
import xml.etree.ElementTree as xml
from gladiator.parse.enum import parse_required_enums
from gladiator.parse.feature import (
get_feature_requirements,
Feature,
FeatureApi,
FeatureVersion,
)
TESTED_FEATURE = Feature(api=FeatureApi.GL, version=FeatureVersion(major=1, minor=0))
def _get_enum_nodes(spec: xml.Element):
for node in spec:
if node.tag == "enums":
yield node
def _collect_features(spec: xml.Element):
for node in spec:
if node.tag == "feature":
yield node
def _collect_required(spec: xml.Element):
return tuple(
get_feature_requirements(
TESTED_FEATURE, tuple(_collect_features(spec))
).enums.keys()
)
def test_parse_enums(spec: xml.Element):
candidates = tuple(_get_enum_nodes(spec))
all_enums = tuple(parse_required_enums(_collect_required(spec), candidates))
attrib_mask = next(e for e in all_enums if e.name == "AttribMask")
clbuf_mask = next(e for e in all_enums if e.name == "ClearBufferMask")
assert attrib_mask.is_bitmask
assert (
next(v for v in attrib_mask.values if v.name == "GL_CURRENT_BIT").value
== "0x00000001"
)
assert (
next(v for v in attrib_mask.values if v.name == "GL_ALL_ATTRIB_BITS").value
== "0xFFFFFFFF"
)
assert (
next(v for v in attrib_mask.values if v.name == "GL_DEPTH_BUFFER_BIT").value
== "0x00000100"
)
assert clbuf_mask.is_bitmask
assert (
next(v for v in clbuf_mask.values if v.name == "GL_DEPTH_BUFFER_BIT").value
== "0x00000100"
)
| 2.515625 | 3 |
program/p1m2/Windows/meta2.py | Gabriellgpc/Sistemas_Roboticos | 4 | 12794707 | <gh_stars>1-10
# Make sure to have the server side running in CoppeliaSim:
# in a child script of a CoppeliaSim scene, add following command
# to be executed just once, at simulation start:
#
# simRemoteApi.start(19999)
#
# then start simulation, and run this program.
#
# IMPORTANT: for each successful call to simxStart, there
# should be a corresponding call to simxFinish at the end!
try:
import sim
except:
print ('--------------------------------------------------------------')
print ('"sim.py" could not be imported. This means very probably that')
print ('either "sim.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "sim.py"')
print ('--------------------------------------------------------------')
print ('')
import time
import matplotlib.pyplot as plt
import numpy as np
import sys
import ctypes
#################################### Functions definition ##############################################
# returns (omega_right, omega_left), the right and left wheel angular velocities
d = 0.331 #wheel axis distance
r_w = 0.09751 #wheel radius
def pioneer_robot_model(v, omega):
v_r = (v+d*omega)
v_l = (v-d*omega)
omega_right = v_r/r_w
omega_left = v_l/r_w
return omega_right, omega_left
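# Quick sanity check (illustrative numbers): a pure forward command v=0.3 m/s, omega=0
# gives v_r = v_l = 0.3, so both wheels spin at 0.3/r_w ~= 3.08 rad/s:
#   w_r, w_l = pioneer_robot_model(0.3, 0.0)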
def send_path_4_drawing(path, sleep_time = 0.07):
for i in path[:,0:2]:
point2send = i
packedData = sim.simxPackFloats(point2send.flatten())
raw_bytes = (ctypes.c_ubyte * len(packedData)).from_buffer_copy(packedData)
returnCode = sim.simxWriteStringStream(clientID, "path_coord", raw_bytes, sim.simx_opmode_oneshot)
if returnCode != 0:
# print('Error: fail to send the path point to the simulator!')
pass
time.sleep(sleep_time)
# xi, yi, thi => initial position and orientation
# xf, yf, thf => final position and orientation
# coef : {a0,a1,a2,a3,b0,b1,b2,b3} (dictionary with the coefficients of the degree-3 polynomials)
# return: coef
def pathComputer(xi, yi, thi, xf, yf, thf):
delta = 0.001
dx = xf - xi
dy = yf - yi
coef = dict()
thi_test = (np.pi/2.0 - delta) < thi < (np.pi/2.0 + delta)
thf_test = (np.pi/2.0 - delta) < thf < (np.pi/2.0 + delta)
if (thi_test) and (thf_test):
print('Special case #1')
# special case 1
coef['b1'] = dy #free coefficient
coef['b2'] = 0 #free coefficient
coef['a0'] = xi
coef['a1'] = 0
coef['a2'] = 3*dx
coef['a3'] = -2*dx
coef['b0'] = yi
coef['b3'] = dy - coef['b1'] - coef['b2']
elif thi_test:
print('Special case #2')
#special case 2
alpha_f = np.tan(thf)
coef['a3'] = -dx/2.0 #free coefficient
coef['b3'] = 0 #free coefficient (any value works here)
coef['a0'] = xi
coef['a1'] = 0
coef['a2'] = dx - coef['a3']
coef['b0'] = yi
coef['b1'] = 2*(dy - alpha_f*dx) - alpha_f*coef['a3'] + coef['b3']
coef['b2'] = (2*alpha_f*dx - dy) + alpha_f*coef['a3'] - 2*coef['b3']
elif thf_test:
print('Special case #3')
#special case 3
alpha_i = np.tan(thi)
coef['a1'] = 3*dx/2.0 #free coefficient
coef['b2'] = 0 #free coefficient (any value works here)
coef['a0'] = xi
coef['a2'] = 3*dx - 2*coef['a1']
coef['a3'] = coef['a1'] - 2*dx
coef['b0'] = yi
coef['b1'] = alpha_i*coef['a1']
coef['b3'] = dy - alpha_i*coef['a1'] - coef['b2']
else:
print('General case')
#general case
alpha_i = np.tan(thi)
alpha_f = np.tan(thf)
coef['a1'] = dx #free coefficient
coef['a2'] = 0 #free coefficient
coef['a0'] = xi
coef['a3'] = dx - coef['a1'] - coef['a2']
coef['b0'] = yi
coef['b1'] = alpha_i*coef['a1']
coef['b2'] = 3*(dy - alpha_f*dx) + 2*(alpha_f - alpha_i)*coef['a1'] + alpha_f*coef['a2']
coef['b3'] = 3*alpha_f*dx - 2*dy - (2*alpha_f - alpha_i)*coef['a1'] - alpha_f*coef['a2']
return coef
def pathGenerator(coef, l):
x = coef['a0'] + coef['a1']*l + coef['a2']*l**2 + coef['a3']*l**3
y = coef['b0'] + coef['b1']*l + coef['b2']*l**2 + coef['b3']*l**3
th= np.arctan2(coef['b1'] + 2*coef['b2']*l + 3*coef['b3']*l**2,
coef['a1'] + 2*coef['a2']*l + 3*coef['a3']*l**2)
return np.array(list(zip(x,y,th)),dtype=float)
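# Example sketch (values are illustrative): build a cubic path from the origin facing +x
# to the point (1, 1) facing +y, sampled at 100 points:
#   coef = pathComputer(0.0, 0.0, 0.0, 1.0, 1.0, np.pi/2)
#   path = pathGenerator(coef, np.linspace(0, 1, 100))   # array of (x, y, theta) rows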
##################################### Connecting to simulator ##################################################
clientID = sim.simxStart('127.0.0.1',19999,True,True,5000,5)
if (clientID == -1):  # simxStart returns -1 when the connection fails
print('Connection failed')
exit()
print('Connected!')
# handlers of motors and the robot
returnR, robot_handle = sim.simxGetObjectHandle(clientID,'Pioneer_p3dx',sim.simx_opmode_oneshot_wait)
returnT, target_handle = sim.simxGetObjectHandle(clientID,'Target',sim.simx_opmode_oneshot_wait)
if returnR != 0:
print('ERROR: GetObjectHandle failed for the Pioneer')
exit()
if returnT != 0:
print('ERROR: GetObjectHandle failed for the Target')
exit()
######################################## variables ###############################################
time.sleep(0.5)
resA,resP = 1, 1
resT = 1
while (resA != 0) and (resP != 0) and (resT != 0):
resA,ang = sim.simxGetObjectOrientation(clientID, robot_handle, -1, sim.simx_opmode_streaming)
resP,pos = sim.simxGetObjectPosition(clientID,robot_handle,-1, sim.simx_opmode_streaming)
resT,posT= sim.simxGetObjectPosition(clientID,target_handle,-1, sim.simx_opmode_streaming)
pi = [pos[0], pos[1], ang[2]]
pf = [posT[0], posT[1],0.0]
coef = pathComputer(pi[0], pi[1], pi[2], pf[0], pf[1], pf[2])
print('Coefficients:')
print(coef)
l = np.linspace(0,1,500)
path = pathGenerator(coef, l)
# print(path.shape)
plt.plot(path[0,0], path[0,1], 'or', label='Start')
plt.plot(path[-1,0], path[-1,1], 'ob', label='End')
plt.plot(path[:,0], path[:,1], '-k', label='Path')
send_path_4_drawing(path)
plt.title('Generated path')
plt.ylabel('y[m]')
plt.xlabel('x[m]')
plt.grid(True)
plt.legend()
plt.show()
####################################### graph configuration #######################################
######################################## simulation #######################################
# Before closing the connection to CoppeliaSim, make sure that the last command sent out had time to arrive. You can guarantee this with (for example):
sim.simxGetPingTime(clientID)
# Now close the connection to CoppeliaSim:
sim.simxFinish(clientID) | 2.34375 | 2 |
ShortPeriodFlapping/spf/remove_bz_offset.py | louis-richard/flapping | 0 | 12794708 | <gh_stars>0
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def remove_bz_offset(b_mms):
"""
Remove offset on Bz. The offset is computed using the time interval ["",""]
"""
offset = np.array([0., 0.06997924, 0.11059547, -0.05232682])
for i, b_xyz in enumerate(b_mms):
b_xyz[:, 2] -= offset[i]
return b_mms
| 2.609375 | 3 |
tools/parser/main.py | zxpower/MansOS | 10 | 12794709 | #!/usr/bin/python
# (because /usr/bin/env python does not work when called from IDE on Windows)
#
# Copyright (c) 2012 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, getopt, shutil
inputFileName = 'test.sl'
outputFileName = 'main.c'
architecture = 'testarch'
#architecture = 'msp430'
#architecture = 'pc'
targetOS = 'mansos'
pathToOS = '../..'
verboseMode = False
testMode = False
def exitProgram(code):
if not testMode:
exit(code)
print ("Would exit from program with code " + str(code))
raise Exception
def importsOk():
plyModuleOK = True # Python Lex Yacc - for compilation
try:
import ply
except ImportError:
plyModuleOK = False
if not plyModuleOK:
if os.name == 'posix':
installStr = "Make sure you have installed required modules. Run:\n\tsudo apt-get install"
else:
installStr = "Make sure you have installed modules:"
print ("Cannot run SEAL parser:")
if not plyModuleOK:
print ("\tPLY module not found")
installStr += " python-ply"
print (installStr)
return False
return True
def printLine(line):
sys.stderr.write(line)
def help(isError):
sys.stderr.write("Usage:\n")
sys.stderr.write(" -a <arch>, --arch Target architecture (defalt: {})\n".format(architecture))
sys.stderr.write(" -t <target>, --target Target OS (default: {0})\n".format(targetOS))
sys.stderr.write(" -o, --output <file> Output to file, '-' for stdout (default: {0})\n".format(outputFileName))
sys.stderr.write(" -p, --path <path> Path to the target OS installation (default: {0})\n".format(pathToOS))
sys.stderr.write(" -V, --verbose Verbose mode\n")
sys.stderr.write(" -v, --version Print version and exit\n")
sys.stderr.write(" -c, --continue Continue on errors (test mode)\n")
sys.stderr.write(" -h, --help Print this help\n")
sys.exit(int(isError))
def parseCommandLine(argv):
global inputFileName
global outputFileName
global architecture
global targetOS
global verboseMode
global testMode
global pathToOS
try:
opts, args = getopt.getopt(sys.argv[1:], "a:cho:p:t:Vv",
["arch=", "continue", "help", "output=",
"path=", "target=", "verbose", "version"])
except getopt.GetoptError as err:
# print help information and exit:
print (str(err)) # will print something like "option -a not recognized"
help(True)
isError = False
showHelp = False
for o, a in opts:
if o in ("-a", "--arch"):
architecture = a.lower()
if o in ("-t", "--target"):
targetOS = a.lower()
elif o in ("-v", "--version"):
versionFile = os.path.join("../..", "doc/VERSION")
release = "Unknown"
date = "Unknown"
try:
f = open(versionFile, "r")
lines = f.readlines()
f.close()
if len(lines) > 0:
release = lines[0].strip()
if len(lines) > 1:
date = lines[1].strip()
except:
pass
print ("MansOS version: " + release + " (Release date: " + date + ")")
sys.exit(0)
elif o in ("-V", "--verbose"):
verboseMode = True
elif o in ("-h", "--help"):
showHelp = True
elif o in ("-o", "--output"):
outputFileName = a
elif o in ("-p", "--path"):
pathToOS = a
elif o in ("-c", "--continue"):
testMode = True
if len(args):
inputFileName = args[0]
args = args[1:]
if len(args):
sys.stderr.write("Too many arguments given. ({0} remaining not parsed)\n".format(args))
isError = True
if showHelp or isError:
help(isError)
def main():
if not importsOk():
exit(1)
# import pathname where seal package is located
selfDirname = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(selfDirname, pathToOS, 'tools'))
sys.path.append(os.path.join(selfDirname, pathToOS, 'tools', 'seal', 'components'))
from seal import generator
# in case this is used multiple times
generator.components.clearGlobals()
parseCommandLine(sys.argv)
# for extension modules
sys.path.append(os.path.join(os.getcwd(), os.path.dirname(inputFileName)))
# read file to-be-parsed
with open(inputFileName, 'r') as inputFile:
contents = inputFile.read()
if contents == None:
sys.stderr.write('Failed to read file {0}'.format(inputFileName))
exitProgram(1)
# parse input file (SEAL code)
parser = generator.SealParser(architecture, printLine, verboseMode)
parser.run(contents)
if parser.isError:
exitProgram(1) # do not generate output file in this case
# generate C code to an output file
g = generator.createGenerator(targetOS)
if g is None:
sys.stderr.write('Failed to find code generator for target OS {0}'.format(targetOS))
exitProgram(1)
if outputFileName == '-':
g.generate(sys.stdout)
else:
outputDirName = os.path.dirname(outputFileName)
if len(outputDirName):
outputDirName += os.sep
if not os.path.exists(outputDirName):
os.makedirs(outputDirName)
numDirs = len(os.path.normpath(outputFileName).split(os.sep)) - 1
dirname = os.path.dirname(os.path.realpath(outputFileName))
if os.path.isabs(pathToOS):
makefilePathToOS = pathToOS.strip('\\'); # \ is special character, creates problems in makefile where this path is inserted
else:
makefilePathToOS = os.path.normpath(dirname + os.sep + ('/..' * numDirs) + os.sep + pathToOS)
with open(outputFileName, 'w') as outputFile:
g.generate(outputFile)
with open(outputDirName + "Makefile", 'w') as outputFile:
g.generateMakefile(outputFile, outputFileName, makefilePathToOS)
# use SEAL application's config file as the basis
try:
shutil.copyfile(outputDirName + ".." + os.sep + "config", outputDirName + "config-tmp")
except IOError as e:
try:
os.remove(outputDirName + "config-tmp")
except OSError as e:
pass
with open(outputDirName + "config-tmp", 'a+') as outputFile:
g.generateConfigFile(outputFile)
# replace the config file only if different: saves rebuiding time.
try:
isSame = (os.system("cmp -s " + outputDirName + "config-tmp " + outputDirName + "config") == 0)
except:
isSame = False
if not isSame:
try:
shutil.move(outputDirName + "config-tmp", outputDirName + "config")
except Exception as ex:
print (ex)
if generator.components.componentRegister.isError:
# cleanup
os.remove(outputFileName)
os.remove(outputDirName + "Makefile")
os.remove(outputDirName + "config")
return -1
if g.isComponentUsed("network"):
g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS)
g.generateForwarderCode(os.path.join(outputDirName, 'fwd'), makefilePathToOS)
g.generateCollectorCode(os.path.join(outputDirName, 'coll'), makefilePathToOS)
elif g.isComponentUsed("radio"):
g.generateBaseStationCode(os.path.join(outputDirName, 'bs'), makefilePathToOS)
if g.isComponentUsed("sdcard"):
g.generateRaw2Csv(outputDirName, os.path.join(selfDirname, 'raw2csv-template.py'))
return 0
if __name__ == '__main__':
exit(main())
| 1.9375 | 2 |
services/coordinator/src/configurator/targets/spilo_postgres_configurator.py | asmv/kubernetes-tuning-framework | 0 | 12794710 | from typing import List, Dict
from . import _target_configurator_base
from .. import configurator_enums
import template
import optimize
import launch
from os import path
import yaml
import warnings
import time
import re
class SpiloPostgresConfigurator(_target_configurator_base.TargetConfigurator):
label = "spilo"
identifier_type = launch.IdentifierType.application
pod_fetch_dict = {
"spilo": launch.IdentifierType.application
}
def __init__(self, client: 'DBClient'):
super().__init__(client)
# FIXME: Fragile path for refactoring
self.config_root = path.abspath(path.join(path.dirname(__file__), "../../../config", "target_spilo_postgres"))
def deploy(self, config: Dict[str, object], kube_context: launch.KubeContext):
config.update({
"namespace_name": kube_context.namespace_name
})
for k, v in config["param_config"].items():
try:
identifier, param = k.split(":")
if identifier == "postgres":
config["postgres_config"][param] = v
else:
warnings.warn("Unrecognized {0} parameter: {1}".format(identifier, param))
except Exception:
warnings.warn("Unrecognized parameter: {}".format(k))
continue
kubeconfig_dir = template.get_tempdir_with_config(path.join(self.config_root, "kubernetes"), config)
self.open_temp_dirs.append(kubeconfig_dir) # TODO: This is only needed for the next line, clean up later?
with open(path.join(kubeconfig_dir.name, "minimal-manifest.yaml"), "r+") as manifest_config:
minimal_manifest_yaml = yaml.load(manifest_config, Loader=yaml.SafeLoader)
postgresql_spec = minimal_manifest_yaml["spec"]["postgresql"]
if "parameters" not in postgresql_spec:
# convert to string since the postgresql crd spec only accepts string type
postgresql_spec["parameters"] = {k: str(v) for k, v in config["postgres_config"].items()}
else:
postgresql_spec["parameters"].update({k: str(v) for k, v in config["postgres_config"].items()})
manifest_config.seek(0)
manifest_config.truncate(0)
manifest_config.write(yaml.dump(minimal_manifest_yaml))
# Waiting not necessary for CRD
kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, "zalando", "manifests", "postgresql.crd.yaml"), wait_for_ready=False)
time.sleep(1)
kube_context.apply_kubectl_yaml_config(kubeconfig_dir.name, wait_for_ready=False)
kube_context.apply_kubectl_yaml_config(path.join(kubeconfig_dir.name, "cluster-level-rbac-patch.yaml"), namespaced=False)
# Need to wait manually because zalando postgres operator uses a CustomResourceDefinition that is not easily parseable to get StatefulSets
kube_context._sts_wait("acid-minimal-cluster", config["postgres_replicas"])
def prepare(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]):
if self.client is configurator_enums.DBClient.ycsb:
assert(len(pod_ids["spilo"]) > 0)
kube_context.copy_to_pod(pod_ids["spilo"][0], path.join(self.config_root, "client_ycsb", "init_table.sql"), "/init_table.sql")
while re.search("error", kube_context.run_command(pod_ids["spilo"][0], ["psql", "-U", "postgres"])):
time.sleep(1)
kube_context.run_command(pod_ids["spilo"][0], ["psql", "-U", "postgres", "-f", "/init_table.sql"])
if not re.search("now connected", kube_context.run_command(pod_ids["spilo"][0], ["psql", "-U", "postgres", "-c", r"\c test"])):
raise Exception("Table did not properly initialize. Logs:\n{}".format(kube_context.kubectl_subprocess(["logs", pod_ids["spilo"][0]])))
return
warnings.warn("Unable to prepare, no client match.")
def execute(self, config: Dict[str, object], kube_context: launch.KubeContext, pod_ids: Dict[str, List[str]]):
if self.client is configurator_enums.DBClient.ycsb:
# Not necessary to do anything once tables are configured for ycsb
return
warnings.warn("Unable to execute, no client match.")
| 2.140625 | 2 |
tests/test_nompi4py.py | rgreen1995/PTMCMCSampler | 1 | 12794711 | import pytest
from PTMCMCSampler.nompi4py import MPIDummy
class TestMPIDummp(object):
"""Test the MPIDummpy class
"""
def setup(self):
"""Setup the MPIDummy object
"""
self.mpidummy = MPIDummy()
def test_Get_rank(self):
"""Test the `Get_rank` method
"""
assert self.mpidummy.Get_rank() == 0
def test_Get_size(self):
"""Test the `Get_size` function
"""
assert self.mpidummy.Get_size() == 1
| 2.25 | 2 |
examples/jsonstats.py | SomePr0grammer/aHypixel | 0 | 12794712 | <gh_stars>0
"""
This program writes your stats to a JSON file.
"""
import hypixel
import json
client = hypixel.Client(key)  # replace key with your Hypixel API key
player = client.get_player("SomeHypixelNon")
stats = client.run(player.get_stats())
with open(jsonfile, 'w') as f: # replace jsonfile with the directory of the file you want to write to!
json.dump(stats, f) | 2.78125 | 3
cogs/clear.py | Flurrrr/baritonebot | 3 | 12794713 | import discord
import main
from discord.ext import commands
from cogs.help import Help
class Clear(commands.Cog):
def __init__(self, bot):
"""Returns embeds for the clear command."""
self.bot = bot
@commands.group(invoke_without_command=True, case_insensitive=True, aliases=['cl', 'pg', 'purge'])
@commands.check(main.mod_group)
async def clear(self, ctx, num=None, num2=None):
if num is None:
return await Help.clear(self, ctx)
try:
user_men = str(ctx.message.raw_mentions[0])
except IndexError:
user_men = ''
if ctx.guild is None:
await main.error_embed(ctx, 'You cannot use this command in DMs')
else:
if (user_men == '') and (len(num) != 18) and (num.isdigit()): # make sure a number is given and its not an ID
int_num = int(num)
if int_num > 0:
await ctx.channel.purge(limit=int_num)
channel = await self.bot.fetch_channel(main.ids(3))
await main.log_embed(None, 'Bulk messages deleted', f'{ctx.author.mention} cleared {int_num} messages in {ctx.channel.mention}', channel, ctx.author)
print(f'{ctx.author.id} cleared {int_num} messages in {ctx.channel}')
else:
await main.error_embed(ctx, 'You need to give a positive non zero number')
else:
int_num2 = int(num2)
if int_num2 > 0:
limit = 0
if user_men != '':
clear_member = self.bot.get_user(int(user_men)) # get the user if they mentioned
elif (num.isdigit()) and (len(num) == 18):
clear_member = self.bot.get_user(int(num)) # get the user if they gave an ID
else:
clear_member = ctx.guild.get_member_named(num) # get the member if they gave a name with/without discriminator
if clear_member is None:
await main.error_embed(ctx, 'The user you gave is either invalid or the name you gave is not a member')
else:
async for message in ctx.channel.history(limit=None):
limit += 1
if message.author == clear_member:
int_num2 -= 1
if int_num2 == 0:
break
def member_check(m):
return m.author == clear_member
await ctx.channel.purge(limit=limit, check=member_check)
try:
await ctx.message.delete() # delete the command
except discord.NotFound: # ignore error if it was already deleted
pass
channel = await self.bot.fetch_channel(main.ids(3))
await main.log_embed(ctx, 'Bulk messages deleted', f'{ctx.author.mention} cleared {num2} messages in {ctx.channel.mention} from {clear_member.mention}', channel, clear_member)
print(f'{ctx.author.id} cleared {num2} messages in {ctx.channel.id} from {clear_member.id}')
else:
await main.error_embed(ctx, 'You need to give a positive non zero number')
def setup(bot):
bot.add_cog(Clear(bot))
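# Loading sketch (module path assumed from this repo's cogs/ layout): the main bot file
# would call bot.load_extension("cogs.clear"), which invokes setup() above and registers
# the Clear cog.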
| 2.703125 | 3 |
build/lib/PyodbcListOfDicts/PyodbcListOfDicts.py | dariyush/PyodbcListOfDicts | 1 | 12794714 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 14:52:34 2019
@author: a.mohammadi
"""
import pyodbc
from collections import OrderedDict
#%%
def GetConnection(server, database):
return pyodbc.connect( ''.join(
[r'DRIVER={ODBC Driver 13 for SQL Server};',
r'Trusted_Connection=yes;',
r'SERVER=%s;' %server,
r'DATABASE=%s;' %database,]) )
def SQLExec(query, server, dataBase, commit):
with GetConnection(server,dataBase) as connection:
with connection.cursor() as cursor:
cursor.execute(query)
try:
rows = cursor.fetchall()
except:
rows = []
description = cursor.description #[col[0] for col in cursor.description]
if commit:
connection.commit()
lst = []
for row in rows:
d = OrderedDict()
for k, v in zip( description, row ):
d[k[0]] = v
lst.append( d )
return lst
# return [{k[0]: v for k, v in zip( description, row )} for row in rows]
#%%
if __name__ == "__main__":
server, dataBase, commit = 'AMESYD03','SafeEc', 0
query = """SELECT TOP 10 * FROM [dbo].[Company]"""
lst_of_dicts = SQLExec(query, server, dataBase, commit)
from pandas import DataFrame
df = DataFrame( lst_of_dicts ) | 2.859375 | 3 |
deep_rl/actor_critic/unreal/utils.py | jkulhanek/deep-rl-pytorch | 7 | 12794715 | import torch
import torch.nn.functional as F
import gym
import gym.spaces
import numpy as np
def autocrop_observations(observations, cell_size, output_size=None):
shape = observations.size()[3:]
if output_size is None:
new_shape = tuple(map(lambda x: (x // cell_size) * cell_size, shape))
else:
new_shape = tuple(map(lambda x: x * cell_size, output_size))
margin3_top = (shape[0] - new_shape[0]) // 2
margin3_bottom = -(shape[0] - new_shape[0] - margin3_top)
margin4_top = (shape[1] - new_shape[1]) // 2
margin4_bottom = -(shape[1] - new_shape[1] - margin4_top)
if margin3_bottom == 0:
margin3_bottom = None
if margin4_bottom == 0:
margin4_bottom = None
return observations[:, :, :, margin3_top:margin3_bottom, margin4_top:margin4_bottom]
def pixel_control_reward(observations, cell_size=4, output_size=None):
'''
Args:
observations: A tensor of shape `[B,T+1,C,H,W]`, where
* `T` is the sequence length, `B` is the batch size.
* `H` is height, `W` is width.
* `C...` is at least one channel dimension (e.g., colour, stack).
* `T` and `B` can be statically unknown.
cell_size: The size of each cell.
Returns:
shape (B, T, 1, H / cell_size, W / cell_size)
'''
with torch.no_grad():
observations = autocrop_observations(observations, cell_size, output_size=output_size)
abs_observation_diff = observations[:, 1:] - observations[:, :-1]
abs_observation_diff.abs_()
obs_shape = abs_observation_diff.size()
abs_diff = abs_observation_diff.view(-1, *obs_shape[2:])
avg_abs_diff = F.avg_pool2d(abs_diff, cell_size, stride=cell_size)
avg_abs_diff = avg_abs_diff.mean(1, keepdim=True)
return avg_abs_diff.view(*obs_shape[:2] + avg_abs_diff.size()[1:])
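# Shape sketch (sizes are illustrative): with observations of shape [B, T+1, C, 84, 84]
# and cell_size=4, the returned pseudo-reward tensor has shape [B, T, 1, 21, 21]:
#   rewards = pixel_control_reward(torch.zeros(2, 6, 3, 84, 84), cell_size=4)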
def pixel_control_loss(observations, actions, action_values, gamma=0.9, cell_size=4):
action_value_shape = action_values.size()
batch_shape = actions.size()[:2]
with torch.no_grad():
T = observations.size()[1] - 1
pseudo_rewards = pixel_control_reward(observations, cell_size, output_size=action_values.size()[-2:])
last_rewards = action_values[:, -1].max(1, keepdim=True)[0]
for i in reversed(range(T)):
previous_rewards = last_rewards if i + 1 == T else pseudo_rewards[:, i + 1]
pseudo_rewards[:, i].add_(gamma, previous_rewards)
q_actions = actions.view(*batch_shape + (1, 1, 1)).repeat(1, 1, 1, action_value_shape[3], action_value_shape[4])
q_actions = torch.gather(action_values[:, :-1], 2, q_actions)
loss = F.mse_loss(pseudo_rewards, q_actions)
return loss
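# Expected shapes (illustrative): observations [B, T+1, C, H, W], actions [B, T] (long),
# action_values [B, T+1, num_actions, H/cell_size, W/cell_size]; the loss is the MSE
# between the discounted pseudo-rewards and the Q-values of the taken actions.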
def reward_prediction_loss(predictions, rewards):
with torch.no_grad():
target = torch.zeros(predictions.size(), dtype=torch.float32, device=predictions.device)
target[:, 0] = rewards == 0
target[:, 1] = rewards > 0
target[:, 2] = rewards < 0
return F.binary_cross_entropy_with_logits(predictions, target)
def discounted_commulative_reward(rewards, base_value, gamma):
cummulative_reward = rewards.clone()
max_t = cummulative_reward.size()[1]
for i in reversed(range(max_t)):
next_values = base_value if i + 1 == max_t else cummulative_reward[:, i + 1]
cummulative_reward[:, i].add_(gamma, next_values)
return cummulative_reward
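# Worked example (illustrative numbers): rewards [[1, 0, 1]], base_value 0, gamma 0.9
# gives [1 + 0.9*(0 + 0.9*1), 0 + 0.9*1, 1] = [1.81, 0.9, 1.0] -- the n-step discounted
# return computed backwards from the bootstrap value.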
def value_loss(values, rewards, gamma):
base_value = values[:, -1]
with torch.no_grad():
cummulative_reward = discounted_commulative_reward(rewards, base_value, gamma)
return F.mse_loss(values[:, :-1], cummulative_reward)
class UnrealEnvBaseWrapper(gym.Wrapper):
def __init__(self, env):
super().__init__(env)
self.last_action_reward = None
self.observation_space = gym.spaces.Tuple((
env.observation_space,
gym.spaces.Box(0.0, 1.0, (env.action_space.n + 1,), dtype=np.float32)
))
def reset(self):
self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)
return self.observation(self.env.reset())
def step(self, action):
observation, reward, done, stats = self.env.step(action)
self.last_action_reward = np.zeros(self.action_space.n + 1, dtype=np.float32)
self.last_action_reward[action] = 1.0
self.last_action_reward[-1] = np.clip(reward, -1, 1)
return self.observation(observation), reward, done, stats
def observation(self, observation):
return (observation, self.last_action_reward)
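# Usage sketch (the environment id is only an example): wrapping a discrete-action Gym env
# makes each observation a tuple of (raw observation, one-hot last action + clipped reward):
#   env = UnrealEnvBaseWrapper(gym.make("PongNoFrameskip-v4"))
#   obs, last_action_reward = env.reset()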
| 2.453125 | 2 |
tests/unit/resources/test_fcs_file_parse_args.py | primitybio/cellengine-python-toolk | 4 | 12794716 | import json
import pytest
import responses
from cellengine.utils.generate_id import generate_id
from cellengine.resources.fcs_file import FcsFile
EXP_ID = "5d38a6f79fae87499999a74b"
FCSFILE_ID = "5d64abe2ca9df61349ed8e7c"
@responses.activate
def test_should_get_fcs_file(ENDPOINT_BASE, client, fcs_files):
file_id = fcs_files[0]["_id"]
responses.add(
responses.GET,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles/{file_id}",
json=fcs_files[0],
)
file = FcsFile.get(EXP_ID, file_id)
assert type(file) is FcsFile
@responses.activate
def test_should_create_fcs_file(ENDPOINT_BASE, client, fcs_files):
"""Test upload of a new fcs_file.
This test must be run from the project root directory"""
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles",
json=fcs_files[1],
)
FcsFile.create(EXP_ID, [fcs_files[0]["_id"]], "new file")
assert json.loads(responses.calls[0].request.body) == {
"fcsFiles": ["5d64abe2ca9df61349ed8e79"],
"filename": "new file",
}
params = [
(FCSFILE_ID, [FCSFILE_ID]),
([FCSFILE_ID], [FCSFILE_ID]),
(
["fcs_file_id_1", "fcs_file_id_2", "fcs_file_id_3"],
["fcs_file_id_1", "fcs_file_id_2", "fcs_file_id_3"],
),
({EXP_ID: FCSFILE_ID}, [{EXP_ID: FCSFILE_ID}]),
([{EXP_ID: FCSFILE_ID}], [{EXP_ID: FCSFILE_ID}]),
]
@pytest.mark.parametrize("fcs_file_args,expected_response", params)
@responses.activate
def test_should_create_fcs_file_and_correctly_parse_fcs_file_args(
ENDPOINT_BASE, client, fcs_files, fcs_file_args, expected_response
):
"""Test upload of a new fcs_file.
This test must be run from the project root directory"""
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles",
json=fcs_files[1],
)
FcsFile.create(EXP_ID, fcs_file_args, "new file")
assert json.loads(responses.calls[0].request.body) == {
"fcsFiles": expected_response,
"filename": "new file",
}
@responses.activate
def test_should_create_fcs_file_and_correctly_parse_body_args(
ENDPOINT_BASE, client, fcs_files
):
"""Test upload of a new fcs_file.
This test must be run from the project root directory"""
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles",
json=fcs_files[1],
)
FcsFile.create(
EXP_ID,
FCSFILE_ID,
"new name",
add_file_number=True,
add_event_number=True,
pre_subsample_n=1,
pre_subsample_p=1,
)
assert json.loads(responses.calls[0].request.body) == {
"fcsFiles": [FCSFILE_ID],
"filename": "new name",
"addFileNumber": True,
"addEventNumber": True,
"preSubsampleN": 1,
"preSubsampleP": 1
# leave out "seed" to test param not specified
}
@responses.activate
def test_should_delete_fcs_file(ENDPOINT_BASE, client, fcs_files):
fcs_file = FcsFile.from_dict(fcs_files[0])
responses.add(
responses.DELETE,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles/{fcs_file._id}",
)
deleted = fcs_file.delete()
assert deleted is None
@responses.activate
def test_fcs_file_and_fcs_file_id_defined(
ENDPOINT_BASE, experiment, experiments, gates
):
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=gates[0],
)
with pytest.raises(
ValueError, match="Please specify only 'fcs_file' or 'fcs_file_id'."
):
experiment.create_rectangle_gate(
"FSC-A",
"FSC-W",
"fcs_rect_gate",
x1=1,
x2=2,
y1=3,
y2=4,
fcs_file="Specimen_001_A1_A01.fcs",
fcs_file_id="5d38a7159fae87499999a74e",
tailored_per_file=True,
)
@responses.activate
def test_tailored_per_file_true(ENDPOINT_BASE, experiment, rectangle_gate):
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=rectangle_gate,
)
experiment.create_rectangle_gate(
"FSC-A",
"FSC-W",
"fcs_rect_gate",
x1=1,
x2=2,
y1=3,
y2=4,
tailored_per_file=True,
)
assert json.loads(responses.calls[0].request.body)["tailoredPerFile"] is True
@responses.activate
def test_fcs_file_id_is_None_and_fcs_file_is_None(
ENDPOINT_BASE, experiment, rectangle_gate
):
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=rectangle_gate,
)
experiment.create_rectangle_gate(
"FSC-A", "FSC-W", "fcs_rect_gate", x1=1, x2=2, y1=3, y2=4
)
assert json.loads(responses.calls[0].request.body)["fcsFileId"] is None
@responses.activate
def test_create_global_tailored_gate(ENDPOINT_BASE, experiment, rectangle_gate):
global_gid = generate_id()
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=rectangle_gate,
)
experiment.create_rectangle_gate(
x_channel="FSC-A",
y_channel="FSC-W",
name="fcs_rect_gate",
x1=1,
x2=2,
y1=3,
y2=4,
tailored_per_file=True,
gid=global_gid,
)
assert json.loads(responses.calls[0].request.body)["tailoredPerFile"] is True
assert json.loads(responses.calls[0].request.body)["gid"] == global_gid
@responses.activate
def test_specify_fcs_file_id(ENDPOINT_BASE, experiment, rectangle_gate):
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=rectangle_gate,
)
experiment.create_rectangle_gate(
x_channel="FSC-A",
y_channel="FSC-W",
name="fcs_rect_gate",
x1=1,
x2=2,
y1=3,
y2=4,
fcs_file_id="5d38a7159fae87499999a751",
tailored_per_file=True,
)
assert (
json.loads(responses.calls[0].request.body)["fcsFileId"]
== "5d38a7159fae87499999a751"
)
@responses.activate
def test_fcs_file_called_by_name(ENDPOINT_BASE, experiment, fcs_files, rectangle_gate):
responses.add(
responses.GET,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles",
json=[fcs_files[3]],
)
responses.add(
responses.GET,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/fcsfiles/5d64abe2ca9df61349ed8e7c",
json=fcs_files[3],
)
responses.add(
responses.POST,
ENDPOINT_BASE + f"/experiments/{EXP_ID}/gates",
status=201,
json=rectangle_gate,
)
experiment.create_rectangle_gate(
"FSC-A",
"FSC-W",
"fcs_rect_gate",
x1=1,
x2=2,
y1=3,
y2=4,
fcs_file="Specimen_001_A1_A01.fcs",
tailored_per_file=True,
)
assert json.loads(responses.calls[2].request.body)["tailoredPerFile"] is True
assert (
json.loads(responses.calls[2].request.body)["fcsFileId"]
== "5d64abe2ca9df61349ed8e7c"
)
| 2.15625 | 2 |
rrc_example_package/benchmark_rrc/python/cic/rotation_primitives.py | wq13552463699/TriFinger_Research | 12 | 12794717 | import numpy as np
from scipy.spatial.transform import Rotation
import numpy as np
import pybullet as p
def todegree(w):
return w*180/np.pi
def torad(w):
return w*np.pi/180
def angle(v1, v2):
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def add_one(index):
if index+1 == 3:
index_out = 0
else:
index_out = index+1
return index_out
def to_H(R, T=np.zeros(3)):
H = np.eye(4)
H[:-1,:-1] = R
H[:-1,-1] = T
return H
def closest_axis_2_userdefined(H, vec):
#print (H)
#print (np.linalg.inv(H[:-1,:-1]))
min_angle = 190
x_des = np.array(vec)
index = 0
sign = 0
reverse = False
for i in range(3):
x = H[:-1, i]
theta = todegree(angle(x, x_des))
#print (theta)
if theta > 90:
theta = theta - 180
if theta ==0:
reverse = True
if min_angle > np.abs(theta):
min_angle = np.abs(theta)
index = i
if theta == 0.:
if reverse:
sign = -1
else:
sign = 1
else:
sign = np.sign(theta)
return min_angle, index, sign
def R_2vect(vector_orig, vector_fin):
"""Calculate the rotation matrix required to rotate from one vector to another.
For the rotation of one vector to another, there is an infinite series of rotation matrices
possible. Due to axial symmetry, the rotation axis can be any vector lying in the symmetry
plane between the two vectors. Hence the axis-angle convention will be used to construct the
matrix with the rotation axis defined as the cross product of the two vectors. The rotation
angle is the arccosine of the dot product of the two unit vectors.
Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
the rotation matrix R is::
| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |
R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |
| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |
@return: The 4x4 homogeneous rotation matrix R, together with the (normalised) rotation axis and angle.
@param vector_orig: The unrotated vector defined in the reference frame.
@type vector_orig: numpy array, len 3
@param vector_fin: The rotated vector defined in the reference frame.
@type vector_fin: numpy array, len 3
"""
# Convert the vectors to unit vectors.
vector_orig = vector_orig / np.linalg.norm(vector_orig)
vector_fin = vector_fin / np.linalg.norm(vector_fin)
# The rotation axis (normalised).
axis = np.cross(vector_orig, vector_fin)
axis_len = np.linalg.norm(axis)
if axis_len != 0.0:
axis = axis / axis_len
# Alias the axis coordinates.
x = axis[0]
y = axis[1]
z = axis[2]
if x==0 and y==0 and z==0:
z=1
# The rotation angle.
angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1))
# Trig functions (only need to do this maths once!).
ca = np.cos(angle)
sa = np.sin(angle)
R = np.eye(4)
# Calculate the rotation matrix elements.
R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
R[0, 1] = -z * sa + (1.0 - ca) * x * y
R[0, 2] = y * sa + (1.0 - ca) * x * z
R[1, 0] = z * sa + (1.0 - ca) * x * y
R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
R[1, 2] = -x * sa + (1.0 - ca) * y * z
R[2, 0] = -y * sa + (1.0 - ca) * x * z
R[2, 1] = x * sa + (1.0 - ca) * y * z
R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
return R, axis, angle
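# Quick check (illustrative): rotating the x-axis onto the y-axis yields a rotation of
# pi/2 about the z-axis, returned as a 4x4 homogeneous matrix plus axis and angle:
#   R, axis, ang = R_2vect(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))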
class RotationPrimitives():
def __init__(self, H0, Hg):
self.H0 = H0
self.Hg = Hg
def set_goal(self, Hg):
self.Hg = Hg
def set_current_pose(self,H0):
self.H0 = H0
def get_control_seq(self, ax=None):
## Control Sequence will provide rotation vector and desired rotation to achieve target ##
################## Goal to Viapoint 2 ###################################
theta, index, sign = self.closest_axis_2_normal(self.Hg)
des_vec = np.array([0, 0, sign * 1])
R, r_vec_via2gw, ang_via = self.R_2vect(self.Hg[:-1, index], des_vec)
H_via2 = np.matmul(R, self.Hg)
H_via2[:-1,-1] = self.Hg[:-1,-1]
H_via2[2, -1] = 0.
r_vec_via2g = np.matmul(H_via2[:-1,:-1].T, r_vec_via2gw)
c2g = [r_vec_via2g, -ang_via, r_vec_via2gw]
#########################################################################
############# From Floor to Viapoint 1 ####################
index_H0, sign_H0 = self.find_index_z(self.H0)
#theta_0 ,index_H0, sign_H0 = self.closest_axis_2_normal(self.H0)
# print (index_H0, sign_H0, index, sign)
# input ("WAIT")
rot_index, ang_floor = self.find_rot_z(index_H0, sign_H0, index, sign)
if rot_index is not None:
r_vec_floor = np.zeros(3)
r_vec_floor[rot_index] = 1
rotation_floor = Rotation.from_rotvec(ang_floor * r_vec_floor)
R_floor_1 = rotation_floor.as_matrix()
R_floor_1 = to_H(R=R_floor_1)
H_via1 = np.matmul(self.H0, R_floor_1)
#H_via1[1,-1] = 0.3
else:
r_vec_floor = np.zeros(3)
r_vec_floor[index] = 1
ang_floor = 0.
H_via1 = self.H0
r_vec_floor_w = np.matmul(self.H0[:-1,:-1], r_vec_floor)
c01 = [r_vec_floor, ang_floor, r_vec_floor_w]
####################################################
############ From Viapoint 1 to Viapoint 2 ################
if index == 0:
vec_1 = H_via1[:-1, 1]
vec_2 = H_via2[:-1, 1]
else:
vec_1 = H_via1[:-1, 0]
vec_2 = H_via2[:-1, 0]
R12, r_vec_via12_p, ang_via12 = self.R_2vect(vec_1, vec_2)
r_vec_via12w = np.zeros(3)
r_vec_via12w[2] = np.sign(r_vec_via12_p[2])
r_vec_via12 = np.matmul(H_via1[:-1,:-1].T, r_vec_via12w)
c12 = [r_vec_via12, ang_via12, r_vec_via12w]
###########################################################
##### COMPUTE SHORTCUT: ########
rot_12 = to_H(Rotation.from_rotvec(c12[0]*c12[1]).as_matrix())
rot2g = to_H(Rotation.from_rotvec(c2g[0]*c2g[1]).as_matrix())
rot1g = np.matmul(rot_12,rot2g)
if np.allclose(rot1g, np.eye(4)):
c1g = [np.array([0,0,1]), 0.]
else:
rot1g = Rotation.from_matrix(rot1g[:-1,:-1]).as_rotvec()
c1g = [rot1g / np.linalg.norm(rot1g,ord=2), np.linalg.norm(rot1g,ord=2)]
##### Compute rotation from start to Via-2 ##
R_via2 = H_via2[:-1,:-1]
R_init = self.H0[:-1,:-1]
R_to_2 = np.matmul(R_init.T, R_via2)
if np.allclose(R_to_2, np.eye(3)):
c_to_2 = [np.array([0, 0, 1]), 0.]
else:
rot_to_2 = Rotation.from_matrix(R_to_2).as_rotvec()
c_to_2 = [rot_to_2 / np.linalg.norm(rot_to_2, ord=2), np.linalg.norm(rot_to_2, ord=2)]
##### Compute rotation from start to Goal ###
R_g = self.Hg[:-1, :-1]
R_init = self.H0[:-1, :-1]
R_to_g = np.matmul(R_init.T, R_g)
if np.allclose(R_to_g, np.eye(3)):
c_to_g = [np.array([0, 0, 1]), 0.]
else:
rot_to_g = Rotation.from_matrix(R_to_g).as_rotvec()
c_to_g = [rot_to_g / np.linalg.norm(rot_to_g, ord=2), np.linalg.norm(rot_to_g, ord=2)]
command_seq = [c01, c12,c2g]
return command_seq, [c1g], [c_to_2, c_to_g]
def find_index_z(self, H):
big_Z = 0.0
index = 0
for i in range(3):
z = H[2, i]
# print(z)
if np.abs(z) > big_Z:
big_Z = np.abs(z)
sign = np.sign(z)
index = i
return index, sign
def find_rot_z(self, index_H0, sign_H0, index, sign):
if index == index_H0:
if sign == sign_H0:
return None, None
else:
angle = np.pi
if index == 0:
rot_over = 1
else:
rot_over = 0
return rot_over, angle
else:
rot_over = 0
while (rot_over == index or rot_over == index_H0):
rot_over += 1
if sign == sign_H0:
angle = -np.pi / 2
if add_one(rot_over) != index_H0:
angle = -angle
else:
angle = np.pi / 2
if add_one(rot_over) != index_H0:
angle = -angle
return rot_over, angle
def closest_axis_2_normal(self, H):
# print (H)
# print (np.linalg.inv(H[:-1,:-1]))
min_angle = 190
x_des = np.array([0, 0, 1])
index = 0
sign = 0
reverse = False
for i in range(3):
x = H[:-1, i]
theta = todegree(angle(x, x_des))
# print (theta)
if theta > 90:
theta = theta - 180
if theta ==0:
reverse = True
if min_angle > np.abs(theta):
min_angle = np.abs(theta)
index = i
if theta == 0.:
if reverse:
sign = -1
else:
sign = 1
else:
sign = np.sign(theta)
return min_angle, index, sign
def R_2vect(self, vector_orig, vector_fin):
"""Calculate the rotation matrix required to rotate from one vector to another.
For the rotation of one vector to another, there is an infinite series of rotation matrices
possible. Due to axial symmetry, the rotation axis can be any vector lying in the symmetry
plane between the two vectors. Hence the axis-angle convention will be used to construct the
matrix with the rotation axis defined as the cross product of the two vectors. The rotation
angle is the arccosine of the dot product of the two unit vectors.
Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
the rotation matrix R is::
| 1 + (1-cos(a))*(x*x-1) -z*sin(a)+(1-cos(a))*x*y y*sin(a)+(1-cos(a))*x*z |
R = | z*sin(a)+(1-cos(a))*x*y 1 + (1-cos(a))*(y*y-1) -x*sin(a)+(1-cos(a))*y*z |
| -y*sin(a)+(1-cos(a))*x*z x*sin(a)+(1-cos(a))*y*z 1 + (1-cos(a))*(z*z-1) |
@return: The 4x4 homogeneous rotation matrix R, together with the (normalised) rotation axis and angle.
@param vector_orig: The unrotated vector defined in the reference frame.
@type vector_orig: numpy array, len 3
@param vector_fin: The rotated vector defined in the reference frame.
@type vector_fin: numpy array, len 3
"""
# Convert the vectors to unit vectors.
vector_orig = vector_orig / np.linalg.norm(vector_orig)
vector_fin = vector_fin / np.linalg.norm(vector_fin)
# The rotation axis (normalised).
axis = np.cross(vector_orig, vector_fin)
axis_len = np.linalg.norm(axis)
if axis_len != 0.0:
axis = axis / axis_len
# Alias the axis coordinates.
x = axis[0]
y = axis[1]
z = axis[2]
if x==0 and y==0 and z==0:
z=1
# The rotation angle.
angle = np.arccos(np.clip(np.dot(vector_orig, vector_fin),-1,1))
# Trig functions (only need to do this maths once!).
ca = np.cos(angle)
sa = np.sin(angle)
R = np.eye(4)
# Calculate the rotation matrix elements.
R[0, 0] = 1.0 + (1.0 - ca) * (x ** 2 - 1.0)
R[0, 1] = -z * sa + (1.0 - ca) * x * y
R[0, 2] = y * sa + (1.0 - ca) * x * z
R[1, 0] = z * sa + (1.0 - ca) * x * y
R[1, 1] = 1.0 + (1.0 - ca) * (y ** 2 - 1.0)
R[1, 2] = -x * sa + (1.0 - ca) * y * z
R[2, 0] = -y * sa + (1.0 - ca) * x * z
R[2, 1] = x * sa + (1.0 - ca) * y * z
R[2, 2] = 1.0 + (1.0 - ca) * (z ** 2 - 1.0)
return R, axis, angle
def calculate_mutltiple_goals(init_information, obs):
goal_orients = []
# This first calculation step computes a viapoint, i.e. it yields a goal orientation that aligns one body axis with the requested direction
init_orient = np.zeros(3)
init_orient[:2] = np.asarray(init_information[:2])
init_orient = init_orient / np.linalg.norm(init_orient)
current_orient = np.asarray(p.getMatrixFromQuaternion(obs["object_orientation"])).reshape(3, 3)
theta, index, sign = closest_axis_2_userdefined(to_H(current_orient), init_orient)
des_vec = sign * np.array(init_orient)
Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
first_goal = np.matmul(Rot1[:-1, :-1], current_orient)
first_goal = Rotation.from_matrix(first_goal)
    goal_orients.append([first_goal.as_quat()+[0.001,0.001,0.001,0.001],[0,0,0.0325]])  # small noise added; downstream code has problems with exact values otherwise
    # This second calculation applies the desired relative transformation based on the current observation
# now take into account the desired rotation from the target information:
des_rotation = np.asarray(p.getMatrixFromQuaternion(init_information[10:14])).reshape(3, 3)
init_orient = np.asarray([1,0,0])
theta, index, sign = closest_axis_2_userdefined(
to_H(current_orient), init_orient)
des_vec = sign * np.array(init_orient)
Rot1, r_vec_via2gw, ang_via = R_2vect(current_orient[:, index], des_vec)
second_goal = np.matmul(Rot1[:-1, :-1], current_orient)
# now apply rotation:
second_goal = np.matmul(des_rotation, second_goal)
# now rotate back to orientation that we are now at:
second_goal = np.matmul(Rot1[:-1, :-1].T, second_goal)
second_goal = Rotation.from_matrix(second_goal)
    goal_orients.append([second_goal.as_quat()+[0.001,0.001,0.001,0.001],[init_information[7],init_information[8],init_information[9]]])  # small noise added; downstream code has problems with exact values otherwise
return goal_orients | 3.15625 | 3 |
searchtube/db.py | dermasmid/searchtube | 11 | 12794718 | <reponame>dermasmid/searchtube
from pymongo import MongoClient
import os
def get_client():
client = MongoClient('searchtube_mongo', 27017, username=os.environ['DB_USERNAME'], password=os.environ['DB_PASSWORD'])
return client
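if __name__ == "__main__":
    # Quick connectivity check -- a sketch, not part of the original module; it assumes
    # DB_USERNAME/DB_PASSWORD are set and the "searchtube_mongo" host is reachable.
    client = get_client()
    print(client.server_info()["version"])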
| 2.203125 | 2 |
scripts/legacy/make_maestro_index.py | lucaspbastos/mirdata | 224 | 12794719 | import argparse
import hashlib
import json
import csv
import os
MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json'
def md5(file_path):
"""Get md5 hash of a file.
Parameters
----------
file_path: str
File path.
Returns
-------
md5_hash: str
md5 hash of data in file_path
"""
hash_md5 = hashlib.md5()
with open(file_path, 'rb') as fhandle:
for chunk in iter(lambda: fhandle.read(4096), b''):
hash_md5.update(chunk)
return hash_md5.hexdigest()
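# Reference sketch only (not used below): on Python 3.11+ the helper above can be
# written with hashlib.file_digest instead of manual chunking.
def md5_file_digest(file_path):
    """Equivalent of md5() using hashlib.file_digest (requires Python 3.11+)."""
    with open(file_path, 'rb') as fhandle:
        return hashlib.file_digest(fhandle, 'md5').hexdigest()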
def make_maestro_index(data_path):
metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json')
print(metadata_path)
maestro_index = {}
with open(metadata_path, 'r') as fhandle:
metadata = json.load(fhandle)
for i, row in enumerate(metadata):
print(i)
trackid = row['midi_filename'].split('.')[0]
maestro_index[trackid] = {}
midi_path = os.path.join(data_path, row['midi_filename'])
midi_checksum = md5(midi_path)
maestro_index[trackid]['midi'] = [row['midi_filename'], midi_checksum]
audio_path = os.path.join(data_path, row['audio_filename'])
audio_checksum = md5(audio_path)
maestro_index[trackid]['audio'] = [row['audio_filename'], audio_checksum]
with open(MAESTRO_INDEX_PATH, 'w') as fhandle:
json.dump(maestro_index, fhandle, indent=2)
def main(args):
print("creating index...")
make_maestro_index(args.maestro_data_path)
print("done!")
if __name__ == '__main__':
PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.')
PARSER.add_argument(
'maestro_data_path', type=str, help='Path to MAESTRO data folder.'
)
main(PARSER.parse_args())
| 3.0625 | 3 |
modules/core/website_screenshot_generator.py | susumuasaga/Python-Web-Scraping-Cookbook | 1 | 12794720 | from subprocess import Popen, PIPE
from selenium import webdriver
from PIL import Image
import io
class WebsiteScreenshotGenerator():
def __init__(self):
self._screenshot = None
def capture(self, url, width, height, crop=True):
print ("Capturing website screenshot of: " + url)
driver = webdriver.PhantomJS()
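        # Note: PhantomJS support was removed in Selenium 4; a headless Chrome or Firefox
        # driver is the usual replacement if this needs to run on newer Selenium releases.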
if width and height:
driver.set_window_size(width, height)
# go and get the content at the url
driver.get(url)
# get the screenshot and make it into a Pillow Image
self._screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png()))
print("Got a screenshot with the following dimensions: {0}".format(self._screenshot.size))
if crop:
# crop the image
self._screenshot = self._screenshot.crop((0,0, width, height))
print("Cropped the image to: {0} {1}".format(width, height))
return self
@property
def image(self):
return self._screenshot
@property
def image_bytes(self):
bytesio = io.BytesIO()
self._screenshot.save(bytesio, "PNG")
bytesio.seek(0)
return bytesio.getvalue()
if __name__ == "__main__":
import const
g = WebsiteScreenshotGenerator()
#g.do_screen_capturing(const.ApodEclipsePage(), "/Users/michaelheydt/thumbnail.png", 500, 100)
    g.capture("http://espn.go.com", 500, 100)  # the class defines capture(), not do_screen_capturing()
# need to explicitly crop | 3.09375 | 3 |
src/sst/selftests/alerts.py | DramaFever/sst | 4 | 12794721 | import sst
import sst.actions
from sst import config
# PhantomJS can not do alerts by design
if config.browser_type == 'phantomjs':
sst.actions.skip()
sst.actions.set_base_url('http://localhost:%s/' % sst.DEVSERVER_PORT)
sst.actions.go_to('/alerts')
# Accept an alert box and assert its text.
sst.actions.click_button('show-alert', wait=False)
sst.actions.accept_alert(u'JavaScript alert text')
sst.actions.assert_title('Page with JavaScript alerts')
# Accept a confirm box.
sst.actions.click_button('show-confirm', wait=False)
sst.actions.accept_alert()
sst.actions.accept_alert(u'Confirm accepted')
# Dismiss a confirm box and assert its text.
sst.actions.click_button('show-confirm', wait=False)
sst.actions.dismiss_alert(u'JavaScript confirm text')
sst.actions.accept_alert(u'Confirm dismissed')
# Enter text to a prompt box, accept it and assert its text.
sst.actions.click_button('show-prompt', wait=False)
sst.actions.accept_alert(u'JavaScript prompt text', 'Entered text')
sst.actions.accept_alert('Entered text')
# Enter text to a prompt box and dismiss it.
sst.actions.click_button('show-prompt', wait=False)
sst.actions.dismiss_alert(text_to_write='Entered text')
sst.actions.assert_title('Page with JavaScript alerts')
| 2.34375 | 2 |
ahlive/easing.py | ahuang11/ahlive | 25 | 12794722 | from collections.abc import Iterable
import numpy as np
import pandas as pd
import param
import xarray as xr
from matplotlib.colors import LinearSegmentedColormap, rgb2hex
from .configuration import DEFAULTS, EASES, INTERPS, PRECEDENCES, REVERTS
from .util import is_str
class Easing(param.Parameterized):
interp = param.ClassSelector(
default=None,
class_=Iterable,
doc=f"Interpolation method; {INTERPS}",
precedence=PRECEDENCES["interp"],
)
ease = param.ClassSelector(
default="in_out",
class_=Iterable,
doc=f"Type of easing; {EASES}",
precedence=PRECEDENCES["interp"],
)
frames = param.Integer(
default=None,
bounds=(1, None),
doc="Number of frames between each base state",
precedence=PRECEDENCES["interp"],
)
revert = param.ObjectSelector(
default=None,
objects=REVERTS,
doc="Method for reverting to the initial state; "
"boomerang finds the shortest path to the initial state, "
"traceback backtracks the original path to the initial state, and "
"rollback is like traceback, but disregards the "
"original's path durations",
precedence=PRECEDENCES["interp"],
)
num_states = param.Integer(doc="Number of states", **DEFAULTS["num_kwds"])
num_steps = param.Integer(
doc="Number of frames between each base state", **DEFAULTS["num_kwds"]
)
def __init__(self, **kwds):
super().__init__(**kwds)
def interpolate(self, da, name=""):
interp = self.interp or "cubic"
ease = self.ease
da_origin = da.copy()
is_xarray = isinstance(da, xr.DataArray)
is_bar = False
if is_xarray:
if "state" not in da.dims:
return da_origin
(
da,
name,
dims,
coords,
interp,
ease,
is_bar,
is_errorbar_morph,
) = self._prep_xarray(da)
array = self._prep_array(da)
num_items, num_states, num_steps, num_result = self._calc_shapes(array)
if (num_steps == 1 or num_states == 1) and self.revert is None:
return da_origin
steps = np.linspace(0, 1, num_steps)
interp_args = (steps, interp, ease, num_states, num_steps, num_items)
array_dtype = array.dtype
if name in ["duration", "remark", "xerr", "yerr"] and not is_errorbar_morph:
result = self._interp_first(
array, num_states, num_steps, num_items, num_result, name
)
elif interp == "fill" or name.endswith(
("zoom", "discrete_trail", "morph_trail", "tick_label", "bar_label")
):
result = self._interp_fill(array, num_states, num_steps, name)
elif np.issubdtype(array_dtype, np.datetime64):
result = self._interp_time(array, pd.to_datetime, *interp_args)
elif np.issubdtype(array_dtype, np.timedelta64):
result = self._interp_time(array, pd.to_timedelta, *interp_args)
elif np.issubdtype(array_dtype, np.number) and not is_bar:
if name == "central_longitude":
interp = "linear"
result = self._interp_numeric(array, *interp_args)
elif name in "c": # must be after number
result = self._interp_color(array, num_result)
elif is_bar:
result = self._interp_fill(array, num_states, num_steps, name)
else: # str
result = self._interp_text(array, num_states, num_steps, num_result)
if self.revert in ["traceback", "rollback"]:
result = self._apply_revert(result, name)
if is_xarray:
result = self._rebuild_da(result, da, dims, coords)
return result
def _prep_xarray(self, da):
name = da.name
interp = da.attrs.get("interp")
ease = da.attrs.get("ease")
for item_dim in da.dims:
if "item" in item_dim:
if "batch" in da.dims:
da = da.transpose(item_dim, "batch", "state", ...)
else:
da = da.transpose(item_dim, "state", ...)
break
dims = da.dims
if da.ndim > 2: # more than (item, state)
if "grid_item" in dims:
da = da.stack({"stacked": ["grid_item", "grid_y", "grid_x"]})
elif "batch" in dims:
da = da.stack({"stacked": [item_dim, "batch"]})
da = da.transpose("stacked", "state")
coords = da.drop_vars("state", errors="ignore").coords
is_bar = da.attrs.get("is_bar")
is_errorbar_morph = da.attrs.get("is_errorbar_morph")
return da, name, dims, coords, interp, ease, is_bar, is_errorbar_morph
def _prep_array(self, da):
array = np.array(da)
if array.ndim == 1:
array = array[np.newaxis, :]
if self.revert == "boomerang":
array = np.hstack([array, array[:, :1]])
return array
def _calc_shapes(self, array):
num_items, num_states = array.shape
if self.frames is None:
if num_states < 10:
num_steps = int(np.ceil(60 / num_states))
else:
num_steps = int(np.ceil(100 / num_states))
else:
num_steps = self.frames
with param.edit_constant(self):
self.num_steps = num_steps
num_result = (num_states - 1) * num_steps
return num_items, num_states, num_steps, num_result
def _apply_revert(self, result, name):
if result.ndim == 1:
result_back = result[::-1]
else:
result_back = result[:, ::-1]
if name == "duration" and self.revert == "rollback":
result_back = np.repeat(1 / 60, result_back.shape[-1])[np.newaxis, :]
result = np.hstack([result, result_back])
return result
def _rebuild_da(self, result, da, dims, coords):
if len(dims) == 1:
result = result.squeeze()
result = xr.DataArray(
result,
dims=da.dims,
coords=coords,
name=da.name,
attrs=da.attrs,
)
if "stacked" in result.dims:
result = result.unstack().transpose(*dims)
return result
def _interp_first(self, array, num_states, num_steps, num_items, num_result, name):
if is_str(array):
fill = ""
            dtype = object  # np.object was removed in NumPy 1.24; the builtin object is equivalent
else:
fill = 0.0
dtype = None
result = np.full((num_items, num_result), fill, dtype=dtype)
indices = np.arange(num_states) * num_steps
indices[-1] -= 1
result[:, indices] = array # (1, num_states)
return result
def _interp_fill(self, array, num_states, num_steps, name):
indices = np.arange(num_states * num_steps - num_steps)
result = (
pd.DataFrame(
array,
columns=np.arange(0, num_states * num_steps, num_steps),
)
.T.reindex(indices)
.T
)
if not name.endswith("discrete_trail"):
result = result.ffill(axis=1).fillna("").values
result[:, -1] = array[:, -1]
else:
result = result.values
return result
def _interp_color(self, array, num_result):
results = []
for colors in array: # item, state
cmap = LinearSegmentedColormap.from_list("eased", colors, N=num_result)
results.append([rgb2hex(rgb) for rgb in cmap(np.arange(num_result))])
result = np.array(results)
return result
def _interp_text(self, array, num_states, num_steps, num_result):
result = np.repeat(array, num_steps, axis=-1)
num_roll = -int(np.ceil(num_steps / num_states * 2))
if num_states > 2:
result = np.roll(result, num_roll, axis=-1)
result = result[:, :num_result]
else:
half_way = int(num_result / 2)
result = result[:, half_way:-half_way]
if num_steps % 2 != 0:
result = result[:, :-1]
return result
def _interp_time(
self, array, conversion, steps, interp, ease, num_states, num_steps, num_items
):
array = array.astype(float)
result = self._interp_numeric(
array, steps, interp, ease, num_states, num_steps, num_items
)
result = conversion(result.ravel()).values
result = result.reshape(num_items, -1)
return result
def _interp_numeric(
self, array, steps, interp, ease, num_states, num_steps, num_items
):
init = np.repeat(array[:, :-1], num_steps, axis=-1)
init_nans = np.isnan(init)
init[init_nans] = 0 # temporarily fill the nans
stop = np.repeat(array[:, 1:], num_steps, axis=-1)
stop_nans = np.isnan(stop)
tiled_steps = np.tile(steps, (num_states - 1) * num_items).reshape(
num_items, -1
)
weights = getattr(self, f"_{interp.lower()}")(tiled_steps, ease)
result = stop * weights + init * (1 - weights)
result[init_nans | stop_nans] = np.nan # replace nans
return result
def _linear(self, ts, ease):
return ts
def _quadratic(self, ts, ease):
if ease == "in":
ts = ts * ts
elif ease == "out":
ts = -(ts * (ts - 2))
elif ease == "in_out":
index = ts < 0.5
ts[index] = 2 * ts[index] * ts[index]
ts[~index] = (-2 * ts[~index] * ts[~index]) + (4 * ts[~index]) - 1
return ts
def _cubic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 4 * ts[index] * ts[index] * ts[index]
ts[~index] = 2 * ts[~index] - 2
ts[~index] = 0.5 * ts[~index] * ts[~index] * ts[~index] + 1
return ts
def _quartic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) * (1 - ts) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 8 * ts[index] * ts[index] * ts[index] * ts[index]
ts[~index] = ts[~index] - 1
ts[~index] = -8 * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1
return ts
def _quintic(self, ts, ease):
if ease == "in":
ts = ts * ts * ts * ts * ts
elif ease == "out":
ts = (ts - 1) * (ts - 1) * (ts - 1) * (ts - 1) * (ts - 1) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = 16 * ts[index] * ts[index] * ts[index] * ts[index] * ts[index]
ts[~index] = (2 * ts[~index]) - 2
ts[~index] = (
0.5 * ts[~index] * ts[~index] * ts[~index] * ts[~index] * ts[~index] + 1
)
return ts
def _sine(self, ts, ease):
if ease == "in":
ts = np.sin((ts - 1) * np.pi / 2) + 1
elif ease == "out":
ts = np.sin(ts * np.pi / 2)
elif ease == "in_out":
ts = 0.5 * (1 - np.cos(ts * np.pi))
return ts
def _circular(self, ts, ease):
if ease == "in":
ts = 1 - np.sqrt(1 - (ts * ts))
elif ease == "out":
ts = np.sqrt((2 - ts) * ts)
elif ease == "in_out":
index = ts < 0.5
ts[index] = 0.5 * (1 - np.sqrt(1 - 4 * (ts[index] * ts[index])))
ts[~index] = 0.5 * (
np.sqrt(-((2 * ts[~index]) - 3) * ((2 * ts[~index]) - 1)) + 1
)
return ts
def _exponential(self, ts, ease):
if ease == "in":
index = ts != 0
ts[~index] = 0
ts[index] = np.power(2, 10 * (ts[index] - 1))
elif ease == "out":
index = ts != 1
ts[~index] = 1
ts[index] = 1 - np.power(2, -10 * ts[index])
elif ease == "in_out":
index0 = (ts != 0) & (ts < 0.5) & (ts != 1)
index1 = (ts != 0) & (ts >= 0.5) & (ts != 1)
ts[index0] = 0.5 * np.power(2, (20 * ts[index0]) - 10)
ts[index1] = -0.5 * np.power(2, (-20 * ts[index1]) + 10) + 1
return ts
def _elastic(self, ts, ease):
if ease == "in":
ts = np.sin(13 * np.pi / 2 * ts) * np.power(2, 10 * (ts - 1))
elif ease == "out":
ts = np.sin(-13 * np.pi / 2 * (ts + 1)) * np.power(2, -10 * ts) + 1
elif ease == "in_out":
index = ts < 0.5
ts[index] = (
0.5
* np.sin(13 * np.pi / 2 * (2 * ts[index]))
* np.power(2, 10 * ((2 * ts[index]) - 1))
)
ts[~index] = 0.5 * (
np.sin(-13 * np.pi / 2 * ((2 * ts[~index] - 1) + 1))
* np.power(2, -10 * (2 * ts[~index] - 1))
+ 2
)
return ts
def _back(self, ts, ease):
if ease == "in":
ts = ts * ts * ts - ts * np.sin(ts * np.pi)
elif ease == "out":
ts = 1 - ts
ts = 1 - (ts * ts * ts - ts * np.sin(ts * np.pi))
elif ease == "in_out":
index = ts < 0.5
ts[index] = 2 * ts[index]
ts[index] = 0.5 * (
ts[index] * ts[index] * ts[index]
- ts[index] * np.sin(ts[index] * np.pi)
)
ts[~index] = 1 - (2 * ts[~index] - 1)
ts[~index] = (
0.5
* (
1
- (
ts[~index] * ts[~index] * ts[~index]
- ts[~index] * np.sin(ts[~index] * np.pi)
)
)
+ 0.5
)
return ts
def _bounce(self, ts, ease):
index = ts < 0.5
if ease == "in":
ts = 1 - ts
elif ease == "in_out":
ts[index] = 1 - (ts[index] * 2)
ts[~index] = ts[~index] * 2 - 1
index0 = ts < 4 / 11
index1 = (ts < 8 / 11) & ~index0
index2 = (ts < 9 / 10) & ~index1 & ~index0
index3 = ts >= 9 / 10
ts[index0] = 121 * ts[index0] * ts[index0] / 16
ts[index1] = (
(363 / 40.0 * ts[index1] * ts[index1]) - (99 / 10.0 * ts[index1]) + 17 / 5.0
)
ts[index2] = (
(4356 / 361.0 * ts[index2] * ts[index2])
- (35442 / 1805.0 * ts[index2])
+ 16061 / 1805.0
)
ts[index3] = (
(54 / 5.0 * ts[index3] * ts[index3])
- (513 / 25.0 * ts[index3])
+ 268 / 25.0
)
if ease == "in":
ts = 1 - ts
elif ease == "out":
pass
elif ease == "in_out":
ts[index] = 0.5 * (1 - ts[index])
ts[~index] = 0.5 * ts[~index] + 0.5
return ts
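if __name__ == "__main__":
    # Standalone sketch (not used by the class) of the quadratic "in_out" easing above:
    # the interpolation weights start at 0, end at 1, and pass through 0.5 halfway.
    ts_demo = np.linspace(0, 1, 5)
    ws_demo = np.where(ts_demo < 0.5, 2 * ts_demo * ts_demo,
                       (-2 * ts_demo * ts_demo) + (4 * ts_demo) - 1)
    print(ws_demo)  # weights 0, 0.125, 0.5, 0.875, 1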
| 2.296875 | 2 |
posts/views.py | Kolokol2002/Yatube | 0 | 12794723 | from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils import timezone
from .forms import CommentForm, GroupForm, PostForm
from .models import Comment, Follow, Group, Post, User
from django.views.generic import (ListView,
DetailView,
CreateView,
UpdateView,
DeleteView)
class IndexListView(ListView):
model = Post
template_name = 'index.html'
paginate_by = 5
context_object_name = 'post_list'
extra_context = {
'index_page': True
}
# def group_posts(request, slug):
# '''Страница с публикиями связанными с группой'''
# group = get_object_or_404(Group, slug=slug)
# post_list = Post.objects.filter(
# group=group).select_related(
# 'author', 'group').annotate(
# comment_count=Count(
# 'commented_post')).order_by("-pub_date").all()
# paginator = Paginator(post_list, 10)
# page_number = request.GET.get('page')
# page = paginator.get_page(page_number)
# return render(request, "group.html", {'group': group,
# 'post_list': page,
# 'paginator': paginator})
class GroupPostView(ListView):
model = Post
template_name = 'group.html'
paginate_by = 5
context_object_name = 'post_list'
@property
def extra_context(self):
return {
'group': get_object_or_404(Group, slug=self.kwargs['slug'])
}
def get_queryset(self, *args, **kwargs):
query_set = super().get_queryset()
return query_set.filter(
group__slug=self.kwargs['slug']).select_related(
'author', 'group')
# @login_required
# def new_post(request):
# '''Страница создания новой публикации'''
# if request.method == 'POST':
# form = PostForm(request.POST, files=request.FILES or None)
# if form.is_valid():
# post = form.save(commit=False)
# post.author = request.user
# post.save()
# messages.add_message(
# request, messages.SUCCESS, f'Новая запись добавлена', extra_tags='success'
# )
# return redirect('index')
# else:
# form = PostForm()
# return render(request, 'new_post.html', {'form': form})
# @login_required
class NewPostCreateView(LoginRequiredMixin, CreateView):
model = Post
form_class = PostForm
template_name = 'new_post.html'
extra_context = {
'title': 'Создать новою запись'
}
def form_valid(self, form):
post = form.save(commit=False)
post.author = self.request.user
post.save()
messages.add_message(
self.request,
messages.SUCCESS,
f'Новая запись добавлена'
)
return super().form_valid(form)
def get_success_url(self):
return reverse('index')
# def post_view(request, post_id, username):
# '''Страница отдельной публикации'''
# user_profile = get_object_or_404(
# User.objects.filter(username=username).annotate(
# follower_count=Count('follower', distinct=True),
# following_count=Count('following', distinct=True),
# post_count=Count('post_author', distinct=True)))
# post = get_object_or_404(
# Post.objects.annotate(
# comment_count=Count(
# 'commented_post')).select_related('author', 'group'),
# pk=post_id)
# post_comment = Comment.objects.filter(
# post=post_id).select_related('author').order_by("-created").all()
# form = CommentForm()
# following = False
# if request.user.is_authenticated:
# if Follow.objects.filter(author=user_profile,
# user=request.user).exists():
# following = True
# return render(request, 'post_view.html', {'post': post,
# 'profile': user_profile,
# 'comments': post_comment,
# 'form': form,
# 'following': following})
class PostView(ListView):
model = Comment
template_name = 'post_view.html'
context_object_name = 'comments'
@property
def extra_context(self):
user_profile = get_object_or_404(
User.objects.filter(username=self.kwargs['username']).annotate(
follower_count=Count('follower', distinct=True),
following_count=Count('following', distinct=True),
post_count=Count('post_author', distinct=True)))
post = get_object_or_404(
Post.objects.annotate(
comment_count=Count(
'commented_post')).select_related('author', 'group'),
pk=self.kwargs['post_id'])
following = False
if self.request.user.is_authenticated:
if Follow.objects.filter(author=user_profile,
user=self.request.user).exists():
following = True
return {'post': post,
'profile': user_profile,
'form': CommentForm(),
'following': following}
def get_queryset(self):
query_set = super().get_queryset()
return query_set.filter(
post=self.kwargs['post_id']).select_related('author')
# def profile(request, username):
# '''Страница с публикациями пользователя'''
# user_profile = get_object_or_404(
# User.objects.filter(
# username=username).annotate(
# follower_count=Count('follower', distinct=True),
# following_count=Count('following', distinct=True)))
# post_list = Post.objects.filter(
# author=user_profile).select_related(
# 'group', 'author').annotate(
# comment_count=Count(
# 'commented_post')).order_by("-pub_date").all()
# paginator = Paginator(post_list, 10)
# page_number = request.GET.get('page')
# page = paginator.get_page(page_number)
# following = False
# if request.user.is_authenticated:
# if Follow.objects.filter(author=user_profile,
# user=request.user).exists():
# following = True
# return render(request, "profile.html", {'profile': user_profile,
# 'post_list': page,
# 'paginator': paginator,
# 'following': following})
class ProfileView(ListView):
model = Post
template_name = 'profile.html'
paginate_by = 5
context_object_name = 'post_list'
@property
def extra_context(self):
profile = get_object_or_404(
User.objects.filter(
username=self.kwargs['username']).annotate(
follower_count=Count('follower', distinct=True),
following_count=Count('following', distinct=True)))
following = False
if self.request.user.is_authenticated:
if Follow.objects.filter(author=profile,
user=self.request.user).exists():
following = True
return {
'profile': profile,
'following': following
}
def get_queryset(self):
profile = get_object_or_404(
User.objects.filter(
username=self.kwargs['username']).annotate(
follower_count=Count('follower', distinct=True),
following_count=Count('following', distinct=True)))
query_set = super().get_queryset()
return query_set.filter(
author=profile).select_related(
'group', 'author')
# @login_required
# def post_edit(request, username, post_id):
# '''Страница редактирования публикации'''
# title = 'Редактировать запись'
# post = get_object_or_404(Post.objects.select_related('author'), pk=post_id)
# if request.user == post.author:
# if request.method == "POST":
# form = PostForm(request.POST or None,
# files=request.FILES or None,
# instance=post)
# if form.is_valid():
# post = form.save(commit=False)
# post.pub_date = timezone.now()
# post.save()
# return redirect('post', post_id=post.pk, username=username)
# else:
# form = PostForm(instance=post)
# else:
# return redirect('post', post_id=post.pk, username=post.author)
# return render(
# request, "new_post.html", {'form': form, 'title': title, 'post': post})
class PostEditUpdateView(LoginRequiredMixin, UpdateView):
model = Post
slug_field = 'username'
pk_url_kwarg = 'post_id'
form_class = PostForm
template_name = 'new_post.html'
extra_context = {
'title': 'Редактировать запись'
}
def form_valid(self, form):
post = form.save(commit=False)
post.pub_date = timezone.now()
post.save()
messages.add_message(
self.request,
messages.SUCCESS,
f'Запись обновлена!',
extra_tags='success'
)
return super().form_valid(form)
def get_success_url(self):
return reverse('profile', kwargs={
'username': self.object.author,
})
# @login_required
# def post_delete(request, username, post_id):
# '''Функция для удаления публикации'''
# post = get_object_or_404(Post, pk=post_id)
# if request.user == post.author:
# post.delete()
# return redirect('profile', username=username)
# return redirect('post', post_id=post.pk, username=post.author)
class PostDeleteView(LoginRequiredMixin, DeleteView):
model = Post
template_name = 'post_delete.html'
slug_field = 'username'
pk_url_kwarg = 'post_id'
success_message = 'Запись удалена'
def get_success_url(self):
return reverse('profile', kwargs={
'username': self.object.author,
})
# @login_required
# def add_comment(request, username, post_id):
# '''Функция для добавления комментария к публикации'''
# post = get_object_or_404(Post, pk=post_id)
# if request.method == 'POST':
# form = CommentForm(request.POST)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.post = post
# comment.author = request.user
# comment.save()
# return redirect('post', post_id=post_id, username=username)
# return redirect('post', post_id=post_id, username=username)
class AddCommentView(LoginRequiredMixin, CreateView):
model = Comment
template_name = 'comments.html'
form_class = CommentForm
slug_field = 'username'
pk_url_kwarg = 'post_id'
def form_valid(self, form):
post = get_object_or_404(Post, pk=self.kwargs['post_id'])
comment = form.save(commit=False)
comment.post = post
comment.author = self.request.user
comment.save()
return super().form_valid(form)
def get_success_url(self):
return reverse('post', kwargs={'username': self.object.author,
'post_id': self.object.post.pk
})
# @login_required
# def follow_index(request):
# '''Страница с публикациями избранных пользователей'''
# follow_page = True
# post_list = Post.objects.filter(
# author__following__user=request.user).select_related(
# 'group', 'author').annotate(
# comment_count=Count(
# 'commented_post')).order_by("-pub_date").all()
# paginator = Paginator(post_list, 10)
# page_number = request.GET.get('page')
# page = paginator.get_page(page_number)
# return render(request, "follow.html", {'page': page,
# 'paginator': paginator,
# 'follow_page': follow_page})
class FollowIndexView(LoginRequiredMixin, ListView):
model = Post
template_name = 'follow.html'
paginate_by = 5
context_object_name = 'post_list'
extra_context = {
'follow_page': True
}
def get_queryset(self):
query_set = super().get_queryset()
return query_set.filter(
author__following__user=self.request.user).select_related(
'group', 'author')
@login_required
def profile_follow(request, username):
    '''Subscribe the current user to the author's posts.'''
followed_author = get_object_or_404(User, username=username)
if followed_author == request.user:
return redirect('profile', username=username)
if Follow.objects.filter(user=request.user,
author=followed_author).exists():
return redirect('profile', username=username)
Follow.objects.create(author=followed_author, user=request.user)
return redirect('profile', username=username)
@login_required
def profile_unfollow(request, username):
    '''Unsubscribe the current user from the author.'''
    follower = Follow.objects.filter(author__username=username,
                                     user=request.user)
    follower.delete()
return redirect('profile', username=username)
@login_required
def delete_comment(request, username, post_id, comment_id):
    '''Delete a comment on a post.'''
comment = get_object_or_404(Comment, post=post_id, pk=comment_id)
if request.user == comment.author:
comment.delete()
return redirect('post', username=username, post_id=post_id)
# @login_required
# def edit_comment(request, username, post_id, comment_id):
# '''Функция для редактирования комментария к публикации'''
# title = 'Редактировать комментарий'
# comment = get_object_or_404(Comment, post=post_id, pk=comment_id)
# if request.user == comment.author:
# if request.method == 'POST':
# form = CommentForm(request.POST, instance=comment)
# if form.is_valid():
# comment = form.save(commit=False)
# comment.created = timezone.now()
# comment.save()
# return redirect('post', username=username, post_id=post_id)
# form = CommentForm(instance=comment)
# return render(request, "new_post.html", {'form': form, 'title': title})
class CommentEditView(LoginRequiredMixin, UpdateView):
model = Comment
template_name = 'new_post.html'
form_class = CommentForm
pk_url_kwarg = 'comment_id'
extra_context = {
'title': 'Редактировать комментарий'
}
def form_valid(self, form):
comment = form.save(commit=False)
comment.created = timezone.now()
comment.save()
messages.add_message(
self.request,
messages.SUCCESS,
f'Коментарий отредактирован',
)
return super().form_valid(form)
def get_success_url(self):
return reverse('post', kwargs={'username': self.object.author,
'post_id': self.object.post.pk
})
# @login_required
# def add_group(request):
# '''Страница для добавления группы'''
# title = 'Создать группу'
# if request.method == 'POST':
# form = GroupForm(request.POST)
# if form.is_valid():
# slug = form.cleaned_data['slug']
# form.save()
# return redirect("group", slug=slug)
# return render(request, "new_post.html", {'form': form, 'title': title})
# form = GroupForm()
# return render(request, "new_post.html", {'form': form, 'title': title})
class GroupAddView(LoginRequiredMixin, CreateView):
model = Group
template_name = 'new_post.html'
form_class = GroupForm
extra_context = {
'title': 'Создать группу'
}
def get_success_url(self, **kwargs):
return reverse('group', kwargs={'slug': self.object.slug})
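# Example URL wiring for the class-based views above (sketch only -- the real routes
# live in the project's urls.py and may differ):
#
#     path('', IndexListView.as_view(), name='index'),
#     path('group/<slug:slug>/', GroupPostView.as_view(), name='group'),
#     path('new/', NewPostCreateView.as_view(), name='new_post'),
#     path('<str:username>/<int:post_id>/', PostView.as_view(), name='post'),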
def page_not_found(request, exception):
    '''404 error page.'''
return render(request, "misc/404.html", {"path": request.path}, status=404)
def server_error(request):
    '''500 error page.'''
return render(request, "misc/500.html", status=500) | 2.03125 | 2 |
app/addrbookapp/forms.py | kumarisneha/django_on_docker | 0 | 12794724 | <gh_stars>0
from django import forms
from django.core.validators import RegexValidator
from .models import Address
from django.forms import ModelForm, Textarea,TextInput
from django.core.exceptions import ValidationError
class AddressBookForm(forms.ModelForm):
class Meta:
model = Address
fields = ['id','user','email_id','address','phone_number']
widgets = {
'user': TextInput(attrs={'class':'form-control','name':'user','placeholder':'User Name'}),
'email_id': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Email'}),
'address': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Address'}),
'phone_number': TextInput(attrs={'class':'form-control form-control-solid placeholder-no-fix','placeholder':'Phone number'}),
}
def clean_phone_number(self):
        phone_num = self.cleaned_data.get('phone_number', None)
try:
int(phone_num)
except (ValueError, TypeError):
raise ValidationError("Please enter a valid phone number")
return phone_num
def clean_email_id(self):
data = self.cleaned_data['email_id']
domain = data.split('@')[1]
domain_list = ["gmail.com", "yahoo.com", "hotmail.com",]
if domain not in domain_list:
raise ValidationError("Please enter an Email Address with a valid domain")
return data
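# Note: RegexValidator is imported above but never used; a stricter phone check could
# rely on it instead of the bare int() cast, e.g. (sketch):
#
#     phone_validator = RegexValidator(r'^\+?\d{7,15}$', 'Please enter a valid phone number')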
| 2.671875 | 3 |
vendor/feedvalidator/demo/src/rdflib/syntax/parsers/__init__.py | BenoitZugmeyer/NewsBlur | 65 | 12794725 | <reponame>BenoitZugmeyer/NewsBlur
__all__ = ["RDFXMLParser", "NTParser"]
| 1.070313 | 1 |
tests/test15.py | ngurkan/judicious | 4 | 12794726 | <reponame>ngurkan/judicious
import judicious
# judicious.register("http://1172.16.58.3:5000")
judicious.register("https://imprudent.herokuapp.com")
solved = judicious.recaptcha()
print(solved)
| 1.84375 | 2 |
pymap/backend/redis/__init__.py | icgood/pymap | 18 | 12794727 |
from __future__ import annotations
import json
import uuid
from argparse import ArgumentParser, Namespace
from collections.abc import Awaitable, Callable, Mapping, AsyncIterator
from contextlib import closing, asynccontextmanager, AsyncExitStack
from datetime import datetime
from functools import partial
from secrets import token_bytes
from typing import Any, Optional, Final
from aioredis import create_redis, Redis, ConnectionClosedError
from pysasl.creds import AuthenticationCredentials
from pymap.bytes import BytesFormat
from pymap.config import BackendCapability, IMAPConfig
from pymap.context import connection_exit
from pymap.exceptions import AuthorizationFailure, IncompatibleData, \
NotAllowedError, UserNotFound
from pymap.health import HealthStatus
from pymap.interfaces.backend import BackendInterface
from pymap.interfaces.login import LoginInterface, IdentityInterface
from pymap.interfaces.token import TokensInterface
from pymap.token import AllTokens
from pymap.user import UserMetadata
from .cleanup import CleanupTask
from .filter import FilterSet
from .keys import DATA_VERSION, RedisKey, GlobalKeys, CleanupKeys, \
NamespaceKeys
from .mailbox import Message, MailboxSet
from ..session import BaseSession
__all__ = ['RedisBackend', 'Config', 'Session']
class RedisBackend(BackendInterface):
"""Defines a backend that uses redis data structures for mailbox storage.
"""
def __init__(self, login: Login, config: Config,
status: HealthStatus) -> None:
super().__init__()
self._login = login
self._config = config
self._status = status
@property
def login(self) -> Login:
return self._login
@property
def config(self) -> Config:
return self._config
@property
def status(self) -> HealthStatus:
return self._status
@classmethod
def add_subparser(cls, name: str, subparsers: Any) -> ArgumentParser:
parser = subparsers.add_parser(name, help='redis backend')
parser.add_argument('--address', metavar='URL',
default='redis://localhost',
help='the redis server address')
parser.add_argument('--select', metavar='DB', type=int,
help='the redis database for mail data')
parser.add_argument('--separator', metavar='CHAR', default='/',
help='the redis key segment separator')
parser.add_argument('--prefix', metavar='VAL', default='/mail',
help='the mail data key prefix')
parser.add_argument('--users-prefix', metavar='VAL', default='/users',
help='the user lookup key prefix')
parser.add_argument('--users-json', action='store_true',
help='the user lookup value contains JSON')
return parser
@classmethod
async def init(cls, args: Namespace, **overrides: Any) \
-> tuple[RedisBackend, Config]:
config = Config.from_args(args)
status = HealthStatus()
connect_redis = partial(cls._connect_redis, config, status)
login = Login(config, connect_redis)
return cls(login, config, status), config
@classmethod
async def _connect_redis(cls, config: Config,
status: HealthStatus) -> Redis:
try:
redis = await create_redis(config.address)
except (ConnectionClosedError, OSError):
status.set_unhealthy()
raise
else:
status.set_healthy()
stack = connection_exit.get()
stack.enter_context(closing(redis))
return redis
async def start(self, stack: AsyncExitStack) -> None:
config = self._config
global_keys = config._global_keys
connect_redis = partial(self._connect_redis, config, self._status)
cleanup_task = CleanupTask(connect_redis, global_keys).start()
stack.callback(cleanup_task.cancel)
class Config(IMAPConfig):
"""The config implementation for the redis backend.
Args:
args: The command-line arguments.
address: The redis server address.
select: The redis database for mail data.
separator: The redis key segment separator.
prefix: The prefix for mail data keys.
users_prefix: The user lookup key prefix.
users_json: True if the user lookup value contains JSON.
"""
def __init__(self, args: Namespace, *, address: str, select: Optional[int],
separator: bytes, prefix: bytes, users_prefix: bytes,
users_json: bool, **extra: Any) -> None:
super().__init__(args, admin_key=token_bytes(), **extra)
self._address = address
self._select = select
self._separator = separator
self._prefix = prefix
self._users_prefix = users_prefix
self._users_json = users_json
@property
def backend_capability(self) -> BackendCapability:
return BackendCapability(idle=True, object_id=True, multi_append=True)
@property
def address(self) -> str:
"""The redis server address. Defaults to a connection to localhost.
See Also:
:func:`aioredis.create_connection`
"""
return self._address
@property
def select(self) -> Optional[int]:
"""The redis database for mail data. If given, the `SELECT`_ command is
called after successful user lookup.
.. _SELECT: https://redis.io/commands/select
"""
return self._select
@property
def separator(self) -> bytes:
"""The bytestring used to separate segments of composite redis keys."""
return self._separator
@property
def prefix(self) -> bytes:
"""The prefix for mail data keys. This prefix does not apply to
:attr:`.users_key`.
"""
return self._prefix
@property
def users_prefix(self) -> bytes:
"""The prefix for user lookup keys."""
return self._users_prefix
@property
def users_json(self) -> bool:
"""True if the value from the user lookup key contains a JSON object
with a ``"password"`` attribute, instead of a redis hash with a
``password`` key.
See Also:
`redis hashes
<https://redis.io/topics/data-types-intro#redis-hashes>`_
"""
return self._users_json
@property
def _joiner(self) -> BytesFormat:
return BytesFormat(self.separator)
@property
def _users_root(self) -> RedisKey:
return RedisKey(self._joiner, [self.users_prefix], {})
@property
def _global_keys(self) -> GlobalKeys:
key = RedisKey(self._joiner, [self.prefix], {})
return GlobalKeys(key)
@classmethod
def parse_args(cls, args: Namespace) -> Mapping[str, Any]:
return {**super().parse_args(args),
'address': args.address,
'select': args.select,
'separator': args.separator.encode('utf-8'),
'prefix': args.prefix.encode('utf-8'),
'users_prefix': args.users_prefix.encode('utf-8'),
'users_json': args.users_json}
class Session(BaseSession[Message]):
"""The session implementation for the redis backend."""
resource = __name__
def __init__(self, redis: Redis, owner: str, config: Config,
mailbox_set: MailboxSet, filter_set: FilterSet) -> None:
super().__init__(owner)
self._redis = redis
self._config = config
self._mailbox_set = mailbox_set
self._filter_set = filter_set
@property
def config(self) -> IMAPConfig:
return self._config
@property
def mailbox_set(self) -> MailboxSet:
return self._mailbox_set
@property
def filter_set(self) -> FilterSet:
return self._filter_set
class Login(LoginInterface):
"""The login implementation for the redis backend."""
def __init__(self, config: Config,
connect_redis: Callable[[], Awaitable[Redis]]) -> None:
super().__init__()
self._config = config
self._connect_redis = connect_redis
self._tokens = AllTokens()
@property
def tokens(self) -> TokensInterface:
return self._tokens
async def authenticate(self, credentials: AuthenticationCredentials) \
-> Identity:
config = self._config
redis = await self._connect_redis()
authcid = credentials.authcid
token_key: Optional[bytes] = None
role: Optional[str] = None
if credentials.authcid_type == 'admin-token':
authcid = credentials.identity
role = 'admin'
try:
authcid_identity = Identity(config, self.tokens, redis, authcid)
metadata = await authcid_identity.get()
except UserNotFound:
metadata = UserMetadata(config)
if 'key' in metadata.params:
token_key = bytes.fromhex(metadata.params['key'])
role = role or metadata.role
await metadata.check_password(credentials, token_key=token_key)
if role != 'admin' and authcid != credentials.identity:
raise AuthorizationFailure()
return Identity(config, self.tokens, redis, credentials.identity, role)
class Identity(IdentityInterface):
"""The identity implementation for the redis backend."""
def __init__(self, config: Config, tokens: TokensInterface,
redis: Redis, name: str, role: str = None) -> None:
super().__init__()
self.config: Final = config
self.tokens: Final = tokens
self._redis: Optional[Redis] = redis
self._name = name
self._role = role
@property
def name(self) -> str:
return self._name
@property
def redis(self) -> Redis:
redis = self._redis
if redis is None:
# Other methods may not be called after new_session(), since it
# may have called SELECT on the connection.
raise RuntimeError()
return redis
async def new_token(self, *, expiration: datetime = None) -> Optional[str]:
metadata = await self.get()
if 'key' not in metadata.params:
return None
key = bytes.fromhex(metadata.params['key'])
return self.tokens.get_login_token(self.name, key)
@asynccontextmanager
async def new_session(self) -> AsyncIterator[Session]:
config = self.config
redis = self.redis
self._redis = None
if config.select is not None:
await redis.select(config.select)
global_keys = config._global_keys
namespace = await self._get_namespace(redis, global_keys, self.name)
ns_keys = NamespaceKeys(global_keys, namespace)
cl_keys = CleanupKeys(global_keys)
mailbox_set = MailboxSet(redis, ns_keys, cl_keys)
filter_set = FilterSet(redis, ns_keys)
try:
await mailbox_set.add_mailbox('INBOX')
except ValueError:
pass
yield Session(redis, self.name, config, mailbox_set, filter_set)
async def _get_namespace(self, redis: Redis, global_keys: GlobalKeys,
user: str) -> bytes:
user_key = user.encode('utf-8')
new_namespace = uuid.uuid4().hex.encode('ascii')
ns_val = b'%d/%b' % (DATA_VERSION, new_namespace)
multi = redis.multi_exec()
multi.hsetnx(global_keys.namespaces, user_key, ns_val)
multi.hget(global_keys.namespaces, user_key)
_, ns_val = await multi.execute()
version, namespace = ns_val.split(b'/', 1)
if int(version) != DATA_VERSION:
raise IncompatibleData()
return namespace
async def get(self) -> UserMetadata:
redis = self.redis
user_bytes = self.name.encode('utf-8')
user_key = self.config._users_root.end(user_bytes)
if self.config.users_json:
json_data = await redis.get(user_key)
if json_data is None:
raise UserNotFound(self.name)
data_dict = json.loads(json_data)
else:
data_dict = await redis.hgetall(user_key, encoding='utf-8')
if data_dict is None:
raise UserNotFound(self.name)
return UserMetadata(self.config, **data_dict)
async def set(self, metadata: UserMetadata) -> None:
config = self.config
redis = self.redis
if self._role != 'admin' and metadata.role:
raise NotAllowedError('Cannot assign role.')
user_key = config._users_root.end(self.name.encode('utf-8'))
user_dict = metadata.to_dict(key=token_bytes().hex())
if self.config.users_json:
json_data = json.dumps(user_dict)
await redis.set(user_key, json_data)
else:
multi = redis.multi_exec()
multi.delete(user_key)
multi.hmset_dict(user_key, user_dict)
await multi.execute()
async def delete(self) -> None:
config = self.config
user_key = config._users_root.end(self.name.encode('utf-8'))
if not await self.redis.delete(user_key):
raise UserNotFound(self.name)
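# Illustration only (not part of pymap): with --users-json enabled, the value stored
# under the user-lookup key is a JSON object carrying at least the password, e.g.
#
#     SET "/users/alice" '{"password": "hunter2"}'
#
# while the default layout stores the same fields in a redis hash instead.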
| 1.953125 | 2 |
mmdnn/conversion/tensorflow/rewriter/lstm_rewriter.py | kmader/MMdnn | 3,442 | 12794728 | from mmdnn.conversion.rewriter.rewriter import UnitRewriterBase
import numpy as np
import re
class LSTMRewriter(UnitRewriterBase):
def __init__(self, graph, weights_dict):
return super(LSTMRewriter, self).__init__(graph, weights_dict)
def process_lstm_cell(self, match_result):
if 'lstm_cell' not in match_result._pattern_to_op.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['lstm_cell']]
w_e = match_result.get_op("cell_kernel")
w = self._weights_dict[w_e.name.replace('/read', '')]
num_units = w.shape[1]//4
[wx, wh] = np.split(w, [-1 * num_units])
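        # The TF LSTM kernel stacks the input rows on top of the recurrent rows, giving
        # shape (input_size + num_units, 4 * num_units); the split above separates the
        # input block (wx) from the recurrent block (wh).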
input_size = wx.shape[0]
kwargs['num_units'] = num_units
kwargs['input_size'] = input_size
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_rnn_h_zero(self, match_result):
if 'h_zero' not in match_result._name_to_pattern.keys():
return
kwargs = dict()
top_node = match_result._pattern_to_op[match_result._name_to_pattern['h_zero']]
fill_size = match_result.get_op('fill_size')
fill_value = match_result.get_op('fill_value')
kwargs['fill_size'] = fill_size.get_attr('value').int_val[0]
kwargs['fill_value'] = fill_value.get_attr('value').float_val[0]
if hasattr(top_node, 'kwargs'):
top_node.kwargs.update(kwargs)
else:
top_node.kwargs = kwargs
def process_match_result(self, match_result, pattern_name):
if pattern_name == 'lstm_cell':
self.process_lstm_cell(match_result)
elif pattern_name == 'h_zero':
if self.check_match_scope(match_result, 'LSTMCellZeroState'):
self.process_rnn_h_zero(match_result)
'''For some short pattern, to avoid match other pattern, check it's scope'''
def check_match_scope(self, match_result, scope_name):
ops = match_result._pattern_to_op.values()
for op in ops:
op_name_splits = op.name.split('/')
if len(op_name_splits) < 2:
return False
if re.sub(r'(_\d+)*$', '', op_name_splits[-2]) != scope_name:
if len(op_name_splits) > 2:
if re.sub(r'(_\d+)*$', '', op_name_splits[-3]) != scope_name:
return False
else:
return False
return True
def run(self):
return super(LSTMRewriter, self).run(['lstm_cell', 'h_zero'], 'tensorflow') | 2.25 | 2 |
adDistro/api/serializers.py | forgeno/ad-distribution | 2 | 12794729 | <reponame>forgeno/ad-distribution<filename>adDistro/api/serializers.py
from rest_framework import serializers
from . import models
from django.contrib.auth.models import User
class CurrentUserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('username', 'email', 'password', 'id')
class EbayUserSerializer(serializers.ModelSerializer):  # Meta-driven fields require ModelSerializer, not Serializer
class Meta:
model = models.EbayUser
fields = ('AdDistroId', 'EbayUsername', 'EbayPassword') | 1.609375 | 2 |
CONTENT/Resources/guides/__UNSORTED/255_verify_preorder_sequence/verify_TLE.py | impastasyndrome/DS-ALGO-OFFICIAL | 13 | 12794730 | class Solution(object):
def verifyPreorder(self, preorder):
"""
:type preorder: List[int]
:rtype: bool
"""
if len(preorder) < 2:
return True
root = preorder[0]
breakpoint = -1
for i in range(1, len(preorder)):
if preorder[i] > root:
if breakpoint < 0:
breakpoint = i
if preorder[i] < root and breakpoint > 0:
return False
if breakpoint < 0:
breakpoint = len(preorder)
return self.verifyPreorder(preorder[1:breakpoint]) and self.verifyPreorder(
preorder[breakpoint:]
)
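if __name__ == "__main__":
    # Quick sanity check (added for illustration; not part of the original snippet):
    # [5, 2, 1, 3, 6] is a valid BST preorder, while [5, 2, 6, 1, 7] is not.
    s = Solution()
    print(s.verifyPreorder([5, 2, 1, 3, 6]))  # expected: True
    print(s.verifyPreorder([5, 2, 6, 1, 7]))  # expected: False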
| 3.453125 | 3 |
examples/ionsat/ionsat_mean_power.py | astronautix/space_mission_design | 3 | 12794731 | import space_mission_design
from space_mission_design.celestlab import celestlab_wrapper
from space_mission_design.visualisation import ploting_map
import numpy as np
import matplotlib.pyplot as plt
plt.style.use("presentation")
from astropy import units as u
from poliastro.bodies import Earth, Mars, Sun
from poliastro.twobody import Orbit
from tqdm.auto import tqdm
import os
from space_mission_design.power import body_illumination
wrapper = celestlab_wrapper.WrapperCelestlab(scilab_path="/home/tavant/Data/tavant/CSE-perso/tools/CNES/scilab-6.0.2/bin/",
celestlab_loader="/home/tavant/Data/tavant/CSE-perso/IonSat/power/loader_celestlab.sce")
print("Small example : propagate and plot")
specitic_params = {"year":2024, "hour":12, "inc": 51*np.pi/180, "sma": (Earth.R_mean + 300 * u.km).to(u.m).value,
"outputFileName":"results_ionsat.h5" }
wrapper.write_paramerter_file(specitic_params)
wrapper.launch_celestlab("ionsat_power.sce")
sun_position, ecf_position, eclipses, cj_date = wrapper.read_celestlab_results("results_ionsat.h5")
ploting_map.plot_planisphere(ecf_position)
ploting_map.plot_poles(ecf_position)
plt.show()
# plt.savefig("map_51deg.png", dpi=300)
| 2.109375 | 2 |
example/wishingpiece.py | KKtheGhost/PokeAuto | 23 | 12794732 | from NXController import Controller
ctr = Controller()
ctr.LS()
ctr.A()
ctr.pause(1)
ctr.A()
ctr.pause(1)
ctr.A()
ctr.pause(0.3)
ctr.h()
response = input("Restart(y/n): ")
while response == 'y':
ctr.X()
ctr.A()
ctr.pause(3)
ctr.A()
ctr.pause(1)
ctr.A()
ctr.pause(15)
ctr.A()
ctr.pause(7)
ctr.A()
ctr.pause(1)
ctr.A()
ctr.pause(1)
ctr.A()
ctr.pause(0.3)
ctr.h()
response = input("Restart(y/n): ")
ctr.A()
ctr.close() | 2.921875 | 3 |
examples/unity-simple-examples/train_agent.py | IBM/vsrl-examples | 8 | 12794733 | <reponame>IBM/vsrl-examples
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
from gym_unity.envs import UnityEnv
from stable_baselines import PPO2
from stable_baselines.common.policies import MlpPolicy
# Linux: env_config.json and the Unity binary must be in the same folder as this Python script.
# MacOS: Copy the Unity binary to the EnvBuild folder.
file_name = 'DroneDelivery'
env_name = "../EnvBuild/" + file_name
num_episodes = 500
class StableBasGym:
@staticmethod
def run():
# LINUX: Disable the Unity window -> no_graphics=True
env = UnityEnv(env_name,
worker_id=1000,
use_visual=False,
uint8_visual=False,
allow_multiple_visual_obs=False,
no_graphics=False)
# Create the agent
model = PPO2(MlpPolicy, env, verbose=0, learning_rate=1.0e-4)
model.learn(total_timesteps=num_episodes)
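        # Optionally persist the trained policy for later evaluation (sketch):
        # model.save("ppo2_drone_delivery")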
env.close()
print("Successfully trained")
if __name__ == '__main__':
StableBasGym().run()
| 2.140625 | 2 |
spefit/pdf/tests/test_base.py | watsonjj/spefit | 0 | 12794734 | from spefit.pdf.base import PDFParameter, PDF
from spefit.common.stats import normal_pdf
import numpy as np
from numpy.testing import assert_allclose
import pytest
def test_pdf_parameter():
initial = 1
limits = (0, 4)
fixed = True
multi = True
param = PDFParameter(initial=initial, limits=limits, fixed=fixed, multi=multi)
assert param.initial == initial
assert param.limits == limits
assert param.fixed is fixed
assert param.multi is multi
param = PDFParameter(initial=initial, limits=limits)
assert param.initial == initial
assert param.limits == limits
assert param.fixed is False
assert param.multi is False
def test_pdf_class():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 1
assert len(pdf.parameters) == 2
assert pdf.parameters["sigma"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1]]))
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 2
assert pdf.parameters["sigma"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 1]]))
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 3
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 1], [0, 2]]))
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.function == normal_pdf
assert pdf.n_illuminations == 2
assert len(pdf.parameters) == 4
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
assert np.array_equal(pdf._lookup, np.array([[0, 2], [1, 3]]))
key_array = np.array(list(pdf.parameters.keys()))
assert np.array_equal(key_array[pdf._lookup[0]], ["mean0", "sigma0"])
assert np.array_equal(key_array[pdf._lookup[1]], ["mean1", "sigma1"])
def test_lookup_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.3)
initial = np.array(list(pdf.initial.values()))
assert np.array_equal(pdf._lookup_parameters(initial, 0), np.array([0, 0.1]))
assert np.array_equal(pdf._lookup_parameters(initial, 1), np.array([0, 0.3]))
def test_call():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
x = np.linspace(-1, 6, 100)
assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 0), pdf._function(x, 0, 0.1))
assert_allclose(pdf(x, np.array([0, 0.1, 0.2]), 1), pdf._function(x, 0, 0.2))
with pytest.raises(IndexError):
pdf(x, np.array([0, 0.1, 0.2]), 2)
with pytest.raises(IndexError):
pdf(x, np.array([0, 0.1]), 1)
with pytest.raises(TypeError):
# noinspection PyTypeChecker
pdf(x, [0, 0.1, 0.2], 1)
def test_update_parameters_initial():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameters["mean"].initial == 0
assert pdf.parameters["sigma"].initial == 0.1
pdf.update_parameters_initial(mean=2, sigma=0.4)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma"].initial == 0.4
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameters["mean"].initial == 0
assert pdf.parameters["sigma0"].initial == 0.1
assert pdf.parameters["sigma1"].initial == 0.1
pdf.update_parameters_initial(mean=2, sigma=0.4)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma0"].initial == 0.4
assert pdf.parameters["sigma1"].initial == 0.4
pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5)
assert pdf.parameters["mean"].initial == 2
assert pdf.parameters["sigma0"].initial == 0.4
assert pdf.parameters["sigma1"].initial == 0.5
with pytest.raises(ValueError):
pdf.update_parameters_initial(mean0=2, sigma0=0.4, sigma1=0.5)
with pytest.raises(ValueError):
pdf.update_parameters_initial(mean=2, sigma0=0.4, sigma1=0.5, sigma2=0.5)
def test_update_parameters_limits():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.parameters["mean"].limits == (-2, 2)
assert pdf.parameters["sigma"].limits == (0, 2)
pdf.update_parameters_limits(mean=(-3, 3), sigma=(0, 4))
assert pdf.parameters["mean"].limits == (-3, 3)
assert pdf.parameters["sigma"].limits == (0, 4)
# Test mutable
limit = [2, 3]
# noinspection PyTypeChecker
pdf.update_parameters_limits(mean=limit)
assert tuple(pdf.parameters["mean"].limits) == (2, 3)
limit[0] = 1
assert tuple(pdf.parameters["mean"].limits) == (2, 3)
def test_update_parameters_fixed():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
pdf = PDF(1, normal_pdf, parameters)
assert pdf.parameters["mean"].fixed is False
assert pdf.parameters["sigma"].fixed is False
pdf.update_parameters_fixed(mean=True, sigma=True)
assert pdf.parameters["mean"].fixed is True
assert pdf.parameters["sigma"].fixed is True
# noinspection DuplicatedCode
def test_prepare_multi_illumination_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2)),
)
results = PDF._prepare_parameters(parameters, 1)
parameters, is_multi, lookup = results
assert len(parameters) == 2
assert len(is_multi) == 2
assert len(lookup) == 1
assert len(lookup[0]) == 2
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
results = PDF._prepare_parameters(parameters, 1)
parameters, is_multi, lookup = results
assert len(parameters) == 2
assert len(is_multi) == 2
assert len(lookup) == 1
assert len(lookup[0]) == 2
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2), multi=True),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
results = PDF._prepare_parameters(parameters, 2)
parameters, is_multi, lookup = results
assert len(parameters) == 4
assert len(is_multi) == 2
assert len(lookup) == 2
assert len(lookup[0]) == 2
def test_initial():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.2)
assert pdf.initial == dict(mean=0, sigma0=0.1, sigma1=0.2)
def test_n_free_parameters():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.n_free_parameters == 3
pdf.update_parameters_fixed(sigma1=True)
assert pdf.n_free_parameters == 2
def test_parameter_names():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
assert pdf.parameter_names == ["mean", "sigma0", "sigma1"]
def test_iminuit_kwargs():
parameters = dict(
mean=PDFParameter(initial=0, limits=(-2, 2)),
sigma=PDFParameter(initial=0.1, limits=(0, 2), multi=True),
)
pdf = PDF(2, normal_pdf, parameters)
pdf.update_parameters_initial(sigma1=0.2)
pdf.update_parameters_limits(sigma1=(1, 2))
pdf.update_parameters_fixed(sigma1=True)
iminuit_kwargs = pdf.iminuit_kwargs
assert len(iminuit_kwargs) == 9
assert iminuit_kwargs["mean"] == 0
assert iminuit_kwargs["sigma0"] == 0.1
assert iminuit_kwargs["sigma1"] == 0.2
assert iminuit_kwargs["limit_mean"] == (-2, 2)
assert iminuit_kwargs["limit_sigma0"] == (0, 2)
assert iminuit_kwargs["limit_sigma1"] == (1, 2)
assert iminuit_kwargs["fix_mean"] is False
assert iminuit_kwargs["fix_sigma0"] is False
assert iminuit_kwargs["fix_sigma1"] is True
# noinspection PyPep8Naming,PyArgumentList
@pytest.mark.parametrize("PDFSubclass", PDF.__subclasses__())
def test_pdf_subclasses(PDFSubclass):
pdf = PDFSubclass(n_illuminations=1)
x = np.linspace(-5, 100, 1000)
y = pdf(x, np.array(list(pdf.initial.values())), 0)
np.testing.assert_allclose(np.trapz(y, x), 1, rtol=1e-3)
# noinspection PyPep8Naming,PyArgumentList
@pytest.mark.parametrize("PDFSubclass", PDF.__subclasses__())
def test_disable_pedestal(PDFSubclass):
pdf = PDFSubclass(n_illuminations=1, disable_pedestal=True)
x = np.linspace(-5, 100, 1000)
y = pdf(x, np.array(list(pdf.initial.values())), 0)
lambda_ = pdf.initial["lambda_0"]
pedestal_contribution = np.exp(-lambda_)
np.testing.assert_allclose(np.trapz(y, x), 1 - pedestal_contribution, rtol=1e-3)
def test_from_name():
pdf = PDF.from_name("SiPMGentile", n_illuminations=1)
assert pdf.__class__.__name__ == "SiPMGentile"
with pytest.raises(ValueError):
PDF.from_name("NULL", n_illuminations=1)
| 2.3125 | 2 |
cc/keystone/middleware/response.py | rajivmucheli/keystone-extensions | 1 | 12794735 | # Copyright 2018 SAP SE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from webob import Response
class RateLimitExceededResponse(Response):
"""
defines the rate limit response and defaults, which can be overwritten via configuration.
"""
def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None):
"""
creates a new RateLimitExceededResponse with either a body or json_body
:param status: the status code
:param headers: list of header dictionaries
:param body: the response body
:param json_body: the response json body
"""
if not status:
status = '429 Too Many Requests'
if body:
super(RateLimitExceededResponse, self).__init__(
status=status, headerlist=headers, content_type=content_type, body=body, charset="UTF-8"
)
return
elif not json_body:
content_type = "application/json"
json_body = {"error": {"status": status, "message": "Too Many Requests"}}
super(RateLimitExceededResponse, self).__init__(
status=status, headerlist=headers, content_type=content_type,
json_body=json.dumps(json_body), charset="UTF-8",
)
def set_retry_after(self, retry_after):
if not self.headerlist:
self.headerlist = []
self.headerlist.append(('Retry-After', str(retry_after)))
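# Example (illustrative): building a 429 response with a Retry-After header:
#   resp = RateLimitExceededResponse()
#   resp.set_retry_after(60)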
class BlocklistResponse(Response):
"""
defines the blocklist response and defaults, which can be overwritten via configuration.
"""
def __init__(self, status=None, headers=None, content_type=None, body=None, json_body=None):
"""
creates a new BlocklistResponse with either a body or json_body
:param status: the status code
:param headers: list of header dictionaries
:param body: the response body
:param json_body: the response json body
"""
if not status:
status = '403 Forbidden'
if body:
super(BlocklistResponse, self).__init__(
status=status, headerlist=headers, content_type=content_type, body=body, charset="UTF-8"
)
return
elif not json_body:
content_type = "application/json"
json_body = {"error": {"status": status, "message": "You have been blocklisted"}}
super(BlocklistResponse, self).__init__(
status=status, headerlist=headers, content_type=content_type,
json_body=json.dumps(json_body), charset="UTF-8"
)
| 2.34375 | 2 |
build/lib/musicazoo/nlp/__main__.py | ixalis/musicpi | 0 | 12794736 | <gh_stars>0
import json
import os
import random
import re
import signal
import socket
import subprocess
import tornado.httpclient
import traceback
import urllib
import urllib2
import shmooze.lib.packet as packet
import shmooze.lib.service as service
import shmooze.settings as settings
from bs4 import BeautifulSoup
from urlparse import urlparse
class NLP(service.JSONCommandProcessor, service.Service):
port=settings.ports["nlp"]
queue_host='localhost'
queue_port=settings.ports["queue"]
vol_host='localhost'
vol_port=settings.ports["vol"]
pretty_params={'youtube':['title'], 'text': ['text']}
youtube_api_key = settings.youtube_api_key
def __init__(self):
print "NLP started."
self.youtube_cache = {}
super(NLP, self).__init__()
@staticmethod
def parse_duration(dstr):
# Parse ISO 8601 duration strings: PT#M#S
hms_str = dstr.strip()
try:
matches = re.match(r"PT(\d+H)?(\d{1,2}M)?(\d{1,2}S)", hms_str).groups()
except:
print hms_str
return 0, hms_str
h, m, s = [int(m.strip("HMS")) if m is not None else 0 for m in matches]
if h > 0:
human_str = "{0}:{1:02d}:{2:02d}".format(h, m, s)
else:
human_str = "{1}:{2:02d}".format(h, m, s)
        # hours contribute 3600 seconds each
        return h * 3600 + m * 60 + s, human_str
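        # e.g. parse_duration("PT1H02M03S") -> (3723, "1:02:03"); parse_duration("PT4M09S") -> (249, "4:09")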
@service.coroutine
def youtube_search(self,q):
if q in self.youtube_cache:
print "cache hit"
raise service.Return(self.youtube_cache[q])
print "cache miss"
http_client = tornado.httpclient.AsyncHTTPClient()
# Return the args dict for the first youtube result for 'match'
youtube_search_url = "https://www.googleapis.com/youtube/v3/search"
search_data = {
"part": "snippet",
"key": self.youtube_api_key,
"order": "relevance",
"safesearch": "none",
"type": "video",
"max-results": 5,
"q": q,
}
query = q.replace(" ", "+")
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
video_ids = []
output = []
for vid in soup.findAll(attrs={'class':'yt-uix-tile-link'}):
video_ids.append(vid['href'][9:])
print(vid)
sr = {
"video_id": vid['href'],
"url": "https://www.youtube.com/watch?v={0}".format(vid['href'][9:]),
"title": vid['title'],
"thumbnail": "no",
"publish_time": "no",
"views": "no",
"duration": (1,1)}
output.append(sr)
# output.append(sr)
#youtube_video_url = "https://www.googleapis.com/youtube/v3/videos"
#video_data = {
# "part": "contentDetails,snippet,statistics",
# "key": self.youtube_api_key,
# "id": ",".join(video_ids),
#}
#video_form_data = urllib.urlencode(video_data)
#video_results = yield http_client.fetch(youtube_video_url + "?" + video_form_data)
#video_json = json.loads(video_results.body)
#output = []
#for yi in video_json['items']:
# sr = {
# "video_id": yi["id"],
# "url": "http://www.youtube.com/watch?v={0}".format(yi["id"]),
# "title": yi["snippet"]["title"],
# "thumbnail": yi["snippet"]["thumbnails"]["default"]["url"],
# "publish_time": yi["snippet"]["publishedAt"],
# "views": yi["statistics"]["viewCount"],
#"duration": self.parse_duration(yi["contentDetails"]["duration"]),
#}
#output.append(sr)
self.youtube_cache[q] = output
raise service.Return(output)
@service.coroutine
def youtube_suggest(self, q):
videos = yield self.youtube_search(q)
results = []
for v in videos:
results.append({
"title": u"{0[title]} - [{0[duration][1]}]".format(v),
"action": v['url'],
"help": "Play video from YouTube",
"match": 0,
})
raise service.Return(results)
@service.coroutine
def url_suggest(self, url):
#TODO
results = [{
"title": url,
"action": url,
"help": "",
"match": len(url)
}]
yield service.Return(results)
@service.coroutine
def wildcard_suggest(self, text):
text = text.strip()
results = []
if text.startswith("http:"):
rs = yield self.url_suggest(text)
results.extend(rs)
yr = yield self.youtube_suggest(text)
results.extend(yr)
raise service.Return(results)
@service.coroutine
def suggest(self,message):
stripped_message = message.strip()
suggestions = []
for sc, sc_help in self.suggest_commands:
if sc.startswith(stripped_message):
suggestions.append({
"title": sc,
"action": sc,
"help": sc_help,
"match": len(stripped_message)
})
rs = yield self.wildcard_suggest(message)
suggestions.extend(rs)
raise service.Return({'suggestions':suggestions})
@service.coroutine
def queue_cmd(self,cmd,args={},assert_success=True):
try:
result = yield service.json_query(self.queue_host,self.queue_port,{"cmd":cmd,"args":args})
except (socket.error,service.TimeoutError):
raise Exception("Error communicating with queue.")
if assert_success:
raise service.Return(packet.assert_success(result))
raise service.Return(result)
@service.coroutine
def vol_cmd(self,cmd,args={},assert_success=True):
try:
result = yield service.json_query(self.vol_host,self.vol_port,{"cmd":cmd,"args":args})
except (socket.error,service.TimeoutError):
raise Exception("Error communicating with volume control.")
if assert_success:
raise service.Return(packet.assert_success(result))
raise service.Return(result)
@service.coroutine
def do(self,message):
message=message.strip()
for (regex,func) in self.nlp_commands:
m=re.match(regex,message,re.I)
if m:
result = yield func(self,message,*m.groups())
raise service.Return(result)
raise Exception("Command not recognized.")
#result = yield self.queue_cmd("queue")
raise service.Return({'message':'Did '+message})
def shutdown(self):
service.ioloop.stop()
@service.coroutine
def cmd_set_vol(self,q,vol):
if vol=='up':
result=yield self.vol_cmd("get_vol")
vol=min(result['vol']+5,100)
elif vol=='down':
result=yield self.vol_cmd("get_vol")
vol=max(result['vol']-5,0)
else:
vol=int(vol)
if vol>100:
raise Exception("Volume cannot be greater than 100")
yield self.vol_cmd("set_vol",{"vol":vol})
raise service.Return("Volume set to {0}".format(vol))
@service.coroutine
def cmd_get_vol(self,q):
result=yield self.vol_cmd("get_vol")
raise service.Return("Volume is {0}".format(result.get("vol", "unknown")))
@service.coroutine
def cmd_queue(self,q):
queue=yield self.queue_cmd("queue",{"parameters":self.pretty_params})
if len(queue)==0:
raise service.Return("Queue is empty.")
result = '\n'.join([u"{0}. {1}".format(n+1,self.pretty(mod)) for (n,mod) in enumerate(queue)])
raise service.Return(result)
@service.coroutine
def cmd_current(self,q):
queue=yield self.queue_cmd("queue",{"parameters":self.pretty_params})
if len(queue)==0:
raise service.Return("(Nothing)")
result = self.pretty(queue[0])
raise service.Return(result)
@service.coroutine
def cmd_rm_top(self,q):
queue=yield self.queue_cmd("queue",{"parameters":self.pretty_params})
if len(queue)==0:
raise Exception("Queue is empty!")
mod=queue[0]
yield self.queue_cmd("rm",{"uids":[mod['uid']]})
raise service.Return(u"Removed {0}".format(self.pretty(mod)))
@service.coroutine
def cmd_rm_bot(self,q):
queue=yield self.queue_cmd("queue",{"parameters":self.pretty_params})
if len(queue)==0:
raise Exception("Queue is empty!")
mod=queue[-1]
yield self.queue_cmd("rm",{"uids":[mod['uid']]})
raise service.Return(u"Removed {0}".format(self.pretty(mod)))
@service.coroutine
def cmd_bump(self,q):
queue=yield self.queue_cmd("queue",{"parameters":self.pretty_params})
if len(queue)==0:
raise Exception("Queue is empty!")
if len(queue)==1:
raise Exception("Only one thing on the queue!")
old_uids=[mod['uid'] for mod in queue]
mod_bot=queue[-1]
new_uids=old_uids[-1:]+old_uids[0:-1]
yield self.queue_cmd("mv",{"uids":[mod['uid']]})
raise service.Return(u"Bumped {0} to the top".format(self.pretty(mod_bot)))
@service.coroutine
def cmd_yt(self,q,kw):
result=yield self.youtube_search(kw)
if not result or not result[0]:
raise Exception('No Youtube results found.')
url = result[0]["url"]
title = result[0]["title"]
yield self.queue_cmd("add",{"type":"youtube","args":{"url":url}})
raise service.Return(u'Queued "{0}"'.format(title))
@service.coroutine
def cmd_youtube_raw(self,q,url):
yield self.queue_cmd("add",{"type":"youtube","args":{"url": url}})
        raise service.Return(u'Queued video.')
@service.coroutine
def cmd_image(self,q,url):
yield self.queue_cmd("set_bg",{"type":"image","args":{"url": url}})
        raise service.Return(u'Set background image.')
@service.coroutine
def cmd_say(self,q,text):
yield self.queue_cmd("add",{"type":"text","args":{"text":text}})
raise service.Return(u'Queued text.')
@service.coroutine
def cmd_swear(self,q):
# Swear words according to yahoo chat.
# See: http://ridiculousfish.com/blog/posts/YahooChatRooms.html
words = "ahole,aholes,asshole,assholes,asswipe,biatch,bitch,bitches,blo_job,blow_job,blowjob,cocksucker,cunt,cunts,dickhead,fuck,fucked,fucking,fuckoff,fucks,handjob,handjobs,motherfucker,mother-fucker,motherfuckers,muthafucker,muthafuckers,nigga,niggs,nigger,niggers,pedofile,pedophile,phag,phuc,phuck,phucked,phucker,shat,shit,shits,shithead,shitter,shitting".split(",")
selection = random.sample(words, 5)
text = " ".join(selection)
yield self.queue_cmd("add",{"type":"text","args":{"text":text}})
raise service.Return(u'Queued swearing.')
@service.coroutine
def cmd_fortune(self, q):
fortune_args = settings.get("fortune_args", ['-s'])
fortune_text = subprocess.check_output(['/usr/games/fortune'] + fortune_args)
data = {
'type': 'text',
'args': {
'text': fortune_text,
#'screen_preprocessor': 'none',
'speech_preprocessor': 'pronounce_fortune',
'text2speech': 'google',
'text2screen': 'paragraph',
#'renderer': 'mono_paragraph',
'duration': 5,
}
}
yield self.queue_cmd("add", data)
raise service.Return(u"Queued fortune.")
def pretty(self,mod):
t=mod['type']
if t=='youtube' and 'title' in mod['parameters']:
return u'"{0}"'.format(mod['parameters']['title'])
#if t=='netvid':
# return u'{0}'.format(mod['parameters']['short_description'])
if t=='text' and 'text' in mod['parameters']:
return u'"{0}"'.format(mod['parameters']['text'])
return u'({0})'.format(t)
@service.coroutine
def cmd_bug(self,q,text):
bug_url = "https://api.github.com/repos/zbanks/musicazoo/issues"
suffix = "\n\nSubmitted via NLP service."
bug_data = json.dumps({'title': text, 'body' : text + suffix})
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
try:
password_mgr.add_password(None, bug_url, settings.github_login[0], settings.github_login[1])
except AttributeError:
raise service.Return(u"No github account configured in settings.json")
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
#TODO request(bug_url, bug_data, auth=(musicazoo-bugs, musicaz00)
        raise service.Return(u'Submitted bug: %s - thanks!' % text)
@service.coroutine
def cmd_help(self,q):
raise service.Return("""Commands I understand:
help|? - This
vol - Get volume
vol [num] - Set volume
vol up|down - Change volume
stop|stfu|skip|next - Remove the top video
pop|undo|oops - Remove the bottom video
bump - Move the bottom video to the top
q|queue - List the queue
cur|current - Give the current item playing
bug - Submit a bug report
Anything else - Queue Youtube video""")
commands={
'do': do,
'suggest': suggest,
}
suggest_commands = [
("vol up", "Raise the volume"),
("vol down", "Lower the volume"),
("skip", "Remove the current item on the queue that is currently playing"),
("pop", "Remove the last item on the queue"),
("bump", "Move the last item on the queue to top of the queue and play it"),
("say", "`say <quote>`: Say a quote and display it on the screen"),
("fuck", "Swear a bunch"),
("quote", "Display a quote from the fortune database"),
("image", "`image <url>`: Display an image on the screen as a background"),
("video", "`video <url>`: Play a video"),
]
#TODO: splash
nlp_commands=[
(r'^help$',cmd_help),
(r'^$',cmd_help),
(r'^\?$',cmd_help),
(r'^vol (\d+|up|down)$',cmd_set_vol),
(r'^vol$',cmd_get_vol),
(r'^stop$',cmd_rm_top),
(r'^stfu$',cmd_rm_top),
(r'^skip$',cmd_rm_top),
(r'^next$',cmd_rm_top),
(r'^pop$',cmd_rm_bot),
(r'^undo$',cmd_rm_bot),
(r'^oops$',cmd_rm_bot),
(r'^bump$',cmd_bump),
(r'^fuck$',cmd_swear),
(r'^fortune$',cmd_fortune),
(r'^quote$',cmd_fortune),
(r'^q$',cmd_queue),
(r'^queue$',cmd_queue),
(r'^cur(?:rent)?$',cmd_current),
(r'^say (.+)$',cmd_say),
(r'^image (.+)$',cmd_image),
(r'^youtube (.+)$',cmd_youtube_raw),
(r'^video (.+)$',cmd_youtube_raw),
(r'^bug (.+)$',cmd_bug),
(r'(https?://.+(?:gif|jpe?g|png|bmp))',cmd_image),
(r'(https?://.+)',cmd_youtube_raw),
(r'^(.+)$',cmd_yt),
]
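    # Example dispatch: "vol up" matches r'^vol (\d+|up|down)$' and runs cmd_set_vol;
    # any other non-empty text falls through to r'^(.+)$' and is queued as a YouTube search via cmd_yt.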
nlp = NLP()
def shutdown_handler(signum,frame):
print
print "Received signal, attempting graceful shutdown..."
service.ioloop.add_callback_from_signal(nlp.shutdown)
signal.signal(signal.SIGTERM, shutdown_handler)
signal.signal(signal.SIGINT, shutdown_handler)
service.ioloop.start()
| 2.546875 | 3 |
api/api/lib/docs.py | kosyachniy/react | 0 | 12794737 | """
Google Documents functionality for the API
"""
import httplib2
import apiclient.discovery
from oauth2client.service_account import ServiceAccountCredentials
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'credentials.json',
[
'https://www.googleapis.com/auth/spreadsheets',
'https://www.googleapis.com/auth/drive',
],
)
httpAuth = credentials.authorize(httplib2.Http())
service = apiclient.discovery.build('sheets', 'v4', http=httpAuth)
def create(name, sheets, mail):
spreadsheet = service.spreadsheets().create(
body = {
'properties': {
'title': name,
'locale': 'ru_RU',
},
'sheets': [{
'properties': {
'sheetType': 'GRID',
'sheetId': i,
'title': sheet,
'gridProperties': {
'rowCount': 100,
'columnCount': 30,
},
},
} for i, sheet in enumerate(sheets)],
},
).execute()
spreadsheetId = spreadsheet['spreadsheetId']
print(f"https://docs.google.com/spreadsheets/d/{spreadsheetId}")
driveService = apiclient.discovery.build('drive', 'v3', http=httpAuth)
driveService.permissions().create(
fileId = spreadsheetId,
body = {
'type': 'user',
'role': 'writer',
'emailAddress': mail,
},
fields = 'id'
).execute()
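# Example (assumes credentials.json exists and the service account may share files):
#   create('Monthly Report', ['Summary', 'Raw Data'], 'user@example.com')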
| 3.296875 | 3 |
src/server.py | tomkcook/tunnel-server | 3 | 12794738 | #!/usr/bin/env python3
from flask import Flask
from flask import request
from util import startTunnel, stopTunnel, addressesForInterface
from argparse import ArgumentParser
import logging
app = Flask(__name__)
settings = {}
@app.route("/connect")
def connect():
address = request.remote_addr
logging.info("Connect request from {}".format(address))
startTunnel(address, settings['localIP'], settings['bridge'])
return "Success", 200
def main():
global settings
parser = ArgumentParser()
parser.add_argument("--bridge", type=str)
parser.add_argument("interface", type=str)
args = parser.parse_args()
addrs = addressesForInterface(args.interface)
if addrs is None:
logging.error("No such interface: {}".format(args.interface))
return
if len(addrs) == 0:
logging.error("Interface {} has no IP4 address.".format(args.interface))
return
settings['localIP'] = addrs[0]
settings['bridge'] = args.bridge
app.run(host=addrs[0])
if __name__ == '__main__':
main()
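# Example invocation (interface and bridge names are illustrative):
#   ./server.py --bridge br0 eth0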
| 2.765625 | 3 |
Binary_Tree/binary_tree.py | kethan1/Data-Structures | 0 | 12794739 | <reponame>kethan1/Data-Structures<gh_stars>0
from typing import Any
class Node:
def __init__(self, data: Any, left=None, right=None):
self.data: Any = data
if left is not None:
left = Node(left)
if right is not None:
right = Node(right)
self.left = left
self.right = right
def set_left(self, data: Any):
self.left = Node(data)
def set_right(self, data: Any):
self.right = Node(data)
def __str__(self, top: bool = True) -> str:
lines: list = []
lines.append(str(self.data))
for child in [self.left, self.right]:
if child is not None:
for index, data in enumerate(child.__str__(top=False).split("\n")):
data = str(data)
space_after_line = " " * index
if len(lines)-1 > index:
lines[index+1] += " " + data
if top:
lines[index+1] += space_after_line
else:
lines.append(data)
if top:
lines[-1] += space_after_line
for line_number in range(1, len(lines) - 1):
if len(lines[line_number + 1]) > \
len(lines[line_number]):
lines[line_number] += \
" " * (len(lines[line_number + 1]) -
len(lines[line_number]))
lines[0] = \
" " * int((len(max(lines, key=len)) - len(str(self.data))) / 2) \
+ lines[0]
return '\n'.join(lines)
def hasChildren(self) -> bool:
return self.left is not None and self.right is not None
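# Example usage (sketch):
#   root = Node(1, left=2, right=3)
#   root.left.set_left(4)
#   print(root)  # prints a simple text rendering of the tree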
| 3.21875 | 3 |
esphome/components/nextion/base_component.py | OttoWinter/esphomeyaml | 249 | 12794740 | from string import ascii_letters, digits
import esphome.config_validation as cv
import esphome.codegen as cg
from esphome.components import color
from esphome.const import (
CONF_VISIBLE,
)
from . import CONF_NEXTION_ID
from . import Nextion
CONF_VARIABLE_NAME = "variable_name"
CONF_COMPONENT_NAME = "component_name"
CONF_WAVE_CHANNEL_ID = "wave_channel_id"
CONF_WAVE_MAX_VALUE = "wave_max_value"
CONF_PRECISION = "precision"
CONF_WAVEFORM_SEND_LAST_VALUE = "waveform_send_last_value"
CONF_TFT_URL = "tft_url"
CONF_ON_SLEEP = "on_sleep"
CONF_ON_WAKE = "on_wake"
CONF_ON_SETUP = "on_setup"
CONF_TOUCH_SLEEP_TIMEOUT = "touch_sleep_timeout"
CONF_WAKE_UP_PAGE = "wake_up_page"
CONF_AUTO_WAKE_ON_TOUCH = "auto_wake_on_touch"
CONF_WAVE_MAX_LENGTH = "wave_max_length"
CONF_BACKGROUND_COLOR = "background_color"
CONF_BACKGROUND_PRESSED_COLOR = "background_pressed_color"
CONF_FOREGROUND_COLOR = "foreground_color"
CONF_FOREGROUND_PRESSED_COLOR = "foreground_pressed_color"
CONF_FONT_ID = "font_id"
def NextionName(value):
valid_chars = f"{ascii_letters + digits}."
if not isinstance(value, str) or len(value) > 29:
raise cv.Invalid("Must be a string less than 29 characters")
for char in value:
if char not in valid_chars:
raise cv.Invalid(
f"Must only consist of upper/lowercase characters, numbers and the period '.'. The character '{char}' cannot be used."
)
return value
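# Example: NextionName("page0.t0") is accepted; NextionName("page0/t0") raises cv.Invalid because '/' is not allowed.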
CONFIG_BASE_COMPONENT_SCHEMA = cv.Schema(
{
cv.GenerateID(CONF_NEXTION_ID): cv.use_id(Nextion),
cv.Optional(CONF_BACKGROUND_COLOR): cv.use_id(color),
cv.Optional(CONF_FOREGROUND_COLOR): cv.use_id(color),
cv.Optional(CONF_VISIBLE, default=True): cv.boolean,
}
)
CONFIG_TEXT_COMPONENT_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Required(CONF_COMPONENT_NAME): NextionName,
cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),
}
)
)
CONFIG_BINARY_SENSOR_SCHEMA = CONFIG_BASE_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_COMPONENT_NAME): NextionName,
cv.Optional(CONF_VARIABLE_NAME): NextionName,
}
)
)
CONFIG_SENSOR_COMPONENT_SCHEMA = CONFIG_BINARY_SENSOR_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_FONT_ID): cv.int_range(min=0, max=255),
}
)
)
CONFIG_SWITCH_COMPONENT_SCHEMA = CONFIG_SENSOR_COMPONENT_SCHEMA.extend(
cv.Schema(
{
cv.Optional(CONF_FOREGROUND_PRESSED_COLOR): cv.use_id(color),
cv.Optional(CONF_BACKGROUND_PRESSED_COLOR): cv.use_id(color),
}
)
)
async def setup_component_core_(var, config, arg):
if CONF_VARIABLE_NAME in config:
cg.add(var.set_variable_name(config[CONF_VARIABLE_NAME]))
elif CONF_COMPONENT_NAME in config:
cg.add(
var.set_variable_name(
config[CONF_COMPONENT_NAME],
config[CONF_COMPONENT_NAME] + arg,
)
)
if CONF_BACKGROUND_COLOR in config:
color_component = await cg.get_variable(config[CONF_BACKGROUND_COLOR])
cg.add(var.set_background_color(color_component))
if CONF_BACKGROUND_PRESSED_COLOR in config:
color_component = await cg.get_variable(config[CONF_BACKGROUND_PRESSED_COLOR])
cg.add(var.set_background_pressed_color(color_component))
if CONF_FOREGROUND_COLOR in config:
color_component = await cg.get_variable(config[CONF_FOREGROUND_COLOR])
cg.add(var.set_foreground_color(color_component))
if CONF_FOREGROUND_PRESSED_COLOR in config:
color_component = await cg.get_variable(config[CONF_FOREGROUND_PRESSED_COLOR])
cg.add(var.set_foreground_pressed_color(color_component))
if CONF_FONT_ID in config:
cg.add(var.set_font_id(config[CONF_FONT_ID]))
if CONF_VISIBLE in config:
cg.add(var.set_visible(config[CONF_VISIBLE]))
| 2.640625 | 3 |
koapy/koapy/examples/01_roll_your_own_koapy.py | miniyus/AutomaticPosting-koapy | 0 | 12794741 | import sys
from koapy.compat.pyside2.QtWidgets import QApplication
from koapy import KiwoomOpenApiPlusQAxWidget
app = QApplication(sys.argv)
control = KiwoomOpenApiPlusQAxWidget()
APIModulePath = control.GetAPIModulePath()
print(APIModulePath)
| 1.828125 | 2 |
uw_canvas/external_tools.py | uw-it-aca/uw-restclients-canvas | 1 | 12794742 | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from uw_canvas import Canvas
from uw_canvas.accounts import ACCOUNTS_API
from uw_canvas.courses import COURSES_API
class ExternalToolsException(Exception):
pass
class ExternalTools(Canvas):
def get_external_tools_in_account(self, account_id, params={}):
"""
Return external tools for the passed canvas account id.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
"""
url = ACCOUNTS_API.format(account_id) + "/external_tools"
external_tools = []
for data in self._get_paged_resource(url, params=params):
external_tools.append(data)
return external_tools
def get_external_tools_in_account_by_sis_id(self, sis_id):
"""
Return external tools for given account sis id.
"""
return self.get_external_tools_in_account(self._sis_id(sis_id,
"account"))
def get_external_tools_in_course(self, course_id, params={}):
"""
Return external tools for the passed canvas course id.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index
"""
url = COURSES_API.format(course_id) + "/external_tools"
external_tools = []
for data in self._get_paged_resource(url, params=params):
external_tools.append(data)
return external_tools
def get_external_tools_in_course_by_sis_id(self, sis_id):
"""
Return external tools for given course sis id.
"""
return self.get_external_tools_in_course(self._sis_id(sis_id,
"course"))
def create_external_tool_in_course(self, course_id, json_data):
return self._create_external_tool(COURSES_API, course_id, json_data)
def create_external_tool_in_account(self, account_id, json_data):
return self._create_external_tool(ACCOUNTS_API, account_id, json_data)
def _create_external_tool(self, context, context_id, json_data):
"""
Create an external tool using the passed json_data.
context is either COURSES_API or ACCOUNTS_API.
context_id is the Canvas course_id or account_id, depending on context.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create
"""
url = context.format(context_id) + "/external_tools"
return self._post_resource(url, body=json_data)
def update_external_tool_in_course(self, course_id, external_tool_id,
json_data):
return self._update_external_tool(COURSES_API, course_id,
external_tool_id, json_data)
def update_external_tool_in_account(self, account_id, external_tool_id,
json_data):
return self._update_external_tool(ACCOUNTS_API, account_id,
external_tool_id, json_data)
def _update_external_tool(self, context, context_id, external_tool_id,
json_data):
"""
Update the external tool identified by external_tool_id with the passed
json data.
context is either COURSES_API or ACCOUNTS_API.
context_id is the course_id or account_id, depending on context
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.update
"""
url = context.format(context_id) + "/external_tools/{}".format(
external_tool_id)
return self._put_resource(url, body=json_data)
def delete_external_tool_in_course(self, course_id, external_tool_id):
return self._delete_external_tool(COURSES_API, course_id,
external_tool_id)
def delete_external_tool_in_account(self, account_id, external_tool_id):
return self._delete_external_tool(ACCOUNTS_API, account_id,
external_tool_id)
def _delete_external_tool(self, context, context_id, external_tool_id):
"""
Delete the external tool identified by external_tool_id.
context is either COURSES_API or ACCOUNTS_API.
context_id is the course_id or account_id, depending on context
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.destroy
"""
url = context.format(context_id) + "/external_tools/{}".format(
external_tool_id)
response = self._delete_resource(url)
return True
def _get_sessionless_launch_url(self, context, context_id, tool_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
url = context.format(context_id) + "/external_tools/sessionless_launch"
params = {"id": tool_id}
return self._get_resource(url, params)
def get_sessionless_launch_url_from_account(self, tool_id, account_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
return self._get_sessionless_launch_url(
ACCOUNTS_API, account_id, tool_id)
def get_sessionless_launch_url_from_account_sis_id(
self, tool_id, account_sis_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
return self.get_sessionless_launch_url_from_account(
tool_id, self._sis_id(account_sis_id, "account"))
def get_sessionless_launch_url_from_course(self, tool_id, course_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
return self._get_sessionless_launch_url(
COURSES_API, course_id, tool_id)
def get_sessionless_launch_url_from_course_sis_id(
self, tool_id, course_sis_id):
"""
Get a sessionless launch url for an external tool.
https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.generate_sessionless_launch
"""
return self.get_sessionless_launch_url_from_course(
tool_id, self._sis_id(course_sis_id, "course"))
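# Example (illustrative IDs; assumes the restclients DAO/auth settings are configured):
#   tools = ExternalTools()
#   for tool in tools.get_external_tools_in_account('12345'):
#       print(tool.get('name'))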
| 2.484375 | 2 |
psst/model/constraints.py | ayadabd000/psst | 0 | 12794743 | <filename>psst/model/constraints.py
import numpy as np
from functools import partial
import logging
from pyomo.environ import *
logger = logging.getLogger(__file__)
eps = 1e-3
def fix_first_angle_rule(m,t, slack_bus=1):
return m.Angle[m.Buses[slack_bus], t] == 0.0
def lower_line_power_bounds_rule(m, l, t):
if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps):
return -m.ThermalLimit[l] <= m.LinePower[l, t]
else:
return Constraint.Skip
def upper_line_power_bounds_rule(m, l, t):
if m.EnforceLine[l] and np.any(np.absolute(m.ThermalLimit[l]) > eps):
return m.ThermalLimit[l] >= m.LinePower[l, t]
else:
return Constraint.Skip
def line_power_ptdf_rule(m, l, t):
return m.LinePower[l,t] == sum(float(m.PTDF[l, i]) * m.NetPowerInjectionAtBus[b, t] for i, b in enumerate(m.Buses))
def line_power_rule(m, l, t):
if m.B[l] == 99999999:
logger.debug(" Line Power Angle constraint skipped for line between {} and {} ".format(m.BusFrom[l], m.BusTo[l]))
return Constraint.Skip
else:
return m.LinePower[l,t] == m.B[l] * (m.Angle[m.BusFrom[l], t] - m.Angle[m.BusTo[l], t])
def calculate_total_demand(m, t):
return m.TotalDemand[t] == sum(m.Demand[b,t] for b in m.Buses)
def neg_load_generate_mismatch_tolerance_rule(m, b):
return sum((m.negLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0
def pos_load_generate_mismatch_tolerance_rule(m, b):
return sum((m.posLoadGenerateMismatch[b,t] for t in m.TimePeriods)) >= 0.0
def power_balance(m, b, t, has_storage=False, has_non_dispatchable_generators=False):
# Power balance at each node (S)
# bus b, time t (S)
constraint = m.NetPowerInjectionAtBus[b, t] + sum(m.LinePower[l,t] for l in m.LinesTo[b]) \
- sum(m.LinePower[l,t] for l in m.LinesFrom[b]) \
+ m.LoadGenerateMismatch[b,t] == 0
return constraint
def net_power_at_bus_rule(m, b, t, has_storage=False, has_non_dispatchable_generators=False):
constraint = sum((1 - m.GeneratorForcedOutage[g,t]) * m.GeneratorBusContributionFactor[g, b] * m.PowerGenerated[g, t] for g in m.GeneratorsAtBus[b])
if has_storage is True:
constraint = constraint + sum(m.PowerOutputStorage[s, t] for s in m.StorageAtBus[b]) \
- sum(m.PowerInputStorage[s, t] for s in m.StorageAtBus[b])
if has_non_dispatchable_generators is True:
constraint = constraint + sum(m.NondispatchablePowerUsed[g, t] for g in m.NondispatchableGeneratorsAtBus[b])
constraint = constraint + m.LoadGenerateMismatch[b,t]
constraint = constraint - m.Demand[b, t]
constraint = m.NetPowerInjectionAtBus[b, t] == constraint
return constraint
# give meaning to the positive and negative parts of the mismatch
def posneg_rule(m, b, t):
return m.posLoadGenerateMismatch[b, t] - m.negLoadGenerateMismatch[b, t] == m.LoadGenerateMismatch[b, t]
def global_posneg_rule(m, t):
return m.posGlobalLoadGenerateMismatch[t] - m.negGlobalLoadGenerateMismatch[t] == m.GlobalLoadGenerateMismatch[t]
def enforce_reserve_requirements_rule(m, t, has_storage=False,
has_non_dispatchable_generators=False,
has_global_reserves=False):
constraint = sum(m.MaximumPowerAvailable[g, t] for g in m.Generators)
if has_non_dispatchable_generators is True:
constraint = constraint + sum(m.NondispatchablePowerUsed[n,t] for n in m.NondispatchableGenerators)
if has_storage is True:
constraint = constraint + sum(m.PowerOutputStorage[s,t] for s in m.Storage)
if has_global_reserves is True:
constraint = constraint - m.ReserveRequirement[t]
constraint = constraint == m.TotalDemand[t] + m.GlobalLoadGenerateMismatch[t]
return constraint
def calculate_regulating_reserve_up_available_per_generator(m, g, t):
return m.RegulatingReserveUpAvailable[g, t] == m.MaximumPowerAvailable[g,t] - m.PowerGenerated[g,t]
def enforce_zonal_reserve_requirement_rule(m, rz, t):
return sum(m.RegulatingReserveUpAvailable[g,t] for g in m.GeneratorsInReserveZone[rz]) >= m.ZonalReserveRequirement[rz, t]
def enforce_generator_output_limits_rule_part_a(m, g, t):
return m.MinimumPowerOutput[g, t] * m.UnitOn[g, t] <= m.PowerGenerated[g,t]
def enforce_generator_output_limits_rule_part_b(m, g, t):
return m.PowerGenerated[g,t] <= m.MaximumPowerAvailable[g, t]
def enforce_generator_output_limits_rule_part_c(m, g, t):
return m.MaximumPowerAvailable[g,t] <= m.MaximumPowerOutput[g, t] * m.UnitOn[g, t]
def enforce_max_available_ramp_up_rates_rule(m, g, t):
# 4 cases, split by (t-1, t) unit status (RHS is defined as the delta from m.PowerGenerated[g, t-1])
# (0, 0) - unit staying off: RHS = maximum generator output (degenerate upper bound due to unit being off)
# (0, 1) - unit switching on: RHS = startup ramp limit
# (1, 0) - unit switching off: RHS = standard ramp limit minus startup ramp limit plus maximum power output (degenerate upper bound due to unit off)
# (1, 1) - unit staying on: RHS = standard ramp limit plus power generated in previous time period
if t == 0:
return m.MaximumPowerAvailable[g, t] <= m.PowerGeneratedT0[g] + \
m.NominalRampUpLimit[g] * m.UnitOnT0[g] + \
m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOnT0[g]) + \
m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t])
else:
return m.MaximumPowerAvailable[g, t] <= m.PowerGenerated[g, t-1] + \
m.NominalRampUpLimit[g] * m.UnitOn[g, t-1] + \
m.StartupRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1]) + \
m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t])
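# Worked example for the (1, 1) "staying on" case above: with UnitOn[g, t-1] = UnitOn[g, t] = 1 the
# right-hand side reduces to PowerGenerated[g, t-1] + NominalRampUpLimit[g], i.e. the standard ramp limit.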
def enforce_max_available_ramp_down_rates_rule(m, g, t):
# 4 cases, split by (t, t+1) unit status
# (0, 0) - unit staying off: RHS = 0 (degenerate upper bound)
# (0, 1) - unit switching on: RHS = maximum generator output minus shutdown ramp limit (degenerate upper bound) - this is the strangest case.
# (1, 0) - unit switching off: RHS = shutdown ramp limit
# (1, 1) - unit staying on: RHS = maximum generator output (degenerate upper bound)
#NOTE: As expressed in Carrion-Arroyo and subsequently here, this constraint does NOT consider ramp down from initial conditions to t=1!
#if t == value(m.NumTimePeriods):
# return Constraint.Skip
#else:
# return m.MaximumPowerAvailable[g, t] <= \
# m.MaximumPowerOutput[g] * m.UnitOn[g, t+1] + \
# m.ShutdownRampLimit[g] * (m.UnitOn[g, t] - m.UnitOn[g, t+1])
#This version fixes the problem with ignoring initial conditions mentioned in the above note
if t == 0:
# Not 100% sure of this one since there is no MaximumPowerAvailableT0
return m.PowerGeneratedT0[g] <= \
m.MaximumPowerOutput[g, t] * m.UnitOn[g,t] + \
m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g,t])
else:
return m.MaximumPowerAvailable[g, t-1] <= \
m.MaximumPowerOutput[g, t] * m.UnitOn[g, t] + \
m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
def enforce_ramp_down_limits_rule(m, g, t):
# 4 cases, split by (t-1, t) unit status:
# (0, 0) - unit staying off: RHS = maximum generator output (degenerate upper bound)
# (0, 1) - unit switching on: RHS = standard ramp-down limit minus shutdown ramp limit plus maximum generator output - this is the strangest case
#NOTE: This may never be physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems
# (1, 0) - unit switching off: RHS = shutdown ramp limit
# (1, 1) - unit staying on: RHS = standard ramp-down limit
if t == 0:
return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] <= \
m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \
m.ShutdownRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) + \
m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g])
else:
return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] <= \
m.NominalRampDownLimit[g] * m.UnitOn[g, t] + \
m.ShutdownRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) + \
m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1])
def enforce_ramp_up_limits_rule(m, g, t):
# 4 cases, split by (t-1, t) unit status:
# (0, 0) - unit staying off: RHS = maximum generator output (degenerate upper bound)
# (0, 1) - unit switching on: RHS = standard ramp-down limit minus shutdown ramp limit plus maximum generator output - this is the strangest case
#NOTE: This may never be physically true, but if a generator has ShutdownRampLimit >> MaximumPowerOutput, this constraint causes problems
# (1, 0) - unit switching off: RHS = shutdown ramp limit
# (1, 1) - unit staying on: RHS = standard ramp-down limit
if t == 0:
return m.PowerGeneratedT0[g] - m.PowerGenerated[g, t] >= \
-1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \
-1 * ( m.StartupRampLimit[g] * (m.UnitOnT0[g] - m.UnitOn[g, t]) ) + \
-1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOnT0[g]) )
else:
return m.PowerGenerated[g, t-1] - m.PowerGenerated[g, t] >= \
-1 * ( m.NominalRampUpLimit[g] * m.UnitOn[g, t] ) + \
-1 * ( m.StartupRampLimit[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t]) ) + \
-1 * ( m.MaximumPowerOutput[g, t] * (1 - m.UnitOn[g, t-1]) )
# compute startup costs for each generator, for each time period
def compute_hot_start_rule(m, g, t):
if t <= value(m.ColdStartHours[g]):
if t - value(m.ColdStartHours[g]) <= value(m.UnitOnT0State[g]):
m.HotStart[g, t] = 1
m.HotStart[g, t].fixed = True
return Constraint.Skip
else:
return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(1, t) )
else:
return m.HotStart[g, t] <= sum( m.UnitOn[g, i] for i in range(t - m.ColdStartHours[g], t) )
def compute_startup_costs_rule_minusM(m, g, t):
if t == 0:
return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \
- m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOnT0[g]))
else:
return m.StartupCost[g, t] >= m.ColdStartCost[g] - (m.ColdStartCost[g] - m.HotStartCost[g])*m.HotStart[g, t] \
- m.ColdStartCost[g]*(1 - (m.UnitOn[g, t] - m.UnitOn[g, t-1]))
# compute the per-generator, per-time period shutdown costs.
def compute_shutdown_costs_rule(m, g, t):
if t == 0:
return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])
else:
return m.ShutdownCost[g, t] >= m.ShutdownCostCoefficient[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
def enforce_up_time_constraints_initial(m, g):
if value(m.InitialTimePeriodsOnLine[g]) == 0:
return Constraint.Skip
return sum((1 - m.UnitOn[g, t]) for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOnLine[g])) == 0.0
# constraint for each time period after that not involving the initial condition.
@simple_constraint_rule
def enforce_up_time_constraints_subsequent(m, g, t):
if t <= value(m.InitialTimePeriodsOnLine[g]):
# handled by the EnforceUpTimeConstraintInitial constraint.
return Constraint.Skip
elif t <= (value(m.NumTimePeriods - m.MinimumUpTime[g]) + 1):
# the right-hand side terms below are only positive if the unit was off in the previous time period but on in this one =>
# the value is the minimum number of subsequent consecutive time periods that the unit is required to be on.
if t == 0:
return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \
m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOnT0[g])
else:
return sum(m.UnitOn[g, n] for n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumUpTime[g]) - 1)) >= \
m.MinimumUpTime[g] * (m.UnitOn[g, t] - m.UnitOn[g, t-1])
else:
# handle the final (MinimumUpTime[g] - 1) time periods - if a unit is started up in
# this interval, it must remain on-line until the end of the time span.
if t == 0: # can happen when small time horizons are specified
return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOnT0[g])) for n in m.TimePeriods if n >= t) >= 0.0
else:
return sum((m.UnitOn[g, n] - (m.UnitOn[g, t] - m.UnitOn[g, t-1])) for n in m.TimePeriods if n >= t) >= 0.0
# constraint due to initial conditions.
def enforce_down_time_constraints_initial(m, g):
if value(m.InitialTimePeriodsOffLine[g]) == 0:
return Constraint.Skip
return sum(m.UnitOn[g, t] for t in m.TimePeriods if t <= value(m.InitialTimePeriodsOffLine[g])) == 0.0
# constraint for each time period after that not involving the initial condition.
@simple_constraint_rule
def enforce_down_time_constraints_subsequent(m, g, t):
if t <= value(m.InitialTimePeriodsOffLine[g]):
# handled by the EnforceDownTimeConstraintInitial constraint.
return Constraint.Skip
elif t <= (value(m.NumTimePeriods - m.MinimumDownTime[g]) + 1):
# the right-hand side terms below are only positive if the unit was off in the previous time period but on in this one =>
# the value is the minimum number of subsequent consecutive time periods that the unit is required to be on.
if t == 0:
return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \
m.MinimumDownTime[g] * (m.UnitOnT0[g] - m.UnitOn[g, t])
else:
return sum((1 - m.UnitOn[g, n]) for n in m.TimePeriods if n >= t and n <= (t + value(m.MinimumDownTime[g]) - 1)) >= \
m.MinimumDownTime[g] * (m.UnitOn[g, t-1] - m.UnitOn[g, t])
else:
# handle the final (MinimumDownTime[g] - 1) time periods - if a unit is shut down in
# this interval, it must remain off-line until the end of the time span.
if t == 0: # can happen when small time horizons are specified
return sum(((1 - m.UnitOn[g, n]) - (m.UnitOnT0[g] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0
else:
return sum(((1 - m.UnitOn[g, n]) - (m.UnitOn[g, t-1] - m.UnitOn[g, t])) for n in m.TimePeriods if n >= t) >= 0.0
def commitment_in_stage_st_cost_rule(m, st):
return m.CommitmentStageCost[st] == (sum(m.StartupCost[g,t] + m.ShutdownCost[g,t] for g in m.Generators for t in m.CommitmentTimeInStage[st]) + sum(sum(m.UnitOn[g,t] * m.MinimumProductionCost[g, t] for t in m.CommitmentTimeInStage[st]) * m.TimePeriodLength for g in m.Generators))
def generation_in_stage_st_cost_rule(m, st):
return m.GenerationStageCost[st] == sum(m.ProductionCost[g, t] for g in m.Generators for t in m.GenerationTimeInStage[st]) + m.LoadMismatchPenalty * \
(sum(m.posLoadGenerateMismatch[b, t] + m.negLoadGenerateMismatch[b, t] for b in m.Buses for t in m.GenerationTimeInStage[st]) + \
sum(m.posGlobalLoadGenerateMismatch[t] + m.negGlobalLoadGenerateMismatch[t] for t in m.GenerationTimeInStage[st]))
def StageCost_rule(m, st):
return m.StageCost[st] == m.GenerationStageCost[st] + m.CommitmentStageCost[st]
def total_cost_objective_rule(m):
return sum(m.StageCost[st] for st in m.StageSet)
def constraint_net_power(model, has_storage=False, has_non_dispatchable_generators=False):
partial_net_power_at_bus_rule = partial(net_power_at_bus_rule, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators)
model.CalculateNetPowerAtBus = Constraint(model.Buses, model.TimePeriods, rule=partial_net_power_at_bus_rule)
################################################
def constraint_line(model, ptdf=None, slack_bus=1):
model.LinePowerConstraintLower = Constraint(model.TransmissionLines, model.TimePeriods, rule=lower_line_power_bounds_rule)
model.LinePowerConstraintHigher = Constraint(model.TransmissionLines, model.TimePeriods, rule=upper_line_power_bounds_rule)
if ptdf is not None:
model.PTDF = ptdf
model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_ptdf_rule)
else:
partial_fix_first_angle_rule = partial(fix_first_angle_rule, slack_bus=slack_bus)
model.FixFirstAngle = Constraint(model.TimePeriods, rule=partial_fix_first_angle_rule)
model.CalculateLinePower = Constraint(model.TransmissionLines, model.TimePeriods, rule=line_power_rule)
def constraint_total_demand(model):
model.CalculateTotalDemand = Constraint(model.TimePeriods, rule=calculate_total_demand)
def constraint_load_generation_mismatch(model):
model.PosLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=pos_load_generate_mismatch_tolerance_rule)
model.NegLoadGenerateMismatchTolerance = Constraint(model.Buses, rule=neg_load_generate_mismatch_tolerance_rule)
model.Defineposneg_Mismatch = Constraint(model.Buses, model.TimePeriods, rule = posneg_rule)
model.Global_Defineposneg_Mismatch = Constraint(model.TimePeriods, rule = global_posneg_rule)
def constraint_power_balance(model, has_storage=False, has_non_dispatchable_generators=False):
fn_power_balance = partial(power_balance, has_storage=has_storage, has_non_dispatchable_generators=has_non_dispatchable_generators)
model.PowerBalance = Constraint(model.Buses, model.TimePeriods, rule=fn_power_balance)
def constraint_reserves(model, has_storage=False,
has_non_dispatchable_generators=False,
has_global_reserves=True,
has_regulating_reserves=True,
has_zonal_reserves=False):
if has_global_reserves is True:
fn_enforce_reserve_requirements = partial(enforce_reserve_requirements_rule, has_storage=has_storage,
has_non_dispatchable_generators=has_non_dispatchable_generators,
has_global_reserves=has_global_reserves)
model.EnforceReserveRequirements = Constraint(model.TimePeriods, rule=fn_enforce_reserve_requirements)
if has_regulating_reserves is True:
model.CalculateRegulatingReserveUpPerGenerator = Constraint(model.Generators, model.TimePeriods, rule=calculate_regulating_reserve_up_available_per_generator)
if has_zonal_reserves is True:
model.EnforceZonalReserveRequirements = Constraint(model.ReserveZones, model.TimePeriods, rule=enforce_zonal_reserve_requirement_rule)
def constraint_generator_power(model):
model.EnforceGeneratorOutputLimitsPartA = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_a)
model.EnforceGeneratorOutputLimitsPartB = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_b)
model.EnforceGeneratorOutputLimitsPartC = Constraint(model.Generators, model.TimePeriods, rule=enforce_generator_output_limits_rule_part_c)
model.EnforceMaxAvailableRampUpRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_up_rates_rule)
model.EnforceMaxAvailableRampDownRates = Constraint(model.Generators, model.TimePeriods, rule=enforce_max_available_ramp_down_rates_rule)
model.EnforceNominalRampDownLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_down_limits_rule)
model.EnforceNominalRampUpLimits = Constraint(model.Generators, model.TimePeriods, rule=enforce_ramp_up_limits_rule)
def constraint_up_down_time(model):
model.EnforceUpTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_up_time_constraints_initial)
model.EnforceUpTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_up_time_constraints_subsequent)
model.EnforceDownTimeConstraintsInitial = Constraint(model.Generators, rule=enforce_down_time_constraints_initial)
model.EnforceDownTimeConstraintsSubsequent = Constraint(model.Generators, model.TimePeriods, rule=enforce_down_time_constraints_subsequent)
def production_cost_function(m, g, t, x):
# a function for use in piecewise linearization of the cost function.
return m.TimePeriodLength * m.PowerGenerationPiecewiseValues[g,t][x] * m.FuelCost[g]
def constraint_for_cost(model):
model.ComputeProductionCosts = Piecewise(model.Generators * model.TimePeriods, model.ProductionCost, model.PowerGenerated, pw_pts=model.PowerGenerationPiecewisePoints, f_rule=production_cost_function, pw_constr_type='LB', warning_tol=1e-20)
model.ComputeHotStart = Constraint(model.Generators, model.TimePeriods, rule=compute_hot_start_rule)
model.ComputeStartupCostsMinusM = Constraint(model.Generators, model.TimePeriods, rule=compute_startup_costs_rule_minusM)
model.ComputeShutdownCosts = Constraint(model.Generators, model.TimePeriods, rule=compute_shutdown_costs_rule)
model.Compute_commitment_in_stage_st_cost = Constraint(model.StageSet, rule = commitment_in_stage_st_cost_rule)
model.Compute_generation_in_stage_st_cost = Constraint(model.StageSet, rule = generation_in_stage_st_cost_rule)
model.Compute_Stage_Cost = Constraint(model.StageSet, rule = StageCost_rule)
def objective_function(model):
model.TotalCostObjective = Objective(rule=total_cost_objective_rule, sense=minimize)
| 2.078125 | 2 |
scripts/luxafor.py | anlutro/dotfiles | 6 | 12794744 | <reponame>anlutro/dotfiles
#!/usr/bin/env /home/andreas/code/dotfiles/.venv/bin/python
from __future__ import print_function
import sys
import os.path
sys.path.append(
os.path.expanduser("~/.local/lib/python%d.%d/site-packages" % sys.version_info[:2])
)
import argparse
try:
import usb.core
except ImportError:
print(
"module usb.core not found, try: pip%d install --user pyusb"
% sys.version_info[0]
)
sys.exit(1)
def hex_to_rgb(value): # http://stackoverflow.com/a/214657
value = value.lstrip("#")
lv = len(value)
return tuple(int(value[i : i + lv // 3], 16) for i in range(0, lv, lv // 3))
class Luxafor:
def __init__(self, dev):
self.dev = dev
def set_color(self, led, hex_color):
red, green, blue = hex_to_rgb(hex_color)
self.write(1, led, red, green, blue, 0, 0)
def write(self, *values):
self.dev.write(1, values)
self.dev.write(1, values)
def fade(self, led, hex_color, duration):
red, green, blue = hex_to_rgb(hex_color)
self.write(2, led, red, green, blue, duration, 0)
def strobe(self, led, hex_color, delay, repeat):
red, green, blue = hex_to_rgb(hex_color)
self.write(3, led, red, green, blue, delay, 0, repeat)
def wave(self, hex_color, pattern, delay, repeat):
red, green, blue = hex_to_rgb(hex_color)
self.write(4, pattern, red, green, blue, 0, delay, repeat)
def pattern(self, pattern, delay):
self.write(6, pattern, delay, 0, 0, 0, 0)
def off(self):
self.write(6, 0, 0, 0, 0, 0)
def get_device():
device = usb.core.find(idVendor=0x04D8, idProduct=0xF372)
if not device:
print("Could not find device! Is the Luxafor connected?")
sys.exit(1)
try:
device.detach_kernel_driver(0)
except usb.core.USBError:
pass
device.set_configuration()
return device
COLORS = {
"red": "ff0000",
"green": "00ff00",
"blue": "0000ff",
"purple": "ff00ff",
"yellow": "ff9900",
"white": "ffffff",
"off": "000000",
}
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--led", type=int, default=1)
subparsers = parser.add_subparsers(dest="action")
off_parser = subparsers.add_parser("off")
color_parser = subparsers.add_parser("color", aliases=("c",))
color_parser.add_argument("color", type=str)
fade_parser = subparsers.add_parser("fade-to-color", aliases=("f",))
fade_parser.add_argument("color", type=str)
fade_parser.add_argument("-d", "--duration", type=int, default=25)
strobe_parser = subparsers.add_parser("strobe", aliases=("s",))
strobe_parser.add_argument("color", type=str)
strobe_parser.add_argument("-d", "--delay", type=int, default=25)
strobe_parser.add_argument("-r", "--repeat", type=int, default=100)
wave_parser = subparsers.add_parser("wave", aliases=("w",))
wave_parser.add_argument("color", type=str)
wave_parser.add_argument("pattern", type=int)
wave_parser.add_argument("-d", "--delay", type=int, default=25)
wave_parser.add_argument("-r", "--repeat", type=int, default=100)
pattern_parser = subparsers.add_parser("pattern", aliases=("p",))
pattern_parser.add_argument("pattern", type=int)
pattern_parser.add_argument("-d", "--delay", type=int, default=25)
args = parser.parse_args()
luxafor = Luxafor(get_device())
if "color" in args and args.color in COLORS:
args.color = COLORS[args.color]
if args.action == "off":
luxafor.set_color(args.led, "#000000")
elif args.action in ("color", "c"):
luxafor.set_color(args.led, args.color)
elif args.action in ("fade-to-color", "f"):
luxafor.fade(args.led, args.color, args.duration)
elif args.action in ("strobe", "s"):
luxafor.strobe(args.led, args.color, args.delay, args.repeat)
elif args.action in ("wave", "w"):
luxafor.wave(args.color, args.pattern, args.delay, args.repeat)
elif args.action in ("pattern", "p"):
luxafor.pattern(args.pattern, args.delay)
else:
print("Unknown action: %r" % args.action)
sys.exit(1)
if __name__ == "__main__":
main()
| 2.421875 | 2 |
example/ex_ok.py | doubleDragon/quant | 0 | 12794745 | <filename>example/ex_ok.py<gh_stars>0
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from okcoin.client import PrivateClient as OkClient
from config import settings
API_KEY = settings.OKCOIN_API_KEY
API_SECRET = settings.OKCOIN_API_SECRET
client = OkClient(API_KEY, API_SECRET)
print('ticker--------->' + str(client.ticker('ltc_cny')))
print('depth--------->' + str(client.depth('btc_cny')))
print('account------->' + str(client.account()))
| 2.0625 | 2 |
test/pymarshal/test_csv.py | stargateaudio/pymarshal | 2 | 12794746 | from pymarshal.csv import *
import pytest
def test_marshal_unmarshal_list():
class Test:
def __init__(self, a, b):
self.a = a
self.b = b
u = [Test("a", 2), Test("b", 3)]
assert u[0].a == "a", u[0].a
m = marshal_csv(u)
assert m[0][0] == "a", m[0][0]
u = unmarshal_csv_list(m, Test)
assert u[0].a == "a", u[0].a
def test__marshal_list_row_header():
class Test:
_marshal_list_row_header = "abc"
def __init__(self, a, b):
self.a = a
self.b = b
u = [Test("a", 2), Test("b", 3)]
m = marshal_csv(u)
assert m == [["abc", "a", 2], ["abc", "b", 3]], m
def test_unmarshal_csv():
class A:
_marshal_list_row_header = "a"
def __init__(self, a, b):
self.a = a
self.b = b
class B:
def __init__(self, a, b):
self.a = a
self.b = b
class C:
_marshal_list_row_header = "c"
def __init__(self, a, b):
self.a = a
self.b = b
class D:
_unmarshal_csv_map = {'a': {'arg_name': 'a', 'type': A}}
_unmarshal_csv_default_arg = {'arg_name': 'b', 'type': B}
_unmarshal_csv_singletons = {'c': {'arg_name': 'c', 'type': C}}
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
def __iter__(self):
for x in self.a:
yield x
for x in self.b:
yield x
yield self.c
d = D([A(1, 2)], [B(3, 4)], C(5, 6))
m = marshal_csv(d)
u = unmarshal_csv(m, D)
assert u.a[0].a == 1, u.a[0]
def test_unmarshal_csv_raises_attribute_error():
class A:
pass
with pytest.raises(AttributeError):
unmarshal_csv([], A)
def test_unmarshal_csv_raises_value_error():
class A:
_unmarshal_csv_map = {
'a': {'arg_name': 'a', 'type': object},
}
def __init__(self, a):
self.a = a
with pytest.raises(ValueError):
unmarshal_csv([[1, 2]], A)
def test_marshal_csv_dict():
class A:
_marshal_csv_dict = True
def __init__(self, a, b):
self.a = a
self.b = b
a = A(1, 2)
m = marshal_csv(a)
assert m[0] == ['a', 1], m
assert m[1] == ['b', 2], m
u = unmarshal_csv(m, A)
assert u.a == 1, u.a
assert u.b == 2, u.b
def test_csv_cast_empty_str_to_none():
func = csv_cast_empty_str_to_none(int)
assert func('') is None
assert func('23') == 23
| 2.625 | 3 |
athanor_entity/entities/aspects.py | volundmush/athanor_entity | 0 | 12794747 | <filename>athanor_entity/entities/aspects.py
"""
The Aspect system is SUPPOSED to handle things like Character Classes, Final Fantasy Jobs, Professions, etc.
Also Species/Races.
We'll see how well that works out.
"""
class Aspect(object):
name = "Unknown Aspect"
def __init__(self, handler, slot, in_data=None):
self.persistent = handler.owner.persistent
self.handler = handler
self.slot = slot
if in_data is None:
if self.persistent:
in_data = self.handler.owner.attributes.get(key=slot, category='aspect', default=dict())
else:
in_data = dict()
self.data = in_data
def __str__(self):
return self.name
def at_before_equip(self, entity, gearset, slot):
"""
This is called whenever the owner wants to equip an item.
If it returns false, the item will not be equipped.
        Override this hook to implement Aspect-specific rules about
        who can equip what.
Args:
entity (Entity): The item being equipped.
gearset (gearset): The gearset being equipped to.
slot (slot): The Gearset slot being equipped to.
Returns:
equip (bool): Whether to equip it or not.
"""
return True
def at_before_get(self, entity, inventory):
"""
This is called whenever the owner wants to get an item.
If it returns false, the item will not be obtained.
        Override this hook to implement Aspect-specific rules
        about who can carry what.
Args:
entity (Entity): The item being nabbed.
inventory (Inventory): The proposed destination inventory.
Returns:
get (bool): Whether to get it or not.
"""
return True
| 2.90625 | 3 |
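A sketch of a concrete Aspect subclass that uses the hooks above; the class name and the is_armor flag are illustrative assumptions, not part of the Athanor API:

class GuardianAspect(Aspect):
    """Illustrative subclass: only allows equipping items flagged as armor."""
    name = "Guardian"

    def at_before_equip(self, entity, gearset, slot):
        # 'is_armor' is a hypothetical attribute on the item entity; the real item API may differ.
        return bool(getattr(entity, "is_armor", False))

    def at_before_get(self, entity, inventory):
        # This aspect imposes no carry restrictions.
        return True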
conf/landscapes_relay/configen/landscapes/models/meta/ft/conf.py | wearepal/landscapes-ml | 0 | 12794748 |
from dataclasses import dataclass, field
from omegaconf import MISSING
from typing import Any
@dataclass
class LinearProbeConf:
_target_: str = "landscapes.models.meta.ft.LinearProbe"
model: Any = MISSING # ClassificationModel
@dataclass
class BitFitConf:
_target_: str = "landscapes.models.meta.ft.BitFit"
model: Any = MISSING # ClassificationModel
| 2.15625 | 2 |
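These configen-generated dataclasses follow Hydra's _target_ convention, so they are typically registered with Hydra's ConfigStore to make them selectable from config files or the command line. A sketch under that assumption; the group and entry names are illustrative:

from hydra.core.config_store import ConfigStore

cs = ConfigStore.instance()
# Group/name values are assumptions about how this project lays out its config tree.
cs.store(group="models/meta/ft", name="linear_probe", node=LinearProbeConf)
cs.store(group="models/meta/ft", name="bitfit", node=BitFitConf)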
spot-ingest/common/utils.py | maduhu/Apache-Spot-Incubator | 6 | 12794749 |
#!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import subprocess
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class Util(object):
@classmethod
def remove_kafka_topic(cls,zk,topic,logger):
rm_kafka_topic = "kafka-topics --delete --zookeeper {0} --topic {1}".format(zk,topic)
try:
logger.info("SPOT.Utils: Executing: {0}".format(rm_kafka_topic))
subprocess.call(rm_kafka_topic,shell=True)
except subprocess.CalledProcessError as e:
logger.error("SPOT.Utils: There was an error executing: {0}".format(e.cmd))
sys.exit(1)
@classmethod
def validate_parameter(cls,parameter,message,logger):
        if parameter is None or parameter == "":
logger.error(message)
sys.exit(1)
@classmethod
def creat_hdfs_folder(cls,hdfs_path,logger):
hadoop_create_folder="hadoop fs -mkdir -p {0}".format(hdfs_path)
logger.info("SPOT.Utils: Creating hdfs folder: {0}".format(hadoop_create_folder))
subprocess.call(hadoop_create_folder,shell=True)
@classmethod
def load_to_hdfs(cls,file_local_path,file_hdfs_path,logger):
# move file to hdfs.
load_to_hadoop_script = "hadoop fs -moveFromLocal {0} {1}".format(file_local_path,file_hdfs_path)
logger.info("SPOT.Utils: Loading file to hdfs: {0}".format(load_to_hadoop_script))
subprocess.call(load_to_hadoop_script,shell=True)
@classmethod
def get_logger(cls,logger_name,create_file=False):
# create logger for prd_ci
log = logging.getLogger(logger_name)
log.setLevel(level=logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
if create_file:
# create file handler for logger.
fh = logging.FileHandler('SPOT.log')
fh.setLevel(level=logging.DEBUG)
fh.setFormatter(formatter)
        # create console handler for logger.
ch = logging.StreamHandler()
ch.setLevel(level=logging.DEBUG)
ch.setFormatter(formatter)
# add handlers to logger.
if create_file:
log.addHandler(fh)
log.addHandler(ch)
return log
@classmethod
def create_watcher(cls,collector_path,new_file,logger):
logger.info("Creating collector watcher")
event_handler = new_file
observer = Observer()
observer.schedule(event_handler,collector_path)
return observer
@classmethod
def execute_cmd(cls,command,logger):
try:
logger.info("SPOT.Utils: Executing: {0}".format(command))
subprocess.call(command,shell=True)
except subprocess.CalledProcessError as e:
logger.error("SPOT.Utils: There was an error executing: {0}".format(e.cmd))
sys.exit(1)
@classmethod
def validate_data_source(cls,pipeline_type):
dirs = os.walk("{0}/pipelines/".format(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))).next()[1]
        is_type_ok = pipeline_type in dirs
return is_type_ok
class NewFileEvent(FileSystemEventHandler):
pipeline_instance = None
def __init__(self,pipeline_instance):
self.pipeline_instance = pipeline_instance
def on_moved(self,event):
if not event.is_directory:
self.pipeline_instance.new_file_detected(event.dest_path)
def on_created(self,event):
if not event.is_directory:
self.pipeline_instance.new_file_detected(event.src_path)
| 1.84375 | 2 |
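A sketch of wiring these helpers together the way an ingest pipeline presumably does: get a logger, wrap a pipeline object in NewFileEvent, and start the watchdog observer. The DummyPipeline class and the watched path are illustrative stand-ins:

class DummyPipeline(object):
    # Stand-in for a real ingest pipeline; NewFileEvent only needs this callback.
    def new_file_detected(self, file_path):
        print("new file detected: {0}".format(file_path))

logger = Util.get_logger('SPOT.INGEST', create_file=False)
handler = NewFileEvent(DummyPipeline())
observer = Util.create_watcher('/tmp/spot/incoming', handler, logger)
observer.start()   # runs in a background thread; call observer.stop() and observer.join() to shut down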
tf_euler/python/euler_ops/walk_ops.py | MMyheart/euler | 0 | 12794750 | # Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tf_euler.python.euler_ops import base
from tf_euler.python.euler_ops import type_ops
import numpy as np
gen_pair = base._LIB_OP.gen_pair
_random_walk = base._LIB_OP.random_walk
def random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
'''
Random walk from a list of nodes.
Args:
nodes: start node ids, 1-d Tensor
edge_types: list of 1-d Tensor of edge types
      p: return (back) probability parameter
      q: in-out (forward) probability parameter
      default_node: node id used to fill missing steps in a walk
'''
if base.nebula_ops['random_walk']:
return nebula_random_walk(nodes, edge_types, p, q, default_node)
edge_types = [type_ops.get_edge_type_id(edge_type)
for edge_type in edge_types]
return _random_walk(nodes, edge_types, p, q, default_node)
def nebula_random_walk(nodes, edge_types, p=1.0, q=1.0, default_node=-1):
result = tf.py_func(
_nebula_random_walk,
[nodes, edge_types, p, q, default_node],
[tf.int64],
True,
'NebulaRandomWalk'
)
result[0].set_shape((nodes.shape.dims[0].value, len(edge_types) + 1))
return result[0]
def _nebula_random_walk(nodes, edge_types, p, q, default_node):
paths = []
uniq_nodes = {}.fromkeys(nodes).keys()
nql = 'USE {}; randomwalk {} from {} over {} where p=={} and q=={}'.format(
base.nebula_space,
len(edge_types),
', '.join(str(x) for x in uniq_nodes),
', '.join(str('e_' + x) for x in edge_types[0]),
p,
q
)
path_cache = {}
resp = base.nebula_client.execute_query(nql)
if resp.rows is not None:
for row in resp.rows:
path = row.columns[0].get_str()
path_nodes = map(lambda x: long(x if x != '-1' else default_node), path.split('#'))
path_cache[path_nodes[0]] = path_nodes
for node in nodes:
paths.append(path_cache[node])
    return np.asarray(paths, np.int64)
| 2.15625 | 2 |
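A sketch of calling random_walk in TF1 graph mode, which is what the tf.py_func-based implementation above implies; the node ids and edge-type values are placeholders and assume a graph has already been initialized through tf_euler's usual setup:

import tensorflow as tf
from tf_euler.python.euler_ops import walk_ops

# Placeholder ids and edge types; the exact edge-type format depends on the loaded graph.
nodes = tf.constant([1, 2, 3], dtype=tf.int64)
walks = walk_ops.random_walk(nodes, edge_types=[['0'], ['0']], p=1.0, q=2.0, default_node=-1)

with tf.Session() as sess:
    print(sess.run(walks))   # shape [num_nodes, len(edge_types) + 1], i.e. [3, 3] here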