content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---|
import numpy as np


def wpr(c_close, c_high, c_low, period):
    """
    Williams %R
    :type c_close: np.ndarray
    :type c_high: np.ndarray
    :type c_low: np.ndarray
    :type period: int
    :rtype: np.ndarray
"""
size = len(c_close)
out = np.array([np.nan] * size)
for i in range(period - 1, size):
e = i + 1
s = e - period
mh = np.max(c_high[s:e])
out[i] = ((mh - c_close[i]) / (mh - np.min(c_low[s:e]))) * -100
return out | 0f1d8d46464be81daa6308df97a7a8d12a90274b | 17,400 |
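A minimal usage sketch (not part of the original snippet); the price arrays below are made-up sample data and assume the function above is in scope.

import numpy as np

high = np.array([12.0, 12.5, 12.3, 12.8, 13.1, 13.0])
low = np.array([11.5, 11.8, 11.9, 12.1, 12.6, 12.4])
close = np.array([11.9, 12.2, 12.1, 12.7, 12.9, 12.5])

# Williams %R over a 3-bar window; the first period-1 entries remain NaN.
print(wpr(close, high, low, period=3))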
def delete_action_log(request, log_id):
"""
    View for deleting an action log.
    This view can only be accessed by superusers and staff.
"""
action = get_object_or_404(ActionLog, id=log_id)
if action.status == 0 or action.status == 1:
messages.error(request, "Cannot delete the Action log that is running or in idle state!")
return redirect('actions')
action.delete()
messages.success(request, "Delete action log successfully!")
return redirect('actions') | 8560e5280a57ddc8158b811fac29763bbaa8ef37 | 17,401 |
def hsla_to_rgba(h, s, l, a):
""" 0 <= H < 360, 0 <= s,l,a < 1
"""
h = h % 360
s = max(0, min(1, s))
l = max(0, min(1, l))
a = max(0, min(1, a))
c = (1 - abs(2*l - 1)) * s
x = c * (1 - abs(h/60%2 - 1))
m = l - c/2
if h<60:
r, g, b = c, x, 0
elif h<120:
r, g, b = x, c, 0
elif h<180:
r, g, b = 0, c, x
elif h<240:
r, g, b = 0, x, c
elif h<300:
r, g, b = x, 0, c
else:
r, g, b = c, 0, x
return (int((r+m)*255), int((g+m)*255), int((b+m)*255), int(a*255)) | 55e546756d4dd2a49581a5f950beb286dd73f3f9 | 17,402 |
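A quick illustrative call (not from the original source): pure red at half lightness and 50% alpha.

# h=0 (red), s=1.0, l=0.5, a=0.5 -> (255, 0, 0, 127)
print(hsla_to_rgba(0, 1.0, 0.5, 0.5))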
from typing import Dict
from pathlib import Path
from typing import Optional
def prioritize(paths: Dict[int, Path], purpose: str) -> Optional[Path]:
"""Returns highest-priority and existing filepath from ``paths``.
Finds existing configuration or data file in ``paths`` with highest
priority and returns it, otherwise returns ``None``.
"""
for key in sorted(paths.keys(), reverse=True):
if purpose == "config":
if paths[key].exists():
return paths[key]
if purpose == "data":
return paths[key] | 2c00d0bfe696040c2c19dc1d8b3393b7be124e11 | 17,403 |
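A hedged usage sketch with hypothetical paths; the outcome of the "config" lookup depends on which files actually exist on disk.

from pathlib import Path

# Higher keys win. For "data" the highest-priority path is returned as-is;
# for "config" it must also exist, otherwise lower priorities are tried and
# None is returned if none of them exist.
paths = {1: Path("/etc/myapp/config.toml"), 2: Path("/home/user/.config/myapp/config.toml")}
print(prioritize(paths, "data"))    # /home/user/.config/myapp/config.toml
print(prioritize(paths, "config"))  # likely None unless one of the files exists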
def traverse(d, path):
"""Return the value at the given path from the given nested dict/list"""
for k in path.split('.'):
if k.isdigit():
k = int(k)
d = d[k]
return d | ba832a008073da5d97ba0a237a8e0ded17e4694e | 17,404 |
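A small illustration (not from the original source) of the dotted-path convention, where digit segments index into lists.

doc = {"servers": [{"host": "db01", "ports": [5432]}]}
print(traverse(doc, "servers.0.host"))     # 'db01'
print(traverse(doc, "servers.0.ports.0"))  # 5432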
def _bundle_name_with_extension(ctx):
"""Returns the name of the bundle with its extension.
Args:
ctx: The Skylark context.
Returns:
The bundle name with its extension.
"""
return _bundle_name(ctx) + _bundle_extension(ctx) | 51f9c84fa2dd0ef9c5736a59ca2cd3c2d76aa108 | 17,405 |
import subprocess
def call(args):
"""
Call args in a subprocess and display output on the fly.
Return or raise stdout, stderr, returncode
"""
if TRACE: print('Calling:', ' '.join(args))
with subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding='utf-8'
) as process:
        out_lines = []
        while True:
            line = process.stdout.readline()
            if not line and process.poll() is not None:
                break
            out_lines.append(line)
            if TRACE: print(line.rstrip(), flush=True)
        # stdout was already consumed line by line above; stderr is merged into
        # stdout by the STDOUT redirection, so it is always None here.
        stdout, stderr = ''.join(out_lines), None
returncode = process.returncode
if returncode == 0:
return returncode, stdout, stderr
else:
raise Exception(returncode, stdout, stderr) | 7837a8e807bc7e3cda56b1f5e8625f936cc4bee0 | 17,406 |
def cvt_axisang_t_o2i(axisang, trans):
"""-correction: t_r, R_rt_r. outer to inner"""
trans -= get_offset(axisang)
return axisang, trans | ef263052e91ecc2fb8e668bca89a9d5622b75ff2 | 17,407 |
import pytz
import numpy
import dateutil.parser
def processData(dict, valuename, timename='Aika', multiplier=1.0):
"""Process "raw" OData dict and strip only the time and value.
Also convert time to UTC and hydrodynamics model (COHERENS) format.
Parameters
----------
dict: dictionary
Data dictionary as received from OData fetcher
valuename: string
        Value name to process
timename: string
Time field name
multiplier: float
Multiply value with this number. Useful in e.g. unit conversions.
Returns dictionary with processed data.
"""
# Gets valuename field from dict of sites along with timefield and multiplies values by multiplier
# Returns dict of sites with list of values: time, coherenstime, value
tz = pytz.timezone('Europe/Helsinki') # Default data timezone in case it doesn't exist
if numpy.isnan(multiplier):
print("Warning: multiplier ignored (NaN)")
multiplier = 1.0
newdict = {}
for site in dict:
newdata = []
for meas in dict[site]:
time = dateutil.parser.parse(meas[timename])
# If timezone not present, assume local (Finland) timezone
if time.tzinfo is None or time.tzinfo.utcoffset(time) is None:
time = tz.localize(time)
# If timezone is not UTC, convert time to UTC
if time.tzname() != 'UTC':
time = time.astimezone(pytz.utc)
# Convert time from datetime object to COHERENS ASCII format
coherenstime = time.strftime("%Y/%m/%d;%H:%M:%S,000")
value = float(meas[valuename])*multiplier
newdata.append([time, coherenstime, value])
newdict[site] = newdata
return newdict | df452a703a3afface12dc76abb647a5e38b808c3 | 17,408 |
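An illustrative call with made-up readings; 'Aika' is the function's default time field, while the site key and 'Temp' field are hypothetical.

raw = {"site1": [{"Aika": "2021-06-01T12:00:00", "Temp": "18.5"}]}
# The naive timestamp is localized to Europe/Helsinki, converted to UTC and
# reformatted for COHERENS; the value is multiplied by `multiplier`.
print(processData(raw, "Temp", multiplier=1.0))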
from datetime import datetime
def get_current_year():
"""Returns current year
"""
    return str(datetime.now().year) | f019e7f2462a4d8db0db294fade6ca737e87a24c | 17,409 |
def parse_ans(text):
"""
Parses the given text as an answer set, i.e., a sequence of predicate
statements. Returns a (possibly empty) tuple of Predicate objects.
"""
return parser.parse_completely(
text,
parser.Rep(PredicateStatement),
devour=devour_asp
) | 44479668629b142115c27476242cbdf23b6657cc | 17,410 |
import numpy as np


def get_xyz(data):
"""
:param data: 3D data
:return: 3D data coordinates
    Indices along dimensions 1, 2 and 3 increase successively.
"""
nim = data.ndim
if nim == 3:
size_x, size_y, size_z = data.shape
x_arange = np.arange(1, size_x+1)
y_arange = np.arange(1, size_y+1)
z_arange = np.arange(1, size_z+1)
[xx, yy, zz] = np.meshgrid(x_arange, y_arange, z_arange, indexing='ij')
xyz = np.column_stack([zz.flatten(), yy.flatten(), xx.flatten()])
else:
"""
:param data: 2D data
:return: 2D data coordinates
        Indices along dimensions 1 and 2 increase successively.
"""
size_x, size_y = data.shape
x_arange = np.arange(1, size_x + 1)
y_arange = np.arange(1, size_y + 1)
[xx, yy] = np.meshgrid(x_arange, y_arange, indexing='ij')
xyz = np.column_stack([yy.flatten(), xx.flatten()])
return xyz | b1bd78fee6ca4a8fc2a33430c4ea5e922d696381 | 17,411 |
from PEPit import PEP
from PEPit.functions import SmoothStronglyConvexFunction, ConvexFunction
from PEPit.primitive_steps import proximal_step


def wc_proximal_gradient(L, mu, gamma, n, verbose=1):
"""
Consider the composite convex minimization problem
.. math:: F_\\star \\triangleq \\min_x \\{F(x) \\equiv f_1(x) + f_2(x)\\},
where :math:`f_1` is :math:`L`-smooth and :math:`\\mu`-strongly convex,
and where :math:`f_2` is closed convex and proper.
This code computes a worst-case guarantee for the **proximal gradient** method (PGM).
That is, it computes the smallest possible :math:`\\tau(n, L, \\mu)` such that the guarantee
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\tau(n, L, \\mu) \\|x_0 - x_\\star\\|^2,
is valid, where :math:`x_n` is the output of the **proximal gradient**,
and where :math:`x_\\star` is a minimizer of :math:`F`.
In short, for given values of :math:`n`, :math:`L` and :math:`\\mu`,
:math:`\\tau(n, L, \\mu)` is computed as the worst-case value of
:math:`\\|x_n - x_\\star\\|^2` when :math:`\\|x_0 - x_\\star\\|^2 \\leqslant 1`.
**Algorithm**: Proximal gradient is described by
.. math::
\\begin{eqnarray}
y_t & = & x_t - \\gamma \\nabla f_1(x_t), \\\\
        x_{t+1} & = & \\arg\\min_x \\left\\{f_2(x)+\\frac{1}{2\\gamma}\\|x-y_t\\|^2 \\right\\},
        \\end{eqnarray}
    for :math:`t \\in \\{ 0, \\dots, n-1\\}` and where :math:`\\gamma` is a step-size.
**Theoretical guarantee**: It is well known that a **tight** guarantee for PGM is provided by
.. math :: \\|x_n - x_\\star\\|^2 \\leqslant \\max\\{(1-L\\gamma)^2,(1-\\mu\\gamma)^2\\}^n \\|x_0 - x_\\star\\|^2,
    which can be found in, e.g., [1, Theorem 3.1]. It is folk knowledge and the result can be found in many references
    for gradient descent; see, e.g., [2, Section 1.4: Theorem 3], [3, Section 5.1] and [4, Section 4.4].
**References**:
`[1] A. Taylor, J. Hendrickx, F. Glineur (2018). Exact worst-case convergence rates of the proximal gradient
method for composite convex minimization. Journal of Optimization Theory and Applications, 178(2), 455-476.
<https://arxiv.org/pdf/1705.04398.pdf>`_
[2] B. Polyak (1987). Introduction to Optimization. Optimization Software New York.
    `[3] E. Ryu, S. Boyd (2016). A primer on monotone operator methods.
Applied and Computational Mathematics 15(1), 3-43.
<https://web.stanford.edu/~boyd/papers/pdf/monotone_primer.pdf>`_
`[4] L. Lessard, B. Recht, A. Packard (2016). Analysis and design of optimization algorithms via
integral quadratic constraints. SIAM Journal on Optimization 26(1), 57–95.
<https://arxiv.org/pdf/1408.3595.pdf>`_
Args:
L (float): the smoothness parameter.
mu (float): the strong convexity parameter.
gamma (float): proximal step-size.
n (int): number of iterations.
        verbose (int): Level of information details to print.
                        - -1: No verbose at all.
                        - 0: This example's output.
                        - 1: This example's output + PEPit information.
                        - 2: This example's output + PEPit information + CVXPY details.
Returns:
pepit_tau (float): worst-case value.
theoretical_tau (float): theoretical value.
Example:
>>> pepit_tau, theoretical_tau = wc_proximal_gradient(L=1, mu=.1, gamma=1, n=2, verbose=1)
(PEPit) Setting up the problem: size of the main PSD matrix: 7x7
(PEPit) Setting up the problem: performance measure is minimum of 1 element(s)
(PEPit) Setting up the problem: initial conditions (1 constraint(s) added)
(PEPit) Setting up the problem: interpolation conditions for 2 function(s)
function 1 : 6 constraint(s) added
function 2 : 6 constraint(s) added
(PEPit) Compiling SDP
(PEPit) Calling SDP solver
(PEPit) Solver status: optimal (solver: SCS); optimal value: 0.6560999999942829
*** Example file: worst-case performance of the Proximal Gradient Method in function values***
PEPit guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
Theoretical guarantee: ||x_n - x_*||^2 <= 0.6561 ||x0 - xs||^2
"""
# Instantiate PEP
problem = PEP()
# Declare a strongly convex smooth function and a closed convex proper function
f1 = problem.declare_function(SmoothStronglyConvexFunction, mu=mu, L=L)
f2 = problem.declare_function(ConvexFunction)
func = f1 + f2
# Start by defining its unique optimal point xs = x_*
xs = func.stationary_point()
# Then define the starting point x0 of the algorithm
x0 = problem.set_initial_point()
# Set the initial constraint that is the distance between x0 and x^*
problem.set_initial_condition((x0 - xs) ** 2 <= 1)
# Run the proximal gradient method starting from x0
x = x0
for _ in range(n):
y = x - gamma * f1.gradient(x)
x, _, _ = proximal_step(y, f2, gamma)
# Set the performance metric to the distance between x and xs
problem.set_performance_metric((x - xs) ** 2)
# Solve the PEP
pepit_verbose = max(verbose, 0)
pepit_tau = problem.solve(verbose=pepit_verbose)
# Compute theoretical guarantee (for comparison)
theoretical_tau = max((1 - mu*gamma)**2, (1 - L*gamma)**2)**n
# Print conclusion if required
if verbose != -1:
print('*** Example file: worst-case performance of the Proximal Gradient Method in function values***')
print('\tPEPit guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2'.format(pepit_tau))
print('\tTheoretical guarantee:\t ||x_n - x_*||^2 <= {:.6} ||x0 - xs||^2 '.format(theoretical_tau))
# Return the worst-case guarantee of the evaluated method ( and the reference theoretical value)
return pepit_tau, theoretical_tau | ff8b67b963a2301e9b49870ffa9b6736a23420a4 | 17,412 |
import re
import os
def basic_output_filter(
filtered_prefixes=None,
filtered_patterns=None,
):
"""
Create a line filtering function to help output testing.
:param filtered_prefixes: A list of byte strings representing prefixes that will cause
output lines to be ignored if they start with one of the prefixes. By default lines
starting with the process ID (`'pid'`) and return code (`'rc'`) will be ignored.
:param filtered_patterns: A list of byte strings representing regexes that will cause
output lines to be ignored if they match one of the regexes.
"""
if filtered_prefixes is None:
filtered_prefixes = get_default_filtered_prefixes()
if filtered_patterns is None:
filtered_patterns = get_default_filtered_patterns()
    # Compile to a list up front; a lazy map() iterator would be exhausted after
    # the first line is filtered.
    filtered_patterns = [re.compile(pattern) for pattern in filtered_patterns]
def _filter(output):
filtered_output = []
for line in output.splitlines():
# Filter out stdout that comes from underlying DDS implementation
# Note: we do not currently support matching filters across multiple stdout lines.
if any(line.startswith(prefix) for prefix in filtered_prefixes):
continue
if any(pattern.match(line) for pattern in filtered_patterns):
continue
filtered_output.append(line)
if output.endswith(os.linesep):
filtered_output.append(os.linesep)
return os.linesep.join(filtered_output)
return _filter | 8db044d7d9d94ad7c4137762336d7a4b80f7a53c | 17,413 |
def all_asset_types_for_shot(shot, client=default):
"""
Args:
shot (str / dict): The shot dict or the shot ID.
Returns:
list: Asset types from assets casted in given shot.
"""
path = "shots/%s/asset-types" % shot["id"]
return sort_by_name(raw.fetch_all(path, client=client)) | a7d06e49d564dbd294636e29f488703f5027026e | 17,414 |
from typing import Iterable

import numpy as np
from numpy import ndarray


def train(x_mat: ndarray, k: int, *, max_iters: int = 10, initial_centroids: Iterable = None, history: bool = False):
    """
    Run k-means training.
    :param x_mat: feature matrix; the number of rows m is the sample count, the number of columns n is the feature count
    :param k: number of clusters
    :param max_iters: maximum number of iterations
    :param initial_centroids: initial cluster centroids; chosen at random if not provided
    :param history: whether to also return the iteration history
    :return: the final centroids; a row vector with the index of the centroid assigned to each sample;
             and, if ``history`` is True, the list of centroids computed at each iteration
"""
x_mat = __t.r2m(x_mat)
m, n = x_mat.shape
if initial_centroids is None:
rand_indices = np.arange(0, m)
np.random.shuffle(rand_indices)
initial_centroids = x_mat[rand_indices[:k], :]
if not isinstance(initial_centroids, ndarray):
initial_centroids = np.asarray(initial_centroids)
idx = None
centroids_history = None
if history:
centroids_history = [initial_centroids]
for i in range(max_iters):
idx = find_closest(x_mat, initial_centroids)
initial_centroids = compute_centroids(x_mat, idx)
if history:
centroids_history.append(initial_centroids)
if history:
return initial_centroids, idx, centroids_history
else:
return initial_centroids, idx | 3a27cb709d6b267c8da19312f634f6003e2ba9a3 | 17,415 |
from urllib import error, request


def download4(url, user_agent='wswp', num_retries=2):
    """Download function that includes user agent support"""
    # wswp: web scraping with python
    print('Downloading:', url)
    headers = {'User-agent': user_agent}
    req = request.Request(url, headers=headers)
    try:
        html = request.urlopen(req).read()
    except error.URLError as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                # retry 5XX HTTP errors
                html = download4(url, user_agent, num_retries - 1)
    return html | 1381e64e93b373e68a1a07eaab1688462f905374 | 17,416 |
import unittest
def testv1():
"""Runs the unit tests without test coverage."""
tests = unittest.TestLoader().discover('./tests/api/v1', pattern='test*.py')
result = unittest.TextTestRunner(verbosity=2).run(tests)
if result.wasSuccessful():
return 0
return 1 | 519835d59ce7d370e8099e94a48b1e7309274d99 | 17,417 |
import numpy as np


def get_third_order_displacements(cell,
                                  symmetry,
                                  is_plusminus='auto',
                                  is_diagonal=False):
    """Create displacement dataset
Note
----
Atoms 1, 2, and 3 are defined as follows:
Atom 1: The first displaced atom. Third order force constant
between Atoms 1, 2, and 3 is calculated.
Atom 2: The second displaced atom. Second order force constant
between Atoms 2 and 3 is calculated.
        Atom 3: Force is measured on this atom.
Parameters
----------
cell : PhonopyAtoms
Supercell
symmetry : Symmetry
Symmetry of supercell
is_plusminus : str or bool, optional
Type of displacements, plus only (False), always plus and minus (True),
and plus and minus depending on site symmetry ('auto').
is_diagonal : bool, optional
        Whether to allow diagonal displacements of Atom 2 or not
Returns
-------
dict
Data structure is like:
{'natom': 64,
'cutoff_distance': 4.000000,
'first_atoms':
[{'number': atom1,
'displacement': [0.03, 0., 0.],
'second_atoms': [ {'number': atom2,
'displacement': [0., -0.03, 0.],
'distance': 2.353},
{'number': ... }, ... ] },
{'number': atom1, ... } ]}
"""
positions = cell.get_scaled_positions()
lattice = cell.get_cell().T
# Least displacements of first atoms (Atom 1) are searched by
# using respective site symmetries of the original crystal.
# 'is_diagonal=False' below is made intentionally to expect
# better accuracy.
disps_first = get_least_displacements(symmetry,
is_plusminus=is_plusminus,
is_diagonal=False)
symprec = symmetry.get_symmetry_tolerance()
dds = []
for disp in disps_first:
atom1 = disp[0]
disp1 = disp[1:4]
site_sym = symmetry.get_site_symmetry(atom1)
dds_atom1 = {'number': atom1,
'direction': disp1,
'second_atoms': []}
# Reduced site symmetry at the first atom with respect to
# the displacement of the first atoms.
reduced_site_sym = get_reduced_site_symmetry(site_sym, disp1, symprec)
# Searching orbits (second atoms) with respect to
# the first atom and its reduced site symmetry.
second_atoms = get_least_orbits(atom1,
cell,
reduced_site_sym,
symprec)
for atom2 in second_atoms:
dds_atom2 = get_next_displacements(atom1,
atom2,
reduced_site_sym,
lattice,
positions,
symprec,
is_diagonal)
min_vec = get_equivalent_smallest_vectors(atom1,
atom2,
cell,
symprec)[0]
min_distance = np.linalg.norm(np.dot(lattice, min_vec))
dds_atom2['distance'] = min_distance
dds_atom1['second_atoms'].append(dds_atom2)
dds.append(dds_atom1)
return dds | 6ae03dbd10ffec75bc274b4a4115b81d28cefc40 | 17,418 |
def get_result_type(action):
"""Gets the corresponding ROS action result type.
Args:
action: ROS action name.
Returns:
Result message type. None if not found.
"""
msg_type = rostopic.get_topic_type("{}/result".format(action))[0]
# Replace 'ActionResult' with 'Result'.
return msg_type[:-12] + "Result" | f5612ac116357000106f7ff24f0d2bebb6789547 | 17,419 |
import statistics
import math
def get_turbulence(sequence):
"""
Computes turbulence for a given sequence, based on `Elzinga & Liefbroer's 2007 definition <https://www.researchgate.net/publication/225402919_De-standardization_of_Family-Life_Trajectories_of_Young_Adults_A_Cross-National_Comparison_Using_Sequence_Analysis>`_ which is also implemented in the `TraMineR <http://traminer.unige.ch/doc/seqST.html>`_ sequence analysis library.
Example
--------
>>> sequence = [1,1,2,2,3]
>>> ps.get_turbulence(sequence)
5.228...
"""
phi = get_ndistinct_subsequences(sequence)
#print('phi', phi)
state_durations = [value for key, value in get_spells(sequence)]
#print('durations', state_durations)
#print('mean duration', statistics.mean(state_durations))
variance_of_state_durations = statistics.variance(state_durations)
#print('variance', variance_of_state_durations)
tbar = statistics.mean(state_durations)
maximum_state_duration_variance = (len(sequence) - 1) * (1 - tbar) ** 2
#print('smax', maximum_state_duration_variance)
top_right = maximum_state_duration_variance + 1
bot_right = variance_of_state_durations + 1
turbulence = math.log2(phi * (top_right / bot_right))
#print('turbulence', turbulence)
return turbulence | 9900d377240b609de1cb7a5284752457947ef6c3 | 17,420 |
def reflect(array, holder=1):
"""
Reflects a np array across the y-axis
Args:
array: array to be reflected
holder: a holder variable so the function can be used in optimization algorithms. If <0.5, does not reflect.
Returns:
Reflected array
"""
c = array.copy()
if holder > 0.5:
c[:, 0] = -c[:, 0]
return c | c39cbf0bb3a949254e4f0c35b20bdf84766d2084 | 17,421 |
import os
from datetime import datetime
import time
import logging
import sys
def create_and_return_logger(logger_name, filename="log"):
"""
Function to create a custom logger that will print to terminal
as well as write to a log file
Accepts: Logger name for script that is calling logger, and filename for log.
Returns: Logger object and Log file path.
"""
LOG_FILE_DIR = os.getcwd() + "/logs"
if not os.path.exists(LOG_FILE_DIR):
os.makedirs(LOG_FILE_DIR)
LOG_FILE = f"{LOG_FILE_DIR}/{filename}_{datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')}.log"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logFormatter = logging.Formatter(
"%(levelname)s %(asctime)s %(processName)s %(message)s"
)
sh = logging.StreamHandler(sys.stdout)
sh.setFormatter(logFormatter)
logger.handlers.clear()
logger.addHandler(sh)
fileHandler = logging.FileHandler(f"{LOG_FILE}")
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
return logger, LOG_FILE | 5860e00fe3fd52e11afd4daa27554c2e070afe06 | 17,422 |
from py import path


def _get_all_entries(entry_list, keep_top_dir):
"""
Returns a list of all entries (files, directories) that should be copied.
The main purpose of this function is to evaluate 'keep_top_dir' and in case
it should not be kept use all the entries below the top-level directories.
"""
all_files = []
entry_list = [path.local(entry) for entry in entry_list]
if keep_top_dir:
all_files = entry_list
else:
for entry in entry_list:
if entry.isdir():
all_files.extend(entry.listdir())
else:
all_files.append(entry)
return all_files | fbd12ef87cdc21307908ad7013167559da6fc051 | 17,423 |
import os
def process_exclude_items(exclude_items=[]):
"""
Process the exclude items to get list of directories to NOT be scanned
:return: a list of directories to not be scanned if any, otherwise an empty list
"""
logger.debug("Parsing exclude items ...")
parsed_list = []
for item in exclude_items:
item = item.strip()
if not item or item.startswith('#'):
continue
exclude_item = os.path.normpath(item).replace('//', '/')
if os.path.exists(exclude_item):
# ignore the exclude_item if its not a full directory path
if exclude_item == '/':
# Found / in exclude list. No need to get the other items because / trumps all
logger.debug("Found root directory in the exclude list. Expanding it to all toplevel directories ...")
parsed_list = get_toplevel_dirs()
break
elif not exclude_item.startswith('/'):
logger.debug("Skipping partial directory path '%s' ...", exclude_item)
continue
else:
parsed_list.append(exclude_item)
else:
logger.debug("Skipping missing item '%s' ...", exclude_item)
if not parsed_list:
logger.debug("No items specified to be excluded")
else:
# Remove any duplicates and any children of parent directories before returning
parsed_list = remove_child_items(sorted(list(set(parsed_list))))
logger.debug("Exclude items: %s", parsed_list)
return parsed_list | 36374faa3182aa1db0ba0d12b6a4f3ce23a69a95 | 17,424 |
def absolute_name_scope(scope, reuse=tf.AUTO_REUSE):
"""Builds an absolute tf.name_scope relative to the current_scope.
This is helpful to reuse nested name scopes.
E.g. The following will happen when using regular tf.name_scope:
with tf.name_scope('outer'):
with tf.name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const:0
with tf.name_scope('outer'):
with tf.name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner_1/Const:0
With absolute_name_scope:
with absolute_name_scope('outer'):
with absolute_name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const:0
with absolute_name_scope('outer'):
with absolute_name_scope('inner'):
print(tf.constant(1)) # Will print outer/inner/Const_1:0
"""
current_scope = tf.get_default_graph().get_name_scope()
if not current_scope:
if scope.endswith('/'):
scope = tf.variable_scope(scope, reuse=reuse)
else:
scope = tf.variable_scope('{}/'.format(scope), reuse=reuse)
else:
scope = tf.variable_scope('{}/{}/'.format(current_scope, scope), reuse=reuse)
return scope | b9bd9e801603472c3e7e1db7a8768387b9942f3c | 17,425 |
def regina_edge_orientation_agrees(tet, vert_pair):
"""
Given tet and an ordered pair of (regina) vert nums of that tet, does this ordering
agree with regina's ordering of the verts of that edge of the triangulation
"""
edge_num = vert_pair_to_edge_num[tuple(vert_pair)]
mapping = tet.faceMapping(1, edge_num)
map_order = [mapping[0], mapping[1]]
assert set(map_order) == set(vert_pair)
return map_order == vert_pair | a70f08f56754eee24b9c4c71d5b6b537388a4ca4 | 17,426 |
from datetime import datetime, timedelta
from typing import Optional
from uuid import UUID, uuid4
async def populate_challenge(
challenge_status: str = "process",
is_public: bool = True,
user_id: Optional[UUID] = USER_UUID,
challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
"""Populate challenge for routes testings."""
if not user_id:
user_id = uuid4()
user: User = await populate_user(user_id=user_id)
track, _ = await Track.get_or_create(test_track_info)
await populate_playlist()
challenge_end = datetime.utcnow() + timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "vote":
challenge_end = datetime.utcnow() - timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "end":
challenge_end = datetime.utcnow() - timedelta(days=2)
vote_end = datetime.utcnow() - timedelta(days=1)
challenge, _ = await Challenge.get_or_create(
id=challenge_id,
name="test",
challenge_end=challenge_end,
vote_end=vote_end,
is_public=is_public,
owner=user,
track=track,
)
await challenge.participants.add(user)
return challenge | fa47e65c7615af8dfebed4dd66fd92141d50e130 | 17,427 |
from typing import List
def is_common_prefix(words: List[str], length: int) -> bool:
"""Binary Search"""
word: str = words[0][:length]
for next_word in words[1:]:
if not next_word.startswith(word):
return False
return True | f57c7309f725baba0b65c92181a6f1ab2827558a | 17,428 |
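This helper is typically driven by a binary search over the prefix length (the classic longest-common-prefix problem); a standalone call with made-up words looks like this.

words = ["flower", "flow", "flight"]
print(is_common_prefix(words, 2))  # True  -- "fl" is shared by every word
print(is_common_prefix(words, 3))  # False -- "flo" is not a prefix of "flight"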
import numpy as np


def freq_upsample(s, upsample):
""" padding in frequency domain, should be used with ifft so that
signal is upsampled in time-domain.
Args:
s : frequency domain signal
upsample : an integer indicating factor of upsampling.
Returns:
padded signal
"""
if upsample == 1:
return s
assert isinstance(upsample, int) and upsample > 1
l = len(s)
if l % 2 == 0:
        h = l // 2  # integer division so h can be used as a slice index
return upsample * np.concatenate(
(s[:h], np.array([s[h] / 2.0]),
np.zeros(l * (upsample - 1) - 1),
np.array([s[h] / 2.0]), s[h+1:]))
else:
        h = l // 2 + 1
return upsample * np.concatenate(
(s[:h], np.zeros(l * (upsample - 1)), s[h:])) | 78377f6c552fe4f6d764a33b3e6ee555b4aabe71 | 17,429 |
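A small sketch of the intended round trip (assumes NumPy): pad the spectrum, then ifft back to obtain a time-domain signal upsampled by the same factor.

import numpy as np

t = np.linspace(0, 1, 8, endpoint=False)
x = np.sin(2 * np.pi * t)             # 8-sample sine wave
spectrum = np.fft.fft(x)
padded = freq_upsample(spectrum, 2)   # 16 frequency bins
x_up = np.fft.ifft(padded).real       # time-domain signal with 16 samples
print(len(x_up))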
def streaming_parsing_covering(groundtruth_categories,
groundtruth_instances,
predicted_categories,
predicted_instances,
num_classes,
max_instances_per_category,
ignored_label,
offset,
normalize_by_image_size=True,
name=None):
"""Aggregates the covering across calls with different input tensors.
See tf.metrics.* functions for comparable functionality and usage.
Args:
groundtruth_categories: A 2D uint16 tensor of groundtruth category labels.
groundtruth_instances: A 2D uint16 tensor of groundtruth instance labels.
predicted_categories: A 2D uint16 tensor of predicted category labels.
predicted_instances: A 2D uint16 tensor of predicted instance labels.
num_classes: Number of classes in the dataset as an integer.
max_instances_per_category: The maximum number of instances for each class
as an integer or integer tensor.
ignored_label: The class id to be ignored in evaluation as an integer or
integer tensor.
offset: The maximum number of unique labels as an integer or integer tensor.
normalize_by_image_size: Whether to normalize groundtruth region areas by
image size. If True, groundtruth instance areas and weighted IoUs will be
divided by the size of the corresponding image before accumulated across
the dataset.
name: An optional variable_scope name.
Returns:
coverings: A tensor of shape `[3, num_classes]`, where (1) per class
coverings, (2) per class sum of weighted IoUs, and (3) per class sum of
groundtruth region areas are saved in the perspective rows.
update_ops: List of operations that update the running overall parsing
covering.
Raises:
RuntimeError: If eager execution is enabled.
"""
if tf.executing_eagerly():
raise RuntimeError('Cannot aggregate when eager execution is enabled.')
input_args = [
tf.convert_to_tensor(groundtruth_categories, tf.uint16),
tf.convert_to_tensor(groundtruth_instances, tf.uint16),
tf.convert_to_tensor(predicted_categories, tf.uint16),
tf.convert_to_tensor(predicted_instances, tf.uint16),
tf.convert_to_tensor(num_classes, tf.int32),
tf.convert_to_tensor(max_instances_per_category, tf.int32),
tf.convert_to_tensor(ignored_label, tf.int32),
tf.convert_to_tensor(offset, tf.int32),
tf.convert_to_tensor(normalize_by_image_size, tf.bool),
]
return_types = [
tf.float64,
tf.float64,
]
with tf.variable_scope(name, 'streaming_parsing_covering', input_args):
covering_results = tf.py_func(
_parsing_covering_helper, input_args, return_types, stateful=False)
weighted_iou_per_class, gt_area_per_class = tuple(covering_results)
total_weighted_iou_per_class, updated_weighted_iou_per_class = (
_running_total(
weighted_iou_per_class, [num_classes],
name='weighted_iou_per_class_total'))
total_gt_area_per_class, updated_gt_area_per_class = _running_total(
gt_area_per_class, [num_classes], name='gt_area_per_class_total')
covering_per_class = _realdiv_maybe_zero(total_weighted_iou_per_class,
total_gt_area_per_class)
coverings = tf.stack([
covering_per_class,
total_weighted_iou_per_class,
total_gt_area_per_class,
],
axis=0)
update_ops = [updated_weighted_iou_per_class, updated_gt_area_per_class]
return coverings, update_ops | 7c41f5b0c1111287759cc03cdc2a0c8a932aba11 | 17,430 |
from typing import Union
def get_rxn_lookup(medObj:Union[m.Medication, m.LocalMed, m.NDC]):
"""
DEPRECATED
Lookup RxCUI for codes from a different source
:param medObj:
:return:
"""
if isinstance(medObj, m.RxCUI):
smores_error('TBD')
return 0, []
success_count, errors = 0, []
non_rxc_dict = medObj.get_cui_all(omit=['PARENT', 'RXNORM'], inc_obj=True)
_e = {}
if len(non_rxc_dict) > 0:
for src in non_rxc_dict:
_src_e = []
_src_s = 0
for medC, medO in non_rxc_dict[src].items():
rxc_l = medO.get_linked_cui('RXNORM')
for _o in rxc_l:
if _o is None:
_src_e.append(medC)
else:
_src_s += 1
medObj.add_cui(_o)
success_count += 1 if _src_s > 0 else 0
if len(_src_e) > 0:
_e[src] = _src_e
if len(_e) > 0:
errors = _e
return success_count, errors | bba03aa380666b13db89497c720a62570a2918d0 | 17,431 |
def identity(dims):
"""
Create an identity linear operator
:param dims: array of dimensions
"""
dims = expand_dims(dims)
return identity_create(dims) | 4d57c5a0da628c8f24de4f621728176714a4ab54 | 17,432 |
def _gen_samples_2d(enn_sampler: testbed_base.EpistemicSampler,
x: chex.Array,
num_samples: int,
categorical: bool = False) -> pd.DataFrame:
"""Generate posterior samples at x (not implemented for all posterior)."""
# Generate the samples
data = []
rng = hk.PRNGSequence(jax.random.PRNGKey(seed=0))
for seed in range(num_samples):
net_out = enn_sampler(x, next(rng))
y = jax.nn.softmax(net_out)[:, 1] if categorical else net_out[:, 0]
df = pd.DataFrame({'x0': x[:, 0], 'x1': x[:, 1], 'y': y, 'seed': seed})
data.append(df)
return pd.concat(data) | 19fc06700ae42b015694fc9389c05ad0caebf54d | 17,433 |
def rules(r_index, c_index, lives, some_board, duplicate_board):
"""Apply Conway's Rules to a board
Args:
r_index (int): Current row index
c_index (int): Current column index
lives (int): Number of ALIVE cells around current position
some_board (List of lists of strings): Board used to determine rule
duplicate_board (List of lists of strings): Board used to apply rule
Returns:
[List of lists of strings]: Board used to apply rule (modified board)
"""
if some_board[r_index][c_index] == ALIVE:
if lives < 2 or lives > 3:
duplicate_board[r_index][c_index] = DEAD
else:
if lives == 3:
duplicate_board[r_index][c_index] = ALIVE
return duplicate_board | f654a134be3eccad122720cd58f577a2d7e580d8 | 17,434 |
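ALIVE and DEAD are module-level constants not shown in the snippet; the stand-in values below are assumptions used only to demonstrate the rule for a cell with two live neighbours.

ALIVE, DEAD = "O", "."  # assumed representations
board = [[DEAD, ALIVE, DEAD],
         [DEAD, ALIVE, DEAD],
         [DEAD, ALIVE, DEAD]]
next_board = [row[:] for row in board]
# The centre cell is alive with 2 live neighbours, so it survives.
print(rules(1, 1, 2, board, next_board)[1][1])  # 'O'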
def get_describe_tasks(cluster_name, tasks_arns):
"""Get information about a list of tasks."""
return (
ecs_client()
.describe_tasks(cluster=cluster_name, tasks=tasks_arns)
.get("tasks", [])
) | 663cc2d2241aa3d75c8f2de35780ebe9a5d4ae31 | 17,435 |
def make_bcc110(latconst=1.0):
"""
Make a cell of bcc structure with z along [110].
"""
s= NAPSystem(specorder=_default_specorder)
#...lattice
a1= np.array([ 1.0, 0.0, 0.0 ])
a2= np.array([ 0.0, 1.414, 0.0 ])
a3= np.array([ 0.0, 0.0, 1.414 ])
s.set_lattice(latconst,a1,a2,a3)
symbol = _default_specorder[0]
symbols = [ symbol, symbol, symbol, symbol]
poss = [[0.00, 0.00, 0.00],
[0.00, 0.50, 0.50],
[0.50, 0.50, 0.00],
[0.50, 0.00, 0.50]]
vels = [ [0., 0., 0.] for i in range(4) ]
frcs = [ [0., 0., 0.] for i in range(4) ]
s.add_atoms(symbols, poss, vels, frcs)
return s | 187556e30b4e89718d4c8d1179579ba498062d26 | 17,436 |
def mode_mods_to_int(mode: str) -> int:
"""Converts mode_mods (str) to mode_mods (int)."""
# NOTE: This is a temporary function to convert the leaderboard mode to an int.
# It will be removed when the site is fully converted to use the new
# stats table.
for mode_num, mode_str in enumerate((
'vn_std', 'vn_taiko', 'vn_catch', 'vn_mania',
'rx_std', 'rx_taiko', 'rx_catch',
'ap_std'
)):
if mode == mode_str:
return mode_num
else:
return 0 | 0bfaa8cf04bcee9395dff719067be9753be075c4 | 17,437 |
def ntu_tranform_skeleton(test):
"""
:param test: frames of skeleton within a video sample
"""
remove_frame = False
test = np.asarray(test)
transform_test = []
d = test[0, 0:3]
v1 = test[0, 1 * 3:1 * 3 + 3] - test[0, 0 * 3:0 * 3 + 3]
v1 = v1 / np.linalg.norm(v1)
v2_ = test[0, 12 * 3:12 * 3 + 3] - test[0, 16 * 3:16 * 3 + 3]
if np.equal(np.sum(v2_), 0):
v2_ += 1e-6
proj_v2_v1 = np.dot(v1.T, v2_) * v1 / np.linalg.norm(v1)
v2 = v2_ - np.squeeze(proj_v2_v1)
v2 = v2 / np.linalg.norm(v2)
v3 = np.cross(v2, v1) / np.linalg.norm(np.cross(v2, v1))
v1 = np.reshape(v1, (3, 1))
v2 = np.reshape(v2, (3, 1))
v3 = np.reshape(v3, (3, 1))
R = np.hstack([v2, v3, v1])
for i in range(test.shape[0]):
xyzs = []
for j in range(25):
if test[i][j * 3:j * 3 + 3].all() == 0:
remove_frame = True
break
xyz = np.squeeze(np.matmul(np.linalg.inv(R), np.reshape(test[i][j * 3:j * 3 + 3] - d, (3, 1))))
xyzs.append(xyz)
if not remove_frame:
xyzs = np.reshape(np.asarray(xyzs), (-1, 75))
transform_test.append(xyzs)
else:
remove_frame = False
transform_test = np.squeeze(np.asarray(transform_test))
return transform_test.tolist() | 6f8e9e3ff0b6fa95b5f3b8c22aef2de05730a78c | 17,438 |
import random
import time
import requests
import pandas as pd


def request_to_dataframe(UF):
    """Receives a state (UF) string; returns a DataFrame with the state's CEP (postal code) ranges."""
#Try to load the proxy list. If after several attempts it still doesn't work, raise an exception and quit.
proxy_pool = proxy_list_to_cycle()
#Set initial values for post request's parameters.
pagini = 1
pagfim = 50
count = 1
while True:
#random sleep times to decrease the chances of being blocked.
num1 = random.randint(2,5)
time.sleep(num1)
try:
#select_proxy from proxy pool.
proxy = next(proxy_pool)
print(f"Proxy atual: {proxy}")
#Define o post Field de acordo com a página Atual. Para a primeira página os campos "Bairro", "qtdrow", "pagini", "pagfim" não são considerados.
if count == 1:
post_fields = {"UF":UF, "Localidade":""}
full_dataframe = pd.DataFrame()
else:
post_fields = {"UF": UF, "Localidade":"**", "Bairro":"", "qtdrow":"50", "pagini":str(pagini),"pagfim": str(pagfim)}
#Makes the post request
request = make_post_request(post_fields, proxy)
            # Extract the table with the CEP ranges from the HTML. On the first page the
            # content is at page-content index 1, otherwise it is at index 0.
if count == 1:
UF_table = request_text_to_table(request = request, page_content_index = 1)
else:
UF_table = request_text_to_table(request = request, page_content_index = 0)
except requests.exceptions.ProxyError:
print("")
print(f"Error with the proxy: {proxy}")
print(f"Proxies left: {proxy_pool}")
print("Tentando novamente")
print("")
continue
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError) as err:
print("")
            print('Server taking too long to respond')
            print("Trying again")
print("")
continue
except Exception as e:
print("")
print(e)
proxy_pool = proxy_list_to_cycle()
continue
#Turning the table into a dataframe.
current_page_df = table_to_df(UF_table)
#Concat DataFrames for each page into one DataFrame
full_dataframe = pd.concat([full_dataframe, current_page_df])
print(f"Total de dados coletados sobre o Estado {UF}: {full_dataframe.shape[0]} ")
#Sair do loop de post requests para o estado atual se chegamos na última página.
if current_page_df.shape[0] < 49:
print(f"Última página do estado:{UF}")
break
#Incrementa o número da página e o contador de página.
pagini += 50
pagfim += 50
count = count + 1
return full_dataframe | f71de0ec169f375fff1fba87d55aa8021b851990 | 17,439 |
import csv
def read_sto_mot_file(filename):
"""
    Read an .sto or .mot file from OpenSim
    Parameters
    ----------
    filename: path
        Path of the file which has to be read
    Returns
    -------
    Data dictionary with the file information
"""
data = {}
data_row = []
first_line = ()
end_header = False
with open(f"{filename}", "rt") as f:
reader = csv.reader(f)
for idx, row in enumerate(reader):
if len(row) == 0:
pass
elif row[0][:9] == "endheader":
end_header = True
first_line = idx + 1
elif end_header is True and row[0][:9] != "endheader":
row_list = row[0].split("\t")
if idx == first_line:
names = row_list
else:
data_row.append(row_list)
for r in range(len(data_row)):
for col in range(len(names)):
if r == 0:
data[f"{names[col]}"] = [float(data_row[r][col])]
else:
data[f"{names[col]}"].append(float(data_row[r][col]))
return data | 584cff26cb217d5fadfcea025ad58e431f46676a | 17,440 |
def verify_cef_labels(device, route, expected_first_label, expected_last_label=None, max_time=90,
check_interval=10):
""" Verify first and last label on route
Args:
device ('obj'): Device object
route ('str'): Route address
expected_first_label ('str'): Expected first label
expected_last_label ('str'): Expected last label
max_time ('int'): Max time in seconds checking output
check_interval ('int'): Interval in seconds of each checking
Return:
True/False
Raises:
None
"""
reqs = R(
[
'vrf',
'(.*)',
'address_family',
'(.*)',
'prefix',
'(.*{}.*)'.format(route),
'nexthop',
'(.*)',
'outgoing_interface',
'(.*)',
'(?P<val>.*)'
]
)
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
result = True
out = None
try:
out = device.parse('show ip cef {}'.format(route))
except SchemaEmptyParserError:
out = None
if not out:
result = False
log.info('Could not get information about show ip cef {}'.format(route))
timeout.sleep()
continue
found = find([out], reqs, filter_=False, all_keys=True)
if found:
keys = GroupKeys.group_keys(reqs=reqs.args, ret_num={},
source=found)
for item in keys:
first_label = item.get('val',{}).get('outgoing_label', None)
if first_label and str(expected_first_label) not in str(first_label):
result = False
if expected_last_label:
sid = item.get('val',{}).get('sid', None)
if str(expected_last_label) != str(sid):
result = False
if result:
return True
timeout.sleep()
return False | c082920d0c93ec0c2897dc5a06c9d9d9452151af | 17,441 |
def fcat(*fs):
"""Concatenate a sequence of farrays.
The variadic *fs* input is a homogeneous sequence of functions or arrays.
"""
items = list()
for f in fs:
if isinstance(f, boolfunc.Function):
items.append(f)
elif isinstance(f, farray):
items.extend(f.flat)
else:
raise TypeError("expected Function or farray")
return farray(items) | 440a850ed17b8fc844cafaa765b24620a29fa0fd | 17,442 |
def get_path_to_spix(
name: str,
data_directory: str,
thermal: bool,
error: bool = False,
file_ending: str = "_6as.fits",
) -> str:
"""Get the path to the spectral index
Args:
name (str): Name of the galaxy
data_directory (str): dr2 data directory
thermal (bool): non thermal data
error (bool): path to error
file_ending (str, optional): File ending. Defaults to ".fits".
Returns:
str: [description]
"""
return f"{data_directory}/magnetic/{name}/{name}_spix{'_non_thermal' if thermal else ''}{'_error' if error else ''}{file_ending}" | bf8fdff001049ed0738ed856e8234c43ce4511b7 | 17,443 |
import numpy as np


def hexpos(nfibres, diam):
"""
Returns a list of [x,y] positions for a classic packed hex IFU configuration.
"""
positions = [[np.nan,np.nan] for i in range(nfibres)]
# FIND HEX SIDE LENGTH
nhex = 1
lhex = 1
while nhex < nfibres :
lhex += 1
nhex = 3*lhex**2-3*lhex+1
if nhex != nfibres:
lhex -= 1
nhex = 3*lhex**2-3*lhex+1
nextra = nfibres-nhex
n = 0
khex = 2*lhex-1 # NUMBER OF FIBRES IN THE CENTRAL ROW
xhex = (-khex//2)*diam
for i in range(khex) : # CENTRAL ROW
x = xhex+diam*i
positions[n] = [int(x*100)/100,0.]
n += 1
dx = 0.5*diam
dy = diam*np.sqrt(3./4.)
for i in range(1,lhex,1) : # FOR ALL ROWS PAIRS i
khex -= 1 # EACH ROW HAS 1 LESS THAN THE PREVIOUS
xhex += dx
for j in range(khex) : # FOR ALL FIBRES j IN ROWS i
x = xhex+diam*j
y = dy*i
positions[n] = [int(x*100)/100, int(y*100)/100]
positions[n+1] = [int(x*100)/100,-int(y*100)/100]
n += 2
return positions | 4dbf1209d7021c6a4defd1c58e420b362bdbf84c | 17,444 |
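A short usage sketch (not from the original source): seven fibres of unit diameter fill the smallest complete hex, a centre row of three plus one short row above and below.

for x, y in hexpos(7, 1.0):
    print(x, y)  # seven [x, y] fibre centres, in units of the fibre diameter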
from bs4 import BeautifulSoup
def parse_object_properties(html):
"""
Extract key-value pairs from the HTML markup.
"""
if isinstance(html, bytes):
html = html.decode('utf-8')
page = BeautifulSoup(html, "html5lib")
propery_ps = page.find_all('p', {'class': "list-group-item-text"})
obj_props_dict = {}
for p in propery_ps:
if 'data-name' in p.attrs:
key = p.attrs['data-name']
value = p.get_text().strip()
obj_props_dict[key] = value
return obj_props_dict | 8eb2d15cb5f46075ec44ff61265a8f70123a8646 | 17,445 |
def rgb2hex(r, g, b, normalised=False):
"""Convert RGB to hexadecimal color
:param: can be a tuple/list/set of 3 values (R,G,B)
    :return: a hex version of the RGB 3-tuple
.. doctest::
>>> from colormap.colors import rgb2hex
>>> rgb2hex(0,0,255, normalised=False)
'#0000FF'
>>> rgb2hex(0,0,1, normalised=True)
'#0000FF'
.. seealso:: :func:`hex2web`, :func:`web2hex`, :func:`hex2rgb`
, :func:`rgb2hsv`, :func:`hsv2rgb`, :func:`rgb2hls`,
:func:`hls2rgb`
"""
if normalised:
r, g, b = _denormalise(r, g, b, mode="rgb")
r = int(r)
g = int(g)
b = int(b)
check_range(r, 0, 255)
check_range(g, 0, 255)
check_range(b, 0, 255)
return '#%02X%02X%02X' % (r, g, b) | 03afd09cc280d7731ca6b28098cf3f5605fddda7 | 17,446 |
def test_hookrelay_registry(pm):
"""Verify hook caller instances are registered by name onto the relay
and can be likewise unregistered."""
class Api:
@hookspec
def hello(self, arg):
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert repr(hook.hello).find("hello") != -1
class Plugin:
@hookimpl
def hello(self, arg):
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
assert not hasattr(hook, "world")
pm.unregister(plugin)
assert hook.hello(arg=3) == [] | 5f7733efbdbaf193b483c108838d2571ff686e52 | 17,447 |
def model_choices_from_protobuf_enum(protobuf_enum):
"""Protobufs Enum "items" is the opposite order djagno requires"""
return [(x[1], x[0]) for x in protobuf_enum.items()] | d3f5431293a9ab3fdf9a92794b1225a0beec40cc | 17,448 |
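A hedged illustration with a stand-in object: real protobuf enum wrappers expose items() as (name, number) pairs, which this fake mimics.

class FakeEnum:
    """Stand-in for a protobuf enum exposing items() as (name, number) pairs."""
    @staticmethod
    def items():
        return [("ACTIVE", 1), ("INACTIVE", 2)]

print(model_choices_from_protobuf_enum(FakeEnum))  # [(1, 'ACTIVE'), (2, 'INACTIVE')]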
import os
def load_boxes_and_labels(cfg, mode):
"""
Loading boxes and labels from csv files.
Args:
cfg (CfgNode): config.
mode (str): 'train', 'val', or 'test' mode.
Returns:
all_boxes (dict): a dict which maps from `video_name` and
`frame_sec` to a list of `box`. Each `box` is a
[`box_coord`, `box_labels`] where `box_coord` is the
coordinates of box and 'box_labels` are the corresponding
labels for the box.
"""
gt_lists = cfg.AVA.TRAIN_GT_BOX_LISTS if mode == "train" else []
pred_lists = (
cfg.AVA.TRAIN_PREDICT_BOX_LISTS
if mode == "train"
else cfg.AVA.TEST_PREDICT_BOX_LISTS
)
ann_filenames = [
os.path.join(cfg.AVA.ANNOTATION_DIR, filename)
for filename in gt_lists + pred_lists
]
ann_is_gt_box = [True] * len(gt_lists) + [False] * len(pred_lists)
detect_thresh = cfg.AVA.DETECTION_SCORE_THRESH
all_boxes = {}
count = 0
unique_box_count = 0
for filename, is_gt_box in zip(ann_filenames, ann_is_gt_box):
with PathManager.open(filename, "r") as f:
for line in f:
row = line.strip().split(",")
# When we use predicted boxes to train/eval, we need to
# ignore the boxes whose scores are below the threshold.
if not is_gt_box:
score = float(row[7])
if score < detect_thresh:
continue
video_name, frame_sec = row[0], int(row[1])
# Only select frame_sec % 4 = 0 samples for validation if not
# set FULL_TEST_ON_VAL.
if (
mode == "val"
and not cfg.AVA.FULL_TEST_ON_VAL
and frame_sec % 4 != 0
):
continue
# Box with format [x1, y1, x2, y2] with a range of [0, 1] as float.
box_key = ",".join(row[2:6])
box = list(map(float, row[2:6]))
label = -1 if row[6] == "" else int(row[6])
if video_name not in all_boxes:
all_boxes[video_name] = {}
for sec in AVA_VALID_FRAMES:
all_boxes[video_name][sec] = {}
if box_key not in all_boxes[video_name][frame_sec]:
all_boxes[video_name][frame_sec][box_key] = [box, []]
unique_box_count += 1
all_boxes[video_name][frame_sec][box_key][1].append(label)
if label != -1:
count += 1
for video_name in all_boxes.keys():
for frame_sec in all_boxes[video_name].keys():
# Save in format of a list of [box_i, box_i_labels].
all_boxes[video_name][frame_sec] = list(
all_boxes[video_name][frame_sec].values()
)
logger.info(
"Finished loading annotations from: %s" % ", ".join(ann_filenames)
)
logger.info("Detection threshold: {}".format(detect_thresh))
logger.info("Number of unique boxes: %d" % unique_box_count)
logger.info("Number of annotations: %d" % count)
return all_boxes | 9ea7f476474f834fcfb70700cc269b4abf8d5c33 | 17,449 |
from sklearn.cluster import KMeans


def kmeans(boxes, k):
"""
Group into k clusters the BB in boxes.
http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans
:param boxes: The BB in format Nx4 where (x1,y1,x2,y2)
:param k: the number of clusters.
:return: k clusters with the element indexes of each clusters.
"""
model = KMeans(n_clusters=k).fit(boxes)
pred = model.labels_
indexes = [[]] * k
for i, v in enumerate(pred):
indexes[v] = indexes[v] + [i]
return indexes | 0d2bcfb2fb7d5639f95db92ac5aa5e73b1b27498 | 17,450 |
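A usage sketch with synthetic boxes (assumes scikit-learn and NumPy are installed); the cluster ordering can vary between runs.

import numpy as np

boxes = np.array([
    [0, 0, 10, 10], [1, 1, 11, 11], [2, 0, 12, 10],        # small boxes near the origin
    [50, 50, 80, 80], [52, 51, 82, 83], [49, 48, 79, 81],  # large boxes far away
])
print(kmeans(boxes, k=2))  # e.g. [[0, 1, 2], [3, 4, 5]]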
def observation_min_max_in_hex_grid_json(request: HttpRequest):
"""Return the min, max observations count per hexagon, according to the zoom level. JSON format.
This can be useful to dynamically color the grid according to the count
"""
zoom = extract_int_request(request, "zoom")
species_ids, datasets_ids, start_date, end_date, area_ids = filters_from_request(
request
)
sql_template = readable_string(
Template(
"""
WITH grid AS ($jinjasql_fragment_aggregated_grid)
SELECT MIN(count), MAX(count) FROM grid;
"""
).substitute(
jinjasql_fragment_aggregated_grid=JINJASQL_FRAGMENT_AGGREGATED_GRID
)
)
sql_params = {
"hex_size_meters": ZOOM_TO_HEX_SIZE[zoom],
"grid_extent_viewport": False,
"species_ids": species_ids,
"datasets_ids": datasets_ids,
"area_ids": area_ids,
}
if start_date:
sql_params["start_date"] = start_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
if end_date:
sql_params["end_date"] = end_date.strftime(DB_DATE_EXCHANGE_FORMAT_PYTHON)
j = JinjaSql()
query, bind_params = j.prepare_query(sql_template, sql_params)
with connection.cursor() as cursor:
cursor.execute(query, bind_params)
r = cursor.fetchone()
return JsonResponse({"min": r[0], "max": r[1]}) | 24a3f4846aceea2df0b724d6bada88315b815ee2 | 17,451 |
import os
def is_path(value, default="", expand=None):
"""Parse a value as a path
Parameters
----------
expand:
expandvars and expandhome on loaded path
**Warning: expand currently can't work with interpolation**
"""
# TODO: fix interpolation and expand !
if str(value) == "None":
return None
if value == "":
value = default
if expand and isinstance(value, str):
return os.path.expandvars(os.path.expanduser(value))
return value | d2d369f35e6cc71dbdc3cd955456aeecb375fee6 | 17,452 |
import subprocess
import os
def find_suites():
"""
Return a dict of suitename and path, e.g.
{"heat_equation": /home/safl/bechpress/suites/cpu/heat_equation.py"}
"""
p = subprocess.Popen(
["bp-info", "--suites"],
stdout = subprocess.PIPE,
stderr = subprocess.PIPE
)
out, err = p.communicate()
    suitesdir = out.decode().strip()  # communicate() returns bytes
if err:
raise Exception("Error when trying to find suites-dir.")
suites = {}
for root, dirs, files in os.walk(suitesdir):
for filename in files:
if "__init__" in filename:
continue
if not filename.endswith(".py"):
continue
suitepath = os.sep.join([root, filename])
suitename = os.path.splitext(filename)[0]
suites[suitename] = suitepath
return (suitesdir, suites) | f3303f48b79d3ca4d172f272b7243629557bb9cc | 17,453 |
from bs4 import BeautifulSoup
def parseHtml(html):
"""
    Parse HTML with BeautifulSoup
    Parameters
    ----------
    html : str
        HTML source string
    Returns
    -------
    soup : BeautifulSoup
        BeautifulSoup object
"""
soup = BeautifulSoup(html, 'html.parser')
return soup | e8d7a39a9881606d1dfee810ab1c2cecd11eaba2 | 17,454 |
import numpy as np
from scipy.stats import entropy


def am_score(probs_data, probs_gen):
"""
Calculate AM Score
"""
mean_data = np.mean(probs_data, axis=0)
mean_gen = np.mean(probs_gen, axis=0)
entropy_gen = np.mean(entropy(probs_gen, axis=1))
am_score = entropy(mean_data, mean_gen) + entropy_gen
return am_score | 5e3c3f42ed2402dd2e48ab1ff4f9ff13754d5c31 | 17,455 |
import cv2
import numpy as np
import torch
def load_image(path_image, size=None, bgr_mean=[103.939, 116.779, 123.68]):
"""
Loads and pre-process the image for SalGAN model.
args:
path_image: abs path to image
size: size to input to the network (it not specified, uses SalGAN predifined)
bgr_mean: mean values (BGR) to extract from images
returns:
torch tensor with processed image
original size of the image
"""
# image = cv2.imread(path_image)
image = cv2.imread(path_image) # BGR format
H, W, C = image.shape
if size is None:
size = SALGAN_RESIZE
image = cv2.resize(image, (size[1], size[0]), interpolation=cv2.INTER_AREA)
image = image.astype(np.float32)
bgr_mean=np.array(bgr_mean)
image -= bgr_mean
# convert to torch Tensor
image = torch.FloatTensor(image)
# swap channel dimensions
image = image.permute(2,0,1)
return image, (H, W) | 3a9ca220bb48f26d76ae35fd58897c8e59cdae0c | 17,456 |
def GetWsdlNamespace(version):
""" Get wsdl namespace from version """
return "urn:" + serviceNsMap[version] | bc75fa0e45c4ce4750898db75571de84aa302fc2 | 17,457 |
def is_PC(parcels):
"""
Dummy for Pinal County.
"""
return (parcels.county == 'PC').astype(int) | 60aa7dcc7adaefee177406c7e6bb963a5a4567d9 | 17,458 |
import hashlib
import requests
def check_password(password: str) -> int:
"""Use Have I Been Pwned to determine whether a password is bad.
If the request fails, this function will assume the password is fine, but
log an error so that administrators can diagnose it later.
:param password: The password to validate.
:return: A positive integer indicating the number of times the password has
been found in a breach. Zero is good, >0 is bad.
"""
sha1_hash = hashlib.sha1()
sha1_hash.update(password.encode("utf-8"))
digest = sha1_hash.hexdigest()
digest = digest.upper()
response = requests.get("https://api.pwnedpasswords.com/range/" + digest[0:5])
if response.status_code != 200:
# The docs say this shouldn't happen, but just in case.
return 0
return suffix_in_text(digest[5:], response.text) | 609dd29ee2b252452e31d64b18e835a39e1cbf22 | 17,459 |
def rqpos(A):
"""
RQ decomp. of A, with phase convention such that R has only positive
elements on the main diagonal.
If A is an MPS tensor (d, chiL, chiR), it is reshaped and
transposed appropriately
before the throughput begins. In that case, Q will be a tensor
of the same size, while R will be a chiL x chiL matrix.
"""
Ashp = A.shape
if len(Ashp) == 2:
return rqmat(A)
elif len(Ashp) != 3:
print("A had invalid dimensions, ", A.shape)
A = fuse_right(A) #chiL, d*chiR
R, Q = qrmat(A, mode="economic")
Q = unfuse_right(Q, Ashp)
return (Q, R) | 026629b6638265daee83e8d8b5ab5b47b61e64d8 | 17,460 |
import torch
from collections import OrderedDict
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
show_model_arch=True,
print_keys=True):
""" Note that official pre-trained models use `GroupNorm` in backbone.
"""
if not osp.isfile(filename):
raise IOError('{} is not a checkpoint file'.format(filename))
checkpoint = torch.load(filename, map_location=map_location)
# get state_dict from checkpoint
if isinstance(checkpoint, OrderedDict):
state_dict = checkpoint
elif isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
raise RuntimeError(
'No state_dict found in checkpoint file {}'.format(filename))
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('backbone.'):
state_dict = {}
for k, v in checkpoint['state_dict'].items():
new_k = k
if 'bbox_head.' in new_k:
if 'deconv_layers' in new_k:
new_k = new_k.replace("conv_offset_mask.", "conv_offset.")
new_k = new_k.replace("bbox_head.deconv_layers.", "neck.upsamples.")
if '.0.0.' in new_k:
new_k = new_k.replace(".0.0.", ".0.dcn.")
if '.0.1.' in new_k:
new_k = new_k.replace(".0.1.", ".0.dcn_bn.")
if '.1.0.' in new_k:
new_k = new_k.replace(".1.0.", ".1.dcn.")
if '.1.1.' in new_k:
new_k = new_k.replace(".1.1.", ".1.dcn_bn.")
if '.2.0.' in new_k:
new_k = new_k.replace(".2.0.", ".2.dcn.")
if '.2.1.' in new_k:
new_k = new_k.replace(".2.1.", ".2.dcn_bn.")
if '.shortcut_layers.' in new_k:
new_k = new_k.replace("bbox_head.shortcut_layers.", "neck.shortcuts.")
new_k = new_k.replace(".layers.", ".")
if '.hm.' in new_k:
new_k = new_k.replace(".hm.", ".ct_hm_head.")
if '.wh.' in new_k:
new_k = new_k.replace(".wh.", ".ct_wh_head.")
if print_keys:
print('> key = ', k, ' -> ', new_k)
state_dict[new_k] = v
if show_model_arch:
print('> model = ', model)
# load state_dict
if hasattr(model, 'module'):
load_state_dict(model.module, state_dict, strict, logger)
else:
load_state_dict(model, state_dict, strict, logger)
return checkpoint | 1d948f45f81c93af73394c891dc7e692c24378b3 | 17,461 |
import numpy as np


def basic_image_2():
    """
    A 20x20 array with a square (3x3) feature
Equivalent to results of rasterizing basic_geometry with all_touched=True.
Borrowed from rasterio/tests/conftest.py
Returns
-------
numpy ndarray
"""
image = np.zeros((20, 20), dtype=np.uint8)
image[2:5, 2:5] = 1
return image | 8e83070721b38f2a886c7affb4aadc9a053f1748 | 17,462 |
def download(url, verbose, user_agent='wswp', num_retries=2, decoding_format='utf-8', timeout=5):
"""
Function to download contents from a given url
Input:
url: str
string with the url to download from
user_agent: str
Default 'wswp'
num_retries: int
Number of times to retry downloading
if there is an error
verbose: bool
Print out url and errors
decoding: "utf-8"
Output:
returns: str
string with contents of given url
"""
# html_error = False
if verbose:
print('Downloading:', url)
headers = {'User-agent': user_agent}
request_obj = request.Request(url, headers=headers)
try:
with request.urlopen(request_obj, timeout=timeout) as response:
html = response.read()
except error.URLError as e:
if verbose:
print('Download error:', e.reason)
# html = None
# if num_retries > 0:
# if hasattr(e, 'code') and 500 <= e.code < 600:
# # retry 5XX HTTP errors
# return download(url, user_agent, num_retries - 1)[0]
# # elif hasattr(e, 'code') and e.code == 404:
# else:
# html_error = True
raise IOError(e.reason)
return html.decode(decoding_format) | 31592018b6f6f62154444dfc44b723efc1bd7f47 | 17,463 |
from typing import Union
from typing import List
def _write_deform(model: Union[BDF, OP2Geom], name: str,
loads: List[AEROS], ncards: int,
op2_file, op2_ascii, endian: bytes, nastran_format: str='nx') -> int:
"""
(104, 1, 81)
NX 2019.2
Word Name Type Description
1 SID I Deformation set identification number
2 EID I Element number
3 D RS Deformation
"""
key = (104, 1, 81)
nfields = 3
structi = Struct(endian + b'iif')
nbytes = write_header(name, nfields, ncards, key, op2_file, op2_ascii)
for load in loads:
data = [load.sid, load.eid, load.deformation]
#flutter = model.loads[flutter_id] # type: FLUTTER
#print(flutter.get_stats())
assert None not in data, data
op2_ascii.write(f' DEFORM data={data}\n')
op2_file.write(structi.pack(*data))
return nbytes | 55f2cb18336a940c550ee68bd5148c8d74f5bb93 | 17,464 |
def polygonize(geometries, **kwargs):
"""Creates polygons formed from the linework of a set of Geometries.
Polygonizes an array of Geometries that contain linework which
represents the edges of a planar graph. Any type of Geometry may be
provided as input; only the constituent lines and rings will be used to
create the output polygons.
Lines or rings that when combined do not completely close a polygon
will result in an empty GeometryCollection. Duplicate segments are
ignored.
This function returns the polygons within a GeometryCollection.
Individual Polygons can be obtained using ``get_geometry`` to get
a single polygon or ``get_parts`` to get an array of polygons.
MultiPolygons can be constructed from the output using
``pygeos.multipolygons(pygeos.get_parts(pygeos.polygonize(geometries)))``.
Parameters
----------
geometries : array_like
An array of geometries.
axis : int
Axis along which the geometries are polygonized.
The default is to perform a reduction over the last dimension
of the input array. A 1D array results in a scalar geometry.
**kwargs
For other keyword-only arguments, see the
`NumPy ufunc docs <https://numpy.org/doc/stable/reference/ufuncs.html#ufuncs-kwargs>`_.
Returns
-------
GeometryCollection or array of GeometryCollections
See Also
--------
get_parts, get_geometry
polygonize_full
Examples
--------
>>> lines = [
... Geometry("LINESTRING (0 0, 1 1)"),
... Geometry("LINESTRING (0 0, 0 1)"),
... Geometry("LINESTRING (0 1, 1 1)"),
... ]
>>> polygonize(lines)
<pygeos.Geometry GEOMETRYCOLLECTION (POLYGON ((1 1, 0 0, 0 1, 1 1)))>
"""
return lib.polygonize(geometries, **kwargs) | 20b883734a1acedb1df3241e1815687640cac8cd | 17,465 |
import numpy as np
def slerp(input_latent1, input_latent2, interpolation_frames=100):
"""Spherical linear interpolation ("slerp", amazingly enough).
Parameters
----------
input_latent1, input_latent2 : NumPy arrays
Two arrays which will be interpolated between.
interpolation_frames : int, optional
Number of frame returned during interpolation.
Returns
-------
list
List of vectors of size interpolation_frames
"""
output_latents = []
for idx in range(interpolation_frames):
val = float(idx) / interpolation_frames
if np.allclose(input_latent1, input_latent2):
output_latents += [input_latent2]
continue
omega = np.arccos(np.dot(input_latent1 / np.linalg.norm(input_latent1), input_latent2 / np.linalg.norm(input_latent2)))
so = np.sin(omega)
output_latents += [np.sin((1.0 - val) * omega) / so * input_latent1 + np.sin(val * omega) / so * input_latent2]
return output_latents | 392b2e61f3369cf1e4038fac4240dca36f848dce | 17,466 |
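A quick sketch interpolating between two random latent vectors (the 512-dim size is an arbitrary choice):
import numpy as np

rng = np.random.default_rng(0)
z1 = rng.standard_normal(512)
z2 = rng.standard_normal(512)
frames = slerp(z1, z2, interpolation_frames=10)
print(len(frames), frames[0].shape)  # 10 frames, the first equal to z1, each of shape (512,)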
import datetime
def parent_version_config():
"""Return a configuration for an experiment."""
config = dict(
_id="parent_config",
name="old_experiment",
version=1,
algorithms="random",
metadata={
"user": "corneauf",
"datetime": datetime.datetime.utcnow(),
"user_args": ["--x~normal(0,1)"],
},
)
backward.populate_space(config)
return config | ff1f123ce06d687eb3b0031d6bc82c808918c46e | 17,467 |
import re
def sanitize_k8s_name(name):
"""From _make_kubernetes_name
sanitize_k8s_name cleans and converts the names in the workflow.
"""
return re.sub('-+', '-', re.sub('[^-0-9a-z]+', '-', name.lower())).lstrip('-').rstrip('-') | edaf6dc3083f0b57aeb1d95a66b5a7f8c1347b55 | 17,468 |
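For example, spaces, underscores, and punctuation collapse to single dashes:
print(sanitize_k8s_name("My Pipeline_Step 01!"))  # my-pipeline-step-01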
def main():
""" Process command line arguments and run x86 """
run = X86Run()
result = run.Run()
return result | 7de61875207aa17bcf2ef87ff138540626fc7d2b | 17,469 |
def gen_key(uid, section='s'):
"""
Generate store key for own user
"""
return f'cs:{section}:{uid}'.encode() | 5e6386650f6bbaef681636424fd813f2df93fe58 | 17,470 |
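A couple of example keys produced by this helper:
print(gen_key(42))             # b'cs:s:42'
print(gen_key("abc", "meta"))  # b'cs:meta:abc'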
import logging
import numpy as np

logger = logging.getLogger(__name__)

def convert_atom_to_voxel(coordinates: np.ndarray, atom_index: int,
box_width: float, voxel_width: float) -> np.ndarray:
"""Converts atom coordinates to an i,j,k grid index.
This function offsets molecular atom coordinates by
(box_width/2, box_width/2, box_width/2) and then divides by
voxel_width to compute the voxel indices.
Parameters
-----------
coordinates: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3).
atom_index: int
Index of an atom in the molecule.
box_width: float
Size of the box in Angstroms.
voxel_width: float
Size of a voxel in Angstroms
Returns
-------
indices: np.ndarray
A 1D numpy array of length 3 with `[i, j, k]`, the voxel coordinates
of specified atom.
"""
indices = np.floor(
(coordinates[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
logger.warning('Coordinates are outside of the box (atom id = %s,'
' coords xyz = %s, coords in box = %s' %
(atom_index, coordinates[atom_index], indices))
return indices | 6f08b594f2012aa0ba4a7985d5f4e2049c4629d3 | 17,471 |
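A small sketch with made-up coordinates, using a 16 Angstrom box and 1 Angstrom voxels:
import numpy as np

coords = np.array([[0.0, 0.0, 0.0],
                   [4.2, -3.1, 7.9]])
print(convert_atom_to_voxel(coords, 0, box_width=16.0, voxel_width=1.0))  # [8 8 8]
print(convert_atom_to_voxel(coords, 1, box_width=16.0, voxel_width=1.0))  # [12 4 15]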
import platform
import sys
def get_toolset_url():
"""URL of a platform specific Go toolset archive."""
# TODO(vadimsh): Support toolset for cross-compilation.
arch = {
'amd64': 'x86-64',
'x86_64': 'x86-64',
'i386': 'x86-32',
'x86': 'x86-32',
}.get(platform.machine().lower())
variant = TOOLSET_VARIANTS.get((sys.platform, arch))
if not variant:
# TODO(vadimsh): Compile go lang from source.
raise Failure('Unrecognized platform')
return '%s/%s.%s' % (DOWNLOAD_URL_PREFIX, TOOLSET_VERSION, variant) | d88d8407775e9d0ca1ee80fee076046ae7ca7e44 | 17,472 |
import matplotlib.pyplot as plt
import wandb
from scipy.stats import norm
from sklearn.metrics import auc, det_curve
def plot_det_curve(y_true_arr, y_pred_proba_arr, labels_arr, pos_label=None, plot_thres_for_idx=None,
log_wandb=False):
"""Function for plotting DET curve
Args:
y_true_arr (list/np.array): list of all GT arrays
y_pred_proba_arr (list/np.array): list of all predicted probabilities
labels_arr (list/np.array): list of labels
pos_label (str, optional): What is the label of the positive class. Defaults to 'Yes'.
plot_thres_for_idx (int, optional): If true, best threshold (F1) is plotted
for the DET curve corresponding to this index. Defaults to None.
log_wandb (bool, optional): If true, figure is logged to W&B. Defaults to False.
Returns:
plt.Figure, plt.Axes: The tuple of figure and axes
"""
fig, ax = plt.subplots(figsize=(12, 8))
for i, (y_true, y_pred_proba) in enumerate(zip(y_true_arr, y_pred_proba_arr)):
fpr, fnr, _ = det_curve(
y_true, y_pred_proba[:, 1], pos_label=pos_label)
auc_score = auc(fpr, fnr)
ax.plot(norm.ppf(fpr), norm.ppf(fnr),
label=f'{labels_arr[i]} (AUC - {round(auc_score, 3)})')
if plot_thres_for_idx is not None:
y_true = y_true_arr[plot_thres_for_idx]
y_pred_proba = y_pred_proba_arr[plot_thres_for_idx]
_, idx = get_best_threshold_gmean(
y_true, y_pred_proba, pos_label=pos_label)
fpr, fnr, _ = det_curve(
y_true, y_pred_proba[:, 1], pos_label=pos_label)
ax.plot([norm.ppf(fpr[idx])], [norm.ppf(fnr[idx])], '-o',
c=ax.lines[plot_thres_for_idx].get_color(),
label=f'Best {labels_arr[plot_thres_for_idx]} Threshold (GMean)')
ax.set_xlabel('False Positive Rate')
ax.set_ylabel('False Negative Rate')
ax.set_title('DET Curve')
ax.legend()
ax.grid()
ticks = [0.001, 0.01, 0.05, 0.20, 0.5, 0.80, 0.95, 0.99, 0.999]
tick_locations = norm.ppf(ticks)
tick_labels = [
'{:.0%}'.format(s) if (100*s).is_integer() else '{:.1%}'.format(s)
for s in ticks
]
ax.set_xticks(tick_locations)
ax.set_xticklabels(tick_labels)
ax.set_yticks(tick_locations)
ax.set_yticklabels(tick_labels)
if log_wandb:
wandb.log({"det_curve": [wandb.Image(fig)]})
plt.close(fig)
return fig, ax | 0437d700a9555b48b84cbb6e225bc88f1a57e34d | 17,473 |
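A sketch comparing two hypothetical models on made-up probabilities (no W&B logging, no best-threshold marker):
import numpy as np

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
p_a = np.array([0.1, 0.4, 0.8, 0.7, 0.9, 0.3, 0.6, 0.2])
p_b = np.array([0.3, 0.2, 0.6, 0.9, 0.7, 0.4, 0.8, 0.1])
fig, ax = plot_det_curve(
    [y_true, y_true],
    [np.column_stack([1 - p_a, p_a]), np.column_stack([1 - p_b, p_b])],
    ["model A", "model B"],
    pos_label=1,
)
fig.savefig("det_curve.png")  # hypothetical output path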
import librosa
def harmonic_separation(audio, margin=3.0):
"""
Wraps librosa's `harmonic` function, and returns a new Audio object.
Note that this folds to mono.
Parameters
---------
audio : Audio
The Audio object to act on.
margin : float
The larger the margin, the larger the separation.
The default is `3.0`.
"""
harmonic = librosa.effects.harmonic(
librosa.to_mono(audio.raw_samples), margin=margin
)
harmonic_audio = Audio(raw_samples=harmonic, sample_rate=audio.sample_rate)
return harmonic_audio | 3ac3e0d87f719814ca021f594a21dde08e9fd02f | 17,474 |
def merge(
left,
right,
how: str = "inner",
on=None,
left_on=None,
right_on=None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes=("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate=None,
): # noqa: PR01, RT01, D200
"""
Merge DataFrame or named Series objects with a database-style join.
"""
if isinstance(left, Series):
if left.name is None:
raise ValueError("Cannot merge a Series without a name")
else:
left = left.to_frame()
if not isinstance(left, DataFrame):
raise TypeError(
f"Can only merge Series or DataFrame objects, a {type(left)} was passed"
)
return left.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
) | da07b44fb80ee28cc8320c071876ef6ad573d974 | 17,475 |
def generate_modal(title, callback_id, blocks):
"""
Generate a modal view object using Slack's BlockKit
:param title: Title to display at the top of the modal view
:param callback_id: Identifier used to help determine the type of modal view in future responses
:param blocks: Blocks to add to the modal view
:return: View object (Dictionary)
"""
modal = {
"type": "modal",
"callback_id": callback_id,
"title": {
"type": "plain_text",
"text": title,
"emoji": False
},
"submit": {
"type": "plain_text",
"text": "Submit",
"emoji": False
},
"close": {
"type": "plain_text",
"text": "Cancel",
"emoji": False
},
"blocks": blocks
}
return modal | e0caeec1ab1cf82ed6f02ec77a984dcb25e329f5 | 17,476 |
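A sketch building a one-section modal; the resulting dict is what would be passed to Slack's views.open call:
blocks = [{
    "type": "section",
    "text": {"type": "mrkdwn", "text": "Pick an option below."},
}]
view = generate_modal("New Request", "new_request_modal", blocks)
print(view["callback_id"], len(view["blocks"]))  # new_request_modal 1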
import cv2
import numpy as np
def dir_thresh(img, sobel_kernel=3, thresh=(0.7, 1.3)):
"""
#---------------------
# This function applies Sobel x and y,
# then computes the direction of the gradient,
# and then applies a threshold.
#
"""
# Take the gradient in x and y separately
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Take the absolute value of the x and y gradients
# and calculate the direction of the gradient
absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
# Create a binary mask where direction thresholds are met
binary_output = np.zeros_like(absgraddir)
binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 255
# Return the binary image
return binary_output.astype(np.uint8) | 0f5aefdbc9ffbe8e3678145e2926a4fbd7e01629 | 17,477 |
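A sketch on a synthetic grayscale image with a diagonal edge, just to exercise the thresholding:
import numpy as np

gray = np.tri(200, 200, dtype=np.uint8) * 255  # lower-triangular block -> diagonal edge
mask = dir_thresh(gray, sobel_kernel=15, thresh=(0.7, 1.3))
print(mask.shape, mask.dtype, mask.max())  # (200, 200) uint8, in-range edge pixels set to 255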
from datetime import datetime, timedelta
def seconds_to_time( time ):
"""
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
"""
if not time:
return "0s"
if isinstance( time, timedelta ) or isinstance( time, datetime ):
if time.days < 0:
diff = timedelta( )
else:
diff = time
else:
diff = timedelta( seconds = int(time if time >= 0 else 0) )
second_diff = diff.seconds
if second_diff < 0:
second_diff = 0
if second_diff > 60:
        return "%sm%ss" % ( str( second_diff // 60 ), ( second_diff % 60 ) )
else:
return "%ss" % second_diff | 407fa93f782c8cff142be1ab721969d3e4c2b42f | 17,478 |
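A few example conversions (relies on the timedelta import above):
print(seconds_to_time(42))                      # 42s
print(seconds_to_time(125))                     # 2m5s
print(seconds_to_time(timedelta(seconds=125)))  # 2m5s
print(seconds_to_time(None))                    # 0s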
import os
def bq_use_legacy_sql():
"""
Returns BIGQUERY_LEGACY_SQL if env is set
"""
return os.environ.get('BIGQUERY_LEGACY_SQL', 'TRUE') | 53d51c5eb1caa9b6041e9b2d29bde068199bba52 | 17,479 |
def load_txt_into_set(path, skip_first_line=True):
"""Load a txt file (one value per line) into a set."""
result = set()
file = open_file_dir_safe(path)
with file:
if skip_first_line:
file.readline()
for line in file:
line = line.strip()
result.add(line)
return result | 17ad3c15820595b72254dbe4c9097a8857511599 | 17,480 |
def failed(obj):
"""Returns True if ``obj`` is an instance of ``Fail``."""
return isinstance(obj, Fail) | 715fe3ae1154e3e5712b6f4535021b44e8020146 | 17,481 |
def linkCount(tupleOfLists, listNumber, lowerBound, upperBound):
"""Counts the number of links in one of the lists passed.
This function is a speciality function to aid in calculating
statistics involving the number of links that lie in a given
range. It is primarily intended as a private helper function. The
parameters are:
tupleOfLists -- usually a linkograph entry.
listNumber -- a list of the indicies in entry that should be
considered.
lowerBound -- the lowest index that should be considered.
upperBound -- the highest index that should be considered.
Example: a typical tupleOfLists is ({'A', 'B'}, {1,2}, {4,5}) a
listNumber of [1] would only consider the links in {1,2}, a
listNumber of [2] would only consider the links in {4,5} and a
listNumber of [1,2] would consider the links in both {1,2}, and
{4,5}.
"""
summation = 0
for index in listNumber:
summation += len({link for link in tupleOfLists[index]
if link >= lowerBound
and link <= upperBound})
return summation | 239fd8d3c01fe6c88444cfa7369459e3c76005dc | 17,482 |
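For instance, with the example entry from the docstring:
entry = ({'A', 'B'}, {1, 2}, {4, 5})
print(linkCount(entry, [1], 0, 3))     # 2  (links 1 and 2 both fall in [0, 3])
print(linkCount(entry, [1, 2], 2, 4))  # 2  (link 2 from the first list, link 4 from the second)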
from hashlib import md5
def encode_md5(plain_text):
"""
Encode the plain text by md5
:param plain_text:
:return: cipher text
"""
plain_text = plain_text + EXT_STRING
encoder = md5()
encoder.update(plain_text.encode('utf-8'))
return encoder.hexdigest() | ad88ebc12334c9438c38719cd7c836edb9736d3c | 17,483 |
import warnings
import numpy as np
def delta(x, y, assume_normal=True, percentiles=[2.5, 97.5],
min_observations=20, nruns=10000, relative=False, x_weights=1, y_weights=1):
"""
Calculates the difference of means between the samples (x-y) in a
statistical sense, i.e. with confidence intervals.
NaNs are ignored: treated as if they weren't included at all. This is done
because at this level we cannot determine what a NaN means. In some cases,
a NaN represents missing data that should be completely ignored, and in some
cases it represents inapplicable (like PCII for non-ordering customers) - in
which case the NaNs should be replaced by zeros at a higher level. Replacing
with zeros, however, would be completely incorrect for return rates.
Computation is done in form of treatment minus control, i.e. x-y
Args:
x (array_like): sample of a treatment group
y (array_like): sample of a control group
assume_normal (boolean): specifies whether normal distribution
assumptions can be made
percentiles (list): list of percentile values for confidence bounds
min_observations (integer): minimum number of observations needed
nruns (integer): only used if assume normal is false
relative (boolean): if relative==True, then the values will be returned
as distances below and above the mean, respectively, rather than the
absolute values. In this case, the interval is mean-ret_val[0] to
mean+ret_val[1]. This is more useful in many situations because it
corresponds with the sem() and std() functions.
x_weights (list): weights for the x vector, in order to calculate
the weighted mean and confidence intervals, which is equivalent
to the overall metric. This weighted approach is only relevant
for ratios.
y_weights (list): weights for the y vector, in order to calculate
the weighted mean and confidence intervals, which is equivalent
to the overall metric. This weighted approach is only relevant
for ratios.
Returns:
tuple:
* mu (float): mean value of the difference
* c_i (dict): percentile levels (index) and values
* ss_x (int): size of x excluding NA values
* ss_y (int): size of y excluding NA values
* _x (float): absolute mean of x
* _y (float): absolute mean of y
"""
# Checking if data was provided
if x is None or y is None:
raise ValueError('Please provide two non-None samples.')
# Coercing missing values to right format
_x = np.array(x, dtype=float) * x_weights
_y = np.array(y, dtype=float) * y_weights
x_nan = np.isnan(_x).sum()
y_nan = np.isnan(_y).sum()
if x_nan > 0:
warnings.warn('Discarding ' + str(x_nan) + ' NaN(s) in the x array!')
if y_nan > 0:
warnings.warn('Discarding ' + str(y_nan) + ' NaN(s) in the y array!')
ss_x = sample_size(_x)
ss_y = sample_size(_y)
# Checking if enough observations are left after dropping NaNs
if min(ss_x, ss_y) < min_observations:
# Set mean to nan
mu = np.nan
# Create nan dictionary
c_i = dict(list(zip(percentiles, np.empty(len(percentiles)) * np.nan)))
else:
# Computing the mean
mu = _delta_mean(_x, _y)
# Computing the confidence intervals
if assume_normal:
c_i = normal_sample_difference(x=_x, y=_y, percentiles=percentiles,
relative=relative)
else:
c_i, _ = bootstrap(x=_x, y=_y, percentiles=percentiles, nruns=nruns,
relative=relative)
# Return the result structure
return mu, c_i, ss_x, ss_y, np.nanmean(_x), np.nanmean(_y) | 37b742775777b5a0bd26f7e8fdf7a189a69b199f | 17,484 |
def CircleCircumference(curve_id, segment_index=-1):
"""Returns the circumference of a circle curve object
Parameters:
curve_id = identifier of a curve object
segment_index [opt] = identifies the curve segment if
curve_id identifies a polycurve
Returns:
The circumference of the circle if successful.
"""
return circle.Circumference | 7a9200b089cebab93cbea387a4dd92590157dc45 | 17,485 |
def generate_handshake(info_hash, peer_id):
"""
The handshake is a required message and must be the first message
transmitted by the client. It is (49+len(pstr)) bytes long in the form:
<pstrlen><pstr><reserved><info_hash><peer_id>
Where:
pstrlen: string length of <pstr>, as a single raw byte
pstr: string identifier of the protocol
reserved: eight (8) reserved bytes. All current implementations use all
zeroes. Each bit in these bytes can be used to change the behavior of the
protocol.
info_hash: 20-byte SHA1 hash of the info key in the meta info file. This is
the same info_hash that is transmitted in tracker requests.
peer_id: 20-byte string used as a unique ID for the client. This is usually
the same peer_id that is transmitted in tracker requests
In version 1.0 of the BitTorrent protocol:
pstrlen = 19 and pstr = "BitTorrent protocol".
:param info_hash:
:param peer_id:
:return:
"""
pstr = b"BitTorrent protocol"
    pstrlen = bytes([len(pstr)])  # single raw byte, value 19 for this pstr
reserved = b"\x00" * 8 # 8 zeroes
handshake = pstrlen + pstr + reserved + info_hash + peer_id
assert len(handshake) == 49 + len(pstr)
    assert pstrlen == bytes([19])
return handshake | ae13462608f3e2ec47abdb12e87a3bc08faa1cba | 17,486 |
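A sketch with a fake info hash and a random peer id (both 20 bytes, as the protocol requires):
import hashlib
import os

info_hash = hashlib.sha1(b"fake bencoded info dict").digest()  # 20-byte digest
peer_id = b"-PC0001-" + os.urandom(12)                         # 8-byte prefix + 12 random bytes
handshake = generate_handshake(info_hash, peer_id)
print(len(handshake))  # 68 = 49 + len(b"BitTorrent protocol")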
from functools import wraps
def tokenizer_decorator(func, **kwargs):
"""
This decorator wraps around a tokenizer function.
It adds the token to the info dict and removes the found token from the given name.
"""
if not callable(func):
raise TypeError(f"func {func} not callable")
@wraps(func)
def wrapper(name, info, **kwargs):
try:
if ("patterns" and "token_name") in kwargs:
token = func(name, **kwargs)
elif "reference_date" in kwargs:
token = func(name, reference_date=kwargs.get("reference_date", None))
elif "template_file_found" in kwargs:
token = func(
name, template_file_found=kwargs.get("template_file_found", None)
)
else:
token = func(name)
except TypeError as ex:
logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
raise TokenizerError(ex) from ex
# return name, info
except Exception as ex:
logger.error(f"func: {func.__name__}, name: {name}\n{kwargs}")
raise TokenizerError(ex) from ex
# return name, info
if not token:
# logger.warning(f'Wrapper no token found for {func}, {name}')
return name, info
str_token_values = [i for i in token.values() if isinstance(i, str)]
str_token_values_in_name = [i for i in str_token_values if i in name]
if str_token_values:
for val in str_token_values_in_name:
val_is_subset = [
i
for i in str_token_values_in_name
if val in i and len(i) > len(val)
]
if not val_is_subset:
name = replace_and_strip(name, val, **kwargs)
info.update(**token)
# print("wrapper token:",info,'\nname',name)
return name, info
return wrapper | d1827ab75a12f923c6da69927323d9c5013124c0 | 17,487 |
def reverse_complement( seq ):
"""
Biological reverse complementation. Case in sequences are retained, and
IUPAC codes are supported. Code modified from:
http://shootout.alioth.debian.org/u32/program.php?test=revcomp&lang=python3&id=4
"""
return seq.translate(_nt_comp_table)[::-1] | 86229dfeceecb7e0d2e1215b25074c35fbd38792 | 17,488 |
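The translation table `_nt_comp_table` is defined elsewhere in the source module; a plausible IUPAC-aware definition (an assumption, shown only so the snippet can run) is:
# Assumed complement table; the original module may build it differently.
_nt_comp_table = str.maketrans(
    "ACGTUWSMKRYBDHVNacgtuwsmkrybdhvn",
    "TGCAAWSKMYRVHDBNtgcaawskmyrvhdbn",
)
print(reverse_complement("ATGCaacg"))  # cgttGCAT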
def computeLPS(s, n):
"""
    Solution with better complexity (KMP failure-function approach).
"""
prev = 0 # length of the previous longest prefix suffix
lps = [0]*(n)
i = 1
# the loop calculates lps[i] for i = 1 to n-1
while i < n:
if s[i] == s[prev]:
prev += 1
lps[i] = prev
i += 1
else:
# This is tricky. Consider the example.
# AAACAAAA and i = 7. The idea is similar
# to search step.
if prev != 0:
prev = lps[prev-1]
# Also, note that we do not increment i here
else:
lps[i] = 0
i += 1
print(lps)
return lps[n-1] | 8b4374c9ac29f59cf1f4b0e6e07628776828c11a | 17,489 |
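For example, the classic KMP test string:
result = computeLPS("AAACAAAA", 8)  # prints the table [0, 1, 2, 0, 1, 2, 3, 3]
print(result)                       # 3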
def roundedCorner(pc, p1, p2, r):
"""
Based on Stackoverflow C# rounded corner post
https://stackoverflow.com/questions/24771828/algorithm-for-creating-rounded-corners-in-a-polygon
"""
def GetProportionPoint(pt, segment, L, dx, dy):
factor = float(segment) / L if L != 0 else segment
return PVector((pt.x - dx * factor), (pt.y - dy * factor))
# Vector 1
dx1 = pc.x - p1.x
dy1 = pc.y - p1.y
# Vector 2
dx2 = pc.x - p2.x
dy2 = pc.y - p2.y
# Angle between vector 1 and vector 2 divided by 2
angle = (atan2(dy1, dx1) - atan2(dy2, dx2)) / 2
# The length of segment between angular point and the
# points of intersection with the circle of a given radius
tng = abs(tan(angle))
segment = r / tng if tng != 0 else r
# Check the segment
length1 = sqrt(dx1 * dx1 + dy1 * dy1)
length2 = sqrt(dx2 * dx2 + dy2 * dy2)
min_len = min(length1, length2)
if segment > min_len:
segment = min_len
max_r = min_len * abs(tan(angle))
else:
max_r = r
# Points of intersection are calculated by the proportion between
# length of vector and the length of the segment.
p1Cross = GetProportionPoint(pc, segment, length1, dx1, dy1)
p2Cross = GetProportionPoint(pc, segment, length2, dx2, dy2)
# Calculation of the coordinates of the circle
# center by the addition of angular vectors.
dx = pc.x * 2 - p1Cross.x - p2Cross.x
dy = pc.y * 2 - p1Cross.y - p2Cross.y
L = sqrt(dx * dx + dy * dy)
d = sqrt(segment * segment + max_r * max_r)
circlePoint = GetProportionPoint(pc, d, L, dx, dy)
# StartAngle and EndAngle of arc
startAngle = atan2(p1Cross.y - circlePoint.y, p1Cross.x - circlePoint.x)
endAngle = atan2(p2Cross.y - circlePoint.y, p2Cross.x - circlePoint.x)
# Sweep angle
sweepAngle = endAngle - startAngle
# Some additional checks
if sweepAngle < 0:
startAngle, endAngle = endAngle, startAngle
sweepAngle = -sweepAngle
if sweepAngle > PI:
startAngle, endAngle = endAngle, startAngle
sweepAngle = TWO_PI - sweepAngle
# Draw result using graphics
# noStroke()
with pushStyle():
noStroke()
beginShape()
vertex(p1.x, p1.y)
vertex(p1Cross.x, p1Cross.y)
vertex(p2Cross.x, p2Cross.y)
vertex(p2.x, p2.y)
endShape(CLOSE)
line(p1.x, p1.y, p1Cross.x, p1Cross.y)
line(p2.x, p2.y, p2Cross.x, p2Cross.y)
arc(circlePoint.x, circlePoint.y, 2 * max_r, 2 * max_r,
startAngle, startAngle + sweepAngle, OPEN) | e77497918025deba211469616d210c23483e2152 | 17,490 |
import numpy as np
from sklearn.utils import check_random_state
def synthetic_data(n_points=1000, noise=0.05,
random_state=None, kind="unit_cube",
n_classes=None, n_occur=1, legacy_labels=False, **kwargs):
"""Make a synthetic dataset
A sample dataset generators in the style of sklearn's
`sample_generators`. This adds other functions found in the Matlab
toolkit for Dimensionality Reduction
Parameters
----------
kind: {'unit_cube', 'swiss_roll', 'broken_swiss_roll', 'twinpeaks', 'difficult'}
The type of synthetic dataset
legacy_labels: boolean
If True, try and reproduce the labels from the Matlab Toolkit for
Dimensionality Reduction. (overrides any value in n_classes)
This usually only works if algorithm-specific coefficient choices
(e.g. `height` for swiss_roll) are left at their default values
n_points : int, optional (default=1000)
The total number of points generated.
n_classes: None or int
If None, target vector is based on underlying manifold coordinate
If int, the manifold coordinate is bucketized into this many classes.
n_occur: int
Number of occurrences of a given class (along a given axis)
ignored if n_classes = None
noise : double or None (default=0.05)
Standard deviation of Gaussian noise added to the data.
If None, no noise is added.
random_state : int, RandomState instance or None (default)
Determines random number generation for dataset shuffling and noise.
Pass an int for reproducible output across multiple function calls.
Additional Parameters
---------------------
difficult:
n_dims: int (default 5)
Number of dimensions to embed
swiss_roll:
broken_swiss_roll:
height: float (default 30.)
scaling to apply to y dimension
Returns
-------
X : array of shape [n_points, 2]
The generated samples.
y : array of shape [n_points]
The labels for class membership of each point.
"""
generator = check_random_state(random_state)
metadata = {
"synthetic_type": kind,
"n_points": n_points,
"noise": noise
}
if kind == 'unit_cube':
x = 2 * (generator.rand(n_points) - 0.5)
y = 2 * (generator.rand(n_points) - 0.5)
z = 2 * (generator.rand(n_points) - 0.5)
X = np.column_stack((x, y, z))
shift = np.array([1.])
scale = np.array([2.])
labels = checkerboard(X, shift_factors=shift, scale_factors=scale, n_occur=n_occur, n_classes=n_classes)
metadata['manifold_coords'] = np.concatenate((x,y,z), axis=0).T
elif kind == 'twinpeaks':
inc = 1.5 / np.sqrt(n_points)
x = np.arange(-1, 1, inc)
xy = 1 - 2 * generator.rand(2, n_points)
z = np.sin(np.pi * xy[0, :]) * np.tanh(3 * xy[1, :])
X = np.vstack([xy, z * 10.]).T # + noise * generator.randn(n_points, 3)
t = xy.T
metadata['manifold_coords'] = t
if legacy_labels is True:
labels = np.remainder(np.sum(np.round((X + np.tile(np.min(X, axis=0), (X.shape[0], 1))) / 10.), axis=1), 2)
elif n_classes is None:
labels = 1-z
else:
shift = np.array([1.])
scale = np.array([2.])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'swiss_roll':
height = kwargs.pop('height', 30.)
t = 1.5 * np.pi * (1.0 + 2.0 * generator.rand(n_points))
y = height * generator.rand(*t.shape)
manifold_coords = np.column_stack((t, y))
X = _parameterized_swiss_roll(manifold_coords)
metadata['manifold_coords'] = manifold_coords
if legacy_labels is True:
labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
else:
scale = np.array([3*np.pi])
shift = np.array([-1.5*np.pi])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'broken_swiss_roll':
height = kwargs.pop('height', 30.)
np1 = int(np.ceil(n_points / 2.0))
t1 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(np1) * 0.4))
t2 = 1.5 * np.pi * (1.0 + 2.0 * (generator.rand(n_points - np1) * 0.4 + 0.6))
t = np.concatenate((t1, t2))
y = height * generator.rand(*t.shape)
manifold_coords = np.column_stack((t, y))
X = _parameterized_swiss_roll(manifold_coords)
metadata['manifold_coords'] = manifold_coords
if legacy_labels is True:
labels = np.remainder(np.round(t / 2.) + np.round(height / 12.), 2)
else:
scale = np.array([3*np.pi])
shift = np.array([-1.5*np.pi])
labels = checkerboard(t, shift_factors=shift, scale_factors=scale,
n_classes=n_classes, n_occur=n_occur)
elif kind == 'difficult':
n_dims = kwargs.pop("n_dims", 5)
points_per_dim = int(np.round(float(n_points ** (1.0 / n_dims))))
l = np.linspace(0, 1, num=points_per_dim)
t = np.array(list(_combn(l, n_dims)))
X = np.vstack((np.cos(t[:,0]),
np.tanh(3 * t[:,1]),
t[:,0] + t[:,2],
t[:,3] * np.sin(t[:,1]),
np.sin(t[:,0] + t[:,4]),
t[:,4] * np.cos(t[:,1]),
t[:,4] + t[:,3],
t[:,1],
t[:,2] * t[:,3],
t[:,0])).T
tt = 1 + np.round(t)
# Generate labels for dataset (2x2x2x2x2 checkerboard pattern)
labels = np.remainder(tt.sum(axis=1), 2)
metadata['n_dims'] = n_dims
metadata['manifold_coords'] = t
else:
raise Exception(f"Unknown synthetic dataset type: {kind}")
if noise is not None:
X += noise * generator.randn(*X.shape)
return X, labels, metadata | 740b5d2f708e177ce703f2124806ab7bd0079a09 | 17,491 |
import argparse
def parse_args():
"""
Argument Parser
"""
parser = argparse.ArgumentParser(description="Wiki Text Extractor")
parser.add_argument("-i", "--input_dir", dest="input_dir", type=str, metavar="PATH",
default="./extracted",help="Input directory path ")
parser.add_argument("-o", "--output_dir", dest="output_dir", type=str, metavar="PATH",
default="./wiki_text",help="Output directory path")
parser.add_argument("-t", "--output_type", dest="output_type", type=int, metavar="INT",
default=1, choices=[1,2], help="Output in a single file or multiple file")
args = parser.parse_args()
return args | af9149f893bb652c7e86a731749b3e2a5e4c1c8f | 17,492 |
def _load_default_profiles():
# type: () -> Dict[str, Any]
"""Load all the profiles installed on the system."""
profiles = {}
for path in _iter_default_profile_file_paths():
name = _get_profile_name(path)
if _is_abstract_profile(name):
continue
definition = _read_profile_definition(path)
try:
recursively_expand_base_profiles(definition)
except Exception:
logger.error("Could not expand base profile %s", path)
raise
profiles[name] = {'definition': definition}
return profiles | b53411dce6bdf3baba876a626b023a2b93e48c99 | 17,493 |
import torch
from torch import nn, optim
def train_model(model, train_loader, valid_loader, learning_rate, device,
epochs):
"""Trains a model with train_loader and validates it with valid_loader
Arguments:
model -- Model to train
train_loader -- Data to train
valid_loader -- Data to validate the training
learning_rate -- Learning rate
device -- Device where the computations will be executed
epochs -- Number of epochs to train
Returns:
The trained model
"""
# Our loss function will be 'negative log likelihood'
criterion = nn.NLLLoss()
# We only want to optimize our classifier parameters
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
# makes PyTorch use 'device' to compute
model.to(device)
criterion.to(device)
print_every = 25
step = 0
for epoch in range(epochs): # for each epoch
running_loss = 0
print("Epoch: {}/{}".format(epoch+1, epochs))
print("==========")
for inputs, labels in train_loader: # for each batch of data / label
step += 1
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad() # resets gradients to zero
output = model.forward(inputs) # feed forward
loss = criterion(output, labels) # calculate the loss
loss.backward() # back propagate the loss
optimizer.step() # do gradient descent (update weights)
running_loss += loss.item()
if step % print_every == 0:
model.eval() # Turn off dropout to make the validation pass
# Turn off gradients for the validation pass
with torch.no_grad():
valid_loss, accuracy = validate_model(model, valid_loader,
criterion, device)
print("Training Loss: {:.3f}.. ".format(
running_loss/print_every),
"Validation Loss: {:.3f}.. ".format(
valid_loss/len(valid_loader)),
"Validation Accuracy: {:.3f}".format(
accuracy/len(valid_loader)))
running_loss = 0
model.train() # enable dropout back
model.eval() # Turn off dropout to make the validation pass
with torch.no_grad(): # Turn off gradients for the validation pass
valid_loss, accuracy = validate_model(
model, valid_loader, criterion, device)
print("\nEpoch: {}/{}.. ".format(epoch+1, epochs),
"Validation Loss: {:.3f}.. ".format(
valid_loss/len(valid_loader)),
"Validation Accuracy: {:.3f}\n".format(
accuracy/len(valid_loader)))
model.train() # enable dropout back
return model | 3addd258adddcbb43d846dae09d943d9a7016b69 | 17,494 |
def get_weapon_techs(fighter=None):
"""If fighter is None, return list of all weapon techs.
If fighter is given, return list of weapon techs fighter has."""
if fighter is None:
return weapon_tech_names
else:
return [t for t in fighter.techs if get_tech_obj(t).is_weapon_tech] | bbda76e55fdbe80e9883ff05746256fb56767136 | 17,495 |
def xml_to_values(l):
"""
Return a list of values from a list of XML data potentially including null values.
"""
new = []
for element in l:
if isinstance(element, dict):
new.append(None)
else:
new.append(to_float(element))
return new | 30b6af4101f45697e0f074ddedcd051aba37cb99 | 17,496 |
def _get_options(raw_options, apply_config):
"""Return parsed options."""
if not raw_options:
return parse_args([''], apply_config=apply_config)
if isinstance(raw_options, dict):
options = parse_args([''], apply_config=apply_config)
for name, value in raw_options.items():
if not hasattr(options, name):
raise ValueError("No such option '{}'".format(name))
# Check for very basic type errors.
expected_type = type(getattr(options, name))
if not isinstance(expected_type, (str, unicode)):
if isinstance(value, (str, unicode)):
raise ValueError(
"Option '{}' should not be a string".format(name))
setattr(options, name, value)
else:
options = raw_options
return options | e88014f0f5497e72973afbdf669cf14bf4537051 | 17,497 |
def csvdir_equities(tframes=None, csvdir=None):
"""
Generate an ingest function for custom data bundle
This function can be used in ~/.zipline/extension.py
to register bundle with custom parameters, e.g. with
a custom trading calendar.
Parameters
----------
tframes: tuple, optional
The data time frames, supported timeframes: 'daily' and 'minute'
csvdir : string, optional, default: CSVDIR environment variable
The path to the directory of this structure:
<directory>/<timeframe1>/<symbol1>.csv
<directory>/<timeframe1>/<symbol2>.csv
<directory>/<timeframe1>/<symbol3>.csv
<directory>/<timeframe2>/<symbol1>.csv
<directory>/<timeframe2>/<symbol2>.csv
<directory>/<timeframe2>/<symbol3>.csv
Returns
-------
ingest : callable
The bundle ingest function
Examples
--------
This code should be added to ~/.zipline/extension.py
.. code-block:: python
from zipline.data.bundles import csvdir_equities, register
register('custom-csvdir-bundle',
csvdir_equities(["daily", "minute"],
'/full/path/to/the/csvdir/directory'))
"""
return CSVDIRBundle(tframes, csvdir).ingest | 6dc4b76e52f7512074eb044d5505c904a323eb69 | 17,498 |
import numpy as np
def normalize_skeleton(joints):
"""Normalizes joint positions (NxMx2 or NxMx3, where M is 14 or 16) from parent to child order. Each vector from parent to child is normalized with respect to it's length.
:param joints: Position of joints (NxMx2) or (NxMx3)
:type joints: numpy.ndarray
:return: Normalzed position of joints (NxMx2) or (NxMx3)
:rtype: numpy.ndarray
"""
assert len(joints.shape) == 3
assert joints.shape[1] == 14 or joints.shape[1] == 16
assert joints.shape[-1] == 2 or joints.shape[-1] == 3
hip = 0
if joints.shape[1] == 14:
names = NAMES_14
else:
names = NAMES_16
neck = names.index('Neck')
joints_ = joints.copy()
joints_ -= joints_[:, :1, :]
spine = joints_[:, neck, :] - joints_[:, hip, :]
spine_norm = np.linalg.norm(spine, axis=1).reshape(-1, 1)
adjacency = adjacency_list(joints_.shape[1])
queue = []
queue.append(0)
while len(queue) > 0:
current = queue.pop(0)
for child in adjacency[current]:
queue.append(child)
prnt_to_chld = joints[:, child, :] - joints[:, current, :]
prnt_to_chld_norm = np.linalg.norm(prnt_to_chld, axis=1).reshape(-1, 1)
prnt_to_chld_unit = prnt_to_chld / prnt_to_chld_norm
joints_[:, child, :] = joints_[:, current, :] + (prnt_to_chld_unit * (prnt_to_chld_norm / (spine_norm + 1e-8)))
return joints_ | 579862d05814eaa9b04f3e1a4812e727b02175aa | 17,499 |