content | sha1 | id
---|---|---
from datetime import datetime
def check_dependencies_ready(dependencies, start_date, dependencies_to_ignore):
"""Checks if every dependent pipeline has completed
Args:
dependencies(dict): dict from id to name of pipelines it depends on
start_date(str): string representing the start date of the pipeline
dependencies_to_ignore(list of str): dependencies to ignore if failed
"""
    print('Checking dependency at', str(datetime.now()))
dependency_ready = True
# Convert date string to datetime object
start_date = datetime.strptime(start_date, '%Y-%m-%d')
for pipeline in dependencies.keys():
# Get instances of each pipeline
instances = list_pipeline_instances(pipeline)
failures = []
# Collect all pipeline instances that are scheduled for today
instances_today = []
for instance in instances:
date = datetime.strptime(instance[START_TIME], '%Y-%m-%dT%H:%M:%S')
if date.date() == start_date.date():
instances_today.append(instance)
        # Dependency pipeline has not started today
if not instances_today:
dependency_ready = False
for instance in instances_today:
            # One of the dependencies failed or was cancelled
if instance[STATUS] in FAILED_STATUSES:
if dependencies[pipeline] not in dependencies_to_ignore:
raise Exception(
'Pipeline %s (ID: %s) has bad status: %s'
% (dependencies[pipeline], pipeline, instance[STATUS])
)
else:
failures.append(dependencies[pipeline])
# Dependency is still running
elif instance[STATUS] != FINISHED:
dependency_ready = False
return dependency_ready, failures | 8ff01e54e3dae4110e7bd06accbc01b73148f4c3 | 14,900 |
def factor_returns(factor_data, demeaned=True, group_adjust=False):
"""
    Compute the returns of a portfolio weighted by factor values.
    Weights are the demeaned factor values divided by the sum of their absolute values
    (so that the total leverage is 1).
    Parameters
    ----------
    factor_data : pd.DataFrame - MultiIndex
        A DataFrame with a MultiIndex of date (level 0) and asset (level 1),
        whose values include the factor value, the forward returns for each period,
        the factor quantile, and optionally the factor group and factor weight.
    demeaned : bool
        Is the factor analysis based on a long/short portfolio? If True,
        factor values are demeaned when computing the weights.
    group_adjust : bool
        Is the factor analysis based on a group (industry) neutral portfolio?
        If True, factor values are demeaned by group and date when computing the weights.
    Returns
    -------
    returns : pd.DataFrame
        Returns of the zero-exposure long/short portfolio for each period.
"""
def to_weights(group, is_long_short):
if is_long_short:
demeaned_vals = group - group.mean()
return demeaned_vals / demeaned_vals.abs().sum()
else:
return group / group.abs().sum()
grouper = [factor_data.index.get_level_values('date')]
if group_adjust:
grouper.append('group')
weights = factor_data.groupby(grouper)['factor'] \
.apply(to_weights, demeaned)
if group_adjust:
weights = weights.groupby(level='date').apply(to_weights, False)
weighted_returns = \
factor_data[get_forward_returns_columns(factor_data.columns)] \
.multiply(weights, axis=0)
returns = weighted_returns.groupby(level='date').sum()
return returns | 127f26e20ca14cae5d9fc2e444ab93d98cc6b8c4 | 14,901 |
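A minimal, self-contained sketch of the weighting scheme used above (demean factor values per date, then scale so the absolute weights sum to 1, i.e. unit gross leverage), using only pandas; the dates, assets, and column name are illustrative, not taken from the original module:

import pandas as pd
idx = pd.MultiIndex.from_product(
    [pd.to_datetime(['2021-01-04', '2021-01-05']), ['AAA', 'BBB']],
    names=['date', 'asset'])
factor = pd.Series([1.0, -3.0, 2.0, 0.0], index=idx, name='factor')
# Demean per date, then scale so abs-weights sum to 1 (gross leverage = 1)
demeaned = factor.groupby(level='date').transform(lambda g: g - g.mean())
weights = demeaned.groupby(level='date').transform(lambda g: g / g.abs().sum())
print(weights)  # long/short weights that sum to zero within each date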
def create_input_lambda(i):
"""Extracts off an object tensor from an input tensor"""
return Lambda(lambda x: x[:, i]) | b574e5659723f5394590cedcc4305f9fade5021e | 14,902 |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dropout, RepeatVector, TimeDistributed, Dense
def create_model_talos(params, time_steps, num_features, input_loss='mae', input_optimizer='adam',
patience=3, monitor='val_loss', mode='min', epochs=100, validation_split=0.1):
"""Uses sequential model class from keras. Adds LSTM layer. Input samples, timesteps, features.
Hyperparameters include number of cells, dropout rate. Output is encoded feature vector of the input data.
Uses autoencoder by mirroring/reversing encoder to be a decoder."""
model = Sequential()
model.add(LSTM(params['cells'], input_shape=(time_steps, num_features))) # one LSTM layer
model.add(Dropout(params['dropout']))
model.add(RepeatVector(time_steps))
model.add(LSTM(params['cells'], return_sequences=True)) # mirror the encoder in the reverse fashion to create the decoder
model.add(Dropout(params['dropout']))
model.add(TimeDistributed(Dense(num_features)))
print(model.optimizer)
model.compile(loss=input_loss, optimizer=input_optimizer)
es = tf.keras.callbacks.EarlyStopping(monitor=monitor, patience=patience, mode=mode)
history = model.fit(
X_train, y_train,
epochs=epochs, # just set to something high, early stopping will monitor.
batch_size=params['batch_size'], # this can be optimized later
validation_split=validation_split, # use 10% of data for validation, use 90% for training.
callbacks=[es], # early stopping similar to earlier
shuffle=False # because order matters
)
return history, model | e17becc7b95b07fb15059e8ef76a70fbfbd68b88 | 14,903 |
import struct
def parseAnswers(args, data, question_length = 0):
"""
parseAnswers(args, data): Parse all answers given to a query
"""
retval = []
#
# Skip the headers and question
#
index = 12 + question_length
logger.debug("question_length=%d total_length=%d" % (question_length, len(data)))
if index >= len(data):
logger.debug("parseAnswers(): index %d >= data length(%d), so no answers were received. Aborting." % (
index, len(data)))
return(retval)
#
# Now loop through our answers.
#
while True:
answer = {}
logger.debug("parseAnswers(): Index is currently %d" % index)
#
# If we're doing a fake TTL, we also have to fudge the response header and overwrite
# the original TTL. In this case, we're doing to overwrite it with the 4 byte string
# of 0xDEADBEEF, so that it will be obvious upon inspection that this string was human-made.
#
if args.fake_ttl:
ttl_index = index + 6
#
# If the leading byte of the Answer Headers is zero (the question), then
# the question was for a bad TLD, and the "pointer" is really just a single
# byte, so go forward one byte less.
#
if data[index] == 0:
ttl_index -= 1
data_new = bytes()
logger.debug("parseAnswers(): --fake-ttl specified, forcing TTL to be -2")
data_new = data[0:ttl_index] + struct.pack(">i", -2) + data[ttl_index + 4:]
data = data_new
answer["headers"] = parseAnswerHeaders(args, data[index:])
#
# Advance our index to the start of the next answer, then put this entire
# answer into answer["rddata_raw"]
#
index_old = index
index_next = index + 12 + answer["headers"]["rdlength"]
answer["rddata_raw"] = data[index:index_next]
(answer["rddata"], answer["rddata_text"]) = parse_answer_body.parseAnswerBody(answer, index, data)
index = index_next
#
        # This is a bit of a hack, but we want to grab the sanity data from the rddata
        # dictionary and put it into its own dictionary member so that the sanity
# module can later extract it.
#
answer["sanity"] = {}
if "sanity" in answer:
answer["sanity"] = answer["rddata"]["sanity"]
del answer["rddata"]["sanity"]
#
        # Deleting the raw data because it will choke when converted to JSON
        #
        answer["rddata_hex"] = {}
        if "rddata_raw" in answer:
answer["rddata_hex"] = output.formatHex(answer["rddata_raw"])
del answer["rddata_raw"]
retval.append(answer)
#
# If we've run off the end of the packet, then break out of this loop
#
if index >= len(data):
logger.debug("parseAnswer(): index %d >= data length (%d), stopping loop!" % (index, len(data)))
break
return(retval) | b50aef467e76451443fdf94ea7651b9ad7157baa | 14,904 |
import numpy as np
def ortho_init(scale=1.0):
"""
Orthogonal initialization for the policy weights
:param scale: (float) Scaling factor for the weights.
:return: (function) an initialization function for the weights
"""
# _ortho_init(shape, dtype, partition_info=None)
def _ortho_init(shape, *_, **_kwargs):
"""Intialize weights as Orthogonal matrix.
Orthogonal matrix initialization [1]_. For n-dimensional shapes where
n > 2, the n-1 trailing axes are flattened. For convolutional layers, this
corresponds to the fan-in, so this makes the initialization usable for
both dense and convolutional layers.
References
----------
.. [1] Saxe, Andrew M., James L. McClelland, and Surya Ganguli.
"Exact solutions to the nonlinear dynamics of learning in deep
linear
"""
# lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
# Added by Ronja
elif len(shape) == 3: # assumes NWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
gaussian_noise = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(gaussian_noise, full_matrices=False)
weights = u if u.shape == flat_shape else v # pick the one with the correct shape
weights = weights.reshape(shape)
return (scale * weights[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init | d82af86b0650c4588b1f4a22fea809cda1e72959 | 14,905 |
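A quick, self-contained check of the idea behind the initializer above (the SVD of Gaussian noise yields an orthogonal factor), using plain NumPy; the shape is arbitrary and the snippet does not depend on the function itself:

import numpy as np
rng = np.random.default_rng(0)
noise = rng.normal(0.0, 1.0, (64, 32))
u, _, v = np.linalg.svd(noise, full_matrices=False)
w = u if u.shape == (64, 32) else v  # pick the factor with the requested shape
# Columns are orthonormal: W^T W should be (close to) the identity
print(np.allclose(w.T @ w, np.eye(32), atol=1e-6))  # True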
def get_rde_model(rde_version):
"""Get the model class of the specified rde_version.
Factory method to return the model class based on the specified RDE version
:param rde_version (str)
:rtype model: NativeEntity
"""
rde_version: semantic_version.Version = semantic_version.Version(rde_version) # noqa: E501
if rde_version.major == 1:
return NativeEntity1X
elif rde_version.major == 2:
return NativeEntity2X | 0ce32c2649ebdac84f6a000e40df8e85715733e1 | 14,906 |
import math
def pnorm(x, mu, sd):
"""
Normal distribution PDF
Args:
* scalar: variable
* scalar: mean
* scalar: standard deviation
Return type: scalar (probability density)
"""
return math.exp(- ((x - mu) / sd) ** 2 / 2) / (sd * 2.5) | 08896264db17493bc299a3e69b781c28429ef08f | 14,907 |
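The constant 2.5 in the denominator approximates sqrt(2 * pi) ≈ 2.5066, so the function above is only an approximate normal PDF. A small self-contained comparison against the exact density, assuming nothing beyond the standard library:

import math
def exact_pnorm(x, mu, sd):
    # Exact normal PDF for comparison
    return math.exp(-((x - mu) / sd) ** 2 / 2) / (sd * math.sqrt(2 * math.pi))
approx = math.exp(-((0.5 - 0.0) / 1.0) ** 2 / 2) / (1.0 * 2.5)
print(approx, exact_pnorm(0.5, 0.0, 1.0))  # ~0.3530 vs ~0.3521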
import numpy as np
import math
def getTransformToPlane(planePosition, planeNormal, xDirection=None):
"""Returns transform matrix from World to Plane coordinate systems.
Plane is defined in the World coordinate system by planePosition and planeNormal.
Plane coordinate system: origin is planePosition, z axis is planeNormal, x and y axes are orthogonal to z.
"""
# Determine the plane coordinate system axes.
planeZ_World = planeNormal/np.linalg.norm(planeNormal)
# Generate a plane Y axis by generating an orthogonal vector to
# plane Z axis vector by cross product plane Z axis vector with
# an arbitrarily chosen vector (that is not parallel to the plane Z axis).
if xDirection:
unitX_World = np.array(xDirection)
unitX_World = unitX_World/np.linalg.norm(unitX_World)
else:
unitX_World = np.array([0,0,1])
angle = math.acos(np.dot(planeZ_World,unitX_World))
# Normalize between -pi/2 .. +pi/2
if angle>math.pi/2:
angle -= math.pi
elif angle<-math.pi/2:
angle += math.pi
if abs(angle)*180.0/math.pi>20.0:
# unitX is not parallel to planeZ, we can use it
planeY_World = np.cross(planeZ_World, unitX_World)
else:
# unitX is parallel to planeZ, use unitY instead
unitY_World = np.array([0,1,0])
planeY_World = np.cross(planeZ_World, unitY_World)
planeY_World = planeY_World/np.linalg.norm(planeY_World)
    # X axis: orthogonal to the plane's Y axis and Z axis
planeX_World = np.cross(planeY_World, planeZ_World)
planeX_World = planeX_World/np.linalg.norm(planeX_World)
transformPlaneToWorld = np.row_stack((np.column_stack((planeX_World, planeY_World, planeZ_World, planePosition)),
(0, 0, 0, 1)))
transformWorldToPlane = np.linalg.inv(transformPlaneToWorld)
return transformWorldToPlane | 19073b3fefdb75a92ccc812538078e1d5ad72d75 | 14,908 |
def jp_runtime_dir(tmp_path):
"""Provides a temporary Jupyter runtime dir directory value."""
return mkdir(tmp_path, "runtime") | 17794c4b702d97d4040d89909452df5e4dd1344e | 14,909 |
import numpy as np
from numba import prange
def _softmax(X, n_samples, n_classes):
"""Derive the softmax of a 2D-array."""
maximum = np.empty((n_samples, 1))
for i in prange(n_samples):
maximum[i, 0] = np.max(X[i])
exp = np.exp(X - maximum)
sum_ = np.empty((n_samples, 1))
for i in prange(n_samples):
sum_[i, 0] = np.sum(exp[i])
return exp / sum_ | 8544a79dc52601e882383164ae34dd05d893ed2a | 14,910 |
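A tiny self-contained check of the row-wise, numerically stable softmax computed above (subtract each row's maximum before exponentiating), using plain NumPy:

import numpy as np
X = np.array([[1.0, 2.0, 3.0], [1000.0, 1000.0, 1000.0]])
shifted = X - X.max(axis=1, keepdims=True)  # avoids overflow in exp
probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
print(probs.sum(axis=1))  # [1. 1.] -- each row sums to one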
from collections import OrderedDict
from collections.abc import MutableMapping
def merge_dicts(dict1, dict2, dict_class=OrderedDict):
"""Merge dictionary ``dict2`` into ``dict1``"""
def _merge_inner(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], (dict, MutableMapping)) and isinstance(
dict2[k], (dict, MutableMapping)
):
yield k, dict_class(_merge_inner(dict1[k], dict2[k]))
else:
# If one of the values is not a dict, you can't continue
# merging it. Value from second dict overrides one in
# first and we move on.
yield k, dict2[k]
elif k in dict1:
yield k, dict1[k]
else:
yield k, dict2[k]
return dict_class(_merge_inner(dict1, dict2)) | b2013f888dfc3a1713153c7aa8a00ce4044fba07 | 14,911 |
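A quick usage example, assuming merge_dicts as defined above is in scope; nested keys are merged recursively and scalar conflicts are resolved in favour of the second dict:

base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
override = {"db": {"port": 5433}, "debug": True}
merged = merge_dicts(base, override)
print(merged["db"]["port"], merged["db"]["host"], merged["debug"])  # 5433 localhost True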
import numpy
def jaccard_overlap_numpy(box_a: numpy.ndarray, box_b: numpy.ndarray) -> numpy.ndarray:
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: Multiple bounding boxes, Shape: [num_boxes,4]
box_b: Single bounding box, Shape: [4]
Return:
        jaccard overlap: Shape: [box_a.shape[0]]"""
inter = intersect_numpy(box_a, box_b)
    area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])  # [num_boxes]
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])  # scalar
union = area_a + area_b - inter
return inter / union | 4fc79406724815a9d5982ac94344ca2e45993980 | 14,912 |
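A self-contained sketch of the same intersection-over-union computation for a set of boxes against a single reference box, written directly in NumPy so it does not depend on the intersect_numpy helper:

import numpy as np
box_a = np.array([[0.0, 0.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0]])  # [num_boxes, 4]
box_b = np.array([1.0, 1.0, 3.0, 3.0])                          # [4]
# Intersection rectangle per box: clip the overlap of the coordinate ranges
lt = np.maximum(box_a[:, :2], box_b[:2])
rb = np.minimum(box_a[:, 2:], box_b[2:])
wh = np.clip(rb - lt, a_min=0.0, a_max=None)
inter = wh[:, 0] * wh[:, 1]
area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])
area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
iou = inter / (area_a + area_b - inter)
print(iou)  # [0.14285714 0.        ]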
import glob
def find_pkg(pkg):
""" Find the package file in the repository """
candidates = glob.glob('/repo/' + pkg + '*.rpm')
if len(candidates) == 0:
print("No candidates for: '{0}'".format(pkg))
assert len(candidates) == 1
return candidates[0] | ac91f34ed7accd2c81e1c68e143319998de9cdf3 | 14,913 |
import random
def random_choice(lhs, ctx):
"""Element ℅
(lst) -> random element of a
(num) -> Random integer from 0 to a
"""
if vy_type(lhs) == NUMBER_TYPE:
return random.randint(0, lhs)
return random.choice(iterable(lhs, ctx=ctx)) | 9b4251a9b1d590742cab847035a2ef78c565af70 | 14,914 |
def ask(choices,
message="Choose one from [{choices}]{default}{cancelmessage}: ",
errormessage="Invalid input", default=None,
cancel=False, cancelkey='c',
cancelmessage='press {cancelkey} to cancel'):
"""
ask is a shorcut instantiate PickOne and use .ask method
"""
return PickOne(choices, message, errormessage, default, cancel, cancelkey,
cancelmessage).ask() | 09f1951a72800bb710167bbf0b2695b94a6370ec | 14,915 |
import os
import re
def get_shares(depth):
"""
    this is pretty janky, again, but simply grab the list of directories under /mnt/user0, an Unraid-specific shortcut to access shares
"""
rootdir = "/mnt/user0/"
shares = []
    pattern = r"('\w+')"
with os.scandir(rootdir) as p:
depth -= 1
for entry in p:
#yield entry.path
if entry.is_dir() and depth > 0:
sharematch = re.search(pattern, str(entry))
if sharematch:
# extract share name utilizing the grouping regex and remove single quotes
share_name = sharematch.group(1)
share_name = str(share_name.replace("'",""))
shares.append(share_name)
shares.sort()
return(shares) | 0079618cc30a4c02dec2441a64ae7aa6207c765a | 14,916 |
import argparse
def parse_cli_args():
"""Function to parse the command-line arguments for PETRARCH2."""
__description__ = """
PETRARCH2
(https://openeventdata.github.io/) (v. 1.0.0)
"""
aparse = argparse.ArgumentParser(prog='petrarch2',
description=__description__)
sub_parse = aparse.add_subparsers(dest='command_name')
parse_command = sub_parse.add_parser('parse', help=""" DEPRECATED Command to run the
PETRARCH parser. Do not use unless you've used it before. If you need to
process unparsed text, see the README""",
description="""DEPRECATED Command to run the
    PETRARCH parser. Do not use unless you've used it before. If you need to
process unparsed text, see the README""")
parse_command.add_argument('-i', '--inputs',
help='File, or directory of files, to parse.',
required=True)
parse_command.add_argument('-P', '--parsed', action='store_true',
default=False, help="""Whether the input
document contains StanfordNLP-parsed text.""")
parse_command.add_argument('-o', '--output',
help='File to write parsed events.',
required=True)
parse_command.add_argument('-c', '--config',
help="""Filepath for the PETRARCH configuration
file. Defaults to PETR_config.ini""",
required=False)
batch_command = sub_parse.add_parser('batch', help="""Command to run a batch
process from parsed files specified by
an optional config file.""",
description="""Command to run a batch
process from parsed files specified by
an optional config file.""")
batch_command.add_argument('-c', '--config',
help="""Filepath for the PETRARCH configuration
file. Defaults to PETR_config.ini""",
required=False)
batch_command.add_argument('-i', '--inputs',
help="""Filepath for the input XML file. Defaults to
data/text/Gigaword.sample.PETR.xml""",
required=False)
batch_command.add_argument('-o', '--outputs',
help="""Filepath for the input XML file. Defaults to
data/text/Gigaword.sample.PETR.xml""",
required=False)
nulloptions = aparse.add_mutually_exclusive_group()
nulloptions.add_argument(
'-na',
'--nullactors', action='store_true', default=False,
help="""Find noun phrases which are associated with a verb generating an event but are
not in the dictionary; an integer giving the maximum number of words follows the command.
Does not generate events. """,
required=False)
nulloptions.add_argument('-nv', '--nullverbs',
help="""Find verb phrases which have source and
targets but are not in the dictionary. Does not generate events. """,
required=False, action="store_true", default=False)
args = aparse.parse_args()
return args | 274404ae28150852c7602a21db0fba5241595695 | 14,917 |
def _check_eq(value):
"""Returns a function that checks whether the value equals a
particular integer.
"""
return lambda x: int(x) == int(value) | 4d2a02727afd90dbc012d252b01ed72f745dc564 | 14,918 |
def query_data(session, agency_code, start, end, page_start, page_stop):
""" Request D2 file data
Args:
session - DB session
agency_code - FREC or CGAC code for generation
start - Beginning of period for D file
end - End of period for D file
page_start - Beginning of pagination
page_stop - End of pagination
"""
rows = initial_query(session).\
filter(file_model.is_active.is_(True)).\
filter(file_model.awarding_agency_code == agency_code).\
filter(func.cast_as_date(file_model.action_date) >= start).\
filter(func.cast_as_date(file_model.action_date) <= end).\
slice(page_start, page_stop)
return rows | f555685ae4072aec14db29ee3b3425f1b0de5adb | 14,919 |
def ProfitBefTax(t):
"""Profit before Tax"""
return (PremIncome(t)
+ InvstIncome(t)
- BenefitTotal(t)
- ExpsTotal(t)
- ChangeRsrv(t)) | 4787d1c34698beb1e493968e5302defdf1416516 | 14,920 |
import speech_recognition as sr
def myCommand():
"""
listens to commands spoken through microphone (audio)
:returns text extracted from the speech which is our command
"""
r = sr.Recognizer()
with sr.Microphone() as source:
print('Say something...')
r.pause_threshold = 1
        r.adjust_for_ambient_noise(source, duration=1)  # calibrate to ambient noise for one second
audio = r.listen(source)
try:
command = r.recognize_google(audio).lower()
print('You said: ' + command + '\n')
#loop back to continue to listen for commands if unrecognizable speech is received
except sr.UnknownValueError:
print('....')
command = myCommand()
    except sr.RequestError as e:
        print("????")
        command = ''
return command | ce0b3c01efa1fe0aa704183e6293d4ed7e5170e9 | 14,921 |
def hammer(ohlc_df):
"""returns dataframe with hammer candle column"""
df = ohlc_df.copy()
df["hammer"] = (((df["high"] - df["low"])>3*(df["open"] - df["close"])) & \
((df["close"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6) & \
((df["open"] - df["low"])/(.001 + df["high"] - df["low"]) > 0.6)) & \
(abs(df["close"] - df["open"]) > 0.1* (df["high"] - df["low"]))
return df | 7445d20e27ad2e34702868eaad028c86e71ac3a7 | 14,922 |
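A short usage example with a toy OHLC frame, assuming pandas and the hammer function above are in scope; the numbers are made up so that only the first candle satisfies the hammer conditions:

import pandas as pd
ohlc = pd.DataFrame({
    "open":  [10.0, 10.0],
    "high":  [10.2, 10.6],
    "low":   [9.0, 9.9],
    "close": [10.15, 10.5],
})
print(hammer(ohlc)["hammer"].tolist())  # [True, False]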
def calcPhase(star,time):
"""
Calculate the phase of an orbit, very simple calculation but used quite a lot
"""
period = star.period
phase = time/period
return phase | 4b282d9e4fdb76a4358d895ba30b902328ce030c | 14,923 |
def advanced_search():
"""
Get a json dictionary of search filter values suitable for use with the javascript queryBuilder plugin
"""
filters = [
dict(
id='name',
label='Name',
type='string',
operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains']
),
dict(
id='old_name',
label='Old Name',
type='string',
operators=['equal', 'not_equal', 'begins_with', 'ends_with', 'contains']
),
dict(
id='label',
label='Label',
type='string',
operators=['contains']
),
dict(
id='qtext',
label='Question Text',
type='string',
operators=['contains']
),
dict(
id='probe',
label='Probe',
type='string',
operators=['contains']
),
dict(
id='data_source',
label='Data Source',
type='string',
input='select',
values=valid_filters['data_source'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='survey',
label='Survey',
type='string',
input='select',
values=valid_filters['survey'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='wave',
label='Wave',
type='string',
input='select',
values=valid_filters['wave'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='respondent',
label='Respondent',
type='string',
input='select',
values=valid_filters['respondent'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='focal_person',
label='Focal Person',
type='string',
input='select',
values={'Focal Child': 'Focal Child', 'Mother': 'Mother', 'Father': 'Father', 'Primary Caregiver': 'Primary Caregiver', 'Partner': 'Partner', 'Other': 'Other'},
operators=['contains', 'is_null', 'is_not_null']
),
dict(
id='topics',
label='Topics',
type='string',
input='select',
values=valid_filters['topic'],
operators=['contains'],
multiple=True,
plugin='selectpicker'
),
dict(
id='subtopics',
label='Sub-Topics',
type='string',
input='select',
values=valid_filters['subtopic'],
operators=['contains'],
multiple=True,
plugin='selectpicker'
),
dict(
id='scale',
label='Scale',
type='string',
input='select',
values=valid_filters['scale'],
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
multiple=True,
plugin='selectpicker'
),
dict(
id='n_cities_asked',
label='Asked in (N) cities',
type='integer',
operators=['equal', 'not_equal', 'less', 'less_or_equal', 'greater', 'greater_or_equal', 'in', 'not_in'],
input='select',
values=valid_filters['n_cities_asked'],
multiple=True,
plugin='selectpicker'
),
dict(
id='data_type',
label='Data Type',
type='string',
input='select',
values=valid_filters['data_type'],
operators=['equal', 'not_equal', 'in', 'not_in'],
multiple=True,
plugin='selectpicker'
),
dict(
id='in_FFC_file',
label='FFC variable',
type='string',
input='select',
operators=['equal', 'not_equal', 'in', 'not_in', 'is_null', 'is_not_null'],
values={'yes': 'Yes', 'no': 'No'},
multiple=True,
plugin='selectpicker'
)
]
return jsonify({"filters": filters}) | 73453941eff4aa03def530691b52b109b1fe0a76 | 14,924 |
import numpy as np
def rdp_rec(M, epsilon, dist=pldist):
"""
Simplifies a given array of points.
Recursive version.
:param M: an array
:type M: numpy array
:param epsilon: epsilon in the rdp algorithm
:type epsilon: float
:param dist: distance function
:type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pldist`
"""
dmax = 0.0
index = -1
for i in range(1, M.shape[0]):
d = dist(M[i], M[0], M[-1])
if d > dmax:
index = i
dmax = d
if dmax > epsilon:
r1 = rdp_rec(M[:index + 1], epsilon, dist)
r2 = rdp_rec(M[index:], epsilon, dist)
return np.vstack((r1[:-1], r2))
else:
return np.vstack((M[0], M[-1])) | 2518ef902bb9d7e696145e86746804a6fca115f8 | 14,925 |
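A minimal end-to-end sketch, assuming rdp_rec above is in scope and supplying a simple perpendicular point-to-line distance as the dist function (the real rdp package ships its own pldist helper; this one is an illustrative stand-in):

import numpy as np
def pldist(point, start, end):
    # Perpendicular distance from point to the line through start and end
    if np.all(start == end):
        return np.linalg.norm(point - start)
    d = end - start
    return abs(d[0] * (start[1] - point[1]) - d[1] * (start[0] - point[0])) / np.linalg.norm(d)
M = np.array([[0.0, 0.0], [1.0, 0.1], [2.0, -0.1], [3.0, 5.0], [4.0, 6.0], [5.0, 7.0]])
print(rdp_rec(M, epsilon=1.0, dist=pldist))  # simplified polyline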
from datetime import datetime
def secBetweenDates(dateTime0, dateTime1):
"""
    :param dateTime0: earlier date/time string, formatted '%Y/%m/%d %H:%M:%S'
    :param dateTime1: later date/time string, formatted '%Y/%m/%d %H:%M:%S'
    :return: The number of seconds between the two dates.
"""
dt0 = datetime.strptime(dateTime0, '%Y/%m/%d %H:%M:%S')
dt1 = datetime.strptime(dateTime1, '%Y/%m/%d %H:%M:%S')
timeDiff = ((dt1.timestamp()) - (dt0.timestamp()))
return timeDiff | d9e2f839d8a7c10fbde8009ea1f69db56a222426 | 14,926 |
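A quick usage example, assuming the function above is in scope:

print(secBetweenDates('2021/01/01 00:00:00', '2021/01/01 00:01:30'))  # 90.0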
def iframe_home(request):
""" Página inicial no iframe """
# Info sobre pedidos de fabricação
pedidosFabricacao = models.Pedidofabricacao.objects.filter(
hide=False
).exclude(
fkid_statusfabricacao__order=3
).order_by(
'-fkid_statusfabricacao', 'dt_fim_maturacao'
)
context = {
"fabricacaoPiece":"iframe/pieces/fabricacaoDetail.html",
"pedidosFabricacao":pedidosFabricacao
}
return render(request, "iframe/home.html", context) | b8db02d3b8a019bdc23fd369ab4ccc96e9b77437 | 14,927 |
import sys
def get_raw_KEGG(kegg_comp_ids=[], kegg_rxn_ids=[], krest="http://rest.kegg.jp",
n_threads=128, test_limit=0):
"""
Downloads all KEGG compound (C) and reaction (R) records and formats them
as MINE database compound or reaction entries. The final output is a tuple
containing a compound dictionary and a reaction dictionary.
Alternatively, downloads only a supplied list of compounds and reactions.
"""
s_out("\nDownloading KEGG data via %s/...\n" % krest)
# Acquire list of KEGG compound IDs
if not len(kegg_comp_ids):
s_out("Downloading KEGG compound list...")
r = rget("/".join([krest,"list","compound"]))
if r.status_code == 200:
for line in r.text.split("\n"):
if line == "": break # The end
kegg_comp_id = line.split()[0].split(":")[1]
kegg_comp_ids.append(kegg_comp_id)
else:
msg = "Error: Unable to download KEGG rest compound list.\n"
sys.exit(msg)
s_out(" Done.\n")
# Acquire list of KEGG reaction IDs
if not len(kegg_rxn_ids):
s_out("Downloading KEGG reaction list...")
r = rget("/".join([krest,"list","reaction"]))
if r.status_code == 200:
for line in r.text.split("\n"):
if line == "": break # The end
kegg_rxn_id = line.split()[0].split(":")[1]
kegg_rxn_ids.append(kegg_rxn_id)
else:
msg = "Error: Unable to download KEGG rest reaction list.\n"
sys.exit(msg)
s_out(" Done.\n")
# Limit download length, for testing only
if test_limit:
kegg_comp_ids = kegg_comp_ids[0:test_limit]
kegg_rxn_ids = kegg_rxn_ids[0:test_limit]
# Download compounds (threaded)
kegg_comp_dict = {}
print("Downloading KEGG compounds...")
for comp in get_KEGG_comps(kegg_comp_ids):
if comp == None:
continue
try:
kegg_comp_dict[comp['_id']] = comp
except KeyError:
s_err("Warning: '" + str(comp) + \
"' lacks an ID and will be discarded.\n")
continue
print("")
# Download reactions (threaded)
kegg_rxn_dict = {}
print("Downloading KEGG reactions...")
for rxn in get_KEGG_rxns(kegg_rxn_ids):
if rxn == None:
continue
try:
kegg_rxn_dict[rxn['_id']] = rxn
except KeyError:
s_err("Warning: '" + str(rxn) + \
"' lacks an ID and will be discarded.\n")
continue
print("")
# Re-organize compound reaction listing, taking cofactor role into account
s_out("Organizing reaction lists...")
sort_KEGG_reactions(kegg_comp_dict, kegg_rxn_dict)
s_out(" Done.\n")
s_out("KEGG download completed.\n")
return (kegg_comp_dict, kegg_rxn_dict) | f84c4307f578fc46651d8c17a4657a10c1a0c0a7 | 14,928 |
def inv(n: int, n_bits: int) -> int:
"""Compute the bitwise inverse.
Args:
n: An integer.
n_bits: The bit-width of the integers used.
Returns:
The binary inverse of the input.
"""
# We should only invert the bits that are within the bit-width of the
# integers we use. We set this mask to set the other bits to zero.
bit_mask = (1 << n_bits) - 1 # e.g. 0b111 for n_bits = 3
return ~n & bit_mask | 5be1eaf13490091096b8cd13fdbcdbbbe43760da | 14,929 |
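A quick check of the masked bitwise inverse, assuming inv above is in scope:

print(bin(inv(0b011, 3)))  # 0b100 -- only the low 3 bits are flipped
print(inv(0, 8))           # 255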
def _render_flight_addition_page(error):
"""
Helper to render the flight addition page
:param error: Error message to display on the page or None
:return: The rendered flight addition template
"""
return render_template("flights/add.html",
airlines=list_airlines(),
airports=list_airports(),
error=error) | 916b14fa3b829b4fa6e0720d64bcdd74aab476ce | 14,930 |
def get_node_index(glTF, name):
"""
Return the node index in the glTF array.
"""
if glTF.get('nodes') is None:
return -1
index = 0
for node in glTF['nodes']:
if node['name'] == name:
return index
index += 1
return -1 | cb0c6a727e9786467861d0ea622462264269814a | 14,931 |
def online_user_count(filter_user=None):
"""
Returns the number of users online
"""
return len(_online_users()) | 5ab03f1ca6738925847b338e956a0c8afbbc4d7d | 14,932 |
from itertools import count
from requests import Session
def get_latest_version_url(start=29, template="http://unicode.org/Public/cldr/{}/core.zip"):
"""Discover the most recent version of the CLDR dataset.
Effort has been made to make this function reusable for other URL numeric URL schemes, just override `start` and
`template` to iteratively search for the latest version of any other URL.
"""
latest = None
with Session() as http: # We perform several requests iteratively, so let's be nice and re-use the connection.
for current in count(start):
result = http.head(template.format(current)) # We only care if it exists or not, thus HEAD use here.
if result.status_code != 200:
return current - 1, latest # Propagate the version found and the URL for that version.
latest = result.url | a93ff3081e6e0a5a507d79a5340b69b2be670f88 | 14,933 |
import os
def delete_file(filename):
"""Remove a file"""
filename = os.path.basename(filename)
# FIXME: possible race condition
if os.path.exists(secure_path(cagibi_folder, filename)) and filename in files_info:
os.remove(secure_path(cagibi_folder, filename))
del files_info[filename]
save_config(files_info, filename="files.json")
return "Ok."
else:
abort(500, "File doesn't exist or is not in database.") | b896f2a8bbfb1a30d6726bc60a838295dadae1ce | 14,934 |
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
msg = f"the 'package' argument is required to perform a relative import for {name!r}"
raise TypeError(msg)
for character in name:
if character != '.':
break
level += 1
return _gcd_import(name[level:], package, level) | aeed1a00ec9149923c9be73a2346de4664ff98e3 | 14,935 |
import os
import re
def sanitize_filename(filename, replacement='_', max_length=200):
"""compute basename of filename. Replaces all non-whitelisted characters.
The returned filename is always a basename of the file."""
basepath = os.path.basename(filename).strip()
sane_fname = re.sub(r'[^\w\.\- ]', replacement, basepath)
while ".." in sane_fname:
sane_fname = sane_fname.replace('..', '.')
while " " in sane_fname:
sane_fname = sane_fname.replace(' ', ' ')
    if not len(sane_fname):
sane_fname = 'NONAME'
# limit filename length
if max_length:
sane_fname = sane_fname[:max_length]
return sane_fname | 7be376ae127118ca4fa94328b01dbbc2431c1e6f | 14,936 |
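A brief usage example, assuming the function above is in scope:

print(sanitize_filename('../reports/Q1 <final>?.pdf'))  # 'Q1 _final__.pdf'
print(sanitize_filename('a' * 300 + '.txt'))            # truncated to 200 characters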
import requests
from bs4 import BeautifulSoup
def get_image_links_from_imgur(imgur_url):
"""
Given an imgur URL, return a list of image URLs from it.
"""
if 'imgur.com' not in imgur_url:
raise ValueError('given URL does not appear to be an imgur URL')
urls = []
response = requests.get(imgur_url)
if response.status_code != 200:
raise ValueError('there was something wrong with the given URL')
soup = BeautifulSoup(response.text, 'html5lib')
# this is an album
if '/a/' in imgur_url:
matches = soup.select('.album-view-image-link a')
urls += [x['href'] for x in matches]
# directly linked image
elif 'i.imgur.com' in imgur_url:
urls.append(imgur_url)
# single-image page
else:
try:
urls.append(soup.select('.image a')[0]['href'])
except IndexError:
pass
# clean up image URLs
urls = [url.strip('/') for url in urls]
urls = ['http://{}'.format(url) if not url.startswith('http') else url
for url in urls]
return urls | 19d8f994cd1730c23fdf5d6105e8db916da67d15 | 14,937 |
import tensorflow as tf
def filter_ignored_images(y_true, y_pred, classification=False):
""" Filter those images which are not meaningful.
Args:
y_true: Target tensor from the dataset generator.
y_pred: Predicted tensor from the network.
classification: To filter for classification or
regression.
Returns: Filtered tensors.
"""
states = y_true[:, :, -1]
if classification:
indexes = tf.where(tf.math.not_equal(states, -1))
else:
indexes = tf.where(tf.math.equal(states, 1))
pred = y_pred
true = y_true[:, :, :-1]
true_filtered = tf.gather_nd(true, indexes)
pred_filtered = tf.gather_nd(pred, indexes)
return true_filtered, pred_filtered, indexes, states | a3f5e10c2f2961eafe734bc27c53e23294ee9eea | 14,938 |
def context_data_from_metadata(metadata):
""" Utility function transforming `metadata` into a context data dictionary.
Metadata may have been encoded at the client by `metadata_from_context_data`, or
it may be "normal" GRPC metadata. In this case, duplicate values are allowed;
they become a list in the context data.
"""
data = {}
for name, value in metadata:
if name.startswith(METADATA_PREFIX):
_, key = name.split(METADATA_PREFIX, 1)
data[key] = decode_value(value)
else:
if name in data:
try:
data[name].append(value)
except AttributeError:
data[name] = [data[name], value]
else:
data[name] = value
return data | 7b801b5835f7146a2ab163b83741113a039cb6bd | 14,939 |
import glob
import os
import xarray as xr
def open_debug_and_training_data(t, ids, training_data_path):
"""Open an concatenate the debugging and training data"""
debug_files = {
tag: glob.glob(os.path.join(path, '*.pkl'))
for tag, path in ids.items()
}
# open training
training_ds = xr.open_dataset(training_data_path)
train_ds_init_time = training_ds.isel(time=0)
args = [('Train', train_ds_init_time)]
for tag in debug_files:
dbg = open_debug_state_like_ds(debug_files[tag][t], train_ds_init_time)
args.append((tag, dbg))
return concat_datasets(args, name='tag') | 062b3db8bc58fe2c0457793ac5f03a550a7eca59 | 14,940 |
def plot_results_fit(
xs,
ys,
covs,
line_ax,
lh_ax=None,
outliers=None,
auto_outliers=False,
fit_includes_outliers=False,
report_rho=False,
):
"""Do the fit and plot the result.
Parameters
----------
    line_ax : axes to plot the best fit line
lh_ax : axes to plot the likelihood function
xs, ys, covs: the data to use (see return value of plot_results_scatter)
outliers : list of int
list of indices for which data will be ignored in the fitting.
If auto_outliers is True, then this data will only be ignored
for the first iteration. The manual outlier choice positions the
fit where were we want it. Then, these points are added back in,
and ideally, the automatic outlier rejection will reject them in
an objective way. This is to make sure that we are not guilty of
cherry picking.
auto_outliers : bool
Use auto outlier detection in linear_ortho_maxlh, and mark
outliers on plot (line ax). See outlier detection function for
criterion.
fit_includes_outliers : bool
Use the detected outliers in the fitting, despite them being outliers.
report_rho: draw a box with the correlation coefficient AFTER outlier removal
Returns
-------
outlier_idxs : array of int
Indices of points treated as outliers
"""
# fix ranges before plotting the fit
line_ax.set_xlim(line_ax.get_xlim())
line_ax.set_ylim(line_ax.get_ylim())
r = linear_ortho_fit.linear_ortho_maxlh(
xs,
ys,
covs,
line_ax,
sigma_hess=True,
manual_outliers=outliers,
auto_outliers=auto_outliers,
fit_includes_outliers=fit_includes_outliers,
)
m = r["m"]
b_perp = r["b_perp"]
sm = r["m_unc"]
sb_perp = r["b_perp_unc"]
outlier_idxs = r["outlier_idxs"]
b = linear_ortho_fit.b_perp_to_b(m, b_perp)
# The fitting process also indicated some outliers. Do the rest without them.
if fit_includes_outliers:
xs_used = xs
ys_used = ys
covs_used = covs
else:
xs_used = np.delete(xs, outlier_idxs, axis=0)
ys_used = np.delete(ys, outlier_idxs, axis=0)
covs_used = np.delete(covs, outlier_idxs, axis=0)
# Looking at bootstrap with and without outliers might be interesting.
# boot_cov_mb = linear_ortho_fit.bootstrap_fit_errors(xs_no_out, ys_no_out, covs_no_out)
# boot_sm, boot_sb = np.sqrt(np.diag(boot_cov_mb))
# sample the likelihood function to determine statistical properties
# of m and b
a = 2
m_grid, b_perp_grid, logL_grid = linear_ortho_fit.calc_logL_grid(
m - a * sm,
m + a * sm,
b_perp - a * sb_perp,
b_perp + a * sb_perp,
xs_used,
ys_used,
covs_used,
)
# Sample the likelihood of (m, b_perp) and convert to (m, b), so we
# can properly determine the covariance.
sampled_m, sampled_b_perp = linear_ortho_fit.sample_likelihood(
m, b_perp, m_grid, b_perp_grid, logL_grid, N=2000
)
sampled_b = linear_ortho_fit.b_perp_to_b(sampled_m, sampled_b_perp)
sample_cov_mb = np.cov(sampled_m, sampled_b)
m_unc = np.sqrt(sample_cov_mb[0, 0])
b_unc = np.sqrt(sample_cov_mb[1, 1])
mb_corr = sample_cov_mb[0, 1] / (m_unc * b_unc)
# print out results here
print("*** FIT RESULT ***")
print(f"m = {m:.2e} pm {m_unc:.2e}")
print(f"b = {b:.2e} pm {b_unc:.2e}")
print(f"correlation = {mb_corr:.2f}")
if lh_ax is not None:
linear_ortho_fit.plot_solution_neighborhood(
lh_ax,
logL_grid,
[min(b_perp_grid), max(b_perp_grid), min(m_grid), max(m_grid)],
m,
b_perp,
cov_mb=sample_cov_mb,
what="L",
extra_points=zip(sampled_b_perp, sampled_m),
)
# pearson coefficient without outliers (gives us an idea of how
# reasonable the trend is)
print("VVV-auto outlier removal-VVV")
if report_rho:
plot_rho_box(
line_ax,
xs_used,
ys_used,
covs_used,
)
# plot the fitted line
xlim = line_ax.get_xlim()
xp = np.linspace(xlim[0], xlim[1], 3)
yp = m * xp + b
line_ax.plot(xp, yp, color=FIT_COLOR, linewidth=2)
# plot sampled lines
linear_ortho_fit.plot_solution_linescatter(
line_ax, sampled_m, sampled_b_perp, color=FIT_COLOR, alpha=5 / len(sampled_m)
)
# if outliers, mark them
if len(outlier_idxs) > 0:
line_ax.scatter(
xs[outlier_idxs],
ys[outlier_idxs],
marker="x",
color="y",
label="outlier",
zorder=10,
)
# return as dict, in case we want to do more specific things in
# post. Example: gathering numbers and putting them into a table, in
# the main plotting script (paper_scatter.py).
# Also return covariance and samples, useful for determining error on y = mx + b.
results = {
"m": m,
"m_unc": m_unc,
"b": b,
"b_unc": b_unc,
"mb_cov": sample_cov_mb[0, 1],
"outlier_idxs": outlier_idxs,
"m_samples": sampled_m,
"b_samples": sampled_b,
}
return results | 695d7f47fa8319f9fd085b51e4bb031acee0079a | 14,941 |
def check_for_features(cmph5_file, feature_list):
"""Check that all required features present in the cmph5_file. Return
a list of features that are missing.
"""
aln_group_path = cmph5_file['AlnGroup/Path'][0]
missing_features = []
for feature in feature_list:
if feature not in cmph5_file[aln_group_path].keys():
missing_features.append(feature)
return missing_features | 2d51e1389e6519607001ad2b0006581e6a876ddd | 14,942 |
def inverse(a: int, b: int) -> int:
"""
    Calculates the modular inverse of a modulo b
    :param a: value to invert
    :param b: modulus
    :return: the inverse of a modulo b
"""
_, inv, _ = gcd_extended(a, b)
return inv % b | 5bde17e2526d5d8f940c8c384c2962dee8cb7188 | 14,943 |
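A self-contained sketch of the extended-Euclid helper the function above relies on, plus a quick check; the gcd_extended signature (returning gcd, x, y with a*x + b*y = gcd) is assumed from how its second return value is used, not taken from the original module:

def gcd_extended(a: int, b: int):
    # Returns (g, x, y) such that a*x + b*y = g = gcd(a, b)
    if a == 0:
        return b, 0, 1
    g, x1, y1 = gcd_extended(b % a, a)
    return g, y1 - (b // a) * x1, x1
g, x, _ = gcd_extended(3, 11)
print(x % 11)                # 4, since 3 * 4 = 12 ≡ 1 (mod 11)
print((3 * (x % 11)) % 11)   # 1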
def build_md2po_events(mkdocs_build_config):
"""Build dinamically those mdpo events executed at certain moments of the
Markdown file parsing extrating messages from pages, different depending on
active extensions and plugins.
"""
_md_extensions = mkdocs_build_config['markdown_extensions']
md_extensions = []
for ext in _md_extensions:
if not isinstance(ext, str):
if isinstance(ext, MkdocstringsExtension):
md_extensions.append('mkdocstrings')
else:
md_extensions.append(ext)
else:
md_extensions.append(ext)
def build_event(event_type):
parameters = {
'text': 'md2po_instance, block, text',
'msgid': 'md2po_instance, msgid, *args',
'link_reference': 'md2po_instance, target, *args',
}[event_type]
if event_type == 'text':
req_extension_conditions = {
'admonition': 're.match(AdmonitionProcessor.RE, text)',
'pymdownx.details': 're.match(DetailsProcessor.START, text)',
'pymdownx.snippets': (
're.match(SnippetPreprocessor.RE_ALL_SNIPPETS, text)'
),
'pymdownx.tabbed': 're.match(TabbedProcessor.START, text)',
'mkdocstrings': 're.match(MkDocsStringsProcessor.regex, text)',
}
body = ''
for req_extension, condition in req_extension_conditions.items():
if req_extension in md_extensions:
body += (
f' if {condition}:\n '
'md2po_instance.disabled_entries.append(text)\n'
' return False\n'
)
if not body:
return None
elif event_type == 'msgid':
body = (
" if msgid.startswith(': '):"
'md2po_instance._disable_next_line = True\n'
)
else: # link_reference
body = " if target.startswith('^'):return False;\n"
function_definition = f'def {event_type}_event({parameters}):\n{body}'
code = compile(function_definition, 'test', 'exec')
exec(code)
return locals()[f'{event_type}_event']
# load only those events required for the extensions
events_functions = {
event:
build_event(event) for event in ['text', 'msgid', 'link_reference']
}
events = {}
for event_name, event_function in events_functions.items():
if event_function is not None:
events[event_name] = event_function
return events | 5dd4cf7afe9168d4b110c197454b237f9267ce0e | 14,944 |
def is_three(x):
"""Return whether x is three.
>>> search(is_three)
3
"""
return x == 3 | a57266892eebf684945d0d841ede67965c751f1a | 14,945 |
def get_task_id(prefix, path):
"""Generate unique tasks id based on the path.
    :param prefix: prefix string
:type prefix: str
:param path: file path.
:type path: str
"""
task_id = "{}_{}".format(prefix, path.rsplit("/", 1)[-1].replace(".", "_"))
return get_unique_task_id(task_id) | 965b5df1d1cc80d489d4a003f453e53a96d4c38e | 14,946 |
def rot_permutated_geoms(geo, saddle=False, frm_bnd_key=[], brk_bnd_key=[], form_coords=[]):
""" convert an input geometry to a list of geometries
        corresponding to the rotational permutations of all the terminal groups
"""
gra = graph(geo, remove_stereo=True)
term_atms = {}
all_hyds = []
neighbor_dct = automol.graph.atom_neighbor_keys(gra)
# determine if atom is a part of a double bond
unsat_atms = automol.graph.unsaturated_atom_keys(gra)
if not saddle:
rad_atms = automol.graph.sing_res_dom_radical_atom_keys(gra)
res_rad_atms = automol.graph.resonance_dominant_radical_atom_keys(gra)
rad_atms = [atm for atm in rad_atms if atm not in res_rad_atms]
else:
rad_atms = []
gra = gra[0]
for atm in gra:
if gra[atm][0] == 'H':
all_hyds.append(atm)
for atm in gra:
if atm in unsat_atms and atm not in rad_atms:
pass
else:
if atm not in frm_bnd_key and atm not in brk_bnd_key:
#if atm not in form_coords:
nonh_neighs = []
h_neighs = []
neighs = neighbor_dct[atm]
for nei in neighs:
if nei in all_hyds:
h_neighs.append(nei)
else:
nonh_neighs.append(nei)
if len(nonh_neighs) < 2 and len(h_neighs) > 1:
term_atms[atm] = h_neighs
geo_final_lst = [geo]
for atm in term_atms:
hyds = term_atms[atm]
geo_lst = []
for geom in geo_final_lst:
geo_lst.extend(_swap_for_one(geom, hyds))
geo_final_lst = geo_lst
return geo_final_lst | 347e358b311725801587a2174f62084b066b414e | 14,947 |
from keras import backend as K
def wasserstein_loss(y_true, y_pred):
""" for more detail: https://github.com/keras-team/keras-contrib/blob/master/examples/improved_wgan.py"""
return K.mean(y_true * y_pred) | bc99572e298a565e68fe41d38e1becb72b4c304d | 14,948 |
from typing import Optional
def call_and_transact(
contract_function: ContractFunction, transaction_params: Optional[TxParams] = None,
) -> HexBytes:
""" Executes contract_function.{call, transaction}(transaction_params) and returns txhash """
# First 'call' might raise an exception
contract_function.call(transaction_params)
return contract_function.transact(transaction_params) | 851904f85d757faa548f8988ffcdfe97188de288 | 14,949 |
def bidirectional_rnn_model(input_dim, units, output_dim=29):
"""
Build a bidirectional recurrent network for speech
Params:
input_dim (int): Length of the input sequence.
units: output dimensions of the GRU
output_dim: output dimensions of the dense connected layers
Returns:
returns the RNN acoustic model
Code Attribution:
This function contains code that was updated and leveraged from the
Udacity Natural Language Processing Nano Degree Training material.
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
bidir_rnn = Bidirectional(GRU(units, return_sequences=True, implementation=2, name="bidir_rnn"))(input_data)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model | fa092c1a1fb982c9c1446953e691e70476002954 | 14,950 |
import re
def compress_sparql(text: str, prefix: str, uri: str) -> str:
"""
Compress given SPARQL query by replacing all instances of the given uri with the given prefix.
:param text: SPARQL query to be compressed.
:param prefix: prefix to use as replace.
:param uri: uri instance to be replaced.
:return: compressed SPARQL query.
"""
bordersremv = lambda matchobj: prefix + ":" + re.sub(f"[<>]|({uri})", "", matchobj.group(0))
return re.sub(f"<?({uri}).*>?", bordersremv, text) | b86ceebadb262730fb4dec90b43e04a09d9c9541 | 14,951 |
from operator import mod
def easter(g_year):
"""Return fixed date of Easter in Gregorian year g_year."""
century = quotient(g_year, 100) + 1
shifted_epact = mod(14 +
11 * mod(g_year, 19) -
quotient(3 * century, 4) +
quotient(5 + (8 * century), 25), 30)
adjusted_epact = ((shifted_epact + 1)
if ((shifted_epact == 0) or ((shifted_epact == 1) and
(10 < mod(g_year, 19))))
else shifted_epact)
paschal_moon = (fixed_from_gregorian(gregorian_date(g_year, APRIL, 19)) -
adjusted_epact)
return kday_after(SUNDAY, paschal_moon) | e084e9a0ae755065bf7704da6aa1506894ad958e | 14,952 |
def _with_generator_error_translation(code_to_exception_class_func, func):
"""Same wrapping as above, but for a generator"""
@funcy.wraps(func)
def decorated(*args, **kwargs):
"""Execute a function, if an exception is raised, change its type if necessary"""
try:
for x in func(*args, **kwargs):
yield x
except grpc.RpcError as exc:
raise_exception_from_grpc_exception(code_to_exception_class_func, exc)
return decorated | fbd0491b2f7d68ecfaa5405a02203c3c8294bdc2 | 14,953 |
import requests
def openei_api_request(
data,
):
"""Query the OpenEI.org API.
Args:
data (dict or OrderedDict): key-value pairs of parameters to post to the
API.
Returns:
dict: the json response
"""
    # define the OpenEI API URL, then construct a GET-style URL as a string to
    # hash to look up/save to cache
    url = "https://openei.org/services/api/content_assist/recommend"
prepared_url = requests.Request("GET", url, params=data).prepare().url
cached_response_json = get_from_cache(prepared_url)
if cached_response_json is not None:
# found this request in the cache, just return it instead of making a
# new HTTP call
return cached_response_json | c02ef34fd3fc8327a0debc954eb1a211dc161978 | 14,954 |
def generate_content(vocab, length):
"""Generate a random passage.
Pass in a dictionary of words from a text document and a specified
length (number of words) to return a randomized string.
"""
new_content = []
pair = find_trigram(vocab)
while len(new_content) < length:
third = find_trigram(vocab, pair)
trigram = (pair + " " + third).split()
new_content.extend(*[trigram]) # unpack trigrams and add to content
next_one = find_trigram(vocab, trigram[1] + " " + trigram[2])
if len(next_one.split()) > 1:
pair = next_one
else:
next_two = find_trigram(vocab, trigram[2] + " " + next_one)
pair = next_one + " " + next_two
return " ".join(new_content) | 5897c507281ffccddfb28880a0c5678b0fab7363 | 14,955 |
def transform_generic(inp: dict, out, met: ConfigurationMeta) -> list:
""" handle_generic is derived from P -> S, where P and S are logic expressions.
This function will use a generic method to transform the logic expression P -> S
into multiple mathematical constraints. This is done by first converting r into
a logic expression Ç, then Ç is converted into CNF and last into constraints.
"""
support_variable_name = met.support_variable_name
P = None
if inp['condition'] and inp['condition']['sub_conditions']:
P = ""
evaluated_sub_conditions = []
for sub_condition in inp['condition']['sub_conditions']:
if sub_condition['relation'] == "ALL":
concat = " & ".join(sub_condition['components'])
            elif sub_condition['relation'] == "ANY":
concat = " | ".join(sub_condition['components'])
else:
raise Exception(f"Not implemented for relation type: '{sub_condition.relation}'")
if not concat == '':
evaluated_sub_conditions.append(f"({concat})")
if inp['condition']['relation'] == "ALL":
P = " & ".join(evaluated_sub_conditions)
elif inp['condition']['relation'] == "ANY":
P = " | ".join(evaluated_sub_conditions)
else:
raise Exception(f"Not implemented for relation type: '{inp['condition']['relation']}'")
cmps = inp['consequence']['components']
if inp['consequence']['rule_type'] in ["REQUIRES_ALL", "PREFERRED"]:
S = " & ".join(cmps)
elif inp['consequence']['rule_type'] == "REQUIRES_ANY":
S = " | ".join(cmps)
elif inp['consequence']['rule_type'] == "FORBIDS_ALL":
_cmps = [f"~{x}" for x in cmps]
S = " & ".join(_cmps)
elif inp['consequence']['rule_type'] == "REQUIRES_EXCLUSIVELY":
if P == None:
return transform_exactly_one(inp=inp, out=out, met=met)
condition = []
for i in range(len(cmps)):
clause = [f"{cmps[j]}" if i == j else f"~{cmps[j]}" for j in range(len(cmps))]
condition.append(" & ".join(clause))
S = " | ".join([f"({x})" for x in condition])
else:
raise Exception(f"Not implemented for rule type '{inp['consequence']['rule_type']}'")
expression = S if not P else f"({P}) >> ({S})"
constraints = fake_expression_to_constraints(
expression=expression,
support_variable_name=support_variable_name,
)
_constraints = []
for constraint, support_vector_value in constraints:
constraint[support_variable_name] = support_vector_value
_constraints.append(constraint)
return _constraints | 0b87c001a94fb9ad3198651d0948bddf7d477b1b | 14,956 |
def generate_mprocess_from_name(
c_sys: CompositeSystem, mprocess_name: str, is_physicality_required: bool = True
) -> MProcess:
"""returns MProcess object specified by name.
Parameters
----------
c_sys : CompositeSystem
CompositeSystem of MProcess.
mprocess_name : str
name of the MProcess.
is_physicality_required: bool = True
whether the generated object is physicality required, by default True
Returns
-------
MProcess
MProcess object.
"""
# check mprocess name
single_mprocess_names = mprocess_name.split("_")
mprocess_name_list = get_mprocess_names_type1() + get_mprocess_names_type2()
for single_mprocess_name in single_mprocess_names:
if single_mprocess_name not in mprocess_name_list:
raise ValueError(
f"mprocess_name is out of range. mprocess_name={single_mprocess_name}"
)
# generate mprocess
hss = generate_mprocess_hss_from_name(mprocess_name, c_sys)
mprocess = MProcess(
hss=hss, c_sys=c_sys, is_physicality_required=is_physicality_required
)
return mprocess | 8be8f79610e424342fae3c6ddbccf2177d0941b1 | 14,957 |
import numpy as np
import vtk
from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk
def convert_polydata_to_image_data(poly, ref_im, reverse=True):
"""
Convert the vtk polydata to imagedata
Args:
poly: vtkPolyData
ref_im: reference vtkImage to match the polydata with
Returns:
output: resulted vtkImageData
"""
# Have to copy to create a zeroed vtk image data
ref_im_zeros = vtk.vtkImageData()
ref_im_zeros.DeepCopy(ref_im)
ref_im_zeros.GetPointData().SetScalars(numpy_to_vtk(np.zeros(vtk_to_numpy(ref_im_zeros.GetPointData().GetScalars()).shape)))
ply2im = vtk.vtkPolyDataToImageStencil()
ply2im.SetTolerance(0.05)
ply2im.SetInputData(poly)
ply2im.SetOutputSpacing(ref_im.GetSpacing())
ply2im.SetInformationInput(ref_im_zeros)
ply2im.Update()
stencil = vtk.vtkImageStencil()
stencil.SetInputData(ref_im_zeros)
if reverse:
stencil.ReverseStencilOn()
stencil.SetStencilData(ply2im.GetOutput())
stencil.Update()
output = stencil.GetOutput()
return output | 75a8780d287b5c2f5b2cc81d735859d56a5f9641 | 14,958 |
def matplot(x, y, f, vmin=None, vmax=None, ticks=None, output='output.pdf', xlabel='X', \
ylabel='Y', diverge=False, cmap='viridis', **kwargs):
"""
Parameters
----------
    x, y : 1D arrays
        coordinates used to set the plot extent [xmin, xmax, ymin, ymax].
    f : 2D array
        array to be plotted.
    Returns
    -------
    fig, ax : the created figure and axes; the figure is also saved to `output`.
To be deprecated. Please use imshow.
"""
fig, ax = plt.subplots(figsize=(4,3))
set_style()
    if diverge:
        cmap = "RdBu_r"  # only override the user-supplied cmap for diverging data
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
extent = [xmin, xmax, ymin, ymax]
cntr = ax.imshow(f.T, aspect='auto', cmap=cmap, extent=extent, \
origin='lower', vmin=vmin, vmax=vmax, **kwargs)
ax.set_aspect('auto')
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.colorbar(cntr, ticks=ticks)
ax.xaxis.set_ticks_position('bottom')
# fig.subplots_adjust(wspace=0, hspace=0, bottom=0.14, left=0.14, top=0.96, right=0.94)
if output is not None:
fig.savefig(output, dpi=1200)
return fig, ax | f50d7f7d8ebcb87a993001042f48afcc69616393 | 14,959 |
def createNewClasses(df, sc, colLabel):
"""
Divide the data into classes
Parameters
----------
df: Dataframe
Spark Dataframe
sc: SparkContext object
SparkContext object
colLabel: List
Items that considered Label
logs_dir: string
Directory for log file
Return
----------
colCat: List
Items that is considered categories
colNum: List
Items that is considered numerical values
"""
rdd = sc.parallelize(df.dtypes)
colCat = rdd.map(lambda i: i[0] if (i[1]=='string' or i[1]=='boolean' and i[0] not in colLabel) else None).filter(lambda i: i != None).collect()
colNum = rdd.map(lambda i: i[0] if (i[1]=='double' and i[0] not in colLabel) else None).filter(lambda i: i != None).collect()
print(f"Label: {colLabel} \nCategories: {colCat}\nNumerical: {colNum}")
return colCat, colNum | e28e5240bca65bd602234b6560b58d934012f530 | 14,960 |
def scan_usb(device_name=None):
""" Scan for available USB devices
:param device_name: The device name (MX6DQP, MX6SDL, ...) or USB device VID:PID value
:rtype list
"""
if device_name is None:
objs = []
devs = RawHid.enumerate()
for cls in SDP_CLS:
for dev in devs:
for value in cls.DEVICES.values():
if dev.vid == value[0] and dev.pid == value[1]:
objs += [cls(dev)]
return objs
else:
if ':' in device_name:
vid, pid = device_name.split(':')
devs = RawHid.enumerate(int(vid, 0), int(pid, 0))
return [SdpBase(dev) for dev in devs]
else:
for cls in SDP_CLS:
if device_name in cls.DEVICES:
vid = cls.DEVICES[device_name][0]
pid = cls.DEVICES[device_name][1]
devs = RawHid.enumerate(vid, pid)
return [cls(dev) for dev in devs]
return [] | 0178537f65d46b5e1333ef4ee8d590c68d619019 | 14,961 |
def _boolrelextrema(
data, comparator, axis=0, order: tsutils.IntGreaterEqualToOne = 1, mode="clip"
):
"""Calculate the relative extrema of `data`.
Relative extrema are calculated by finding locations where
comparator(data[n],data[n+1:n+order+1]) = True.
Parameters
----------
data: ndarray
comparator: function
function to use to compare two data points. Should take 2 numbers as
arguments
axis: int, optional
axis over which to select from `data`
order: int, optional
How many points on each side to require a `comparator`(n,n+x) = True.
mode: string, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default 'clip'. See numpy.take
Returns
-------
extrema: ndarray
Indices of the extrema, as boolean array of same shape as data. True
for an extrema, False else.
See Also
--------
argrelmax, argrelmin
Examples
--------
>>> testdata = np.array([1,2,3,2,1])
>>> _boolrelextrema(testdata, np.greater, axis=0).tolist()
[False, False, True, False, False]
"""
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs)
for shift in range(1, order + 1):
plus = np.take(data, locs + shift, axis=axis, mode=mode)
results &= comparator(main, plus)
minus = np.take(data, locs - shift, axis=axis, mode=mode)
results &= comparator(main, minus)
if ~results.any():
return results
return results | b9315675845b27d77b39e1b0a8facd8cdda955c1 | 14,962 |
from bs4 import BeautifulSoup
def parse_description(offer_markup):
""" Searches for description if offer markup
:param offer_markup: Body from offer page markup
:type offer_markup: str
:return: Description of offer
:rtype: str
"""
html_parser = BeautifulSoup(offer_markup, "html.parser")
return html_parser.find(id="textContent").text.replace(" ", "").replace("\n", " ").replace("\r", "").strip() | 30464ca8ac313f4998fb067b46c3ec17e567da50 | 14,963 |
def return_args():
"""Return a parser object."""
_parser = ArgumentParser(add_help=True, description=(
"Translate msgid's from a POT file with Google Translate API"))
_parser.add_argument('-f', '--file', action='store', required=True,
help="Get the POT file name.")
_parser.add_argument('-o', '--output_file', action='store', required=True,
help="Get name to save the new PO file.")
_parser.add_argument('-t', '--translate', action='store', required=True,
help="Get language to translate to.")
_parser.add_argument('-i', '--imprecise', action='store_true',
help="Save translated texts as fuzzy(draft).")
_parser.add_argument('-e', '--error', action='store_true',
help="Print translate errors if exist.")
_parser.add_argument('-p', '--print_process', action='store_true',
help="Print translate process.")
return _parser | 45b608f25cbf9823dcd2dcaa070eaf97daf52895 | 14,964 |
def get_df_tau(plot_dict, gen_err):
"""
Return a dataframe of the kendall tau's coefficient for different methods
"""
# tau, p_value = compute_tau(result_dict[err], plot_dict['avg_clusters'], inverse=True)
# taus, pvalues, names, inverses = [tau], [p_value], ['cc'], ['True']
taus, pvalues, names, inverses = [], [], [], []
for key, value in plot_dict.items():
value = np.array(value)
# if key in ['ranks', 'stable_ranks', 'avg_clusters', 'modularity']:
# continue
for i in range(value.shape[1]):
if key == "Schatten":
if i == 0: # Schatten 1-norm, no inversion
inverse_flag = False
elif i == 1:
continue # skip trivial 2-norm
else:
inverse_flag = True
else:
inverse_flag = True
tau, p_value = compute_tau(gen_err, value[:, i], inverse=inverse_flag)
taus.append(tau)
pvalues.append(p_value)
names.append(key + "_" + str(i + 1))
inverses.append(inverse_flag)
kendal_cor = pd.DataFrame(
{"metric": names, "kendall_tau": taus, "pvalue": pvalues, "inverse": inverses}
)
return kendal_cor | 642af1f675aa1b323f8221cebb81aa98e4a9d188 | 14,965 |
import queue as Queue

def traverse(graph, priorities):
"""Return a sequence of all the nodes in the graph by greedily choosing high 'priority' nodes
before low 'priority' nodes."""
reachable = PriorityContainer()
visited = {}
# start by greedily choosing the highest-priority node
current_node = max(priorities.items(), key=lambda i: i[1])[0]
visited_count = 0
while current_node:
# visit node
visited[current_node] = visited_count
visited_count += 1
# update visit-able nodes
for neighbor in graph[current_node]['neighbors']:
if neighbor not in reachable and neighbor not in visited:
reachable.put((priorities[neighbor], neighbor))
try:
current_priority, current_node = reachable.get(False)
except Queue.Empty:
current_priority = current_node = None
return visited | 255c14348a1fb7ba33e85ad36537529434ce2865 | 14,966 |
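# Hedged sketch for the traverse snippet above: `PriorityContainer` is not defined here. A
# minimal stand-in that supports what `traverse` relies on (put, get(False) raising
# Queue.Empty, and `in` checks on node names, with high priorities coming out first) could
# look like this; the real project may implement it differently.
class PriorityContainer:
    def __init__(self):
        self._queue = Queue.PriorityQueue()
        self._members = set()

    def put(self, item):
        priority, node = item
        # negate the priority so the highest-priority node is retrieved first
        self._queue.put((-priority, node))
        self._members.add(node)

    def get(self, block=True):
        neg_priority, node = self._queue.get(block)
        self._members.discard(node)
        return -neg_priority, node

    def __contains__(self, node):
        return node in self._members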
def build_dataset(dataset_name, set_name, root_path, transforms=None):
"""
:param dataset_name: the name of dataset
:param root_path: data is usually located under the root path
:param set_name: "train", "valid", "test"
:param transforms:
:return:
"""
if "cameo_half_year" in dataset_name:
_, data_type, max_length, depth, profile_type = dataset_name.split("-")
max_length = int(max_length)
depth = int(depth)
dataset = CAMEO_HALF_YEAR(root=root_path, data_type=data_type,
transform=transforms, max_length_limit=max_length, depth=depth, profile_type=profile_type)
else:
raise Exception("Can not build unknown image dataset: {}".format(dataset_name))
return dataset | eb7a3090e03c95031f04d6f13f165c02eef8850c | 14,967 |
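# Hedged usage sketch for build_dataset above: the dataset name, root path, and set name
# below are made-up placeholders that only illustrate how the dash-separated name
# ("<name>-<data_type>-<max_length>-<depth>-<profile_type>") is parsed.
train_set = build_dataset(
    dataset_name="cameo_half_year-profile-256-3-full",
    set_name="train",
    root_path="/path/to/cameo_data",
)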
def remove_characters(text, characters_to_remove=None):
"""
Remove various auxiliary characters from a string.
This function uses a hard-coded string of 'undesirable'
characters (if no such string is provided),
and removes them from the text provided.
Parameters:
-----------
text : str
A piece of text to remove characters from.
characters_to_remove : str
A string of 'undesirable' characters to remove from the text.
Returns:
--------
text : str
A piece of text with undesired characters removed.
"""
# chars = "\\`*_{}[]()<>#+-.!$%@"
if characters_to_remove is None:
characters_to_remove = "\\`*_{}[]()<>#+!$%@"
for c in characters_to_remove:
if c in text:
text = text.replace(c, '')
return text | d2864983bfa3d58c631ff91a8719d45392f4bf42 | 14,968 |
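# Quick example for remove_characters above, using the default character set.
print(remove_characters("Hello *world*! (see <docs>)"))  # -> "Hello world see docs"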
def changePrev ( v, pos, findPat, changePat, bodyFlag = 1 ):
"""
changePrev: use string.rfind() to change text in a Leo outline.
v the vnode to start the search.
pos the position within the body text of v to start the search.
findPat the search string.
changePat the replacement string.
bodyFlag true: change body text. false: change headline text.
    returns a tuple (v,pos) showing where the change occurred.
returns (None,0) if no further match in the outline was found.
Note: if (v,pos) is a tuple returned previously from changePrev,
changePrev(v,pos-len(findPat),findPat,changePat)
changes the next matching string.
"""
n = len(findPat)
v, pos = findPrev(v, pos, findPat, bodyFlag)
    if v is None:
return None, 0
if bodyFlag:
s = v.bodyString()
# s[pos:pos+n] = changePat
s = s[:pos] + changePat + s[pos+n:]
v.setBodyStringOrPane(s)
else:
s = v.headString()
#s[pos:pos+n] = changePat
s = s[:pos] + changePat + s[pos+n:]
v.setHeadStringOrHeadline(s)
return v, pos | 5c45b08b6aba5f7e699e1864e9a44af457b46d17 | 14,969 |
import logging
def put_mint(update: Update, context: CallbackContext) -> int:
""" Returns the token data to user """
session_uuid = context.user_data['session_uuid']
user = get_chat_info(update)
creator_username = user['username']
# Start DB Session to get addr
session = Session()
sesh_exists = session.query(Tokens).filter(
Tokens.session_uuid == session_uuid).scalar() is not None
if sesh_exists:
# Add a check for the UTXO, bail if not found
token_data = session.query(Tokens).filter(
Tokens.session_uuid == session_uuid).one()
logging.info(f'Searching for the UTXOs in address: {token_data}')
bot_payment_addr = token_data.bot_payment_addr
update.message.reply_text("Checking for confirmed transactions.")
utxo = check_wallet_utxo(bot_payment_addr)
if utxo:
sesh = {'session_uuid': session_uuid}
update.message.reply_text("OK, I found the Transaction!")
            update.message.reply_text(
                "Please grab a coffee as I build your NFT. "
                "I'll send it back to you with your change in ADA."
)
update.message.reply_text("Initiating NFT Minting process....")
minted = mint(**sesh)
if minted:
update.message.reply_text(
f"Holey Baloney! \n your token is minted, @{creator_username}."
)
update.message.reply_text(
f"The token should arrive in your wallet any second now."
)
update.message.reply_text(
f"Thank you for using the *NFT-TELEGRAM-BOT*. \n Have a Daedalus day."
)
return ConversationHandler.END
else:
update.message.reply_text(
f"Something failed, please try not to panic, "
f"but you may have hit a bug. Sorry."
)
return ConversationHandler.END
else:
update.message.reply_text(
f"Sorry, but there is no UTXO to use yet. "
f"Transaction not found."
f"Please try running /MINT again in a few moments."
)
return ConversationHandler.END
update.message.reply_text(
f"Sorry, but there is no PRE_MINT session yet. "
f"Please try /start again in a few moments."
)
return ConversationHandler.END | 5a7263db4a42e2a8bf050803f0dbeed4a1fa5625 | 14,970 |
from collections import defaultdict

def trait_colors(rows):
"""Make tags for HTML colorizing text."""
backgrounds = defaultdict(lambda: next(BACKGROUNDS))
for row in rows:
for trait in row['traits']:
key = trait['trait']
if key not in ('heading',):
_ = backgrounds[key]
return backgrounds | 851783d8fa5acca3b9c7f1f3ea1e59466f056ad0 | 14,971 |
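# Hedged usage sketch for trait_colors above: `BACKGROUNDS` is an external iterator of
# color values; the palette and the rows structure below are assumptions made only to show
# the expected input shape (a list of dicts, each with a 'traits' list of {'trait': ...}).
from itertools import cycle
BACKGROUNDS = cycle(['#ffdddd', '#ddffdd', '#ddddff'])  # assumed palette for illustration
rows = [
    {'traits': [{'trait': 'sex'}, {'trait': 'heading'}]},
    {'traits': [{'trait': 'body_length'}, {'trait': 'sex'}]},
]
print(trait_colors(rows))  # e.g. {'sex': '#ffdddd', 'body_length': '#ddffdd'}; 'heading' is skipped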
import json
def webhook():
""" CI with GitHub & PythonAnywhere
Author : Aadi Bajpai
https://medium.com/@aadibajpai/deploying-to-pythonanywhere-via-github-6f967956e664 """
try:
event = request.headers.get('X-GitHub-Event')
# Get payload from GitHub webhook request
payload = request.get_json()
x_hub_signature = request.headers.get('X-Hub-Signature')
# Check if signature is valid
if not github.is_valid_signature(x_hub_signature, request.data):
abort(401)
if event == "ping":
return json.dumps({'msg': 'Ping Successful!'})
if event != "push":
return json.dumps({'msg': "Wrong event type"})
repo = git.Repo(my_directory)
branch = payload['ref'][11:]
        # Check whether this is a non-staging deployment
if my_directory != "/home/stagingapi/mysite":
if branch != 'master':
return json.dumps({'msg': 'Not master; ignoring'})
repo.git.reset('--hard')
origin = repo.remotes.origin
try:
origin.pull(branch)
utility.write("tests/gitstats.txt",
f'{branch} ,' + str(payload["after"]))
return f'Updated PythonAnywhere successfully with branch: {branch}'
except Exception:
origin.pull('master')
utility.write("tests/gitstats.txt",
f'{branch} ,' + str(payload["after"]))
return 'Updated PythonAnywhere successfully with branch: master'
except Exception as error_message:
return utility.handle_exception(
"Github Update Server", {error_message}) | 998a82897b2aa36dfef6e8125b34964b47218621 | 14,972 |
import argparse
def cli_parser() -> argparse.Namespace:
"""
Parser for the command line interface.
"""
fw_parser = argparse.ArgumentParser(
fromfile_prefix_chars="@", description="FileWriter Starter"
)
fw_parser.add_argument(
"-f",
"--filename",
metavar="filename",
type=str,
required=True,
help="Name of the output file, e.g., `<filename>.nxs`.",
)
fw_parser.add_argument(
"-j",
"--job-id",
metavar="job_id",
type=str,
help="The job identifier of the currently running file-writer job. "
"The job identifier should be a valid UUID.",
)
fw_parser.add_argument(
"-c",
"--config",
metavar="json_config",
type=str,
required=True,
help="Path to JSON config file.",
)
fw_parser.add_argument(
"-b",
"--broker",
metavar="kafka_broker",
type=str,
default="localhost:9092",
        help="Kafka broker address as host:port.",
)
fw_parser.add_argument(
"-t",
"--command-status-topic",
metavar="consume_topic",
type=str,
required=True,
        help="Name of the Kafka topic to listen to for commands and to send status to.",
)
fw_parser.add_argument(
"-p",
"--job-pool-topic",
metavar="job_pool_topic",
type=str,
required=True,
help="The Kafka topic that the available file-writers"
" are listening to for write jobs.",
)
fw_parser.add_argument(
"--timeout",
metavar="ack_timeout",
type=float,
default=5,
help="How long to wait for timeout on acknowledgement.",
)
fw_parser.add_argument(
"--stop",
metavar="stop_writing",
type=float,
help="How long the file will be written.",
)
args = fw_parser.parse_args()
return args | f7f896209fbc7e20e421927dc94452f7e170c0f9 | 14,973 |
def pension_drawdown(months, rate, monthly_drawdown, pension_pot):
""" Returns the balance left in the pension pot after drawing an income for the given nr of months """
return monthly_growth(months, rate, -monthly_drawdown, pension_pot) | 2b2d811bbe134eca71d2965de4b06a62a71ccf85 | 14,974 |
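# Hedged sketch for pension_drawdown above: `monthly_growth` is not shown in this snippet.
# A plausible version (compound the balance monthly at the annual rate and add the monthly
# cash flow) is given purely for illustration; the real helper may differ.
def monthly_growth(months, rate, monthly_payment, balance):
    monthly_rate = rate / 12.0
    for _ in range(months):
        balance = balance * (1 + monthly_rate) + monthly_payment
    return balance

# Example: draw 1,000 a month for 10 years from a 300,000 pot growing at 4% a year.
remaining_pot = pension_drawdown(120, 0.04, 1000, 300000)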
def bytesToUInt(bytestring):
"""Unpack 4 byte string to unsigned integer, assuming big-endian byte order"""
return _doConv(bytestring, ">", "I") | f3d645d71b3503b8e5b4b052fe33e839a82c3782 | 14,975 |
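import struct

# Hedged sketch for bytesToUInt above: `_doConv` is not part of this snippet. A minimal
# struct-based version matching the call signature (value, byte-order prefix, type code)
# could be the following; the real helper may handle more cases.
def _doConv(bytestring, byteorder, formatcharacter):
    return struct.unpack(byteorder + formatcharacter, bytestring)[0]

# Example: the big-endian bytes 00 00 01 00 decode to 256.
print(bytesToUInt(b"\x00\x00\x01\x00"))  # 256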
def use(*authenticator_classes):
""" A decorator to attach one or more :class:`Authenticator`'s to the decorated class.
Usage:
from thorium import auth
@auth.use(BasicAuth, CustomAuth)
class MyEngine(Endpoint):
...
OR
@auth.use(BasicAuth)
@auth.use(CustomAuth)
class MyEngine(Endpoint):
...
:param authenticator_classes: One or more :class:`Authenticator` class definitions.
"""
def wrapped(cls):
if not cls._authenticator_classes:
cls._authenticator_classes = []
cls._authenticator_classes.extend(authenticator_classes)
return cls
return wrapped | 27aeb7711c842540a1ed77a76cebeb61e0342f1e | 14,976 |
def list_standard_models():
"""Return a list of all the StandardCellType classes available for this simulator."""
standard_cell_types = [obj for obj in globals().values() if isinstance(obj, type) and issubclass(obj, standardmodels.StandardCellType)]
    for cell_class in list(standard_cell_types):
        try:
            create(cell_class)
        except Exception as e:
            print("Warning: %s is defined, but produces the following error: %s" % (cell_class.__name__, e))
            standard_cell_types.remove(cell_class)
return [obj.__name__ for obj in standard_cell_types] | 7c7d36c5931340ddca5dcad91b34b9e9deb6ef1b | 14,977 |
from math import pi

def AchievableTarget(segments,target,Speed):
"""
The function checks if the car can make the required curvature to reach the target, taking into account its speed
    Return [id, radius, direction]
id = 1 -> achievable else id =0
direction = 1 -> right direction = -1 -> left
"""
Rminamaxlat=Speed**2/parameters.Max_accelerationlateral
Rminepsilonmax=parameters.tsb*Speed**2/(parameters.epsilonmax*pi/180)+parameters.Car_length/(parameters.epsilonmax*pi/180)
Rmin=max(Rminamaxlat,Rminepsilonmax)
Rmax=abs(CurvatureRadius(target))/3
xp=target[0]
yp=target[1]
Ns=len(segments)
#coeficient
K=0
if xp!=0:
K=yp/xp
#Calculating which way the car will turn
direction=1 #right
if yp<0:
direction=-1 #left
#If the radius of curvature is greater than the minimum possible then the objective is not reachable
if Rmin>Rmax:
return(0,Rmax,direction)
#Adding possible radius values between the minimum and the maximum in the list R []
R=[]
Nr=100
i=0
while i<Nr:
R.append(Rmax-i*(Rmax-Rmin)/(Nr-1))
i+=1
    #Checking all possible radii
    i=0
    while i<Nr:
        r=R[i]
        yc=direction*r
        #If the car and the segment are aligned then the arc is a straight line without problems
        if yp==0:
            return(1,Rmax,1)
        if xp!=0:
            xinter=(-2*K*yc)/(1+K**2)
            yinter=K*xinter
        else:
            xinter=0
            yinter=direction*2*r
        #If the arc intersects none of the segments, this radius is achievable
        j=0
        while (j<Ns and IntersectionArc([xinter,yinter],segments[j])!=1):
            j+=1
        if j==Ns:
            return(1,r,direction)
        #Otherwise try the next (smaller) radius
        i+=1
    #No radius between Rmax and Rmin avoids the obstacles
    return(0,Rmin,direction) | 1608847c224b5315cd668d650b52b9a6184c84ac | 14,978
from xml.etree import ElementTree
import dask
import dask.array as da
import os
import numpy as np
import pandas as pd
def read_silixa_files_routine_v4(
filepathlist,
timezone_netcdf='UTC',
silent=False,
load_in_memory='auto'):
"""
Internal routine that reads Silixa files.
Use dtscalibration.read_silixa_files function instead.
The silixa files are already timezone aware
Parameters
----------
load_in_memory
filepathlist
timezone_netcdf
silent
Returns
-------
"""
# translate names
tld = {
'ST': 'st',
'AST': 'ast',
'REV-ST': 'rst',
'REV-AST': 'rast',
'TMP': 'tmp'}
# Open the first xml file using ET, get the name space and amount of data
xml_tree = ElementTree.parse(filepathlist[0])
namespace = get_xml_namespace(xml_tree.getroot())
logtree = xml_tree.find('./{0}wellLog'.format(namespace))
logdata_tree = logtree.find('./{0}logData'.format(namespace))
# Amount of datapoints is the size of the logdata tree
nx = len(logdata_tree)
sep = ':'
ns = {'s': namespace[1:-1]}
# Obtain metadata from the first file
attrs = read_silixa_attrs_singlefile(filepathlist[0], sep)
# Add standardised required attributes
attrs['isDoubleEnded'] = attrs['customData:isDoubleEnded']
double_ended_flag = bool(int(attrs['isDoubleEnded']))
attrs['forwardMeasurementChannel'] = attrs[
'customData:forwardMeasurementChannel']
if double_ended_flag:
attrs['backwardMeasurementChannel'] = attrs[
'customData:reverseMeasurementChannel']
else:
attrs['backwardMeasurementChannel'] = 'N/A'
chFW = int(attrs['forwardMeasurementChannel']) - 1 # zero-based
if double_ended_flag:
chBW = int(attrs['backwardMeasurementChannel']) - 1 # zero-based
else:
# no backward channel is negative value. writes better to netcdf
chBW = -1
# obtain basic data info
if double_ended_flag:
data_item_names = [
attrs['logCurveInfo_{0}:mnemonic'.format(x)] for x in range(0, 6)]
else:
data_item_names = [
attrs['logCurveInfo_{0}:mnemonic'.format(x)] for x in range(0, 4)]
nitem = len(data_item_names)
ntime = len(filepathlist)
# print summary
if not silent:
print(
'%s files were found, each representing a single timestep' % ntime)
print(
'%s recorded vars were found: ' % nitem
+ ', '.join(data_item_names))
print('Recorded at %s points along the cable' % nx)
if double_ended_flag:
print('The measurement is double ended')
else:
print('The measurement is single ended')
# obtain timeseries from data
timeseries_loc_in_hierarchy = [
('wellLog', 'customData', 'acquisitionTime'),
('wellLog', 'customData', 'referenceTemperature'),
('wellLog', 'customData', 'probe1Temperature'),
('wellLog', 'customData', 'probe2Temperature'),
('wellLog', 'customData', 'referenceProbeVoltage'),
('wellLog', 'customData', 'probe1Voltage'),
('wellLog', 'customData', 'probe2Voltage'),
(
'wellLog', 'customData', 'UserConfiguration',
'ChannelConfiguration', 'AcquisitionConfiguration',
'AcquisitionTime', 'userAcquisitionTimeFW')]
if double_ended_flag:
timeseries_loc_in_hierarchy.append(
(
'wellLog', 'customData', 'UserConfiguration',
'ChannelConfiguration', 'AcquisitionConfiguration',
'AcquisitionTime', 'userAcquisitionTimeBW'))
timeseries = {
item[-1]: dict(loc=item, array=np.zeros(ntime, dtype=np.float32))
for item in timeseries_loc_in_hierarchy}
# add units to timeseries (unit of measurement)
for key, item in timeseries.items():
if f'customData:{key}:uom' in attrs:
item['uom'] = attrs[f'customData:{key}:uom']
else:
item['uom'] = ''
# Gather data
arr_path = 's:' + '/s:'.join(['wellLog', 'logData', 'data'])
@dask.delayed
def grab_data_per_file(file_handle):
"""
Parameters
----------
file_handle
Returns
-------
"""
with open_file(file_handle, mode='r') as f_h:
eltree = ElementTree.parse(f_h)
arr_el = eltree.findall(arr_path, namespaces=ns)
if not len(arr_el) == nx:
raise ValueError(
'Inconsistent length of x-dimension'
+ '\nCheck if files are mixed up, or if the number of '
+ 'data points vary per file.')
# remove the breaks on both sides of the string
# split the string on the comma
arr_str = [arr_eli.text.split(',') for arr_eli in arr_el]
return np.array(arr_str, dtype=float)
data_lst_dly = [grab_data_per_file(fp) for fp in filepathlist]
data_lst = [
da.from_delayed(x, shape=(nx, nitem), dtype=float)
for x in data_lst_dly]
data_arr = da.stack(data_lst).T # .compute()
# Check whether to compute data_arr (if possible 25% faster)
data_arr_cnk = data_arr.rechunk({0: -1, 1: -1, 2: 'auto'})
if load_in_memory == 'auto' and data_arr_cnk.npartitions <= 5:
if not silent:
print('Reading the data from disk')
data_arr = data_arr_cnk.compute()
elif load_in_memory:
if not silent:
print('Reading the data from disk')
data_arr = data_arr_cnk.compute()
else:
if not silent:
print('Not reading the data from disk')
data_arr = data_arr_cnk
data_vars = {}
for name, data_arri in zip(data_item_names, data_arr):
if name == 'LAF':
continue
if tld[name] in dim_attrs:
data_vars[tld[name]] = (
['x', 'time'], data_arri, dim_attrs[tld[name]])
else:
raise ValueError(
'Dont know what to do with the'
+ ' {} data column'.format(name))
# Obtaining the timeseries data (reference temperature etc)
_ts_dtype = [(k, np.float32) for k in timeseries]
_time_dtype = [
('filename_tstamp', np.int64), ('minDateTimeIndex', '<U29'),
('maxDateTimeIndex', '<U29')]
ts_dtype = np.dtype(_ts_dtype + _time_dtype)
@dask.delayed
def grab_timeseries_per_file(file_handle):
"""
Parameters
----------
file_handle
Returns
-------
"""
with open_file(file_handle, mode='r') as f_h:
eltree = ElementTree.parse(f_h)
out = []
for k, v in timeseries.items():
# Get all the timeseries data
if 'userAcquisitionTimeFW' in v['loc']:
# requires two namespace searches
path1 = 's:' + '/s:'.join(v['loc'][:4])
val1 = eltree.findall(path1, namespaces=ns)
path2 = 's:' + '/s:'.join(v['loc'][4:6])
val2 = val1[chFW].find(path2, namespaces=ns)
out.append(val2.text)
elif 'userAcquisitionTimeBW' in v['loc']:
# requires two namespace searches
path1 = 's:' + '/s:'.join(v['loc'][:4])
val1 = eltree.findall(path1, namespaces=ns)
path2 = 's:' + '/s:'.join(v['loc'][4:6])
val2 = val1[chBW].find(path2, namespaces=ns)
out.append(val2.text)
else:
path = 's:' + '/s:'.join(v['loc'])
val = eltree.find(path, namespaces=ns)
out.append(val.text)
# get all the time related data
startDateTimeIndex = eltree.find(
's:wellLog/s:minDateTimeIndex', namespaces=ns).text
endDateTimeIndex = eltree.find(
's:wellLog/s:maxDateTimeIndex', namespaces=ns).text
if isinstance(file_handle, tuple):
file_name = os.path.split(file_handle[0])[-1]
else:
file_name = os.path.split(file_handle)[-1]
tstamp = np.int64(file_name[10:-4])
out += [tstamp, startDateTimeIndex, endDateTimeIndex]
return np.array(tuple(out), dtype=ts_dtype)
ts_lst_dly = [grab_timeseries_per_file(fp) for fp in filepathlist]
ts_lst = [
da.from_delayed(x, shape=tuple(), dtype=ts_dtype) for x in ts_lst_dly]
ts_arr = da.stack(ts_lst).compute()
for name in timeseries:
if name in dim_attrs:
data_vars[name] = (('time',), ts_arr[name], dim_attrs[name])
else:
data_vars[name] = (('time',), ts_arr[name])
# construct the coordinate dictionary
coords = {
'x': ('x', data_arr[0, :, 0], dim_attrs['x']),
'filename': ('time', [os.path.split(f)[1] for f in filepathlist]),
'filename_tstamp': ('time', ts_arr['filename_tstamp'])}
maxTimeIndex = pd.DatetimeIndex(ts_arr['maxDateTimeIndex'])
dtFW = ts_arr['userAcquisitionTimeFW'].astype('timedelta64[s]')
if not double_ended_flag:
tcoords = coords_time(
maxTimeIndex,
timezone_netcdf=timezone_netcdf,
dtFW=dtFW,
double_ended_flag=double_ended_flag)
else:
dtBW = ts_arr['userAcquisitionTimeBW'].astype('timedelta64[s]')
tcoords = coords_time(
maxTimeIndex,
timezone_netcdf=timezone_netcdf,
dtFW=dtFW,
dtBW=dtBW,
double_ended_flag=double_ended_flag)
coords.update(tcoords)
return data_vars, coords, attrs | 9c8351400770263cfa54c8ec0e759ae765a78694 | 14,979 |
import math
def mutual_information(co_oc, oi, oj, n):
"""
:param co_oc: Number of co occurrences of the terms oi and oj in the corpus
:param oi: Number of occurrences of the term oi in the corpus
:param oj: Number of occurrences of the term oi in the corpus
:param n: Total number of words in the corpus
:return:
"""
e = (oi * oj)/n
return math.log2(co_oc/e) | 76c27295c7e757282573eab71f2bb7cfd3df74cb | 14,980 |
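# Worked example for mutual_information above: with n = 1000 total words, oi = 50, oj = 40
# and co_oc = 10, the expected co-occurrence count is e = (50 * 40) / 1000 = 2, so the score
# is log2(10 / 2) = log2(5) ≈ 2.32, i.e. the pair co-occurs about five times more often than
# chance would predict.
print(mutual_information(co_oc=10, oi=50, oj=40, n=1000))  # ≈ 2.3219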
from typing import Dict

import numpy as np
import pandas as pd

def random(population: pd.DataFrame,
num_parents_per_nationality: Dict[str, int]) -> pd.DataFrame:
"""Selects parents of next generation randomly
Args:
population (pd.DataFrame):
Current population dataframe.
num_parents_per_nationality (Dict[str, int]):
Dictionary indicating how many parents should come from each nation.
Returns:
df (pd.DataFrame):
Parents of next generation.
"""
df = pd.DataFrame()
national_origins = np.unique(population['birth_nation'])
for nation in national_origins:
tdf = population.loc[population['birth_nation'] == nation]
# TODO see effect of setting random state
tdf = tdf.sample(n=num_parents_per_nationality[nation],
random_state=123)
        df = pd.concat([df, tdf])  # DataFrame.append was removed in pandas 2.0
return df | 95ea78e4399d8fafca0fdbf2769fdc2ebaa5ccb3 | 14,981 |
import h5py
import numpy as np
import neuropythy as ny

def interpolate_GLMdenoise_to_fsaverage_prior(freesurfer_sub, prf_props, save_stem,
GLMdenoise_path=None, plot_class=0, plot_bootstrap=0,
target_varea=1, interp_method='linear'):
"""interpolate a scanning session's GLMdenoise models results to fsaverage space
In order to combine data across subjects, we need them to have
equivalent vertices (that is, vertices we can consider 'the same'
and average together). We follow the method done by Benson et al,
2019's analysis of the retinotopic data in the Human Connectome
Project: interpolate each subject's results to the locations in
fsaverage, in the visual field (the Benson et al, 2014 retinotopic
atlas defines the retinotopic coordinates for fsaverage).
For the subject's retinotopic information, you should almost
certainly pass the outputs of the Bayesian retinotopy, as a
dictionary. For the paths used in this project, the following is how
to create this dictionary (setting the BIDS_DIR and subject
variables beforehand):
```
template = (f'{BIDS_dir}/derivatives/prf_solutions/{subject}/bayesian_posterior/'
'{hemi}.inferred_{data}.mgz')
prf_props = {}
for h in ['lh', 'rh']:
prf_props[h] = {}
names = zip(['varea', 'eccen', 'angle'], ['visual_area', 'eccentricity', 'polar_angle'])
for k, prop in names:
prf_props[h][prop] = ny.load(template.format(hemi=h, data=k))
```
The following steps are taken:
- grab and shape the 'models' field from the GLMdenoise results.mat
file, add to the prf_props dict
- for each hemisphere:
- add all properties from the prf_props dict to the neuropythy
mesh
- grab the fsaverage retinotopic prior (from the neuropythy package)
- for each bootstrap:
- interpolate the amplitude estimates for all models from the
subject's retinotopic space to the fsaverage one
- insert all these interpolated estimates into a properly-sized
array
- concatenate this array across hemispheres and save as an hdf5 file
(the array is now the right size for the GLMdenoise results field,
but doesn't look like quite right because the GLMdenoise results
field also contains the fitted HRF)
The main output is:
- save_stem+"_models.hdf5": a HDF5 file containing the array (as
field 'models') with shape (num_bootstraps, num_classes, 1,
num_vertices, 1) containing the subject/session's amplitude
estimates (for each bootstrap and class) interpolate to the
fsaverage retinotopic prior space. It has this shape because
that's the shape of the GLMdenoise output, and we'll want to mimic
that. We use a HDF5 file because this will be very large, and a
HDF5 file is more compact than a .npy file
We also produce several outputs to help check what's going on.
The first two are plots which show the same amplitude estimates, one
in the subject's original retinotopic space, and one interpolated to
the fsaverage retinotopic prior space. These two should look like
they're conveying the same information, just sampling at different
locations.
- save_stem+"_models_b{plot_bootstrap}_c{plot_class}_space-subject.png":
a plot showing the amplitude estimates for the stimulus class
`plot_class` and the bootstrap `plot_bootstrap` as a scatter plot,
with x, y locations coming from the subject's pRFs and the values
from the output of GLMdenoise
- save_stem+"_models_b{plot_bootstrap}_c{plot_class}_space-prior.png":
a plot showing the amplitude estimates for the stimulus class
`plot_class` and the bootstrap `plot_bootstrap` as a scatter plot,
with x, y locations coming from the fsaverage pRF prior and the
interpolated values.
We then produce four outputs to examine any voxels that have zero
amplitudes. GLMdenoise shouldn't produce voxels that have an
amplitude estimate of exactly zero, so this is often a sign that
something has gotten messed up. For each of the following, if there
are no voxels with zero amplitude, we create a text file (replacing
the .png extension with .txt) that contains the string "No voxels
have amplitude zero" instead of the plot.
- save_stem+"_zero_check_b{plot_bootstrap}_coords-polar_space-subject":
a seaborn pairplot showing the polar angle and eccentricity
locations of all voxels that have any zero amplitudes prior to
interpolation.
- save_stem+"_zero_check_b{plot_bootstrap}_coords-cartesian_space-subject":
a seaborn pairplot showing the x and y locations of all voxels
that have any zero amplitudes prior to interpolation.
- save_stem+"_zero_check_b{plot_bootstrap}_coords-polar_space-prior":
a seaborn pairplot showing the polar angle and eccentricity
locations of all voxels that have any zero amplitudes after
interpolation
    - save_stem+"_zero_check_b{plot_bootstrap}_coords-cartesian_space-prior":
a seaborn pairplot showing the x and y locations of all voxels
that have any zero amplitudes after interpolation.
The expectation is:
- There should never be any voxels with amplitude zero prior to
interpolation (so none of the `space-subject` plots should be
created)
- if `interp_method='linear'`, the only voxels with amplitude zero
after interpolation should be at the extremes of the visual field
(so along the visual meridian and far periphery / with min and max
possible eccentricity values)
- if `interp_method='nearest'`, no voxels should have amplitude zero
after interpolation
Parameters
----------
freesurfer_sub : str
The freesurfer subject to use. This can be either the name
(e.g., wlsubj045; in which case the environmental variable
SUBJECTS_DIR must be set) or a path to the freesurfer folder. It
will be passed directly to neuropythy.freesurfer_subject, so see
the docstring of that function for more details
prf_props : dict
dictionary containing the arrays with prf properties to add to
the neuropythy freesurfer subject. This should contain two keys,
'lh' and 'rh', corresponding to the left and right hemispheres,
respectively. Each of those should have a dictionary containing
identical keys, which should be some subset of 'visual_area',
'eccentricity', and 'polar_angle'. If any of those are not
included in prf_props, we will use the corresponding property
from the freesurfer directory (and if they aren't present there,
this function will fail). The intended use is that this will
contain the results of the Bayesian retinotopy, which we'll use
as the pRF parameters in subject-space.
save_stem : str
the stem of the path to save things at (i.e., should not end in
the extension)
GLMdenoise_path : str or None, optional
path to the results.mat file created by GLMdenoise for this
subject/session. If None, we assume prf_props already contains
the 'models_bootstrap_{i:02d}' keys
plot_class : int, optional
we create a plot showing the amplitudes for one class, one
bootstrap. this specifies which class to plot.
plot_bootstrap : int, optional
we create a plot showing the amplitudes for one class, one
bootstrap. this specifies which bootstrap to plot.
target_varea : int, optional
The visual area we're interpolating. because we interpolate in
the visual field, we can only do one visual area at a time
(because otherwise they'll interfere with each other)
interp_method : {'nearest', 'linear'}, optional
whether to use linear or nearest-neighbor interpolation. See the
docstring of `neuropythy.mesh.interpolate` for more details
Returns
-------
interp_all : np.array
the numpy array containing the interpolated amplitude estimates,
of shape (num_bootstraps, num_classes, 1, num_vertices, 1). note
that num_vertices here is the number of vertices in the entire
fsaverage brain, not just `target_varea` (but all vertices not
in that visual area will be 0).
"""
sub = ny.freesurfer_subject(freesurfer_sub)
if GLMdenoise_path is not None:
prf_props = add_GLMdenoise_field_to_props(GLMdenoise_path, prf_props)
num_bootstraps = len([b for b in prf_props['lh'].keys() if 'bootstrap' in b])
if num_bootstraps != 100:
raise Exception(f"There should be 100 bootstraps, but there are {num_bootstraps}!")
priors = {}
idx = {}
submesh = {}
for hemi in ['lh', 'rh']:
priors[hemi] = dict(zip(['x', 'y', 'varea', 'polar_angle', 'eccentricity'],
get_fsaverage_coords(hemi, target_varea)))
# we need to figure out which vertices correspond to our
# targeted visual area for constructing the overall array (which
# should mimic the results of GLMdenoise run on the full
# brain). we grab the first element of np.where because this is
# a 1d array
idx[hemi] = np.where(priors[hemi]['varea'] == target_varea)[0]
if hemi == 'lh':
mesh = sub.lh.with_prop(**prf_props['lh'])
else:
mesh = sub.rh.with_prop(**prf_props['rh'])
submesh[hemi] = mesh.white_surface.submesh(mesh.white_surface.mask(('visual_area',
target_varea)))
# grab the vmin and vmax, for the target varea, in the plotted
# bootstrap, across both hemispheres and all classes. We use 1st and
# 99th percnetile because the min/max are often much larger than the
# rest of the distribution
vmin = min(np.percentile(submesh['lh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 1),
np.percentile(submesh['rh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 1))
vmax = max(np.percentile(submesh['lh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 99),
np.percentile(submesh['rh'].properties[f'models_bootstrap_{plot_bootstrap:02d}'][:, plot_class], 99))
interpolated_all = []
zero_check_data = {'submesh': {}, 'interpolated': {}, 'original': {}}
for hemi in ['lh', 'rh']:
# this should be of shape (num_bootstraps, num_classes, 1,
# num_vertices, 1), in order to mimic the output of
# GLMdenoise. num_vertices will be different between the two
# hemispheres, everything else will be the same. Note that we
# use priors[hemi][varea] to get the number of vertices, NOT
# prf_props[hemi]['models_bootstrap_00'], because we want the
# number in fsaverage-space, not in subject-space
_, num_classes = prf_props[hemi]['models_bootstrap_00'].shape
interpolated_hemi = np.zeros((num_bootstraps, num_classes, 1,
priors[hemi]['varea'].shape[0], 1))
x, y = ny.as_retinotopy(submesh[hemi], 'geographical')
submesh_tmp = submesh[hemi].copy(coordinates=[x, y])
zero_check_data['submesh'][hemi] = submesh_tmp.with_prop(x=x, y=y).properties
# neuropythy's interpolate can only work with 2d arrays, so we
# need to do each bootstrap separate
for i in range(num_bootstraps):
interp_models = submesh_tmp.interpolate([priors[hemi]['x'], priors[hemi]['y']],
f'models_bootstrap_{i:02d}', method=interp_method)
            # for now, there's a bug where neuropythy isn't
            # inserting NaNs in the extrapolated locations, so we do
# that manually. they'll be exactly 0
interp_models[interp_models.sum(1)==0] = np.nan
interpolated_hemi[i, :, 0, idx[hemi], 0] = interp_models
if i == plot_bootstrap:
fig = plot_amplitudes(x, y, submesh_tmp.properties[f'models_bootstrap_{i:02d}'],
hemi, f'bootstrap {i}', 'subject', plot_class, vmin=vmin,
vmax=vmax)
fig.savefig(save_stem + f"_models_{hemi}_b{i:02d}_c{plot_class:02d}_space-subject.png")
fig = plot_amplitudes(priors[hemi]['x'], priors[hemi]['y'], interp_models, hemi,
f'bootstrap {i}', 'fsaverage', plot_class, vmin=vmin,
vmax=vmax)
fig.savefig(save_stem + f"_models_{hemi}_b{i:02d}_c{plot_class:02d}_space-prior.png")
zero_check_data['interpolated'][hemi] = interp_models
zero_check_data['original'][hemi] = submesh_tmp.properties[f'models_bootstrap_{i:02d}']
interpolated_all.append(interpolated_hemi)
for a, p, s, n in zip([zero_check_data['original'], zero_check_data['interpolated']],
[zero_check_data['submesh'], priors], ['subject', 'prior'],
['zero', 'nan']):
for v, c in zip([['polar_angle', 'eccentricity'], ['x', 'y']], ['polar', 'cartesian']):
fig = plot_zero_check(a, p, v, nan_check=(n == 'nan'))
if not isinstance(fig, str):
fig.savefig(save_stem + f"_{n}_check_b{i:02d}_coords-{c}_space-{s}.png")
else:
print(fig)
print(fig, file=open(save_stem + f"_zero_check_b{i:02d}_coords-{c}_space-{s}.txt", 'w'))
# concatenate into one array (vertices are on dimension 3)
interpolated_all = np.concatenate(interpolated_all, 3)
# and save
with h5py.File(save_stem + '_models.hdf5', 'w') as f:
f.create_dataset('results/models', data=interpolated_all, compression='gzip')
return interpolated_all | 87c20c832ae93f3de91632c5cab06c803218dcc3 | 14,982 |
def is_dark(color: str) -> bool:
"""
    Whether the given color is dark or bright
Taken from https://github.com/ozh/github-colors
"""
l = 0.2126 * int(color[0:2], 16) + 0.7152 * int(color[2:4], 16) + 0.0722 * int(color[4:6], 16)
    return l / 255 <= 0.65 | 80fe2c4bd42b20fedff11ef200ae5ca246d4489d | 14,983
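# Worked example for is_dark above: colors are 6-digit hex strings without the leading "#".
# "ffffff" has luminance 255 (255 / 255 = 1.0 > 0.65), so it is not dark; "000080" (navy)
# has luminance 0.0722 * 128 ≈ 9.2, far below the threshold, so it is dark.
print(is_dark("ffffff"))  # False
print(is_dark("000080"))  # True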
from datetime import datetime
def get_date_input_examples(FieldClass) -> list:
"""
Generate examples for a valid input value.
:param FieldClass: InputField
:return: List of input examples.
"""
r = []
for f in FieldClass.input_formats:
now = datetime.now()
r.append(now.strftime(f))
return r | e0b73aac49ac2bbd6423faa3e5e5ebfb81c2d7b7 | 14,984 |
import sparse_ir

def sve_logistic():
"""SVE of the logistic kernel for Lambda = 42"""
print("Precomputing SVEs for logistic kernel ...")
return {
10: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10)),
42: sparse_ir.compute_sve(sparse_ir.LogisticKernel(42)),
10_000: sparse_ir.compute_sve(sparse_ir.LogisticKernel(10_000))
} | 774365aa9f17c66ea8a3296a08fe1d0972c82ad6 | 14,985 |
def post_team_iteration(id, team, organization=None, project=None, detect=None): # pylint: disable=redefined-builtin
"""Add iteration to a team.
:param id: Identifier of the iteration.
:type: str
:param team: Name or ID of the team.
:type: str
"""
organization, project = resolve_instance_and_project(detect=detect, organization=organization, project=project)
client = get_work_client(organization)
team_context = TeamContext(project=project, team=team)
team_setting_iteration = TeamSettingsIteration(id=id)
try:
team_iteration = client.post_team_iteration(iteration=team_setting_iteration, team_context=team_context)
return team_iteration
except AzureDevOpsServiceError as ex:
_handle_empty_backlog_iteration_id(ex=ex, client=client, team_context=team_context) | 78648eba53e50be7023ac88f9a4ffe2635c74d5b | 14,986 |
import collections
def JoinTypes(types):
"""Combine a list of types into a union type, if needed.
Leaves singular return values alone, or wraps a UnionType around them if there
are multiple ones, or if there are no elements in the list (or only
NothingType) return NothingType.
Arguments:
types: A list of types. This list might contain other UnionTypes. If
so, they are flattened.
Returns:
A type that represents the union of the types passed in. Order is preserved.
"""
queue = collections.deque(types)
seen = set()
new_types = []
while queue:
t = queue.popleft()
if isinstance(t, pytd.UnionType):
queue.extendleft(reversed(t.type_list))
elif isinstance(t, pytd.NothingType):
pass
elif t not in seen:
new_types.append(t)
seen.add(t)
if len(new_types) == 1:
return new_types.pop()
elif any(isinstance(t, pytd.AnythingType) for t in new_types):
return pytd.AnythingType()
elif new_types:
return pytd.UnionType(tuple(new_types)) # tuple() to make unions hashable
else:
return pytd.NothingType() | 0d43551a2882fa75a1827302811670fefe19433c | 14,987 |
import numpy as np

def calc_nominal_strike(traces: np.ndarray):
"""
Gets the start and ending trace of the fault and ensures order for largest lon value first
Parameters
----------
traces: np.ndarray
Array of traces of points across a fault with the format [[lon, lat, depth],...]
"""
# Extract just lat and lon for the start and end of the traces
trace_start, trace_end = [traces[0][0], traces[0][1]], [
traces[-1][0],
traces[-1][1],
]
# Ensures correct order
if trace_start[0] < trace_end[0]:
return np.asarray([trace_end]), np.asarray([trace_start])
else:
return np.asarray([trace_start]), np.asarray([trace_end]) | 21c5c2de8c136ac44cbea401dce79c84007fc4ac | 14,988 |
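# Example for calc_nominal_strike above: for a trace running roughly west to east, the
# returned "start" is the point with the larger longitude.
traces = np.array([[172.0, -43.5, 0.0], [172.5, -43.6, 5.0], [173.0, -43.7, 10.0]])
start, end = calc_nominal_strike(traces)  # start -> [[173.0, -43.7]], end -> [[172.0, -43.5]]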
def merge_options(custom_options, **default_options):
"""
Utility function to merge some default options with a dictionary of custom_options.
Example: custom_options = dict(a=5, b=3)
merge_options(custom_options, a=1, c=4)
--> results in {a: 5, b: 3, c: 4}
"""
merged_options = default_options
merged_options.update(custom_options)
return merged_options | a1676c9304f3c231aefaeb107c8fb6f5a8251b26 | 14,989 |
def build_wall(game: Board, player: Player) -> float:
"""
Encourage the player to go the middle row and column of the board
to increase the chances of a partition in the later game
"""
position = game.get_player_location(player)
blanks = game.get_blank_spaces()
    blank_vertical = [loc for loc in blanks
                      if loc[1] == 3]
    blank_horizontal = [loc for loc in blanks
                        if loc[0] == 3]
vertical = len(blank_vertical)
horizontal = len(blank_horizontal)
if position == (3, 3):
return max(vertical, horizontal)
elif position[0] == 3:
return horizontal
elif position[1] == 3:
return vertical
else:
return 0 | 9309e152b704317442e646d2283c0b20041c55b9 | 14,990 |
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from bs4 import BeautifulSoup
def get_menu_from_hzu_navigation():
    """
    Fetch the HTML text of the navigation bar from the Huizhou University (HZU) homepage.
    :return: a ul tag element
"""
try:
html = urlopen("https://www.hzu.edu.cn/")
except HTTPError as e:
print(e)
print('The page is not exist or have a error in getting page.')
return None
except URLError as e:
print(e)
print("url is wrong or the url couldn't open.")
return None
try:
bs = BeautifulSoup(html.read(), 'html.parser')
return bs.find(id='naver').find('ul', {'class': {'wp-menu'}})
except AttributeError as e:
print(e)
        print('A tag element was not found, or the URL is wrong (server does not exist), so html.read() failed')
return None | 1e0fab3402aeaca8bce3a93787f98a9360ffe49f | 14,991 |
def calc_user_withdraw_fee(user_id, amount):
"""手续费策略"""
withdraw_logs = dba.query_user_withdraw_logs(user_id, api_x.utils.times.utctoday())
if len(withdraw_logs) > 0:
return Decimal('2.00')
return Decimal('0.00') | a9d28ad6c3cb2cf801ac8fdcf67e3f9d2c804a67 | 14,992 |
def get_last_row(dbconn, tablename, n=1, uuid=None):
"""
Returns the last `n` rows in the table
"""
return fetch(dbconn, tablename, n, uuid, end=True) | 0c70b6fca976b4f97fb816279653e0c2bbd67d5c | 14,993 |
from typing import Optional
def get_start(period, reference_date: Optional[FlexDate] = None, strfdate="%Y-%m-%d") -> FlexDate:
"""
Returns the first day of the given period for the reference_date.
Period can be one of the following: {'year', 'quarter', 'month', 'week'}
If reference_date is instance of str, returns a string.
If reference_date is instance of datetime.date, returns a datetime.date instance.
If reference_date is instance of SmartDate, returns a SmartDate instance.
If no reference_date given, returns a SmartDate instance.
Examples
--------
>>> # when no reference is given assume that it is datetime.date(2018, 5, 8)
>>> get_start('month')
SmartDate(2018, 5, 1)
>>> get_start('quarter', '2017-05-15')
'2017-04-01'
>>> get_start('year', datetime.date(2017, 12, 12))
    datetime.date(2017, 1, 1)
"""
start_functions = {
"decade": _get_decade_start,
"year": _get_year_start,
"quarter": _get_quarter_start,
"month": _get_month_start,
"fortnight": _get_fortnight_start,
"week": _get_week_start,
"day": _get_day_start,
"decades": _get_decade_start,
"years": _get_year_start,
"quarters": _get_quarter_start,
"months": _get_month_start,
"fortnights": _get_fortnight_start,
"weeks": _get_week_start,
"days": _get_day_start,
}
return start_functions[period](reference_date or SmartDate.today(), strfdate) | 53016712c6949291fe2e4e81de0ae993da4311c1 | 14,994 |
import numpy as np

def prepare_lc_df(star_index, frame_info, magmatch, magx):
"""Prepare cleaned light curve data
Add mag, mag_err, magx, and magx_err to info
Remove nan values or too bright values in magx
Args:
star_index (int): index of the star
frame_info (DataFrame): info data
magmatch (array): raw photometry array
magx (array): corrected photometry array
Returns:
lc (array): light curve data
"""
lc = frame_info.copy()
lc = lc.assign(mag=magmatch[star_index, :, 0])
lc = lc.assign(mag_err=magmatch[star_index, :, 1])
lc = lc.assign(magx=magx[star_index, :, 0])
lc = lc.assign(magx_err=magx[star_index, :, 1])
lc = lc[~np.isnan(lc.magx) & (lc.magx > 1)]
return lc | b99123fb5bd0b84a84576791d578b5ae91f05575 | 14,995 |
def _filter_nones(centers_list):
"""
Filters out `None` from input list
Parameters
----------
centers_list : list
List potentially containing `None` elements
Returns
-------
new_list : list
List without any `None` elements
"""
return [c for c in centers_list if c is not None] | 031e878ebc8028deea238f5ac902ca55dba72a6d | 14,996 |
import numpy as np
import matplotlib.pyplot as plt

def pseudo_shuffle_mat(ref_var, mat, replace=False, debug=False):
"""
Shuffles the data but keeps the time information (i.e. shuffles the velocity while keeping the
time information intact)
:param np.array ref_var: shape: n_accelerations
:param np.array mat: shape: (n_trials, n_accelerations)
:return: shuffled_mat
:rtype: np.array
"""
shuffled_mat = np.zeros(mat.shape)
n_accs, n_samples = mat.shape
# TODO: use replace=True for velocity as well
seeds = np.random.choice(np.arange(len(ref_var)), n_samples, replace=replace)
for i in range(n_samples):
seed = seeds[i]
trial_vms = mat[:, i]
shuffled_vms = np.hstack((trial_vms[seed:], trial_vms[:seed])) # FIXME: could be done with np.roll (TEST:)
if debug: plt.plot(shuffled_vms) # (with fake traces)
shuffled_mat[:, i] = shuffled_vms.copy()
if debug: plt.show()
return shuffled_mat | 45c83781db0895f42dea1ca5904ab7fbad18ea13 | 14,997 |
import multiprocessing
import time
def exec_in_subprocess(func, *args, poll_interval=0.01, timeout=None, **kwargs):
""" Execute a function in a fork
Args:
func (:obj:`types.FunctionType`): function
* args (:obj:`list`): list of positional arguments for the function
poll_interval (:obj:`float`, optional): interval to poll the status of the subprocess
timeout (:obj:`float`, optional): maximum execution time in seconds
**kwargs (:obj:`dict`, optional): dictionary of keyword arguments for the function
Returns:
:obj:`object`: result of the function
"""
context_instance = multiprocessing.get_context('fork')
queue = context_instance.Queue()
process = Process(target=subprocess_target, args=[queue, func] + list(args), kwargs=kwargs)
process.start()
start_time = time.time()
while process.exception is None:
time.sleep(poll_interval)
if timeout is not None and (time.time() - start_time) > timeout:
raise TimeoutError('Execution did not complete in {} s.'.format(timeout))
if process.exception:
raise process.exception
results = queue.get()
return results | b04b506ced8f5e90489dd789ebd9f77fd4487d8a | 14,998 |
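# Hedged sketch for exec_in_subprocess above: it assumes a custom `Process` class exposing
# an `exception` attribute and a `subprocess_target` wrapper, neither of which appears in
# this snippet. One common pattern that satisfies both assumptions is outlined below for
# illustration; the real implementations may differ.
class Process(multiprocessing.Process):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._recv_conn, self._send_conn = multiprocessing.Pipe(duplex=False)
        self._exception = None

    def run(self):
        try:
            super().run()
            self._send_conn.send(False)  # finished cleanly; makes `exception` falsy but not None
        except Exception as exc:
            self._send_conn.send(exc)  # forward the error so the parent can re-raise it

    @property
    def exception(self):
        if self._exception is None and self._recv_conn.poll():
            self._exception = self._recv_conn.recv()
        return self._exception

def subprocess_target(queue, func, *args, **kwargs):
    # run the function in the child and hand its result back through the queue
    queue.put(func(*args, **kwargs))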
import pandas as pd

def get_cv_score_table(clf):
"""
Get a table (DataFrame) of CV parameters and scores for each combination.
:param clf: Cross-validation object (GridSearchCV)
:return:
"""
# Create data frame
df = pd.DataFrame(list(clf.cv_results_['params']))
# Add test scores
df['rank'] = clf.cv_results_['rank_test_score']
df['test_mean'] = clf.cv_results_['mean_test_score']
df['test_sd'] = clf.cv_results_['std_test_score']
# Add scores over training data
df['train_mean'] = clf.cv_results_['mean_train_score']
df['train_sd'] = clf.cv_results_['std_train_score']
# Add time metrics (s)
df['fit_time_mean'] = clf.cv_results_['mean_fit_time']
df['fit_time_sd'] = clf.cv_results_['std_fit_time']
df['score_time_mean'] = clf.cv_results_['mean_score_time']
df['score_time_sd'] = clf.cv_results_['std_score_time']
return df | e1912b6545b0a6649fa66673d2fdd5dfd2b91cd5 | 14,999 |