content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
from string import ascii_uppercase
from typing import List, Optional

def _find_available_share_drive_letter(share_ignores: Optional[List[str]] = None) -> str:
"""Find an available drive letter for a share.
    This function iterates backwards through the ASCII uppercase letters and
    checks each one against the current net use drive mappings. Once it finds
    an available drive letter, it returns that letter to the caller. If an
available drive letter is not found, a RuntimeError is raised.
Args:
share_ignores (List[str]): A list of share letters to ignore.
Returns:
str: An available drive letter (i.e., 'Z:') for a network share.
Raises:
RuntimeError
"""
    LOGGER.write('Looking for an available share letter.')
    if share_ignores is None:
        share_ignores = []
    drive_mapping = _get_current_drive_mapping()
# Iterate backwards through letters to see if they've already been used.
available_letter = ''
for letter in reversed(ascii_uppercase):
if letter in share_ignores:
continue
letter = f'{letter}:'
if letter not in drive_mapping:
available_letter = letter
break
if not available_letter:
raise RuntimeError('Unable to find a free drive letter to map to!')
return available_letter | 0b74d17660e2b7f2d2ca92b23b5e67dac9b58f61 | 11,900 |
def integrate(sde=None, *, q=None, sources=None, log=False, addaxis=False):
"""Decorator for Ito Stochastic Differential Equation (SDE)
integration.
Decorates a function representing the SDE or SDEs into the corresponding
``sdepy`` integrator.
Parameters
----------
sde : function
Function to be wrapped. Its signature and values should be
as expected for the ``sde`` method of the ``sdepy.SDE`` or
``sdepy.SDEs`` classes.
q : int
Number of equations. If ``None``, attempts a test evaluation
of ``sde`` to find out. ``q=0`` indicates a single equation.
sources : set
Stochasticity sources used in the equation. If ``None``,
attempts a test evaluation of ``sde`` to find out.
log : bool
Sets the ``log`` attribute for the wrapping class.
addaxis : bool
Sets the ``addaxis`` attribute for the wrapping class.
Returns
-------
A subclass of ``sdepy.SDE`` or ``sdepy.SDEs`` as appropriate,
and of ``sdepy.integrator``, with the given ``sde``
cast as its ``sde`` method.
Notes
-----
To prevent a test evaluation of ``sde``, explicitly provide
the intended ``q`` and ``sources`` as keyword arguments to ``integrate()``.
The test evaluation is attempted as ``sde()`` and, upon failure,
again as ``sde(1., 1.)``.
Examples
--------
>>> from sdepy import integrate
>>> @integrate
... def my_process(t, x, theta=1., k=1., sigma=1.):
... return {'dt': k*(theta - x), 'dw': sigma}
>>> P = my_process(x0=1, sigma=0.5, paths=100*1000, steps=100)
>>> x = P(timeline=(0., 0.5, 1.))
>>> x.shape
(3, 100000)
"""
if sde is None:
def decorator(sde):
return integrate(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
return decorator
else:
SDE_class = _SDE_from_function(sde, q=q, sources=sources,
log=log, addaxis=addaxis)
class sde_integrator(SDE_class, integrator):
pass
return sde_integrator | 3914a3dffae1f148459cb72ec6f23547c2dbff97 | 11,901 |
import numpy as np
def calculate_statistical_inefficiency_runs(traj_l):
"""
Using fast autocorrelation calculation to estimate statistical inefficiency. This code wraps
a function from pymbar.
References
----------
[1] Shirts MR and Chodera JD. Statistically optimal analysis of samples from
multiple equilibrium states. J. Chem. Phys. 129:124105, 2008
http://dx.doi.org/10.1063/1.2978177
[2] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
"""
    try:
        import pymbar
    except ImportError as err:
err.args = (err.args[0] + "\n You need to install pymbar to use this function.",)
raise
iinv = np.array([pymbar.timeseries.statisticalInefficiency_fft(tra) for tra in traj_l])
return (iinv - 1.0) / 2.0 | e146820d28258a8b6b5bc60420cbf56fdade328b | 11,902 |
import torch
import speechbrain as sb
from speechbrain.tokenizers.SentencePiece import SentencePiece
def dataio_prep(hparams):
"""Creates the datasets and their data processing pipelines"""
# 1. define tokenizer and load it
modelpath = download_to_dir(hparams["tok_mdl_file"], hparams["save_folder"])
download_to_dir(hparams["tok_voc_file"], hparams["save_folder"])
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
)
tokenizer.sp.load(modelpath)
if (tokenizer.sp.eos_id() + 1) == (tokenizer.sp.bos_id() + 1) == 0 and not (
hparams["eos_index"]
== hparams["bos_index"]
== hparams["blank_index"]
== hparams["unk_index"]
== 0
):
raise ValueError(
"Desired indexes for special tokens do not agree "
"with loaded tokenizer special tokens !"
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes(hparams["input_type"])
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("words")
@sb.utils.data_pipeline.provides("tokens_bos", "tokens_eos", "tokens")
def text_pipeline(words):
tokens_list = tokenizer.sp.encode_as_ids(words)
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
# 4. Create datasets
data = {}
for dataset in ["train", "valid", "test"]:
data[dataset] = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=hparams[f"{dataset}_annotation"],
replacements={"data_root": hparams["data_folder"]},
dynamic_items=[audio_pipeline, text_pipeline],
output_keys=["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
)
if dataset != "train":
data[dataset] = data[dataset].filtered_sorted(sort_key="length")
# Sort train dataset and ensure it doesn't get un-sorted
if hparams["sorting"] == "ascending" or hparams["sorting"] == "descending":
data["train"] = data["train"].filtered_sorted(
sort_key="length", reverse=hparams["sorting"] == "descending",
)
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] != "random":
raise NotImplementedError(
"Sorting must be random, ascending, or descending"
)
return data, tokenizer | 0cb89799b58e0ba68781a538f17d525749ab4796 | 11,903 |
def get_superlative_type(question_normal):
"""What TV series was Mark Harmon the star of that ran the least amount of time on TV ?"""
result = 'argmax'
question_normal = question_normal.lower()
superlative_serialization_list = superlative_serialization(question=question_normal)
for element in superlative_serialization_list:
if element in ['argmax', 'argmin']:
result = element
break
return result | 8129fcf104dd49117d3ffb49f73752902c1a27f4 | 11,904 |
import caffe
from caffe import layers as L, params as P
def coord_net_spec(ks=3, stride=1, pad=0, pool=2, dstride=2, dpad=0):
"""
Define net spec for simple conv-pool-deconv pattern common to all
coordinate mapping tests.
"""
n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[2, 1, 100, 100]))
n.aux = L.Input(shape=dict(dim=[2, 1, 20, 20]))
n.conv = L.Convolution(
n.data, num_output=10, kernel_size=ks, stride=stride, pad=pad)
n.pool = L.Pooling(
n.conv, pool=P.Pooling.MAX, kernel_size=pool, stride=pool, pad=0)
# for upsampling kernel size is 2x stride
try:
deconv_ks = [s*2 for s in dstride]
    except TypeError:
deconv_ks = dstride*2
n.deconv = L.Deconvolution(
n.pool, num_output=10, kernel_size=deconv_ks, stride=dstride, pad=dpad)
return n | 3ee824372d99ae12cb3318d2e17dbfa67abbd31c | 11,905 |
def note_favorite(note):
"""
get the status of the note as a favorite
returns True if the note is marked as a favorite
False otherwise
"""
if 'favorite' in note:
return note['favorite']
return False | 503f4e3abaab9d759070c725cdf783d62d7c05d2 | 11,906 |
import itertools
def _crossproduct(template: CheckListTemplate):
"""
Takes the output of editor.template and does the cross product of contexts and qas
"""
ret = []
ret_labels = []
for instance in template.data:
cs = instance["contexts"]
qas = instance["qas"]
d = list(itertools.product(cs, qas))
ret.append([(x[0], x[1][0]) for x in d])
ret_labels.append([x[1][1] for x in d])
template.data = ret
template.labels = ret_labels
return template | 6f2bfd9b3c1aa392179c377e1a87c0d2221f0a45 | 11,907 |
from concurrent.futures import Future
from functools import wraps

def thread(function):
"""Runs the decorated function within a concurrent thread,
taking care of the result and error management.
Decorated functions will return a concurrent.futures.Future object
once called.
"""
@wraps(function)
def wrapper(*args, **kwargs):
future = Future()
launch_thread(_function_handler, function, args, kwargs, future)
return future
return wrapper | dd3d7d15281a6821e7235695e6311f9aabf96ea9 | 11,908 |
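# A brief usage sketch of the decorator's documented contract, with a hypothetical
# function; it assumes the surrounding module provides the `launch_thread` and
# `_function_handler` helpers used by `thread` above.
@thread
def slow_add(a, b):
    return a + b

future = slow_add(2, 3)          # returns a concurrent.futures.Future immediately
print(future.result(timeout=5))  # 5, once the background thread completes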
def collapse_range(arg, value_delimiter=',', range_delimiter='-'):
"""
Collapses a list of values into a range set
:param arg: The list of values to collapse
:param value_delimiter: The delimiter that separates values
:param range_delimiter: The delimiter that separates a value range
:return: An array of collapsed string values
:rtype: list
"""
values = list()
expanded = arg.split(value_delimiter)
range_start = None
for v1, v2 in lookahead(expanded):
if v2:
v1 = int(v1)
v2 = int(v2)
if (v1 + 1) == v2:
if not range_start:
range_start = v1
elif range_start:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
elif range_start:
item = '{}{}{}'.format(range_start, range_delimiter, v1)
values.extend([item])
range_start = None
else:
values.extend([v1])
return [str(x) for x in values] | 5caeb8609ab83e52041cc83bbe53d9aa2316dd01 | 11,909 |
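# `collapse_range` relies on an external `lookahead` helper that is not shown in the
# snippet. A minimal sketch of such a helper, assuming it yields each item paired
# with the next one (and None after the final item), which matches how the loop
# above tests `v2`:
def lookahead(iterable):
    """Yield (current, next_item) pairs; next_item is None for the last element."""
    it = iter(iterable)
    try:
        current = next(it)
    except StopIteration:
        return
    for upcoming in it:
        yield current, upcoming
        current = upcoming
    yield current, None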
import numpy as np
import numpy.linalg as npl

def compute_diag_mog_params(M=int(4), snr=3.):
"""Returns diagonal mixture of Gaussian target distribution settings for d=2
Args:
M: (Optional) Integer, number of components
snr: (Optional) Scaling of the means
"""
d = int(2)
weights = np.ones(M)
weights /= np.sum(weights)
# change this to set the means apart
means = np.zeros((M, d))
if M == 3:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.]])
if M == 4:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.]])
if M == 6:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.]])
if M == 8:
means = snr*np.array([[1., 1.], [-1., 1], [-1., -1.], [1., -1.], [0, 2.], [-2, 0.], [2, 0.], [0, -2.]])
covs = np.ones(M)
# compute the expected value of E[||X-Y||^2] for X, Y iid from P
mean_sqdist = 0.
for i in range(M):
for j in range(M):
temp = npl.norm(means[i])**2 + npl.norm(means[j])**2 - 2 * np.dot(means[i], means[j])
temp += d*(covs[i]+ covs[j])
mean_sqdist += weights[i] * weights[j] * temp
params_p = {"name": "diag_mog",
"weights": weights,
"means": means,
"covs": covs,
"d": int(d),
"mean_sqdist" : mean_sqdist,
"saved_samples": False,
"flip_Pnmax": False
}
return(params_p) | 4247889dcf8bddc93e4433d685ec81fd59e591a7 | 11,910 |
def has_ifm2(npu_op: NpuBlockOperation) -> bool:
"""Checks if op has non-scalar IFM2"""
return npu_op.ifm2 is not None and npu_op.ifm2_scalar is None | b51092fa486979fbd53d0b1c70ac4390f22df87f | 11,911 |
def ilsvrc_fix_args(args):
"""
Update the args with fixed parameter in ilsvrc
"""
args.ds_name="ilsvrc"
    args.num_classes = 1000
# GPU will handle mean std transformation to save CPU-GPU communication
args.do_mean_std_gpu_process = True
args.input_type = 'uint8'
args.mean = get_augmented_data.ilsvrc_mean
args.std = get_augmented_data.ilsvrc_std
#assert args.do_mean_std_gpu_process and args.input_type == 'uint8'
#assert args.mean is not None and args.std is not None
decay_power = args.batch_size / float(ILSVRC_DEFAULT_BATCH_SIZE)
args.batch_norm_decay=0.9**decay_power # according to Torch blog
return args | 64e956a78fc4e64040efa42fd5b6215642430d69 | 11,912 |
def get_mock_adapter() -> Adapter:
"""Get a requests-mock Adapter with some URLs mocked by default"""
adapter = Adapter()
adapter.register_uri(
ANY_METHOD,
MOCKED_URL,
headers={'Content-Type': 'text/plain'},
text='mock response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_HTTPS,
headers={'Content-Type': 'text/plain'},
text='mock https response',
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_JSON,
headers={'Content-Type': 'application/json'},
json={'message': 'mock json response'},
status_code=200,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT,
headers={'Content-Type': 'text/plain', 'Location': MOCKED_URL_REDIRECT_TARGET},
text='mock redirect response',
status_code=302,
)
adapter.register_uri(
ANY_METHOD,
MOCKED_URL_REDIRECT_TARGET,
headers={'Content-Type': 'text/plain'},
text='mock redirected response',
status_code=200,
)
return adapter | bede0a72fa8336663d81eeb352a9191280f9f2d6 | 11,913 |
def eqtls_weights_summing(eqtl_occurrence_log_likelihood, ens_gene_id, target_species_hit, converted_eqtls, gtex_weights_dict, chr_start, chr_end, gtex_variants, tf_len, gene_len):
"""
Identify if any of the eQTLs associated with this gene overlap this predicted TFBS.
Retrieve the log-likelihood scores for all of them.
Fix.
"""
eqtl_weights = []
if len(converted_eqtls) > 0:
# determine the weight score for likelihood of this magnitude eQTL.
# ref-point
motif_start = target_species_hit[4]
motif_end = target_species_hit[5]
for converted_eqtl in converted_eqtls:
converted_eqtl_start = converted_eqtl[0]
converted_eqtl_end = converted_eqtl[1]
converted_eqtl_score_mag = abs(converted_eqtl[2])
overlap = overlap_range([motif_start, motif_end], [converted_eqtl_start, converted_eqtl_end])
if len(overlap) > 0:
eqtl_weight = gtex_weights_dict[converted_eqtl_score_mag]
eqtl_weights.append(eqtl_weight + eqtl_occurrence_log_likelihood)
eqtl_weights_sum = sum(eqtl_weights)
return eqtl_weights_sum | 1ac37a4eed54c282e8f086b50ffc96946a1bdb29 | 11,914 |
def get_file_chunks_in_range(context, filediff, interfilediff,
first_line, num_lines):
"""
A generator that yields chunks within a range of lines in the specified
filediff/interfilediff.
This is primarily intended for use with templates. It takes a
RequestContext for looking up the user and for caching file lists,
in order to improve performance and reduce lookup times for files that have
already been fetched.
Each returned chunk is a dictionary with the following fields:
============= ========================================================
Variable Description
============= ========================================================
``change`` The change type ("equal", "replace", "insert", "delete")
``numlines`` The number of lines in the chunk.
``lines`` The list of lines in the chunk.
``meta`` A dictionary containing metadata on the chunk
============= ========================================================
Each line in the list of lines is an array with the following data:
======== =============================================================
Index Description
======== =============================================================
0 Virtual line number (union of the original and patched files)
1 Real line number in the original file
2 HTML markup of the original file
3 Changed regions of the original line (for "replace" chunks)
4 Real line number in the patched file
5 HTML markup of the patched file
6 Changed regions of the patched line (for "replace" chunks)
7 True if line consists of only whitespace changes
======== =============================================================
"""
def find_header(headers):
for header in reversed(headers):
if header[0] < first_line:
return {
'line': header[0],
'text': header[1],
}
interdiffset = None
key = "_diff_files_%s_%s" % (filediff.diffset.id, filediff.id)
if interfilediff:
key += "_%s" % (interfilediff.id)
interdiffset = interfilediff.diffset
if key in context:
files = context[key]
else:
assert 'user' in context
request = context.get('request', None)
files = get_diff_files(filediff.diffset, filediff, interdiffset,
request=request)
populate_diff_chunks(files, get_enable_highlighting(context['user']),
request=request)
context[key] = files
if not files:
        return
assert len(files) == 1
last_header = [None, None]
for chunk in files[0]['chunks']:
if ('headers' in chunk['meta'] and
(chunk['meta']['headers'][0] or chunk['meta']['headers'][1])):
last_header = chunk['meta']['headers']
lines = chunk['lines']
if lines[-1][0] >= first_line >= lines[0][0]:
start_index = first_line - lines[0][0]
if first_line + num_lines <= lines[-1][0]:
last_index = start_index + num_lines
else:
last_index = len(lines)
new_chunk = {
'lines': chunk['lines'][start_index:last_index],
'numlines': last_index - start_index,
'change': chunk['change'],
'meta': chunk.get('meta', {}),
}
if 'left_headers' in chunk['meta']:
left_header = find_header(chunk['meta']['left_headers'])
right_header = find_header(chunk['meta']['right_headers'])
del new_chunk['meta']['left_headers']
del new_chunk['meta']['right_headers']
if left_header or right_header:
header = (left_header, right_header)
else:
header = last_header
new_chunk['meta']['headers'] = header
yield new_chunk
first_line += new_chunk['numlines']
num_lines -= new_chunk['numlines']
assert num_lines >= 0
if num_lines == 0:
break | 85e96d7c1a0c09880c865e1ea9f5e3eb29dca122 | 11,915 |
import urllib
import sys
def urlread(url):
"""Return the contents of a url. Raises IOError if couldn't read url."""
try:
urlfile = urllib.request.urlopen(url)
return urlfile.read()
except IOError as e:
print("[!] Error reading url:", url)
        print(e)
sys.exit(1) | 5c62161106003524809f4b40ab5e0c8a6dbe65d7 | 11,916 |
import numpy as np

def parse_metric(y_train, goal):
"""
Parse the metric to the dictionary
"""
y_array = np.array(y_train, dtype=np.float64)
if goal == api_pb2.MINIMIZE:
y_array *= -1
return y_array | 164518c4ba84e0fef450ec9e4196ec90de269fd3 | 11,917 |
import math
def erfc(x):
"""Complementary error function (via `http://bit.ly/zOLqbc`_)"""
z = abs(x)
t = 1. / (1. + z / 2.)
r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (
0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (
0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (
-0.82215223 + t * 0.17087277
)))
)))
)))
return 2. - r if x < 0 else r | fd2a44142042e81ef1fc5f649186a41ae4a152b0 | 11,918 |
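# A quick sanity check of the approximation above against the standard library's
# math.erfc (assumes the erfc function defined above is in scope):
import math

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(f"erfc({v:+.1f}) ~ {erfc(v):.6f}  (math.erfc: {math.erfc(v):.6f})")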
import urllib
import logging
import time
import json
def request_until_success(url, max_attempts=5, wait=5):
"""Makes a request a few times in case of a 500 error.
Should use exponential backoff?
"""
req = urllib.request.Request(url)
success = False
num_tries = 0
while not success:
try:
num_tries += 1
response = urllib.request.urlopen(req)
success = response.getcode() == 200
except urllib.request.HTTPError as e:
logging.error(e)
logging.error("Error on url {}".format(url))
if e.code == 500 and num_tries < max_attempts:
logging.error("trying again soon")
time.sleep(wait)
else:
logging.error(e.reason)
raise e
return json.loads(response.read().decode('UTF-8')) | 639371539df5daabffaf3c3978c8677cbf2f8b4e | 11,919 |
from typing import Optional
def uuid(name, value) -> "Optional[str]":
"""Validate that the value is a UUID
Args:
name (str): Name of the argument
value (any): A UUID string value
Returns:
The value, or None if value is None
Raises:
InvalidParameterValue: if the value is not a valid UUID
"""
if value is None:
return
if not uuidutils.is_uuid_like(value):
raise InvalidParameterValue(f"Expected UUID for {name}: {value}")
return value | 3e6ca1211c9ebbba5889917ee252d21aebaac74e | 11,920 |
import re
def expandall(text):
"""
Search for abbreviations in text using re_abbr (defined in utils.get_res).
For each abbreviation, find likely full term. Replace each instance of the
abbreviation in the text with the full term.
Parameters
----------
text : str
Text to search for abbreviations.
Returns
-------
text: str
Text with expanded abbreviations.
Examples
----------
>>> text = 'This is a test string (TS). I hope it is informative (inf).'
>>> expanded = expandall(text)
>>> print(expanded)
This is a test string (test string). I hope it is informative (informative).
"""
re_abbr, _ = get_res()
f = re.finditer(re_abbr, text)
for match in f:
if match is not None:
abb = str(match.group(1))
# Very long abbreviations will break regex.
if len(abb) < 9:
abR = make_abbr_regex(match)
fullterm = re.search(abR, text)
if fullterm is not None:
index = fullterm.group(0).find(' (')
fullterm = str(fullterm.group(0)[:index]).strip()
text = replace(text, abb, fullterm)
else:
logger.info('No full term detected for '
'abbreviation {0}'.format(abb))
else:
logger.warning('Abbreviation detection regex returned None.')
return text | 0c229ec32ef5d9315c39eff6f4a8fad427ccdb07 | 11,921 |
def get_source_fields(client, source_table):
"""
Gets column names of a table in bigquery
:param client: BigQuery client
:param source_table: fully qualified table name.
returns as a list of column names.
"""
return [f'{field.name}' for field in client.get_table(source_table).schema] | abc161f252c03647a99a6d2151c00288b176a4e7 | 11,922 |
def has_user_based_permission(obj, user, allow_superuser=True, allow_staff=False):
"""
Based on obj.get_user(), checks if provided user is that user.
Accounts for superusers and staff.
"""
if hasattr(obj, "get_user"):
obj_user = obj.get_user()
# User is logged in
if user.is_authenticated:
# If staff or superuser or share a common group, then yes.
if (allow_staff and user.is_staff) \
or (allow_superuser and user.is_superuser) \
or obj_user == user:
return True
return False | bcedf697280a75575e9d0202d1a6a65161a873ad | 11,923 |
from typing import Optional
from typing import Any
from contextvars import ContextVar
def trace_stack_top(trace_stack_var: ContextVar) -> Optional[Any]:
"""Return the element at the top of a trace stack."""
trace_stack = trace_stack_var.get()
return trace_stack[-1] if trace_stack else None | 4258a4247a8b40e5cf61a39e94bb30fed936b1de | 11,924 |
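# A minimal usage sketch with a hypothetical trace stack stored in a ContextVar:
from contextvars import ContextVar

trace_stack: ContextVar = ContextVar("trace_stack", default=None)
print(trace_stack_top(trace_stack))      # None - nothing has been pushed yet
trace_stack.set(["span-1", "span-2"])
print(trace_stack_top(trace_stack))      # 'span-2'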
def fbconnect():
"""This allows users to use facebook account to sign in."""
if request.args.get("state") != login_session["state"]:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers["Content-Type"] = "application/json"
return response
access_token = request.data
print "access token received %s " % access_token
app_id = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_id"]
app_secret = json.loads(open("fb_client_secrets.json",
"r").read())["web"]["app_secret"]
url = ("https://graph.facebook.com/v2.8/oauth/access_token?"
"grant_type=fb_exchange_token&client_id=%s&client_secret=%s"
"&fb_exchange_token=%s") % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
token = data["access_token"]
# Use token to get user info from API
userinfo_url = "https://graph.facebook.com/v2.8/me"
url = userinfo_url + "?access_token=%s&fields=name,id,email" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
    print(data)
login_session["provider"] = "facebook"
login_session["username"] = data["name"]
login_session["email"] = data["email"]
login_session["facebook_id"] = data["id"]
login_session["access_token"] = token
# Get user picture
url = userinfo_url + \
"/picture?access_token=%s&redirect=0&height=200&width=200" % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session["picture"] = data["data"]["url"]
# see if user exists
user_id = getUserID(login_session["email"])
if not user_id:
user_id = createUser(login_session)
login_session["user_id"] = user_id
output = ""
output += "<h1>Welcome, "
output += login_session["username"]
output += "!</h1>"
output += "<img src='"
output += login_session["picture"]
output += ("""'style='width: 300px; height: 300px;border-radius: 150px;
-webkit-border-radius: 150px;-moz-border-radius: 150px;'>""")
flash("Now logged in as %s" % login_session["username"])
return output | a4a1ec728ce6bfc7a9c8f3fff02896f63eed6dea | 11,925 |
def _get_current_task():
"""
Stub to make it easier to test without actually running Celery.
This is a wrapper around celery.current_task, which provides access
to the top of the stack of Celery's tasks. When running tests, however,
it doesn't seem to work to mock current_task directly, so this wrapper
is used to provide a hook to mock in tests, while providing the real
`current_task` in production.
"""
return current_task | 8b8b3c4abdb8ae75fcfb2907010965e36bd1dfe5 | 11,926 |
def naiveMP(tsA, m, tsB=None):
"""
Calculate the Matrix Profile using the naive all-pairs calculation.
Parameters
----------
tsA: Time series containing the queries for which to calculate the Matrix Profile.
m: Length of subsequence to compare.
tsB: Time series to compare the query against. Note that, if no value is provided, tsB = tsA by default.
"""
return _matrixProfile(tsA, m, order.linearOrder, distanceProfile.naiveDistanceProfile, tsB) | 9d38e4384d3ad8581862df388c32ea17bd02734f | 11,927 |
from typing import Mapping
from typing import Union
from typing import Iterable
from typing import Sized
import json
import sys
def markdown_list(
handle: Jira,
jql_text: str,
column_fields=None,
list_type: str = 'ul',
data: Mapping[str, Union[object, Iterable, Sized]] = None,
) -> str:
"""Yes we can ... document later."""
if data is None:
data = query(handle, jql_text, column_fields)
if data.get('error', ''):
return json.dumps(data, indent=2)
if not data['rows']:
if laskea.STRICT:
message = f'WARNING: received 0 results for JQL ({jql_text}) and {list_type}'
if not laskea.DRY_RUN:
print(message, file=sys.stderr)
return message
else:
return ''
items = []
for slot, record in enumerate(data['rows']):
k, v = '', ''
for key, cell in record.items():
if key.lower() not in ('key', 'summary'):
continue
if key.lower() == 'key':
k = f'[{cell}]({BASE_URL.strip("/")}/browse/{cell})'
else:
v = cell
items.append((k, v))
if list_type in ('ol', 'ul'):
lt = '-' if list_type == 'ul' else '1.' # implicit 'ol'
xl = tuple(f'{lt} {key} - {summary}' for key, summary in items)
the_list = '\n'.join(xl) + '\n'
return the_list.replace('\r', '') if BASE_LF_ONLY else the_list
elif list_type == 'dl':
# 'Term'
# ':definition of term'
#
xl = tuple(f'{key}\n:{summary}\n' for key, summary in items)
the_list = '\n'.join(xl) + '\n'
return the_list.replace('\r', '') if BASE_LF_ONLY else the_list
else:
return f'Unexpected list type ({list_type}) in markdown_list not in ({("dl", "ol", "ul")})' + '\n' | 39bc4a1b12293c30cf906cb709872349a34c4418 | 11,928 |
def render_html(data):
"""
"""
data.setdefault('domain', DOMAIN)
template = '''
<table border="1" cellspacing="0" cellpadding="0">
<tr><td>类型</td><td>{type}</td></tr>
<tr><td>团队</td><td>{team}</td></tr>
<tr><td>项目</td><td>{project}</td></tr>
<tr><td>名称</td><td>{name}</td></tr>
<tr><td>接口</td><td>{interface[total]}个</td></tr>
<tr><td>断言</td><td>{interface[verify]}个</td></tr>
<tr><td>成功率</td><td>{interface[percent]}%</td></tr>
<tr><td>开始时间</td><td>{start}</td></tr>
<tr><td>结束时间</td><td>{end}</td></tr>
<tr><td>报告地址</td><td><a href="{domain}/report/detail?id={id}">测试报告-{id}</a></td></tr>
</table>
'''.format(**data)
return template | 92371e4e7589853fef167ea12f2e0461e39fcae4 | 11,929 |
from operator import invert
def segment_cells(image, max_cell_size):
"""Return segmented cells."""
image = identity(image)
wall = threshold_adaptive_median(image, block_size=101)
seeds = remove_small_objects(wall, min_size=100)
seeds = dilate_binary(seeds)
seeds = invert(seeds)
seeds = remove_small_objects(seeds, min_size=5)
seeds = connected_components(seeds, background=0)
segmentation = watershed_with_seeds(-image, seeds=seeds)
segmentation = remove_large_segments(segmentation, max_cell_size)
return segmentation, wall | 5479b04595b10e903e56b2a546c44e583b324c94 | 11,930 |
import string
def encrypt(message, key):
"""
>>> encrypt("Hello world",12)
'Tqxxa iadxp'
>>> encrypt("We are Penn State!!!",6)
'Ck gxk Vktt Yzgzk!!!'
>>> encrypt("We are Penn State!!!",5)
'Bj fwj Ujss Xyfyj!!!'
>>> encrypt(5.6,3)
'error'
>>> encrypt('Hello',3.5)
'error'
>>> encrypt(5.6,3.15)
'error'
"""
# --- YOU CODE STARTS HERE
# decide whether it is the right data type
if type(message) == str and type(key) == int:
# define a list that have the ascii number of character
words = string.ascii_letters
# use the for loop to transfer characters with keys
lowerchr = [chr((i - 97) % 26 + 97) for i in range(97 + key, 123 + key)]
capchr = [chr((i - 65) % 26 + 65) for i in range(65 + key, 91 + key)]
# join the lower and upper characters together
asc = ''.join(lowerchr) + ''.join(capchr)
        # map each original letter to its shifted counterpart via str.translate
return message.translate(str.maketrans(words, asc))
# if the value type is not correct return "error"
return "error" | 991449aac78fba9a348a1c3b1d1d7b1f14faff11 | 11,931 |
import os
def extraction_closure(video_root, frame_root):
"""Closure that returns function to extract frames for video list."""
def func(video_list):
for video in video_list:
            frame_dir = os.path.splitext(video)[0]  # strip the extension (rstrip would eat trailing characters)
frame_path = os.path.join(frame_root, frame_dir)
os.makedirs(frame_path, exist_ok=True)
extract_frames(video, video_root, frame_root)
return func | a8cdf26c8b4ed6078f7888cf156b2c78a66058d3 | 11,932 |
def std(input, axis=None, keepdim=False, unbiased=True, out=None, name=None):
"""
:alias_main: paddle.std
:alias: paddle.std,paddle.tensor.std,paddle.tensor.stat.std
Computes the standard-deviation of the input Variable's elements along the specified
axis.
Args:
input (Variable): The input Variable to be computed standard-deviation, with data
type float32 and float64 supported.
axis (list|int, optional): The axis along which the standard-deviation is computed.
If `None`, compute the standard-deviation over all elements of :attr:`input`
and return a Variable with a single element, otherwise it must be in
the range :math:`[-rank(input), rank(input))`. If :math:`axis[i] < 0`,
the axis to compute is :math:`rank(input) + axis[i]`.
keepdim (bool, optional): Whether to reserve the reduced dimensions in
the output Variable. The dimensions in :attr:`axis` will be squeezed
and the result Variable will have :attr:`len(axis)` fewer dimensions
than the :attr:`input` unless :attr:`keepdim` is true, default False.
unbiased (bool, optional): Whether to compute standard-deviation via the unbiased
estimator, in which the divisor used in the computation is
:math:`N - 1`, where :math:`N` represents the number of elements
along :attr:`axis`, otherwise the divisor is :math:`N`. Default True.
out (Variable, optional): Alternate output Variable to store the result
standard-deviation . Default None.
name (str, optional): The name for this layer. Normally there is no
need for user to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default None.
Returns:
Variable: The result standard-deviation with the same dtype as :attr:`input`.
If :attr:`out = None`, returns a new Variable containing the
standard-deviation , otherwise returns a reference to the output Variable.
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
# x is a Tensor variable with following elements:
# [[0.2, 0.3, 0.5, 0.9]
# [0.1, 0.2, 0.6, 0.7]]
# Each example is followed by the corresponding output tensor.
x = fluid.data(name='x', shape=[2, 4], dtype='float32')
paddle.std(x) # [0.28252685]
paddle.std(x, axis=[0]) # [0.0707107, 0.07071075, 0.07071064, 0.1414217]
paddle.std(x, axis=[-1]) # [0.30956957, 0.29439208]
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'std')
tmp = var(input, axis=axis, keepdim=keepdim, unbiased=unbiased, name=name)
tmp = layers.sqrt(tmp)
if out is not None:
layers.assign(input=tmp, output=out)
return out
else:
return tmp | b2710442ccf0377dd1d84521f5666c8253390e35 | 11,933 |
import os
import joblib
import pandas as pd
def predict_model(model_name, data_file_name):
"""This function predicts house prices based on input data"""
model_path = os.path.join(config.TRAINED_MODEL_DIR, model_name)
data_file_path = os.path.join(os.path.join(config.DATA_DIR, data_file_name))
pipe = joblib.load(model_path)
data = pd.read_csv(data_file_path)
prediction = pipe.predict(data)
return prediction | aa49ad5f5076ad57aa3a0e3f7c1dffdfc95f5c51 | 11,934 |
def check_file_behaviour(file_hash):
"""
Returns the file execution report.
"""
params = {
'hash': file_hash
}
api_endpoint = 'file/behaviour'
return http_request('GET', api_endpoint, params, DEFAULT_HEADERS) | 87bb4539e948683cfea05f75741d26e1063073d9 | 11,935 |
def is_access_group(outter_key, inner_key) -> dict:
    """Return the access-group values."""
    values = {}
if outter_key == "access-group":
if inner_key.get('index') is not None:
values['index'] = ', '.join(is_instance(inner_key.get('index', {})))
elif inner_key.get('name') is not None:
values['name'] = ', '.join(is_instance(inner_key.get('name', {})))
return values | 96ef5ab1f0b48f7a43b4ebd96d728ff7ad552964 | 11,936 |
import numpy as np

def lidar_to_cam_frame(xyz_lidar, frame_calib):
"""Transforms points in lidar frame to the reference camera (cam 0) frame
Args:
xyz_lidar: points in lidar frame
frame_calib: FrameCalib frame calibration
Returns:
ret_xyz: (N, 3) points in reference camera (cam 0) frame
"""
# Pad the r0_rect matrix to a 4x4
r0_rect_mat = frame_calib.r0_rect
r0_rect_mat = np.pad(r0_rect_mat, ((0, 1), (0, 1)),
'constant', constant_values=0)
r0_rect_mat[3, 3] = 1
# Pad the vel_to_cam matrix to a 4x4
tf_mat = frame_calib.velo_to_cam
tf_mat = np.pad(tf_mat, ((0, 1), (0, 0)),
'constant', constant_values=0)
tf_mat[3, 3] = 1
# Pad the point cloud with 1's for the transformation matrix multiplication
one_pad = np.ones(xyz_lidar.shape[0]).reshape(-1, 1)
xyz_lidar = np.append(xyz_lidar, one_pad, axis=1)
# p_cam = P2 * (R0_rect * Tr_velo_to_cam * p_velo)
rectified = np.dot(r0_rect_mat, tf_mat)
ret_xyz = np.dot(rectified, xyz_lidar.T)
# Return (N, 3) points
return ret_xyz[0:3].T | 6b42a52ccca1101cd0f64ed6fadbcdc974a67b0f | 11,937 |
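# A minimal check with a hypothetical identity calibration (assumes a frame_calib
# object exposing r0_rect as a 3x3 matrix and velo_to_cam as a 3x4 [R|t] matrix,
# which is what the function above expects):
import numpy as np
from types import SimpleNamespace

identity_calib = SimpleNamespace(
    r0_rect=np.eye(3),
    velo_to_cam=np.hstack([np.eye(3), np.zeros((3, 1))]),
)
pts = np.array([[1.0, 2.0, 3.0]])
print(lidar_to_cam_frame(pts, identity_calib))  # unchanged under identity transforms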
import logging
def create_logger(model_name: str, saved_path: str):
"""Create logger for both console info and saved info
"""
logger = logging.getLogger(model_name)
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler(f"{saved_path}/{model_name}.log")
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = RuntimeFormatter('[Time: %(asctime)s] - [PID: %(process)d] - [Model: %(name)s] \n%(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger | 3703cee522fcef8bcd383a8703bef92252435b5b | 11,938 |
import torch
from torch import Tensor

def hann_sinc_low_pass(x: Tensor, N: int, fs: int, fc: float) -> Tensor:
"""Hann windowed ideal low pass filter.
Args:
x: [n_batch, 1, n_sample]
N: the window will be [-N, N], totally 2N+1 samples.
Returns:
y: [n_batch, 1, n_sample]
"""
w = continuous_hann_sinc_filter(fs, fc, 2*N+1, x.dtype, x.device)
w = (w / w.sum()).view(1, 1, -1)
return torch.nn.functional.conv1d(x, w, padding=N) | 93ba44fd351d7c53e151c812cc6458433c916216 | 11,939 |
import copy
import tkinter as tk
from tkinter import messagebox as tkmb
def ask_for_missing_options(arguments: CommandLineArguments, root: tk.Tk) -> ProgramOptions:
"""
Complete the missing information by askin the user interactively.
"""
values = copy.deepcopy(arguments)
if values.source_directory is None:
values.source_directory = insist_for_directory(
"Ordner mit allen Bildern auswaehlen.",
"Quellverzeichnis muss ausgewaehlt sein.",
root,
)
if len(values.target_directories) == 0:
values.target_directories.append(
insist_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
"Mindestens ein Zielverzeichnis muss ausgewaehlt sein.",
root,
)
)
is_more_to_add = tkmb.askyesno(message="Ein weiteres Zielverzeichnis angeben?")
while is_more_to_add:
possible_directory = ask_for_directory(
"Ordner in den die Bilder einsortiert werden sollen auswaehlen.",
root,
)
if possible_directory is None:
tkmb.showwarning(message="Kein Verzeichnis gewaehlt!")
else:
values.target_directories.append(possible_directory)
is_more_to_add = tkmb.askyesno(message="Noch ein weiteres Zielverzeichnis angeben?")
program_options = ProgramOptions(
values.source_directory,
values.target_directories,
)
return program_options | 6be882199bee5abaef9b3362522b508e90222150 | 11,940 |
import math
import numpy as np
def matrix2quaternion(m):
"""Returns quaternion of given rotation matrix.
Parameters
----------
m : list or numpy.ndarray
3x3 rotation matrix
Returns
-------
quaternion : numpy.ndarray
quaternion [w, x, y, z] order
Examples
--------
>>> import numpy
>>> from skrobot.coordinates.math import matrix2quaternion
>>> matrix2quaternion(np.eye(3))
array([1., 0., 0., 0.])
"""
m = np.array(m, dtype=np.float64)
tr = m[0, 0] + m[1, 1] + m[2, 2]
if tr > 0:
S = math.sqrt(tr + 1.0) * 2
qw = 0.25 * S
qx = (m[2, 1] - m[1, 2]) / S
qy = (m[0, 2] - m[2, 0]) / S
qz = (m[1, 0] - m[0, 1]) / S
elif (m[0, 0] > m[1, 1]) and (m[0, 0] > m[2, 2]):
S = math.sqrt(1. + m[0, 0] - m[1, 1] - m[2, 2]) * 2
qw = (m[2, 1] - m[1, 2]) / S
qx = 0.25 * S
qy = (m[0, 1] + m[1, 0]) / S
qz = (m[0, 2] + m[2, 0]) / S
elif m[1, 1] > m[2, 2]:
S = math.sqrt(1. + m[1, 1] - m[0, 0] - m[2, 2]) * 2
qw = (m[0, 2] - m[2, 0]) / S
qx = (m[0, 1] + m[1, 0]) / S
qy = 0.25 * S
qz = (m[1, 2] + m[2, 1]) / S
else:
S = math.sqrt(1. + m[2, 2] - m[0, 0] - m[1, 1]) * 2
qw = (m[1, 0] - m[0, 1]) / S
qx = (m[0, 2] + m[2, 0]) / S
qy = (m[1, 2] + m[2, 1]) / S
qz = 0.25 * S
return np.array([qw, qx, qy, qz]) | 97a7a9c9c7a92bc2269c9ec9fee4e9e462168e50 | 11,941 |
from json import dumps, loads

def paging_forward(data_func, *args):
    """
    Create a "forward" button for switching to the next page of a list.
    :param data_func: func from UI.buttons. The action that the returned button will carry.
    :return: InlineKeyboardButton
    """
g_data = loads(data_func(*args).callback_data)
g_data['page'] += 1
text = '>>'
return InlineKeyboardButton(text, callback_data=dumps(g_data)) | 4e1ef26c005e47422b86658814cb3336b51d296f | 11,942 |
from threading import RLock
def synchronized(func):
"""Synchronizes method invocation on an object using the method name as the mutex"""
def wrapper(self,*__args,**__kw):
try:
            rlock = getattr(self, '_sync_lock_%s' % func.__name__)
#rlock = self._sync_lock
except AttributeError:
rlock = self.__dict__.setdefault('_sync_lock_%s' % func.__name__, RLock())
rlock.acquire()
try:
return func(self,*__args,**__kw)
finally:
rlock.release()
wrapper.__name__ = func.__name__
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
return wrapper | 8db95217e8e5e37d0e7457c0808163fd6ddc007f | 11,943 |
import numpy as np

def vector(location_1, location_2):
"""
Returns the unit vector from location_1 to location_2
location_1, location_2: carla.Location objects
"""
x = location_2.x - location_1.x
y = location_2.y - location_1.y
z = location_2.z - location_1.z
norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
return [x / norm, y / norm, z / norm] | 453e35f91984458e0022f7576ef17902063ee1ed | 11,944 |
import json
import requests
import sys
import pandas as pd
def test_pandas_code_snippets(
app, client, tmpdir, monkeypatch,
template_name, script_name, expected_columns
):
"""Bit of a complicated test, but TLDR: test that the API example Python
scripts work.
This test is a bit complicated and is pretty low impact, so if you're
struggling to maintain this, I recommend adding a `@pytest.mark.skip` on
top of the test function.
"""
# We need to mock a few things to test that the Pandas code works:
monkeypatch.setitem(
app.jinja_env.globals,
'url_for',
lambda loc, **kwargs: loc
)
class MockResponse:
def __init__(self, data):
self.json = lambda: json.loads(data)
def _get(loc: str, **kwargs):
reversed_url_map = {
i.endpoint: i.rule
for i
in app.url_map.iter_rules()
}
res = client.get(reversed_url_map[loc])
return MockResponse(data=res.data)
monkeypatch.setattr(requests, 'get', _get)
# Now let's render the code:
py_code = app.jinja_env \
.get_template(template_name) \
.render()
f = tmpdir.mkdir('code').join(f'{script_name}.py')
f.write(py_code)
# Import the script as a module
sys.path.append(f.dirname)
__import__(script_name)
mod = sys.modules[script_name]
assert hasattr(mod, 'df')
assert isinstance(mod.df, pd.DataFrame) # noqa
assert all([c in mod.df.columns for c in expected_columns]) | 5b48ec267ad653dd51380ac5909062eab1456d33 | 11,945 |
def getOpenOCO(recvWindow=""):
"""# Query Open OCO (USER_DATA)
#### `GET /api/v3/openOrderList (HMAC SHA256)`
### Weight: 3
### Parameters:
Name |Type |Mandatory |Description
--------|--------|--------|--------
recvWindow |LONG |NO |The value cannot be greater than <code>60000</code>
timestamp |LONG |YES |
<strong>Data Source:</strong> """
endpoint = '/api/v3/openOrderList'
params = {
}
if recvWindow: params["recvWindow"] = recvWindow
return getbinancedata_sig(endpoint, params) | ea82f7441f57efd5ebf99ad4cff370f68cbcb367 | 11,946 |
def pixel_unshuffle(scale):
    """ Pixel unshuffle.
    Args:
        scale (int): Downsample ratio.
    Returns:
        Callable: a function that takes a feature map ``x`` of shape (b, c, hh, hw)
        and returns the pixel-unshuffled feature of shape
        (b, c*scale**2, hh//scale, hw//scale).
    """
if scale == 1:
return lambda x: x
def f(x):
b, c, hh, hw = x.size()
out_channel = c * (scale**2)
assert hh % scale == 0 and hw % scale == 0
h = hh // scale
w = hw // scale
x_view = x.view(b, c, h, scale, w, scale)
return x_view.permute(0, 1, 3, 5, 2, 4).reshape(b, out_channel, h, w)
return f | 28802c2014e5a3c28de6b751c40ad4787de8e80c | 11,947 |
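# A short usage sketch with PyTorch tensors: unshuffling by 2 moves spatial detail
# into channels (channels grow by scale**2, spatial dims shrink by scale).
import torch

unshuffle2 = pixel_unshuffle(2)
x = torch.randn(4, 3, 16, 16)
print(unshuffle2(x).shape)  # torch.Size([4, 12, 8, 8])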
import attr
def flag(name, thing=None):
"""Generate an Attribute with that name which is valued True or False."""
if thing is None:
thing = Keyword(name)
return attr(name, thing, "Flag") | 02f32444eb927dd61f0e08da7579e47d1b4a580d | 11,948 |
def select_id_from_scores_dic(id1, id2, sc_dic,
get_worse=False,
rev_filter=False):
"""
Based on ID to score mapping, return better (or worse) scoring ID.
>>> id1 = "id1"
>>> id2 = "id2"
>>> id3 = "id3"
>>> sc_dic = {'id1' : 5, 'id2': 3, 'id3': 3}
>>> select_id_from_scores_dic(id1, id2, sc_dic)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, get_worse=True)
'id2'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True, get_worse=True)
'id1'
>>> select_id_from_scores_dic(id1, id2, sc_dic, rev_filter=True)
'id2'
>>> select_id_from_scores_dic(id2, id3, sc_dic)
False
"""
sc_id1 = sc_dic[id1]
sc_id2 = sc_dic[id2]
if sc_id1 > sc_id2:
if rev_filter:
if get_worse:
return id1
else:
return id2
else:
if get_worse:
return id2
else:
return id1
elif sc_id1 < sc_id2:
if rev_filter:
if get_worse:
return id2
else:
return id1
else:
if get_worse:
return id1
else:
return id2
else:
return False | f2fa5f33eead47288c92715ce358581a72f18361 | 11,949 |
def add_args(parser):
"""Add arguments to the argparse.ArgumentParser
Args:
parser: argparse.ArgumentParser
Returns:
parser: a parser added with args
"""
# Training settings
parser.add_argument(
"--task",
type=str,
default="train",
metavar="T",
help="the type of task: train or denoise",
)
parser.add_argument(
"--datadir",
type=str,
metavar="DD",
help="data directory for training",
)
parser.add_argument(
"--noisy_wav",
type=str,
metavar="NW",
help="path to noisy wav",
)
parser.add_argument(
"--denoised_wav",
type=str,
default="denoised_sample.wav",
metavar="DW",
help="path to denoised wav",
)
parser.add_argument(
"--pretrained",
type=str,
default=None,
metavar="PT",
help="path to pre-trainedmodel",
)
parser.add_argument(
"--saved_model_path",
type=str,
default="model.pth",
metavar="SMP",
help="path to trained model",
)
parser.add_argument(
"--partition_ratio",
type=float,
default=1 / 3,
metavar="PR",
help="partition ratio for trainig (default: 1/3)",
)
parser.add_argument(
"--batch_size",
type=int,
default=5,
metavar="BS",
help="input batch size for training (default: 5)",
)
parser.add_argument(
"--lr",
type=float,
default=0.001,
metavar="LR",
help="learning rate (default: 0.3)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="momentum (default: 0.9)",
)
parser.add_argument(
"--noise_amp",
type=float,
default=0.01,
metavar="NA",
help="amplitude of added noise for trainign (default: 0.01)",
)
parser.add_argument(
"--split_sec",
type=float,
default=1.0,
metavar="SS",
help="interval for splitting [sec]",
)
parser.add_argument(
"--epochs",
type=int,
default=5,
metavar="EP",
help="how many epochs will be trained",
)
parser.add_argument(
"--sampling_rate",
type=int,
default=16000,
metavar="SR",
help="sampling rate",
)
parser.add_argument(
"--log_interval",
type=int,
default=2,
metavar="LI",
help="log interval",
)
parser.add_argument(
"--path_to_loss",
type=str,
default=None,
metavar="PL",
help="path to png filw which shows the transtion of loss",
)
return parser | cfebbfb6e9821290efdc96aaf0f7a7470e927c70 | 11,950 |
from typing import Any
def run_interactive(package: str, action: str, *args: Any, **_kwargs: Any) -> Any:
"""Call the given action's run"""
action_cls = get(package, action)
app, interaction = args
return action_cls(app.args).run(app=app, interaction=interaction) | 5423d67b61441aaea2162042feeffe68e1d79a0c | 11,951 |
from typing import Tuple
def pythagorean_heuristic(start_point: Tuple[int, int], end_point: Tuple[int, int]) -> float:
"""Return the distance between start_point and end_point using the pythagorean distance
"""
x1, y1 = start_point
x2, y2 = end_point
distance = (((x2 - x1) ** 2) + ((y2 - y1) ** 2)) ** 0.5
return distance | b22369d3860cb0969d43c5e7eb0290a757f5c692 | 11,952 |
import numpy as np

def run_simulation(x, simulation_time, dt, rates, sampling_time):
"""
Runs a simulation and stores the sampled sequences the matrix sequences (nb_nucleotide * nb_sequences).
x is modified during the simulation. The original sequence is included in the sequences matrix, in the first row.
"""
ii = 0
time = np.arange(0, simulation_time + 1, dt)
nb_samples = simulation_time // sampling_time
sequences = np.zeros(shape=(len(x), nb_samples + 1), dtype=bool)
for t in time:
if (t % sampling_time == 0):
sequences[:, ii] = x
ii += 1
x = simulation_step(x, dt, rates)
return sequences | b04e36fba421e9931a78f42502d69d5432b5add9 | 11,953 |
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username")
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password")
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password")
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html") | db06fe7b5f99be9ca0957c769f783ec182cc7e24 | 11,954 |
def process_item(item_soup):
"""Parse information about a single podcast episode.
@param item_soup: Soup containing information about a single podcast
episode.
@type item_soup: bs4.BeautifulSoup
@return: Dictionary describing the episode. Contains keys name (str value),
date (datetime.date), loc (url - str value), duration (seconds - int),
and orig_tags (tags applied to episode - list of str)
@rtype: dict
"""
title = item_soup.find('title').contents[0].strip()
loc = item_soup.find('guid').contents[0]
pub_date_raw = item_soup.find('pubdate').contents[0]
pub_date = common.interpret_2822_date(pub_date_raw)
    # Materialize as a list so the membership test below doesn't exhaust an iterator.
    tags = [tag.contents[0] for tag in item_soup.findAll('category')]
    duration_soup = item_soup.find('itunes:duration')
    if duration_soup is None:
duration = 1800 if 'shorts' in tags else 3600
else:
duration_str = duration_soup.contents[0]
duration = common.interpret_duration(duration_str)
return {
'name': title,
'date': pub_date,
'tags': sorted(set(tags)),
'loc': loc,
'duration': duration
} | f83c98516d1a573c11c6affbe04122f6eac32918 | 11,955 |
def update_webhook(request, log, tenantId, groupId, policyId, webhookId, data):
"""
Update a particular webhook.
A webhook may (but do not need to) include some arbitrary medata, and must
include a name.
If successful, no response body will be returned.
Example request::
{
"name": "alice",
"metadata": {
"notes": "this is for Alice"
}
}
"""
rec = get_store().get_scaling_group(log, tenantId, groupId)
deferred = rec.update_webhook(policyId, webhookId, data)
return deferred | ee33bc3a63ebe13c920288ba56a0771a10d8e2c4 | 11,956 |
import numpy as np

def covariance_from_internal(internal_values, constr):
"""Undo a cholesky reparametrization."""
chol = chol_params_to_lower_triangular_matrix(internal_values)
cov = chol @ chol.T
return cov[np.tril_indices(len(chol))] | 04d6385f32c8d89e283be89d4f17e7fa5770115e | 11,957 |
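# A hypothetical round-trip illustrating the reparametrization being undone: the
# "internal" vector is a flattened lower-triangular Cholesky factor, and the
# function rebuilds the lower triangle of cov = L @ L.T (the external
# chol_params_to_lower_triangular_matrix helper is assumed to do the unflattening).
import numpy as np

L_factor = np.array([[2.0, 0.0],
                     [0.5, 1.5]])
internal = L_factor[np.tril_indices(2)]   # [2.0, 0.5, 1.5]
cov = L_factor @ L_factor.T
print(cov[np.tril_indices(2)])            # [4.0, 1.0, 2.5] - what the function returns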
def find_result_node(desc, xml_tree):
"""
Returns the <result> node with a <desc> child matching the given text.
Eg: if desc = "text to match", this function will find the following
result node:
<result>
<desc>text to match</desc>
</result>
Parameters
-----
    desc : the text contained in the desc node
    xml_tree : the xml tree to search for the <result> node
Returns
-----
node : the <result> node containing the child with the given desc
"""
result_nodes = xml_tree.findall("result")
for result_node in result_nodes:
result_desc = result_node.find("desc").text.strip()
if result_desc == desc:
return result_node
return None | 44ecfae4cd80a04e656bffbcdfbcf686c1e825f2 | 11,958 |
def y(instance):
"""Syntactic sugar to find all y-coordinates of a given class instance.
    Convenience function to return all associated y-coordinates
of a given class instance.
Parameters
----------
instance : DataContainer, Mesh, R3Vector, np.array, list(RVector3)
Return the associated coordinate positions for the given class instance.
"""
return __getCoords('y', 1, instance) | aa0362148bd65427ac27f0e0e875a1cab0fd3057 | 11,959 |
import numpy
def interp_xzplane(y, u, y_target=0.0):
"""Perform linear interpolation of the 3D data at given y-location.
Parameters
----------
y : numpy.ndarray of floats
The y-coordinates along a vertical gridline as a 1D array.
u : numpy.ndarray of floats
The 3D data.
y_target : float (optional)
The y-coordinate at which to interpolate the data.
Returns
-------
u_target : numpy.ndarray of floats
The 2D interpolated data.
"""
idx = numpy.where(y >= y_target)[0][0]
y0, y1 = y[idx - 1], y[idx]
u0, u1 = u[:, idx - 1, :], u[:, idx, :]
u_target = u0 + (y_target - y0) * (u1 - u0) / (y1 - y0)
return u_target | 77f8b559c64eb2b33723a2a8e540f4d783364c84 | 11,960 |
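# A small sanity check (assumes interp_xzplane above is in scope): data that varies
# linearly along y should be recovered exactly at the interpolation target.
import numpy as np

y = np.linspace(0.0, 1.0, 5)
u = np.tile(y[np.newaxis, :, np.newaxis], (3, 1, 4))  # u equals y along the second axis
print(np.allclose(interp_xzplane(y, u, y_target=0.5), 0.5))  # True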
import datetime
import numpy as np
import tensorflow as tf
from sklearn.metrics import precision_score, recall_score
import os
import csv
def train_transfer(x_train, y_train, vocab_processor, pretrain_emb, x_dev, y_dev,
source_ckpt, target_ckpt, pretrained_values=None):
"""
Train a transfer model on target task: must pass "pretrained_values"
Build model architecture using target task data,
then load pre-trained model's weight value to it (instead of rand init)
"""
# Output directory for models and summaries and csvs
if FLAGS.emb_mode != 'rand':
assert int(len(vocab_processor.vocabulary_)) == int(pretrain_emb.shape[0]), "vocab length not equal to pretrain embedding row!"
        assert int(FLAGS.embedding_dim) == int(pretrain_emb.shape[1]), "pretrain embedding col not equal to embedding_dim!"
if FLAGS.train_sick:
datasetname = "SICK" + str(FLAGS.sent_col)+ "_" + str(FLAGS.sent_label) + "_"
else:
datasetname = "SE_"
today = str(datetime.date.today())
timestamp = datasetname + FLAGS.model_type + "_"
if FLAGS.model_type == 'rnn':
timestamp += FLAGS.cell_type + "_"
timestamp += 'emb-'+FLAGS.emb_mode + "_"
timestamp += 'finetune_' if FLAGS.finetune else 'freeze_'
timestamp += 'batchsize' + str(FLAGS.batch_size) + "_"
timestamp += "evalevery" + str(FLAGS.evaluate_every)
out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", today, timestamp))
print("========Writing runs to {}\n".format(out_dir))
checkpoint_dir = target_ckpt
checkpoint_prefix = os.path.join(checkpoint_dir, "modelbest")
if not os.path.exists(checkpoint_dir):
raise ValueError("new directory has not been created yet to save the transfer model!")
# write to csv
csv_outdir = os.path.abspath(os.path.join(os.path.curdir,"runs", "results_csv", FLAGS.model_type))
csv_filename = datasetname + str(FLAGS.model_type)
if FLAGS.model_type == 'rnn':
csv_filename += '_'+str(FLAGS.cell_type)
csv_filename += '_'+str(FLAGS.emb_mode) + "_tune" + str(FLAGS.finetune)
csv_filename += '_batchsize' + str(FLAGS.batch_size)
csv_filename += "_evalevery" + str(FLAGS.evaluate_every)
csv_filename_train = os.path.abspath(os.path.join(csv_outdir, csv_filename+'_train_transfer.csv'))
csv_filename_test = os.path.abspath(os.path.join(csv_outdir, csv_filename+'_test_transfer.csv'))
print("========Writing train csv to {}\n".format(csv_filename_train))
print("========Writing test csv to {}\n".format(csv_filename_test))
tf.reset_default_graph()
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
if FLAGS.model_type == 'cnn':
print("=====Training in CNN=====")
model = TextCNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
num_filters=FLAGS.num_filters,
multi_label=FLAGS.multi_label,
l2_reg_lamb=FLAGS.l2_reg_lambda)
elif FLAGS.model_type == 'rnn':
print("=====Training in RNN=====")
model = TextRNN(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
cell_type=FLAGS.cell_type,
hidden_size=FLAGS.hidden_size,
multi_label=FLAGS.multi_label,
l2_reg_lamb=FLAGS.l2_reg_lambda)
elif FLAGS.model_type == 'fasttext':
print("=====Training in fastText (avg-pooling)=====")
model = fastText(
sequence_length=x_train.shape[1],
num_classes=y_train.shape[1],
vocab_size=len(vocab_processor.vocabulary_),
embedding_size=FLAGS.embedding_dim,
pretrain_emb=pretrain_emb,
emb_mode=FLAGS.emb_mode,
finetune=FLAGS.finetune,
multi_label=FLAGS.multi_label)
else:
raise ValueError("mode %s not supported. Valid mode: %s, %s" % (
FLAGS.model_type, 'fasttext', 'cnn','rnn'))
# Summaries for loss and accuracy
loss_summary = tf.summary.scalar(str(FLAGS.emb_mode)+"_loss_"+str('finetune' if FLAGS.finetune else 'freeze'), model.loss)
acc_summary = tf.summary.scalar(str(FLAGS.emb_mode)+"_acc_"+str('finetune' if FLAGS.finetune else 'freeze'), model.accuracy)
# Train Summaries
# train_summary_op = tf.summary.merge([loss_summary, acc_summary, grad_summaries_merged])
train_summary_op = tf.summary.merge([loss_summary, acc_summary])
train_summary_dir = os.path.join(out_dir, "summaries", "train")
train_summary_writer = tf.summary.FileWriter(train_summary_dir, sess.graph)
# Dev summaries
dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
dev_summary_writer = tf.summary.FileWriter(dev_summary_dir, sess.graph)
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)
grads_and_vars = optimizer.compute_gradients(model.loss)
train_op = optimizer.apply_gradients(grads_and_vars,
global_step=global_step)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables(),
max_to_keep=FLAGS.num_checkpoints)
graph = tf.get_default_graph()
load_ops = []
if pretrained_values != None:
print("loading pretrained weight values")
for key in pretrained_values:
print(key)
load_ops.append(tf.assign(graph.get_tensor_by_name(key),
pretrained_values[key]))
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.dropout_keep_prob: FLAGS.dropout_keep_prob
}
# for metric: Update the running variables on new batch of samples
_, step, summaries, loss, accuracy, pred = sess.run(
[train_op, global_step, train_summary_op, model.loss, model.accuracy,
model.predictions], feed_dict)
# Calculate the score on this batch
precision_avg, recall_avg = 0., 0.
if not FLAGS.multi_label:
y_true = np.argmax(y_batch, 1)
precision_avg = precision_score(y_true, pred, average='macro')
recall_avg = recall_score(y_true, pred, average='macro')
else:
top_k = len(pred[0])
y_true = np.stack([arr.argsort()[-top_k:][::-1] for arr in y_batch])
for k in range(top_k):
precision_avg += precision_score(y_true[:, k], pred[:, k], average='macro')
recall_avg += recall_score(y_true[:, k], pred[:, k], average='macro')
precision_avg /= top_k
recall_avg /= top_k
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g}, "
"precision {:g}, recall {:g}".format(time_str, step, loss,
accuracy, precision_avg, recall_avg))
train_summary_writer.add_summary(summaries, global_step=step)
mode = 'a' if os.path.exists(csv_filename_train) else 'w'
if mode == 'w':
with open(csv_filename_train, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['step', 'accuracy', 'precision_avg','recall_avg'])
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
else:
with open(csv_filename_train, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
def dev_step(x_batch, y_batch, writer=None):
"""
Evaluates model on the entire dev set
"""
feed_dict = {
model.input_x: x_batch,
model.input_y: y_batch,
model.dropout_keep_prob: 1.0
}
step, summaries, loss, accuracy, pred = sess.run(
[global_step, dev_summary_op, model.loss, model.accuracy,
model.predictions], feed_dict)
# Calculate the score and confusion matrix on this batch
precision_avg, recall_avg = 0., 0.
if not FLAGS.multi_label:
y_true = np.argmax(y_batch, 1)
precision_avg = precision_score(y_true, pred, average='macro')
recall_avg = recall_score(y_true, pred, average='macro')
else:
top_k = len(pred[0])
y_true = np.stack([arr.argsort()[-top_k:][::-1] for arr in y_batch])
for k in range(top_k):
                    precision_avg += precision_score(y_true[:, k], pred[:, k], average='macro')
recall_avg += recall_score(y_true[:, k], pred[:, k], average='macro')
precision_avg /= top_k
recall_avg /= top_k
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g},"
"precision {:g}, recall {:g}".format(time_str, step, loss, accuracy,
precision_avg, recall_avg))
if writer:
writer.add_summary(summaries, global_step=step)
mode = 'a' if os.path.exists(csv_filename_test) else 'w'
if mode == 'w':
with open(csv_filename_test, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(['step', 'accuracy', 'precision_avg','recall_avg'])
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
else:
with open(csv_filename_test, mode) as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',',
quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow([step, accuracy, precision_avg, recall_avg])
return accuracy
# Generate batches
batches = data_helpers.batch_iter(
list(zip(x_train, y_train)), FLAGS.batch_size, FLAGS.num_epochs)
        if pretrained_values is not None:
sess.run([load_ops])
# 0-step eval
print("\nEvaluation at step 0:")
dev_step(x_dev, y_dev, writer=dev_summary_writer)
print("")
moving_avg_test_acc = 0
num_eval = 0
# Training loop. For each batch...
for batch in batches:
x_batch, y_batch = zip(*batch)
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if current_step % FLAGS.evaluate_every == 0:
print("\nEvaluation:")
cur_test_acc = dev_step(x_dev, y_dev,
writer=dev_summary_writer)
moving_avg_test_acc += cur_test_acc
num_eval += 1
print("")
if num_eval != 0 and moving_avg_test_acc / num_eval < cur_test_acc:
print("cur test acc:", cur_test_acc)
print("avg test acc: ", moving_avg_test_acc / num_eval)
path = saver.save(sess, checkpoint_prefix+'best', global_step=current_step)
print("Saved best model checkpoint to {}\n".format(path))
path = saver.save(sess, checkpoint_prefix+'final', global_step=current_step)
print("Saved final model checkpoint to {}\n".format(path))
return csv_filename_train, csv_filename_test, checkpoint_dir | b8758eb2442d5473f014671a89f6e198854017c7 | 11,961 |
def liste_vers_paires(l):
"""
Passer d'une structure en list(list(str)) ) list([str, str])
:param l:
:return:
"""
res = []
for i in l:
taille_i = len(i)
for j in range(taille_i-1):
for k in range(j+1, taille_i):
res.append([i[j], i[k]])
return res | 5f40e032fb9aba22656565d958ccfac828512b77 | 11,962 |
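A quick usage sketch of the pair expansion above (plain Python, no dependencies); the input groups are made up for illustration:

groups = [["a", "b", "c"], ["x", "y"]]
# Each inner list is expanded into all of its unordered pairs, in order.
print(liste_vers_paires(groups))
# [['a', 'b'], ['a', 'c'], ['b', 'c'], ['x', 'y']]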
from typing import List
from typing import Dict
from typing import Any
def assert_typing(
input_text_word_predictions: List[Dict[str, Any]]
) -> List[Dict[str, str]]:
"""
this is only to ensure correct typing, it does not actually change anything
Args:
input_text_word_predictions: e.g. [
{"char_start": 0, "char_end": 7, "token": "example", "tag": "O"},
..
]
Returns:
input_text_word_predictions_str: e.g. [
{"char_start": "0", "char_end": "7", "token": "example", "tag": "O"},
..
]
"""
return [
{k: str(v) for k, v in input_text_word_prediction.items()}
for input_text_word_prediction in input_text_word_predictions
] | 0835bad510241eeb2ee1f69ac8abeca711ebbf53 | 11,963 |
import urllib
import time
def download(file):
"""Download files from live server, delete recerds of those that 404.
"""
url = 'https://www.' + settings.DOMAIN.partition('.')[2] + file.url()
try:
print(url)
return urllib.request.urlopen(url, timeout=15).read()
except urllib.error.HTTPError as e:
print(e.code, url)
file.delete()
time.sleep(.5)
except urllib.error.URLError as e:
print(e.args, url)
return '' | f1eb0bc35f3a4afa40b22e9ff4db69740b273d31 | 11,964 |
from corehq.apps.commtrack.models import StockState
def get_current_ledger_state(case_ids, ensure_form_id=False):
"""
Given a list of cases returns a dict of all current ledger data of the following format:
{
"case_id": {
"section_id": {
"product_id": StockState,
"product_id": StockState,
...
},
...
},
...
}
:param ensure_form_id: Set to True to make sure return StockState
have the ``last_modified_form_id`` field populated
"""
if not case_ids:
return {}
states = StockState.objects.filter(
case_id__in=case_ids
)
ret = {case_id: {} for case_id in case_ids}
for state in states:
sections = ret[state.case_id].setdefault(state.section_id, {})
sections[state.product_id] = state
if ensure_form_id and not state.last_modified_form_id:
transaction = StockTransaction.latest(state.case_id, state.section_id, state.product_id)
if transaction is not None:
state.last_modified_form_id = transaction.report.form_id
state.save()
return ret | 807cd430a29c7a8c377ad1822435a344d95daa7c | 11,965 |
def _FlattenPadding(padding):
"""Returns padding reduced to have only the time dimension."""
if padding is None:
return padding
r = tf.rank(padding)
return tf.reduce_min(padding, axis=tf.range(1, r)) | 0a757e3bb84ec89c0959de8a1d06667373501c9d | 11,966 |
def revive_custom_object(identifier, metadata):
"""Revives object from SavedModel."""
if ops.executing_eagerly_outside_functions():
model_class = training_lib.Model
else:
model_class = training_lib_v1.Model
revived_classes = {
'_tf_keras_layer': (RevivedLayer, base_layer.Layer),
'_tf_keras_input_layer': (RevivedInputLayer, input_layer.InputLayer),
'_tf_keras_network': (RevivedNetwork, network_lib.Network),
'_tf_keras_model': (RevivedNetwork, model_class),
'_tf_keras_sequential': (RevivedNetwork, models_lib.Sequential)
}
parent_classes = revived_classes.get(identifier, None)
  if parent_classes is not None:
revived_cls = type(
compat.as_str(metadata['class_name']), parent_classes, {})
return revived_cls._init_from_metadata(metadata) # pylint: disable=protected-access | 870db96ed17fdfe1dc535a3b38541de5a0f34688 | 11,967 |
def all_users():
"""Returns all users in database sorted by name
Returns:
QuerySet[User]: List containing each User instance
"""
# Return all unique users in Database.
# sorted by full name
# returns query set. same as python list. Each index in user_list is a user model.
user_list = User.objects.order_by("full_name")
return user_list | f952b7b1134429e9473da339bbb881011c7bb0b8 | 11,968 |
def plugin_func_list(tree):
"""Return a list of expected reports."""
return [EXPECTED_REPORT + (type(plugin_func_list),)] | 236054789507d64f1593ea13b5333b2c7db2a1aa | 11,969 |
def entity_ids(value):
"""Validate Entity IDs."""
if value is None:
raise vol.Invalid('Entity IDs can not be None')
if isinstance(value, str):
value = [ent_id.strip() for ent_id in value.split(',')]
return [entity_id(ent_id) for ent_id in value] | 21ab0ca35dc6b727b57e1dcd472de38a81c92d88 | 11,970 |
import math
def random_unitary(dim, seed=None):
"""
Return a random dim x dim unitary Operator from the Haar measure.
Args:
dim (int): the dim of the state space.
seed (int): Optional. To set a random seed.
Returns:
Operator: (dim, dim) unitary operator.
Raises:
QiskitError: if dim is not a positive power of 2.
"""
if seed is not None:
np.random.seed(seed)
if dim == 0 or not math.log2(dim).is_integer():
raise QiskitError("Desired unitary dimension not a positive power of 2.")
return Operator(unitary_group.rvs(dim)) | fd0599fe0a03036fee5ae31ea00b9ba0277b035d | 11,971 |
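A short usage sketch, assuming qiskit (Operator, QiskitError) and scipy.stats.unitary_group are available as in the original context; numpy is imported here only for the unitarity check:

import numpy as np

U = random_unitary(4, seed=7)
# Operator.data holds the underlying (4, 4) complex matrix.
print(U.data.shape)                                      # (4, 4)
print(np.allclose(U.data @ U.data.conj().T, np.eye(4)))  # True (unitary)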
def allc(IL, IR):
"""
Compute the all-chain set (ALLC).
Parameters
----------
IL : ndarray
Left matrix profile indices
IR : ndarray
Right matrix profile indices
Returns
-------
S : list(ndarray)
All-chain set
C : ndarray
Anchored time series chain for the longest chain (also known as the
unanchored chain)
Notes
-----
`DOI: 10.1109/ICDM.2017.79 <https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf>`__
See Table II
Unlike the original paper, we've replaced the while-loop with a more stable
for-loop.
This is the implementation for the all-chain set (ALLC) and the unanchored
chain is simply the longest one among the all-chain set. Both the
all-chain set and unanchored chain are returned.
The all-chain set, S, is returned as a list of unique numpy arrays.
"""
L = np.ones(IL.size, dtype=np.int64)
S = set() # type: ignore
for i in range(IL.size):
if L[i] == 1:
j = i
C = deque([j])
for k in range(IL.size):
if IR[j] == -1 or IL[IR[j]] != j:
break
else:
j = IR[j]
L[j] = -1
L[i] = L[i] + 1
C.append(j)
S.update([tuple(C)])
C = atsc(IL, IR, L.argmax())
S = [np.array(s, dtype=np.int64) for s in S] # type: ignore
return S, C | 4ec01a2f3718430c2965173a48a0d3f8bd84f0e1 | 11,972 |
def build_resilient_url(host, port):
"""
Build basic url to resilient instance
:param host: host name
:type host: str
:param port: port
:type port: str|int
:return: base url
:rtype: str
"""
if host.lower().startswith("http"):
return "{0}:{1}".format(host, port)
return "https://{0}:{1}".format(host, port) | 33be03c5417aa41fdc888d856e760107843096e9 | 11,973 |
def relative_angle(pos1, pos2):
""" Angle between agents. An element (k,i,j) from the output is the angle at kth sample between ith (reference head) and jth (target base).
arg:
pos1: positions of the thoraces for all flies. [time, flies, y/x]
pos2: positions of the heads for all flies. [time, flies, y/x]
returns:
rel_angles: orientation of flies with respect to chamber. [time, flies, flies]
"""
d0 = pos2 - pos1
d1 = pos1[:, np.newaxis, :, :] - pos2[:, :, np.newaxis, :] # all pairwise "distances"
dot = d0[:, :, np.newaxis, 1]*d1[:, :, :, 1] + d0[:, :, np.newaxis, 0]*d1[:, :, :, 0]
det = d0[:, :, np.newaxis, 1]*d1[:, :, :, 0] - d0[:, :, np.newaxis, 0]*d1[:, :, :, 1]
rel_angles = np.arctan2(det, dot)
return rel_angles * 180.0 / np.pi | cceb6e2e2007399b7479e7ec7c7237d554905b62 | 11,974 |
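A minimal sketch with made-up coordinates (numpy assumed imported as np, as in the snippet): two flies at a single time step, both facing along +x.

import numpy as np

thorax = np.array([[[0.0, 0.0], [0.0, 10.0]]])  # [time=1, flies=2, y/x]
head = np.array([[[0.0, 1.0], [0.0, 11.0]]])    # heads offset along +x
angles = relative_angle(thorax, head)
# One angle per (reference fly, target fly) pair at each time step.
print(angles.shape)  # (1, 2, 2)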
def run_value_iteration(env):
"""Run a random policy for the given environment.
Logs the total reward and the number of steps until the terminal
state was reached.
Parameters
----------
env: gym.envs.Environment
Instance of an OpenAI gym.
Returns
-------
(float, int)
First number is the total undiscounted reward received. The
second number is the total number of actions taken before the
episode finished.
"""
initial_state = env.reset()
env.render()
# time.sleep(1) # just pauses so you can see the output
total_reward = 0
num_steps = 0
gamma = 0.9
tol = 1e-3
max_iterations = 1000
state = initial_state
optimal_value_function, iterations = value_iteration(env, gamma, max_iterations, tol)
policy = value_function_to_policy(env, gamma, optimal_value_function)
while True:
action_cur = policy[state]
print(" ")
print("step %d" % num_steps)
print("action is %s" % action_names[action_cur])
nextstate, reward, is_terminal, debug_info = env.step(action_cur)
print(debug_info)
state = nextstate
env.render()
print("move to state %d" % nextstate)
total_reward += reward
num_steps += 1
if is_terminal:
break
# time.sleep(1)
return total_reward, num_steps | 829c3288325520fe5b36fdbcd1d76c8178ace710 | 11,975 |
def get_shape(dset):
"""
Extract the shape of a (possibly constant) dataset
Parameters:
-----------
dset: an h5py.Dataset or h5py.Group (when constant)
The object whose shape is extracted
Returns:
--------
A tuple corresponding to the shape
"""
# Case of a constant dataset
if isinstance(dset, h5py.Group):
shape = dset.attrs['shape']
# Case of a non-constant dataset
elif isinstance(dset, h5py.Dataset):
shape = dset.shape
return(shape) | fd1f1ed59542cd1a6527d9c45dd64ee4c7b47cb4 | 11,976 |
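A minimal sketch, assuming an HDF5 file written with h5py and the "constant dataset" convention where the shape is stored as a group attribute:

import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:
    dset = f.create_dataset("rho", data=np.zeros((16, 32)))
    grp = f.create_group("rho_constant")
    grp.attrs['shape'] = np.array([16, 32])
    print(get_shape(dset))  # (16, 32)
    print(get_shape(grp))   # [16 32]  (the stored attribute, returned as-is)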
from sklearn.metrics import mean_absolute_error
import os
import pickle
def predict(reaction_mech, T_list, pressure_0, CCl4_X_0, mass_flow_rate,
n_steps, n_pfr, length, area, save_fig=False, name='predict',fold_no=None,iter_CCl4=False):
"""
Load the saved parameters of StandardScaler() and rebuild the ML model to
do predictions.
=============== =============================================================
Attribute Description
=============== =============================================================
`reaction_mech` Doctinary of Cantera reaction mechanism(s) (.cti file)
`T_list` Temperature profile (°C)
`pressure_0` Initial pressue (atm)
`CCl4_X_0` Initial CCl4 concentration (mass fraction)
`mass_flow_rate`Mass flow rate of input gas (T/H)
`n_steps` Number of iterations/number of CSTRs
`n_pfr` Number of PFRs
`length` Length of each PFR (m)
`area` Cross-sectional area (m**2)
`save_fig` Save figure to `plots` folder
`name` The file name of the saving figure
=============== =============================================================
"""
# Load scaler parameter
with open(os.path.join(RESULTPATH, 'clf.pickle'), 'rb') as f:
scaler = pickle.load(f)
# Load model
model = build_model()
model.load_weights(os.path.join(RESULTPATH,'model.h5'))
if CCl4_X_0 > 1 : #ppm
CCl4_X_0 = float(CCl4_X_0) / 1000000
    if not isinstance(reaction_mech, dict):
        raise TypeError('The datatype of `reaction_mech` is {}. It should be a dict.'.format(type(reaction_mech)))
results = {}
for label in reaction_mech.keys():
compositions, t, cracking_rates = EDC_cracking(
reaction_mech[label],
T_list,
pressure_0,
CCl4_X_0,
mass_flow_rate,
n_steps,
n_pfr,
length,
area,
label=label
)
results[label] = {
'compositions': compositions,
't': t,
'cracking_rates': cracking_rates,
}
# Use ML model to predict
KM_label = 'Schirmeister'
y_predicted = [0]
for i, T in enumerate(T_list[1:]):
Ti = T_list[i]
Te = T
compositions = results[KM_label]['compositions'][i]
t = sum(results[KM_label]['t'][:i+1])
t_r = results[KM_label]['t'][i]
x_predict = [Ti, Te, compositions, pressure_0, CCl4_X_0, t, t_r]
x_predict = np.hstack(x_predict).reshape(1, -1)
x_predict = x_predict[:,:-4]
rescaled_x_predict = scaler.transform(x_predict)
x_predict = [rescaled_x_predict[:,2:], rescaled_x_predict[:,:2]]
y = float(model.predict(x_predict))
y_predicted.append(y)
results['ML'] = {'cracking_rates': y_predicted}
loss = mean_absolute_error(results['Choi']['cracking_rates'],
results['ML']['cracking_rates'])
print(f"loss in {CCl4_X_0}: {loss} ")
'''
Ti = T_list[:-1]
Te = T_list[1:]
df = read_csv("Data/RawDataInput.csv")
y_ground_df = df.query(
"CCl4_X_0 == @CCl4_X_0 &"\
"pressure_0 == @pressure_0 &"\
"mass_flow_rate == @mass_flow_rate &"\
"Ti == @Ti &"\
"Te == @Te"
)['X']
if not y_ground_df.empty:
y_ground = [0]
if len(y_ground_df) >= 18:
for index in y_ground_df.index:
try:
if y_ground_df.loc[[index+17]].index == index + 17:
for i in range(index,index+18):
y_ground.append(y_ground_df.loc[i]/100)
break
except KeyError:
print("index + 17 dont exist in y_ground_df, continue")
continue
print(len(y_ground))
results['FPC'] = {'cracking_rates': y_ground}
loss = mean_absolute_error(results['FPC']['cracking_rates'],
results['ML']['cracking_rates'])
'''
if CCl4_X_0 < 1 : #ppm
CCl4_X_0 = float(CCl4_X_0) * 1000000
# Plot figure
if save_fig:
ndata = len(T_list)
fig, ax1 = plt.subplots()
ln = ax1.plot(range(ndata), T_list, color='r', marker='o', label='Temperature ($^\circ$C)')
ax1.set_ylabel('Temperature ($^\circ$C)')
ax1.set_ylim(0, 600)
ax2 = ax1.twinx()
lns = ln
for label in results.keys():
cracking_rates = [i * 100 for i in results[label]['cracking_rates']]
lns += ax2.plot(range(ndata), cracking_rates, marker='o', label=label)
ax2.set_ylabel('Cracking rates (%)')
ax2.set_ylim(-5, 100)
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc='lower right', frameon=True)
plt.title('Temperature and cracking rates curves')
ax1.set_xlabel('PFR index')
plt.xticks(range(ndata))
if DOKFOLD:
plt.savefig(os.path.join(PLOTPATH, f'{fold_no}_{name}.png'))
elif iter_CCl4 is True:
if not os.path.exists(os.path.join(PLOTPATH,"predict")):
os.mkdir(os.path.join(PLOTPATH,"predict"))
plt.savefig(os.path.join(PLOTPATH, f'predict/CCl4_{CCl4_X_0:.6f}_mass_{mass_flow_rate}_temp_{T_list[0]}_{name}.png'))
else:
plt.savefig(os.path.join(PLOTPATH, '{}.png'.format(name)))
return loss | 5152c14facb28aa78625a6d5064a7e54094ef55f | 11,977 |
import socket
import sys
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats | 0eebef4fea6e80353f9e1ecc5c73f7b2d74b3ef6 | 11,978 |
import typing
def _sanitize_bool(val: typing.Any, /) -> bool:
"""Sanitize argument values to boolean."""
if isinstance(val, str):
return val.lower() == 'true'
return bool(val) | b41c52b6e61bcc6ec8b78138f4a5ee58f7284ca3 | 11,979 |
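A few illustrative calls (note the positional-only argument):

# Only the case-insensitive string "true" maps to True; other strings are False,
# and non-strings fall back to the usual bool() coercion.
print(_sanitize_bool("TRUE"))   # True
print(_sanitize_bool("false"))  # False
print(_sanitize_bool(0))        # False
print(_sanitize_bool([1]))      # True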
def isSameLinkedList(linked_list1, linked_list2):
"""
    Check whether two linked lists contain the same sequence of values.
    Args:
        linked_list1: head node of the first linked list
        linked_list2: head node of the second linked list
"""
while linked_list1:
if linked_list1.val != linked_list2.val:
return False
linked_list1, linked_list2 = linked_list1.next, linked_list2.next
return True | cb41ed64b61f49c97104939fc1b1869e872f8234 | 11,980 |
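A minimal sketch; the ListNode class below is an assumption, since the snippet only relies on .val and .next attributes (and on both lists having the same length):

class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

a = ListNode(1, ListNode(2, ListNode(3)))
b = ListNode(1, ListNode(2, ListNode(3)))
print(isSameLinkedList(a, b))  # True
b.next.val = 9
print(isSameLinkedList(a, b))  # False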
from typing import Any
async def async_get_config_entry_diagnostics(
hass: HomeAssistant, config_entry: ConfigEntry
) -> dict[str, Any]:
"""Return diagnostics for a config entry."""
coordinator: SolcastUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
return {
"info": async_redact_data(config_entry.options, TO_REDACT),
"data": async_redact_data(coordinator.data, TO_REDACT),
} | e8a75709612426a70c94ff30d740a6ca1ff53972 | 11,981 |
def calculate_value_function(transition_costs):
"""Recursively apply the bellman equation from the end to the start. """
state_dim = [tc.shape[0] for tc in transition_costs]
state_dim.append(transition_costs[-1].shape[1])
V = [np.zeros(d) for d in state_dim]
V_ind = [np.zeros(d) for d in state_dim]
for i in range(len(state_dim) - 2, -1, -1):
rhs = transition_costs[i] + V[i + 1]
V[i] = np.min(rhs, axis=1)
V_ind[i] = np.argmin(rhs, axis=1)
return V_ind, V | 14ef732e45581b407d9c19618c7c18b1e9bdbc4e | 11,982 |
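A worked two-stage example (numpy assumed imported as np): transition_costs[i][s, s'] is the cost of moving from state s at stage i to state s' at stage i+1.

import numpy as np

transition_costs = [
    np.array([[1.0, 5.0],
              [2.0, 0.0]]),  # stage 0 -> stage 1
    np.array([[3.0],
              [1.0]]),       # stage 1 -> the single terminal state
]
V_ind, V = calculate_value_function(transition_costs)
print(V[0])      # [4. 1.]  minimal cost-to-go from each start state
print(V_ind[0])  # [0 1]    best successor state chosen at stage 0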
def load_config(fname: str) -> JSON_TYPE:
"""Load a YAML file."""
return load_yaml(fname) | 6d4bab8da8853a3ec4ca47afc7d16b1b519343ab | 11,983 |
import os
import re
def get_date_folders():
"""
Return a list of the directories used for backing up the database.
"""
directories_in_curdir = list(filter(os.path.isdir, os.listdir(os.getcwd())))
date_folders = [
d for d in directories_in_curdir if re.match(r"([0-9]+(-[0-9]+)+)", d)
]
return date_folders | 127d087888a6cd2dc2786365206a20e495a092ff | 11,984 |
def read_tree_color_map(filename):
"""Reads a tree colormap from a file"""
infile = util.open_stream(filename)
maps = []
for line in infile:
expr, red, green, blue = line.rstrip().split("\t")
        maps.append([expr, list(map(float, (red, green, blue)))])
name2color = make_expr_mapping(maps)
def leafmap(node):
return name2color(node.name)
return tree_color_map(leafmap) | 82ebbe7b14785a5e766efe40096d94a6867c46b3 | 11,985 |
def sin_cos_encoding(arr):
""" Encode an array of angle value to correspongding Sines and Cosines, avoiding value jump in 2PI measure like from PI to -PI. """
return np.concatenate((np.sin(arr), np.cos(arr))) | ada587fc811748a01d1769385cced60cb678cf15 | 11,986 |
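A quick sketch (numpy assumed imported as np); angles that differ by 2*pi map to the same encoding, which is what the docstring means by avoiding the jump at the PI/-PI boundary:

import numpy as np

print(sin_cos_encoding(np.array([0.0, np.pi / 2])))
# [0. 1. 1. 0.]  (up to floating-point error in the last entry)
print(np.allclose(sin_cos_encoding(np.array([np.pi])),
                  sin_cos_encoding(np.array([-np.pi]))))
# True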
def atom_free_electrons(mgrph, idx):
""" number of unbound valence electrons for an atom in a molecular graph
"""
atms = atoms(mgrph)
vlnc = valence(atms[idx])
bcnt = atom_bond_count(mgrph, idx)
return vlnc - bcnt | 41989063c18f5f9d30da165528a2969fd728f4eb | 11,987 |
def identify_guest():
"""Returns with an App Engine user or an anonymous user.
"""
app_engine_user = users.get_current_user()
if app_engine_user:
return Guest.app_engine_user(app_engine_user)
ip_address = ip_address_from_request(request)
if ip_address:
return Guest.ip_address(ip_address)
else:
return Guest() | 5bb857a9477e6f7d22f3c675fc2db92935088121 | 11,988 |
def compute_placevalues(tokens):
"""Compute the placevalues for each token in the list tokens"""
pvs = []
for tok in tokens:
if tok == "point":
pvs.append(0)
else:
pvs.append(placevalue(get_value(tok)[0]))
return pvs | af67660675c3d8f55a621a300c530975bffe87ac | 11,989 |
def get_model_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch):
"""get_model_damping"""
damping_each_step = []
total_steps = steps_per_epoch * total_epochs
for step in range(total_steps):
epoch = (step + 1) / steps_per_epoch
damping_here = damping_init * (decay_rate ** (epoch / 10))
damping_each_step.append(damping_here)
current_step = global_step
damping_each_step = np.array(damping_each_step).astype(np.float32)
damping_now = damping_each_step[current_step:]
return damping_now | 9aeb0fff36b458886c7b38a3f0072927d2660e47 | 11,990 |
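A small sketch (numpy assumed imported as np in the original context): 2 epochs of 5 steps each, resuming from global step 3, so the returned schedule covers the remaining 7 steps.

damping = get_model_damping(global_step=3, damping_init=0.03,
                            decay_rate=0.87, total_epochs=2, steps_per_epoch=5)
print(damping.shape)  # (7,)
print(damping[0])     # 0.03 * 0.87 ** (0.8 / 10), cast to float32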
def transformData(Z,Time,Spec):
    # transformData: transforms each data series based on Spec.Transformation
    #
    # Input Arguments:
    #
    #   Z    : T x N numeric array, raw (untransformed) observed data
    #   Time : T x 1 array, observation dates
    #   Spec : structure, model specification
    #
    # Output Arguments:
    #
    #   X, Time, Z : transformed data (stationary, ready to enter the DFM),
    #                dates, and raw data, each with the first quarter of
    #                observations dropped
"""
Transformation notes:
'lin' = Levels (No Transformation)
'chg' = Change (Difference)
'ch1' = Year over Year Change (Difference)
'pch' = Percent Change
'pc1' = Year over Year Percent Change
'pca' = Percent Change (Annual Rate)
'log' = Natural Log
"""
T,N = Z.shape
X = np.empty((T, N))
X[:] = np.nan
Freq_dict = {"m":1,"q":3}
formula_dict = {"lin":lambda x:x*2,
"chg":lambda x:np.append(np.nan,x[t1+step::step] - x[t1:-1-t1:step]),
"ch1":lambda x:x[12+t1::step] - x[t1:-12:step],
"pch":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-1-t1:step]) - 1)*100,
"pc1":lambda x:((x[12+t1::step]/x[t1:-12:step])-1)*100,
"pca":lambda x:(np.append(np.nan,x[t1+step::step]/x[t1:-step:step])**(1/n) - 1)*100,
"log":lambda x:np.log(x)
}
for i in range(N):
formula = Spec.Transformation[i]
freq = Spec.Frequency[i]
step = Freq_dict[freq] # time step for different frequencies based on monthly time
t1 = step -1 # assume monthly observations start at beginning of quarter (subtracted 1 for indexing)
n = step/12 # number of years, needed to compute annual % changes
series = Spec.SeriesName[i]
if formula == 'lin':
X[:,i] = Z[:,i].copy()
elif formula == 'chg':
X[t1::step,i] = formula_dict['chg'](Z[:,i].copy())
elif formula == 'ch1':
X[12+t1::step, i] = formula_dict['ch1'](Z[:, i].copy())
elif formula == 'pch':
X[t1::step, i] = formula_dict['pch'](Z[:, i].copy())
elif formula == 'pc1':
X[12+t1::step, i] = formula_dict['pc1'](Z[:, i].copy())
elif formula == 'pca':
X[t1::step, i] = formula_dict['pca'](Z[:, i].copy())
elif formula == 'log':
X[:, i] = formula_dict['log'](Z[:, i].copy())
else:
ValueError("{}: Transformation is unknown".format(formula))
# Drop first quarter of observations
# since transformations cause missing values
return X[3:,:],Time[3:],Z[3:,:] | a56b6ecd4ae408cbc9e3873ba10833bd902c3249 | 11,991 |
def torch_fn():
"""Create a ReLU layer in torch."""
return ReLU() | 35ae9fe99a641768f109b8ba216271730941892e | 11,992 |
def all_user_tickets(uid, conference):
"""
    Cache-friendly version of user_tickets; returns a list of
    (ticket_id, fare_type, fare_code, complete)
    for each ticket associated with the user.
"""
qs = _user_ticket(User.objects.get(id=uid), conference)
output = []
for t in qs:
output.append((
t.id, t.fare.ticket_type, t.fare.code,
_ticket_complete(t)
))
return output | 7778d4fa0eac0c311db8965f8bd449ad31bd49db | 11,993 |
from typing import List
def get_hardconcrete_linear_modules(module: nn.Module) -> List[nn.Module]:
"""Get all HardConcrete*Linear modules.
Parameters
----------
module : nn.Module
The input module
Returns
-------
List[nn.Module]
A list of the HardConcrete*Linear module.
"""
modules = []
for m in module.children():
if isinstance(m, HardConcreteProjectedLinear):
modules.append(m)
elif isinstance(m, HardConcreteLinear):
modules.append(m)
else:
modules.extend(get_hardconcrete_linear_modules(m))
return modules | bc821bb2fc41dbf7385b6e319323a65e0372e218 | 11,994 |
import torch
def approx_partial(model, ori_target, param, current_val, params, loss_list, information_loss_list, xs_list, ys_list, train=False, optimizer=None):
"""Compute the approximate partial derivative using the finite-difference method.
:param param:
:param current_val:
:param params:
:return:
"""
#step_size = STEP_SIZES[param]
step_size = 10
losses = []
for sign in [-1, 1]:
set_param(param, current_val + sign * step_size / 2, params)
loss = get_attack_loss(model, ori_target, information_loss_list, xs_list, ys_list,
loss_f=torch.nn.MSELoss(reduction='none'),
xs=params['x'], ys=params['y'],
shape=(320, 320), n_pixel_range=(10, 11), train=train, optimizer=optimizer)
# image = RENDERER.render()
# with torch.no_grad():
# out = MODEL(image)
# loss = CRITERION(out, LABELS).item()
losses.append(loss)
grad = (losses[1] - losses[0]) / step_size
loss_list += losses
return grad | 5cf3eff7880d2405a1dfa01a4296a974deeef70d | 11,995 |
import time
def check_successful_connections(_ctx: Context) -> bool:
"""Checks if there are no successful connections more than SUCCESSFUL_CONNECTIONS_CHECK_PERIOD sec.
Returns True if there was successful connection for last NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC sec.
:parameter _ctx: Context
"""
now_ns = time.time_ns()
lower_bound = max(_ctx.get_start_time_ns(),
_ctx.Statistic.connect.last_check_time)
diff_sec = ns2s(now_ns - lower_bound)
if _ctx.Statistic.connect.success == _ctx.Statistic.connect.success_prev:
if diff_sec > SUCCESSFUL_CONNECTIONS_CHECK_PERIOD_SEC:
_ctx.add_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)))
return diff_sec <= NO_SUCCESSFUL_CONNECTIONS_DIE_PERIOD_SEC
else:
_ctx.Statistic.connect.last_check_time = now_ns
_ctx.Statistic.connect.sync_success()
_ctx.remove_error(Errors('Check connection', no_successful_connections_error_msg(_ctx)).uuid)
return True | 19a3b9ee66ad8a3a6c2b4677116616ccdd5a452b | 11,996 |
def float_or_none(string):
""" Returns float number iff string represents one, else return None. TESTS OK 2020-10-24. """
try:
return float(string)
except (ValueError, TypeError):
return None | 8cc4437841f67e5b2f884ca566f3e6870dcd7649 | 11,997 |
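A few illustrative calls:

print(float_or_none("3.14"))  # 3.14
print(float_or_none("abc"))   # None
print(float_or_none(None))    # None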
import requests
def move_release_to_another_collection_folder(user: UserWithUserTokenBasedAuthentication,
username: str,
source_folder_id: int,
destination_folder_id: int,
release_id: int,
instance_id: int
) -> requests.models.Response:
"""
Move the instance of an release to another folder.
User Authentication needed.
Parameters:
user: user object (required)
username: string (required)
-> The username of the collection you are trying to retrieve.
source_folder_id: number (required)
-> The ID of the source folder.
destination_folder_id: number (required)
-> The ID of the destination folder.
release_id: number (required)
-> The ID of the release you are modifying.
instance_id: number (required)
-> The ID of the instance.
"""
url = f"{USERS_URL}/{username}/collection/folders/{source_folder_id}/releases/{release_id}/instances/{instance_id}"
params = user.params
headers = user.headers
data = {"folder_id": destination_folder_id}
return requests.post(url, headers=headers, params=params, json=data) | 47434146219d323b176db76905a6b5ffb4c25955 | 11,998 |
def load_region_maps(region_file):
"""Extracts creates a map from PHI region id to a continuous region id."""
region_ids = [] # Used mainly for eval
region_ids_inv = {} # Used in data loader
region_names_inv = {} # Used in eval
for l in region_file.read().strip().split('\n'):
tok_name_id, _ = l.strip().split(';') # second field is frequency, unused
region_name, region_id = tok_name_id.split('_')
region_name = region_name.strip()
region_id = int(region_id)
# Ignore unknown regions:
if ((region_name == 'Unknown Provenances' and region_id == 884) or
(region_name == 'unspecified subregion' and region_id == 885) or
(region_name == 'unspecified subregion' and region_id == 1439)):
continue
region_ids.append(region_id)
region_ids_inv[region_id] = len(region_ids_inv)
region_names_inv[len(region_names_inv)] = region_name
return {
'ids': region_ids,
'ids_inv': region_ids_inv,
'names_inv': region_names_inv
} | 201240ce485b4039b12741bb03c547de7976c99a | 11,999 |
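A minimal sketch using an in-memory file; the region names and IDs are made up, but they follow the '<name>_<id>;<frequency>' line format expected by the parser:

import io

region_file = io.StringIO("Attica_1;120\nCrete_7;30\n")
maps = load_region_maps(region_file)
print(maps['ids'])        # [1, 7]
print(maps['ids_inv'])    # {1: 0, 7: 1}
print(maps['names_inv'])  # {0: 'Attica', 1: 'Crete'}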