content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M) |
---|---|---|
import types
def get_task_name(task):
"""Gets a tasks *string* name, whether it is a task object/function."""
task_name = ""
if isinstance(task, (types.MethodType, types.FunctionType)):
        # If it's a function, look for the attributes that should have been
# set using the task() decorator provided in the decorators file. If
# those have not been set, then we should at least have enough basic
# information (not a version) to form a useful task name.
task_name = get_attr(task, 'name')
if not task_name:
name_pieces = [a for a in get_many_attr(task,
'__module__',
'__name__')
if a is not None]
task_name = join(name_pieces, ".")
else:
task_name = str(task)
return task_name | 181682d930cf358f2532406f1558b007aa09a41f | 16,037 |
def EventAddPublication(builder, publication):
"""This method is deprecated. Please switch to AddPublication."""
return AddPublication(builder, publication) | 39cf5facf251370fd86a004477f848dafd41976c | 16,038 |
def convModel(input1_shape, layers):
"""" convolutional model defined by layers. ith entry
defines ith layer. If entry is a (x,y) it defines a conv layer
with x kernels and y filters. If entry is x it defines a pool layer
with size x"""
model = Sequential()
for (i, layer) in enumerate(layers):
if isinstance(layer, int):
model.add(MaxPool1D(layer))
elif len(layer) == 2:
if i == 0:
model.add(Conv1D(layer[0], layer[1],
input_shape=input1_shape, padding='same',
activation='relu'))
else:
model.add(Conv1D(layer[0], layer[1], padding='same',
activation='relu'))
else:
print("Hodor")
model.add(GlobalMaxPool1D())
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))
model.compile(loss='binary_crossentropy',
metrics=['accuracy',precision],
optimizer=Adam(lr=3e-4))
print(model.inputs)
print(model.summary())
return model | d1a49a42ab0fc4eecd40783345f09121a773ae02 | 16,039 |
def cached_open_doc(db, doc_id, cache_expire=COUCH_CACHE_TIMEOUT, **params):
"""
Main wrapping function to open up a doc. Replace db.open_doc(doc_id)
"""
try:
cached_doc = _get_cached_doc_only(doc_id)
except ConnectionInterrupted:
cached_doc = INTERRUPTED
if cached_doc in (None, INTERRUPTED):
doc = db.open_doc(doc_id, **params)
if cached_doc is not INTERRUPTED:
do_cache_doc(doc, cache_expire=cache_expire)
return doc
else:
return cached_doc | 82118a5c9c43aaf339e7ca3ab8af9680fbd362d1 | 16,040 |
import torch
def dense2bpseq(sequence: torch.Tensor, label: torch.Tensor) -> str:
"""converts sequence and label tensors to `.bpseq`-style string"""
seq_lab = dense2seqlab(sequence, label)
    return seqlab2bpseq(seq_lab) | ec0a5d681fef518068042aa2830ee4d2ef3231c8 | 16,042 |
from datetime import datetime
def _base_app(config):
"""
init a barebone flask app.
if it is needed to create multiple flask apps,
use this function to create a base app which can be further modified later
"""
app = Flask(__name__)
app.config.from_object(config)
config.init_app(app)
bootstrap.init_app(app)
app.jinja_env.globals['datetime'] = datetime
app.jinja_env.globals['str_to_datetime'] = lambda x: from_string_to_datetime(x)
app.jinja_env.globals['format_float'] = lambda x: "%.2f" % x if x else None
app.jinja_env.globals['momentjs'] = momentjs
app.jinja_env.globals['get_collapsed_ids'] = get_collapsed
return app | f5f40ed9ea740c5b9bc9ebb8490136179d06f777 | 16,043 |
def applyC(input_map,nbar,MAS_mat,pk_map,Y_lms,k_grids,r_grids,v_cell,shot_fac,include_pix=True):
"""Apply the fiducial covariance to a pixel map x, i.e. C[x] = S[x]+N[x].
We decompose P(k;x) = \sum_l P_l(k) L_l(k.x) where x is the position of the second galaxy and use spherical harmonic decompositions.
P_l(k) are the even fiducial power spectrum multipoles, taken as an input (including the MAS window if relevant).
Parameters
----------
input_map : ndarray
The input map to apply the covariance to.
nbar : ndarray
Map of the background number density.
MAS_mat : ndarray
The mass assignment (i.e. compensation) matrix.
pk_map : ndarray
The fiducial power spectrum multipoles (only used with ML weights).
Y_lms : list
List of spherical harmonic functions, generated by the compute_spherical_harmonic_functions() function.
k_grids : ndarray
3D grids containing the (k_x,k_y,k_z) values.
r_grids : ndarray
3D grids containing the (r_x,r_y,r_z) values.
v_cell : float
Cell volume.
shot_fac : float
Shot noise factor.
include_pix : bool, optional
Whether to include the MAS effects in the covariance (default: True).
Returns
-------
ndarray
Covariance matrix applied to the input map.
"""
    return (applyS(input_map, nbar, MAS_mat, pk_map, Y_lms, k_grids, r_grids, v_cell, include_pix=include_pix)
            + applyN(input_map, nbar, MAS_mat, v_cell, shot_fac, include_pix=include_pix)) | 91346d935217f540a91947b0a00e91e0125794ef | 16,044 |
def invert_injective_mapping(dictionary):
"""
Inverts a dictionary with a one-to-one mapping from key to value, into a
new dictionary with a one-to-one mapping from value to key.
"""
inverted_dict = {}
for key, value in iteritems(dictionary):
assert value not in inverted_dict, "Mapping is not 1-1"
inverted_dict[value] = key
return inverted_dict | c8cba85f542c5129892eeba4168edf6d9715b54e | 16,045 |
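For illustration, a minimal usage sketch of the expected behaviour (this assumes an `iteritems` helper such as six.iteritems is in scope, as the snippet implies):
mapping = {'a': 1, 'b': 2}
print(invert_injective_mapping(mapping))   # {1: 'a', 2: 'b'}
# A non-injective mapping such as {'a': 1, 'b': 1} would trip the assertion.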
def biosql_dbseqrecord_to_seqrecord(dbseqrecord_, off=False):
"""Converts a DBSeqRecord object into a SeqRecord object.
Motivation of this function was two-fold: first, it makes type testing simpler; and second, DBSeqRecord does
not have a functional implementation of the translate method.
:param DBSeqRecord dbseqrecord_: The DBSeqRecord object to be converted.
:param bool off: Don't actually convert the DBSeqRecord. [Default: False]
:return:
"""
assert isinstance(dbseqrecord_, DBSeqRecord), ('Input must be a DBSeqRecord, '
'was of type {}!').format(type(dbseqrecord_))
if off:
return dbseqrecord_
else:
return SeqRecord(seq=Seq(data=str(dbseqrecord_.seq)), id=dbseqrecord_.id, name=dbseqrecord_.name,
description=dbseqrecord_.description, dbxrefs=dbseqrecord_.dbxrefs,
features=dbseqrecord_.features, annotations=dbseqrecord_.annotations,
letter_annotations=dbseqrecord_.letter_annotations) | 9129c5efd9025a04f0693fd2d35f420b28c2ea91 | 16,046 |
import re
from collections import defaultdict
def parse_rpsbproc(handle):
"""Parse a results file generated by rpsblast->rpsbproc.
This function takes a handle corresponding to a rpsbproc output file.
    local.rpsbproc returns a subprocess.CompletedProcess object, which contains the
    results as a byte string in its stdout attribute.
"""
# Sanitize input. Should work for either an open file handle (str, still contains \n
# when iterating) or byte-string stdout stored in a CompletedProcess object passed to this
# function as e.g. process.stdout.splitlines()
stdout = "\n".join(
line.decode().strip() if isinstance(line, bytes) else line.strip()
for line in handle
)
# Files produced by rpsbproc have anchors for easy parsing. Each query sequence
# is given a block starting/ending with QUERY/ENDQUERY, and domain hits for the
# query with DOMAINS/ENDDOMAINS.
query_pattern = re.compile(
r"QUERY\tQuery_\d+\tPeptide\t\d+\t([A-Za-z0-9.]+?)\n"
r"DOMAINS\n(.+?)ENDDOMAINS",
re.DOTALL,
)
domains = defaultdict(list)
for match in query_pattern.finditer(stdout):
query = match.group(1)
for row in match.group(2).split("\n"):
try:
domain = domain_from_row(row)
except ValueError:
continue
domains[query].append(domain)
return domains | 64be049b5cb96a3e59f421327d1715cee84e2300 | 16,048 |
from typing import get_type_hints
def _invalidate(obj, depth=0):
"""
    Recursively validate type-annotated classes.
"""
annotations = get_type_hints(type(obj))
for k, v in annotations.items():
item = getattr(obj, k)
res = not_type_check(item, v)
if res:
return f"{k} field of {type(obj)} : {res}"
if isinstance(item, (list, tuple)):
for ii, i in enumerate(item):
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
if isinstance(item, dict):
for ii, i in item.items():
sub = _invalidate(i, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
else:
sub = _invalidate(item, depth + 1)
if sub is not None:
return f"{k}.{ii}." + sub
# return outcome,s | a1881d45414a4a034456e0078553e4aa7bf6471a | 16,049 |
def convtranspose2d_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1, out_pad=0):
"""Calculates the output height and width of a feature map for a ConvTranspose2D operation."""
    h_w, kernel_size, stride, pad, dilation, out_pad = (
        num2tuple(h_w), num2tuple(kernel_size), num2tuple(stride),
        num2tuple(pad), num2tuple(dilation), num2tuple(out_pad))
pad = num2tuple(pad[0]), num2tuple(pad[1])
out_height = (h_w[0] - 1) * stride[0] - sum(pad[0]) + dilation[0] * (kernel_size[0] - 1) + out_pad[0] + 1
out_width = (h_w[1] - 1) * stride[1] - sum(pad[1]) + dilation[1] * (kernel_size[1] - 1) + out_pad[1] + 1
return out_height, out_width | e1ded212929e7e24b138335ae3d9006b1dcfb759 | 16,051 |
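As a worked example of the formula above, a small sketch that re-implements the per-dimension arithmetic for symmetric padding (it does not call the original num2tuple-based helper):
# H_out = (H_in - 1)*stride - 2*pad + dilation*(kernel - 1) + out_pad + 1
def _convtranspose_len(n, kernel=1, stride=1, pad=0, dilation=1, out_pad=0):
    return (n - 1) * stride - 2 * pad + dilation * (kernel - 1) + out_pad + 1

# A 14x14 map with kernel 3, stride 2, padding 1 and output_padding 1
# upsamples to 28x28: (14-1)*2 - 2 + 2 + 1 + 1 = 28.
assert _convtranspose_len(14, kernel=3, stride=2, pad=1, out_pad=1) == 28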
def set_multizone_read_mode(session, read_mode, return_type=None, **kwargs):
"""
Modifies where data is read from in multizone environments.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type read_mode: str
:param read_mode: For multizone environments, if set to 'roundrobin', data
will be read from storage nodes in all protection zones. If set to
'localcopy', data from the local protection zone will be favored.
'roundrobin' is the default value. Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_read_mode(read_mode)
body_values = {'readmode': read_mode}
path = '/api/settings/raid_read_mode.json'
return session.post_api(path=path, body=body_values,
return_type=return_type, **kwargs) | 0831bfd722514cab792eef38838e357209a0971f | 16,052 |
import re
def get_name_convert_func():
"""
Get the function to convert Caffe2 layer names to PyTorch layer names.
Returns:
(func): function to convert parameter name from Caffe2 format to PyTorch
format.
"""
pairs = [
# ------------------------------------------------------------
# 'nonlocal_conv3_1_theta_w' -> 's3.pathway0_nonlocal3.conv_g.weight'
[
r"^nonlocal_conv([0-9]*)_([0-9]*)_(.*)",
r"s\1.pathway0_nonlocal\2_\3",
],
# 'theta' -> 'conv_theta'
[r"^(.*)_nonlocal([0-9]*)_(theta)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'g' -> 'conv_g'
[r"^(.*)_nonlocal([0-9]*)_(g)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'phi' -> 'conv_phi'
[r"^(.*)_nonlocal([0-9]*)_(phi)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'out' -> 'conv_out'
[r"^(.*)_nonlocal([0-9]*)_(out)(.*)", r"\1_nonlocal\2.conv_\3\4"],
# 'nonlocal_conv4_5_bn_s' -> 's4.pathway0_nonlocal3.bn.weight'
[r"^(.*)_nonlocal([0-9]*)_(bn)_(.*)", r"\1_nonlocal\2.\3.\4"],
# ------------------------------------------------------------
# 't_pool1_subsample_bn' -> 's1_fuse.conv_f2s.bn.running_mean'
[r"^t_pool1_subsample_bn_(.*)", r"s1_fuse.bn.\1"],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[r"^t_pool1_subsample_(.*)", r"s1_fuse.conv_f2s.\1"],
# 't_res4_5_branch2c_bn_subsample_bn_rm' -> 's4_fuse.conv_f2s.bias'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_bn_(.*)",
r"s\1_fuse.bn.\3",
],
# 't_pool1_subsample' -> 's1_fuse.conv_f2s'
[
r"^t_res([0-9]*)_([0-9]*)_branch2c_bn_subsample_(.*)",
r"s\1_fuse.conv_f2s.\3",
],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway0_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^res_conv1_bn_(.*)", r"s1.pathway0_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway0_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^res_conv1_(.*)", r"s1.pathway0_stem.conv.\1"],
# ------------------------------------------------------------
# 'res4_4_branch_2c_bn_b' -> 's4.pathway0_res4.branch2.c_bn_b'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)([a-z])_(.*)",
r"s\1.pathway1_res\2.branch\3.\4_\5",
],
# 'res_conv1_bn_' -> 's1.pathway0_stem.bn.'
[r"^t_res_conv1_bn_(.*)", r"s1.pathway1_stem.bn.\1"],
# 'conv1_w_momentum' -> 's1.pathway0_stem.conv.'
[r"^t_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# 'res4_0_branch1_w' -> 'S4.pathway0_res0.branch1.weight'
[
r"^t_res([0-9]*)_([0-9]*)_branch([0-9]*)_(.*)",
r"s\1.pathway1_res\2.branch\3_\4",
],
# 'res_conv1_' -> 's1.pathway0_stem.conv.'
[r"^t_res_conv1_(.*)", r"s1.pathway1_stem.conv.\1"],
# ------------------------------------------------------------
# pred_ -> head.projection.
[r"pred_(.*)", r"head.projection.\1"],
        # '.bn_b' -> '.bias'
[r"(.*)bn.b\Z", r"\1bn.bias"],
# '.bn_s' -> '.weight'
[r"(.*)bn.s\Z", r"\1bn.weight"],
# '_bn_rm' -> '.running_mean'
[r"(.*)bn.rm\Z", r"\1bn.running_mean"],
# '_bn_riv' -> '.running_var'
[r"(.*)bn.riv\Z", r"\1bn.running_var"],
# '_b' -> '.bias'
[r"(.*)[\._]b\Z", r"\1.bias"],
# '_w' -> '.weight'
[r"(.*)[\._]w\Z", r"\1.weight"],
]
def convert_caffe2_name_to_pytorch(caffe2_layer_name):
"""
Convert the caffe2_layer_name to pytorch format by apply the list of
regular expressions.
Args:
caffe2_layer_name (str): caffe2 layer name.
Returns:
(str): pytorch layer name.
"""
for source, dest in pairs:
caffe2_layer_name = re.sub(source, dest, caffe2_layer_name)
return caffe2_layer_name
return convert_caffe2_name_to_pytorch | 4e3cbe0885a0d23d5af151bc0cea7127156aa9c9 | 16,053 |
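A short usage sketch; the expected output follows from the 'pred_' and trailing '_w'/'_b' rules above:
convert = get_name_convert_func()
print(convert("pred_w"))   # head.projection.weight
print(convert("pred_b"))   # head.projection.bias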
def generate_dict_entry(key, wordlist):
"""Generate one entry of the python dictionary"""
entry = " '{}': {},\n".format(key, wordlist)
return entry | 57ab3c063df0bde1261602f0c6279c70900a7a88 | 16,054 |
def record_to_dict(record):
"""
Transform string into bovespa.Record
:param record: (string) position string from bovespa.
:return: parsed Record
"""
try:
record = bovespa.Record(record)
except:
return None
return {
'date': record.date, 'year': record.date.year,
'month': record.date.month, 'day': record.date.day,
'money_volume': record.volume, 'volume': record.quantity,
'stock_code': record.stock_code, 'company_name': record.company_name,
'price_open': record.price_open, 'price_close': record.price_close,
'price_mean': record.price_mean, 'price_high': record.price_high,
'price_low': record.price_low
} | 3065d233a0186a72330165c9b082c819369ef449 | 16,055 |
def sample_product(user, **params):
"""Create and return a custom product"""
defaults = {
'name': 'Ron Cacique',
'description': 'El ron cacique es...',
'price': 20,
'weight': '0.70',
'units': 'l',
'featured': True,
}
defaults.update(params)
return Products.objects.create(user=user, **defaults) | 310b2ee775e5497597dd68cf6737623e40b78932 | 16,056 |
def _find_op_path_(block, outputs, inputs, no_grad_set):
"""
no_grad_set will also be changed
"""
input_names = set([inp.name for inp in inputs])
output_names = set([out.name for out in outputs])
relevant_op_flags = [True] * len(block.ops)
# All the inputs of the block are used if inputs is empty,
if inputs:
for i, op in enumerate(block.ops):
if _some_in_set_(op.desc.input_arg_names(), input_names):
for name in op.desc.output_arg_names():
if name not in no_grad_set:
input_names.add(name)
else:
relevant_op_flags[i] = False
for i, op in reversed(list(enumerate(block.ops))):
if _some_in_set_(op.desc.output_arg_names(), output_names):
for name in op.desc.input_arg_names():
if name not in no_grad_set:
output_names.add(name)
else:
relevant_op_flags[i] = False
op_path = [
block.ops[i] for i in range(len(block.ops)) if relevant_op_flags[i]
]
if inputs:
for op in op_path:
for name in op.desc.input_arg_names():
if name not in input_names and block.vars[name].stop_gradient:
no_grad_set.add(name)
return op_path | 05d1b18f883906cc41fa84f6f27f061b30ced4b8 | 16,057 |
def clean_gltf_materials(gltf):
"""
    Remove unused glTF materials.
    :param gltf: glTF object
    :return: new list of materials
"""
return filter(lambda m: m['name'] in used_material_names(gltf), gltf['materials']) | 7e429dceb84d48298897589172f976ea907ddcab | 16,058 |
def create_root(request):
"""
Returns a new traversal tree root.
"""
r = Root()
r.add('api', api.create_root(request))
r.add('a', Annotations(request))
r.add('t', TagStreamFactory())
r.add('u', UserStreamFactory())
return r | ebc64f7f49bf6b3405b9971aa0b30e72b3d13c5f | 16,059 |
def sort_basis_functions(basis_functions):
"""Sorts a set of basis functions by their distance to the
function with the smallest two-norm.
Args:
basis_functions: The set of basis functions to sort.
Expected shape is (-1, basis_function_length).
Returns:
sorted_basis: The sorted basis functions
sorted_ids: Mapping from unsorted basis function ids to
their sorted position.
"""
min_norm_idx = np.argmin(np.linalg.norm(basis_functions, axis=-1), axis=0)
min_norm_fn = basis_functions[min_norm_idx]
ids = list(range(len(basis_functions)))
sorted_ids = sorted(ids, key=lambda x: np.linalg.norm(basis_functions[x] - min_norm_fn))
sorted_basis = np.array(basis_functions)[sorted_ids]
return sorted_basis, sorted_ids | 18b11f80c7d08eb6435d823e557ec9ea4e028b92 | 16,060 |
def info_materials_groups_get():
"""
info_materials_groups_get
Get **array** of information for all materials, or if an array of `type_ids` is included, information on only those materials.
:rtype: List[Group]
"""
session = info_map.Session()
mat = aliased(info_map.Material)
grp = aliased(info_map.Group)
q = session.query(mat.group_id,grp.name).join(grp).distinct()
groups = [Group(group=row.group_id,name=row.name) for row in q.all()]
return groups, 200 | 49dcf0785be8d9a94b4bb730af6326d493e79000 | 16,062 |
def relu(x):
"""
Compute the relu of x
Arguments:
x -- A scalar or numpy array of any size.
Return:
s -- relu(x)
"""
s = np.maximum(0,x)
return s | 40b838889b62abca0a88788436b5d648261a3c67 | 16,064 |
def line(value):
"""
| Line which can be used to cross with functions like RSI or MACD.
| Name: line\_\ **value**\
:param value: Value of the line
:type value: float
"""
def return_function(data):
column_name = f'line_{value}'
if column_name not in data.columns:
data[column_name] = value
return data[column_name].copy()
return return_function | 07b4f9671ae06cf63c02062a9da4eb2a0b1a265a | 16,066 |
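A brief usage sketch (assumes pandas, which the returned function expects as its data argument):
import pandas as pd
df = pd.DataFrame({'rsi': [25.0, 55.0, 80.0]})
overbought = line(70)
print(overbought(df).tolist())   # [70, 70, 70]; a 'line_70' column is added to df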
import json
import requests
def goodsGetSku(spuId,regionId):
"""
:param spuId:
:param regionId:
:return:
"""
reqUrl = req_url('goods', "/goods/getGoodsList")
if reqUrl:
url = reqUrl
else:
return "服务host匹配失败"
headers = {
'Content-Type': 'application/json',
'X-Region-Id': regionId,
}
body = json.dumps(
{
"spuId": spuId,
"groundStatus": "",
"environment": "",
"page": 1,
"limit": 20
}
)
result = requests.post(url=url,headers=headers,data=body)
resultJ = json.loads(result.content)
return resultJ | 23c3960384529c7a45e730a612cc7999fa316bd4 | 16,067 |
def get_scheduler(config, optimizer):
"""
    :param config: configuration parameters
    :param optimizer: optimizer
    :return: learning-rate decay scheduler
    """
    # Load the learning-rate decay strategy
if config.scheduler_name == 'StepLR':
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=config.StepLR['decay_step'],
gamma=config.StepLR["gamma"])
elif config.scheduler_name == 'Cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=config.Cosine['restart_step'],
eta_min=config.Cosine['eta_min'])
elif config.scheduler_name == 'author':
scheduler = WarmupMultiStepLR(optimizer,
config.WarmupMultiStepLR["steps"],
config.WarmupMultiStepLR["gamma"],
config.WarmupMultiStepLR["warmup_factor"],
config.WarmupMultiStepLR["warmup_iters"],
config.WarmupMultiStepLR["warmup_method"]
)
return scheduler | 1e4be51c74ed6c35bde3343547c4a7a88736179a | 16,069 |
def pipeline_dict() -> dict:
"""Pipeline config dict. You need to update the labels!"""
pipeline_dictionary = {
"name": "german_business_names",
"features": {
"word": {"embedding_dim": 16, "lowercase_tokens": True},
"char": {
"embedding_dim": 16,
"encoder": {
"type": "gru",
"num_layers": 1,
"hidden_size": 32,
"bidirectional": True,
},
"dropout": 0.1,
},
},
"head": {
"type": "TextClassification",
"labels": [
"Unternehmensberatungen",
"Friseure",
"Tiefbau",
"Dienstleistungen",
"Gebrauchtwagen",
"Restaurants",
"Architekturbüros",
"Elektriker",
"Vereine",
"Versicherungsvermittler",
"Sanitärinstallationen",
"Edv",
"Maler",
"Physiotherapie",
"Werbeagenturen",
"Apotheken",
"Vermittlungen",
"Hotels",
"Autowerkstätten",
"Elektrotechnik",
"Allgemeinärzte",
"Handelsvermittler Und -vertreter",
],
"pooler": {
"type": "gru",
"num_layers": 1,
"hidden_size": 16,
"bidirectional": True,
},
"feedforward": {
"num_layers": 1,
"hidden_dims": [16],
"activations": ["relu"],
"dropout": [0.1],
},
},
}
return pipeline_dictionary | d9e15fb1a09678d65b30a49b7c7c811843420c57 | 16,070 |
def _is_hangul_syllable(i):
"""
Function for determining if a Unicode scalar value i is within the range of Hangul syllables.
:param i: Unicode scalar value to lookup
:return: Boolean: True if the lookup value is within the range of Hangul syllables, otherwise False.
"""
if i in range(0xAC00, 0xD7A3 + 1): # Range of Hangul characters as defined in UnicodeData.txt
return True
return False | 793519ec33a8920ea13328b0e5a4f814c859b0d3 | 16,071 |
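For illustration, the check applied to a Hangul syllable and a Latin letter:
assert _is_hangul_syllable(ord('한'))      # U+D55C, inside AC00..D7A3
assert not _is_hangul_syllable(ord('A'))   # U+0041, outside the range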
def shape14_4(tik_instance, input_x, res, input_shape, shape_info):
"""input_shape == ((32, 16, 14, 14, 16), 'float16', (1, 1), (1, 1))"""
stride_w, stride_h, filter_w, filter_h, dilation_filter_w, dilation_filter_h = shape_info
pad = [0, 0, 0, 0]
l1_h = 14
l1_w = 14
c1_index = 0
jump_stride = 1
repeat_mode = 1
with tik_instance.for_range(0, 32, block_num=32) as block_index:
eeb0 = block_index % 2
eeb1 = block_index // 2
input_1_1_local_l1 = tik_instance.Tensor("float16", (196 * 32 * 16,), scope=tik.scope_cbuf,
name="input_1_1_local_l1")
input_1_1_fractal_l1_local_ub = tik_instance.Tensor("float16", (106496 // 2,), scope=tik.scope_ubuf,
name="input_1_1_fractal_l1_local_ub")
input_1_2_fractal_l1_local_ub = tik_instance.Tensor("float16", (196 * 16 * 16,), scope=tik.scope_ubuf,
name="input_1_2_fractal_l1_local_ub")
with tik_instance.for_range(0, 32) as i:
tik_instance.data_move(input_1_1_local_l1[i * 3136], input_x[i, eeb1, 0, 0, 0], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 16) as i:
fetch_filter_w = 0
fetch_filter_h = 0
left_top_h = 0
left_top_w = 0
tik_instance.load3dv1(input_1_1_fractal_l1_local_ub[i * 3328],
input_1_1_local_l1[i * 3136 + eeb0 * 16 * 3136],
pad, l1_h, l1_w, c1_index, fetch_filter_w, fetch_filter_h,
left_top_w, left_top_h, stride_w, stride_h, filter_w,
filter_h, dilation_filter_w, dilation_filter_h,
jump_stride, repeat_mode, 13)
with tik_instance.for_range(0, 16) as i:
tik_instance.data_move(input_1_2_fractal_l1_local_ub[i * 196 * 16],
input_1_1_fractal_l1_local_ub[i * 3328], 0, 1, 196, 0, 0)
with tik_instance.for_range(0, 196) as i:
tik_instance.data_move(res[eeb1, i + 196 * eeb0, 0, 0], input_1_2_fractal_l1_local_ub[256 * i], 0, 1,
16, 0, 0)
return tik_instance, res | 5606e2e6445ea5415960e1784009bcc75de29669 | 16,072 |
from typing import Dict
from typing import Any
from typing import Optional
from typing import Iterator
from typing import Union
from typing import List
def read_json(
downloader: Download, datasetinfo: Dict, **kwargs: Any
) -> Optional[Iterator[Union[List, Dict]]]:
"""Read data from json source allowing for JSONPath expressions
Args:
downloader (Download): Download object for downloading JSON
datasetinfo (Dict): Dictionary of information about dataset
**kwargs: Variables to use when evaluating template arguments
Returns:
Optional[Iterator[Union[List,Dict]]]: Iterator or None
"""
url = get_url(datasetinfo["url"], **kwargs)
response = downloader.download(url)
json = response.json()
expression = datasetinfo.get("jsonpath")
if expression:
expression = parse(expression)
json = expression.find(json)
if isinstance(json, list):
return iter(json)
return None | 77183992d42d9c3860965222c3feda23aca588dc | 16,073 |
import json
def json_dumps_safer(obj, **kwargs):
"""Convert obj to json, with some extra encodable types."""
return json.dumps(obj, cls=WandBJSONEncoder, **kwargs) | 816e97051553f1adc4c39a7c5e4559fb3a354197 | 16,074 |
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# raise NotImplementedError()
full_data = pd.read_csv(filename).drop_duplicates()
data = full_data.drop(['id', 'date', 'lat', 'long'],
axis=1)
data = data.dropna()
for f in ZERO_AND_ABOVE:
data = data[data[f] >= 0]
for f in ONLY_POSITIVE:
data = data[data[f] > 0]
data['yr_renovated'] = np.where(data['yr_renovated'] == 0.0,
data['yr_built'], data['yr_renovated'])
data = pd.get_dummies(data, columns=['zipcode'],
drop_first=True)
features, label = data.drop("price", axis=1), data['price']
return features, label | 74707af8839b37de80d682d715ed8000375cdd7c | 16,075 |
from uuid import uuid4
from redis import Redis
from qiita_core.configuration_manager import ConfigurationManager
from qiita_db.sql_connection import SQLConnectionHandler
from moi.job import submit_nouser
def test(runner):
"""Test the environment
* Verify redis connectivity indepedent of moi
* Verify database connectivity
* Verify submission via moi
Tests are performed both on the server and ipengines.
"""
def redis_test(**kwargs):
"""Put and get a key from redis"""
config = ConfigurationManager()
r_client = Redis(host=config.redis_host,
port=config.redis_port,
password=config.redis_password,
db=config.redis_db)
key = str(uuid4())
r_client.set(key, 42, ex=1)
return int(r_client.get(key))
def postgres_test(**kwargs):
"""Open a connection and query postgres"""
c = SQLConnectionHandler()
return c.execute_fetchone("SELECT 42")[0]
def moi_test(**kwargs):
"""Submit a function via moi"""
def inner(a, b, **kwargs):
return a + b
_, _, ar = submit_nouser(inner, 7, 35)
state, result = _ipy_wait(ar)
return result
if runner == 'all':
runner = ('local', 'remote', 'moi')
else:
runner = [runner]
for name in runner:
_test_runner(name, "redis", redis_test, 42)
_test_runner(name, "postgres", postgres_test, 42)
_test_runner(name, "submit via moi", moi_test, 42) | ea4f8f50e3d85c3df6f7c890b5b91140a63bac65 | 16,076 |
def validate_model(model):
"""
Validate a single data model parameter or a full data model block by
recursively calling the 'validate' method on each node working from
the leaf nodes up the tree.
:param model: part of data model to validate
:type model: :graphit:GraphAxis
:return: overall successful validation
:rtype: :py:bool
"""
allnodes = model.nodes.keys()
leaves = model.leaves(return_nids=True)
done = []
def _walk_ancestors(nodes, success=True):
parents = []
for node in nodes:
node = model.getnodes(node)
# Continue only if the node was found and it has a 'validate' method
if not node.empty() and hasattr(node, 'validate'):
val = node.validate()
done.append(node.nid)
if not val:
return False
pnid = node.parent().nid
if pnid not in done and pnid in allnodes:
parents.append(pnid)
if parents:
return _walk_ancestors(set(parents), success=success)
return success
# Recursively walk the tree from leaves up to root.
return _walk_ancestors(leaves) | 009c629fe80af65f574c698567cb6b5213e9c888 | 16,078 |
def simple_get(url):
"""
Attempts to get the content at `url` by making an HTTP GET request.
If the content-type of response is some kind of HTML/XML, return the
text content, otherwise return None.
"""
filename = "{0}.html".format(url.split("/").pop().lower())
filepath = abspath(join(dirname(__file__), "./cache", filename))
file_data = read_file(filepath)
if file_data != None:
return file_data
try:
print("Fetching: {0}...".format(url))
with closing(get(url, stream=True)) as resp:
if is_good_response(resp):
write_cache_file(filepath, resp.content)
return resp.content
else:
return None
except RequestException as e:
log_error('Error during requests to {0} : {1}'.format(url, str(e)))
return None | 60b7714a439d949f42b1b8de6064c8ba087ccfdc | 16,079 |
def ensemble_tsfresh(forecast_in, forecast_out, season, perd):
"""
Create rolled time series for ts feature extraction
"""
def tsfresh_run(forecast, season, insample=True, forecast_out=None):
df_roll_prep = forecast.reset_index()
if insample:
df_roll_prep = df_roll_prep.drop(["Target", "Date"], axis=1)
df_roll_prep["id"] = 1
target = forecast["Target"]
else:
df_roll_prep = df_roll_prep.drop(["index"], axis=1)
df_roll_prep["id"] = 1
df_roll = roll_time_series(
df_roll_prep,
column_id="id",
column_sort=None,
column_kind=None,
rolling_direction=1,
max_timeshift=season - 1,
)
counts = df_roll["id"].value_counts()
df_roll_cut = df_roll[df_roll["id"].isin(counts[counts >= season].index)]
# TS feature extraction
concat_df = pd.DataFrame()
concat_df = extract_features(
df_roll_cut.ffill(),
column_id="id",
column_sort="sort",
n_jobs=season,
show_warnings=False,
disable_progressbar=True,
)
if insample:
concat_df = concat_df.dropna(axis=1, how="all")
concat_df.index = (
target[df_roll_cut["id"].value_counts().index]
.sort_index()
.to_frame()
.index
)
concat_df = pd.merge(
target[df_roll_cut["id"].value_counts().index].sort_index().to_frame(),
concat_df,
left_index=True,
right_index=True,
how="left",
)
concat_df_list = constant_feature_detect(data=concat_df, threshold=0.95)
concat_df = concat_df.drop(concat_df_list, axis=1)
else:
forecast_out.index.name = "Date"
concat_df.index = forecast_out.index
concat_df = impute(concat_df)
return concat_df
_LOG.info("LightGBM ensemble have been successfully built")
concat_df_drop_in = tsfresh_run(forecast_in, season, insample=True)
extracted_n_selected = select_features(
concat_df_drop_in.drop("Target", axis=1),
concat_df_drop_in["Target"],
fdr_level=0.01,
n_jobs=12,
) # fdr is the significance level.
forecast_out_add = pd.concat(
(forecast_in.iloc[-season + 1 :, :].drop(["Target"], axis=1), forecast_out),
axis=0,
)
concat_df_drop_out = tsfresh_run(
forecast_out_add, season, insample=False, forecast_out=forecast_out
)
extracted_n_selected_out = concat_df_drop_out[extracted_n_selected.columns]
# Reduce the dimensions of generated time series features
pca2 = PCA(n_components=8)
pca2.fit(extracted_n_selected)
pca2_results_in = pca2.transform(extracted_n_selected)
pca2_results_out = pca2.transform(extracted_n_selected_out)
cols = 0
for i in range(pca2_results_in.shape[1]):
cols = cols + 1
extracted_n_selected["pca_" + str(i)] = pca2_results_in[:, i]
extracted_n_selected_out["pca_" + str(i)] = pca2_results_out[:, i]
df = forecast_in.iloc[season - 1 :, :].copy()
df = time_feature(df, perd)
df["mean"] = df.drop(["Target"], axis=1).mean(axis=1)
df_new = pd.concat(
(df.reset_index(), extracted_n_selected.iloc[:, -cols:].reset_index(drop=True)),
axis=1,
)
df_new = df_new.set_index("Date")
forecast_train, forecast_test = tts(
df_new, train_size=0.5, shuffle=False, stratify=None
)
target = "Target"
d_train = lgb.Dataset(
forecast_train.drop(columns=[target]), label=forecast_train[target]
)
params = {
"boosting_type": "gbdt",
"objective": "regression",
"metric": "rmsle",
"max_depth": 6,
"learning_rate": 0.1,
"verbose": 0,
"num_threads": 16,
}
model = lgb.train(params, d_train, 100, verbose_eval=1)
ensemble_ts = pd.DataFrame(index=forecast_test.index)
ensemble_ts["ensemble_ts"] = model.predict(forecast_test.drop(columns=[target]))
df_out = forecast_out.copy()
df_out = time_feature(df_out, perd)
df_out["mean"] = df_out.mean(axis=1)
ensemble_ts_out = pd.DataFrame(index=df_out.index)
ensemble_ts_out["ensemble_ts"] = model.predict(df_out)
_LOG.info("LightGBM ensemble have been successfully built")
return ensemble_ts, ensemble_ts_out | 3bcef19cd495c043e51a591118c1ae8e043290b5 | 16,080 |
import numpy as np
from affine import Affine
def transform_from_latlon(lat, lon):
"""
    Transform from latitude and longitude
NOTES:
- credit - Shoyer https://gist.github.com/shoyer/0eb96fa8ab683ef078eb
"""
lat = np.asarray(lat)
lon = np.asarray(lon)
trans = Affine.translation(lon[0], lat[0])
scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0])
return trans * scale | 1f6fccfddd23423c0a621efa74a62cdb61b53665 | 16,082 |
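A small usage sketch, assuming evenly spaced coordinate vectors as the function expects:
lat = np.array([50.0, 49.9, 49.8])   # descending, 0.1-degree spacing
lon = np.array([8.0, 8.1, 8.2])
t = transform_from_latlon(lat, lon)
# The affine maps (col, row) grid indices to (lon, lat) coordinates.
print(t * (0, 0))   # (8.0, 50.0)
print(t * (1, 1))   # (8.1, 49.9)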
from typing import OrderedDict
import xmltodict
def xml_to_json(xml_text: str) -> OrderedDict:
"""Converts xml text to json.
Args:
xml_text (str): xml text to be parsed
Returns:
OrderedDict: an ordered dict representing the xml text as json
"""
return xmltodict.parse(xml_text) | 243156c6f0b3b0f0bf92d8eeaa3ecf52f5846fc8 | 16,083 |
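A quick usage sketch (relies on xmltodict, which the function wraps; nested elements come back as nested ordered dicts):
doc = xml_to_json("<note><to>Ada</to><body>Hi</body></note>")
print(doc["note"]["to"])     # 'Ada'
print(doc["note"]["body"])   # 'Hi'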
import requests
def getWeather(city, apikey):
"""
    Fetch the weather.
    Pass the API key and city as request parameters.
https://openweathermap.org/forecast5
"""
payload = {
        'APIKEY': apikey,
        'q': city
}
r = requests.get(
APIBASE,
params=payload
)
return r | 41a61d1bd9d1bd5d835963c305d0692babd6b64a | 16,085 |
def func1(xc):
"""Function which sets the data value"""
s = .1
res = np.exp(-xc**2/(2*s**2))
return res | 75ab06abf5b348746e322cf41660cfb908d69b62 | 16,086 |
def GetEnabledDiskTemplates(*args):
"""Wrapper for L{_QaConfig.GetEnabledDiskTemplates}.
"""
return GetConfig().GetEnabledDiskTemplates(*args) | c07707de13be5c055386659be620831b2f057d64 | 16,087 |
def is_pull_request_merged(pull_request):
"""Takes a github3.pulls.ShortPullRequest object"""
return pull_request.merged_at is not None | 0fecf82b96f7a46cfb4e9895897bd4998d6f225b | 16,088 |
def arraytoptseries(arr, crs={'epsg': '4326'}):
"""Convert an array of shape (2, ...) or (3, ...) to a
geopandas GeoSeries containing shapely Point objects.
"""
if arr.shape[0] == 2:
result = geopandas.GeoSeries([Point(x[0], x[1])
for x in arr.reshape(2, -1).T])
else:
result = geopandas.GeoSeries([Point(x[0], x[1], x[2])
for x in arr.reshape(3, -1).T])
#result.crs = crs
return result | b193f9bcb4144b81becca1acaa8285f8cafaaff2 | 16,089 |
async def get_kml_network_link():
""" Return KML network link file """
logger.info('/c-haines/network-link')
headers = {"Content-Type": kml_media_type,
"Content-Disposition": "inline;filename=c-haines-network-link.kml"}
return Response(headers=headers, media_type=kml_media_type, content=fetch_network_link_kml()) | 699ac59529ce085264a79dfdd048c96b3771e0a8 | 16,090 |
def splitToPyNodeList(res):
# type: (str) -> List[pymel.core.general.PyNode]
"""
converts a whitespace-separated string of names to a list of PyNode objects
Parameters
----------
res : str
Returns
-------
List[pymel.core.general.PyNode]
"""
return toPyNodeList(res.split()) | 44e8780833ed2a5d7418c972afb9e184ca82670b | 16,091 |
def get_jira_issue(commit_message):
"""retrieve the jira issue referenced in the commit message
>>> get_jira_issue(b"BAH-123: ")
{b'BAH-123'}
>>> messages = (
... b"this is jira issue named plainly BAH-123",
... b"BAH-123 plainly at the beginning",
... b"in parens (BAH-123)",
... b"(BAH-123) at the beginning",
... b"after a colon :BAH-123",
... b"Merged from \\FOO-4325 foo.\\n\\nsvn path=/foo/trunk/; revision=12345\\n"
... )
>>> issuesets = (get_jira_issue(i) for i in messages)
>>> issues = set()
>>> for issueset in issuesets:
... for issue in issueset: issues.add(issue)
>>> sorted(list(issues))
[b'BAH-123', b'FOO-4325']
>>> get_jira_issue(b"there is no issue here")
set()
>>> with open("tomatch.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
>>> with open("missed-strings.txt", "rb") as f: data = f.read().splitlines()
>>> missed = list(i for i in (None if get_jira_issue(i) else i for i in data) if i is not None)
>>> len(missed)
0
>>> for i in missed:
... print(i)
"""
start = 0
match = JIRA_ID_MATCHER.search(commit_message[start:])
issues = set()
while match:
issues.add(match.group(1))
start += match.end(1)
match = JIRA_ID_MATCHER.search(commit_message[start:])
return issues | b0bf47319c492ec297dd9898645a10ce8a53b43f | 16,092 |
def get_mean_std(dataloader):
"""Compute mean and std on the fly.
Args:
dataloader (Dataloader): Dataloader class from torch.utils.data.
Returns:
ndarray: ndarray of mean and std.
"""
cnt = 0
mean = 0
std = 0
for l in dataloader: # Now in (batch, channel, h, w)
data = l[0].double() # set dtype
b = data.size(0) # batch size at axis=0
data = data.view(b, data.size(1), -1) # reshape the tensor into (b, channel, h, w)
mean += data.mean(2).sum(0) # calculate mean for 3 channels
std += data.std(2).sum(0) # calculate std for 3 channels
cnt += b # get the count of data
mean /= cnt
std /= cnt
return mean.cpu().detach().numpy(), std.cpu().detach().numpy() | d943ae5244743749fa6a2186aacfbf0ea160d17b | 16,094 |
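A minimal usage sketch with synthetic data (it assumes batches yield (images, labels) with images in (batch, channel, h, w) layout, as the function expects):
import torch
from torch.utils.data import DataLoader, TensorDataset
images = torch.rand(8, 3, 32, 32)              # 8 random RGB images
labels = torch.zeros(8, dtype=torch.long)
loader = DataLoader(TensorDataset(images, labels), batch_size=4)
mean, std = get_mean_std(loader)
print(mean, std)                               # per-channel mean and std, shape (3,)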
import random
import math
import bpy
from mathutils import Vector
def create_random_camera(bbox, frac_space_x, frac_space_y, frac_space_z):
""" Creates a new camera, sets a random position for it, for a scene inside the bbox.
Given the same random_seed the pose of the camera is deterministic.
Input:
bbox - same rep as output from get_scene_bbos.
Output:
new camera created
"""
rand_theta = random.uniform(0, 2 * math.pi) # Rotate around z
# Phi: 0 - top view, 0.5 * pi - side view, -pi - bottom view
rand_sign = random.randint(0, 1) * 2 - 1.0
rand_phi = rand_sign * random.normalvariate(0.4, 0.2) * math.pi
max_dim = max(bbox.get_dims())
r = random.uniform(max_dim * 0.4, max_dim * 0.6)
x = frac_space_x * r * math.cos(rand_theta) * math.sin(rand_phi) + bbox.get_center()[0]
y = frac_space_y * r * math.sin(rand_theta) * math.sin(rand_phi) + bbox.get_center()[1]
z = frac_space_z * r * math.cos(rand_phi) + bbox.get_center()[2]
bpy.ops.object.camera_add(location=Vector((x, y, z)))
cam = bpy.context.object
cam.data.clip_start = 0.01
cam.data.clip_end = max(170, r * 2 * 10)
look_at(cam, Vector(bbox.get_center()))
return cam | 8cfa86127d569493a2456e13ae5924da886640a9 | 16,096 |
def temp2():
""" This is weird, but correct """
if True:
return (1, 2)
else:
if True:
return (2, 3)
return (4, 5) | c63f5566a6e52a3b5d175640fce830b7aad33ebe | 16,097 |
from typing import Tuple
def compute_blade_representation(bitmap: int, firstIdx: int) -> Tuple[int, ...]:
"""
Takes a bitmap representation and converts it to the tuple
blade representation
"""
bmp = bitmap
blade = []
n = firstIdx
while bmp > 0:
if bmp & 1:
blade.append(n)
bmp = bmp >> 1
n = n + 1
return tuple(blade) | 3bf6672280629dee8361ee3c0dbf5920afa4edda | 16,098 |
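For illustration, bitmap 0b101 with firstIdx=1 selects basis vectors e1 and e3, while 0b011 with firstIdx=0 selects e0 and e1:
assert compute_blade_representation(0b101, 1) == (1, 3)
assert compute_blade_representation(0b011, 0) == (0, 1)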
import time
import ntplib
def get_ntp_time(ntp_server_url):
"""
    Get the network time from an NTP server.
    :param ntp_server_url: address of the NTP server to query
    :return: time and date formatted with time.strftime()
"""
ntp_client = ntplib.NTPClient()
ntp_stats = ntp_client.request(ntp_server_url)
fmt_time = time.strftime('%X', time.localtime(ntp_stats.tx_time))
fmt_date = time.strftime('%Y-%m-%d', time.localtime(ntp_stats.tx_time))
return fmt_time, fmt_date | 17881970361994e329e1154478c8abb8171461f9 | 16,100 |
def read_data(path, names, verbose=False):
"""
Read time-series from MATLAB .mat file.
Parameters
----------
path : str
Path (relative or absolute) to the time series file.
names : list
Names of the requested time series incl. the time array itself
verbose : bool, optional
Increase verbosity
Returns
-------
dict
Time and data
Examples
--------
>>> tname, names = read_names('data.mat')
>>> data = read_data('data.mat')
>>> t = data[tname] # time
>>> x1 = data[names[0]] # first data series
"""
if verbose:
print('Reading %s ...' % path)
data = loadmat(path, squeeze_me=True, variable_names=names)
return data | 978870d4517b5e5ab66186747c326794d6d43814 | 16,101 |
async def search(q: str, person_type: str = 'student') -> list:
"""
Search by query.
:param q: `str` query to search for
:param person_type: 'student', 'lecturer', 'group', 'auditorium'
:return: list of results
"""
url = '/'.join((BASE_URL, SEARCH_INDPOINT))
params = {'term': q,
'type': person_type}
return await api_request(url, params) | 83913866f45a44202ccddbc9352fef9799caf751 | 16,102 |
def format_data_hex(data):
"""Convert the bytes array to an hex representation."""
# Bytes are separated by spaces.
return ' '.join('%02X' % byte for byte in data) | 27239052d9ca0b12c19977e79d512e0cab04182e | 16,103 |
def disp(cog_x, cog_y, src_x, src_y):
"""
Compute the disp parameters
Parameters
----------
cog_x: `numpy.ndarray` or float
cog_y: `numpy.ndarray` or float
src_x: `numpy.ndarray` or float
src_y: `numpy.ndarray` or float
Returns
-------
(disp_dx, disp_dy, disp_norm, disp_angle, disp_sign):
disp_dx: 'astropy.units.m`
disp_dy: 'astropy.units.m`
disp_norm: 'astropy.units.m`
disp_angle: 'astropy.units.rad`
disp_sign: `numpy.ndarray`
"""
disp_dx = src_x - cog_x
disp_dy = src_y - cog_y
disp_norm = np.sqrt(disp_dx**2 + disp_dy**2)
if hasattr(disp_dx, '__len__'):
disp_angle = np.arctan(disp_dy / disp_dx)
disp_angle[disp_dx == 0] = np.pi / 2. * np.sign(disp_dy[disp_dx == 0])
else:
if disp_dx == 0:
disp_angle = np.pi/2. * np.sign(disp_dy)
else:
disp_angle = np.arctan(disp_dy/disp_dx)
disp_sign = np.sign(disp_dx)
return disp_dx, disp_dy, disp_norm, disp_angle, disp_sign | e9d8166827e86a8e2180ba357a450aca817fdff4 | 16,106 |
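A worked scalar example: with the centre of gravity at the origin and the source at (1, 1), the disp vector is (1, 1), its norm sqrt(2), its angle pi/4 and its sign +1 (a sketch using plain floats rather than astropy quantities):
import numpy as np
dx, dy, norm, angle, sign = disp(0.0, 0.0, 1.0, 1.0)
print(dx, dy)                                                            # 1.0 1.0
print(np.isclose(norm, np.sqrt(2)), np.isclose(angle, np.pi / 4), sign)  # True True 1.0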
def get_landmark_from_prob(prob, thres=0.5, mode="mean", binary_mask=False):
"""Compute landmark location from the model probablity maps
Inputs:
prob : [RO, E1], the model produced probablity map for a landmark
thres : if np.max(prob)<thres, determine there is no landmark detected
mode : mean or max, use mean or max probablity to detect landmark
binary_mask : if true, prob is a binary (0 or 1) map
Outputs:
pt : [x, y], detected landmark point
"""
pt = None
if(binary_mask):
ind = np.where(prob==thres)
else:
if(thres>0 and np.max(prob)<thres):
return pt
else:
adaptive_thres = 0.5
mask = adaptive_thresh_cpu(prob, p_thresh=adaptive_thres*np.max(prob))
ind = np.where(mask>0)
if (np.size(ind[0])==0):
return pt
pt = np.zeros(2)
if(mode == "mean"):
pt[0] = np.mean(ind[1].astype(np.float32))
pt[1] = np.mean(ind[0].astype(np.float32))
else:
v = np.unravel_index(np.argmax(prob), prob.shape)
pt[0] = v[1]
pt[1] = v[0]
return pt | fad614088e587e389f15b0700bf442a956d498b0 | 16,107 |
import socket
from urllib.request import Request, urlopen
from urllib.error import URLError
def request(
url,
timeout: float,
method="GET",
data=None,
response_encoding="utf-8",
headers=None,
):
"""
Helper function to perform HTTP requests
"""
req = Request(url, data=data, method=method, headers=headers or {})
try:
return urlopen(req, timeout=timeout).read().decode(response_encoding)
except (URLError, socket.timeout, UnicodeDecodeError) as error:
raise CEPProviderUnavailableError(error) | 80f130101290442d538fa3f416f5650800547c6b | 16,108 |
from typing import Any
from typing import Optional
from typing import get_args
from typing import get_origin
def get_annotation_affiliation(annotation: Any, default: Any) -> Optional[Any]:
"""Helper for classifying affiliation of parameter
:param annotation: annotation record
:returns: classified value or None
"""
args, alias = get_args(annotation), get_origin(annotation)
# if alias and alias == list:
annotation = args[0] if alias == list else annotation
if annotation == Request:
return "request"
elif isinstance(default, (Form, File)):
return "form"
return None | db6efd7dfb0ed0272e7491547669de8f235b2b35 | 16,109 |
def sort_dict(original):
"""Recursively sorts dictionary keys and dictionary values in alphabetical order"""
if isinstance(original, dict):
res = (
dict()
) # Make a new "ordered" dictionary. No need for Collections in Python 3.7+
for k, v in sorted(original.items()):
res[k] = v
d = res
else:
d = original
for k in d:
if isinstance(d[k], str):
continue
if isinstance(d[k], list) and len(d[k]) > 1 and isinstance(d[k][0], str):
d[k] = sorted(d[k])
if isinstance(d[k], dict):
d[k] = sort_dict(d[k])
if isinstance(d[k], list) and len(d[k]) >= 1 and isinstance(d[k][0], dict):
for i in range(len(d[k])):
d[k][i] = sort_dict(d[k][i])
return d | 8c194af76160b0e4d3bad135720e051a4d4622b0 | 16,111 |
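A short usage sketch showing keys and string lists being sorted recursively:
cfg = {'b': {'z': 1, 'a': 2}, 'a': ['pear', 'apple']}
print(sort_dict(cfg))   # {'a': ['apple', 'pear'], 'b': {'a': 2, 'z': 1}}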
import requests
import webbrowser as web
def playonyt(topic):
"""Will play video on following topic, takes about 10 to 15 seconds to load"""
url = 'https://www.youtube.com/results?q=' + topic
count = 0
cont = requests.get(url)
data = str(cont.content)
lst = data.split('"')
for i in lst:
count+=1
if i == 'WEB_PAGE_TYPE_WATCH':
break
if lst[count-5] == "/results":
raise Exception("No video found.")
#print("Videos found, opening most recent video")
web.open("https://www.youtube.com"+lst[count-5])
return "https://www.youtube.com"+lst[count-5] | 49f4285dc0e0086d30776fc0668bac0e4c19dbc5 | 16,112 |
def train_classifier(classifier, features, labels):
"""This function must concern itself with training the classifier
on the specified data."""
return classifier.fit(features, labels) | ef74548aeb6e245d8728caf3205163c249046aae | 16,113 |
def work_on_disk(dev, root_mb, swap_mb, image_path):
"""Creates partitions and write an image to the root partition."""
root_part = "%s-part1" % dev
swap_part = "%s-part2" % dev
if not is_block_device(dev):
LOG.warn(_("parent device '%s' not found"), dev)
return
make_partitions(dev, root_mb, swap_mb)
if not is_block_device(root_part):
LOG.warn(_("root device '%s' not found"), root_part)
return
if not is_block_device(swap_part):
LOG.warn(_("swap device '%s' not found"), swap_part)
return
dd(image_path, root_part)
mkswap(swap_part)
try:
root_uuid = block_uuid(root_part)
except processutils.ProcessExecutionError as err:
with excutils.save_and_reraise_exception():
LOG.error(_("Failed to detect root device UUID."))
return root_uuid | 195ded6deae958b7efa41bcfdda1d3d68cabb23d | 16,114 |
def get_annotation_names(viewer):
"""Detect the names of nodes and edges layers"""
layer_nodes_name = None
layer_edges_name = None
for layer in viewer.layers:
if isinstance(layer, napari.layers.points.points.Points):
layer_nodes_name = layer.name
elif isinstance(layer, napari.layers.shapes.shapes.Shapes):
layer_edges_name = layer.name
if layer_nodes_name is not None and layer_edges_name is not None:
break
return layer_nodes_name, layer_edges_name | 20e64a6719b945eceda341d5a42da178818cb1a1 | 16,115 |
def remap(kx,ky,lx,ly,qomt,datai):
"""
remap the k-space variable back to shearing
periodic frame to reflect the time dependent
Eulerian wave number
"""
ndim = datai.ndim
dim = np.array(datai.shape)# datai[nz,ny,nx]
sh_data = np.empty([dim[0],dim[1],dim[2]])
tp_data = np.empty([dim[0],dim[2]])
sh_kx = -qomt*ky*lx/ly
#nquist= np.max(np.fabs(kx))
for j in np.arange(0,dim[1]):
quot = int(np.floor(sh_kx[j]))
res = sh_kx[j]-float(quot)
#kx_new = kx[:] + sh_kx[j]
tp_data[:,:]= datai[:,j,:]
sh_data[:,j,:] = (1.0-res)*np.roll(tp_data,quot, axis=1) \
+ res*np.roll(tp_data,quot+1,axis=1)
#sh_data[:,j,kx_new[:]>nquist] = 0.0
return sh_data | 6ea415df88c0db2ba26ef0fc8daa35b12a101ef8 | 16,116 |
def fips_disable():
"""
Disables FIPS on RH/CentOS system. Note that you must reboot the
system in order for FIPS to be disabled. This routine prepares
the system to disable FIPS.
CLI Example:
.. code-block:: bash
salt '*' ash.fips_disable
"""
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
ret = { 'result': True }
old = {}
new = {}
try:
# Remove dracut-fips installations.
installed_fips_pkgs = _get_installed_dracutfips_pkgs()
if 'dracut-fips' in installed_fips_pkgs:
__salt__['pkg.remove']('dracut-fips')
old['Packages'] = installed_fips_pkgs
# If fips is in kernel, create a new boot-kernel.
if _is_fips_in_kernel():
_move_boot_kernel(False)
__salt__['cmd.run']("dracut -f", python_shell=False)
# Update grub.cfg file to remove the fips argument.
grub_args = _get_grub_args()
if 'fips=1' in grub_args:
cmd = 'grubby --update-kernel=ALL --remove-args=fips=1'
__salt__['cmd.run'](cmd, python_shell=False)
new['grubby'] = cmd
# Update GRUB command line entry to remove fips.
diff = _modify_grub_file(True)
if diff:
new['/etc/default/grub'] = diff
except Exception:
_rollback_fips_disable(installed_fips_pkgs)
ret['result'] = False
ret['changes'] = {}
ret['comment'] = 'Unable to change state of system to FIPS-disabled.'
else:
if old:
ret['changes'] = {'old': old}
ret['comment'] = 'FIPS has been toggled to off.'
if new:
if 'changes' in ret:
ret['changes'].update({'new': new})
else:
ret['changes'] = {'new': new}
ret['comment'] = 'FIPS has been toggled to off.'
if fips_status() == 'enabled':
msg = ' Reboot system to place into FIPS-disabled state.'
if 'comment' in ret:
ret['comment'] = ret['comment'] + msg
else:
ret['comment'] = msg[1:]
if 'changes' not in ret and 'comment' not in ret:
ret['comment'] = 'FIPS mode is already disabled. No changes.'
finally:
return ret | d31cc5ad6dd71ec0f3d238051a7b2a64b311c0fd | 16,117 |
import pandas as pd
from datetime import datetime
def buy_sell_fun_mp_org(datam, S1=1.0, S2=0.8):
"""
    Slope-indicator trading strategy using standardized scores.
"""
    start_t = datetime.now()
print("begin-buy_sell_fun_mp:", start_t)
dataR = pd.DataFrame()
for code in datam.index.levels[1]:
# data = price.copy()
# price = datam.query("code=='%s'" % code)
# data = price.copy()
data = buy_sell_fun(datam, code)
# if code == '000732':
# print(data.tail(22))
if len(dataR) == 0:
dataR = data
else:
dataR = dataR.append(data)
    end_t = datetime.now()
print(end_t, 'buy_sell_fun_mp spent:{}'.format((end_t - start_t)))
result01 = dataR['nav'].groupby(level=['date']).sum()
result02 = dataR['nav'].groupby(level=['date']).count()
num = dataR.flag.abs().sum()
dataR2 = pd.DataFrame({'nav':result01 - result02 + 1,'flag':0})
# dataR2['flag'] = 0
dataR2.iat[-1,1] = num
# result['nav'] = result['nav'] - len(datam.index.levels[1]) + 1
return dataR2 | 8d3b78b9d266c3c39b8491677caa0f4dfb9f839a | 16,119 |
import collections
import abc
def marshall_namedtuple(obj):
"""
This method takes any atomic value, list, dictionary or namedtuple,
and recursively it tries translating namedtuples into dictionaries
"""
recurse = lambda x: map(marshall_namedtuple, x)
obj_is = partial(isinstance, obj)
if hasattr(obj, '_marshall'):
return marshall_namedtuple(obj._marshall())
elif obj_is(tuple) and hasattr(obj, '_fields'): # namedtuple
fields = zip(obj._fields, recurse(obj))
class_name = obj.__class__.__name__
return dict(fields, **{'_type': class_name})
elif obj_is((collections.abc.Mapping,dict)):
return type(obj)(zip(obj.keys(), recurse(obj.values())))
elif obj_is(collections.abc.Iterable) and not obj_is(str):
return type(obj)(recurse(obj))
elif obj_is(abc.ABC):
return {
'_instance_of': obj.__class__.__name__
}
elif obj_is(abc.ABCMeta):
return {
'_class': obj.__name__
}
else:
return obj | 87d24fe1b273bfcf481679a96710be757baf08a5 | 16,120 |
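A brief usage sketch: a namedtuple is flattened into a dict carrying its class name under '_type', and containers are handled recursively:
from collections import namedtuple
Point = namedtuple('Point', ['x', 'y'])
print(marshall_namedtuple(Point(1, 2)))            # {'x': 1, 'y': 2, '_type': 'Point'}
print(marshall_namedtuple({'p': Point(1, 2)}))     # {'p': {'x': 1, 'y': 2, '_type': 'Point'}}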
import cv2
import torch
def prep_image(img, inp_dim):
"""
Prepare image for inputting to the neural network.
Returns a Variable
"""
# print("prepping images")
img = cv2.resize(img, (inp_dim, inp_dim))
img = img[:,:,::-1].transpose((2,0,1)).copy()
img = torch.from_numpy(img).float().div(255.0)
# print("prepped images")
return img | 02ebc73a32a24d59c53da9bfb99485f3a4f6dee2 | 16,121 |
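A usage sketch with a dummy frame (it assumes an OpenCV-style HxWxC uint8 BGR array, as the channel slicing implies):
import numpy as np
frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy BGR image
tensor = prep_image(frame, 416)
print(tensor.shape, tensor.dtype)                 # torch.Size([3, 416, 416]) torch.float32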
from typing import Dict
from typing import List
import math
def best_broaders(supers_for_all_entities: Dict,
per_candidate_links_and_supers: List[Dict],
num_best: int = 5,
super_counts_field: str = "broader_counts",
doprint=False,
representativeness_threshold=0.1):
"""
Returns the best matching super for a candidate class, according to a list of supers for entities in the class
and entities in the whole corpus. If comparing to a taxonomy, a super is a broader.
@param super_counts_field:
    @param supers_for_all_entities: a dictionary that has, for every possible entity, the supers it belongs to
    @param per_candidate_links_and_supers: a list of dictionaries, one per candidate. For each, at least
    two fields are expected: "entities", containing the list of entities, and the one given by super_counts_field,
    which is, in turn, a dictionary whose keys are supers and whose values are the number of entities in that
    candidate having this broader
@param num_best: maximum number of best matching supers to be returned
@return: for every candidate class, the num_best best matching supers and their log odds ratio
"""
result = []
global_counts = dict()
for ent, bros in supers_for_all_entities.items():
for bro in bros:
global_counts[bro] = global_counts.get(bro, 0) + 1
onlytopmost = []
for can in per_candidate_links_and_supers:
# For this entity, the following dictionaries have an element for every possible super
# Using notation from the paper
        # T_cc : the number of entities narrower to a candidate which are tagged with NER type T
T_cc = {x: y for x, y in can[super_counts_field].items()
if y > representativeness_threshold * len(can["entities"])}
if len(T_cc) == 0:
T_cc = {x: y for x, y in can[super_counts_field].items()}
        # T_w : the number of entities in the whole corpus tagged with T
T_w = {y: global_counts[y] for y in T_cc.keys()}
# w : the total number of entities in the whole corpus
w = float(len(supers_for_all_entities))
# cc : the total number of entities in this candidate
cc = float(len(can["entities"]))
# dict of the form super : log_odds
log_odds_per_super = {x: math.log((T_cc[x] / cc) / (T_w[x] / w))
for x in T_cc.keys()}
logslist = list(log_odds_per_super.items())
logslist.sort(key=lambda x: x[1])
logslist.reverse()
maxbroads = min(len(logslist), num_best)
logodds = []
for bi in range(maxbroads):
logodds.append({"candidatesbroader": logslist[bi][0],
"loggods": logslist[bi][1]})
can["log_odds"] = logodds
if doprint:
print("\t\t---", ", ".join([str(x[1]) for x in logslist[:maxbroads]]))
if len(logslist) > 0:
onlytopmost.append(logslist[0][1])
can["best_match_broader"] = logslist[0][0]
else:
onlytopmost.append(None)
can["best_match_broader"] = None
return onlytopmost | 9aa9826c43e67a28eeca463b107296e093709246 | 16,122 |
def clump_list_sort(clump_list):
"""Returns a copy of clump_list, sorted by ascending minimum density. This
eliminates overlap when passing to
yt.visualization.plot_modification.ClumpContourCallback"""
minDensity = [c['Density'].min() for c in clump_list]
args = np.argsort(minDensity)
list = nar(clump_list)[args]
reverse = range(list.size-1,-1,-1)
return list[reverse] | 732e747e36c37f9d65ef44b6aa060d5c9d04e3d1 | 16,123 |
from typing import Dict
from typing import Any
from typing import Type
import types
from typing import Optional
def _prepare_artifact(
metadata_handler: metadata.Metadata,
uri: Text,
properties: Dict[Text, Any],
custom_properties: Dict[Text, Any],
reimport: bool, output_artifact_class: Type[types.Artifact],
mlmd_artifact_type: Optional[metadata_store_pb2.ArtifactType]
) -> types.Artifact:
"""Prepares the Importer's output artifact.
If there is already an artifact in MLMD with the same URI and properties /
custom properties, that artifact will be reused unless the `reimport`
argument is set to True.
Args:
metadata_handler: The handler of MLMD.
uri: The uri of the artifact.
properties: The properties of the artifact, given as a dictionary from
string keys to integer / string values. Must conform to the declared
properties of the destination channel's output type.
custom_properties: The custom properties of the artifact, given as a
dictionary from string keys to integer / string values.
reimport: If set to True, will register a new artifact even if it already
exists in the database.
output_artifact_class: The class of the output artifact.
mlmd_artifact_type: The MLMD artifact type of the Artifact to be created.
Returns:
An Artifact object representing the imported artifact.
"""
absl.logging.info(
'Processing source uri: %s, properties: %s, custom_properties: %s' %
(uri, properties, custom_properties))
# Check types of custom properties.
for key, value in custom_properties.items():
if not isinstance(value, (int, Text, bytes)):
raise ValueError(
('Custom property value for key %r must be a string or integer '
'(got %r instead)') % (key, value))
unfiltered_previous_artifacts = metadata_handler.get_artifacts_by_uri(
uri)
# Only consider previous artifacts as candidates to reuse, if the properties
# of the imported artifact match those of the existing artifact.
previous_artifacts = []
for candidate_mlmd_artifact in unfiltered_previous_artifacts:
is_candidate = True
candidate_artifact = output_artifact_class(mlmd_artifact_type)
candidate_artifact.set_mlmd_artifact(candidate_mlmd_artifact)
for key, value in properties.items():
if getattr(candidate_artifact, key) != value:
is_candidate = False
break
for key, value in custom_properties.items():
if isinstance(value, int):
if candidate_artifact.get_int_custom_property(key) != value:
is_candidate = False
break
elif isinstance(value, (Text, bytes)):
if candidate_artifact.get_string_custom_property(key) != value:
is_candidate = False
break
if is_candidate:
previous_artifacts.append(candidate_mlmd_artifact)
result = output_artifact_class(mlmd_artifact_type)
result.uri = uri
for key, value in properties.items():
setattr(result, key, value)
for key, value in custom_properties.items():
if isinstance(value, int):
result.set_int_custom_property(key, value)
elif isinstance(value, (Text, bytes)):
result.set_string_custom_property(key, value)
# If a registered artifact has the same uri and properties and the user does
# not explicitly ask for reimport, reuse that artifact.
if bool(previous_artifacts) and not reimport:
absl.logging.info('Reusing existing artifact')
result.set_mlmd_artifact(max(previous_artifacts, key=lambda m: m.id))
return result | d30dcd579c73c71173f10207ab80d05c761c7185 | 16,124 |
import re
def ParseCLILines(lines, skipStartLines=0, lastSkipLineRe=None, skipEndLines=0):
    """Delete the first few and last few lines of a list of lines, in place."""
    if skipStartLines > 0:
        if len(lines) < skipStartLines:
            raise exceptions.MalformedIO("Can't skip first %d lines of result %s. It only contains %d lines." % (skipStartLines, repr(lines), len(lines)))
        if lastSkipLineRe is not None:
            # Sanity check: the last line to be skipped must match the given regexp.
            if re.match(lastSkipLineRe, lines[skipStartLines - 1]) is None:
                raise exceptions.MalformedIO("Expected '%s' at line %d of result, but found '%s'." % (lastSkipLineRe, skipStartLines, lines[skipStartLines - 1].strip()))
        del lines[0:skipStartLines]
    if skipEndLines > 0:
        if len(lines) < skipEndLines:
            raise exceptions.MalformedIO("Can't skip last %d lines of result %s. It only contains %d lines." % (skipEndLines, repr(lines), len(lines)))
        del lines[-skipEndLines:]
return lines | dc445765f42df25b8d046e3f2303d85109a3d419 | 16,125 |
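# Illustrative usage (hypothetical CLI output): strip a two-line banner whose
# last line is a dashed separator, plus a one-line trailing prompt.
cli_output = ["Welcome to the device", "----------", "row 1", "row 2", "device> "]
assert ParseCLILines(cli_output, skipStartLines=2, lastSkipLineRe=r"-+$", skipEndLines=1) == ["row 1", "row 2"]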
import numpy as np
def initialize_parameters(n_x, n_h, n_y):
"""
Argument:
n_x -- size of the input layer
n_h -- size of the hidden layer
n_y -- size of the output layer
Returns:
params -- python dictionary containing your parameters:
W1 -- weight matrix of shape (n_h, n_x)
b1 -- bias vector of shape (n_h, 1)
W2 -- weight matrix of shape (n_y, n_h)
b2 -- bias vector of shape (n_y, 1)
"""
np.random.seed(2) # we set up a seed so that your output matches ours although the initialization is random.
### START CODE HERE ### (≈ 4 lines of code)
W1 = np.random.randn(n_h, n_x) * 0.01
b1 = np.zeros((n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros((n_y, 1))
### END CODE HERE ###
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1,
"b1": b1,
"W2": W2,
"b2": b2}
return parameters | 785985b79b9284ba8c6058c8e9c4018955407cf8 | 16,126 |
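# Quick shape check (illustrative): a 2-3-1 layer layout yields W1 of shape
# (3, 2), b1 of shape (3, 1), W2 of shape (1, 3) and b2 of shape (1, 1).
params = initialize_parameters(n_x=2, n_h=3, n_y=1)
assert params["W1"].shape == (3, 2) and params["b2"].shape == (1, 1)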
from collections import defaultdict
from datetime import datetime, timedelta
import pandas as pd
# `nw` (the network environment exposing `nw.tenv.lonlat`) is assumed to be a
# project-specific module imported elsewhere.
def get_df_from_sampled_trips(step_trip_list, show_service_data=False, earliest_datetime=None):
"""Get dataframe from sampled trip list.
Parameters
----------
step_trip_list : list of lists
        List of trip lists occurring in the same step.
show_service_data: bool
Show trip pickup and dropoff results.
earliest_datetime: datetime
Trip start time - rebalance offset
Returns
-------
DataFrame
Dataframe with trip data info.
"""
d = defaultdict(list)
for step, trips in enumerate(step_trip_list):
for t in trips:
d["placement_datetime"].append(t.placement)
d["step"].append(step + 1)
d["pk_id"].append(t.o.id)
d["dp_id"].append(t.d.id)
d["sq_class"].append(t.sq_class)
d["max_delay"].append(t.max_delay)
d["elapsed_sec"].append(t.elapsed_sec)
d["max_delay_from_placement"].append(t.max_delay_from_placement)
d["delay_close_step"].append(t.delay_close_step)
d["tolerance"].append(t.tolerance)
lon_o, lat_o = nw.tenv.lonlat(t.o.id)
lon_d, lat_d = nw.tenv.lonlat(t.d.id)
d["passenger_count"].append(1)
d["pickup_latitude"].append(lat_o)
d["pickup_longitude"].append(lon_o)
d["dropoff_latitude"].append(lat_d)
d["dropoff_longitude"].append(lon_d)
if show_service_data:
if t.pk_delay is not None:
pickup_datetime = t.placement + timedelta(
minutes=t.pk_delay
)
pickup_datetime_str = datetime.strftime(
pickup_datetime, "%Y-%m-%d %H:%M:%S"
)
if t.dropoff_time is not None:
dropoff_datetime = earliest_datetime + timedelta(
minutes=t.dropoff_time
)
dropoff_datetime_str = datetime.strftime(
dropoff_datetime, "%Y-%m-%d %H:%M:%S"
)
d["times_backlogged"].append(t.times_backlogged)
d["pickup_step"].append(
t.pk_step if t.pk_step is not None else "-"
)
d["dropoff_step"].append(
t.dp_step if t.dp_step is not None else "-"
)
d["pickup_delay"].append(
t.pk_delay if t.pk_delay is not None else "-"
)
d["pickup_duration"].append(
t.pk_duration if t.pk_duration is not None else "-"
)
d["pickup_datetime"].append(
pickup_datetime_str if t.pk_delay is not None else "-"
)
d["dropoff_time"].append(
t.dropoff_time if t.dropoff_time is not None else "-"
)
d["dropoff_datetime"].append(
dropoff_datetime_str if t.dropoff_time is not None else "-"
)
d["picked_by"].append(t.picked_by)
df = pd.DataFrame.from_dict(dict(d))
df.sort_values(by=["placement_datetime", "sq_class"], inplace=True)
return df | 36bba80f0c46862df0390cf4e4279eeb33002e86 | 16,128 |
def compute_v_y(transporter, particles):
"""
Compute values of V y on grid specified in bunch configuration
:param transporter: transport function
:param particles: BunchConfiguration object, specification of grid
:return: matrix with columns: x, theta_x, y, theta_y, pt, V y
"""
return __compute_optical_function(transporter, particles, Parameters.V_Y) | 4a17e0c0e4612534483187b6779bcf5c179c0fcc | 16,129 |
def run_fn(fn_args: TrainerFnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
# get transform component output
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
# read input data
train_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.train_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
eval_dataset = fn_args.data_accessor.tf_dataset_factory(
fn_args.eval_files,
dataset_options.TensorFlowDatasetOptions(
batch_size=fn_args.custom_config["batch_size"],
),
tf_transform_output.transformed_metadata.schema,
)
# instantiate model
model = build_model(
fn_args.custom_config["input_features"],
fn_args.custom_config["window_size"],
fn_args.custom_config["outer_units"],
fn_args.custom_config["inner_units"],
)
# tf callbacks for tensorboard
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir,
update_freq="batch",
)
# validation_data = list(eval_dataset.as_numpy_iterator())
# train model
model.fit(
train_dataset,
# train_dataset.as_numpy_iterator(),
steps_per_epoch=fn_args.train_steps,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback],
)
# Build signatures
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def _serve_tf_examples_fn(**input_features):
# """Returns the output to be used in the serving signature."""
preprocessed_features = model.tft_layer(input_features)
autoencoded_features = model(preprocessed_features)
return {
**{
f"input_features::{f}": input_features[f] for f in input_features.keys()
},
**{
f"preprocessed_features::{f}": preprocessed_features[f]
for f in preprocessed_features.keys()
},
# Output tensor names are of the form:
# lstm_autoencoder_model/decoder/{feature_name}/Reshape_1:0
**{
f"output_features::{f.name.split('/')[2]}": f
for f in autoencoded_features
},
}
_input_tf_specs = {
f: tf.TensorSpec(
shape=[None, fn_args.custom_config["window_size"]], dtype=tf.float32, name=f
)
for f in fn_args.custom_config["input_features"]
}
signatures = {
"serving_default": _serve_tf_examples_fn.get_concrete_function(
**_input_tf_specs
)
}
# Save model (this is the effective output of this function)
model.save(fn_args.serving_model_dir, save_format="tf", signatures=signatures) | 15c8202ad6955052bbd1da2984aedb9887c390af | 16,131 |
def log_likelihood(X, Y, Z, data, boolean=True, **kwargs):
"""
Log likelihood ratio test for conditional independence. Also commonly known
as G-test, G-squared test or maximum likelihood statistical significance
test. Tests the null hypothesis that X is independent of Y given Zs.
Parameters
----------
X: int, string, hashable object
A variable name contained in the data set
Y: int, string, hashable object
A variable name contained in the data set, different from X
Z: list (array-like)
A list of variable names contained in the data set, different from X and Y.
This is the separating set that (potentially) makes X and Y independent.
Default: []
data: pandas.DataFrame
The dataset on which to test the independence condition.
boolean: bool
If boolean=True, an additional argument `significance_level` must
be specified. If p_value of the test is greater than equal to
`significance_level`, returns True. Otherwise returns False.
If boolean=False, returns the chi2 and p_value of the test.
Returns
-------
If boolean = False, Returns 3 values:
chi: float
            The chi-square test statistic.
p_value: float
The p_value, i.e. the probability of observing the computed chi-square
statistic (or an even higher value), given the null hypothesis
that X \u27C2 Y | Zs.
dof: int
The degrees of freedom of the test.
If boolean = True, returns:
independent: boolean
If the p_value of the test is greater than significance_level, returns True.
Else returns False.
References
----------
[1] https://en.wikipedia.org/wiki/G-test
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))
>>> data['E'] = data['A'] + data['B'] + data['C']
>>> log_likelihood(X='A', Y='C', Z=[], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D'], data=data, boolean=True, significance_level=0.05)
True
>>> log_likelihood(X='A', Y='B', Z=['D', 'E'], data=data, boolean=True, significance_level=0.05)
False
"""
return power_divergence(
X=X, Y=Y, Z=Z, data=data, boolean=boolean, lambda_="log-likelihood", **kwargs
) | 00493131d78506c5a6cbb9e04bda51b69f1a04ca | 16,132 |
def conjugada_matriz_vec(mat:list):
"""
    Computes the conjugate of a complex matrix or vector.
    :param mat: List representing the complex matrix or vector.
    :return: List representing the resulting matrix or vector.
"""
fila = len(mat)
columnas = len(mat[0])
resul = []
for i in range(fila):
resul.append([])
for j in range(columnas):
resul[i].append(conjugado_complejos(mat[i][j]))
return resul | ad883dae9161f4e60f933caf93703544e16bfb4d | 16,133 |
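# Illustrative usage, assuming each complex entry uses whatever representation
# `conjugado_complejos` expects (e.g. a [real, imag] pair that it returns with
# the imaginary part negated):
# conjugada_matriz_vec([[[1, 2], [3, -4]]])  ->  [[[1, -2], [3, 4]]]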
import numpy as np
def features2matrix(feature_list):
"""
Args:
feature_list (list of Feature):
Returns:
(np.ndarray, list of str): matrix and list of key of features
"""
matrix = np.array([feature.values for feature in feature_list], dtype=float)
key_lst = [feature.key for feature in feature_list]
return matrix, key_lst | f60cdb904489cca3ab926dbc8d396804367e4a7a | 16,134 |
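# Illustrative usage: any objects exposing `.values` and `.key` attributes
# work here; the `Feature` class itself is assumed to be defined elsewhere.
# matrix, keys = features2matrix([Feature(key="f1", values=[1.0, 2.0]),
#                                 Feature(key="f2", values=[3.0, 4.0])])
# matrix.shape == (2, 2) and keys == ["f1", "f2"]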
import re
def is_heading(line):
"""Determine whether a given line is a section header
that describes subsequent lines of a report.
"""
has_cattle = re.search(r'steer?|hfrs?|calves|cows?|bulls?', line, re.IGNORECASE)
has_price = re.search(r'\$[0-9]+\.[0-9]{2}', line)
return bool(has_cattle) and not bool(has_price) | ccbc80f7db61f7ba82aa88e54112d1995d457764 | 16,136 |
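# Illustrative checks: a line naming cattle with no dollar price is treated as
# a section heading, while a priced sale line is not.
assert is_heading("STEERS Medium and Large 1-2")
assert not is_heading("5 hd 520 lbs steers $182.50")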
def get_channel_messages(channel_id):
""" Holt fuer einen bestimmten Kanal die Nachrichten aus der Datenbank"""
session = get_cassandra_session()
future = session.execute_async("SELECT * FROM messages WHERE channel_id=%s", (channel_id,))
try:
rows = future.result()
except Exception:
log.exeception()
messages = []
for row in rows:
messages.append({
'channel_id': row.channel_id,
'message_id': row.message_id,
'author_id': row.author_id,
'message': row.message
})
return jsonify({'messages': messages}), 200 | 7a3821dd8e93c4d49dfeecea200a881fdcb3f1a4 | 16,137 |
def train(traj,
pol, targ_pol, qf, targ_qf,
optim_pol, optim_qf,
epoch, batch_size, # optimization hypers
tau, gamma, # advantage estimation
sampling,
):
"""
Train function for deep deterministic policy gradient
Parameters
----------
traj : Traj
Off policy trajectory.
pol : Pol
Policy.
targ_pol : Pol
Target Policy.
qf : SAVfunction
Q function.
targ_qf : SAVfunction
Target Q function.
optim_pol : torch.optim.Optimizer
Optimizer for Policy.
optim_qf : torch.optim.Optimizer
Optimizer for Q function.
epoch : int
Number of iteration.
batch_size : int
Number of batches.
tau : float
Target updating rate.
gamma : float
Discounting rate.
sampling : int
Number of samping in calculating expectation.
Returns
-------
result_dict : dict
Dictionary which contains losses information.
"""
pol_losses = []
qf_losses = []
logger.log("Optimizing...")
for batch in traj.iterate(batch_size, epoch):
qf_bellman_loss = lf.bellman(
qf, targ_qf, targ_pol, batch, gamma, sampling=sampling)
optim_qf.zero_grad()
qf_bellman_loss.backward()
optim_qf.step()
pol_loss = lf.ag(pol, qf, batch, sampling)
optim_pol.zero_grad()
pol_loss.backward()
optim_pol.step()
for q, targ_q, p, targ_p in zip(qf.parameters(), targ_qf.parameters(), pol.parameters(), targ_pol.parameters()):
targ_p.detach().copy_((1 - tau) * targ_p.detach() + tau * p.detach())
targ_q.detach().copy_((1 - tau) * targ_q.detach() + tau * q.detach())
qf_losses.append(qf_bellman_loss.detach().cpu().numpy())
pol_losses.append(pol_loss.detach().cpu().numpy())
logger.log("Optimization finished!")
return dict(PolLoss=pol_losses,
QfLoss=qf_losses,
) | 14c09f3ce1f30366be3b8d0e0b965bdc1c677834 | 16,138 |
def merge_dicts(dict1, dict2):
""" _merge_dicts
Merges two dictionaries into one.
INPUTS
@dict1 [dict]: First dictionary to merge.
@dict2 [dict]: Second dictionary to merge.
RETURNS
@merged [dict]: Merged dictionary
"""
merged = {**dict1, **dict2}
return merged | 67e96ba9c9831e6e2aa4bbd6cd8b8d1d5edb93c4 | 16,140 |
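# Example: keys present in both dictionaries take the value from dict2.
assert merge_dicts({"a": 1, "b": 2}, {"b": 3}) == {"a": 1, "b": 3}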
import pagure.api
import pagure.lib.query
def check_api_acls(acls, optional=False):
"""Checks if the user provided an API token with its request and if
this token allows the user to access the endpoint desired.
:arg acls: A list of access control
:arg optional: Only check the API token is valid. Skip the ACL validation.
"""
if authenticated():
return
flask.g.token = None
flask.g.fas_user = None
token = None
token_str = None
if "Authorization" in flask.request.headers:
authorization = flask.request.headers["Authorization"]
if "token" in authorization:
token_str = authorization.split("token", 1)[1].strip()
token_auth = False
error_msg = None
if token_str:
token = pagure.lib.query.get_api_token(flask.g.session, token_str)
if token:
if token.expired:
error_msg = "Expired token"
else:
flask.g.authenticated = True
# Some ACLs are required
if acls:
token_acls_set = set(token.acls_list)
needed_acls_set = set(acls or [])
overlap = token_acls_set.intersection(needed_acls_set)
# Our token has some of the required ACLs: auth successful
if overlap:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
# Our token has none of the required ACLs -> auth fail
else:
error_msg = "Missing ACLs: %s" % ", ".join(
sorted(set(acls) - set(token.acls_list))
)
# No ACL required
else:
if optional:
token_auth = True
flask.g.fas_user = token.user
# To get a token, in the `fas` auth user must have
# signed the CLA, so just set it to True
flask.g.fas_user.cla_done = True
flask.g.token = token
flask.g.authenticated = True
else:
error_msg = "Invalid token"
elif optional:
return
else:
error_msg = "Invalid token"
if not token_auth:
output = {
"error_code": pagure.api.APIERROR.EINVALIDTOK.name,
"error": pagure.api.APIERROR.EINVALIDTOK.value,
"errors": error_msg,
}
jsonout = flask.jsonify(output)
jsonout.status_code = 401
return jsonout | 81d658036c5b31e3471e48ef44f4eb26e571c49a | 16,141 |
def china_province_head_fifteen():
"""
    Top-15 data for each province.
:return:
"""
return db_request_service.get_china_province_head_fifteen(ChinaTotal, ChinaProvince) | 18dc3f22c05b3580bcd983361efc03bd3cdae43b | 16,143 |
from scipy import sparse
# KerasRegressor, FunctionalKerasClassifier, pseudo_partial, keras_build_fn and
# generic_pipeline are assumed to be provided by the surrounding project.
def pipeline(x_train,
y_train,
x_test,
y_test,
param_dict=None,
problem='classification'):
"""Trains and evaluates a DNN classifier.
Args:
x_train: np.array or scipy.sparse.*matrix array of features of training data
y_train: np.array 1-D array of class labels of training data
x_test: np.array or scipy.sparse.*matrix array of features of test data
y_test: np.array 1-D array of class labels of the test data
param_dict: {string: ?} dictionary of parameters of their values
problem: string type of learning problem; values = 'classification',
'regression'
Returns:
model: Keras.models.Model
trained Keras model
metrics: {str: float}
dictionary of metric scores
"""
assert problem in ['classification', 'regression']
if param_dict is None:
param_dict = {'epochs': 10, 'batch_size': 256}
num_feature = x_train.shape[1]
is_sparse = sparse.issparse(x_train)
param_dict = param_dict.copy()
num_epoch = param_dict.pop('epochs')
batch_size = param_dict.pop('batch_size')
if problem == 'regression':
num_output = 1
loss = 'mean_squared_error'
model_init = KerasRegressor
else:
num_output = len(set(y_train))
loss = 'categorical_crossentropy'
model_init = FunctionalKerasClassifier
build_fn = pseudo_partial(
keras_build_fn,
num_feature=num_feature,
num_output=num_output,
is_sparse=is_sparse,
loss=loss,
**param_dict)
model = model_init(
build_fn=build_fn,
epochs=num_epoch,
batch_size=batch_size,
shuffle=True,
verbose=False)
return generic_pipeline(
model, x_train, y_train, x_test, y_test, problem=problem) | f01e20851c91dd9f6b3db889fdf713edc1eb37b9 | 16,145 |
def model_selection(modelname, num_out_classes,
dropout=None):
"""
    :param modelname: one of 'xception', 'resnet18' or 'xception_concat'.
    :param num_out_classes: number of output classes.
    :param dropout: optional dropout rate (used by the 'resnet18' variant only).
    :return: the constructed TransferModel (the commented-out values below show
        the image size, pretraining flag and input list that used to be
        returned alongside it).
"""
if modelname == 'xception':
return TransferModel(modelchoice='xception',
num_out_classes=num_out_classes)
# , 299, \True, ['image'], None
elif modelname == 'resnet18':
return TransferModel(modelchoice='resnet18', dropout=dropout,
num_out_classes=num_out_classes)
# , \224, True, ['image'], None
elif modelname == 'xception_concat':
return TransferModel(modelchoice='xception_concat',
num_out_classes=num_out_classes)
else:
raise NotImplementedError(modelname) | 67ba26ab4f7cbe8f4540eb10f2ef6e598b49ea2f | 16,146 |
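# Illustrative call (TransferModel is assumed to be provided by the
# surrounding project): build an Xception-based model for binary
# real/fake classification.
# model = model_selection('xception', num_out_classes=2)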
def Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket):
"""Packet_computeBinaryPacketLength(char const * startOfPossibleBinaryPacket) -> size_t"""
return _libvncxx.Packet_computeBinaryPacketLength(startOfPossibleBinaryPacket) | 58139b8d874d9292e63b6eb6afdbd9c5c2fa6f9d | 16,147 |
import itertools
def gen_positions(n, n_boulders):
"""Generates state codes for boulders. Includes empty rows
Parameters:
n: number of rows/columns
n_boulders: number of boulders per row
return value:
Possible boulder and alien states
"""
    boulder_positions = []
alien_positions_with_0=["{}1{}".format('0'*(n-i-1),'0'*(i)) for i in range(n)]+['0'*n]
if n_boulders==1:
return alien_positions_with_0, alien_positions_with_0[0:n]
else:
positions=[]
position_index=list(itertools.combinations(range(n), n_boulders))
for tup in position_index:
pos=''
for i in range(n):
if i in tup:
pos+='1'
else:
pos+='0'
positions.append(pos)
        # Include the empty (no-boulder) row exactly once.
        if '0'*n not in positions:
            positions.append('0'*n)
return positions, alien_positions_with_0[0:n] | 0a20594f2e021bf8e190f6c7c726159fde0b8367 | 16,149 |
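# Example: a 3x3 grid with one boulder per row gives one-hot row codes plus an
# empty row, and the alien states are the three non-empty codes.
boulder_states, alien_states = gen_positions(3, 1)
assert boulder_states == ['001', '010', '100', '000']
assert alien_states == ['001', '010', '100']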
def analysis_linear_correlation(data1:np.array,
data2:np.array,
alpha:float = .05,
return_corr:bool = True,
verbose:bool = False)->bool:
"""
## Linear correlation analysis to test independence for numerical / ordinal variables.
data1, date2 -- 1D data to be tested.
alpha -- Significance level (default, 0.05).
return_corr -- If is True, return correlation value and his p-value (default, False).
verbose -- Display extra information (default, False).
return -- boolean according test result.
"""
# get types
type1 = data1.dtype
type2 = data2.dtype
# get size
n = len(data1)
# ord - ord
if type1 == "int64" and type2 == "int64":
# number of categories
ncat1 = len(np.unique(data1))
ncat2 = len(np.unique(data2))
# analysis
if ncat1 >= 5 and ncat2 >= 5:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - num
if type1 == "float64" and type2 == "float64":
# test if variables are gaussian
if n >= 5000:
is_normal1 = test_anderson(data1, alpha = alpha)
is_normal2 = test_anderson(data2, alpha = alpha)
else:
is_normal1 = test_shapiro(data1, alpha = alpha)
is_normal2 = test_shapiro(data2, alpha = alpha)
# analysis
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if is_normal1 and is_normal2:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# num - ord
if (type1 == "float64" and type2 == "int64") or (type1 == "int64" and type2 == "float64"):
# number of categories
if type1 == "int64":
ncat = len(np.unique(data1))
else:
ncat = len(np.unique(data2))
# analysis
if ncat < 5:
result = correlation_kendalltau(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
if n >= 100:
result = correlation_pearson(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
else:
result = correlation_spearman(data1, data2, alpha = alpha, return_corr = return_corr, verbose = verbose)
# return
return result | 6eaf34a12281949236d28143399024ed30e834ad | 16,150 |
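# Illustrative usage with synthetic data (the correlation_* and test_* helpers
# are assumed to be defined elsewhere in this module):
# x = np.random.normal(size=200)
# y = 2 * x + np.random.normal(size=200)
# analysis_linear_correlation(x, y, alpha=0.05, return_corr=True, verbose=True)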
def sha256(buffer=None):
"""Secure Hash Algorithm 2 (SHA-2) with 256 bits hash value."""
return Hash("sha256", buffer) | 2e33c38c0f7b9dd019104a18e6842243773686ca | 16,151 |
import math
import torch
import pyro
from pyro import poutine
# `lexpand` (left-expand a tensor with extra leading batch dimensions) is
# assumed to be available as in pyro.contrib.util.
from pyro.contrib.util import lexpand
def nmc_eig(model, design, observation_labels, target_labels=None,
N=100, M=10, M_prime=None, independent_priors=False):
"""
Nested Monte Carlo estimate of the expected information
gain (EIG). The estimate is, when there are not any random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log p(y_n | \\theta_n, d) -
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^M p(y_n | \\theta_m, d)\\right)
The estimate is, in the presence of random effects,
.. math::
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M'}\\sum_{m=1}^{M'}
p(y_n | \\theta_n, \\widetilde{\\theta}_{nm}, d)\\right)-
\\frac{1}{N}\\sum_{n=1}^N \\log \\left(\\frac{1}{M}\\sum_{m=1}^{M}
p(y_n | \\theta_m, \\widetilde{\\theta}_{m}, d)\\right)
The latter form is used when `M_prime != None`.
:param function model: A pyro model accepting `design` as only argument.
:param torch.Tensor design: Tensor representation of design
:param list observation_labels: A subset of the sample sites
present in `model`. These sites are regarded as future observations
and other sites are regarded as latent variables over which a
posterior is to be inferred.
:param list target_labels: A subset of the sample sites over which the posterior
entropy is to be measured.
:param int N: Number of outer expectation samples.
:param int M: Number of inner expectation samples for `p(y|d)`.
:param int M_prime: Number of samples for `p(y | theta, d)` if required.
:param bool independent_priors: Only used when `M_prime` is not `None`. Indicates whether the prior distributions
for the target variables and the nuisance variables are independent. In this case, it is not necessary to
sample the targets conditional on the nuisance variables.
:return: EIG estimate
:rtype: `torch.Tensor`
"""
    if isinstance(observation_labels, str):  # allow a single site name instead of a list
observation_labels = [observation_labels]
if isinstance(target_labels, str):
target_labels = [target_labels]
# Take N samples of the model
expanded_design = lexpand(design, N) # N copies of the model
trace = poutine.trace(model).get_trace(expanded_design)
trace.compute_log_prob()
if M_prime is not None:
y_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in observation_labels}
theta_dict = {l: lexpand(trace.nodes[l]["value"], M_prime) for l in target_labels}
theta_dict.update(y_dict)
# Resample M values of u and compute conditional probabilities
# WARNING: currently the use of condition does not actually sample
# the conditional distribution!
# We need to use some importance weighting
conditional_model = pyro.condition(model, data=theta_dict)
if independent_priors:
reexpanded_design = lexpand(design, M_prime, 1)
else:
# Not acceptable to use (M_prime, 1) here - other variables may occur after
# theta, so need to be sampled conditional upon it
reexpanded_design = lexpand(design, M_prime, N)
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
conditional_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M_prime)
else:
# This assumes that y are independent conditional on theta
# Furthermore assume that there are no other variables besides theta
conditional_lp = sum(trace.nodes[l]["log_prob"] for l in observation_labels)
y_dict = {l: lexpand(trace.nodes[l]["value"], M) for l in observation_labels}
# Resample M values of theta and compute conditional probabilities
conditional_model = pyro.condition(model, data=y_dict)
# Using (M, 1) instead of (M, N) - acceptable to re-use thetas between ys because
# theta comes before y in graphical model
reexpanded_design = lexpand(design, M, 1) # sample M theta
retrace = poutine.trace(conditional_model).get_trace(reexpanded_design)
retrace.compute_log_prob()
marginal_lp = sum(retrace.nodes[l]["log_prob"] for l in observation_labels).logsumexp(0) \
- math.log(M)
terms = conditional_lp - marginal_lp
nonnan = (~torch.isnan(terms)).sum(0).type_as(terms)
terms[torch.isnan(terms)] = 0.
return terms.sum(0)/nonnan | 8de69e87677a4a74fd04ce4cf302221121d00b2d | 16,152 |
import numpy as np
import scipy.stats
# `numutils` (providing COMED and MAD) is assumed to come from the host
# library, as in cooltools.lib.numutils.
from cooltools.lib import numutils
def _orient_eigs(eigvecs, phasing_track, corr_metric=None):
"""
Orient each eigenvector deterministically according to the orientation
that correlates better with the phasing track.
Parameters
----------
eigvecs : 2D array (n, k)
`k` eigenvectors (as columns).
phasing_track : 1D array (n,)
Reference track for determining orientation.
corr_metric: spearmanr, pearsonr, var_explained, MAD_explained
Correlation metric to use for selecting orientations.
Returns
-------
2D array (n, k)
Reoriented `k` eigenvectors.
Notes
-----
This function does NOT change the order of the eigenvectors.
"""
for i in range(eigvecs.shape[1]):
mask = np.isfinite(eigvecs[:, i]) & np.isfinite(phasing_track)
if corr_metric is None or corr_metric == "spearmanr":
corr = scipy.stats.spearmanr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "pearsonr":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
elif corr_metric == "var_explained":
corr = scipy.stats.pearsonr(phasing_track[mask], eigvecs[mask, i])[0]
# multiply by the sign to keep the phasing information
corr = np.sign(corr) * corr * corr * np.var(eigvecs[mask, i])
elif corr_metric == "MAD_explained":
corr = (
numutils.COMED(phasing_track[mask], eigvecs[mask, i]) *
numutils.MAD(eigvecs[mask, i])
)
else:
raise ValueError("Unknown correlation metric: {}".format(corr_metric))
eigvecs[:, i] = np.sign(corr) * eigvecs[:, i]
return eigvecs | d6feebbd7b7748549ebc494bf8b00f0d9e313f7c | 16,153 |
def test_CreativeProject_auto_multivariate_functional(max_iter, max_response, error_lim, model_type):
"""
test that auto method works for a particular multivariate (bivariate) function
"""
# define data
covars = [(0.5, 0, 1), (0.5, 0, 1)] # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
# define response function
def f(x):
return (-(6 * x['covar0'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar0'].iloc[0] - 4)) * (-(6 * x['covar1'].iloc[0] - 2) ** 2 * np.sin(12 * x['covar1'].iloc[0] - 4))
# initialize class instance
cc = TuneSession(covars=covars, model=model_type)
# run the auto-method
cc.auto(response_samp_func=f, max_iter=max_iter)
# assert that max_iter steps taken by optimizer
assert cc.model["covars_sampled_iter"] == max_iter
assert cc.model["covars_proposed_iter"] == max_iter
assert cc.model["response_sampled_iter"] == max_iter
# assert that training and test data is stored
assert cc.train_X.shape[0] == max_iter
assert cc.proposed_X.shape[0] == max_iter
assert cc.train_X.shape[0] == max_iter
assert cc.train_X.shape[1] == 2 # check that it's bivariate train_X
# assert that best response is stored at each step
assert cc.covars_best_response_value.shape[0] == max_iter
assert cc.best_response_value.shape[0] == max_iter
# assert that the correct maximum and covariate values for that spot are identified
THEORETICAL_MAX_COVAR = 1.0
for it in range(len(covars)):
assert abs(cc.covars_best_response_value[-1, it].item() - THEORETICAL_MAX_COVAR)/THEORETICAL_MAX_COVAR \
< error_lim
assert abs(cc.best_response_value[-1].item() - max_response)/max_response < error_lim | ee0cc1d34a1836c8ea9ec2b23de175f4b6d8ca75 | 16,154 |
import numpy as np
from sklearn.model_selection import KFold
# KNN, multithreadPredict and perfMeasure are assumed to be project-specific
# helpers defined elsewhere.
def crossValidate(x, y, cv=5, K=None):
"""
:param y: N*L ranking vectors
:return:
"""
results = {"perf": []}
## cross validation ##
np.random.seed(1100)
kf = KFold(n_splits=cv, shuffle=True, random_state=0)
for train, test in kf.split(x):
x_train = x[train, :]
y_train = y[train, :]
x_test = x[test, :]
y_test = y[test, :]
# y_pred = KNN(K=K).fit(x_train, y_train).predict(x_test)
y_pred = multithreadPredict(x_test, KNN(K=K).fit(x_train, y_train))
        print(y_pred)
# print y_pred ### test
results["perf"].append(perfMeasure(y_pred, y_test, rankopt=True))
# print results["perf"][-1]
for key in results.keys():
item = np.array(results[key])
mean = np.nanmean(item, axis=0)
std = np.nanstd(item, axis=0)
results[key] = [mean, std]
return results | 820f5b53a38d2a64a3a1ee740d0fded020000bb7 | 16,155 |
def make_word_groups(vocab_words):
    """
    :param vocab_words: list whose first element is the prefix, followed by
        vocabulary words to which the prefix should be applied.
    :return: str of the prefix followed by each vocabulary word with the
        prefix applied, separated by ' :: '.
    This function takes a `vocab_words` list and returns a string
    with the prefix and the prefixed words, separated by ' :: '.
    """
    prefix = vocab_words[0]
    # Build [prefix, prefix+word, ...] without mutating the input list.
    new_list = [prefix] + [prefix + word for word in vocab_words[1:]]
return " :: ".join(new_list) | f940c602939ca3a9bab013f5847918f7ba4536ae | 16,156 |
def gpi_g10s40(rescale=False):
"""
Multiply by the 'rescale' factor to adjust hole sizes and centers in entrance pupil (PM)
(Magnify the physical mask coordinates up to the primary mirror size)
"""
demag = gpi_mag_asdesigned()
if rescale:
demag = demag/rescale # rescale 1.1 gives a bigger mask in PM pupil space
print ("gpi_g10s4...")
hdia, ctrs = gpi_g10s40_asmanufactured(1.0/demag) # meters
return hdia, ctrs
""" From GPI FPRD 2008 http://dms.hia.nrc.ca/view.php?fDocumentId=1398
    Filter 1/2 pwr bandwidth
    name  wavelen/um  %
    Y     0.95-1.14   18
    J     1.12-1.35   19
    H     1.50-1.80   18
    K1    1.9-2.19    14
    K2    2.13-2.4    12
    Spectral Resolution  34-36  35-39  44-49  62-70  75-83
    # spectral pixels    12-13  13-15  16-18  18-20  18-20
    pixels 14mas are nyquist at 1.1
""" | 4be151f7e99332be0f67d00619fe75def90c2b5d | 16,157 |
import inspect
def test_close_sections():
"""Parse sections without blank lines in between."""
def f(x, y, z):
"""
Parameters
----------
x :
X
y :
Y
z :
Z
Raises
------
Error2
error.
Error1
error.
Returns
-------
str
value
"""
return x + y + z
sections, errors = parse(inspect.getdoc(f), inspect.signature(f))
assert len(sections) == 3
assert not errors | 793d92d989f3caa020c06ea798f7f34703abd747 | 16,158 |
def _int_converter(value):
"""Convert string value to int.
We do not use the int converter default exception since we want to make
sure the exact http response code.
Raises: exception_handler.BadRequest if value can not be parsed to int.
Examples:
/<request_path>?count=10 parsed to {'count': '10'} and it should be
converted to {'count': 10}.
"""
try:
return int(value)
except Exception:
raise exception_handler.BadRequest(
'%r type is not int' % value
) | 6b5c99635211bf8ce2e3c2adc784f2a4e9ee355f | 16,160 |
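# Example: query-string values arrive as strings and are converted to int; a
# non-numeric value raises exception_handler.BadRequest instead of a plain
# ValueError.
assert _int_converter('10') == 10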
from typing import List
def split_rule(rules, rule_name, symbols_to_extract: List[str], subrule_name: str):
"""
    Keep only the alternatives of the rule that start with one of
    `symbols_to_extract`; move the remaining alternatives into a new subrule
    named `subrule_name` and reference it from the original rule.
"""
r = rule_by_name(rules, rule_name)
assert isinstance(r.body, Antlr4Selection), r
sub_options = Antlr4Selection([])
for o in r.body:
start_symbols = set()
_direct_left_corner(o, start_symbols, allow_eps_in_sel=True)
if not start_symbols.intersection(symbols_to_extract):
sub_options.append(o)
r.body = Antlr4Selection([o for o in r.body if not (o in sub_options)])
r.body.insert(0, Antlr4Symbol(subrule_name, False))
if len(r.body) == 1:
r.body = r.body[0]
assert len(sub_options) > 0
if len(sub_options) == 1:
sub_options = sub_options[0]
else:
sub_options = Antlr4Selection(sub_options)
sub_r = Antlr4Rule(subrule_name, sub_options)
rules.insert(rules.index(r), sub_r)
return sub_r | aa4d2aac62c488e3cd8d002556edea3aaef7185b | 16,161 |