content (string) | sha1 (string) | id (int64)
---|---|---|
def get_work_log_queue():
""" json格式为::
{'func':'transform',
'kw':{ ... # 和前面task_queue相同
},
"runtime":{ # 队列运行相关信息
'created':12323423 #进入原始队列时间
'queue':'q01' # 是在哪个原子原子队列
'start':123213123 #转换开始时间
'end':123213123 #转换结束时间
'worker':'w01', # 转换器名
'thread':'131231', #
'return':-1, # 返回的错误代号, 0表示成功
'reason':'失败原因' # 详细的原因
}
}
"""
work__log_queue = "ztq:queue:worker_log"
return get_limit_queue(work__log_queue, 200) | 26b2e3c73f7dd05b44659d3a02ca8d2b8205057e | 9,400 |
def is_first_buy(ka, ka1, ka2=None, pf=False):
"""确定某一级别一买
注意:如果本级别上一级别的 ka 不存在,无法识别本级别一买,返回 `无操作` !!!
一买识别逻辑:
1)必须:上级别最后一个线段标记和最后一个笔标记重合且为底分型;
2)必须:上级别最后一个向下线段内部笔标记数量大于等于6,且本级别最后一个线段标记为底分型;
3)必须:本级别向下线段背驰 或 本级别向下笔背驰;
4)辅助:下级别向下线段背驰 或 下级别向下笔背驰。
:param ka: KlineAnalyze
本级别
:param ka1: KlineAnalyze
上级别
:param ka2: KlineAnalyze
下级别,默认为 None
:param pf: bool
pf 为 precision first 的缩写, 控制是否使用 `高精度优先模式` ,默认为 False ,即 `高召回优先模式`。
在 `高精度优先模式` 下,会充分利用辅助判断条件提高识别准确率。
:return: dict
"""
detail = {
"标的代码": ka.symbol,
"操作提示": "无操作",
"出现时间": None,
"基准价格": None,
"其他信息": None
}
if not isinstance(ka1, KlineAnalyze):
return detail
    # the higher level's last segment mark and last stroke mark coincide and form a bottom fractal
if len(ka1.xd) >= 2 and ka1.xd[-1]['xd'] == ka1.bi[-1]['bi'] \
and ka1.xd[-1]['fx_mark'] == ka1.bi[-1]['fx_mark'] == 'd':
bi_inside = [x for x in ka1.bi if ka1.xd[-2]['dt'] <= x['dt'] <= ka1.xd[-1]['dt']]
        # the higher level's last downward segment contains at least 6 stroke marks, and this level's last segment mark is a bottom fractal
if len(bi_inside) >= 6 and ka.xd[-1]['fx_mark'] == 'd':
            # downward-segment divergence or downward-stroke divergence at this level
if (ka.xd_bei_chi() or
(ka.bi[-1]['fx_mark'] == 'd' and ka.bi_bei_chi())):
detail['操作提示'] = "一买"
detail['出现时间'] = ka.xd[-1]['dt']
detail['基准价格'] = ka.xd[-1]['xd']
if pf and detail["操作提示"] == "一买" and isinstance(ka2, KlineAnalyze):
        # downward-segment divergence or downward-stroke divergence at the lower level
if not ((ka2.xd[-1]['fx_mark'] == 'd' and ka2.xd_bei_chi()) or
(ka2.bi[-1]['fx_mark'] == 'd' and ka2.bi_bei_chi())):
detail['操作提示'] = "无操作"
return detail | 5ea35d728f3ddfaa5cff09a2e735c480f1e3c622 | 9,401 |
def preprocess(path, l_pass=0.7, h_pass=0.01, bandpass=True, short_ch_reg=False, tddr=True, negative_correlation=False, verbose=False, return_all=False):
"""
Load raw data and preprocess
:param str path: path to the raw data
:param float l_pass: low pass frequency
:param float h_pass: high pass frequency
:param bool bandpass: apply bandpass filter
:param bool short_ch_reg: apply short channel regression
:param bool tddr: apply tddr
:param bool negative_correlation: apply negative correlation
    :param bool verbose: print progress
    :param bool return_all: if True, skip separating the short and long channels
    :return: preprocessed data
"""
if verbose:
ic("Loading ", path)
raw_intensity = mne.io.read_raw_snirf(path, preload=True)
step_od = mne.preprocessing.nirs.optical_density(raw_intensity)
# sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od, l_freq=0.7, h_freq=1.5)
# raw_od.info['bads'] = list(compress(raw_od.ch_names, sci < 0.5))
if verbose:
ic("Apply short channel regression.")
if short_ch_reg:
step_od = mne_nirs.signal_enhancement.short_channel_regression(step_od)
if verbose:
ic("Do temporal derivative distribution repair on:", step_od)
if tddr:
step_od = mne.preprocessing.nirs.tddr(step_od)
if verbose:
ic("Convert to haemoglobin with the modified beer-lambert law.")
step_haemo = beer_lambert_law(step_od, ppf=6)
if verbose:
ic("Apply further data cleaning techniques and extract epochs.")
if negative_correlation:
step_haemo = mne_nirs.signal_enhancement.enhance_negative_correlation(
step_haemo)
if not return_all:
if verbose:
ic("Separate the long channels and short channels.")
short_chs = get_short_channels(step_haemo)
step_haemo = get_long_channels(step_haemo)
if verbose:
ic("Bandpass filter on:", step_haemo)
if bandpass:
step_haemo = step_haemo.filter(
h_pass, l_pass, h_trans_bandwidth=0.3, l_trans_bandwidth=h_pass*0.25)
return step_haemo | 01d508de322fa007886e34838911d2cccea79aab | 9,402 |
def geomapi_To2d(*args):
"""
    * To intersect a curve and a surface. This function builds (in the parametric space of
      the plane P) a 2D curve equivalent to the 3D curve C. The 3D curve C is considered
      to be located in the plane P.
      Warning: The 3D curve C must be of one of the following types: a line, a circle, an
      ellipse, a hyperbola, a parabola, a Bezier curve or a BSpline curve.
      Exceptions: Standard_NoSuchObject if C is not a curve of a supported type.
:param C:
:type C: Handle_Geom_Curve &
:param P:
:type P: gp_Pln
:rtype: Handle_Geom2d_Curve
"""
return _GeomAPI.geomapi_To2d(*args) | 7a8a6436f364e933d71ba8fb47617f01b0e13b47 | 9,403 |
import yaml
def get_object_list():
"""Returns the object name list for APC2015.
Args:
None.
Returns:
objects (list): List of object name.
"""
pkg_path = rospkg.RosPack().get_path(PKG)
yaml_file = osp.join(pkg_path, 'data/object_list.yml')
with open(yaml_file) as f:
        objects = yaml.safe_load(f)
return objects | 7fd1268ef8804eb394a42a6b2fdc9fc223cd4316 | 9,404 |
def gtMakeTAKBlobMsg(callsign, text, aesKey=False):
"""
Assemble an ATAK plugin compatible chat message blob
(suitable for feeding to gtMakeAPIMsg() )
With optional AES encryption, if a key is provided
"""
body = (callsign + b': ' + text)[:230]
# Apply optional encryption (and base64 encoding only for chats)
if aesKey:
body = b64encode(aesEncrypt(body, aesKey))
return gtMakeGTABlobMsg(body, 'A') | ecc562e92a72a0a6e0d5cc45563d1c89962d931b | 9,405 |
import re
def validate_json_with_extensions(value, rule_obj, path):
""" Performs the above match, but also matches a dict or a list. This it
just because it seems like you can't match a dict OR a list in pykwalify
"""
validate_extensions(value, rule_obj, path)
if not isinstance(value, (list, dict)):
raise BadSchemaError("Error at {} - expected a list or dict".format(path))
def nested_values(d):
if isinstance(d, dict):
for v in d.values():
if isinstance(v, dict):
for v_s in v.values():
yield v_s
else:
yield v
else:
yield d
if any(isinstance(i, ApproxScalar) for i in nested_values(value)):
# If this is a request data block
if not re.search(r"^/stages/\d/(response/body|mqtt_response/json)", path):
raise BadSchemaError(
"Error at {} - Cannot use a '!approx' in anything other than an expected http response body or mqtt response json".format(
path
)
)
return True | ef4d5744adf0c2d3ca326da66cbe608b306a2ca3 | 9,406 |
def artists_by_rating(formatter, albums):
"""Returns the artists sorted by decreasing mean album rating.
Only artists with more than 1 reviewed albums are considered.
"""
artist_tags = set([album["artist_tag"] for album in albums])
artists = []
# build the list of artists and compute their ratings
for artist_tag in artist_tags:
specific_albums = [x for x in albums if x["artist_tag"] == artist_tag]
if len(specific_albums) > 1:
rating = compute_artist_rating([x["rating"] for x in specific_albums])
artists.append(
{
"artist_tag": artist_tag,
"artist": specific_albums[0]["artist"],
"rating": rating,
}
)
sorted_artists = sorted(
artists, key=lambda x: (x["rating"], x["artist"]), reverse=True
)
return formatter.parse_list(sorted_artists, formatter.format_artist_rating) | fdf443973b4187650d95f76f8cde2a61ea7a1a3f | 9,407 |
def st_max(*args):
"""Max function.
Parameters
----------
x : float, int, MissingValue instance, or None
(2 or more such inputs allowed)
Returns
-------
max(x1, x2, ...) if any x is non-missing (with missing values ignored).
Otherwise, MISSING (".") returned.
"""
if len(args) <= 1:
raise TypeError("need at least 2 arguments")
vectors = [a for a in args if isinstance(a, StataVarVals)]
scalars = [
a for a in args if not isinstance(a, StataVarVals) and not _is_missing(a)
]
if len(vectors) != 0:
sca_max = max(scalars) if not len(scalars) == 0 else None
return StataVarVals([_max(*v, sub_max=sca_max) for v in zip(*vectors)])
elif len(scalars) == 0:
return mv
return max(scalars) | 978cab7522250541890c723fcf33d2ded9539293 | 9,408 |
def is_button_controller(device: Device) -> bool:
"""Return true if the device is a stateless button controller."""
return (
CAP_PUSHABLE_BUTTON in device.capabilities
or CAP_HOLDABLE_BUTTON in device.capabilities
or CAP_DOUBLE_TAPABLE_BUTTON in device.capabilities
) | aa16170469f6a65d2ed94ab251817e722082ef16 | 9,409 |
import numpy as np
import os
import nibabel as nb
def gen_acq_noddi(in_file, epi_params, alt_epi_params, readout, readout_alt):
"""
This is a function to generate the FSL topup acq.txt file
:param in_file:
:param epi_params:
:param alt_epi_params:
:param readout:
:param readout_alt:
:return:
"""
out_file = os.path.abspath('acq.txt')
vols = nb.load(in_file).get_data().shape[-1]
arr = np.ones([vols, 4])
for i in range(vols):
if i < vols/2:
if epi_params['enc_dir'] == 'y-':
arr[i, :] = np.array((0, -1, 0, readout))
elif epi_params['enc_dir'] == 'y':
arr[i, :] = np.array((0, 1, 0, readout))
elif epi_params['enc_dir'] == 'x':
arr[i, :] = np.array((0, 1, 0, readout))
elif epi_params['enc_dir'] == 'x-':
arr[i, :] = np.array((0, -1, 0, readout))
elif epi_params['enc_dir'] == 'z':
arr[i, :] = np.array((0, 1, 0, readout))
            elif epi_params['enc_dir'] == 'z-':
arr[i, :] = np.array((0, -1, 0, readout))
else:
if alt_epi_params['enc_dir_alt'] == 'y-':
arr[i, :] = np.array((0, -1, 0, readout_alt))
elif alt_epi_params['enc_dir_alt'] == 'y':
arr[i, :] = np.array((0, 1, 0, readout_alt))
elif alt_epi_params['enc_dir_alt'] == 'x':
arr[i, :] = np.array((0, 1, 0, readout_alt))
elif alt_epi_params['enc_dir_alt'] == 'x-':
arr[i, :] = np.array((0, -1, 0, readout_alt))
elif alt_epi_params['enc_dir_alt'] == 'z':
arr[i, :] = np.array((0, 1, 0, readout_alt))
            elif alt_epi_params['enc_dir_alt'] == 'z-':
arr[i, :] = np.array((0, -1, 0, readout_alt))
np.savetxt(out_file, arr)
return out_file | 3201c1faeb1842ad4b2dd3d903d1c103572375cf | 9,410 |
def list_parts(bucket, key, upload_id):
"""Lists the parts that have been uploaded for a specific multipart upload.
This operation must include the upload ID, which you obtain by
sending the initiate multipart upload request (see
CreateMultipartUpload ). This request returns a maximum of 1,000
uploaded parts. The default number of parts returned is 1,000
parts. You can restrict the number of parts returned by specifying
the max-parts request parameter. If your multipart upload consists
of more than 1,000 parts, the response returns an IsTruncated
field with the value of true, and a NextPartNumberMarker
element. In subsequent ListParts requests you can include the
part-number-marker query string parameter and set its value to the
NextPartNumberMarker field value from the previous response.
See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.list_parts
Request Syntax
--------------
response = client.list_parts(
Bucket='string',
Key='string',
MaxParts=123,
PartNumberMarker=123,
UploadId='string',
RequestPayer='requester',
ExpectedBucketOwner='string'
)
Response Syntax
---------------
{
'AbortDate': datetime(2015, 1, 1),
'AbortRuleId': 'string',
'Bucket': 'string',
'Key': 'string',
'UploadId': 'string',
'PartNumberMarker': 123,
'NextPartNumberMarker': 123,
'MaxParts': 123,
'IsTruncated': True|False,
'Parts': [
{
'PartNumber': 123,
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123
},
],
'Initiator': {
'ID': 'string',
'DisplayName': 'string'
},
'Owner': {
'DisplayName': 'string',
'ID': 'string'
},
'StorageClass': 'STANDARD'|'REDUCED_REDUNDANCY'|'STANDARD_IA'|'ONEZONE_IA'|'INTELLIGENT_TIERING'|'GLACIER'|'DEEP_ARCHIVE'|'OUTPOSTS',
'RequestCharged': 'requester'
}
Parameters
----------
bucket : str
Name of the S3 bucket
key : str
Name of the key for the multipart upload
upload_id : str
The unique identifier returned on creation of the multipart upload
Returns
-------
response : obj
A requests.Response object
"""
client = boto3.client("s3")
try:
response = client.list_parts(
Bucket=bucket,
Key=key,
UploadId=upload_id,
)
logger.info(
f"Listed parts for multipart upload {upload_id} for key {key} in bucket {bucket}"
)
except Exception as e:
logger.error(
f"Could not list parts for multipart upload {upload_id} for key {key} in bucket {bucket}: {e}"
)
return response | eb343e071ce72ea326fc479934984fdff425dfec | 9,411 |
def leap_year():
"""
    This function reads a year (a four-digit integer) from user input and reports whether it is a leap year.
Rules for a leap year:
As you surely know, due to some astronomical reasons, years may be leap or common.
The former are 366 days long, while the latter are 365 days long.
Since the introduction of the Gregorian calendar (in 1582), the following rule is used to determine the kind of year:
-->if the year number isn't divisible by four, it's a common year;
-->otherwise, if the year number isn't divisible by 100, it's a leap year;
-->otherwise, if the year number isn't divisible by 400, it's a common year;
-->otherwise, it's a leap year.
    :return: str -- a message stating whether the year is common or leap
"""
year = int(input("Enter a year: "))
mess_1 = 'It\'s a common year!'
mess_2 = 'It\'s a leap year!'
if year <= 1582:
return f'{year} does not fall under Gregorian Calendar!!'
elif year % 4 != 0:
return mess_1
elif year % 100 != 0:
return mess_2
elif year % 400 != 0:
return mess_1
else:
return mess_2 | 5cf459514ce768c1cf633fdddab5f986004bc1c8 | 9,412 |
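For reference, the same Gregorian rule can be stated as a pure predicate (a hypothetical helper, not part of the snippet above), which is easier to test than the interactive version:

def is_leap(year: int) -> bool:
    # divisible by 4, except centuries, unless the century is divisible by 400
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

assert is_leap(2000)      # divisible by 400
assert not is_leap(1900)  # century not divisible by 400
assert is_leap(2024)
assert not is_leap(2023)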
import math
def parse(files, **kwargs):
"""Parse all BAM files."""
parsed = []
if kwargs["meta"].has_field("base_coverage"):
cov_range = kwargs["meta"].field_meta("base_coverage")["range"]
else:
cov_range = [math.inf, -math.inf]
if kwargs["meta"].has_field("read_coverage"):
read_cov_range = kwargs["meta"].field_meta("read_coverage")["range"]
else:
read_cov_range = [math.inf, -math.inf]
names = base_names(files)
for file in names:
if ".json" in file:
fields = parse_json_cov(
file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range
)
else:
fields = parse_bam(
file, **kwargs, cov_range=cov_range, read_cov_range=read_cov_range
)
if "cov" in fields:
parsed.append(fields["cov"])
cov_range = fields["cov_range"]
if "y" not in kwargs["meta"].plot:
kwargs["meta"].plot.update({"y": fields["cov_id"]})
if "read_cov" in fields:
parsed.append(fields["read_cov"])
read_cov_range = fields["read_cov_range"]
return parsed | c12b068f2a32052cbaa583a4704f86c25e577947 | 9,413 |
def login(request):
"""Login view for GET requests."""
logged_in = request.authenticated_userid is not None
if logged_in:
return {'logged_in': True,
'form_enabled': False,
'status': u'Already logged in',
'status_type': u'info'}
status = u''
status_type = u''
return {
'form_enabled': True,
'status_type': status_type,
'status': status,
'logged_in': False,
'username': request.params.get('username', u''),
} | 8cab36d8d059d0683ef2e84a40cca5c99a27c6fc | 9,414 |
def of_type(_type, value_1, *args) -> bool:
"""
Check if a collection of values are of the same type.
Parameters:
_type (any): The type to check for.
value_1 (any): The first value to check.
*args (any): Rest of values to check against given type.
Returns:
(bool) whether or not all inputs of given type.
"""
all_of_type = isinstance(value_1, _type)
i = len(args)
    while i > 0 and all_of_type:
all_of_type = isinstance(args[i-1], _type)
i -= 1
return all_of_type | eab1e70655ff74b1cbfc338a893719b7f0681f4a | 9,415 |
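A few illustrative calls, assuming of_type is imported from this module; note that isinstance also accepts a tuple of types:

assert of_type(int, 1, 2, 3) is True          # every value is an int
assert of_type(str, "a", "b", 3) is False     # 3 is not a str
assert of_type((int, float), 1, 2.5) is True  # a tuple of types works too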
import os
import numpy
def configuration(parent_package='', top_path=None):
"""[Placeholder].
Parameters
----------
parent_package :
top_path :
Returns
-------
configuration :
"""
build_path = build_mlpack()
config = Configuration('mlpack', parent_package, top_path)
libraries = ['mlpack', 'boost_serialization']
if os.name == 'posix':
libraries.append('m')
for pyx in ['_arma_numpy.pyx', '_det.pyx']:
config.add_extension(
pyx.split('.')[0],
sources=[pyx],
language='c++',
include_dirs=[numpy.get_include(), os.path.join(build_path, 'include')],
# Needed for arma_numpy.pyx
library_dirs=[os.path.join(build_path, 'lib')],
libraries=libraries,
extra_compile_args=('-DBINDING_TYPE=BINDING_TYPE_PYX '
'-std=c++11 -Wall -Wextra -ftemplate-depth=1000 '
'-O3 -fopenmp').split(' '),
extra_link_args=['-fopenmp'],
undef_macros=[] if len("") == 0 else ''.split(';')
)
# Cythonize files (i.e. create .cpp files and return cpp sources)
config.ext_modules = cythonize(config.ext_modules)
config.add_subpackage('tests')
return config | 9724f21048071b0b6d7d213d943da77569514349 | 9,416 |
import yaml
def read_config(path):
"""
Reads the Kong config file (YAML).
"""
if path is None:
raise Exception(
"empty path provided. please provide a path using `--config=<config.yml>`"
)
with open(path, "r") as stream:
try:
return yaml.safe_load(stream)
except yaml.YAMLError as exc:
raise exc | 343fabb8fa1c4cc78ace63466c864e50cf5dc974 | 9,417 |
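A small self-contained usage sketch (the file name here is arbitrary, not taken from the snippet):

from pathlib import Path

Path("config.yml").write_text("host: localhost\nport: 8001\n")
cfg = read_config("config.yml")
assert cfg == {"host": "localhost", "port": 8001}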
def generate_grid_world(grid, prob, pos_rew, neg_rew, gamma=.9, horizon=100):
"""
This Grid World generator requires a .txt file to specify the
shape of the grid world and the cells. There are five types of cells: 'S' is
the starting position where the agent is; 'G' is the goal state; '.' is a
normal cell; '*' is a hole, when the agent steps on a hole, it receives a
negative reward and the episode ends; '#' is a wall, when the agent is
supposed to step on a wall, it actually remains in its current state. The
initial states distribution is uniform among all the initial states
provided.
The grid is expected to be rectangular.
Args:
grid (str): the path of the file containing the grid structure;
prob (float): probability of success of an action;
pos_rew (float): reward obtained in goal states;
neg_rew (float): reward obtained in "hole" states;
gamma (float, .9): discount factor;
horizon (int, 100): the horizon.
Returns:
A FiniteMDP object built with the provided parameters.
"""
grid_map, cell_list = parse_grid(grid)
p = compute_probabilities(grid_map, cell_list, prob)
r = compute_reward(grid_map, cell_list, pos_rew, neg_rew)
mu = compute_mu(grid_map, cell_list)
return FiniteMDP(p, r, mu, gamma, horizon) | 753fa30327f2dddfb4a459fbb40e842b28b0eda8 | 9,418 |
def sqrt_quadrature_scheme(N_poly, N_poly_log):
""" Returns quadrature rule that is exact on 0^1 for
p(x) + q(x)sqrt(x) for deg(p) <= N_poly and deg(q) <= N_poly_sqrt.
"""
nodes, weights = sqrt_quadrature_rule(N_poly, N_poly_log)
return QuadScheme1D(nodes, weights) | c39539604955f473c0a77816090fe180645670ae | 9,419 |
def check_dataset_update(args, dataset):
"""Checks if the dataset information must be updated.
"""
return (args.dataset_attributes or
args.import_fields or
(args.shared_flag and r.shared_changed(args.shared, dataset)) or
(((hasattr(args, 'max_categories') and args.max_categories > 0) or
(hasattr(args, 'multi_label') and args.multi_label)) and
args.objective_field)) | 005700a0d544333f018ec423a6e3d287ab982553 | 9,420 |
from typing import Dict
from typing import List
import json
def get_package_extras(provider_package_id: str) -> Dict[str, List[str]]:
"""
Finds extras for the package specified.
:param provider_package_id: id of the package
"""
if provider_package_id == 'providers':
return {}
with open(DEPENDENCIES_JSON_FILE) as dependencies_file:
cross_provider_dependencies: Dict[str, List[str]] = json.load(dependencies_file)
extras_dict = (
{
module: [get_pip_package_name(module)]
for module in cross_provider_dependencies[provider_package_id]
}
if cross_provider_dependencies.get(provider_package_id)
else {}
)
provider_yaml_dict = get_provider_yaml(provider_package_id)
additional_extras = provider_yaml_dict.get('additional-extras')
if additional_extras:
for key in additional_extras:
if key in extras_dict:
extras_dict[key].append(additional_extras[key])
else:
extras_dict[key] = additional_extras[key]
return extras_dict | 15ac01740e60d2af73458b7ef46330708831a0ca | 9,421 |
def e(a: float, b: float) -> float:
"""
e = sqrt(1 + (b * b) / (a * a))
:param a: semi-major axis
:type a: float
:param b: semi-minor axis
:type b: float
:return: eccentricity
:rtype: float
"""
return np.sqrt(1 + (b * b) / (a * a)) | f2eec5065d735984daa5197b8401ec3a60914d25 | 9,422 |
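A quick numeric check of the formula exactly as written (sqrt(1 + b^2/a^2), so the result is always greater than 1):

import numpy as np

# sqrt(1 + 16/9) = sqrt(25/9) = 5/3
assert np.isclose(e(3.0, 4.0), 5.0 / 3.0)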
from pathlib import Path
import sh
def parse_note(path: Path) -> dict:
""" convert note in plain text to a dictionary.
Line #1 ~ #5 are meta data of the note.
Line #9 to end is the body.
"""
header_line_number = 5
body_start_line = 9
res = {}
with open(path) as f:
for x in range(header_line_number):
the_line = next(f).strip()
if the_line.endswith(':'):
the_line += ' ' # fix 'Tags: ' striped to 'Tags:' problem
header_sections = the_line.split(': ')
assert len(header_sections) == 2, f'Please fix header {the_line} of note {path}'
res[header_sections[0]] = header_sections[1]
body = sh.sed('-n', f'{body_start_line},$p', path).stdout.decode('utf-8')
res['body'] = body
return res | 792f4bace60fa52b1a7cbeeaf0dabd881ffd4a24 | 9,423 |
def get_previous_sle_for_warehouse(last_sle, exclude_current_voucher=False):
"""get stock ledger entries filtered by specific posting datetime conditions"""
last_sle['time_format'] = '%H:%i:%s'
if not last_sle.get("posting_date"):
last_sle["posting_date"] = "1900-01-01"
if not last_sle.get("posting_time"):
last_sle["posting_time"] = "00:00"
sle = frappe.db.sql("""
select *, timestamp(posting_date, posting_time) as "timestamp"
from `tabStock Ledger Entry`
where item_code = %(item_code)s
and warehouse = %(warehouse)s
and is_cancelled = 0
and timestamp(posting_date, time_format(posting_time, %(time_format)s)) < timestamp(%(posting_date)s, time_format(%(posting_time)s, %(time_format)s))
order by timestamp(posting_date, posting_time) desc, creation desc
limit 1
for update""", last_sle, as_dict=1)
return sle[0] if sle else frappe._dict() | 7fdc0db05564cc54555784c474c7bc4cb33e280a | 9,424 |
import networkx as nx
def forest_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
"""
Creates a nice utf8 representation of a directed forest
Parameters
----------
graph : nx.DiGraph | nx.Graph
Graph to represent (must be a tree, forest, or the empty graph)
with_labels : bool
If True will use the "label" attribute of a node to display if it
exists otherwise it will use the node value itself. Defaults to True.
sources : List
Mainly relevant for undirected forests, specifies which nodes to list
first. If unspecified the root nodes of each tree will be used for
directed forests; for undirected forests this defaults to the nodes
with the smallest degree.
write : callable
Function to use to write to, if None new lines are appended to
a list and returned. If set to the `print` function, lines will
be written to stdout as they are generated. If specified,
this function will return None. Defaults to None.
ascii_only : Boolean
If True only ASCII characters are used to construct the visualization
Returns
-------
str | None :
utf8 representation of the tree / forest
Example
-------
>>> graph = nx.balanced_tree(r=2, h=3, create_using=nx.DiGraph)
>>> print(nx.forest_str(graph))
╙── 0
├─╼ 1
│ ├─╼ 3
│ │ ├─╼ 7
│ │ └─╼ 8
│ └─╼ 4
│ ├─╼ 9
│ └─╼ 10
└─╼ 2
├─╼ 5
│ ├─╼ 11
│ └─╼ 12
└─╼ 6
├─╼ 13
└─╼ 14
>>> graph = nx.balanced_tree(r=1, h=2, create_using=nx.Graph)
>>> print(nx.forest_str(graph))
╙── 0
└── 1
└── 2
>>> print(nx.forest_str(graph, ascii_only=True))
+-- 0
L-- 1
L-- 2
"""
printbuf = []
if write is None:
_write = printbuf.append
else:
_write = write
    # Define glyphs
# Notes on available box and arrow characters
# https://en.wikipedia.org/wiki/Box-drawing_character
# https://stackoverflow.com/questions/2701192/triangle-arrow
if ascii_only:
glyph_empty = "+"
glyph_newtree_last = "+-- "
glyph_newtree_mid = "+-- "
glyph_endof_forest = " "
glyph_within_forest = ": "
glyph_within_tree = "| "
glyph_directed_last = "L-> "
glyph_directed_mid = "|-> "
glyph_undirected_last = "L-- "
glyph_undirected_mid = "|-- "
else:
glyph_empty = "╙"
glyph_newtree_last = "╙── "
glyph_newtree_mid = "╟── "
glyph_endof_forest = " "
glyph_within_forest = "╎ "
glyph_within_tree = "│ "
glyph_directed_last = "└─╼ "
glyph_directed_mid = "├─╼ "
glyph_undirected_last = "└── "
glyph_undirected_mid = "├── "
if len(graph.nodes) == 0:
_write(glyph_empty)
else:
if not nx.is_forest(graph):
raise nx.NetworkXNotImplemented("input must be a forest or the empty graph")
is_directed = graph.is_directed()
succ = graph.succ if is_directed else graph.adj
if sources is None:
if is_directed:
# use real source nodes for directed trees
sources = [n for n in graph.nodes if graph.in_degree[n] == 0]
else:
# use arbitrary sources for undirected trees
sources = [
min(cc, key=lambda n: graph.degree[n])
for cc in nx.connected_components(graph)
]
# Populate the stack with each source node, empty indentation, and mark
# the final node. Reverse the stack so sources are popped in the
# correct order.
last_idx = len(sources) - 1
stack = [(node, "", (idx == last_idx)) for idx, node in enumerate(sources)][
::-1
]
seen = set()
while stack:
node, indent, islast = stack.pop()
if node in seen:
continue
seen.add(node)
if not indent:
# Top level items (i.e. trees in the forest) get different
# glyphs to indicate they are not actually connected
if islast:
this_prefix = indent + glyph_newtree_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_newtree_mid
next_prefix = indent + glyph_within_forest
else:
# For individual tree edges distinguish between directed and
# undirected cases
if is_directed:
if islast:
this_prefix = indent + glyph_directed_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_directed_mid
next_prefix = indent + glyph_within_tree
else:
if islast:
this_prefix = indent + glyph_undirected_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_undirected_mid
next_prefix = indent + glyph_within_tree
if with_labels:
label = graph.nodes[node].get("label", node)
else:
label = node
_write(this_prefix + str(label))
# Push children on the stack in reverse order so they are popped in
# the original order.
children = [child for child in succ[node] if child not in seen]
for idx, child in enumerate(children[::-1], start=1):
islast_next = idx <= 1
try_frame = (child, next_prefix, islast_next)
stack.append(try_frame)
if write is None:
# Only return a string if the custom write function was not specified
return "\n".join(printbuf) | 3486545035b9c2a8954102bdb92ebe9dd7b1fa24 | 9,425 |
from copy import copy
def rotated_shower(shower, alt, az):
"""
Return a rotated shower object from a shower object and a direction (alt, az)
Parameters
----------
shower: shower class object
Returns
-------
copy of the given shower but rotated
"""
rot_shower = copy(shower)
rot_shower.particles = shower_array_rot(shower.particles, shower.alt, shower.az)
return rot_shower | d420c408083a54837c87db405a8d65abfe46a5f8 | 9,426 |
import numpy as np
def angle2circle(angles):
    """from degrees to radians, multiplied by 2 (after adding a 7.5 degree offset)"""
return np.deg2rad(2 * (np.array(angles) + 7.5)) | 4c944725fd44480b5f7261c24608b3e06cec013a | 9,427 |
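A quick sanity check of the arithmetic:

import numpy as np

# (7.5 + 7.5) * 2 = 30 degrees -> pi/6 radians
assert np.isclose(angle2circle([7.5])[0], np.pi / 6)
# (-7.5 + 7.5) * 2 = 0 degrees -> 0 radians
assert np.isclose(angle2circle([-7.5])[0], 0.0)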
def _make_source(cls_source: str, cls_name: str, instance_method: str):
"""Converts a class source to a string including necessary imports.
Args:
cls_source (str): A string representing the source code of a user-written class.
cls_name (str): The name of the class cls_source represents.
instance_method (str): The method within the class that should be called from __main__
Returns:
A string representing a user-written class that can be written to a file in
order to yield an inner script for the ModelBuilder SDK. The only difference
between the user-written code and the string returned by this method is that
the user has the option to specify a method to call from __main__.
"""
src = "\n".join(["import torch", "import pandas as pd", cls_source])
src = src + "if __name__ == '__main__':\n" + f"\t{cls_name}().{instance_method}()"
return src | 105ca5d34c0de2bfc81937aaaf14b4d610eaa35a | 9,428 |
def prepend_pass_statement(line: str) -> str:
"""Prepend pass at indent level and comment out the line."""
colno = num_indented(line)
right_side = line[colno:]
indent = " " * colno
return indent + "pass # " + right_side | 7d7156581167fcd6ec5c4afc482cf8bf3dea11bc | 9,429 |
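The snippet relies on a num_indented helper that is not shown; a minimal sketch of what it presumably does (an assumption, not taken from the source), plus a usage example:

def num_indented(line: str) -> int:
    # assumed definition: number of leading spaces
    return len(line) - len(line.lstrip(" "))

assert prepend_pass_statement("    return x") == "    pass # return x"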
from datetime import datetime
import time
import numpy as np
import pandas as pd
def download_spot_by_dates(start=datetime(2011, 1, 1)):
"""
    Download the data and store it as csv files
    :param start: 2011-01-01 is the earliest available date
    :return: True if files were downloaded, False otherwise
"""
file_index = get_download_file_index(SPREAD_DIR, start=start)
if file_index.empty:
return False
for date in file_index:
date_str = date.strftime('%Y-%m-%d')
file_path = SPREAD_DIR / '{}.csv'.format(date_str)
if file_path.exists():
continue
table = download_spot_by_date(date_str)
if len(table) != 0:
print(date)
spread_df = pd.DataFrame(table, columns=HEADER)
spread_df.to_csv(str(file_path), index=False, encoding='gb2312')
time.sleep(np.random.rand() * 5)
return True | 34574d4cd5d1985850fe681c3e5e4f6a3ebdc1a4 | 9,430 |
import numpy as np
def truncate_range(data, percMin=0.25, percMax=99.75, discard_zeros=True):
"""Truncate too low and too high values.
Parameters
----------
data : np.ndarray
Image to be truncated.
percMin : float
Percentile minimum.
percMax : float
Percentile maximum.
discard_zeros : bool
Discard voxels with value 0 from truncation.
Returns
-------
data : np.ndarray
"""
if discard_zeros:
msk = ~np.isclose(data, 0)
pMin, pMax = np.nanpercentile(data[msk], [percMin, percMax])
else:
pMin, pMax = np.nanpercentile(data, [percMin, percMax])
temp = data[~np.isnan(data)]
temp[temp < pMin], temp[temp > pMax] = pMin, pMax # truncate min and max
data[~np.isnan(data)] = temp
if discard_zeros:
data[~msk] = 0 # put back masked out voxels
return data | a273db14c8f651dcbdaa39825e1150bd0cdc119b | 9,431 |
async def payment_list(request):
"""
---
description: Show outgoing payments, regarding {bolt11} or {payment_hash} if set Can only specify one of {bolt11} or {payment_hash}
tags:
- payments
produces:
- application/json
parameters:
- in: body
name: body
required: false
schema:
type: object
properties:
bolt11:
type: string
payment_hash:
type: string
responses:
"200":
description: successful operation.
"""
data = await request.json()
bolt11 = data.get('bolt11', None)
payment_hash = data.get('payment_hash', None)
return web.json_response(request.app['rpc'].listpayments(bolt11=bolt11, payment_hash=payment_hash)) | 3a4fe428adb10dd53e9b2564fea59cdc4b7c87ff | 9,432 |
import io
def write_opened(dir, file_dict, data_dict, verbose=True):
"""
read in dictionary with open files as values
and write data to files
"""
for game_id, vals in data_dict.items():
f = file_dict.get(game_id)
if not f:
fn = dir + str(game_id) + ".csv"
f = io.init_csv(fn, header=bm.LINE_COLUMNS, close=False)
file_dict[game_id] = f
io.write_list(f, vals)
if verbose:
print(f"writing {vals} to game [{game_id}]")
return file_dict | eb3ac9b95b70df31eb1ea24b94b5e416966b7bc5 | 9,433 |
def get_accessible_cases(item, user):
"""Return all accessible for a cohort and user."""
return getattr(item, "get_accessible_cases_for_user")(user) | 42d54ebf672ce401ac311f9868f6b19f93418065 | 9,434 |
import numba as nb
import numpy as np
def aux_conv5(A, B, n, idx):
"""
Performs the convolution of A and B where B = A* (enumerate-for-loop)
:param A: Coefficients matrix 1 (orders, buses)
:param B: Coefficients matrix 2 (orders, buses)
    :param n: last order of the coefficients in the while loop
    :param idx: bus indices array
    :return: Array with the convolution for the buses given by "idx"
"""
suma = np.zeros(len(idx), dtype=nb.complex128)
for m in range(0, n):
for i, k in enumerate(idx):
suma[i] += A[m, k] * B[n-1-m, k]
return suma.real | 0acaece3da86ac578672b7ab7e0f506117e752d3 | 9,435 |
import matplotlib.pyplot as plt
import numpy as np
def plot_phaseogram(phaseogram, phase_bins, time_bins, unit_str='s', ax=None,
**plot_kwargs):
"""Plot a phaseogram.
Parameters
----------
phaseogram : NxM array
The phaseogram to be plotted
phase_bins : array of M + 1 elements
The bins on the x-axis
time_bins : array of N + 1 elements
The bins on the y-axis
Other Parameters
----------------
unit_str : str
String indicating the time unit (e.g. 's', 'MJD', etc)
ax : `matplotlib.pyplot.axis` instance
Axis to plot to. If None, create a new one.
plot_kwargs : dict
Additional arguments to be passed to pcolormesh
Returns
-------
ax : `matplotlib.pyplot.axis` instance
Axis where the phaseogram was plotted.
"""
if ax is None:
plt.figure('Phaseogram')
ax = plt.subplot()
ax.pcolormesh(phase_bins, time_bins, phaseogram.T, **plot_kwargs)
ax.set_ylabel('Time ({})'.format(unit_str))
ax.set_xlabel('Phase')
ax.set_xlim([0, np.max(phase_bins)])
ax.set_ylim([np.min(time_bins), np.max(time_bins)])
return ax | b7a3b8aa0cf6a16e67e3d5059049082b6d308d7e | 9,436 |
def load_rapidSTORM_track_header(path):
"""
Load xml header from a rapidSTORM (track) single-molecule localization file and identify column names.
Parameters
----------
path : str, bytes, os.PathLike, file-like
File path for a rapidSTORM file to load.
Returns
-------
list of str
A list of valid dataset property keys as derived from the rapidSTORM identifiers.
"""
# read xml part in header
with open_path_or_file_like(path) as file:
return _read_rapidSTORM_track_header(file) | 584baa4bd0a634608bb2c254314ad80a9c7650de | 9,437 |
def hex_to_byte(hexStr):
""" Convert hex strings to bytes. """
bytes = []
hexStr = ''.join(hexStr.split(" "))
for i in range(0, len(hexStr), 2):
bytes.append(chr(int(hexStr[i:i + 2], 16)))
return ''.join(bytes) | a424d65b0a02c0d10ee5c7c25409f4a0ce477528 | 9,438 |
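Example usage; note that under Python 3 this returns a str of code points rather than a bytes object:

assert hex_to_byte("48 65 6c 6c 6f") == "Hello"
assert hex_to_byte("0001ff") == "\x00\x01\xff"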
def _vital_config_update(cfg, cfg_in):
"""
Treat a vital Config object like a python dictionary
Args:
cfg (kwiver.vital.config.config.Config): config to update
cfg_in (dict | kwiver.vital.config.config.Config): new values
"""
    # vital cfg.merge_config doesn't support dictionary input
if isinstance(cfg_in, dict):
for key, value in cfg_in.items():
if cfg.has_value(key):
cfg.set_value(key, str(value))
else:
raise KeyError('cfg has no key={}'.format(key))
else:
cfg.merge_config(cfg_in)
return cfg | 35a0092013229f3b71a1ba06bbb660f861ef391c | 9,439 |
def SubscriberReceivedStartEncKeyVector(builder, numElems):
"""This method is deprecated. Please switch to Start."""
return StartEncKeyVector(builder, numElems) | 7c2875af0ba92e66f747bdeb2754f3123c337372 | 9,440 |
import struct
def _read_extended_field_value(value, rawdata):
"""Used to decode large values of option delta and option length
from raw binary form."""
if value >= 0 and value < 13:
return (value, rawdata)
elif value == 13:
return (rawdata[0] + 13, rawdata[1:])
elif value == 14:
return (struct.unpack('!H', rawdata[:2])[0] + 269, rawdata[2:])
else:
raise ValueError("Value out of range.") | 12a1f665f133f6ea5ffc817bf69ec0a9e0e07dbc | 9,441 |
def add_uint(a, b):
"""Returns the sum of two uint256-ish tuples."""
a = from_uint(a)
b = from_uint(b)
c = a + b
return to_uint(c) | 0da42542210e72f30f00b1a41919cdad882963d0 | 9,442 |
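from_uint and to_uint are not shown; a minimal sketch assuming the common convention that a uint256 is a (low, high) pair of 128-bit limbs (these helper definitions are assumptions, not taken from the snippet):

def from_uint(u):
    low, high = u
    return low + (high << 128)

def to_uint(n):
    return (n & ((1 << 128) - 1), n >> 128)

a = to_uint(2**128 - 1)
b = to_uint(1)
assert add_uint(a, b) == to_uint(2**128)  # carries into the high limb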
def get_dcgan_args(parser, args=[]):
"""
parameters determing the DCGAN parameters
"""
# DCGAN:
# ------------------------------------------------------------------------
parser.add_argument(
"--lam", type=float, default=10, help="Factor for scaling gradient penalty"
)
parser.add_argument(
"--wgan",
type=bool,
default=False,
help="Determine if WGAN training should be activated",
)
parser.add_argument(
"--p_drop",
type=float,
default=0.1,
help="Dropout probability for the Discriminator network",
)
# ------------------------------------------------------------------------
return parser | 28d00721fad62ecbc381190b05d81fe578860f8e | 9,443 |
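Typical usage with the standard-library parser; note that argparse's type=bool is quirky (any non-empty string, including "False", parses as True), so the --wgan flag is mostly useful through its default here:

import argparse

parser = get_dcgan_args(argparse.ArgumentParser())
args = parser.parse_args([])  # fall back to the defaults
assert (args.lam, args.wgan, args.p_drop) == (10.0, False, 0.1)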
import os
def _gen_span_id() -> str:
"""Return 16 random hexadecimal digits.
The id is used for distributed tracing.
"""
return os.urandom(8).hex() | 4c70028da278eb26c947c9ca24e0c527f6744860 | 9,444 |
from pathlib import Path
def store_tabular_data(filepath: Path, use_stem: bool = True) -> None:
"""Reads the tabular data from filepath and stores it in-memory to be plotted asychronously.
Args:
filepath (Path): The tabular data file to be read and stored.
use_stem (bool, optional): Only store the filename (without extension). Defaults to True.
"""
# Declare global variables locally
global data_glob
global data_glob_changed
floats = read_tabular_data(filepath)
if floats == []:
print('Skipping empty file', filepath)
return None
# Check that the array is not ragged; each line must be the same length!
# I'm not exactly sure why this happens, but it seems like maybe the file
# contents are not being flushed to disk before getting read back in again.
# When I manually check the files afterwards, the data is all present.
lengths = [len(x) for x in floats]
if not all([length == lengths[0] for length in lengths]):
print('Warning! Skipping ragged data in', filepath)
return None
data = np.array(floats)
if use_stem:
filepath = Path(filepath.stem)
for i in range(len(data_glob)):
(p, data_old_) = data_glob[i]
if filepath == p:
data_glob[i] = (filepath, data)
data_glob_changed = True
return None
data_glob.append((filepath, data))
data_glob_changed = True
return None | 98c1c74aefe855690ad67ba0c6f09bd574c877ce | 9,445 |
import pkgutil
import io
def load_uci_credit_card(return_X_y=False, as_frame=False):
"""Loads the UCI Credit Card Dataset.
This dataset contains a sample of [Default of Credit Card Clients Dataset](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset).
Example:
```python
from skorecard import datasets
df = datasets.load_uci_credit_card(as_frame=True)
```
Args:
return_X_y (bool): If True, returns `(data, target)` instead of a dict object.
as_frame (bool): give the pandas dataframe instead of X, y matrices (default=False).
Returns: (pd.DataFrame, dict or tuple) features and target, with as follows:
- if as_frame is True: returns pd.DataFrame with y as a target
- return_X_y is True: returns a tuple: (X,y)
- is both are false (default setting): returns a dictionary where the key `data` contains the features,
and the key `target` is the target
""" # noqa
file = pkgutil.get_data("skorecard", "data/UCI_Credit_Card.zip")
df = pd.read_csv(io.BytesIO(file), compression="zip")
df = df.rename(columns={"default.payment.next.month": "default"})
if as_frame:
return df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1", "default"]]
X, y = (
df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1"]],
df["default"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y} | ae388efcf82e0e6ff5be40ff5293d0b23d474735 | 9,446 |
def quad_lsq(x, y, verbose=False, itmax=200, iparams=[]):
"""
Fits a parabola to the data, more handy as it fits for
parabola parameters in the form y = B_0 * (x - B_1)**2 + B_2.
This is computationally slower than poly_lsq, so beware of its usage
for time consuming operations. Uses scipy odrpack, but for least squares.
Parameters
----------
x, y : 1-D arrays
Data to fit.
verbose : bool or int, optional
Can be 0,1,2 for different levels of output (False or True
are the same as 0 or 1)
itmax : int, optional
Maximum number of iterations.
iparams : 1D array, optional
Initial parameters B_0, B_1, B_2.
Returns
-------
coeff : 1-D array
Parabola coefficients
err : 1-D array
Standard error (1-sigma) on the coefficients.
"""
# Internal definition of quadratic
def _quadratic(B, x):
return B[0] * (x - B[1]) * (x - B[1]) + B[2]
def _quad_fjd(B, x):
return 2 * B[0] * (x - B[1])
def _quad_fjb(B, x):
_ret = np.concatenate((np.ones(x.shape, float),
2 * B[0] * (B[1] - x),
x * x - 2 * B[1] * x + B[1] * B[1],))
_ret.shape = (3,) + x.shape
return _ret
if any(iparams):
def _quad_est(data):
return tuple(iparams)
else:
def _quad_est(data):
return (1., 1., 1.)
quadratic = odr.Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
estimate=_quad_est)
mydata = odr.Data(x, y)
myodr = odr.ODR(mydata, quadratic, maxit=itmax)
# Set type of fit to least-squares:
myodr.set_job(fit_type=2)
if verbose == 2:
myodr.set_iprint(final=2)
fit = myodr.run()
# Display results:
if verbose:
fit.pprint()
if fit.stopreason[0] == 'Iteration limit reached':
print('(WWW) quad_lsq: iteration limit reached, result not reliable!')
# Results and errors
coeff = fit.beta
err = fit.sd_beta
return coeff, err | 02dda2ba78ac6754b913941f2204ef4aa26d3f36 | 9,447 |
import os
def find_file(directory_name, cyclone_id_string, prefer_zipped=True,
allow_other_format=True, raise_error_if_missing=True):
"""Finds NetCDF file with SHIPS data.
:param directory_name: Name of directory with SHIPS data.
:param cyclone_id_string: Cyclone ID (must be accepted by
`satellite_utils.parse_cyclone_id`).
:param prefer_zipped: Boolean flag. If True, will look for zipped file
first. If False, will look for unzipped file first.
:param allow_other_format: Boolean flag. If True, will allow opposite of
preferred file format (zipped or unzipped).
:param raise_error_if_missing: Boolean flag. If file is missing and
`raise_error_if_missing == True`, will throw error. If file is missing
and `raise_error_if_missing == False`, will return *expected* file path.
:return: ships_file_name: File path.
:raises: ValueError: if file is missing
and `raise_error_if_missing == True`.
"""
error_checking.assert_is_string(directory_name)
satellite_utils.parse_cyclone_id(cyclone_id_string)
error_checking.assert_is_boolean(prefer_zipped)
error_checking.assert_is_boolean(allow_other_format)
error_checking.assert_is_boolean(raise_error_if_missing)
ships_file_name = '{0:s}/ships_{1:s}.nc{2:s}'.format(
directory_name, cyclone_id_string,
GZIP_FILE_EXTENSION if prefer_zipped else ''
)
if os.path.isfile(ships_file_name):
return ships_file_name
if allow_other_format:
if prefer_zipped:
ships_file_name = ships_file_name[:-len(GZIP_FILE_EXTENSION)]
else:
ships_file_name += GZIP_FILE_EXTENSION
if os.path.isfile(ships_file_name) or not raise_error_if_missing:
return ships_file_name
error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
ships_file_name
)
raise ValueError(error_string) | 4041340ecb9fc404eeabfb55a6732c5c4ede82be | 9,448 |
from typing import Tuple
import re
def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]:
"""Checks that the input is of shape `name:value` and then splits it into a tuple"""
match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str)
if match is None:
raise ValueError(f'CLI variable input {mapping_str} is not of form `"name:value"`')
parsed = match.groupdict()
return parsed["name"], parsed["value"] | f701b7e85c45c2df35e1252721cd3215357909ba | 9,449 |
import json
def list_privileges_by_role(request, role):
"""
List sentry privilegs by role
:param request:
:param role: role name
:return: A Json array of SentryPrivileges: [p1, p2, p3...]
"""
sentry_privileges = _get_sentry_api(request.user).list_sentry_privileges_by_role("cdap", role)
sentry_privileges = [{"actions": p["action"], "authorizables": _sentry_authorizables_to_path(p["authorizables"])}
for p in sentry_privileges]
return HttpResponse(json.dumps(sentry_privileges), content_type="application/json") | fbb488f6d55b3a51646bc0c74f4861677cc16912 | 9,450 |
from typing import Any
import torch
from typing import Union
def to_torch_as(x: Any, y: torch.Tensor) -> Union[Batch, torch.Tensor]:
"""Return an object without np.ndarray.
Same as ``to_torch(x, dtype=y.dtype, device=y.device)``.
"""
assert isinstance(y, torch.Tensor)
return to_torch(x, dtype=y.dtype, device=y.device) | c6d71e0b903b611653b07e0f55666672dc123602 | 9,451 |
from pathlib import Path
def AllenAtlas(res_um=25, par=None):
"""
Instantiates an atlas.BrainAtlas corresponding to the Allen CCF at the given resolution
using the IBL Bregma and coordinate system
:param res_um: 25 or 50 um
:return: atlas.BrainAtlas
"""
if par is None:
# Bregma indices for the 10um Allen Brain Atlas, mlapdv
pdefault = {
'PATH_ATLAS': '/datadisk/BrainAtlas/ATLASES/Allen/',
'FILE_REGIONS': str(Path(__file__).parent.joinpath('allen_structure_tree.csv')),
'INDICES_BREGMA': list(np.array([1140 - (570 + 3.9), 540, 0 + 33.2]))
}
par = params.read('ibl_histology', default=pdefault)
if not Path(par.PATH_ATLAS).exists():
raise NotImplementedError("Atlas doesn't exist ! Mock option not implemented yet")
# TODO: mock atlas to get only the coordinate framework
pass
params.write('ibl_histology', par)
else:
par = Bunch(par)
# file_image = Path(path_atlas).joinpath(f'ara_nissl_{res_um}.nrrd')
file_image = Path(par.PATH_ATLAS).joinpath(f'average_template_{res_um}.nrrd')
file_label = Path(par.PATH_ATLAS).joinpath(f'annotation_{res_um}.nrrd')
image, header = nrrd.read(file_image, index_order='C') # dv, ml, ap
image = np.swapaxes(np.swapaxes(image, 2, 0), 1, 2) # image[iap, iml, idv]
label, header = nrrd.read(file_label, index_order='C') # dv, ml, ap
label = np.swapaxes(np.swapaxes(label, 2, 0), 1, 2) # label[iap, iml, idv]
# resulting volumes origin: x right, y front, z top
df_regions = pd.read_csv(par.FILE_REGIONS)
regions = BrainRegions(id=df_regions.id.values,
name=df_regions.name.values,
acronym=df_regions.acronym.values)
xyz2dims = np.array([1, 0, 2])
dims2xyz = np.array([1, 0, 2])
dxyz = res_um * 1e-6 * np.array([-1, -1, -1])
ibregma = (np.array(par.INDICES_BREGMA) * 10 / res_um)
return BrainAtlas(image, label, regions, dxyz, ibregma, dims2xyz=dims2xyz, xyz2dims=xyz2dims) | 0f8b55c075104ee39d42a0989a4006fe5f1ae617 | 9,452 |
def get_devices_properties(device_expr,properties,hosts=[],port=10000):
"""
Usage:
get_devices_properties('*alarms*',props,
hosts=[get_bl_host(i) for i in bls])
props must be an string as passed to Database.get_device_property();
regexp are not enabled!
get_matching_device_properties enhanced with multi-host support
@TODO: Compare performance of this method with
get_matching_device_properties
"""
expr = device_expr
if not isSequence(properties): properties = [properties]
get_devs = lambda db, reg : [d for d in db.get_device_name('*','*')
if not d.startswith('dserver') and matchCl(reg,d)]
if hosts: tango_dbs = dict(('%s:%s'%(h,port),PyTango.Database(h,port))
for h in hosts)
else: tango_dbs = {get_tango_host():get_database()}
return dict(('/'.join((host,d) if hosts else (d,)),
db.get_device_property(d,properties))
for host,db in tango_dbs.items() for d in get_devs(db,expr)) | 58ba6fa32d4118b60ca9eadfde1a28d2a98854d3 | 9,453 |
def atexit_shutdown_grace_period(grace_period=-1.0):
"""Return and optionally set the default worker cache shutdown grace period.
This only affects the `atexit` behavior of the default context corresponding to
:func:`trio_parallel.run_sync`. Existing and future `WorkerContext` instances
are unaffected.
Args:
grace_period (float): The time in seconds to wait for workers to
exit before issuing SIGKILL/TerminateProcess and raising `BrokenWorkerError`.
Pass `math.inf` to wait forever. Pass a negative value or use the default
value to return the current value without modifying it.
Returns:
float: The current grace period in seconds.
.. note::
This function is subject to threading race conditions."""
global ATEXIT_SHUTDOWN_GRACE_PERIOD
if grace_period >= 0.0:
ATEXIT_SHUTDOWN_GRACE_PERIOD = grace_period
return ATEXIT_SHUTDOWN_GRACE_PERIOD | f7440172f40b00069b149254a689521373dbded0 | 9,454 |
import os
import pandas as pd
from fnmatch import fnmatch
from pathlib import PurePath
def combine_files(root, pattern=None):
"""Combine all files in root path directory
Parameters:
root (str) : file path to directory of files
pattern (str) : optional file pattern to search for in directory
Returns:
combined files
"""
if pattern is not None:
files = [PurePath(path, name) for path, subdirs, files in os.walk(root) for name in files if fnmatch(name, pattern)]
combined_files = pd.concat([pd.read_csv(f) for f in files])
else:
files = [PurePath(path, name) for path, subdirs, files in os.walk(root) for name in files]
combined_files = pd.concat([pd.read_csv(f) for f in files])
run_date_transformation(combined_files)
return combined_files.sort_values(by="date") | cca93f9cd62245071d36dcc345ff4da02f0bbcc8 | 9,455 |
def get_point(points, cmp, axis):
""" Get a point based on values of either x or y axys.
:cmp: Integer less than or greater than 0, representing respectively
< and > singhs.
:returns: the index of the point matching the constraints
"""
index = 0
for i in range(len(points)):
if cmp < 0:
if points[i][axis] < points[index][axis]:
index = i
else:
if points[i][axis] > points[index][axis]:
index = i
return index | b59035d390e83b45a0131e28c4acf7e302cf3e45 | 9,456 |
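Example calls; cmp < 0 selects the minimum along the given axis and cmp > 0 the maximum:

points = [(1, 2), (0, 5), (3, 1)]
assert get_point(points, -1, 0) == 1  # smallest x -> (0, 5)
assert get_point(points, 1, 1) == 1   # largest y  -> (0, 5)
assert get_point(points, 1, 0) == 2   # largest x  -> (3, 1)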
import os
def benchmark_parser_header_16(nb_headers, nb_fields, do_checksum=False):
"""
This method generate the P4 program to benchmark the P4 parser
:param nb_headers: the number of generic headers included in the program
:type nb_headers: int
:param nb_fields: the number of fields (16 bits) in each header
    :type nb_fields: int
:returns: bool -- True if there is no error
"""
output_dir = 'output'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
program = add_headers_and_parsers_16(nb_headers, nb_fields, do_checksum)
program += add_ingress_block_16()
arguments = 'inout headers hdr, inout metadata meta, inout standard_metadata_t standard_metadata'
program += add_control_block_16('egress', '', '', '', arguments)
applies = '\t\tpacket.emit(hdr.ethernet);\n'
applies += '\t\tpacket.emit(hdr.ptp);\n'
for i in range(nb_headers):
applies += '\t\tpacket.emit(hdr.header_%d);\n' % i
program += add_control_block_16('DeparserImpl', '', '', applies, 'packet_out packet, in headers hdr')
program += add_control_block_16('verifyChecksum', '', '', '', 'inout headers hdr, inout metadata meta')
program += add_control_block_16('computeChecksum', '', '', '', 'inout headers hdr, inout metadata meta')
program += add_main_module()
fwd_tbl = 'forward_table'
commands = cli_commands(fwd_tbl)
with open ('%s/commands.txt' % output_dir, 'w') as out:
out.write(commands)
write_output(output_dir, program)
get_parser_header_pcap(nb_fields, nb_headers, output_dir)
generate_pisces_command(output_dir, nb_headers, nb_fields, do_checksum)
return True | b7b89823768dd4779fad9d48142d5b469aeb14bb | 9,457 |
import pathlib
def create_jobs_list(chunks, outdir, *filters):
# TO DO
# Figure out the packing/unpacking
"""
Create a list of dictionaries that hold information for the given
chunks
Arguments:
chunks: list: A list of lists. Each nested list contains the
filepaths to be processed
outdir: Path object: The directory where results will be written
filters: Callables
Return:
jobs_list: list: A list of dictionaries that holds information for
the execution of each chunk. Of the form
[
{'chunk_id' : int, (0,1,2,...)
'out_fp' : Path object, (outdir/chunk_<chunk_id>.fa.gz)
'fastas' : list of Path objects,
([PosixPath('path/to/PATRIC.faa'),...])
'filters' : list of functions
}
]
"""
jobs_list = []
for i, chunk in enumerate(chunks):
chunk_id = f"chunk_{i}"
chunk_out = f"{chunk_id}.fa.gz"
out_fp = outdir / pathlib.Path(chunk_out)
# chunk_skipped = f"{chunk_id}.skipped.txt"
chunk_fastas = chunk
chunk_dict = {
"chunk_id": chunk_id,
"fastas": chunk_fastas,
"out_fp": out_fp,
# Should there be an if filters or if len(filters) != 0 ?
"filters": [f for f in filters],
}
jobs_list.append(chunk_dict)
return jobs_list | 433992eb34bc1f80d12f8cdcee3dbd99d04d22c1 | 9,458 |
import torch
def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
""" Gather per-symbol probabilities into per-seq probabilities """
# per_symbol_probs shape: batch_size, seq_len, candidate_size
# tgt_out_idx shape: batch_size, seq_len
# output shape: batch_size, 1
return torch.prod(
torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(-1)).squeeze(2),
dim=1,
keepdim=True,
) | fc39ac129b8bbffcb602c73bc67fcc44b1d354ed | 9,459 |
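A small worked example of the gather-then-product step (shapes follow the comments in the function):

import torch

# batch_size=1, seq_len=2, candidate_size=3
per_symbol = torch.tensor([[[0.2, 0.3, 0.5],
                            [0.1, 0.6, 0.3]]])
tgt_out_idx = torch.tensor([[2, 1]])
out = per_symbol_to_per_seq_probs(per_symbol, tgt_out_idx)
assert out.shape == (1, 1)
assert torch.allclose(out, torch.tensor([[0.5 * 0.6]]))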
def solve_game(payoffs):
""" given payoff matrix for a zero-sum normal-form game,
return first mixed equilibrium (may be multiple)
returns a tuple of numpy arrays """
# .vertex_enumeration()
# .lemke_howson(initial_dropped_label=0) - does not return *all* equilibrium
game = nash.Game(payoffs)
equilibria = game.lemke_howson_enumeration()
# equilibria = game.support_enumeration() # non_degenerate=False, tol=10e-16
equilibrium = next(equilibria, None)
# Lemke-Howson couldn't find equilibrium OR
# Lemke-Howson return error - game may be degenerate. try other approaches
print(equilibrium)
print(equilibrium[0])
print(equilibrium[1])
if equilibrium is None or np.isnan(equilibrium[0]).any() or np.isnan(equilibrium[1]).any() or (equilibrium[0].shape != (payoffs.shape[0],) or equilibrium[1].shape != (payoffs.shape[1],)):
# try other
print('\n\n\n\n\nuh oh! degenerate solution')
print('payoffs are\n', payoffs)
equilibria = game.vertex_enumeration()
equilibrium = next(equilibria, None)
if equilibrium is None:
print('\n\n\n\n\nuh oh x2! degenerate solution again!!')
print('payoffs are\n', payoffs)
equilibria = game.support_enumeration() # non_degenerate=False, tol=10e-16
equilibrium = next(equilibria, None)
assert equilibrium is not None
return equilibrium | 9eb0dd84592f9a2d135c79322f6c812b775b0e74 | 9,460 |
from functools import reduce
def zone_features(df, zfeatures, aufeatures):
"""Create zone features from the data
Args:
df (DataFrame): Input dataframe
zfeatures (list): List of zone median features
aufeatures (list): List of zone autocorr features
Return: 2 dataframes
"""
# Medians from the last 1,3,6,12 months
zones_1y = df[(df['ds'] >= '2018-03-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1y.columns = ['zone_code','median_user_1y','median_bw_1y']
zones_1m = df[(df['ds'] >= '2019-02-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_1m.columns = ['zone_code','median_user_1m','median_bw_1m']
zones_3m = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_3m.columns = ['zone_code','median_user_3m','median_bw_3m']
zones_6m = df[(df['ds'] >= '2018-09-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': 'median',
'bandwidth_total': 'median'
})
zones_6m.columns = ['zone_code','median_user_6m','median_bw_6m']
# Autocorrelation features
zones_autocorr = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(['zone_code'], as_index=False).agg({
'max_user': {
'lag_user_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_user_3d' :lambda x: pd.Series.autocorr(x, 3*24),
'lag_user_1w' :lambda x: pd.Series.autocorr(x, 24*7),
},
'bandwidth_total': {
'lag_bw_1d' :lambda x: pd.Series.autocorr(x, 24),
'lag_bw_3d' :lambda x: pd.Series.autocorr(x, 3*24),
'lag_bw_1w' :lambda x: pd.Series.autocorr(x, 24*7),
}
}).fillna(0)
zones_autocorr.columns.droplevel()
zones_autocorr.reset_index()
zones_autocorr.columns = ['zone_code','lag_user_1d','lag_user_3d','lag_user_1w','lag_bw_1d','lag_bw_3d','lag_bw_1w']
zones = reduce(lambda x,y: pd.merge(x,y, on='zone_code', how='inner'), [zones_1m, zones_3m, zones_6m, zones_1y])
# Scale the zone features
scale1, scale2 = MinMaxScaler(), MinMaxScaler()
zones[zfeatures] = scale1.fit_transform(zones[zfeatures])
zones_autocorr[aufeatures] = scale2.fit_transform(zones_autocorr[aufeatures])
return zones, zones_autocorr | fb055e1c2fea040c95422818fbd6d16a97bf873f | 9,461 |
from typing import List
def get_active_validator_indices(validators: List[ValidatorRecord]) -> List[int]:
"""
Gets indices of active validators from ``validators``.
"""
return [i for i, v in enumerate(validators) if is_active_validator(v)] | 14719147b49f903240e19fbaa46da8a40315a5cf | 9,462 |
def parse_decodes(sentences, predictions, lengths, label_vocab):
"""Parse the padding result
Args:
sentences (list): the tagging sentences.
predictions (list): the prediction tags.
lengths (list): the valid length of each sentence.
label_vocab (dict): the label vocab.
Returns:
outputs (list): the formatted output.
"""
predictions = [x for batch in predictions for x in batch]
lengths = [x for batch in lengths for x in batch]
id_label = dict(zip(label_vocab.values(), label_vocab.keys()))
outputs = []
for idx, end in enumerate(lengths):
sent = sentences[idx][:end]
tags = [id_label[x] for x in predictions[idx][:end]]
sent_out = []
tags_out = []
words = ""
for s, t in zip(sent, tags):
if t.endswith('-B') or t == 'O':
if len(words):
sent_out.append(words)
tags_out.append(t.split('-')[0])
words = s
else:
words += s
if len(sent_out) < len(tags_out):
sent_out.append(words)
outputs.append(''.join(
[str((s, t)) for s, t in zip(sent_out, tags_out)]))
return outputs | bf40d8570e0a552853108e860fd193c0d9940e98 | 9,463 |
from datetime import datetime
def get_weekday(start_date, end_date, weekday_nums, repeat=None):
"""
    Get the dates corresponding to each requested weekday within a date range
:param start_date:
:param end_date:
    :param weekday_nums: list, weekday numbers 0 ~ 6
:param repeat:
:return:
"""
sdate = datetime.datetime.strptime(start_date, date_pattern1)
edate = datetime.datetime.strptime(end_date, date_pattern1)
if not repeat:
edate += datetime.timedelta(days=1)
weekdays = []
for weekday_num in weekday_nums:
tmp_date = sdate
while tmp_date < edate:
now_weekday = tmp_date.weekday()
tmp_date += datetime.timedelta(days=(((int(weekday_num)+6) % 7 - now_weekday + 7) % 7))
if tmp_date < edate:
weekdays.append(tmp_date.strftime(date_pattern1))
tmp_date += datetime.timedelta(days=7)
else:
break
return weekdays | 65e0495951647cbb6648a3a68d7fd2c7e1e2e88b | 9,464 |
def context_processor(target):
"""
Decorator that allows context processors with parameters to be assigned
(and executed properly) in a RequestContext
Example::
return render_to_response(
template_name,
context_instance=RequestContext(
request,
processors=[
test_processor1,
test_processor2(val1=test_val1, val2=test_val2),
]
)
)
"""
def cp_wrapper(*args, **kwargs):
if (len(args) == 1 and len(kwargs) == 0) \
or (len(args) == 0 and len(kwargs) == 1 and 'request' in kwargs):
return target(*args, **kwargs)
else:
def get_processor(request):
return target(request, *args, **kwargs)
return get_processor
return cp_wrapper | 842395b29aedbfe23bb3332bf343b12d26519d97 | 9,465 |
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete(uuid, local_id): # noqa: E501
"""data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete
removes tapi.common.CapacityValue # noqa: E501
:param uuid: Id of path-comp-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:rtype: None
"""
return 'do some magic!' | a3bc85df9fa77b210573058b640e47f41930ae0d | 9,466 |
import typing
import json
def decode_messages(fit_bytes: bytes) -> typing.List[typing.Dict]:
"""Decode serialized messages.
Arguments:
fit_bytes: Encoded messages
Returns:
Decoded messages
"""
messages = []
for line in fit_bytes.splitlines():
payload = json.loads(line)
messages.append(schemas.WriterMessage().load(payload))
return messages | c56a805b5c2ffee3b48be7ae88ad6a91cddd4cc5 | 9,467 |
import os
def read_DELETE(msg, hosts):
"""Parse the DELETE request and send data to the response generator function
Args:
msg (String): The request message to parse
hosts (List): The array of hosts
Returns:
        List: The response status code, the HTTP version from the request line
        and, when the target exists, the path of the deleted file or directory
"""
request_line = [i.strip() for i in msg.split("\n")[0].split(" ")]
headers = [i.rstrip() for i in msg.split("\n")[1:]]
tmp_host = ""
tmp_file = request_line[1][1:]
for i in headers:
if i.split(":")[0] == "Host":
tmp_host = i.split(": ")[1]
for i in hosts:
if tmp_host == f"localhost:{PORT}":
# CHANGE THIS LINE IN ORDER TO SEE A STUDENT'S WEBSITE IN THE BROWSER
HOST = hosts[0][0]
break
if i[0] == tmp_host:
HOST = tmp_host
break
else:
return [404, request_line[2]]
if os.path.exists(f"./{HOST}/{tmp_file}"):
if os.path.isdir(f"./{HOST}/{tmp_file}"):
os.rmdir(f"./{HOST}/{tmp_file}")
else:
os.remove(f"./{HOST}/{tmp_file}")
return [204, request_line[2], f"./{HOST}/{tmp_file}"]
else:
return [404, request_line[2]] | 5fb54a512d34e4043125e3a5cbf89cf5b362a3d9 | 9,468 |
def iresnet101(pretrained=False, progress=True, **kwargs):
"""
Constructs the IResNet-101 model trained on Glint360K(https://github.com/deepinsight/insightface/tree/master/recognition/partial_fc#4-download).
.. note::
The required input size of the model is 112x112.
Args:
pretrained (bool): Whether to download the pre-trained model on Glint360K. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> iresnet101 = flowvision.models.face_recognition.iresnet101(pretrained=False, progress=True)
"""
return _iresnet(
"iresnet101", IBasicBlock, [3, 13, 30, 3], pretrained, progress, **kwargs
) | d986282b805de959cfa2d6707532d23f1c23c31b | 9,469 |
from typing import Dict
from flask_jwt_extended import create_access_token, create_refresh_token  # assumed: these helpers come from Flask-JWT-Extended
def get_full_jwt(user: User) -> Dict:
"""
Get a full jwt response from the username and uid token.
"""
return {
'access_token': create_access_token(identity=user, fresh=True),
'refresh_token': create_refresh_token(identity=user)
} | bbc4bc12352671878edc392717d58636475001c3 | 9,470 |
import re
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def GridSearch_Prophet(prophet_grid, metric='mape'):
"""
GridSearch tool to determine the optimal parameters for prophet
Args:
- prophet_grid: List of parameters. Enter it as list(ParameterGrid(prophet_grid)
- metric: String. Not used yet. May be used to change the metric used to sort
the tested models.
Return:
- mape_table: Pandas dataframe. Show the tested parameters and median of Mean
Absolute Percentage Error calculated over 1 day.
"""
# mape_table summarizes the mean of mape according to tested parameters
mape_table = pd.DataFrame.from_dict(prophet_grid)
mape_table = mape_table[['device',
'parameter',
'begin',
'end',
'sampling_period_min',
'interval_width',
'daily_fo',
'changepoint_prior_scale']]
mape_table['mape'] = np.nan
# Loop Prophet over the prophet_grid and store the data
a = 0
name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_Prediction_' + \
str(mape_table.iloc[a, 1])
for prophet_instance in prophet_grid:
print('\nprophet_instance nb ' + str(a))
# Run Prophet
df_pred, mape = prophet(**prophet_instance)
# store the mape
mape_table.iloc[a, 8] = mape
# Save the df_pred and figure if the mape_table has 1 row (best model)
if mape_table.shape[0] == 1:
# calculate diff between begin and end
begin_str = mape_table.iloc[a, 2]
end_str = mape_table.iloc[a, 3]
d1 = datetime.strptime(begin_str, "%Y-%m-%d")
d2 = datetime.strptime(end_str, "%Y-%m-%d")
pred_duration = abs((d2 - d1).days)
# Generate the generic name
model_name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_' + str(mape_table.iloc[a, 3]) + \
'_cps_' + str(mape_table.iloc[a, 7]) + '_fo_' + str(mape_table.iloc[a, 6]) + '_' + \
str('{:02d}'.format(pred_duration)) + 'td'
# Save the figure
folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/figures/best/'
fig_name = folder_name + model_name + '.png'
plt.savefig(fig_name, bbox_inches="tight")
# Save the df_pred (prediction and actual values) as a csv
folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
csv_name = folder_name + model_name + '.csv'
df_pred.to_csv(csv_name)
# elif a+1 == mape_table.shape[0]:
# # Store the complete mape_table if this is the last prediction
# folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
# mape_table_name = folder_name + re.sub("[']", '', str(
# mape_table.iloc[0, 0])) + '_' + str(mape_table.iloc[a, 3]) + '_mape_table.csv'
mape_table = mape_table.sort_values('mape')
# mape_table.to_csv(mape_table_name)
a += 1
return mape_table | 324f6468109bfa52258d1ad6645692395be7859a | 9,471 |
def _check_max_features(importances, max_features):
"""Interpret the max_features value"""
n_features = len(importances)
if max_features is None:
max_features = n_features
elif isinstance(max_features, int):
max_features = min(n_features, max_features)
elif isinstance(max_features, float):
max_features = int(n_features * max_features)
return max_features | 816daf9d99ac4ecd2d5024a3be63f793d7669e1f | 9,472 |
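# Worked examples for _check_max_features above, using hypothetical importances:
importances = [0.2, 0.5, 0.3]
assert _check_max_features(importances, None) == 3    # None keeps all features
assert _check_max_features(importances, 10) == 3      # int is capped at n_features
assert _check_max_features(importances, 0.67) == 2    # float is a fraction of n_features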
import dask.array as da
def map_blocks(func, data=None):
    """Curried version of Dask's map_blocks
    Args:
        func: the function to map
        data: a Dask array; if omitted, a one-argument function awaiting the array is returned
    Returns:
        a new Dask array (or the curried function when ``data`` is omitted)
    >>> f = map_blocks(lambda x: x + 1)
    >>> f(da.arange(4, chunks=(2,)))
    dask.array<lambda, shape=(4,), dtype=int64, chunksize=(2,)>
    """
    if data is None:
        # Called with only ``func``: return the curried form shown in the docstring.
        return lambda d: da.map_blocks(func, d)
    return da.map_blocks(func, data) | ab97911bb147ceb6d5350fcd16300926d2a89f8e | 9,473
from pathlib import Path
from typing import Optional
from urllib.request import urlretrieve
import os
def download_and_extract(
package: str,
directory: Path,
version: Optional[str] = None,
remove_after: bool = False
) -> Path:
"""Modified to allow avoiding removing files after.
Parameters
----------
package
directory
version
remove_after
Returns
-------
Examples
--------
>>> import src.constants as cte
>>> download_and_extract('six', cte.RAW)
PosixPath('/home/agustin/github_repos/top_pypi_source_code_stats/data/raw/six-1.16.0')
"""
try:
source = get_package_source(package, version)
except ValueError:
return None
print(f"Downloading {package}.")
local_file, _ = urlretrieve(source, directory / f"{package}-src")
with get_archive_manager(local_file) as archive:
print(f"Extracting {package}")
archive.extractall(path=directory)
result_dir = get_first_archive_member(archive)
if remove_after:
os.remove(local_file)
return directory / result_dir | a836a310894d1db38bccdfd5eb1388cb1acd78cd | 9,474 |
def premises_to_syllogism(premises):
"""
>>> premises_to_syllogism(["Aab", "Ebc"])
'AE1'
"""
figure = {"abbc": "1", "bacb": "2", "abcb": "3", "babc": "4"}[premises[0][1:] + premises[1][1:]]
return premises[0][0] + premises[1][0] + figure | a048d44acea1eb4c9346880a74547a9cd100ebf0 | 9,475 |
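# A second worked example for premises_to_syllogism above: premise terms
# "ba" + "cb" match the figure-2 pattern "bacb".
assert premises_to_syllogism(["Aba", "Icb"]) == "AI2"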
import re
import logging
logger = logging.getLogger(__name__)  # assumed module-level logger used below
def fix_fits_keywords(header):
"""
Update header keyword to change '-' by '_' as columns with '-' are not
allowed on SQL
"""
new_header = {}
for key in header.keys():
new_key = key.replace('-', '_')
new_header[new_key] = header[key]
# Temporary fix - needs to be removed
# Making it backwards complatible with older files.
# Check the FILETYPE is present, if not get from filename
if 'FILETYPE' not in header.keys():
logger.warning("Adding FILETYPE from FITSNAME pattern to header to compatibility")
# Try to get it from the filename
if re.search('_passthrough.fits', header['FITSNAME']):
new_header['FILETYPE'] = 'psth'
elif re.search('_fltd.fits', header['FITSNAME']):
new_header['FILETYPE'] = 'filtered'
# For headers without FILETYPE (i.e.: yearly) we set it to raw
else:
raise Exception("ERROR: Cannot provide suitable FILETYPE from header or pattern")
logger.warning(f"Added FILETYPE {new_header['FILETYPE']} from pattern")
return new_header | 0d8a2f502252051857a131944a4c31ba8ec9ff0e | 9,476 |
def request_password(email: str, mailer: Mailer, _tn: Translator):
"""
Create new hashed password and send mail..
:param email: Mail-address which should be queried
:param mailer: pyramid Mailer
:param _tn: Translator
:return: dict with info about mailing
"""
db_user = DBDiscussionSession.query(User).filter(func.lower(User.email) == func.lower(email)).first()
if not db_user:
LOG.debug("User could not be found for mail %s", email)
return {
'success': False,
'message': _tn.get(_.emailSentGeneric)
}
if checks_if_user_is_ldap_user(db_user):
LOG.debug("User is no LDAP user")
return {
'success': False,
'message': _tn.get(_.passwordRequestLDAP)
}
rnd_pwd = get_rnd_passwd()
hashed_pwd = get_hashed_password(rnd_pwd)
db_user.password = hashed_pwd
DBDiscussionSession.add(db_user)
db_language = DBDiscussionSession.query(Language).get(db_user.settings.lang_uid)
body = _tn.get(_.nicknameIs) + db_user.nickname + '\n'
body += _tn.get(_.newPwdIs) + rnd_pwd + '\n\n'
body += _tn.get(_.newPwdInfo)
subject = _tn.get(_.dbasPwdRequest)
success, _success_message, message = send_mail(mailer, subject, body, email, db_language.ui_locales)
return {
'success': success,
'message': _tn.get(_.emailSentGeneric)
} | 09c9cbc164fc43fd953a3197c03cd7c27d758dba | 9,477 |
def is_sum_lucky(x, y):
    """This returns a string describing whether or not the sum of input is lucky
    This function first makes sure the inputs are valid and then calculates the
    sum. Then, it will determine a message to return based on whether or not
    that sum should be considered "lucky"
    """
    if x is not None and y is not None:
        if x + y == 7:
            return 'a lucky number!'
        return 'an unlucky number!'
    return 'just a normal number' | 081b5e8cc2657a00ea160e398fb00f84187e2ab6 | 9,478
import asyncio
def unsync_function(func, *args, **kwargs):
"""Runs an async function in a standard blocking way and returns output"""
return asyncio.run(func(*args, **kwargs)) | cd7c19bf226b78c9e3c4b19325e7acb4fcc90e21 | 9,479 |
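# Usage sketch for unsync_function above:
async def _add(a, b):
    return a + b
assert unsync_function(_add, 1, 2) == 3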
from typing import Iterable
from typing import Union
from typing import List
from typing import Tuple
from typing import Any
from typing import Dict
def zip_results(name: str, recipes: Iterable[Recipe], cache=CacheType.Auto) \
-> Recipe[Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]]:
"""
Create a Recipe that zips the outputs from a number of recipes into elements, similar to Python's built-in zip().
Notably, dictionaries are handled a bit differently, in that a dictionary is returned with keys mapping to tuples
from the different inputs, i.e.::
{"1": 1} zip {"1", "one"} -> {"1", (1, "one")}
:param name: The name to give the created Recipe
:param recipes: The recipes to zip. These must return lists or dictionaries
:param cache: The type of caching to use for this Recipe
:return: The created Recipe
"""
def _zip_results(*iterables: Union[List, Dict]) \
-> Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]:
# Sanity checks
if not iterables or len(iterables) == 0:
return []
if any(not isinstance(iterable, Iterable) for iterable in iterables):
raise ValueError("Cannot zip non-iterable inputs")
first_iterable = iterables[0]
if any(not isinstance(iterable, type(first_iterable)) for iterable in iterables):
raise ValueError("Cannot zip inputs of different types")
num_items = len(first_iterable)
if any(len(iterable) != num_items for iterable in iterables):
raise ValueError("Cannot zip inputs of different length")
# Handle the actual zipping operation
if isinstance(first_iterable, list):
return list(zip(*iterables))
elif isinstance(first_iterable, dict):
return {
key: tuple(iterable[key] for iterable in iterables)
for key in first_iterable.keys()
}
else:
raise ValueError("Type: {} not supported in _zip_results()".format(type(first_iterable)))
return Recipe(_zip_results, recipes, name, transient=False, cache=cache) | a1e0b7aa2d5071e485f49b0b7aa43343f8760ab2 | 9,480 |
def get_muscle_reference_dictionary():
    """
    Reference optical and acoustic properties of muscle tissue, taken from:
    @article{bashkatov2011optical,
    title={Optical properties of skin, subcutaneous, and muscle tissues: a review},
    author={Bashkatov, Alexey N and Genina, Elina A and Tuchin, Valery V},
    journal={Journal of Innovative Optical Health Sciences},
    volume={4},
    number={01},
    pages={9--38},
    year={2011},
    publisher={World Scientific}
    }
    """
    # wavelength [nm] -> (absorption [1/cm], scattering [1/cm]); the remaining fields are wavelength-independent
    optical_values = {
        650: (1.04, 87.5),
        700: (0.48, 81.8),
        750: (0.41, 77.1),
        800: (0.28, 70.4),
        850: (0.3, 66.7),
        900: (0.32, 62.1),
        950: (0.46, 59.0),
    }
    reference_dict = dict()
    for wavelength, (absorption, scattering) in optical_values.items():
        values = TissueProperties()
        values[Tags.DATA_FIELD_ABSORPTION_PER_CM] = absorption
        values[Tags.DATA_FIELD_SCATTERING_PER_CM] = scattering
        values[Tags.DATA_FIELD_ANISOTROPY] = 0.9
        values[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = calculate_gruneisen_parameter_from_temperature(37.0)
        values[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
        values[Tags.DATA_FIELD_OXYGENATION] = 0.175
        values[Tags.DATA_FIELD_DENSITY] = 1090.4
        values[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
        values[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
        reference_dict[wavelength] = values
    return reference_dict | b2bcedabce6309a11d0b1f8424ccefc06d7c8dee | 9,481
from typing import Optional
from typing import Tuple
def flake8_entrypoint(physical_line: str) -> Optional[Tuple[int, str]]:
"""Flake8 plugin entrypoint that operates on physical lines."""
match = RX_TODO_OR_ELSE.search(physical_line)
if match:
by = match.group(2)
pact = match.group(3).strip()
try:
TodoOrElse().by(pact, by=by)
except PactViolatedException as e:
return match.start(), f"{CODE} {e.short()}"
return None | e8e672f50f0f58842cbdd7d1e599a4df5b9e1be0 | 9,482 |
import shlex
from subprocess import Popen, PIPE
def smartquotes(text):
"""
Runs text through pandoc for smartquote correction.
This script accepts a paragraph of input and outputs typographically correct
text using pandoc. Note line breaks are not retained.
"""
command = shlex.split('pandoc --smart -t plain')
com = Popen(command, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
out, err = com.communicate(text.encode('utf-8'))
com_out = out.decode('utf-8')
text = com_out.replace('\n', ' ').strip()
return text | bab6ec252495d8e279cdcde7f51f60331117bae2 | 9,483 |
import numpy as np
def get_nearest_stations_xy(x, y, variable, n=1, stations=None, ignore=None):
    """find the KNMI stations that measure 'variable' closest to the
    x, y coordinates
    Parameters
    ----------
    x : int or float
        x coordinate in RD
    y : int or float
        y coordinate in RD
variable : str
measurement variable e.g. 'RD' or 'EV24'
n : int, optional
number of stations you want to return. The default is 1.
stations : pd.DataFrame, optional
if None stations will be obtained using the get_stations function.
The default is None.
ignore : list, optional
list of stations to ignore. The default is None.
Returns
-------
list
station numbers.
"""
if stations is None:
stations = get_stations(variable=variable)
if ignore is not None:
stations.drop(ignore, inplace=True)
if stations.empty:
return None
d = np.sqrt((stations.x - x)**2 + (stations.y - y)**2)
return d.nsmallest(n).index.to_list() | 2d19e64054eb0813919e2a286c686b91e6d0a6f4 | 9,484 |
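# Usage sketch for get_nearest_stations_xy above: passing a pre-built stations
# frame avoids the KNMI download in get_stations(). Station numbers and RD
# coordinates here are illustrative only.
import pandas as pd
stations = pd.DataFrame({"x": [100000, 150000], "y": [400000, 450000]}, index=[260, 344])
nearest = get_nearest_stations_xy(140000, 445000, variable="EV24", n=1, stations=stations)
# nearest == [344]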
import networkx as nx
def parseStdInput():
    """Obtain a graph by parsing the standard input
    as per the format specified in the PACE Challenge.
    """
    # Placeholder implementation: returns a fixed 4-cycle instead of reading stdin.
    edges = [(1,2),(2,3),(3,4),(4,1)]
G = nx.Graph()
G.add_edges_from(edges)
return G | 4e26d50c590321241101586d9e83b2d53c7324ea | 9,485 |
from string import Formatter
def strfdelta(tdelta, fmt):
""" Get a string from a timedelta.
"""
f, d = Formatter(), {}
l = {"D": 86400, "H": 3600, "M": 60, "S": 1}
k = list(map(lambda x: x[1], list(f.parse(fmt))))
rem = int(tdelta.total_seconds())
for i in ("D", "H", "M", "S"):
if i in k and i in l.keys():
d[i], rem = divmod(rem, l[i])
return f.format(fmt, **d) | 01e7d3678cc88a08ec91e64dd59037294f17d9fe | 9,486 |
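# Worked example for strfdelta above:
from datetime import timedelta
assert strfdelta(timedelta(days=1, hours=2, minutes=3, seconds=4),
                 "{D}d {H}h {M}m {S}s") == "1d 2h 3m 4s"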
from pathlib import Path
def get_file_list_from_dir(parent_dir: Path, file_mask: str = "*") -> list:
"""
Recursively gets a list of files in a Path directory with the specified name mask
and return absolute string paths for files
"""
get_logger(__name__).debug("Iterating for files in '{}'".format(parent_dir.absolute()))
src_glob = parent_dir.rglob(file_mask)
src_files = [str(f.absolute()) for f in src_glob if f.is_file()]
get_logger(__name__).debug("Iterated and found {} files in '{}'".format(len(src_files), parent_dir.absolute()))
return src_files | 16a4b89751343ea0e8472160ef376ae008819a81 | 9,487 |
def imputation_Y(X, model):
"""Perform imputation. Don't normalize for depth.
Args:
X: feature matrix from h5.
model: a trained scBasset model.
Returns:
array: a peak*cell imputed accessibility matrix. Sequencing depth
isn't corrected for.
"""
Y_impute = model.predict(X)
return Y_impute | 75e2de758c3544655d4332098d4398255770d7c3 | 9,488 |
def format_percent(x, _pos=None):
"""
plt.gca().yaxis.set_major_formatter(format_percent)
"""
x = 100 * x
if abs(x - round(x)) > 0.05:
return r"${:.1f}\%$".format(x)
else:
return r"${:.0f}\%$".format(x) | 27362ffa3b5726c135afdf034208eeca8d7c4f60 | 9,489 |
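# Worked examples for format_percent above (fraction in, LaTeX percent string out):
assert format_percent(0.1234) == r"$12.3\%$"
assert format_percent(0.25) == r"$25\%$"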
def is_row_and_col_balanced(T1, T2):
"""
Partial latin squares T1 and T2 are balanced if the symbols
appearing in row r of T1 are the same as the symbols appearing in
row r of T2, for each r, and if the same condition holds on
columns.
EXAMPLES::
sage: from sage.combinat.matrices.latin import *
sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: is_row_and_col_balanced(T1, T2)
True
sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
sage: is_row_and_col_balanced(T1, T2)
False
"""
for r in range(T1.nrows()):
val1 = set(x for x in T1.row(r) if x >= 0)
val2 = set(x for x in T2.row(r) if x >= 0)
if val1 != val2: return False
for c in range(T1.ncols()):
val1 = set(x for x in T1.column(c) if x >= 0)
val2 = set(x for x in T2.column(c) if x >= 0)
if val1 != val2: return False
return True | f0a9d1522da2fc079d4021603198e79c438de727 | 9,490 |
def submit(ds, entry_name, molecule, index):
"""
Submit an optimization job to a QCArchive server.
Parameters
----------
ds : qcportal.collections.OptimizationDataset
The QCArchive OptimizationDataset object that this calculation
belongs to
entry_name : str
The base entry name that the conformation belongs to. Usually,
this is a canonical SMILES, but can be anything as it is represents
a key in a dictionary-like datastructure. This will be used as an
entry name in the dataset
molecule : QCMolecule
The JSON representation of a QCMolecule, which has geometry
and connectivity present, among others
index : int
The conformation identifier of the molecule. This is used to make
the entry names unique, since each conformation must have its own
unique entry in the dataset in the dataset
Returns
-------
(unique_id, success): tuple
unique_id : str
The unique_id that was submitted to the dataset. This is the name
of the new entry in the dataset.
success : bool
Whether the dataset was able to successfully add the entry. If this
is False, then the entry with the name corresponding to unique_id
was already present in the dataset.
"""
# This workaround prevents cmiles from crashing if OE is installed but has
# no license. Even though rdkit is specified, protomer enumeration is OE-
# specific and still attempted.
# oe_flag = cmiles.utils.has_openeye
# cmiles.utils.has_openeye = False
# attrs = cmiles.generator.get_molecule_ids(molecule, toolkit="rdkit")
# cmiles.utils.has_openeye = oe_flag
CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
molecule["extras"] = {CIEHMS: entry_name}
attrs = {CIEHMS: entry_name}
unique_id = entry_name + f"-{index}"
success = False
try:
ds.add_entry(unique_id, molecule, attributes=attrs, save=False)
success = True
except KeyError:
pass
return unique_id, success | 50a30a25af59906ce5636ce8a176e29befd27d60 | 9,491 |
def list_isos(apiclient, **kwargs):
"""Lists all available ISO files."""
cmd = listIsos.listIsosCmd()
[setattr(cmd, k, v) for k, v in kwargs.items()]
return(apiclient.listIsos(cmd)) | ad3117c6fc2c7bc4543372d306d0d476918d5898 | 9,492 |
from .....main import _get_bot
from typing import Optional
from typing import Union
async def edit_message_live_location(
token: str = TOKEN_VALIDATION,
latitude: float = Query(..., description='Latitude of new location'),
longitude: float = Query(..., description='Longitude of new location'),
chat_id: Optional[Union[int, str]] = Query(None, description='Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)'),
message_id: Optional[int] = Query(None, description='Required if inline_message_id is not specified. Identifier of the message to edit'),
inline_message_id: Optional[str] = Query(None, description='Required if chat_id and message_id are not specified. Identifier of the inline message'),
horizontal_accuracy: Optional[float] = Query(None, description='The radius of uncertainty for the location, measured in meters; 0-1500'),
heading: Optional[int] = Query(None, description='Direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.'),
proximity_alert_radius: Optional[int] = Query(None, description='Maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.'),
reply_markup: Optional[Json['InlineKeyboardMarkupModel']] = Query(None, description='A JSON-serialized object for a new inline keyboard.'),
) -> JSONableResponse:
"""
Use this method to edit live location messages. A location can be edited until its live_period expires or editing is explicitly disabled by a call to stopMessageLiveLocation. On success, if the edited message is not an inline message, the edited Message is returned, otherwise True is returned.
https://core.telegram.org/bots/api#editmessagelivelocation
"""
reply_markup: Optional[InlineKeyboardMarkupModel] = parse_obj_as(
Optional[InlineKeyboardMarkupModel],
obj=reply_markup,
)
bot = await _get_bot(token)
try:
entity = await get_entity(bot, chat_id)
except BotMethodInvalidError:
assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@')
entity = chat_id
except ValueError:
raise HTTPException(404, detail="chat not found?")
# end try
result = await bot.edit_message_live_location(
latitude=latitude,
longitude=longitude,
entity=entity,
message_id=message_id,
inline_message_id=inline_message_id,
horizontal_accuracy=horizontal_accuracy,
heading=heading,
proximity_alert_radius=proximity_alert_radius,
reply_markup=reply_markup,
)
data = await to_web_api(result, bot)
return r_success(data.to_array()) | 39eef452e570e4b00b08aa66aba6d4253bce154f | 9,493 |
import numpy as np
def process_rollout(rollout, gamma, lambda_=1.0):
"""
given a rollout, compute its returns and the advantage
"""
batch_si = np.asarray(rollout.states)
batch_a = np.asarray(rollout.actions)
rewards = np.asarray(rollout.rewards)
action_reward = np.concatenate((batch_a,rewards[:,np.newaxis]), axis=1)
vpred_t = np.asarray(rollout.values + [rollout.r])
rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
batch_r = discount(rewards_plus_v, gamma)[:-1]
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
# this formula for the advantage comes "Generalized Advantage Estimation":
# https://arxiv.org/abs/1506.02438
batch_adv = discount(delta_t, gamma * lambda_)
features = rollout.features
batch_pc = np.asarray(rollout.pixel_changes)
return Batch(batch_si, batch_a, action_reward, batch_adv, batch_r, rollout.terminal, features, batch_pc) | da37f8b55294df5204f18772552e72d2131dd072 | 9,494 |
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add sensors for passed config_entry in HA."""
coordinator: IotawattUpdater = hass.data[DOMAIN][config_entry.entry_id]
created = set()
@callback
def _create_entity(key: str) -> IotaWattSensor:
"""Create a sensor entity."""
created.add(key)
return IotaWattSensor(
coordinator=coordinator,
key=key,
mac_address=coordinator.data["sensors"][key].hub_mac_address,
name=coordinator.data["sensors"][key].getName(),
entity_description=ENTITY_DESCRIPTION_KEY_MAP.get(
coordinator.data["sensors"][key].getUnit(),
IotaWattSensorEntityDescription("base_sensor"),
),
)
async_add_entities(_create_entity(key) for key in coordinator.data["sensors"])
@callback
def new_data_received():
"""Check for new sensors."""
entities = [
_create_entity(key)
for key in coordinator.data["sensors"]
if key not in created
]
if entities:
async_add_entities(entities)
coordinator.async_add_listener(new_data_received) | 171d65acf5227ed9027481bcc2eb773bee52bbca | 9,495 |
import datetime
import calendar
def calculate_cost(cost, working_days_flag, month, nr_of_passes):
"""Calculate the monthly tolls cost"""
if working_days_flag:
passes = working_days(month) * nr_of_passes
else:
now = datetime.datetime.now()
passes = calendar.monthrange(now.year, month)[1] * nr_of_passes
total_cost = 0
for i in range(1, passes + 1):
if 1 <= i <= 5:
total_cost += cost
elif 6 <= i <= 10:
total_cost += cost - (cost * 15 / 100)
elif 11 <= i <= 20:
total_cost += cost - (cost * 30 / 100)
elif 21 <= i <= 30:
total_cost += cost - (cost * 40 / 100)
elif 31 <= i <= 40:
total_cost += cost - (cost * 50 / 100)
elif 41 <= i <= 60:
total_cost += cost - (cost * 60 / 100)
else:
total_cost += cost
return total_cost | 5221e0dedd56d7d3302aa88cdf9ad7feb67173a3 | 9,496 |
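# Usage sketch for calculate_cost above: March tolls for 2 passes per calendar
# day at 5.0 per pass. working_days_flag=False avoids the external
# working_days() helper; the figures are illustrative only.
march_total = calculate_cost(cost=5.0, working_days_flag=False, month=3, nr_of_passes=2)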
from re import search
def e_dl() -> str:
"""Fetch size of archives to be downloaded for next system update."""
size = 'Calculating...'
with open(file=TMERGE_LOGFILE, mode='r', encoding='utf-8') as log_file:
for line in list(log_file)[::-1]:
reqex = search(r'(Size of downloads:.)([0-9,]*\s[KMG]iB)', line)
if reqex is not None:
size = reqex.group(2)
break
print(size)
return size | 1639d6cd0e78ca4f4adfceb75875f6b0de398a63 | 9,497 |
import tensorflow as tf  # TF 1.x-style graph/estimator code (uses tf.contrib)
def get_model_fn():
"""Returns the model definition."""
def model_fn(features, labels, mode, params):
"""Returns the model function."""
feature = features['feature']
print(feature)
labels = labels['label']
one_hot_labels = model_utils.get_label(
labels,
params,
FLAGS.src_num_classes,
batch_size=FLAGS.train_batch_size)
def get_logits():
"""Return the logits."""
avg_pool = model.conv_model(feature, mode)
name = 'final_dense_dst'
with tf.variable_scope('target_CLS'):
logits = tf.layers.dense(
inputs=avg_pool, units=FLAGS.src_num_classes, name=name)
return logits
logits = get_logits()
logits = tf.cast(logits, tf.float32)
dst_loss = tf.losses.softmax_cross_entropy(
logits=logits,
onehot_labels=one_hot_labels,
)
dst_l2_loss = FLAGS.weight_decay * tf.add_n([
tf.nn.l2_loss(v)
for v in tf.trainable_variables()
if 'batch_normalization' not in v.name and 'kernel' in v.name
])
loss = dst_loss + dst_l2_loss
train_op = None
if mode == tf.estimator.ModeKeys.TRAIN:
cur_finetune_step = tf.train.get_global_step()
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
finetune_learning_rate = lr_schedule()
optimizer = tf.train.AdamOptimizer(finetune_learning_rate)
train_op = tf.contrib.slim.learning.create_train_op(loss, optimizer)
with tf.variable_scope('finetune'):
train_op = optimizer.minimize(loss, cur_finetune_step)
else:
train_op = None
eval_metrics = None
if mode == tf.estimator.ModeKeys.EVAL:
eval_metrics = model_utils.metric_fn(labels, logits)
if mode == tf.estimator.ModeKeys.TRAIN:
with tf.control_dependencies([train_op]):
tf.summary.scalar('classifier/finetune_lr', finetune_learning_rate)
else:
train_op = None
return tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metrics,
)
return model_fn | ef006ff79c6979a61a745ebfecd599858ded0418 | 9,498 |
def build_node(idx, node_type):
""" Build node list
:idx: a value to id mapping dict
:node_type: a string describe the node type
:returns: a list of records of the nodes extracted from the mapping
"""
return rekey(idx, 'value', 'id:ID', {':LABEL': node_type}) | cf9cb20b152aa55ef7f37ee1e2f513d166e2b7c5 | 9,499 |