content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def get_all_elems_from_json(search_json: dict, search_key: str) -> list:
"""Returns values by key in all nested dicts.
Args:
search_json: Dictionary in which one needs to find all values by specific key.
search_key: Key for search.
Returns:
List of values stored in nested structures by ``search_key``.
Examples:
>>> get_all_elems_from_json({'a':{'b': [1,2,3]}, 'b':42}, 'b')
[[1, 2, 3], 42]
"""
result = []
if isinstance(search_json, dict):
for key in search_json:
if key == search_key:
result.append(search_json[key])
else:
result.extend(get_all_elems_from_json(search_json[key], search_key))
elif isinstance(search_json, list):
for item in search_json:
result.extend(get_all_elems_from_json(item, search_key))
return result
|
6ab45e33962ccb5996b50d13e57626365c4ed78b
| 32,256 |
def prFinalNodeName(q):
"""In : q (state : string)
Out: dot string (string)
Return dot string for generating final state (double circle)
"""
return dot_san_str(q) + '[shape=circle, peripheries=2];'
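# Usage sketch (assuming dot_san_str simply sanitizes the state name, which is not shown here):
# prFinalNodeName('q0') would return something like 'q0[shape=circle, peripheries=2];'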
|
8a4e5649ebeb0c68f2e1741fefd935c9a5f919bf
| 32,258 |
import typing
import datetime
def decodeExifDateTime(value: str) -> typing.Optional[datetime.datetime]:
"""
Decode an EXIF 'YYYY:MM:DD HH:MM:SS' timestamp string into a datetime, or return None if it cannot be parsed.
"""
try:
d = datetime.datetime.strptime(value, '%Y:%m:%d %H:%M:%S')
return d
except ValueError:
return None
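# Usage sketch for the format above (the example timestamp is illustrative only):
# decodeExifDateTime('2021:06:01 12:30:00') -> datetime.datetime(2021, 6, 1, 12, 30)
# decodeExifDateTime('not a date') -> None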
|
a1ce11305e8e486ad643530930368c47f1c073ef
| 32,259 |
def parse(file: str) -> Env:
"""Parse an RLE file and create a user environment
Parameters
----------
file: str
Path to the RLE file.
Returns
-------
user_env: `dict` [str, `Any`]
User environment returned from ``user_env()``. It has these attributes:
``width``
Width of the Game of Life matrix.
``height``
Height of the Game of Life matrix.
``rule``
Rule used to run the simulation.
``seed``
Seed to base simulation on.
"""
return user_env(parse_file(file))
|
cf0a884169b22f4781450c78a35b33ef43049d65
| 32,260 |
import six
def logger_has_handlers(logger):
"""
Check if given logger has at least 1 handler associated, return a boolean value.
Since Python 2 doesn't provide Logger.hasHandlers(), we have to perform the lookup by ourself.
"""
if six.PY3:
return logger.hasHandlers()
else:
c = logger
rv = False
while c:
if c.handlers:
rv = True
break
if not c.propagate:
break
else:
c = c.parent
return rv
|
dc0093dd25a41c997ca92759ccb9fa17ad265bdd
| 32,261 |
import json
import pandas as pd
def query_parameters(prefix, arpnum, t_recs, keywords, redis=True):
"""Query keyword sequence from a header file.
Alternative design: replace prefix and arpnum with filepath.
"""
KEYWORDS = ['T_REC', 'AREA', 'USFLUXL', 'MEANGBL', 'R_VALUE']
if redis:
id = f'{prefix}{arpnum:06d}' # header file identifier
if r_header.exists(id) == 0:
dataset = 'sharp' if prefix == 'HARP' else 'smarp'
header = read_header(dataset, arpnum)
header = header[KEYWORDS]
header = header.set_index('T_REC')
mapping = {t_rec: header.loc[t_rec].to_json() for t_rec in header.index}
r_header.hmset(id, mapping)
buff = r_header.hmget(id, t_recs)
# series = [pd.read_json(b, typ='series') if b else None for b in buff]
# if any([s is None for s in series]):
# print(series)
records = [json.loads(b) if b else {} for b in buff]
df = pd.DataFrame(records, index=t_recs)[keywords] # Takes up 61% of the time
else:
raise NotImplementedError('Only the redis-backed lookup is implemented')
return df
|
84c0a43d6d045e3255478175697cbb0bfaac5da8
| 32,262 |
import numpy as np
def x0_rand(mu3,xb,num_min):
"""
Randomly initialise the 5 protocol parameters using the specified bounds.
Parameters and bounds should be specified in the order {Px,pk1,pk2,mu1,mu2}.
Parameters
----------
mu3 : float
Intensity of pulse 3 (vacuum).
xb : float, array-like
Upper and lower bounds for the protocol parameters. (5,2)
num_min : float
An arbitrarily small number.
Returns
-------
x0 : float, array
Randomly initialised protocol parameters.
"""
Px_i = np.random.rand() * (xb[0,1] - xb[0,0] - 2*num_min) + xb[0,0] + \
num_min
pk1_i, pk2_i = 1.0, 1.0
while (pk1_i+pk2_i >= 1.0):
pk1_i = np.random.rand() * (xb[1,1] - xb[1,0] - 2*num_min) + \
xb[1,0] + num_min
pk2_i = np.random.rand() * (min(xb[2,1],1-pk1_i) - xb[2,0] - \
2*num_min) + xb[2,0] + num_min
mu1_i = np.random.rand() * (xb[3,1] - max(xb[3,0],2*mu3) - 2*num_min) + \
max(xb[3,0],2*mu3) + num_min
mu2_i = np.random.rand() * (min(xb[4,1],mu1_i) - max(xb[4,0],mu3) - \
2*num_min) + max(xb[4,0],mu3) + num_min
return np.array([Px_i,pk1_i,pk2_i,mu1_i,mu2_i])
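# Illustrative call (hypothetical bounds, not from the original source): xb is a (5, 2)
# array of [lower, upper] bounds in the order {Px, pk1, pk2, mu1, mu2}, e.g.
# xb = np.array([[0.3, 1.0], [0.6, 0.9999], [0.0, 0.4], [0.3, 1.0], [0.1, 0.5]])
# x0 = x0_rand(mu3=1e-6, xb=xb, num_min=1e-9)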
|
fcf32cd7367e7b78e48829f72523f50855ba563e
| 32,264 |
from django.utils.safestring import mark_safe
def render_smiles_list(smiles_list):
"""
Format one or more solvent SMILES strings as an HTML snippet and return it marked safe.
"""
# The string that will be returned to the template
result = r'<h3>Solvent SMILES:</h3>' + '\n'
result += r'<p>'
if len(smiles_list) == 1:
result += smiles_list[0]
else:
result += 'This is a mixture of the following solvents: '
for smiles in smiles_list:
result += f'{smiles}, '
result = result[:-2]
result += r'</p>'
return mark_safe(result)
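# Illustrative output (hypothetical input): render_smiles_list(['CCO', 'O']) returns the HTML
# '<h3>Solvent SMILES:</h3>\n<p>This is a mixture of the following solvents: CCO, O</p>'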
|
f6207bb63452d1037c321874b8ed5248e89dc83e
| 32,265 |
def get_config_id(kwargs=None, call=None):
"""
Returns a config_id for a given linode.
.. versionadded:: 2015.8.0
name
The name of the Linode for which to get the config_id. Can be used instead
of ``linode_id``.
linode_id
The ID of the Linode for which to get the config_id. Can be used instead
of ``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_config_id my-linode-config name=my-linode
salt-cloud -f get_config_id my-linode-config linode_id=1234567
"""
if call == "action":
raise SaltCloudException(
"The get_config_id function must be called with -f or --function."
)
return _get_cloud_interface().get_config_id(kwargs=kwargs)
|
b66eda936157d0c6794289ed90acd681a5d31c02
| 32,266 |
def bunq_oauth_reauthorize():
""" Endpoint to reauthorize OAuth with bunq """
cookie = request.cookies.get('session')
if cookie is None or cookie != util.get_session_cookie():
return render_template("message.html", msgtype="danger", msg=\
"Invalid request: session cookie not set or not valid")
return auth.bunq_oauth_reauthorize()
|
57708acb8e4c726640360bfb1263ede323571c15
| 32,267 |
from typing import List
from typing import Tuple
def extract_mealentries(meals: List[Meal]) -> List[Tuple]:
"""
Extract meal entries records from a sequence of myfitnesspal meals.
Args:
- meals (List[Meal]): A list with meal objects to extract data from
Returns:
- List[Tuple]: A list with meal record values
"""
return [
(
meal.username,
meal.date,
meal.name,
entry.short_name,
entry.quantity,
entry.unit,
entry.totals.get("calories", None),
entry.totals.get("carbohydrates", None),
entry.totals.get("fat", None),
entry.totals.get("protein", None),
entry.totals.get("sodium", None),
entry.totals.get("sugar", None),
)
for meal in meals
for entry in meal.entries
]
|
c7c043cee0b4af1253902080af67919cc9238d75
| 32,269 |
import math
def get_45point_spiralling_sphere_with_normal_zaxis_dist( num_of_spirals = 4, num_of_vertices = 45):
"""
A sphere of spiralling points. Each point is equally spaced on the x,y,z axes. The equal spacing is calculated by dividing the straight-line spiral distance by 45.
Adapted from Leonsim's code here: https://github.com/leonsim/sphere/blob/9ec92d2f5411171776e1176be58468af68a93442/Sphere.cpp
"""
vertices = []
xy_degree_change = (num_of_spirals * 360) / num_of_vertices
zaxis_dist = get_normalised_normal_curve()
c = math.pi / 180.0 # Degrees to radians
phiStart = 90.0 # Default 100
thetaStart = 180.0 # Default 180
theta = -thetaStart
phi = -phiStart
index = -1
while phi <= (phiStart):
index +=1
phir = c * phi
thetar = c * theta
x = math.sin(thetar) * math.cos(phir)
y = math.cos(thetar) * math.cos(phir)
z = math.sin(phir)
v1 = (x, y, z)
vertices.append(v1)
theta += xy_degree_change
print("Vertex:"+str(index)+", zAxis:"+str(len(zaxis_dist))+", Vertices:"+str(len(vertices)))
z_degree_change = (360 /num_of_vertices) * zaxis_dist[index]
phi += z_degree_change
return vertices
|
59173bdd28b513d0f039215ea7d713cd80d81b4e
| 32,270 |
import re
import json
def parse_results(line):
"""Parses and logs event information from logcat."""
header = re.search(r'cr_PasswordChangeTest: (\[[\w|:| |#]+\])', line).group(1)
print(header)
credentials_count = re.search(r'Number of stored credentials: (\d+).', line)
if not credentials_count:
# Event does not contain any credentials information.
# Print an empty line and continue.
print()
return
print('Number of stored credentials: %s' % credentials_count.group(1))
def build_credential(credential_match):
return {
'url': credential_match.group(1),
'username': credential_match.group(2),
'password': credential_match.group(3),
}
re_all_credentials = re.compile(r'PasswordStoreCredential\{.*?\}')
re_credential_info = re.compile(
r'PasswordStoreCredential\{url=(.*?), username=(.*?), password=(.*?)\}')
credentials = [
build_credential(re_credential_info.search(credential))
for credential in re_all_credentials.findall(line)
]
# Print credentials with json format.
print(json.dumps(credentials, indent=2))
print()
|
24cc928d945d2d4f16f394be68a8bb217c21b342
| 32,273 |
def bqwrapper(datai):
"""
Wraps the kdtree ball query for concurrent tree search.
"""
return kdtbq(datai, r=bw[0])
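# Rough usage sketch (assumes kdtbq and bw are module-level globals set up before the pool is created):
# with multiprocessing.Pool() as pool:
#     neighbour_lists = pool.map(bqwrapper, data_chunks)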
|
203e77e37ddb53b76366b0d376c37b63536da923
| 32,274 |
import re
import pandas as pd
from lxml import etree
def crawl_user_movies():
"""
@Function: supplement details of the movies that users have watched
@Params: none
@Returns: list of movie records
"""
user_df = pd.read_csv('douban_users.csv')
user_df = user_df.iloc[:, [1, 2, 3]]
user_movies = list(user_df['movie_id'].unique())
movies = [] # collected movie records
count = 1 # progress counter for logging
for i in user_movies:
url = 'https://movie.douban.com/subject/{}/'.format(str(i))
text = get_html(url) # fetch each movie's page
if text is None:
count += 1 # progress counter for logging
continue
html = etree.HTML(text) # parse the movie page
info = html.xpath("//div[@class='subject clearfix']/div[@id='info']//text()") # raw info entries for the movie
# movie ID
dataID = i
# movie title
name = html.xpath("//*[@id='content']/h1/span[1]/text()")[0]
name = name.split(' ')[0]
# English title
english_name = html.xpath("//*[@id='content']/h1/span[1]/text()")[0]
# check whether the string contains English letters
if bool(re.search('[A-Za-z]', english_name)):
english_name = english_name.split(' ') # split the string to extract the English title
del english_name[0] # drop the Chinese title
english_name = ' '.join(english_name) # re-join the remaining parts with spaces
else:
english_name = None
# directors
flag = 1
directors = []
for i in range(len(info)):
if info[i] == '导演':
for j in range(i + 1, len(info)):
if info[j] == '编剧':
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese or English characters
if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
directors.append(info[j].strip())
flag = 0
break
if flag == 0:
break
if flag == 0:
break
directors = ''.join(directors) # join into a single string
# screenwriters
flag = 1
writer = []
for i in range(len(info)):
if info[i] == '编剧':
for j in range(i + 1, len(info)):
if info[j] == '主演':
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese or English characters
if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
writer.append(info[j].strip())
flag = 0
break
if flag == 0:
break
if flag == 0:
break
writer = ''.join(writer) # join into a single string
# leading actors
flag = 1
actors = []
for i in range(len(info)):
if info[i] == '主演':
for j in range(i + 1, len(info)):
if info[j] == '类型:':
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese or English characters
if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
actors.append(info[j].strip())
flag = 0
break
if flag == 0:
break
if flag == 0:
break
actors = ''.join(actors) # join into a single string
# rating
try:
rate = html.xpath("//div[@class='rating_wrap clearbox']/div[@class='rating_self clearfix']/strong[@class='ll rating_num']/text()")[0]
except Exception:
rate = None
# genres
flag = 1
style = []
for i in range(len(info)):
if info[i] == '类型:':
for j in range(i + 1, len(info)):
if (info[j] == '制片国家/地区:') or (info[j] == '官方网站:'):
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese characters
if u'\u4e00' <= ch <= u'\u9fff':
style.append(info[j])
if len(style) == 3:
flag = 0
break
break
if flag == 0:
break
if flag == 0:
break
# store up to three genres in separate fields
if len(style) == 0:
style1 = None
style2 = None
style3 = None
if len(style) == 1:
style1 = style[0]
style2 = None
style3 = None
if len(style) == 2:
style1 = style[0]
style2 = style[1]
style3 = None
if len(style) == 3:
style1 = style[0]
style2 = style[1]
style3 = style[2]
# country
flag = 1
country = []
for i in range(len(info)):
if info[i] == r'制片国家/地区:':
for j in range(i + 1, len(info)):
if info[j] == '语言:':
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese characters
if u'\u4e00' <= ch <= u'\u9fff':
country.append(info[j].strip())
flag = 0
break
if flag == 0:
break
if flag == 0:
break
country = country[0].split(r'/')
country = country[0]
# language
flag = 1
language = []
for i in range(len(info)):
if info[i] == '语言:':
for j in range(i + 1, len(info)):
if info[j] == '上映日期:':
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese characters
if u'\u4e00' <= ch <= u'\u9fff':
language.append(info[j].strip())
flag = 0
break
if flag == 0:
break
if flag == 0:
break
try:
language = language[0].split(r'/')
language = language[0]
except Exception:
language = None
# release date
flag = 1
date = []
for i in range(len(info)):
if info[i] == '上映日期:':
for j in range(i + 1, len(info)):
if (info[j] == '片长:') or (info[j] == '又名:'):
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese or English characters
if (u'\u4e00' <= ch <= u'\u9fff') or (bool(re.search('[a-z]', info[j]))):
date.append(re.search(r'\d+', info[j]).group(0))
flag = 0
break
if flag == 0:
break
if flag == 0:
break
date = ''.join(date) # join into a single string
# runtime
flag = 1
duration = []
for i in range(len(info)):
if info[i] == '片长:':
for j in range(i + 1, len(info)):
if (info[j] == '又名:') or (info[j] == 'IMDb链接:'):
flag = 0
break
for ch in info[j]:
# check whether the string contains Chinese characters
if u'\u4e00' <= ch <= u'\u9fff':
info[j] = info[j].split('/')[0]
duration.append(re.search(r'\d+', info[j].strip()).group(0))
flag = 0
break
if flag == 0:
break
if flag == 0:
break
duration = ''.join(duration) # join into a single string
# poster image
pic = html.xpath("//div[@id='mainpic']/a[@class='nbgnbg']/img/@src")[0]
# synopsis
introduction = ''.join(html.xpath("//div[@class='related-info']/div[@id='link-report']/span/text()")).strip().replace(' ', '').replace('\n', '').replace('\xa0', '').replace(u'\u3000', u' ')
# collect this movie's fields into a list and append it to the movies list
each_movie = [name, english_name, directors, writer, actors, rate, style1, style2, style3,
country, language, date, duration, introduction, dataID, url, pic]
movies.append(each_movie)
print("Successfully parsed movie No." + str(count) + ": ", each_movie)
count += 1 # progress counter for logging
return movies
|
882fe56fc2fc5e22b6ad0ce518b7adaabd724cd2
| 32,275 |
import logging
import pandas as pd
from shapely.geometry import Polygon
def filter_by_shape(data: pd.DataFrame, geofence: Polygon) -> pd.DataFrame:
"""Remove trips outside of geofence. Filter by pickup and dropoff locations"""
logging.info('Filtering by bbox')
(min_lon, min_lat, max_lon, max_lat) = geofence.bounds
data = data[
(data.pickup_longitude > min_lon) & (data.pickup_longitude < max_lon) &
(data.pickup_latitude > min_lat) & (data.pickup_latitude < max_lat) &
(data.dropoff_longitude > min_lon) & (data.dropoff_longitude < max_lon) &
(data.dropoff_latitude > min_lat) & (data.dropoff_latitude < max_lat)
]
logging.info(f"Data shape {data.shape}")
return data
|
fa98c85ea286921e9a986820a7a17e03e94181dc
| 32,276 |
from ..core import cache as cache
def upload_collection(flask_app, filenames, runs, dataset_id, collection_id,
descriptions=None, cache=None):
""" Create new Predictors from TSV files
Args:
filenames list of (str): List of paths to TSVs
runs list of (int): List of run ids to apply events to
dataset_id (int): Dataset id.
collection_id (int): Id of collection object
descriptions (dict): Optional descriptions for each column
cache (obj): Optional flask cache object
"""
if cache is None:
if descriptions is None:
descriptions = {}
collection_object = PredictorCollection.query.filter_by(
id=collection_id).one()
# Load into pandas
try:
events = [pd.read_csv(f, sep='\t') for f in filenames]
except Exception as e:
update_record(
collection_object,
exception=e,
traceback='Error reading event files'
)
raise
# Check columns are all the same across all files
cols = [set(e.columns) for e in events]
common_cols = set.intersection(*cols)
if not len(common_cols) == len(cols[0]):
update_record(
collection_object,
traceback='Event files contain distinct columns'
)
raise Exception('Event files contain distinct columns')
if not set(['onset', 'duration']).issubset(common_cols):
update_record(
collection_object,
traceback='Not all columns have "onset" and "duration"'
)
raise Exception('Not all columns have "onset" and "duration"')
pe_objects = []
try:
for col in common_cols - set(['onset', 'duration']):
predictor = Predictor(
name=col,
source=f'Collection: {collection_object.collection_name}',
dataset_id=dataset_id,
predictor_collection_id=collection_object.id,
private=True,
description=descriptions.get(col))
db.session.add(predictor)
db.session.commit()
for ix, e in enumerate(events):
select = e[['onset', 'duration', col]].dropna()
for run_id in runs[ix]:
# Add PredictorRun
pr, _ = get_or_create(
PredictorRun, predictor_id=predictor.id, run_id=run_id)
for _, row in select.iterrows():
row = row.to_dict()
pe_objects.append(
PredictorEvent(
predictor_id=predictor.id,
run_id=run_id, onset=row['onset'],
duration=row['duration'], value=row[col])
)
collection_object.predictors.append(predictor)
db.session.bulk_save_objects(pe_objects)
db.session.commit()
except Exception as e:
cache.clear()
db.session.rollback()
update_record(
collection_object,
exception=e,
traceback=f'Error creating predictors. Failed processing {col}'
)
raise
cache.clear()
return update_record(
collection_object,
status='OK'
)
|
d6d16206716dae0e7e945d1cff95317454031e3e
| 32,277 |
def get_filtered_metadata_list(metadata_list, strand):
""" Given a lis of exon junctions, remove the ones that redundantly cover a junction
Parameters
----------
metadata_list: List(Output_metadata),
strand: strand of the gene
Returns
-------
filtered_meetadata_list: List of metadata objects remaining after filter
"""
exon_dict = _get_exon_junction_dict(metadata_list, strand)
remove_id_list = _collect_remove_ids(exon_dict)
return list(filter(lambda m: m.output_id not in remove_id_list, metadata_list))
|
dab3da34f435d7401dd5e76be2c9c032aea875c1
| 32,278 |
import functools
import traceback
def handle_exceptions(database, params, constraints, start_params, general_options):
"""Handle exceptions in the criterion function.
This decorator catches any exceptions raised inside the criterion function. If the
exception is a :class:`KeyboardInterrupt` or a :class:`SystemExit`, the user wants
to stop the optimization and the exception is raised
For other exceptions, it is assumed that the optimizer proposed parameters which
could not be handled by the criterion function. For example, the parameters formed
an invalid covariance matrix which lead to an :class:`numpy.linalg.LinAlgError` in
the matrix decompositions. Then, we calculate a penalty as a function of the
criterion value at the initial parameters and some distance between the initial and
the current parameters.
"""
def decorator_handle_exceptions(func):
@functools.wraps(func)
def wrapper_handle_exceptions(x, *args, **kwargs):
try:
out = func(x, *args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except Exception as e:
# Adjust the criterion value at the start.
start_criterion_value = general_options["start_criterion_value"]
constant, slope = general_options.get(
"criterion_exception_penalty", (None, None)
)
constant = 2 * start_criterion_value if constant is None else constant
slope = 0.1 * start_criterion_value if slope is None else slope
raise_exc = general_options.get("criterion_exception_raise", False)
if raise_exc:
raise e
else:
if database:
exception_info = traceback.format_exc()
p = reparametrize_from_internal(
internal=x,
fixed_values=params["_internal_fixed_value"].to_numpy(),
pre_replacements=params["_pre_replacements"]
.to_numpy()
.astype(int),
processed_constraints=constraints,
post_replacements=(
params["_post_replacements"].to_numpy().astype(int)
),
processed_params=params,
)
msg = (
exception_info
+ "\n\n"
+ "The parameters are\n\n"
+ p["value"].to_csv(sep="\t", header=True)
)
append_rows(database, "exceptions", {"value": msg})
out = min(
MAX_CRITERION_PENALTY,
constant + slope * np.linalg.norm(x - start_params),
)
return out
return wrapper_handle_exceptions
return decorator_handle_exceptions
|
725cfc7d3c338e2a4dbd143fc558307cbb49e1cc
| 32,279 |
def vector2angles(gaze_vector: np.ndarray):
"""
Transforms a gaze vector into the angles yaw and elevation/pitch.
:param gaze_vector: 3D unit gaze vector
:return: 2D gaze angles
"""
gaze_angles = np.empty((1, 2), dtype=np.float32)
gaze_angles[0, 0] = np.arctan(-gaze_vector[0]/-gaze_vector[2]) # phi= arctan2(x/z)
gaze_angles[0, 1] = np.arcsin(-gaze_vector[1]) # theta= arcsin(y)
return gaze_angles
|
b0db8e1f6cb9865e9563af5385f760699069013e
| 32,280 |
def setup_train_test_idx(X, last_train_time_step, last_time_step, aggregated_timestamp_column='time_step'):
""" The aggregated_time_step_column needs to be a column with integer values, such as year, month or day """
split_timesteps = {}
split_timesteps['train'] = list(range(last_train_time_step + 1))
split_timesteps['test'] = list(range(last_train_time_step + 1, last_time_step + 1))
train_test_idx = {}
train_test_idx['train'] = X[X[aggregated_timestamp_column].isin(split_timesteps['train'])].index
train_test_idx['test'] = X[X[aggregated_timestamp_column].isin(split_timesteps['test'])].index
return train_test_idx
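# Illustrative use (hypothetical frame): with X['time_step'] holding integer time steps,
# idx = setup_train_test_idx(X, last_train_time_step=7, last_time_step=9)
# puts rows with time_step 0..7 into idx['train'] and 8..9 into idx['test'].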
|
256fbe66c0b27b651c8190101e5068f7e0542498
| 32,281 |
def get_targets_as_list(key_list):
"""Get everything as list
:param key_list: Target key list
:type key_list: `list`
:return: Values list
:rtype: `list`
"""
session = get_scoped_session()
values = []
for key in key_list:
values.append(get_all_targets(session, key))
return values
|
bcd2ed48d685353a59c4545d1277589fa388b4a0
| 32,282 |
import re
def load_jmfd(jmfd_path):
"""Loads j-MFD as Pandas DataFrame.
Args:
jmfd_path (str): Path of J-MFD.
Raises:
JMFDFormatError: J-MFD format error.
Returns:
pandas.DataFrame: Pandas DataFrame of loaded j-MFD with word, existence of stem, foundation
id and foundation columns.
dict: A dict mapping ids to the corresponding Moral foundation.
"""
with open(jmfd_path, mode='r') as f:
text = f.read()
splitted = text.split('%')
if len(splitted) != 3:
raise JMFDFormatError('Invalid JMFD format.')
text_cat = splitted[1].strip()
text_dic = splitted[2].strip()
# Creates a dict mapping ids to the corresponding Moral foundation.
foundation = {}
for t in text_cat.splitlines():
fid, cat = t.strip().split('\t')
foundation[fid] = cat
# Gets moral foundation words and ids.
words = []
fids = []
for t in text_dic.splitlines():
text_splitted = re.split('\t+', t.strip())
for i in range(1, len(text_splitted)):
words.append(text_splitted[0].strip())
fids.append(text_splitted[i].strip())
# Creates DataFrame containing loaded J-MFD.
df = pd.DataFrame({
'word': [w.replace('*', '') for w in words],
'stem': [w.endswith('*') for w in words],
'fid': fids,
'foundation': [foundation[i] for i in fids]
})
return df, foundation
|
675370c9ce0ed37667ec347dc4a0af57ea5b20b3
| 32,283 |
from bson import ObjectId
def objectId(value):
"""Validate an ObjectId string."""
if value and not ObjectId.is_valid(value):
raise ValueError('This is not valid objectId')
return value
|
2e33950649fe95460e82102c1d6209a9173fa5fd
| 32,285 |
def add_lists(list1, list2):
"""
Add corresponding values of two lists together. The lists should have the same number of elements.
Parameters
----------
list1: list
the first list to add
list2: list
the second list to add
Return
----------
output: list
a new list containing the sum of the given lists
"""
output = []
for it1, it2 in zip(list1, list2):
output.append(it1 + it2)
return output
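# Worked example of the element-wise sum described above: add_lists([1, 2, 3], [10, 20, 30]) returns [11, 22, 33].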
|
e4efbc079a981caa4bcbff4452c8845a7e534195
| 32,286 |
def get_struc_first_offset(*args):
"""
get_struc_first_offset(sptr) -> ea_t
Get offset of first member.
@param sptr (C++: const struc_t *)
@return: BADADDR if memqty == 0
"""
return _ida_struct.get_struc_first_offset(*args)
|
f589dec791c3a81664b81573ea52f02d1c9a6b15
| 32,287 |
def export_gps_route( trip_id, trip_date, vehicle_id,
gtfs_error, offset_seconds,
gps_data ):
"""
Writes the given entry to the "tracked_routes" table. This table is used
to cache the results of finding and filtering only the valid routes as
represented in the GPS dataset.
Returns segment_id, a unique identifier for this GPS segment
trip_id: the GTFS trip id
trip_date: the date of the trip
vehicle_id: as reported in the GPS data
gtfs_error: The distance from the matched GTFS trip as measured by
the GPSBusTrack metric
offset_seconds: Number of seconds to subtract from GTFS trip to normalize.
gps_data: A list of (lat, lon, reported_update_time) values, exactly as
reported in the GPS data. Note that reported_update_time should
be a timestamp.
WARNING: No effort is made to prevent duplicate entries! If you do this
more than once for the same route then YOU MUST DELETE IT FIRST!
"""
sql1 = """insert into gps_segments (
trip_id, trip_date, vehicle_id,
schedule_error, schedule_offset_seconds
) VALUES (
%(trip_id)s,%(trip_date)s,%(vehicle_id)s,
%(gtfs_error)s, %(offset)s
) RETURNING gps_segment_id"""
sql2 = """insert into tracked_routes (
gps_segment_id, lat, lon, reported_update_time
) VALUES (
%(seg_id)s,%(lat)s,%(lon)s,%(reported_update_time)s
)"""
cur = get_cursor()
SQLExec(cur,sql1,
{'trip_id':trip_id,'trip_date':trip_date,'vehicle_id':vehicle_id,
'gtfs_error':str(gtfs_error),'offset':offset_seconds});
segment_id = list(cur.fetchall())[0][0];
for lat,lon,reported_update_time in gps_data:
SQLExec(cur,sql2,
{'lat':lat,'lon':lon,
'reported_update_time':reported_update_time,
'seg_id':str(segment_id)});
cur.close()
return segment_id
|
fe1a4f4fb2c89c6634353748d5cdd49d82110e64
| 32,288 |
def optimize_solution(solution):
"""
Eliminate moves which have a full rotation (N % 4 = 0)
since full rotations don't have any effects in the cube
also if two consecutive moves are made in the same direction
this moves are mixed in one move
"""
i = 0
while i < len(solution):
dir, n = solution[i]
if n % 4 == 0:
solution.pop(i)
if i > 0:
i -= 1
elif i + 1 < len(solution):
dir2, n2 = solution[i+1]
if dir == dir2:
solution[i] = (dir, (n + n2) % 4)
solution.pop(i+1)
else:
i += 1
else:
break
return solution
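# Worked examples of the two simplifications described in the docstring:
# optimize_solution([('R', 4)]) -> []                       (full rotation removed)
# optimize_solution([('R', 2), ('R', 3)]) -> [('R', 1)]     (same-direction moves merged mod 4)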
|
4be6bf0e4200dbb629c37a9bdae8338ee32c262b
| 32,289 |
from typing import Iterable
import resource
from typing import Optional
def secretsmanager_resource(
client: Client,
policies: Iterable[Policy] = None,
):
"""
Create Secrets Manager resource.
Parameters:
• client: Secrets Manager client object
• policies: security policies to apply to all operations
"""
if client.service_name != "secretsmanager":
raise TypeError("expecting Secrets Manager client")
@resource
class SecretsResource:
@mutation(policies=policies)
async def create(self, secret, secret_string: Optional[str]):
"""Add a secret to secrets manager"""
await client.create_secret(Name=secret, SecretString=secret_string)
@operation(policies=policies)
async def put(self, secret, secret_string: Optional[str]):
"""Update a secret in the secrets manager"""
await client.put_secret_value(SecretId=secret, SecretString=secret_string)
@operation(policies=policies)
async def delete(self, secret):
"""Delete the secret."""
await client.delete_secret(SecretId=secret)
@resource
class SecretsManagerResource:
"""Amazon Secrets Manager resource."""
@mutation(policies=policies)
async def get_secret(self, secret_name: str) -> Secret:
"""
Retrieve a secret from Secrets Manager.
Parameters:
• secret_name: The name of the secret or secret Amazon Resource Names (ARNs).
"""
with wrap_client_error():
get_secret_value_response = await client.get_secret_value(SecretId=secret_name)
if "SecretString" in get_secret_value_response:
return Secret(
ARN=get_secret_value_response["ARN"],
Name=get_secret_value_response["Name"],
SecretString=get_secret_value_response["SecretString"],
)
else:
return Secret(
ARN=get_secret_value_response["ARN"],
Name=get_secret_value_response["Name"],
SecretBinary=get_secret_value_response["SecretBinary"],
)
secretsresource = SecretsResource()
return SecretsManagerResource()
|
ee2d880944065331aba0751bdfba2f82c3d7e2ac
| 32,290 |
def lgbm_hyperband_classifier(numeric_features, categoric_features, learning_rate=0.08):
"""
Simple classification pipeline using hyperband to optimize lightgbm hyper-parameters
Parameters
----------
`numeric_features` : The list of numeric features
`categoric_features` : The list of categoric features
`learning_rate` : The learning rate
"""
return _lgbm_hyperband_model('classification', numeric_features, categoric_features, learning_rate)
|
7c48373d1f40d7248a9d0f6a37c95281027aa1bd
| 32,291 |
def GL(mu, wid, x, m = 0.5):
"""
Function to generate a 1D Gaussian-Lorentzian peak. The peak
is centered at mu, has a FWHM of wid, and uses blending parameter m.
Parameters
----------
mu: float
Peak center
wid: float
FWHM of Gaussian peak. FWHM is related to sigma by the
relation: FWHM = 2 * sqrt(2 * ln(2)) * sigma
m: float
Blending constant. Default is 0.5.
x: ndarray
Input numpy array of numbers
Output
------
Numpy ndarray
Single blended Gaussian-Lorentzian peak.
Reference
---------
Implementation of MATLAB code from
http://terpconnect.umd.edu/~toh/spectrum/functions.html#Peak_shape_functions
"""
return m * gaussian(mu, wid, x) + (1 - m) * lorentzian(mu, wid, x)
|
d458eae3ad1ea31dcab021c798e9d7d02fa390ae
| 32,292 |
def jp_(var,mask):
"""Value at j+1/2, no gradient across boundary"""
return div0((var*mask + np.roll(var*mask,-1,axis=0)),(mask+np.roll(mask,-1,axis=0)))
|
cc2aaf2e17bd0cbe3a211b26bc9d976298307f0d
| 32,293 |
import re
def _solrize_date(date, date_type=''):
"""
Takes a date string like 2018-01-01 and returns an
integer suitable for querying the date field in a solr document.
"""
solr_date = "*"
if date:
date = date.strip()
start_year, end_year = fulltext_range()
if date_type == 'start' and date == str(start_year) +'-01-01':
return '*'
elif date_type == 'end' and date == str(end_year) +'-12-31':
return '*'
# 1900-01-01 -> 19000101
match = re.match(r'(\d{4})-(\d{2})-(\d{2})', date)
if match:
y, m, d = match.groups()
if y and m and d:
solr_date = y+m+d
return solr_date
|
f309f784d79b46ed704ee1e631d7b4bdda7057f6
| 32,294 |
def read_file(filename):
"""
read filename and return its content
"""
with open(filename) as in_fp:
content = in_fp.read()
return content
|
c707a412b6099591daec3e70e9e2305fee6511f9
| 32,295 |
def delete_job(job_id):
"""Delete my job by Id
Upon success, marks job as 'aborted' if it must be suspended, and returns the deleted job with the appropriate status # noqa: E501
:param job_id: Id of the job that needs to be deleted
:type job_id: str
:rtype: Job
"""
job = q.fetch_job(job_id)
job.cancel()
return job_id
|
e8f02faa2a9336c93725739443b9007242b50b5c
| 32,297 |
def service(appctx):
"""Service with files instance."""
return RecordService(ServiceWithFilesConfig)
|
4902f8eae2c2c4200543a9c594f2abbc5163ec70
| 32,298 |
import json
import re
def get_sci_edus(filepath):
"""
load each sciedu
"""
with open(filepath, 'r') as fb:
train = json.loads(fb.read().encode('utf-8'))['root']
EDUs = []
sentenceNo = 1
sentenceID = 1
for edu_dict in train:
if edu_dict['id'] == 0:
continue
EDUs.append(EDU([edu_dict['id'], edu_dict['parent'], edu_dict['relation'],
re.sub('<S>|\r' ,'',edu_dict['text']), sentenceNo, sentenceID],
[1]))
if '<S>' in edu_dict['text']:
sentenceNo += 1
sentenceID = 1
else:
sentenceID += 1
return EDUs
|
0b8fd37dd8884e9e3f38f4bb671dff2df978f5b2
| 32,299 |
def vec_moderates(vec, minv, maxv, inclusive=1):
"""return a integer array where values inside bounds are 1, else 0
if inclusive, values will also be set if they equal a bound
return error code, new list
success: 0, list
error : 1, None"""
if not vec: return 1, None
if minv > maxv:
print('** moderates: minv > maxv (', minv, maxv, ')')
return 1, None
if inclusive:
elist = [1*(vec[t]>=minv and vec[t]<=maxv) for t in range(len(vec))]
else:
elist = [1*(vec[t]> minv and vec[t]< maxv) for t in range(len(vec))]
return 0, elist
|
65696ba3d4cb8c43e231a4aae1c8cef83351fb07
| 32,300 |
def SideInfo(version_index, channel_mode, raw_data=None):
"""SideInfo(version_index, channel_mode, raw_data) -> object
Return an object representing MPEG layer 3 side info, based on the given
parameters. The class of the object varies based on the MPEG version and
channel mode (only applicable fields are present, and field sizes vary)."""
lsf = (version_index != 3)
mono = (channel_mode == 3)
return _si_classes[lsf][mono](raw_data)
|
af554b0b6ebc4c33846881b27c02fb648d82b5ca
| 32,301 |
import datetime as dt
def datetimeConvertor(date, month, year, time, timezone):
"""
Converts raw date/time data into an object of datetime class.
"""
Date = date + "/" + monthnumberSwap(month) + "/" + year
Time = time + " " + timezone
return dt.datetime.strptime(Date + " " + Time, "%d/%m/%Y %H:%M:%S %z")
|
a83e873ee9b9aa1737fffc61c80aa9204305d3fb
| 32,302 |
import pathlib
def dump_gone(aspect_store: dict, indent=False) -> bool:
"""Not too dry ..."""
return _dump(aspect_store, pathlib.Path('gone.json'), indent)
|
14df4567d0ffc80f9764afa50c725bc1d178e031
| 32,303 |
import jax.numpy as jnp
def loss_fn(params, model, data):
"""
Description:
This is MSE loss function, again pay close attention to
function signature as this is the function which is going to be differentiated, so
params must be in its inputs. we do not need to vectorize this function as it is written
with batching considerations.
params -- pytree of trainable parameters
model -- model to be trained
data -- a tuple of training data --> (x_train, y_train)
"""
x, y = data
return jnp.mean((model(params, x) - y) ** 2)
|
cddd29becee4ce047b086130c7ce8cea114cb914
| 32,304 |
from copy import copy
from numpy import eye, triu
def lufact(A):
"""
lufact(A)
Compute the LU factorization of square matrix A, returning the factors.
"""
n = A.shape[0]
L = eye(n) # puts ones on diagonal
U = copy(A)
# Gaussian elimination
for j in range(n-1):
for i in range(j+1,n):
L[i,j] = U[i,j] / U[j,j] # row multiplier
U[i,j:] = U[i,j:] - L[i,j]*U[j,j:]
return L,triu(U)
|
e04c20ede47019789e00dc375b84efa931fe2e1f
| 32,305 |
import jieba.posseg as posseg
def get_city(msg):
""" Extract a place name from the message.
"""
# segment the message and tag each token's part of speech
words = posseg.lcut(msg)
# iterate over the list returned by posseg.lcut
for word in words:
# each element is a pair object with the attributes word and flag (the token and its part-of-speech tag)
if word.flag == 'ns':
# the 'ns' tag marks place names
return word.word
return None
|
017f910090291fdc77cc22ce4bc3fc3699c2981b
| 32,306 |
def get_cycle_time(string):
"""
Extract the cycle time text from the given string. None if not found.
"""
return _search_in_pattern(string, CYCLE_TIME_PATTERN, 1)
|
6d41a9f4b04f90b4a5a8d7892398bc080d41e519
| 32,309 |
async def async_unload_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(
config_entry, PLATFORMS
)
hass.data[DOMAIN].pop(config_entry.entry_id, None)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return unload_ok
|
6e4bf924e6e04d03cc30de3a6d4b2f713dd05b32
| 32,310 |
from typing import Union
def get_scale_factor(unit: Union[str, float]) -> float:
"""
Get how many pts are in a unit.
:param unit: A unit accepted by fpdf.FPDF
:return: The number of points in that unit
:raises ValueError: if the unit is not recognized
"""
if isinstance(unit, (int, float)):
return float(unit)
k = FPDF_UNITS.get(unit, None)
if k is None:
raise ValueError(f"Unit does not exist: {unit}")
return k
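# Small sketch of the two paths above: numeric units pass straight through,
# e.g. get_scale_factor(72) == 72.0, while string units are looked up in FPDF_UNITS
# (an unknown name such as get_scale_factor('furlong') raises ValueError).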
|
c95429436b96f883e5fcfe3b1680a9f35c5f27e3
| 32,312 |
def r2_score(y_true,y_pred):
"""Calculate the coefficient of determination."""
assert len(y_true)==len(y_pred)
rss = sum_square_residuals(y_true,y_pred)
tss = total_sum_squares(y_true)
return 1 - rss/tss
|
7d2eba54db3d5682ec0ed22b5c09a65cf1e34e27
| 32,313 |
def give_name(fname):
"""
Return the file name with a .csv extension; auxiliary files (AUX_FILE prefix) are left unchanged.
"""
if fname[:len( AUX_FILE) ] != AUX_FILE: # skip auxiliary/hidden files
# renaming with correct extension
if fname[ -4: ]!= '.csv':
if fname.find('.') > -1:
fname = fname[: fname.find('.')]+'.csv'
else:
fname += '.csv'
else:
fname += '.csv'
return fname
|
55f6241b7a57d7611fe2db1731d909bb5b4186ac
| 32,314 |
def categorize(document):
"""Categorize a document.
Categorizes a document into the following categories
[business, entertainment, politics, sport, tech].
Takes a string object as input and returns a string object.
"""
doc = clean(document)
vector = doc2vec_model.infer_vector(doc.split(' '))
result = svm.predict(vector.reshape(1, -1))[0]
return news_categories[result]
|
a085e08e7e8b7ff31e68a536e973b5131540e481
| 32,315 |
def delete_intent(token, aiid, intent_name):
"""Delete an Intent"""
return fetch_api(
'/intent/{aiid}?intent_name={intent_name}',
token=token,
aiid=aiid,
intent_name=intent_name,
method='delete'
)
|
198ed90b176c2f08c3c681dfbb5deea52cfbcfa4
| 32,316 |
def report_all(df_select):
"""
report all values to a defined template
"""
if len(df_select) == 0:
report_all = 'No similar events were reported on in online media'
else:
report_all = """
Similar events were reported on in online media.
Below we provide a tabulated set of impacts from these media sources. We also
provide the links to these media sources so that you can read more about these
past events. You can use these reports to have a better understanding what may
happen with the forecast event by TMA.
==============================================================================
"""
for _, row in df_select.iterrows():
report_all += fill_report(row)
return report_all
|
d0e5f06416a467d7578f4748725301638b33d1bb
| 32,317 |
import urllib
def get_filename_from_headers(response):
"""Extract filename from content-disposition headers if available."""
content_disposition = response.headers.get("content-disposition", None)
if not content_disposition:
return None
entries = content_disposition.split(";")
name_entry = next((e.strip() for e in entries if e.strip().lower().startswith("filename*=")), None)
if name_entry:
name = name_entry.split("=", 1)[1].strip()
encoding, _, name = name.split("'")
return urllib.parse.unquote(name, encoding, errors="strict")
name_entry = next((e.strip() for e in entries if e.strip().lower().startswith("filename=")), None)
if not name_entry:
return None
filename = name_entry.split("=", 1)[1].strip()
if filename.startswith('"'):
filename = filename[1:-1]
return filename
|
d4c54c3d19d72f2813e2d1d4afde567d0db0e1af
| 32,318 |
def names(as_object=False, p5_connection=None):
"""
Syntax: ArchiveIndex names
Description: Returns the list of names of archive indexes.
Return Values:
-On Success: a list of names. If no archive indexes are configured,
the command returns the string "<empty>"
"""
method_name = "names"
result = exec_nsdchat([module_name, method_name], p5_connection)
if not as_object:
return result
else:
return resourcelist(result, ArchiveIndex, p5_connection)
|
1b1d00d70730b79ccab25e5ca101f752ad49cc1c
| 32,319 |
def regionvit_base_w14_224(pretrained=False, progress=True, **kwargs):
"""
Constructs the RegionViT-Base-w14-224 model.
.. note::
RegionViT-Base-w14-224 model from `"RegionViT: Regional-to-Local Attention for Vision Transformers" <https://arxiv.org/pdf/2106.02689.pdf>`_.
The required input size of the model is 224x224.
Args:
pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``
For example:
.. code-block:: python
>>> import flowvision
>>> regionvit_base_w14_224 = flowvision.models.regionvit_base_w14_224(pretrained=False, progress=True)
"""
return _regionvit("regionvit_base_w14", pretrained, progress, **kwargs)
|
63983c4fe9cb5ed74e43e1c40b501f50fbaead56
| 32,320 |
import collections
def create_executor_list(suites):
"""
Looks up what other resmoke suites run the tests specified in the suites
parameter. Returns a dict keyed by suite name / executor, value is tests
to run under that executor.
"""
memberships = collections.defaultdict(list)
test_membership = resmokelib.parser.create_test_membership_map()
for suite in suites:
for group in suite.test_groups:
for test in group.tests:
for executor in test_membership[test]:
memberships[executor].append(test)
return memberships
|
c9150b14ba086d9284acb2abdcd4592e7803a432
| 32,321 |
import numpy as np
def calculate_great_circle(args):
"""one step of the great circle calculation"""
lon1,lat1,lon2,lat2 = args
radius = 3956.0
x = np.pi/180.0
a,b = (90.0-lat1)*(x),(90.0-lat2)*(x)
theta = (lon2-lon1)*(x)
c = np.arccos((np.cos(a)*np.cos(b)) +
(np.sin(a)*np.sin(b)*np.cos(theta)))
return(radius*c)
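# Sanity-check example: identical endpoints give (approximately) zero distance,
# calculate_great_circle((0.0, 0.0, 0.0, 0.0)) ≈ 0.0 (distance in miles, earth radius 3956).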
|
f0832b984382b2cd2879c40ab1249d68aacddd69
| 32,322 |
def divide(x,y):
"""div x from y"""
return x/y
|
74adba33dfd3db2102f80a757024696308928e38
| 32,323 |
def execute(compile_state: CompileState, string: StringResource) -> NullResource:
""" Executes the string at runtime and returns Null"""
compile_state.ir.append(CommandNode(string.static_value))
return NullResource()
|
4785d9a527982eb723d120f47af2915b6b830795
| 32,324 |
import numpy as np
import torch
def fakeLabels(lth):
"""
lth (int): number of labels required
"""
label=torch.tensor([])
for i in range(lth):
arr=np.zeros(c_dims)
arr[0]=1
np.random.shuffle(arr)
label=torch.cat((label,torch.tensor(arr).float().unsqueeze(0)),dim=0)
return label
|
a2ffb4a7ff3b71bc789181130bc6042ff184ac9c
| 32,325 |
def load_canadian_senators(**kwargs):
"""
A history of Canadian senators in office.::
Size: (933,10)
Example:
Name Abbott, John Joseph Caldwell
Political Affiliation at Appointment Liberal-Conservative
Province / Territory Quebec
Appointed on the advice of Macdonald, John Alexander
Term (yyyy.mm.dd) 1887.05.12 - 1893.10.30 (Death)
start_date 1887-05-12 00:00:00
end_date 1893-10-30 00:00:00
reason Death
diff_days 2363
observed True
"""
return _load_dataset("canadian_senators.csv", **kwargs)
|
42ae6a455d3bed11275d211646ee6acd2da505b6
| 32,326 |
from hashlib import md5
def _get_md5(filename):
"""Return the MD5 checksum of the passed file"""
with open(filename, "rb") as data_file:
data = data_file.read()
r = md5(data)
return r.hexdigest()
|
c86943841a1f8f8e296d82818c668c197f824373
| 32,327 |
def implements(numpy_func_string, func_type):
"""Register an __array_function__/__array_ufunc__ implementation for Quantity
objects.
"""
def decorator(func):
if func_type == "function":
HANDLED_FUNCTIONS[numpy_func_string] = func
elif func_type == "ufunc":
HANDLED_UFUNCS[numpy_func_string] = func
else:
raise ValueError("Invalid func_type {}".format(func_type))
return func
return decorator
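# Typical registration sketch (the handler name and body are illustrative, not from the original module):
# @implements("concatenate", "function")
# def _concatenate(sequence, axis=0, out=None):
#     ...  # unwrap magnitudes, call the numpy function, re-attach units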
|
ec0d843798c4c047d98cd9a76bcd862c3d5339e8
| 32,328 |
def r2(data1, data2):
"""Return the r-squared difference between data1 and data2.
Parameters
----------
data1 : 1D array
data2 : 1D array
Returns
-------
output: scalar (float)
difference in the input data
"""
ss_res = 0.0
ss_tot = 0.0
mean = sum(data1) / len(data1)
for i in range(len(data1)):
ss_res += (data1[i] - data2[i]) ** 2
ss_tot += (data1[i] - mean) ** 2
return 1 - ss_res / ss_tot
|
d42c06a5ad4448e74fcb1f61fa1eed1478f58048
| 32,329 |
import io
from PIL import Image
import matplotlib.pyplot as plt
def fio_color_hist_fio(image_fio):
"""Generate a fileIO with the color histogram of an image fileIO
:param image_fio: input image in fileIO format
:type image_fio: fileIO
:return: color histogram of the input image in fileIO format
:rtype: fileIO
"""
image_fio.seek(0)
bkp = fio_to_b(image_fio)
img_pil = Image.open(image_fio).convert('RGB')
r, g, b = img_pil.split()
bins = list(range(256))
plt.plot(bins, r.histogram(), 'r')
plt.plot(bins, g.histogram(), 'g')
plt.plot(bins, b.histogram(), 'b')
plt.xlabel('Pixel value')
plt.ylabel('Frequency')
plt.grid(True)
out_img_fio = io.BytesIO()
plt.savefig(out_img_fio)
plt.close()
out_img_fio.seek(0)
image_fio = b_to_fio(bkp)
return out_img_fio
|
13c10cce5dc9bfa17d19a4b2f486fb7b34bcb176
| 32,330 |
def lattice2d_fixed_env():
"""Lattice2DEnv with a fixed sequence"""
seq = 'HHHH'
return Lattice2DEnv(seq)
|
664b6b411a47018c460b09909ccb29c033bae2e5
| 32,332 |
import time
import logging
def expected_full(
clr,
view_df=None,
smooth_cis=False,
aggregate_smoothed=False,
smooth_sigma=0.1,
aggregate_trans=False,
expected_column_name="expected",
ignore_diags=2,
clr_weight_name='weight',
chunksize=10_000_000,
nproc=4,
):
"""
Generate a DataFrame with expected for *all* 2D regions
tiling entire heatmap in clr.
Such 2D regions are defined as all pairwise combinations
of the regions in view_df. Average distance decay is calculated
for every cis-region (e.g. inter- and intra-arms), and
a "simple" average over each block is caculated for trans-
regions.
When sub-chromosomal view is provided, trans averages
can be aggregated back to the level of full chromosomes.
Parameters
----------
clr : cooler.Cooler
Cooler object
view_df : viewframe
expected is calculated for all pairwise combinations of regions
in view_df. Distance dependent expected is calculated for cis
regions, and block-level average is calculated for trans regions.
smooth_cis: bool
Apply smoothing to cis-expected. Will be stored in an additional column
aggregate_smoothed: bool
When smoothing cis expected, average over all regions, ignored without smoothing.
smooth_sigma: float
Control smoothing with the standard deviation of the smoothing Gaussian kernel.
Ignored without smoothing.
aggregate_trans : bool
Aggregate trans-expected at the inter-chromosomal level.
expected_column_name : str
Name of the column where to store combined expected
ignore_diags : int, optional
Number of intial diagonals to exclude for calculation of distance dependent
expected.
clr_weight_name : str or None
Name of balancing weight column from the cooler to use.
Use raw unbalanced data, when None.
chunksize : int, optional
Size of pixel table chunks to process
nproc : int, optional
How many processes to use for calculation
Returns
-------
expected_df: pd.DataFrame
cis and trans expected combined together
"""
# contacts vs distance - i.e. intra/cis expected
time_start = time.perf_counter()
cvd = expected_cis(
clr,
view_df=view_df,
intra_only=False, # get cvd for all 2D regions
smooth=smooth_cis,
smooth_sigma=smooth_sigma,
aggregate_smoothed=aggregate_smoothed,
clr_weight_name=clr_weight_name,
ignore_diags=ignore_diags,
chunksize=chunksize,
nproc=nproc,
)
time_elapsed = time.perf_counter() - time_start
logging.info(f"Done calculating cis expected in {time_elapsed:.3f} sec ...")
# contacts per block - i.e. inter/trans expected
time_start = time.perf_counter()
cpb = expected_trans(
clr,
view_df=view_df,
clr_weight_name=clr_weight_name,
chunksize=chunksize,
nproc=nproc,
)
# pretend that they also have a "dist"
# to make them mergeable with cvd
cpb["dist"] = 0
time_elapsed = time.perf_counter() - time_start
logging.info(f"Done calculating trans expected in {time_elapsed:.3f} sec ...")
# annotate expected_df with the region index and chromosomes
view_label = view_df \
.reset_index() \
.rename(columns={"index":"r"}) \
.set_index("name")
# which expected column to use, based on requested "modifications":
cis_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
if smooth_cis:
cis_expected_name = f"{cis_expected_name}.smoothed"
if aggregate_smoothed:
cis_expected_name = f"{cis_expected_name}.agg"
# copy to the prescribed column for the final output:
cvd[expected_column_name] = cvd[cis_expected_name].copy()
# aggregate trans if requested and decide which trans-expected column to use:
trans_expected_name = "balanced.avg" if clr_weight_name else "count.avg"
if aggregate_trans:
trans_expected_name = f"{trans_expected_name}.agg"
additive_cols = ["n_valid","count.sum"]
if clr_weight_name:
additive_cols.append("balanced.sum")
# groupby chrom1, chrom2 and aggregate additive fields (sums and n_valid):
_cpb_agg = cpb.groupby(
[
view_label["chrom"].loc[cpb["region1"]].to_numpy(), # chrom1
view_label["chrom"].loc[cpb["region2"]].to_numpy(), # chrom2
]
)[additive_cols].transform("sum")
# recalculate aggregated averages:
cpb["count.avg.agg"] = _cpb_agg["count.sum"]/_cpb_agg["n_valid"]
if clr_weight_name:
cpb["balanced.avg.agg"] = _cpb_agg["balanced.sum"]/_cpb_agg["n_valid"]
# copy to the prescribed column for the final output:
cpb[expected_column_name] = cpb[trans_expected_name].copy()
# concatenate cvd and cpb (cis and trans):
expected_df = pd.concat([cvd, cpb], ignore_index=True)
# add r1 r2 labels to the final dataframe for obs/exp merging
expected_df["r1"] = view_label["r"].loc[expected_df["region1"]].to_numpy()
expected_df["r2"] = view_label["r"].loc[expected_df["region2"]].to_numpy()
# and return joined cis/trans expected in the same format
logging.info(f"Returning combined expected DataFrame.")
# consider purging unneccessary columns here
return expected_df
|
5f387c71f059cd942ff1ff4b6cdb6a59e91ef85b
| 32,333 |
def nmgy2abmag(flux, flux_ivar=None):
"""
Conversion from nanomaggies to AB mag as used in the DECALS survey
flux_ivar = Inverse variance of DECAM_FLUX (1/nanomaggies^2)
"""
lenf = len(flux)
if lenf > 1:
ii = np.where(flux>0)
mag = 99.99 + np.zeros_like(flux)
mag[ii] = 22.5 - 2.5*np.log10(flux[ii])
else:
mag = 22.5 - 2.5*np.log10(flux)
if flux_ivar is None:
return mag
elif lenf>1:
err = np.zeros_like(mag)
df = np.sqrt(1./flux_ivar)
err[ii] = mag_err(df[ii]/flux[ii], verbose=False)
else:
df = np.sqrt(1./flux_ivar)
err = mag_err(df/flux, verbose=False)
return mag,err
|
5f65a06049955b4ddfe235d6fc12ae5726089b0f
| 32,334 |
def rnn_decoder(dec_input, init_state, cell, infer, dnn_hidden_units, num_feat):
"""Decoder for RNN cell.
Given list of LSTM hidden units and list of LSTM dropout output keep
probabilities.
Args:
dec_input: List of tf.float64 current batch size by number of features
matrix tensors input to the decoder.
init_state: Initial state of the decoder cell. Final state from the
encoder cell.
cell: RNN cell (e.g. a stacked LSTM cell) used for decoding.
infer: Boolean; if True, feed the previous prediction back in as the
next decoder input (inference mode).
dnn_hidden_units: List of hidden unit counts for the DNN applied to each
decoder output.
num_feat: Number of output features.
Returns:
outputs: List of decoder outputs of length number of timesteps of tf.float64
current batch size by number of features matrix tensors.
state: Final cell state of the decoder.
"""
# Create the decoder variable scope
with tf.variable_scope("decoder"):
# Load in our initial state from our encoder
# Tuple of final encoder c_state and h_state of final encoder layer
state = init_state
# Create an empty list to store our hidden state output for every timestep
outputs = []
# Begin with no previous output
previous_output = None
# Loop over all of our dec_input which will be seq_len long
for index, decoder_input in enumerate(dec_input):
# If there has been a previous output, we will determine the next input
if previous_output is not None:
# Create the input layer to our DNN
# shape = (cur_batch_size, lstm_hidden_units[-1])
network = previous_output
# Create our dnn variable scope
with tf.variable_scope(name_or_scope="dnn", reuse=tf.AUTO_REUSE):
# Add hidden layers with the given number of units/neurons per layer
# shape = (cur_batch_size, dnn_hidden_units[i])
for units in dnn_hidden_units:
network = tf.layers.dense(
inputs=network,
units=units,
activation=tf.nn.relu)
# Connect final hidden layer to linear layer to get the logits
# shape = (cur_batch_size, num_feat)
logits = tf.layers.dense(
inputs=network,
units=num_feat,
activation=None)
# If we are in inference then we will overwrite our next decoder_input
# with the logits we just calculated. Otherwise, we leave the decoder
# input input as it was from the enumerated list. We have to calculate
# the logits even when not using them so that the correct DNN subgraph
# will be generated here and after the encoder-decoder for both
# training and inference
if infer:
# shape = (cur_batch_size, num_feat)
decoder_input = logits
# If this isn"t our first time through the loop, just reuse(share) the
# same variables for each iteration within the current variable scope
if index > 0:
tf.get_variable_scope().reuse_variables()
# Run the decoder input through the decoder stack picking up from the
# previous state
# output_shape = (cur_batch_size, lstm_hidden_units[-1])
# state_shape = # tuple of final decoder c_state and h_state
output, state = cell(decoder_input, state)
# Append the current decoder hidden state output to the outputs list
# List seq_len long of shape = (cur_batch_size, lstm_hidden_units[-1])
outputs.append(output)
# Set the previous output to the output just calculated
# shape = (cur_batch_size, lstm_hidden_units[-1])
previous_output = output
return outputs, state
|
215691ac8b3191da46d01a17fd37e2be08174640
| 32,335 |
import torch
def l1_loss(pre, gt):
""" L1 loss
"""
return torch.nn.functional.l1_loss(pre, gt)
|
c552224b3a48f9cde201db9d0b2ee08cd6335861
| 32,336 |
def run_tnscope(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
"""Call variants with Sentieon's TNscope somatic caller.
"""
if out_file is None:
out_file = "%s-variants.vcf.gz" % utils.splitext_plus(align_bams[0])[0]
if not utils.file_exists(out_file):
variant_regions = bedutils.merge_overlaps(dd.get_variant_regions(items[0]), items[0])
interval = _get_interval(variant_regions, region, out_file, items)
with file_transaction(items[0], out_file) as tx_out_file:
paired = vcfutils.get_paired_bams(align_bams, items)
assert paired and paired.normal_bam, "Require normal BAM for Sentieon TNscope"
dbsnp = "--dbsnp %s" % (assoc_files.get("dbsnp")) if "dbsnp" in assoc_files else ""
license = license_export(items[0])
cmd = ("{license}sentieon driver -t 1 -r {ref_file} "
"-i {paired.tumor_bam} -i {paired.normal_bam} {interval} "
"--algo TNscope "
"--tumor_sample {paired.tumor_name} --normal_sample {paired.normal_name} "
"{dbsnp} {tx_out_file}")
do.run(cmd.format(**locals()), "Sentieon TNscope")
return out_file
|
a7e82dc94a9166bde47ad43dab2c778b2f7945d6
| 32,337 |
def get_product(product_id):
"""
Read a single Product
This endpoint will return a product based on it's id
"""
app.logger.info("Request for product with id: %s", product_id)
product = Product.find(product_id)
if not product:
raise NotFound("product with id '{}' was not found.".format(product_id))
return make_response(jsonify(product.serialize()), status.HTTP_200_OK)
|
e9ee42be5f586aa0bbe08dfa5edefbd3b0bbc5d7
| 32,338 |
import re
import string
def aips_bintable_fortran_fields_to_dtype_conversion(aips_type):
"""Given AIPS fortran format of binary table (BT) fields, returns
corresponding numpy dtype format and shape. Examples:
4J => array of 4 32bit integers,
E(4,32) => two dimensional array with 4 columns and 32 rows.
"""
intv = np.vectorize(int)
aips_char = None
dtype_char = None
repeat = None
_shape = None
format_dict = {'L': 'bool', 'I': '>i2', 'J': '>i4', 'A': 'S', 'E': '>f4',
'D': '>f8'}
for key in format_dict.keys():
if key in aips_type:
aips_char = key
if not aips_char:
raise Exception("aips data format reading problem " + str(aips_type))
try:
dtype_char = format_dict[aips_char]
except KeyError:
raise Exception("no dtype counterpart for aips data format" +
str(aips_char))
try:
repeat = int(re.search(r"^(\d+)" + aips_char,
aips_type).groups()[0])
if aips_char == 'A':
dtype_char = str(repeat) + dtype_char
repeat = 1
except AttributeError:
repeat = None
if repeat is None:
_shape = tuple(intv(re.search(r"^" + aips_char +
r"\((.+)\)$",
aips_type).groups()[0].split(',')))
else:
_shape = repeat
return dtype_char, _shape
|
772bd75ff2af92cede5e5dac555662c9d97c544a
| 32,339 |
def account_list():
"""Return the account list."""
rps = {}
rps["status"] = True
account_list = query_account_list(db)
if account_list:
rps["data"] = account_list
else:
rps["status"] = False
rps["data"] = "The account list is empty"
return jsonify(rps)
|
3ab704e96cbf2c6548bf39f51a7f8c6f77352b6c
| 32,340 |
def sample_points_in_range(min_range, max_range, origin, directions, n_points):
"""Sample uniformly depth planes in a depth range set to [min_range,
max_range]
Arguments
---------
min_range: int, The minimum depth range
max_range: int, The maximum depth range
origin: tensor(shape=(4, 1), float32), The origin of the rays
directions: tensor(shape=(4, N), float32), The direction vectors defining
the rays
n_points: int, The number of points to be sampled
"""
# How many rays do we have?
N = K.shape(directions)[1]
directions /= K.sqrt(K.sum(directions**2, axis=0))
# Sample points uniformly on the ray in the bbox
points = K.map_fn(
lambda i: origin + directions[:, i:i+1] * K.tf.linspace(min_range, max_range, n_points),
K.tf.range(N),
dtype="float32"
)
return K.permute_dimensions(
K.reshape(points, (N, 4, n_points)),
(1, 0, 2)
)
|
6cc33a77e58a573315caf51b907cd881029e7ea1
| 32,341 |
from typing import Counter
def normalize(vectorOrCounter):
"""
normalize a vector or counter by dividing each value by the sum of all values
"""
normalizedCounter = Counter()
if type(vectorOrCounter) == type(normalizedCounter):
counter = vectorOrCounter
total = float(counter.totalCount())
if total == 0:
return counter
for key in counter.keys():
value = counter[key]
normalizedCounter[key] = value / total
return normalizedCounter
else:
vector = vectorOrCounter
s = float(sum(vector))
if s == 0:
return vector
return [el / s for el in vector]
|
8d4cb0f8be4e7c6eeaba6b49d5a84b024f2c91b9
| 32,342 |
def IsStringInt(string_to_check):
"""Checks whether or not the given string can be converted to an int."""
try:
int(string_to_check)
return True
except ValueError:
return False
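# Examples: IsStringInt("42") -> True; IsStringInt("-7") -> True; IsStringInt("4.2") -> False.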
|
75d83ce78fca205457d4e4325bca80306f248e08
| 32,343 |
import torch
import math
def build_ewc_posterior(data_handlers, mnet, device, config, shared, logger,
writer, num_trained, task_id=None):
"""Build a normal posterior after having trained using EWC.
The posterior is constructed as described in function :func:`test`.
Args:
(....): See docstring of function :func:`probailistic.train_vi.test`.
num_trained (int): The number of output heads that already have been
trained.
task_id (int, optional): If training from scratch, only a specific head
has been trained, that has to be specified via this argument.
Note:
If training from scratch, it is assumed that the correct
``mnet`` (corresponding to ``task_id``) has already been loaded
to memory. This function will not load any checkpoints!
"""
n = num_trained
# Build posterior from Fisher approximations.
is_regression = 'regression' in shared.experiment_type
is_multihead = None
if is_regression:
is_multihead = config.multi_head
else:
is_multihead = config.cl_scenario == 1 or \
config.cl_scenario == 3 and config.split_head_cl3
if is_multihead:
post_means = [None] * len(mnet.internal_params)
post_stds = [None] * len(mnet.internal_params)
out_inds = [pmutils.out_units_of_task(config, data_handlers[i], i,
None) for i in range(n)]
out_masks = [mnet.get_output_weight_mask(out_inds=out_inds[i], \
device=device) for i in range(n)]
for ii, mask in enumerate(out_masks[0]):
pind = mnet.param_shapes_meta[ii]['index']
buff_w_name, buff_f_name = ewc._ewc_buffer_names(None, pind, True)
if mask is None: # Shared parameters.
post_means[pind] = getattr(mnet, buff_w_name)
            # The Hessian approximated in EWC corresponds to the
            # inverse variance.
post_stds[pind] = getattr(mnet, buff_f_name).pow(-.5)
else:
# Initialize head weights to prior.
curr_m = torch.zeros_like(getattr(mnet, buff_w_name)).to(device)
curr_s = torch.ones_like(getattr(mnet, buff_w_name)).\
to(device) * math.sqrt(config.prior_variance)
# Update head weights for trained output heads.
for jj, t_mask in enumerate(out_masks):
# Note, if we train from scratch, then also all previous
# output heads are not trained, thus we let those weights
# follow the prior.
if not config.train_from_scratch or jj == task_id:
m = t_mask[ii]
curr_m[m] = getattr(mnet, buff_w_name)[m]
curr_s[m] = getattr(mnet, buff_f_name)[m].pow(-.5)
post_means[pind] = curr_m
post_stds[pind] = curr_s
# Quick and dirty solution. Note, that a Pytorch `Normal` object with
# zero std will just return the mean.
if hasattr(config, 'det_multi_head') and config.det_multi_head:
post_stds = [torch.zeros_like(t) for t in post_stds]
return post_means, post_stds
return None
|
dd04d235a36516ea600eec154f5a8952ee6ea889
| 32,345 |
from sklearn.metrics import roc_curve  # assumed to be scikit-learn's roc_curve
def get_roc_curve(y_gold_standard, y_predicted):
"""
Computes the Receiver Operating Characteristic.
Keyword arguments:
y_gold_standard -- Expected labels.
y_predicted -- Predicted labels
"""
return roc_curve(y_gold_standard, y_predicted)
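
# Illustrative usage (added sketch, not from the original source), assuming
# scikit-learn's roc_curve: returns false-positive rates, true-positive rates
# and the decision thresholds.
fpr, tpr, thresholds = get_roc_curve([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])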
|
b522ee6566004ec97781585be0ed8946e8f2889e
| 32,346 |
def get_image_unixtime2(ibs, gid_list):
""" alias for get_image_unixtime_asfloat """
return ibs.get_image_unixtime_asfloat(gid_list)
|
2c5fb29359d7a1128fab693d8321d48c8dda782b
| 32,347 |
def create_sql_query(mogrify, data_set_id, user_query):
"""
Creates a sql query and a funtion which transforms the output into a list
of dictionaries with correct field names.
>>> from tests.support.test_helpers import mock_mogrify
>>> query, fn = create_sql_query(mock_mogrify, 'some-collection', Query.create())
>>> query
"SELECT record FROM mongo WHERE collection='some-collection'"
>>> fn([({"foo":"bar"},)])
[{'foo': 'bar'}]
>>> query, fn = create_sql_query(mock_mogrify, 'some-collection', Query.create(group_by=['foo']))
>>> query
"SELECT count(*), record->'foo' FROM mongo WHERE collection='some-collection' AND record->'foo' IS NOT NULL GROUP BY record->'foo'"
>>> fn([[123, 'some-foo-value'], [456, 'other-foo-value']])
[{'_count': 123, 'foo': 'some-foo-value'}, {'_count': 456, 'foo': 'other-foo-value'}]
"""
if user_query.is_grouped:
return _create_grouped_sql_query(mogrify, data_set_id, user_query)
else:
return _create_basic_sql_query(mogrify, data_set_id, user_query)
|
ac56dd8b89da7554111f4e285eb9511fbdef5ced
| 32,348 |
def patch_hass():
"""
Patch the Hass API and returns a tuple of:
- The patched functions (as Dict)
- A callback to un-patch all functions
"""
class MockInfo:
"""Holds information about a function that will be mocked"""
def __init__(self, object_to_patch, function_name, autospec=False):
self.object_to_patch = object_to_patch
self.function_name = function_name
# Autospec will include `self` in the mock signature.
# Useful if you want a sideeffect that modifies the actual object instance.
self.autospec = autospec
actionable_functions_to_patch = [
# Meta
MockInfo(Hass, '__init__', autospec=True), # Patch the __init__ method to skip Hass initialization
# Logging
MockInfo(Hass, 'log'),
MockInfo(Hass, 'error'),
# Scheduler callback registrations functions
MockInfo(Hass, 'run_in'),
MockInfo(Hass, 'run_once'),
MockInfo(Hass, 'run_at'),
MockInfo(Hass, 'run_daily'),
MockInfo(Hass, 'run_hourly'),
MockInfo(Hass, 'run_minutely'),
MockInfo(Hass, 'run_every'),
MockInfo(Hass, 'cancel_timer'),
# Sunrise and sunset functions
MockInfo(Hass, 'run_at_sunrise'),
MockInfo(Hass, 'run_at_sunset'),
# Listener callback registrations functions
MockInfo(Hass, 'listen_event'),
MockInfo(Hass, 'listen_state'),
# State functions / attr
MockInfo(Hass, 'set_state'),
MockInfo(Hass, 'get_state'),
MockInfo(Hass, 'time'),
MockInfo(Hass, 'args'), # Not a function, attribute. But same patching logic
# Interactions functions
MockInfo(Hass, 'call_service'),
MockInfo(Hass, 'turn_on'),
MockInfo(Hass, 'turn_off'),
# Custom callback functions
MockInfo(Hass, 'register_constraint'),
MockInfo(Hass, 'now_is_between'),
MockInfo(Hass, 'notify'),
# Miscellaneous Helper Functions
MockInfo(Hass, 'entity_exists')
]
patches = []
hass_functions = {}
for mock_info in actionable_functions_to_patch:
patch_function = mock.patch.object(mock_info.object_to_patch, mock_info.function_name, create=True,
autospec=mock_info.autospec)
patches.append(patch_function)
patched_function = patch_function.start()
patched_function.return_value = None
hass_functions[mock_info.function_name] = patched_function
def unpatch_callback():
for patch in patches:
patch.stop()
_ensure_compatibility_with_previous_versions(hass_functions)
_mock_logging(hass_functions)
_mock_hass_init(hass_functions)
return hass_functions, unpatch_callback
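
# Illustrative usage sketch (added, not from the original source): patch the
# Hass API before constructing an automation under test, then restore the
# original functions afterwards.
# hass_functions, unpatch = patch_hass()
# try:
#     ...  # instantiate the automation under test and drive it via hass_functions
# finally:
#     unpatch()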
|
400bb38ca7f00da3b1a28bc1ab5c2408be2931c9
| 32,349 |
import gensim
from gensim.models import CoherenceModel
def compute_coherence_values(dictionary, corpus, texts, limit, start=2, step=3):
"""
Compute c_v coherence for various number of topics
Parameters:
----------
dictionary : Gensim dictionary
corpus : Gensim corpus
texts : List of input texts
limit : Max num of topics
Returns:
-------
model_list : List of LDA topic models
coherence_values : Coherence values corresponding to the LDA model with respective number of topics
"""
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
        model = gensim.models.ldamodel.LdaModel(corpus, num_topics=num_topics, random_state=2, id2word=dictionary, iterations=10)
        model_list.append(model)
        coherence_model = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherence_model.get_coherence())
return (model_list, coherence_values)
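
# Illustrative usage sketch (added, not from the original source); `dictionary`,
# `corpus` and `texts` are assumed to be a gensim Dictionary, its bag-of-words
# corpus and the tokenised documents:
# model_list, coherence_values = compute_coherence_values(
#     dictionary=dictionary, corpus=corpus, texts=texts, limit=40, start=2, step=6)
# best_model = model_list[coherence_values.index(max(coherence_values))]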
|
009f637b7ff1d92514711ca5566f2c2c7ee307b0
| 32,350 |
import numpy as np
# AU and DEG are assumed to be unit objects defined elsewhere in this module
# (e.g. astropy-based constants); only the numpy import is added here.
def parallax_angle(sc, **kw) -> DEG:
"""Compute parallax angle from skycoord.
Parameters
----------
sc: SkyCoord
** warning: check if skycoord frame centered on Earth
Returns
-------
p: deg
parallax angle
"""
return np.arctan(1 * AU / sc.spherical.distance)
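
# Illustrative usage sketch (added, not from the original source), assuming the
# astropy ecosystem (AU being the astronomical-unit quantity and the coordinate
# carrying a distance):
# from astropy.coordinates import SkyCoord
# import astropy.units as u
# sc = SkyCoord(ra=10 * u.deg, dec=20 * u.deg, distance=10 * u.pc)
# parallax_angle(sc)   # roughly 0.1 arcsec for a 10 pc source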
|
0d84a98cae93828d1166008fe3d654668a4a178e
| 32,351 |
from datetime import date
def formatting_dates(dates_list):
""" Formatting of both the start and end dates of a historical period.
dates = [period_start_date, period_end_date]"""
new_dates = dates_list
# Change all "BCE" into "BC":
for index1 in range(len(new_dates)):
if " BCE" not in new_dates[index1]:
if " BC" in new_dates[index1]:
new_dates[index1] = str(new_dates[index1]) + "E"
counter = 0
# Change "present" into today's year:
if "present" in new_dates[counter +1]:
new_dates[counter +1] = str(date.today())[:-6]
if "th century" in new_dates[counter]:
pass
# Adding Missing " BCE" and " CE":
elif " CE" not in new_dates[counter] and " CE" not in new_dates[counter +1]:
# Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
if "BCE" not in new_dates[counter] and "BCE" in new_dates[counter +1]:
if int(new_dates[counter]) >= int(new_dates[counter+1][:-3]):
new_dates[counter] = str(new_dates[counter]) + " BCE"
else:
print("Not a valid date.") # PRINT ERROR
# Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
elif "BCE" in new_dates[counter] and "BCE" not in new_dates[counter +1]:
if int(new_dates[counter][:-3]) >= int(new_dates[counter+1]):
new_dates[counter +1] = str(new_dates[counter +1]) + " BCE"
else:
print("Not a valid date, except if end date is CE.") # PRINT ERROR
elif "BCE" not in new_dates[counter] and "BCE" not in new_dates[counter +1]:
# Both dates "Before Common Era" - Add "BCE" if start date higher number than end date:
if int(new_dates[counter]) >= int(new_dates[counter+1]):
new_dates[counter] = str(new_dates[counter]) + " BCE"
new_dates[counter+1] = str(new_dates[counter+1]) + " BCE"
# Both dates "Common Era"
else:
new_dates[counter] = str(new_dates[counter]) + " CE"
new_dates[counter+1] = str(new_dates[counter+1]) + " CE"
# One date "Before Common Era" and one date "Common Era"
elif " BCE" in new_dates[counter] and " CE" in new_dates[counter +1]:
pass
return new_dates
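
# Illustrative example (added, not part of the original snippet): a start date
# with no era marker is interpreted relative to the end date.
assert formatting_dates(["490", "480 BCE"]) == ["490 BCE", "480 BCE"]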
|
174617ad0a97c895187f8c1abe7e6eb53f59da6f
| 32,352 |
from datetime import datetime
def exp_days_f(cppm_class, current_user):
"""
    Compute the user's password expiration and check the force-change-password flag.
1. Calculates days to expiry password for particular user
2. Checks change password force checkbox.
Returns:
exp_days: Number of days until a password expired
change_pwd_next_login: status for force change password checkbox (boolean)
"""
now = datetime.now()
# Get User ID, date of password changing and user attributes
uid, pwd_dt, attr, change_pwd_next_login = pg_sql('user', cppm_class,
current_user)
# print (cppm_class.db_password, current_user)
exp_days = int(cppm_connect_main.days_to_passw_exp) - (now - pwd_dt).days
# print(exp_days, change_pwd_next_login)
return exp_days, change_pwd_next_login
|
e0f014fe4813dd70fd733aa2ed2fa4f06105c2f0
| 32,353 |
def isinteger(x):
"""
determine if a string can be converted to an integer
"""
try:
        int(x)
except ValueError:
return False
except TypeError:
return False
else:
return True
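
# Quick illustrative checks (added, not part of the original snippet):
assert isinteger("7")
assert not isinteger("7.5")
assert not isinteger(None)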
|
b39530a79c39f0937a42335587f30bed26c6ce0a
| 32,354 |
import hashlib
def _get_hash(x):
"""Generate a hash from a string, or dictionary."""
if isinstance(x, dict):
x = tuple(sorted(pair for pair in x.items()))
return hashlib.md5(bytes(repr(x), "utf-8")).hexdigest()
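
# Illustrative property (added, not part of the original snippet): dictionaries
# hash to the same digest regardless of key insertion order.
assert _get_hash({"a": 1, "b": 2}) == _get_hash({"b": 2, "a": 1})
assert len(_get_hash("some string")) == 32  # md5 hex digest length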
|
c47f96c1e7bfc5fd9e7952b471516fbf40470799
| 32,357 |
def wrap_arr(arr, wrapLow=-90.0, wrapHigh=90.0):
"""Wrap the values in an array (e.g., angles)."""
rng = wrapHigh - wrapLow
arr = ((arr-wrapLow) % rng) + wrapLow
return arr
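
# Illustrative usage (added, not part of the original snippet): works on scalars
# as well as arrays, wrapping values back into [-90, 90).
assert wrap_arr(95.0) == -85.0
assert wrap_arr(-100.0) == 80.0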
|
e07e8916ec060aa327c9c112a2e5232b9155186b
| 32,358 |
def task_fail_slack_alert(context):
"""
Callback task that can be used in DAG to alert of failure task completion
Args:
context (dict): Context variable passed in from Airflow
Returns:
None: Calls the SlackWebhookOperator execute method internally
"""
if ENV != "data":
return
if context["dag_run"].external_trigger is True:
return
if context["dag"].is_paused is True:
return
slack_webhook_token = BaseHook.get_connection(SLACK_CONN_ID).password
slack_msg = """
:red_circle: Task Failed.
*Task*: {task}
*Dag*: {dag}
*Execution Time*: {exec_date}
*Running For*: {run_time} secs
*Log Url*: {log_url}
""".format(
task=context["task_instance"].task_id,
dag=context["task_instance"].dag_id,
ti=context["task_instance"],
exec_date=context["execution_date"],
run_time=get_task_run_time(context["task_instance"]),
log_url=context["task_instance"].log_url,
)
failed_alert = SlackWebhookOperator(
task_id=context["task_instance"].task_id,
http_conn_id=SLACK_CONN_ID,
webhook_token=slack_webhook_token,
message=slack_msg,
username="airflow",
)
return failed_alert.execute(context=context)
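
# Illustrative wiring sketch (added, not from the original source): the callback
# is typically attached to tasks via a DAG's default_args.
# default_args = {
#     "owner": "airflow",
#     "on_failure_callback": task_fail_slack_alert,
# }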
|
392d5f3b1df21d8dbe239e700b7ea0bd1d44c49f
| 32,359 |
def largest_negative_number(seq_seq):
"""
Returns the largest NEGATIVE number in the given sequence of
sequences of numbers. Returns None if there are no negative numbers
in the sequence of sequences.
For example, if the given argument is:
[(30, -5, 8, -20),
(100, -2.6, 88, -40, -5),
(400, 500)
]
then this function returns -2.6.
As another example, if the given argument is:
[(200, 2, 20), (500, 400)]
then this function returns None.
Preconditions:
:type seq_seq: (list, tuple)
and the given argument is a sequence of sequences,
where each subsequence contains only numbers.
"""
# -------------------------------------------------------------------------
# DONE: 5. Implement and test this function.
# Note that you should write its TEST function first (above).
#
# CHALLENGE: Try to solve this problem with no additional sequences
# being constructed (so the SPACE allowed is limited to the
# give sequence of sequences plus any non-list variables you want).
# -------------------------------------------------------------------------
largest = 0
for k in range(len(seq_seq)):
for j in range(len(seq_seq[k])):
if seq_seq[k][j] < 0 and largest == 0:
largest = seq_seq[k][j]
if seq_seq[k][j] < 0 and seq_seq[k][j] > largest:
largest = seq_seq[k][j]
if largest != 0:
return largest
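
# Quick checks mirroring the docstring examples (added, not part of the
# original snippet):
assert largest_negative_number([(30, -5, 8, -20), (100, -2.6, 88, -40, -5), (400, 500)]) == -2.6
assert largest_negative_number([(200, 2, 20), (500, 400)]) is None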
|
b7326b3101d29fcc0b8f5921eede18a748af71b7
| 32,360 |
import numpy as np
def align_quaternion_frames(target_skeleton, frames):
"""align quaternions for blending
src: http://physicsforgames.blogspot.de/2010/02/quaternions.html
"""
ref_frame = None
new_frames = []
for frame in frames:
if ref_frame is None:
ref_frame = frame
else:
offset = 3
for joint in target_skeleton.animated_joints:
q = frame[offset:offset + 4]
ref_q = ref_frame[offset:offset + 4]
dot = np.dot(ref_q, q)
if dot < 0:
frame[offset:offset + 4] = -q
offset += 4
new_frames.append(frame)
return new_frames
|
7c8d6f4bacfb3581dc023504b94d2fba66c5e875
| 32,361 |
import math
def do_round(precision=0, method='common'):
"""
Round the number to a given precision. The first
parameter specifies the precision (default is ``0``), the
second the rounding method:
- ``'common'`` rounds either up or down
- ``'ceil'`` always rounds up
- ``'floor'`` always rounds down
If you don't specify a method ``'common'`` is used.
.. sourcecode:: jinja
{{ 42.55|round }}
-> 43
{{ 42.55|round(1, 'floor') }}
-> 42.5
*new in Jinja 1.1*
"""
    if method not in ('common', 'ceil', 'floor'):
raise FilterArgumentError('method must be common, ceil or floor')
if precision < 0:
        raise FilterArgumentError('precision must be a positive integer '
                                  'or zero.')
def wrapped(env, context, value):
if method == 'common':
return round(value, precision)
func = getattr(math, method)
if precision:
            return func(value * 10 ** precision) / (10 ** precision)
else:
return func(value)
return wrapped
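
# Illustrative usage (added sketch, not from the original source): the factory
# returns the actual filter callable, which Jinja would invoke as (env, context, value).
floor_to_tenths = do_round(1, 'floor')
assert floor_to_tenths(None, None, 42.55) == 42.5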
|
3e2b4c6c842ca5c3f60951559a815f27cc8edd19
| 32,362 |
import torch
from torch import Tensor
from torchmetrics.utilities.checks import _check_same_shape
def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
"""Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value is in general
considered an overall measure of how good a source sound.
Args:
preds:
shape ``[...,time]``
target:
shape ``[...,time]``
zero_mean:
If to zero mean target and preds or not
Returns:
si-sdr value of shape [...]
Example:
>>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
>>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
>>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
>>> scale_invariant_signal_distortion_ratio(preds, target)
tensor(18.4030)
References:
[1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP) 2019.
"""
_check_same_shape(preds, target)
EPS = torch.finfo(preds.dtype).eps
if zero_mean:
target = target - torch.mean(target, dim=-1, keepdim=True)
preds = preds - torch.mean(preds, dim=-1, keepdim=True)
alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + EPS) / (
torch.sum(target ** 2, dim=-1, keepdim=True) + EPS
)
target_scaled = alpha * target
noise = target_scaled - preds
val = (torch.sum(target_scaled ** 2, dim=-1) + EPS) / (torch.sum(noise ** 2, dim=-1) + EPS)
val = 10 * torch.log10(val)
return val
|
2ec9e4d3cbd0046940974f8d7bae32e230da63ed
| 32,363 |
import json
from datetime import datetime, timedelta
def is_token_valid():
"""Check whether the stored token is still valid.
:returns: A bool.
"""
try:
with open('/tmp/tngcli.txt', 'r') as file:
for line in file:
payload = json.loads(line)
    except (OSError, ValueError):
        # No usable token file found.
        return False
exp_t = payload[env.get_sp_path()]['exp_t']
exp_t_datetime = datetime.strptime(exp_t, '%Y-%m-%d %H:%M')
return (datetime.now() - exp_t_datetime) < timedelta(minutes=58)
|
2574245a38a02bdba7b2fee8f5dff807b128316f
| 32,364 |
def DB_getQanswer(question):
"""
Calls the function in the database that gets the question answer to the
input question.
"""
return DB.get_question_answer(question)
|
8afb32f1e8b39d3ff89b3c9fe02a314099a416ef
| 32,365 |
def _state_senate_slide_preview(slug):
"""
Preview a state slide outside of the stack.
"""
context = make_context()
resp = _state_senate_slide(slug)
if resp.status_code == 200:
context['body'] = resp.data
return render_template('slide_preview.html', **context)
else:
return "404", 404
|
c9139df85745feca150fd22591e85165969952de
| 32,366 |
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope as vs
def tensor_network_tt_einsum(inputs, states, output_size, rank_vals, bias, bias_start=0.0):
    """Tensor-train decomposition of the full transition tensor (TF1-style graph code)."""
num_orders = len(rank_vals)+1#alpha_1 to alpha_{K-1}
num_lags = len(states)
batch_size = tf.shape(inputs)[0]
state_size = states[0].get_shape()[1].value #hidden layer size
input_size= inputs.get_shape()[1].value
total_state_size = (state_size * num_lags + 1 )
# These bookkeeping variables hold the dimension information that we'll
# use to store and access the transition tensor W efficiently.
mat_dims = np.ones((num_orders,)) * total_state_size
# The latent dimensions used in our tensor-train decomposition.
# Each factor A^i is a 3-tensor, with dimensions [a_i, hidden_size, a_{i+1}]
# with dimensions [mat_rank[i], hidden_size, mat_rank[i+1] ]
# The last
# entry is the output dimension, output_size: that dimension will be the
# output.
mat_ranks = np.concatenate(([1], rank_vals, [output_size]))
# This stores the boundary indices for the factors A. Starting from 0,
# each index i is computed by adding the number of weights in the i'th
# factor A^i.
mat_ps = np.cumsum(np.concatenate(([0], mat_ranks[:-1] * mat_dims * mat_ranks[1:])),dtype=np.int32)
mat_size = mat_ps[-1]
# Compute U * x
weights_x = vs.get_variable("weights_x", [input_size, output_size] )
out_x = tf.matmul(inputs, weights_x)
# Get a variable that holds all the weights of the factors A^i of the
# transition tensor W. All weights are stored serially, so we need to do
# some bookkeeping to keep track of where each factor is stored.
mat = vs.get_variable("weights_h", mat_size) # h_z x h_z... x output_size
#mat = tf.Variable(mat, name="weights")
states_vector = tf.concat(states, 1)
states_vector = tf.concat( [states_vector, tf.ones([batch_size, 1])], 1)
"""form high order state tensor"""
states_tensor = states_vector
for order in range(num_orders-1):
states_tensor = _outer_product(batch_size, states_tensor, states_vector)
# print("tensor product", states_tensor.name, states_tensor.get_shape().as_list())
cores = []
for i in range(num_orders):
# Fetch the weights of factor A^i from our big serialized variable weights_h.
mat_core = tf.slice(mat, [mat_ps[i]], [mat_ps[i + 1] - mat_ps[i]])
mat_core = tf.reshape(mat_core, [mat_ranks[i], total_state_size, mat_ranks[i + 1]])
cores.append(mat_core)
out_h = tensor_train_contraction(states_tensor, cores)
# Compute h_t = U*x_t + W*H_{t-1}
res = tf.add(out_x, out_h)
# print "END OF CELL CONSTRUCTION"
# print "========================"
# print ""
if not bias:
return res
biases = vs.get_variable("biases", [output_size])
return nn_ops.bias_add(res,biases)
|
b9cabf2e76e3b18d73d53968b4578bedc3d7bb7e
| 32,367 |
from .observable.case import case_
from typing import Callable
from typing import Mapping
from typing import Optional
from typing import Union
def case(
mapper: Callable[[], _TKey],
sources: Mapping[_TKey, Observable[_T]],
default_source: Optional[Union[Observable[_T], "Future[_T]"]] = None,
) -> Observable[_T]:
"""Uses mapper to determine which source in sources to use.
.. marble::
:alt: case
--1---------------|
a--1--2--3--4--|
b--10-20-30---|
[case(mapper, { 1: a, 2: b })]
---1--2--3--4--|
Examples:
>>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 })
>>> res = reactivex.case(mapper, { '1': obs1, '2': obs2 }, obs0)
Args:
mapper: The function which extracts the value for to test in a
case statement.
sources: An object which has keys which correspond to the case
statement labels.
default_source: [Optional] The observable sequence or Future that will
be run if the sources are not matched. If this is not provided,
it defaults to :func:`empty`.
Returns:
An observable sequence which is determined by a case statement.
"""
return case_(mapper, sources, default_source)
|
3ecc790a3e6e7e30e4f0a34e06dbfc9e2875388c
| 32,368 |
from typing import Tuple
import json
def group_to_stats(request, project_id) -> Tuple:
"""
    Combine the shared filtering and grouping steps used to build chart statistics.
"""
filters = json.loads(request.query_params.get('filters', '{}')) # date time, issue type, method
group_by = request.query_params.get('groupBy', 'hours')
requests_stats = RequestStat.objects.filter(project_id=project_id, **filters).order_by('created')
group_type = group_types[group_by]
return requests_stats, group_type
|
7a27fa180fd0e1bf059d11bd7995cdea0a85c6cf
| 32,369 |