content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64 0 to 3.66M) |
---|---|---|
def next(space, w_arr):
""" Advance the internal array pointer of an array """
length = w_arr.arraylen()
current_idx = w_arr.current_idx + 1
if current_idx >= length:
w_arr.current_idx = length
return space.w_False
w_arr.current_idx = current_idx
return w_arr._current(space) | 668fec305ed6bbe05895f317e284c7d2e4f83189 | 8,700 |
def geocoordinatess_id_get(id, username=None): # noqa: E501
"""Get a single GeoCoordinates by its id
Gets the details of a given GeoCoordinates (more information in https://w3id.org/okn/o/sdm#GeoCoordinates) # noqa: E501
:param id: The ID of the GeoCoordinates to be retrieved
:type id: str
:param username: Name of the user graph to query
:type username: str
:rtype: GeoCoordinates
"""
return query_manager.get_resource(id=id,
username=username,
rdf_type_uri=GEOCOORDINATES_TYPE_URI,
rdf_type_name=GEOCOORDINATES_TYPE_NAME,
kls=GeoCoordinates) | 3ac772eab95915ac0030187f22da74f9965f6dfc | 8,701 |
def check_callable(target, label=None):
"""Checks target is callable and then returns it."""
if not callable(target):
raise TypeError('Expected {} callable, found non-callable {}.'.format(
'{} to be'.format(label) if label is not None else 'a',
type_string(type(target))))
return target | a22006b72e04adb47eeef0ee418301cecdbfde0b | 8,702 |
import re
def convert_dictionary_values(d, map={}):
"""convert string values in a dictionary to numeric types.
Arguments
d : dict
The dictionary to convert
map : dict
If map contains 'default', a default conversion is enforced.
For example, to force int for every column but column ``id``,
supply map = {'default' : "int", "id" : "str" }
"""
rx_int = re.compile(r"^\s*[+-]*[0-9]+\s*$")
rx_float = re.compile(r"^[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?$")
# pre-process with 'default'
if "default" in map:
k = "default"
if map[k] == "int":
default = int
elif map[k] == "float":
default = float
elif map[k] == "string":
default = str
else:
default = False
for k, vv in list(d.items()):
if vv is None:
continue
v = vv.strip()
try:
if k in map:
if map[k] == "int":
d[k] = int(v)
elif map[k] == "float":
d[k] = float(v)
elif map[k] == "string":
pass
continue
elif default:
if v != "":
d[k] = default(v)
else:
d[k] = v
continue
except TypeError as msg:
raise TypeError("conversion in field: %s, %s" % (k, msg))
try:
if rx_int.match(v):
d[k] = int(v)
elif rx_float.match(v):
d[k] = float(v)
except TypeError as msg:
raise TypeError(
"expected string or buffer: offending value = '%s' " % str(v))
except ValueError as msg:
raise ValueError("conversion error: %s, %s" % (msg, str(d)))
return d | 4ecbd8ddd53324c3a83ce6b5bbe3ef0e5a86bc1e | 8,703 |
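A minimal usage sketch for convert_dictionary_values; the row keys, values, and the map are made up for illustration, and the function above is assumed to be in scope.
row = {"id": "42", "price": "3.50", "flag": "yes"}
converted = convert_dictionary_values(row, map={"default": "string", "id": "int"})
# "id" is forced to int, every other key falls back to the "default" (string) conversion
print(converted)  # {'id': 42, 'price': '3.50', 'flag': 'yes'}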
def GetLimitPB(user, action_type):
"""Return the apporiate action limit PB part of the given User PB."""
if action_type == PROJECT_CREATION:
if not user.project_creation_limit:
user.project_creation_limit = user_pb2.ActionLimit()
return user.project_creation_limit
elif action_type == ISSUE_COMMENT:
if not user.issue_comment_limit:
user.issue_comment_limit = user_pb2.ActionLimit()
return user.issue_comment_limit
elif action_type == ISSUE_ATTACHMENT:
if not user.issue_attachment_limit:
user.issue_attachment_limit = user_pb2.ActionLimit()
return user.issue_attachment_limit
elif action_type == ISSUE_BULK_EDIT:
if not user.issue_bulk_edit_limit:
user.issue_bulk_edit_limit = user_pb2.ActionLimit()
return user.issue_bulk_edit_limit
elif action_type == FLAG_SPAM:
if not user.flag_spam_limit:
user.flag_spam_limit = user_pb2.ActionLimit()
return user.flag_spam_limit
elif action_type == API_REQUEST:
if not user.api_request_limit:
user.api_request_limit = user_pb2.ActionLimit()
return user.api_request_limit
raise Exception('unexpected action type %r' % action_type) | 91f9289d3be149112d08409b1cf1e2c8e68a9668 | 8,704 |
import numpy as np
def best_int_dtype(data):
"""get bit depth required to best represent float data as int"""
d, r = divmod(np.log2(data.ptp()), 8)
d = max(d, 1)
i = (2 ** (int(np.log2(d)) + bool(r)))
return np.dtype('i%d' % i) | c8d54b10ba67a83250312668f7cd09b99e47bf56 | 8,705 |
def gen_decorate_name(*args):
"""
gen_decorate_name(name, mangle, cc, type) -> bool
Generic function for 'decorate_name()' (may be used in IDP modules)
@param name (C++: const char *)
@param mangle (C++: bool)
@param cc (C++: cm_t)
@param type (C++: const tinfo_t *)
"""
return _ida_typeinf.gen_decorate_name(*args) | 963f6bfc5ca30a7552f881d8c9f030c0c1653fce | 8,706 |
def main(self, count=10):
"""
kosmos -p 'j.servers.myjobs.test("start")'
"""
self.reset()
def wait_1sec():
gevent.sleep(1)
return "OK"
ids = []
for x in range(count):
job_sch = self.schedule(wait_1sec)
ids.append(job_sch.id)
self._workers_gipc_nr_max = 1
self.workers_subprocess_start()
res = self.results(ids, timeout=120)
print(res)
self.stop(reset=True)
print("TEST OK") | 0634f76d33d6b32150f367d6c598f5c520991ef3 | 8,707 |
import pandas as pd
import requests
def get_asc() -> pd.DataFrame:
"""Get Yahoo Finance small cap stocks with earnings growth rates better than 25%. [Source: Yahoo Finance]
Returns
-------
pd.DataFrame
Most aggressive small cap stocks
"""
url = "https://finance.yahoo.com/screener/predefined/aggressive_small_caps"
data = pd.read_html(requests.get(url).text)[0]
return data | 7d7d9810782950434a0752c97984f20df74a3366 | 8,708 |
def getEnabled(chat_id):
"""Gets the status of a conversation"""
status = EnableStatus.get_by_id(str(chat_id))
if status:
return status.enabled
return False | 24d7ca4f197f6e4dc4c9c54e59824ff4fc89114e | 8,709 |
def create_app(config=DevelopConfig):
"""App factory."""
app = Flask(
__name__.split('.')[0],
static_url_path='/static',
static_folder=f'{config.PROJECT_PATH}/src/static'
)
app.url_map.strict_slashes = False
app.config.from_object(config)
register_extensions(app)
register_blueprints(app)
register_shellcontext(app)
register_adminpanel(app)
register_sessions(app)
register_github_oauth(app)
register_before_hooks(app)
register_commands(app)
register_mail_settings(app)
register_secret(app)
return app | 0154d3ebb00ae869c2f1bb2a2392e2bde74e36b4 | 8,710 |
def merge_inputs_for_create(task_create_func):
"""Merge all inputs for start operation into one dict"""
# Needed to wrap the wrapper because I was seeing issues with
# "RuntimeError: No context set in current execution thread"
def wrapper(**kwargs):
# NOTE: ctx.node.properties is an ImmutableProperties instance which is
# why it is passed into a mutable dict so that it can be deep copied
return _wrapper_merge_inputs(task_create_func,
dict(ctx.node.properties), **kwargs)
return wrapper | 119ab1b40ba84959b960295b35e668de7296929f | 8,711 |
def embedding_lookup(params, ids):
"""Wrapper around ``tf.nn.embedding_lookup``.
This converts gradients of the embedding variable to tensors, which allows
the use of optimizers that don't support sparse gradients (e.g. Adafactor).
Args:
params: The embedding tensor.
ids: The ids to lookup in :obj:`params`.
Returns:
A ``tf.Tensor``, the embeddings that correspond to :obj:`ids`.
"""
params = convert_gradient_to_tensor(params)
return tf.nn.embedding_lookup(params, ids) | 774595aaf119ab93928095f397bc4ff7f5ebad53 | 8,712 |
from typing import TypeVar
T = TypeVar("T")
def value_as_unit(value: T | None, unit: Unit = None) -> T | Quantity[T] | None:
"""Return value as specified unit or sensor fault if value is none."""
if value is None:
return None
if unit is None:
return value
return value * unit | 3f96d837a40894d589fbae3f40ca6adf220a9d56 | 8,713 |
import numpy
def get_static_spatial_noise_image(image) :
""" The first step is to sum all of the odd-numbered images (sumODD image)
and separately sum all of the even-numbered images (sumEVEN image). The
difference between the sum of the odd images and the sum of the even
images (DIFF = sumODD - sumEVEN) is taken as a raw measure of static
spatial noise. (p. 828-829)
"""
image_odd = image[range(1, image.shape[0],2)].astype(numpy.single)
sum_odd = numpy.sum(image_odd, 0)
image_even = image[range(0, image.shape[0],2)].astype(numpy.single)
sum_even = numpy.sum(image_even, 0)
diff = sum_odd-sum_even
return medipy.base.Image(data=diff,
origin=image.origin[1:], spacing=image.spacing[1:],
direction=image.direction[1:,1:]) | 511275fefc2368c6d3976ea420e11fcf1a913f8c | 8,714 |
import os
def get_gallery_dir() -> str:
"""
Return the path to the mephisto task gallery
"""
return os.path.join(get_root_dir(), "gallery") | d3bf9455b48429b1709f249ca5691237821e4566 | 8,715 |
import random
def get_next_action():
""" gets the next action to perform, based on get_action_odds """
action_odds = get_action_odds()
#print(f"DEBUG action_odds {action_odds}")
# get the sum of all the action odds values
total = 0
for action in action_odds:
#print(f"DEBUG get_next_action total {total} adding action {action} odds {action_odds[action]}")
total += action_odds[action]
#print(f"DEBUG get_next_action total now {total}")
# get a random number from 1..sum
val = random.randint(1,total)
#print(f"DEBUG get_next_action val {val} is 1..{total}")
# now, check if the value is <= the first action.
# If so, use that. If not, reduce the sum by that number, and check the next action.
for action in action_odds:
odds = action_odds[action]
if val <= odds:
return action
val -= odds
raise Exception("random action was greater than sum of odds, this shouldn't be possible") | 372676573e69afe045c88742591555fcfe42766a | 8,716 |
def get_movie_title(movie_id):
"""
Takes in an ID, returns a title
"""
movie_id = int(movie_id)-1
return items.iloc[movie_id]['TITLE'] | e3e0694eb35923ce3a6f528a4b9ac622044b9159 | 8,717 |
import logging
def get_logger():
"""
Return a logger object
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger | a23531364d947a83ace175ba02212ff57ba7e0ea | 8,718 |
def enough_gap_since_last_obs(df, current_state, obs_log):
"""
Determine if a sufficient time has passed since the last observation
in this subprogram (in any filter):
"""
now = current_state['current_time'].mjd
# don't mess with the upstream data structure
df = df.copy()
grp = df.groupby(['program_id','subprogram_name'])
df['ref_obs_mjd'] = np.nan
for grpi, dfi in grp:
ref_obs = obs_log.select_last_observed_time_by_field(
field_ids = set(dfi['field_id'].tolist()),
program_ids = [grpi[0]],
subprogram_names = [grpi[1]])
if len(ref_obs) > 0:
tmp = pd.merge(df, ref_obs, left_on='field_id', right_index=True,
how='inner')
df.loc[tmp.index, 'ref_obs_mjd'] = tmp.expMJD.values
# give a fake value for fields unobserved
df.loc[df['ref_obs_mjd'].isnull(), 'ref_obs_mjd'] = 58119.0
# calculate dt
df['dt'] = now - df['ref_obs_mjd']
return df['dt'] >= (df['intranight_gap_min']*(1*u.minute).to(u.day).value) | 3a58a7d03074eec6458b4a10addc40953d01da8b | 8,719 |
def find_nearest_feature_to_attribute(sentence, features, attribute):
"""
Parameters
----------
sentence: str,
One sentence from the info text of a mushroom species
features: list of strs
List of possible features as in dataset_categories.features_list
attribute: str,
Mushroom feature attribute that is in the sentence (e.g. 'red' for 'cap color').
Return
------
str,
The feature in features that is closest to attribute in word steps.
Example
-------
sentences[2] = "The entire young fruitbody is enclosed in a white veil which leaves fragments (which may wash off)
on the shiny red, marginally grooved cap." (for simplicity only one sentence is considered)
features = dataset_categories.features_list (relevant here: 'cap', 'veil')
attribute = 'white'
return:
'veil' (since 'veil' is closer to 'white' than 'cap')
"""
min_distance = float('inf')
min_distance_index = 0
for i in range(0, len(features)):
if features[i] in sentence:
word_distance = get_word_distance(sentence, features[i], attribute)
if word_distance < min_distance:
min_distance = word_distance
min_distance_index = i
return features[min_distance_index] | 6877ec945870cce9a0873a713830fa5f830408fc | 8,720 |
from datetime import datetime
def lists():
"""
Inventory list
:return:
"""
template_name = 'inventory/lists.html'
# Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('inventory lists')
# Search criteria
form = InventorySearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
form.rack_id.choices = get_rack_choices(form.warehouse_id.data)
# app.logger.info('')
inventory_brand_choices = [(brand, brand) for brand in get_distinct_inventory_brand(status_delete=STATUS_DEL_NO) if
brand != '']
form.production_brand.choices = DEFAULT_SEARCH_CHOICES_STR + inventory_brand_choices
search_condition = [
Inventory.status_delete == STATUS_DEL_NO,
Inventory.stock_qty_current > 0,
]
if request.method == 'POST':
# Form validation failed
if not form.validate_on_submit():
flash(_('Search Failure'), 'danger')
# Handle csrf_token separately
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.warehouse_id == form.warehouse_id.data)
if form.rack_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Inventory.rack_id == form.rack_id.data)
if form.production_brand.data != DEFAULT_SEARCH_CHOICES_STR_OPTION:
search_condition.append(Inventory.production_brand == form.production_brand.data)
if form.production_model.data:
search_condition.append(Inventory.production_model.like('%%%s%%' % form.production_model.data))
# Handle export
if form.op.data == OPERATION_EXPORT:
# Check export permission
if not permission_inventory_section_export.can():
abort(403)
column_names = Inventory.__table__.columns.keys()
query_sets = get_inventory_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('inventory lists')
)
# Batch delete
if form.op.data == OPERATION_DELETE:
# Check delete permission
if not permission_inventory_section_del.can():
abort(403)
inventory_ids = request.form.getlist('inventory_id')
result_total = True
for inventory_id in inventory_ids:
current_time = datetime.utcnow()
inventory_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_inventory(inventory_id, inventory_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
# Pagination data
pagination = get_inventory_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
# Render the template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
) | 25c561167cc34eba5bd8bf8123007961d28165e3 | 8,721 |
def open_1d_txt(filename, xaxcol=0, datacol=1, errorcol=2,
text_reader='simple', format=None, **kwargs):
"""
Attempt to read a 1D spectrum from a text file assuming wavelength as the
first column, data as the second, and (optionally) error as the third.
Reading can be done either with astropy.io.ascii or a 'simple' reader. If
you have an IPAC, CDS, or formally formatted table, you'll want to use
astropy.io.ascii and specify a format.
If you have a simply formatted file of the form, e.g.
# name name
# unit unit
data data
data data
kwargs are passed to astropy.io.ascii.read
"""
if text_reader in ('simple','readcol'):
if text_reader == 'simple':
data, error, XAxis, T = simple_txt(filename, xaxcol=xaxcol,
datacol=datacol,
errorcol=errorcol, **kwargs)
elif text_reader == 'readcol':
Tlist = readcol.readcol(filename, twod=False, **kwargs)
XAxis = units.SpectroscopicAxis(Tlist[xaxcol])
data = Tlist[datacol]
error = Tlist[errorcol]
T = dummy_class()
T.data = dummy_class()
T.data.dtype = dummy_class()
T.columns = {}
T.columns[T.data.dtype.names[xaxcol]] = dummy_class()
T.columns[T.data.dtype.names[datacol]] = dummy_class()
elif text_reader in ('ascii', 'astropy', 'asciitable'):
T = ascii.read(filename, format=format, **kwargs)
xarr = T.data[T.data.dtype.names[xaxcol]]
data = T.data[T.data.dtype.names[datacol]]
if len(T.columns) > errorcol:
error = T.data[T.data.dtype.names[errorcol]]
else:
# assume uniform, zero error
error = data*0
if 'xunits' in T.keywords:
xunits = T.keywords['xunits']
else:
xunits = 'unknown'
XAxis = units.SpectroscopicAxis(xarr,xunits)
# Need this in Spectrum class to correctly parse header
T.xaxcol = xaxcol
T.datacol = datacol
return data, error, XAxis, T | 133361ba2ba75a1f13f8768c6130601db3d870ec | 8,722 |
def clean_record(raw_string: str) -> str:
"""
Removes all unnecessary signs from a raw_string and returns it
:param raw_string: folder or file name to manage
:return: clean value
"""
for sign in ("'", '(', ')', '"'):
raw_string = raw_string.replace(sign, '')
return raw_string.replace(' ', '-').replace('--', '-') | ea484934dc10da879ede883287fc1d650cda74b8 | 8,723 |
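A quick usage example for clean_record with a made-up folder name:
print(clean_record("My Favourite Album (Deluxe 'Edition')"))  # My-Favourite-Album-Deluxe-Edition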
import pandas as pd
import numpy as np
def df_of_tables_for_dd_ids(dd_ids, sqlite_tables, sql_con):
"""
:param list dd_ids: list of Deep Dive IDs to retrieve
:param list sqlite_tables: list of SQLite tables to join
:param sqlalchemy.create_engine sql_con: Connection to SQLite (can be \
omitted)
:returns: `pandas.DataFrame` -- dataframe of tables, joined using the Deep \
Dive IDs.
"""
dd_ids_str = ','.join(['"{}"'.format(x) for x in dd_ids])
query_fmt = 'select * from {} where dd_id in ({})'.format
df = pd.read_sql(query_fmt(sqlite_tables[0], dd_ids_str), sql_con).drop_duplicates()
df['dd_id'] = df.dd_id.astype(int)
for s_t in sqlite_tables[1:]:
df_2 = pd.read_sql(query_fmt(s_t, dd_ids_str), sql_con)
df_2['dd_id'] = df_2.dd_id.astype(int)
# We use outer joins because dd_ids in one table may be missing from the other.
df = df.merge(df_2, on=['dd_id'], how='outer')
if 'post_date' in df:
df['post_date'] = df.post_date.apply(pd.to_datetime)
if 'duration_in_mins' in df:
df['duration_in_mins'] = df.duration_in_mins.apply(lambda x: float(x) if x != '' else np.nan)
# I melted some rows when making this, and it's proven a mistake. Let's unmelt
melted_cols = ['ethnicity', 'flag']
for m_c in melted_cols:
if m_c in df.columns:
df = aggregated_df(df, m_c, 'dd_id', '|')
return df | 282d3e9bda8e38687660c21323f8bb3ea40abbd2 | 8,724 |
from typing import Union
def get_group_type(group: Union[hou.EdgeGroup, hou.PointGroup, hou.PrimGroup]) -> int:
"""Get an HDK compatible group type value.
:param group: The group to get the group type for.
:return: An HDK group type value.
"""
try:
return _GROUP_TYPE_MAP[type(group)]
except KeyError as exc:
raise ValueError("Invalid group type") from exc | e8b708911760c99c6e3c23d39b4fc4d205380bac | 8,725 |
def mp2d_driver(jobrec, verbose=1):
"""Drive the jobrec@i (input) -> mp2drec@i -> mp2drec@io -> jobrec@io (returned) process."""
return module_driver(
jobrec=jobrec, module_label='mp2d', plant=mp2d_plant, harvest=mp2d_harvest, verbose=verbose) | efa3cf31714719f87c239dbd6cdd1aad80982647 | 8,726 |
def query_user_list():
"""
Retrieve list of users on user watch list.
"""
conn = connect.connect()
cur = conn.cursor()
cur.execute("SELECT * FROM watched_users")
watched_users = cur.fetchall()
return watched_users | 2de40bc963503e4c87d7bc15409bf4803c5c87a6 | 8,727 |
def service_stop_list(service_id, direction):
""" Queries all patterns for a service and creates list of stops sorted
topologically.
:param service_id: Service ID.
:param direction: Groups journey patterns by direction - False for
outbound and True for inbound.
"""
graph, dict_stops = service_graph_stops(service_id, direction)
if not dict_stops:
raise ValueError(f"No stops exist for service ID {service_id}")
return [dict_stops[v] for v in graph.sequence()] | c2eaf08853469597a83647a3e1ec5fc6a7b02ced | 8,728 |
from decimal import Decimal
def convert_coord(value):
"""Convert a GPS value into degrees/minutes/seconds form
Args:
value(str): longitude or latitude as read from the GPS
Returns:
list: [degrees, minutes, seconds]
"""
v1, v2 = value.split('.')
v2_dec = Decimal(f'0.{v2}') * 60 # + Decimal(random.random())
return [v1[:-2], v1[-2:], v2_dec.to_eng_string()] | 4269e9d9b58e3d7ce42c82cd0299abac6c740499 | 8,729 |
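A worked example for convert_coord, assuming the NMEA-style ddmm.mmmm input that the split logic implies; the sample value is illustrative.
# '4916.45' reads as 49 degrees and 16.45 minutes; 0.45 minutes * 60 = 27.00 seconds
print(convert_coord('4916.45'))  # ['49', '16', '27.00']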
import torch
def _interpolate_zbuf(
pix_to_face: torch.Tensor, barycentric_coords: torch.Tensor, meshes
) -> torch.Tensor:
"""
A helper function to calculate the z buffer for each pixel in the
rasterized output.
Args:
pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
of the faces (in the packed representation) which
overlap each pixel in the image.
barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
the barycentric coordinates of each pixel
relative to the faces (in the packed
representation) which overlap the pixel.
meshes: Meshes object representing a batch of meshes.
Returns:
zbuffer: (N, H, W, K) FloatTensor
"""
verts = meshes.verts_packed()
faces = meshes.faces_packed()
faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1)
zbuf = interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[
..., 0
] # (1, H, W, K)
zbuf[pix_to_face == -1] = -1
return zbuf | b54d6c44fd23f842b13cb7ac1984f77c7a6a31a4 | 8,730 |
from typing import Sequence
def pp_chain(chain: Sequence[Subtree]) -> str:
"""Pretty-print a chain
"""
return ' '.join(
s.label if isinstance(s, ParentedTree) else str(s)
for s in chain
) | 372488b64c86c2af459d67e5ddde0a77fa26fb5c | 8,731 |
def ptr_ty(ty : 'LLVMType') -> 'LLVMPointerType':
"""``ty*``, i.e. a pointer to a value of type ``ty``."""
return LLVMPointerType(ty) | 79c7d304c4cd20937abe982311b2ff6ff17a01f9 | 8,732 |
import numpy as np
from pandas import Series, isnull
from scipy.interpolate import InterpolatedUnivariateSpline
def series_spline(self):
"""Fill NaNs using a spline interpolation."""
inds, values = np.arange(len(self)), self.values
invalid = isnull(values)
valid = ~invalid
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
inds = inds[firstIndex:]
result = values.copy()
s = InterpolatedUnivariateSpline(inds[valid], values[firstIndex:][valid])
result[firstIndex:][invalid] = s(inds[invalid])
return Series(result, index=self.index, name=self.name) | 1fbbf66efc7e6c73bdcc3c63aab83237e434aa79 | 8,733 |
def label(job_name, p5_connection=None):
"""
Syntax: Job <name> label
Description: Returns the (human readable) job label.
The following labels are returned:
Archive, Backup, Synchronize and System.
A Job label can be used in conjunction with the Job describe command to
better display the job record in various list displays.
Return Values:
-On Success: the job label
"""
method_name = "label"
return exec_nsdchat([module_name, job_name, method_name], p5_connection) | ec58cbb085cb06f5ad8f2c2d04ee6cd9d3638984 | 8,734 |
import math
from itertools import chain
def rating(pairing, previous):
"""The lower the rating value is the better"""
current = set(chain.from_iterable(pair[1] for pair in pairing))
overlaps = current & set(previous)
if overlaps:
return sum(math.pow(0.97, previous[overlap] / 86400) for overlap in overlaps)
return 0.0 | af86bf1c1bbe036e20e3a1e7bcff0dec09d382cf | 8,735 |
from typing import Optional
from typing import Dict
def copy_multipart_passthrough(src_blob: AnyBlob,
dst_blob: CloudBlob,
compute_checksums: bool=False) -> Optional[Dict[str, str]]:
"""
Copy from `src_blob` to `dst_blob`, passing data through the executing instance.
Optionally compute checksums.
"""
checksums: Optional[dict] = None
if compute_checksums:
checksums = {SSDSObjectTag.SSDS_MD5: checksum.S3EtagUnordered(),
SSDSObjectTag.SSDS_CRC32C: checksum.GScrc32cUnordered()}
with dst_blob.multipart_writer() as writer:
for part in src_blob.parts():
if checksums is not None:
for cs in checksums.values():
cs.update(part.number, part.data)
writer.put_part(part)
if checksums is not None:
return {key: cs.hexdigest() for key, cs in checksums.items()}
else:
return None | ba37598a55e00252f879e66c1438681f0033de34 | 8,736 |
import csv
def read_manifest_from_csv(filename):
"""
Read the ballot manifest into a list in the format ['batch id : number of ballots']
from CSV file named filename
"""
manifest = []
with open(filename, newline='') as csvfile:
reader = csv.reader(csvfile, delimiter = ",")
for row in reader:
# row.remove(row[1])
batch = " , ".join(row)
manifest.append(batch)
return manifest[1:] | b04b6a1b20512c27bb83a7631346bc6553fdc251 | 8,737 |
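A small usage sketch for read_manifest_from_csv; the file name and its contents are hypothetical.
with open('manifest.csv', 'w') as f:
    f.write('Batch,Ballots\nA-1,120\nA-2,95\n')
# the header row is dropped, each remaining row becomes 'batch id , ballot count'
print(read_manifest_from_csv('manifest.csv'))  # ['A-1 , 120', 'A-2 , 95']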
def siblings_list():
"""
Shows child element iteration
"""
o = untangle.parse(
"""
<root>
<child name="child1"/>
<child name="child2"/>
<child name="child3"/>
</root>
"""
)
return ",".join([child["name"] for child in o.root.child]) | 06737cb187e18c9fa8b9dc9164720e68f5fd2c36 | 8,738 |
import sys
def max_distance_from_home(traj, start_night='22:00', end_night='07:00', show_progress=True):
"""
Compute the maximum distance from home (in kilometers) traveled by an individual.
:param traj: the trajectories of the individuals
:type traj: TrajDataFrame
:param str start_night: the starting time for the night (format HH:MM)
:param str end_night: the ending time for the night (format HH:MM)
:param show_progress: if True show a progress bar
:type show_progress: boolean
:return: the maximum distance from home of the individuals
:rtype: pandas DataFrame
Examples:
Computing the maximum distance from home of each individual in a TrajDataFrame
>>> import skmob
>>> from skmob.measures.individual import max_distance_from_home
>>> from skmob import TrajDataFrame
>>> tdf = TrajDataFrame.from_file('../data_test/brightkite_data.csv', user_id='user', datetime='check-in time', latitude='latitude', longitude='longitude')
>>> max_distance_from_home(tdf).head()
uid max_distance_from_home
0 1 46.409510
1 2 68.499333
2 3 56.806038
3 4 78.949592
4 5 69.393777
.. seealso:: :func:`maximum_distance`, :func:`home_location`
References:
.. [canzian2015trajectories] Luca Canzian and Mirco Musolesi. "Trajectories of depression: unobtrusive monitoring of depressive states by means of smartphone mobility traces analysis." In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing (UbiComp '15), 1293--1304, 2015.
"""
# if 'uid' column in not present in the TrajDataFrame
if constants.UID not in traj.columns:
return pd.DataFrame([_max_distance_from_home_individual(traj,
start_night=start_night,
end_night=end_night)], columns=[sys._getframe().f_code.co_name])
if show_progress:
df = traj.groupby(constants.UID).progress_apply(lambda x: _max_distance_from_home_individual(x, start_night=start_night, end_night=end_night))
else:
df = traj.groupby(constants.UID).apply(lambda x: _max_distance_from_home_individual(x, start_night=start_night, end_night=end_night))
return pd.DataFrame(df).reset_index().rename(columns={0: sys._getframe().f_code.co_name}) | 9226ce6ab2929d4e36c9b7eabbb6377af315e0ab | 8,739 |
import numpy as np
def combine_histogram(old_hist, arr):
""" Collect layer histogram for arr and combine it with old histogram.
"""
new_max = np.max(arr)
new_min = np.min(arr)
new_th = max(abs(new_min), abs(new_max))
(old_hist, old_hist_edges, old_min, old_max, old_th) = old_hist
if new_th <= old_th:
hist, _ = np.histogram(arr,
bins=len(old_hist),
range=(-old_th, old_th))
return (old_hist + hist, old_hist_edges, min(old_min, new_min),
max(old_max, new_max), old_th)
else:
old_num_bins = len(old_hist)
old_step = 2 * old_th / old_num_bins
half_increased_bins = int((new_th - old_th) // old_step + 1)
new_num_bins = half_increased_bins * 2 + old_num_bins
new_th = half_increased_bins * old_step + old_th
hist, hist_edges = np.histogram(arr,
bins=new_num_bins,
range=(-new_th, new_th))
hist[half_increased_bins:new_num_bins -
half_increased_bins] += old_hist
return (hist, hist_edges, min(old_min, new_min), max(old_max,
new_max), new_th) | bc6e6edc9531b07ed347dc0083f86ee921d77c11 | 8,740 |
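A sketch of how combine_histogram might be driven, assuming numpy is available as np and the running state is the (hist, edges, min, max, threshold) tuple the function unpacks; the batch data is synthetic.
first = np.random.randn(1000)
th = float(np.abs(first).max())
hist, edges = np.histogram(first, bins=256, range=(-th, th))
running = (hist, edges, first.min(), first.max(), th)
# fold in a second batch; the bin range widens automatically when its spread exceeds the old threshold
running = combine_histogram(running, np.random.randn(1000) * 2)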
from typing import Mapping
def unmunchify(x):
""" Recursively converts a Munch into a dictionary.
>>> b = Munch(foo=Munch(lol=True), hello=42, ponies='are pretty!')
>>> sorted(unmunchify(b).items())
[('foo', {'lol': True}), ('hello', 42), ('ponies', 'are pretty!')]
unmunchify will handle intermediary dicts, lists and tuples (as well as
their subclasses), but ymmv on custom datatypes.
>>> b = Munch(foo=['bar', Munch(lol=True)], hello=42,
... ponies=('are pretty!', Munch(lies='are trouble!')))
>>> sorted(unmunchify(b).items()) #doctest: +NORMALIZE_WHITESPACE
[('foo', ['bar', {'lol': True}]), ('hello', 42), ('ponies', ('are pretty!', {'lies': 'are trouble!'}))]
nb. As dicts are not hashable, they cannot be nested in sets/frozensets.
"""
# Munchify x, using `seen` to track object cycles
seen = dict()
def unmunchify_cycles(obj):
# If we've already begun unmunchifying obj, just return the already-created unmunchified obj
try:
return seen[id(obj)]
except KeyError:
pass
# Otherwise, first partly unmunchify obj (but without descending into any lists or dicts) and save that
seen[id(obj)] = partial = pre_unmunchify(obj)
# Then finish unmunchifying lists and dicts inside obj (reusing unmunchified obj if cycles are encountered)
return post_unmunchify(partial, obj)
def pre_unmunchify(obj):
# Here we return a skeleton of unmunchified obj, which is enough to save for later (in case
# we need to break cycles) but it needs to filled out in post_unmunchify
if isinstance(obj, Mapping):
return dict()
elif isinstance(obj, list):
return type(obj)()
elif isinstance(obj, tuple):
type_factory = getattr(obj, "_make", type(obj))
return type_factory(unmunchify_cycles(item) for item in obj)
else:
return obj
def post_unmunchify(partial, obj):
# Here we finish unmunchifying the parts of obj that were deferred by pre_unmunchify because they
# might be involved in a cycle
if isinstance(obj, Mapping):
partial.update((k, unmunchify_cycles(obj[k])) for k in iterkeys(obj))
elif isinstance(obj, list):
partial.extend(unmunchify_cycles(v) for v in obj)
elif isinstance(obj, tuple):
for (value_partial, value) in zip(partial, obj):
post_unmunchify(value_partial, value)
return partial
return unmunchify_cycles(x) | 90ee373099d46ca80cf78c4d8cca885f2258bce2 | 8,741 |
def split_data(mapping, encoded_sequence):
""" Function to split the prepared data in train and test
Args:
mapping (dict): dictionary mapping of all unique input charcters to integers
encoded_sequence (list): number encoded charachter sequences
Returns:
numpy array : train and test split numpy arrays
"""
encoded_sequence_ = np.array(encoded_sequence)
X, y = encoded_sequence_[:, :-1], encoded_sequence_[:, -1]
y = to_categorical(y, num_classes=len(mapping))
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.1, random_state=42)
return X_train, X_test, y_train, y_test | b8044b3c1686b37d4908dd28db7cbe9bff2e899a | 8,742 |
def fsp_loss(teacher_var1_name,
teacher_var2_name,
student_var1_name,
student_var2_name,
program=None):
"""Combine variables from student model and teacher model by fsp-loss.
Args:
teacher_var1_name(str): The name of teacher_var1.
teacher_var2_name(str): The name of teacher_var2. Except for the
second dimension, all other dimensions should
be consistent with teacher_var1.
student_var1_name(str): The name of student_var1.
student_var2_name(str): The name of student_var2. Except for the
second dimension, all other dimensions should
be consistent with student_var1.
program(Program): The input distiller program. If not specified,
the default program will be used. Default: None
Returns:
Variable: fsp distiller loss.
"""
if program == None:
program = paddle.static.default_main_program()
teacher_var1 = program.global_block().var(teacher_var1_name)
teacher_var2 = program.global_block().var(teacher_var2_name)
student_var1 = program.global_block().var(student_var1_name)
student_var2 = program.global_block().var(student_var2_name)
teacher_fsp_matrix = paddle.fluid.layers.fsp_matrix(teacher_var1,
teacher_var2)
student_fsp_matrix = paddle.fluid.layers.fsp_matrix(student_var1,
student_var2)
fsp_loss = paddle.mean(
paddle.nn.functional.square_error_cost(student_fsp_matrix,
teacher_fsp_matrix))
return fsp_loss | b8937a64ec8f5e215128c61edee522c9b2cd83d7 | 8,743 |
import numpy as np
def diff_numpy_array(A, B):
"""
Numpy Array A - B
return items in A that are not in B
By Divakar
https://stackoverflow.com/a/52417967/1497443
"""
return A[~np.in1d(A, B)] | 72139ba49cf71abd5ea60772143c26f384e0e171 | 8,744 |
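A quick check of diff_numpy_array on two small arrays (numpy imported as np, as the function requires):
A = np.array([1, 2, 3, 4, 5])
B = np.array([2, 4])
print(diff_numpy_array(A, B))  # [1 3 5]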
import sys
def load_training_data(training_fns, trunc_min_scores,
trunc_max_scores, debug=False):
""" First parse group, read and position to find shared data points
Then read in training scores, truncating as appropriate """
# Parse file twice. First time get all the loci, second time all the value data
training_ids = [parse_training_loci(t_ids) for t_ids in training_fns]
shared_ids = set.intersection(*training_ids)
id_list = sorted(shared_ids)
if debug:
print('Number of shared ids in training sets:', len(shared_ids), file=sys.stderr)
# value_array contents
# 0 truth
# 1 strand
# 2:len(train)+2, tool score x train
# 2+len(train):2+len(train)*2, tool prediction x train
# 2+2*len(train) or -2, numeric order
# 3+2*len(train) or -1, model predicted score
groups = len(training_fns)
value_array = np.zeros((len(shared_ids),4+(2*groups)), dtype=np.single)
value_array[:,-2] = np.arange(0,len(shared_ids), dtype=np.single)
for index, (training_fn, t_min, t_max) in \
enumerate(zip(training_fns, trunc_min_scores, trunc_max_scores)):
# Read in values
contents = parse_training_values(training_fn, shared_ids, t_min, t_max, debug=debug)
for i, id in enumerate(id_list):
strand, label, predicted, score = contents[id]
if index == 0:
value_array[i,0] = label
value_array[i,1] = strand
value_array[i,2+index] = score
value_array[i,2+groups+index] = predicted
return value_array, id_list | 7107d0ebff84df09589db43004804c22ffa39abc | 8,745 |
def _find_data_between_ranges(data, ranges, top_k):
"""Finds the rows of the data that fall between each range.
Args:
data (pd.Series): The predicted probability values for the positive class.
ranges (list): The threshold ranges defining the bins. Should include 0 and 1 as the first and last value.
top_k (int): The number of row indices per bin to include as samples.
Returns:
list(list): Each list corresponds to the row indices that fall in the range provided.
"""
results = []
for i in range(1, len(ranges)):
mask = data[(data >= ranges[i - 1]) & (data < ranges[i])]
if top_k != -1:
results.append(mask.index.tolist()[: min(len(mask), top_k)])
else:
results.append(mask.index.tolist())
return results | 323986cba953a724f9cb3bad8b2522fc711529e5 | 8,746 |
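A minimal usage sketch for _find_data_between_ranges with a tiny hand-made probability series:
import pandas as pd
probs = pd.Series([0.05, 0.20, 0.45, 0.80, 0.95])
# two bins, [0, 0.5) and [0.5, 1), keeping at most 2 row indices per bin
print(_find_data_between_ranges(probs, ranges=[0, 0.5, 1], top_k=2))  # [[0, 1], [3, 4]]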
def validar_entero_n():
"""
"""
try:
n = int(input('n= ')) #si es un float también funciona el programa
except:
print ('Número no válido')
return False
else:
return n | a1238025fd2747c597fc2adf34de441ae6b8055d | 8,747 |
def Conv_Cifar10_32x64x64():
"""A 3 hidden layer convnet designed for 32x32 cifar10."""
base_model_fn = _cross_entropy_pool_loss([32, 64, 64],
jax.nn.relu,
num_classes=10)
datasets = image.cifar10_datasets(batch_size=128)
return _ConvTask(base_model_fn, datasets) | e41e2f0da80f8822187a2ee82dcfe6f70e324213 | 8,748 |
from typing import List
def rotate(angle_list: List, delta: float) -> List:
"""Rotates a list of angles (wraps around at 2 pi)
Args:
angle_list (List): list of angles in pi radians
delta (float): amount to change in pi radians
Returns:
List: new angle list in pi radians
"""
new_angle_list = []
for angle in angle_list:
new_angle = angle + delta
if new_angle >= 2.0:
new_angle -= 2.0
new_angle_list.append(new_angle)
new_angle_list.sort()
return new_angle_list | 560c5138486bd3e67ad956fb2439236a3e3886cc | 8,749 |
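A worked example for rotate; angles are expressed in units of pi radians, so the wrap-around point is 2.0:
# 0.5 + 0.5 = 1.0; 1.75 + 0.5 = 2.25, which wraps to 0.25; the result comes back sorted
print(rotate([0.5, 1.75], 0.5))  # [0.25, 1.0]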
def global_average_pooling_3d(tensor: TorchTensorNCX) -> TorchTensorNCX:
"""
3D Global average pooling.
Calculate the average value per sample per channel of a tensor.
Args:
tensor: tensor with shape NCDHW
Returns:
a tensor of shape NC
"""
assert len(tensor.shape) == 5, 'must be a NCDHW tensor!'
return F.avg_pool3d(tensor, tensor.shape[2:]).squeeze(2).squeeze(2).squeeze(2) | 27a73d29fd9dd63b461f2275ed2941bf6bd83348 | 8,750 |
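A quick sketch of calling global_average_pooling_3d on a dummy NCDHW tensor, assuming PyTorch with F bound to torch.nn.functional (as the body implies) and the TorchTensorNCX alias provided by the surrounding module:
import torch
import torch.nn.functional as F
x = torch.rand(2, 3, 8, 8, 8)  # N=2 samples, C=3 channels, D=H=W=8
print(global_average_pooling_3d(x).shape)  # torch.Size([2, 3])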
def get_LAB_L_SVD_s(image):
"""Returns s (Singular values) SVD from L of LAB Image information
Args:
image: PIL Image or Numpy array
Returns:
vector of singular values
Example:
>>> from PIL import Image
>>> from ipfml.processing import transform
>>> img = Image.open('./images/test_img.png')
>>> s = transform.get_LAB_L_SVD_s(img)
>>> len(s)
200
"""
L = get_LAB_L(image)
return compression.get_SVD_s(L) | 50a4bd4e4a8b3834baa3aca1f5f1e635baa7a145 | 8,751 |
def path_inclusion_filter_fn(path, param, layer):
"""Returns whether or not layer name is contained in path."""
return layer in path | c93aa83e67c600cd83d053d50fbeaee4f7eebf94 | 8,752 |
from typing import Tuple
def _parse_feature(line: PipelineRecord) -> Tuple[str, Coordinates, Feature]:
""" Creates a Feature from a line of output from a CSVReader """
contig = line[0]
coordinates = parse_coordinates(line[1])
feature = line[2]
# Piler-cr and BLAST both use 1-based indices, but Opfi uses 0-based indices.
# To make both coordinate systems consistent, we subtract 1 from the start
# since feature coordinates come directly from those tools.
# If features are on the reverse strand, the second coordinate will be larger
# than the first, but operon_analyzer assumes the start is always less than the
# end
first_coord, second_coord = parse_coordinates(line[3])
feature_start = min(first_coord, second_coord) - 1
feature_end = max(first_coord, second_coord)
query_orfid = line[4]
strand = int(line[5]) if line[5] else (1 if feature_start < feature_end else -1)
hit_accession = line[6]
hit_eval = float(line[7]) if line[7] else None
description = line[8]
sequence = line[9]
if len(line) > 10:
bit_score = float(line[10]) if line[10] != '' else None
raw_score = int(line[11]) if line[11] != '' else None
aln_len = int(line[12]) if line[12] != '' else None
pident = float(line[13]) if line[13] != '' else None
nident = int(line[14]) if line[14] != '' else None
mismatch = int(line[15]) if line[15] != '' else None
positive = int(line[16]) if line[16] != '' else None
gapopen = int(line[17]) if line[17] != '' else None
gaps = int(line[18]) if line[18] != '' else None
ppos = float(line[19]) if line[19] != '' else None
qcovhsp = int(line[20]) if line[20] != '' else None
contig_filename = line[21] if line[21] else ''
else:
bit_score = None
raw_score = None
aln_len = None
pident = None
nident = None
mismatch = None
positive = None
gapopen = None
gaps = None
ppos = None
qcovhsp = None
contig_filename = None
return contig, contig_filename, coordinates, Feature(
feature,
(feature_start, feature_end),
query_orfid,
strand,
hit_accession,
hit_eval,
description,
sequence,
bit_score,
raw_score,
aln_len,
pident,
nident,
mismatch,
positive,
gapopen,
gaps,
ppos,
qcovhsp) | 201f9c6ed5cd618fc63ec5e07a5b99977f4ef2b0 | 8,753 |
def average_summary_df_tasks(df, avg_columns):
""" Create averages of the summary df across tasks."""
new_df = []
# Columns to have after averaging
keep_cols = ["dataset", "method_name", "trial_number"]
subsetted = df.groupby(keep_cols)
for subset_indices, subset_df in subsetted:
return_dict = {}
return_dict.update(dict(zip(keep_cols, subset_indices)))
for column in avg_columns:
task_values = subset_df[column].values
min_length = min([len(i) for i in task_values])
new_task_values = []
for j in task_values:
j = np.array(j)
if len(j) > min_length:
percentiles = np.linspace(0, len(j) - 1, min_length).astype(int)
new_task_values.append(j[percentiles])
else:
new_task_values.append(j)
avg_task = np.mean(np.array(new_task_values), axis=0).tolist()
return_dict[column] = avg_task
new_df.append(return_dict)
return pd.DataFrame(new_df) | 9c506132cc406a91979777255c092db20d786d12 | 8,754 |
def ml_variance(values, mean):
"""
Given a list of values assumed to come from a normal distribution and
their maximum likelihood estimate of the mean, compute the maximum
likelihood estimate of the distribution's variance of those values.
There are many libraries that do something like this, but they
likely don't do exactly what you want, so you should not use them
directly. (And to be clear, you're not allowed to use them.)
"""
# Maximum likelihood estimate: mean squared deviation from the given mean (divide by n, not n - 1)
return sum((x - mean) ** 2 for x in values) / len(values)
def add_landmarks(particle, d, angle):
"""
Adds a set of landmarks to the particle. Only used on first SLAM cycle
when no landmarks have been added.
:param particle: The particle to be updated
:param d: An array of distances to the landmarks
:param angle: An array of observation angles for the landmarks
:return: Returns the updated particle with landmarks added
"""
# Evaluate sine and cosine values for each observation in z
s = np.sin(pi_2_pi(particle.x[2, 0] + angle))
c = np.cos(pi_2_pi(particle.x[2, 0] + angle))
# Add new landmark locations to mu
particle.mu = np.vstack((particle.mu, np.array(
[particle.x[0, 0] + d * c,
particle.x[1, 0] + d * s]).T))
# Distance values
dpos = np.zeros((len(d), 2))
dpos[:, 0] = d * c # dx
dpos[:, 1] = d * s # dy
d_sq = dpos[:, 0]**2 + dpos[:, 1]**2
d = np.sqrt(d_sq)
H = calc_H(particle, dpos, d_sq, d)
# Add covariance matrices for landmarks
particle.sigma = np.vstack((particle.sigma,
np.linalg.inv(H) @ Q
@ np.linalg.inv(H.transpose((0, 2, 1)))))
particle.i = np.append(particle.i, np.full(len(d), 1))
return particle | d1d168e48f62f60d58e57a79223793108d50dac9 | 8,756 |
def walk_forward_val_multiple(model, ts_list,
history_size=HISTORY_SIZE,
target_size=TARGET_SIZE) -> float:
"""
Conduct walk-forward validation for all states, average the results.
Parameters
----------
model -- The model to be validated
ts_list {list | np.ndarray} -- Array of time series vector
history_size {int} -- The window to use for model input
target_size {int} -- The target prediction window size
Returns
-------
'mse' {float} -- The weighted average MSE across all the states (weighted
by length of time series)
"""
total_error = 0.
total_steps = 0
for ts in ts_list:
mse_state, n_preds = walk_forward_val(model, ts,
history_size=history_size,
target_size=target_size,
return_count=True)
total_error += mse_state * n_preds
total_steps += n_preds
return total_error / total_steps | b3f73ceeddb720fdc7c7d9470a49bccc3c21f81b | 8,757 |
import argparse
def main():
"""The 'real' entry point of this program"""
# parse args
parser = argparse.ArgumentParser(
prog=hdtop.const.PROG_NAME, description=hdtop.const.DESCRIPTION
)
parser.add_argument(
"action",
default="start",
nargs="?",
choices=_SUBPARSERS,
help="Action for the program",
)
args, remain = parser.parse_known_args()
# parse sub args
subparser: argparse.ArgumentParser = _SUBPARSERS[args.action]()
args = subparser.parse_args(remain, args)
# action
return args.func(args) | 926ebb9ecb2af7b6d0a3f0ed9c2eae035531973a | 8,758 |
def inverse_project_lambert_equal_area(pt):
"""
Inverse Lambert projections
Parameters:
pt: point, as a numpy array
"""
X = pt[0]
Y = pt[1]
f = np.sqrt(1.0-(X**2.0+Y**2.0)/4)
return tensors.Vector([f*X,f*Y,-1.0+(X**2.0+Y**2.0)/2]) | f8ab5fb44d2d271a8da13623273d8d687d38b772 | 8,759 |
import dataclasses
def _get_field_default(field: dataclasses.Field):
"""
Return a marshmallow default value given a dataclass default value
>>> _get_field_default(dataclasses.field())
<marshmallow.missing>
"""
# Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed
default_factory = field.default_factory # type: ignore
if default_factory is not dataclasses.MISSING:
return default_factory
elif field.default is dataclasses.MISSING:
return marshmallow.missing
return field.default | 0c45e55a1c14cb6b47365ef90cb68e517342dbbc | 8,760 |
from typing import Optional
from typing import Dict
from typing import Union
import os
import tempfile
def download_file_from_s3(
s3_path: str,
local_path: str,
create_dirs: bool = True,
silent: bool = False,
raise_on_error: bool = True,
boto3_kwargs: Optional[Dict[str, Union[str, float]]] = None,
) -> bool:
"""Download a file from s3 to local machine.
Args:
s3_path: Full path on s3 in format "s3://<bucket_name>/<obj_path>".
local_path: Path on local machine.
create_dirs: Whether the path directory should be created. (Defaults to True)
silent: Whether to print debug information.
raise_on_error: Whether to raise exception on any errors. (Defaults to True)
boto3_kwargs: The parameters for s3.meta.client.download_fileobj() function.
Returns:
Boolean of whether the file was successfully downloaded.
"""
if boto3_kwargs is None:
boto3_kwargs = {}
bucket, key = decompose_s3_path(s3_path)
s3_client = boto3.client("s3")
try:
if not silent:
print(f"Downloading file from '{s3_path}' to '{local_path}'")
dir_name = os.path.dirname(local_path)
if create_dirs:
ensure_dirs(dir_name, silent=silent)
with tempfile.NamedTemporaryFile("wb", dir=dir_name, delete=False) as tf:
s3_client.download_fileobj(bucket, key, tf, **boto3_kwargs)
temp_path = tf.name
os.rename(temp_path, local_path)
except Exception as e:
print(f"ERROR: failed to download from {s3_path} to {local_path}: {e}")
if raise_on_error:
raise e
return False
return True | 7c1cf7cfee127e7583316fe9fef67f072c0441f9 | 8,761 |
from typing import List
from typing import Tuple
import select
def get_all_votes(poll_id: int) -> List[Tuple[str, int]]:
"""
Get all votes for the current poll_id that are stored in the database
Args:
poll_id (int): Telegram's `message_id` for the poll
Returns:
List[Tuple[str, int]]: A list with the current votes in tuples (user, votes)
"""
postgres: Database = get_database()
select_query = (
select([postgres.motos_counter.c.username, postgres.motos_counter.c.vote])
.where(postgres.motos_counter.c.poll_id == poll_id)
.order_by(postgres.motos_counter.c.vote, postgres.motos_counter.c.date)
)
results = postgres.engine.execute(select_query)
return [(row["username"], row["vote"]) for row in results] | cf0ad8ee700a0da70bf29d53d08ab71e08c941ea | 8,762 |
def getUnitConversion():
"""
Get the unit conversion from kT to kJ/mol
Returns
factor: The conversion factor (float)
"""
temp = 298.15
factor = Python_kb/1000.0 * temp * Python_Na
return factor | cb7b33231a53a68358713ce65137cbf13a397923 | 8,763 |
import numpy as np
def find_where_and_nearest(array, value):
"""
Returns index and array[index] where value is closest to an array element.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx, array[idx] | a34ac1d59c8093989978fbca7c2409b241cedd5b | 8,764 |
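A small usage example for find_where_and_nearest:
import numpy as np
grid = np.array([0.0, 0.5, 1.0, 1.5])
idx, val = find_where_and_nearest(grid, 1.2)
print(idx, val)  # 2 1.0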
import numpy
def twoexpdisk(R,phi,z,glon=False,
params=[1./3.,1./0.3,1./4.,1./0.5,logit(0.1)]):
"""
NAME:
twoexpdisk
PURPOSE:
density of a sum of two exponential disks
INPUT:
R,phi,z - Galactocentric cylindrical coordinates or (l/rad,b/rad,D/kpc)
glon= (False) if True, input coordinates above are (l,b,D)
params= parameters [1/hR,1/hz,1/hR2,1/hz2,logit(amp2)]
OUTPUT:
density or log density
HISTORY:
2015-03-24 - Written - Bovy (IAS)
"""
amp= ilogit(params[4])
return (1.-amp)/2.*numpy.fabs(params[1])\
*numpy.exp(-params[0]*(R-_R0)-params[1]*numpy.fabs(z))\
+amp/2.*params[3]*numpy.exp(-params[2]*(R-_R0)-params[3]*numpy.fabs(z)) | bf8c5e0afa28e715846401274941e281a8731f24 | 8,765 |
from sklearn import metrics
from sklearn.cluster import KMeans
def sc(X):
"""Silhouette Coefficient"""
global best_k
score_list = [] # stores the average silhouette coefficient of the model for each K
silhouette_int = -1 # initial threshold for the average silhouette coefficient
for n_clusters in range(3, 10): # iterate over a finite set of candidate K values
model_kmeans = KMeans(n_clusters=n_clusters, random_state=0) # build the clustering model object
cluster_labels_tmp = model_kmeans.fit_predict(X) # fit the clustering model
silhouette_tmp = metrics.silhouette_score(X, cluster_labels_tmp) # average silhouette coefficient for this K
score_list.append([n_clusters, silhouette_tmp]) # append each K and its score to the list
if silhouette_tmp > silhouette_int: # if the average silhouette coefficient is higher
best_k = n_clusters # keep the best K
silhouette_int = silhouette_tmp # keep the best average silhouette score
# best_kmeans = model_kmeans # keep the best model
# cluster_labels_k = cluster_labels_tmp # keep the best cluster labels
return best_k | c2898e115db04c1f1ac4d6a7f8c583ea0a8b238e | 8,766 |
import socket
import time
def is_tcp_port_open(host: str, tcp_port: int) -> bool:
"""Checks if the TCP host port is open."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2) # 2 Second Timeout
try:
sock.connect((host, tcp_port))
sock.shutdown(socket.SHUT_RDWR)
except ConnectionRefusedError:
return False
except socket.timeout:
return False
finally:
sock.close()
# Other errors are propagated as odd exceptions.
# We shutdown and closed the connection, but the server may need a second
# to start listening again. If the following error is seen, this timeout
# should be increased. 300ms seems to be the minimum.
#
# Connecting to J-Link via IP...FAILED: Can not connect to J-Link via \
# TCP/IP (127.0.0.1, port 19020)
time.sleep(0.5)
return True | cbe4d0ae58610b863c30b4e1867b47cb1dbdfc3d | 8,767 |
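A minimal usage sketch for is_tcp_port_open; the host and port are placeholders:
# probe whether something is already listening on localhost:8080
print('8080 open:', is_tcp_port_open('127.0.0.1', 8080))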
from typing import Callable
from typing import Any
import itertools
def recursive_apply_dict(node: dict, fn: Callable) -> Any:
"""
Applies `fn` to the node, if `fn` changes the node,
the changes should be returned. If the `fn` does not change the node,
it calls `recursive_apply` on the children of the node.
In case the recursion on the children results in one or more
`runtool.datatypes.Versions` objects, the cartesian product of these
versions is calculated and a new `runtool.datatypes.Versions` object will be
returned containing the different versions of this node.
"""
# else merge children of type Versions into a new Versions object
expanded_children = []
new_node = {}
for key, value in node.items():
child = recursive_apply(value, fn)
# If the child is a Versions object, map the key to all its versions,
# child = Versions([1,2]),
# key = ['a']
# ->
# (('a':1), ('a':2))
if isinstance(child, Versions):
expanded_children.append(itertools.product([key], child))
else:
new_node[key] = child
if expanded_children:
# example:
# expanded_children = [(('a':1), ('a':2)), (('b':1), ('b':2))]
# new_node = {"c": 3}
# results in:
# [
# {'a':1, 'b':1, 'c':3},
# {'a':1, 'b':2, 'c':3},
# {'a':2, 'b':1, 'c':3},
# {'a':3, 'b':2, 'c':3},
# ]
new_node = [
fn(
dict(version_of_node, **new_node)
) # apply fn to the new version of the node
for version_of_node in itertools.product(*expanded_children)
]
# if the current node generated Versions object, these
# need to be flattened as well. For example:
# new_node = [Versions([1,2]), Versions([3,4])]
# results in
# Versions([[1,3], [1,4], [2,3], [2,4]])
if all(isinstance(val, Versions) for val in new_node):
return Versions(list(*itertools.product(*new_node)))
return Versions(new_node)
return fn(new_node) | c40daa68caaea02d16511fcc1cd3ee1949c73633 | 8,768 |
import numpy as np
import six
from PIL import Image
def encode_image_array_as_jpg_str(image):
"""Encodes a numpy array into a JPEG string.
Args:
image: a numpy array with shape [height, width, 3].
Returns:
JPEG encoded image string.
"""
image_pil = Image.fromarray(np.uint8(image))
output = six.BytesIO()
image_pil.save(output, format='JPEG')
jpg_string = output.getvalue()
output.close()
return jpg_string | 4c2d27c15c6979678a1c9619a347b7aea5718b2c | 8,769 |
def minify_response(response):
"""Minify response to save bandwith."""
if response.mimetype == u'text/html':
data = response.get_data(as_text=True)
response.set_data(minify(data, remove_comments=True,
remove_empty_space=True,
reduce_boolean_attributes=True))
return response | 29a942d870636337eaf0d125ba6b2ca9945d1d1c | 8,770 |
def get_shorturlhash(myurl):
"""Returns a FNV1a hash of the UNquoted version of the passed URL."""
x = get_hash(unquote(myurl))
return x | f61ef1cfe14fc69a523982888a7b1082244b7bd5 | 8,771 |
def filter_privacy_level(qs, clearance_level, exact=False):
"""
Function to exclude objects from a queryset, which got a higher clearance
level than the wanted maximum clearance level.
:qs: Django queryset.
:clearance_level: Minimum clearance level.
:exact: Boolean to check for the exact clearance level.
"""
if not qs:
return qs
c_type = ContentType.objects.get_for_model(qs.model)
kwargs = {
'content_type': c_type,
'object_id__in': qs.values_list('pk'),
'level__clearance_level{}'.format(
'' if exact else '__gt'): clearance_level,
}
private_objects = PrivacySetting.objects.filter(**kwargs).values_list(
'object_id')
if exact:
return qs.filter(pk__in=private_objects)
return qs.exclude(pk__in=private_objects) | a5fd864b3a9efd86bf40e0a3b966edb047979b2a | 8,772 |
def get_configuration_store(name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing App Configuration.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.appconfiguration.get_configuration_store(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The Name of this App Configuration.
:param str resource_group_name: The name of the Resource Group where the App Configuration exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:appconfiguration/getConfigurationStore:getConfigurationStore', __args__, opts=opts).value
return AwaitableGetConfigurationStoreResult(
endpoint=__ret__.get('endpoint'),
id=__ret__.get('id'),
location=__ret__.get('location'),
name=__ret__.get('name'),
primary_read_keys=__ret__.get('primaryReadKeys'),
primary_write_keys=__ret__.get('primaryWriteKeys'),
resource_group_name=__ret__.get('resourceGroupName'),
secondary_read_keys=__ret__.get('secondaryReadKeys'),
secondary_write_keys=__ret__.get('secondaryWriteKeys'),
sku=__ret__.get('sku'),
tags=__ret__.get('tags')) | 4c0baa2cdd089439f1a53415ff9679568a097094 | 8,773 |
def _linear(args, output_size, bias, scope=None, use_fp16=False):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: if some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("`args` must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError(
"Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError(
"Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
dtype = [a.dtype for a in args][0]
# Now the computation.
with vs.variable_scope(scope or "Linear"):
matrix = _variable_on_cpu('Matrix', [total_arg_size, output_size],
use_fp16=use_fp16)
if use_fp16:
dtype = tf.float16
else:
dtype = tf.float32
args = [tf.cast(x, dtype) for x in args]
if len(args) == 1:
res = math_ops.matmul(args[0], matrix)
else:
res = math_ops.matmul(array_ops.concat(args, 1), matrix)
if not bias:
return res
bias_term = _variable_on_cpu('Bias', [output_size],
tf.constant_initializer(0),
use_fp16=use_fp16)
return res + bias_term | d8daafaf1dfab0bc6425aef704543833bfbf731a | 8,774 |
def find_CI(method, samples, weights=None, coverage=0.683,
logpost=None, logpost_sort_idx=None,
return_point_estimate=False, return_coverage=False,
return_extras=False, options=None):
"""Compute credible intervals and point estimates from samples.
Arguments
---------
method : str
Method to compute CI. Options are "PJ-HPD", "tail CI", "std", and
"HPD".
PJ-HPD: Compute the CI from the joint posterior HPD region such that
the projected range of the HPDR has coverage ``coverage``.
See Joachimi et al. 2020.
The point estimate is the joint posterior MAP.
tail CI: This is the usual quantile CI. I.e., for CI (l,u) and
coverage c, P(x<l) = (1-c)/2 and P(x>u) = 1-(1-c)/2.
The point estimate is the median.
std: Compute the CI as (mean - n_sigma*std, mean + n_sigma*std).
``n_sigma`` is the number of standard deviations that cover
``coverage`` in a normal distribution.
The point estimate is the mean.
HPD: Compute the HPDI of the samples.
The point estimate is the MAP.
samples : array
Samples to use.
weights : array, optional
Sample weights.
coverage : float, optional
Target coverage. This gets converted into sigmas. Default: 0.683.
logpost : array, optional
Array of the log posterior values of the samples. Required for method
``PJ-HPD``.
logpost_sort_idx : array, optional
Array of indices that sort the samples in descending posterior value.
If method is ``PJ-HPD`` and it is not provided, this will be computed
internally from logpost.
return_point_estimate : bool, optional
Whether to return the point_estimate.
return_coverage : bool, optional
Whether to return the actual coverage of the CI.
options : dict, optional
Additional options passed to the CI methods.
Returns
-------
(l, u) : tuple
Credible interval of the samples.
p : float
Point estimate. Only returned if return_point_estimate is true.
coverage : float
The achieved coverage of the returned CI.
"""
options = options or {}
extras = None
if method.lower() == "pj-hpd" or method.lower() == "projected joint hpd":
if logpost is None and logpost_sort_idx is None:
raise ValueError("For method PJ-HPD, either logpost or "
"logpost_sort_idx need to be specified.")
CI, MAP, alpha, n_sample = find_projected_joint_HPDI(
samples, weights,
coverage_1d_threshold=coverage,
sort_idx=logpost_sort_idx,
log_posterior=logpost,
return_map=True, return_coverage_1d=True,
return_n_sample=True,
**options)
point_estimate = MAP
extras = n_sample
elif method.lower() == "hpd" or method.lower() == "m-hpd":
CI, marg_MAP, alpha, no_constraints = find_marginal_HPDI(
samples, weights,
coverage=coverage,
return_map=True,
return_coverage=True,
check_prior_edges=True,
**options)
point_estimate = marg_MAP
extras = no_constraints
elif method.lower() == "tail ci" or method.lower() == "quantile ci":
CI, marg_median, alpha = find_quantile_CI(
samples, weights,
coverage=coverage,
return_median=True, return_coverage=True)
point_estimate = marg_median
elif method.lower() == "std":
CI, marg_mean, alpha = find_std_CI(
samples, weights, coverage=coverage,
return_mean=True, return_coverage=True)
point_estimate = marg_mean
else:
raise NotImplementedError(f"Method {method} not supported.")
result = [CI]
if return_point_estimate:
result += [point_estimate]
if return_coverage:
result += [alpha]
if return_extras:
result += [extras]
if len(result) == 1:
# Only CI
return result[0]
else:
return tuple(result) | 6b5ab3ac47f4f4a0251862946336948fd2ff66ed | 8,775 |
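# Minimal usage sketch for find_CI above (not part of the original record); it assumes numpy
# is available and that the module's find_std_CI helper is importable alongside find_CI.
import numpy as np
rng = np.random.default_rng(0)
samples = rng.normal(loc=1.0, scale=2.0, size=5000)
ci, mean, alpha = find_CI("std", samples, coverage=0.683,
                          return_point_estimate=True, return_coverage=True)
# ci is roughly (mean - std, mean + std) for 68.3% coverage; alpha is the achieved coverage.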
import numpy as np
import pandas as pd
def load_csv(filename, fields=None, y_column=None, sep=','):
""" Read the csv file."""
input = pd.read_csv(filename, skipinitialspace=True,
usecols=fields, sep=sep, low_memory=False)
input = input.dropna(subset=fields)
# dtype={"ss_list_price": float, "ss_wholesale_cost": float}
input_data = input.values
data = DataSource()
    if y_column is None:
data.features = input_data[:, :-1]
data.labels = input_data[:, -1]
data.headers = input.keys()
else:
data.features = np.delete(
input_data, [y_column], axis=1) # input_data[:, :-1]
data.labels = input_data[:, y_column]
headers = np.array(input.keys())
data.headers = list(np.delete(headers, [y_column]))
data.headers.append(input.keys()[y_column])
# print(data.headers)
try:
data.file = filename.split("/")[-1]
except Exception:
data.file = filename
return data | 126b96e94f4a5ab201460b427828807cf31eb6ae | 8,776 |
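# Usage sketch for load_csv above; "prices.csv" and its column names are hypothetical
# placeholders, not taken from the original record.
data = load_csv("prices.csv", fields=["list_price", "wholesale_cost", "sales"], y_column=2)
print(data.headers)                            # feature headers followed by the label header
print(data.features.shape, data.labels.shape)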
def Normalize(array):
"""Normalizes numpy arrays into scale 0.0 - 1.0"""
array_min, array_max = array.min(), array.max()
return ((array - array_min)/(array_max - array_min)) | a8f3bae56f8e17aed80f8e41030d049a69ac8cae | 8,777 |
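# Quick check of Normalize above (assumes numpy): the smallest value maps to 0.0, the largest to 1.0.
import numpy as np
print(Normalize(np.array([2.0, 4.0, 6.0])))  # -> [0.  0.5 1. ]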
def obtener_cantidad_anualmente(PaisDestino, AnioInicio, AnioFin):
"""
Obtener cantidad de vuelos entrantes anualmente dado un pais destino y un rango de años
Obtiene la cantidad total de vuelos entrantes de cada año
:param PaisDestino: Pais al que llegan los vuelos
:type PaisDestino: str
:param AnioInicio: Anio Inicio
:type AnioInicio: int
:param AnioFin: Anio Fin
:type AnioFin: int
:rtype: Dict[str, int]
"""
conversor = Conversor()
repository = DBRepository()
cursor, labels = repository.ObtenerDatosVuelosEntrantesAenaDadoPaisDestinoAnioMinMax(PaisDestino, AnioInicio, AnioFin)
arrayTuplas = conversor.ConvertirCursorToTuplas(cursor)
    # Show the extended JSON
matriz, lista = conversor.ConvertirTuplasToMatriz(arrayTuplas, labels)
retval = conversor.ObtenerDataJSONExtendido(matriz)
return retval | 8364a08f4b42124b70a7769dc6a9649cdd9841d7 | 8,778 |
import pandas as pd
def calculate_shap_for_test(training_data, y, pipeline, n_points_to_explain):
"""Helper function to compute the SHAP values for n_points_to_explain for a given pipeline."""
points_to_explain = training_data[:n_points_to_explain]
pipeline.fit(training_data, y)
return _compute_shap_values(pipeline, pd.DataFrame(points_to_explain), training_data) | d8a88b3c9af05a8274a0cca1a0e63c3a9faaa8d0 | 8,779 |
def read_num_write(input_string):
""" read in the number of output files
"""
pattern = ('NumWrite' +
one_or_more(SPACE) + capturing(INTEGER))
block = _get_training_data_section(input_string)
keyword = first_capture(pattern, block)
assert keyword is not None
return keyword | 0ee1a9ac178eb4c49a01a36208e4c59d6b9023bc | 8,780 |
import requests
import json
import pandas as pd
def stock_zh_a_minute(symbol: str = 'sh600751', period: str = '5', adjust: str = "") -> pd.DataFrame:
"""
    Historical minute-bar quote data for stocks and stock indexes
    http://finance.sina.com.cn/realstock/company/sh600519/nc.shtml
    :param symbol: e.g. sh000300
    :type symbol: str
    :param period: bar size in minutes: 1, 5, 15, 30 or 60
    :type period: str
    :param adjust: empty string (default): unadjusted data; qfq: forward-adjusted data; hfq: backward-adjusted data
    :type adjust: str
    :return: specific data
    :rtype: pandas.DataFrame
"""
url = "https://quotes.sina.cn/cn/api/jsonp_v2.php/=/CN_MarketDataService.getKLineData"
params = {
"symbol": symbol,
"scale": period,
"datalen": "1023",
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame(json.loads(r.text.split('=(')[1].split(");")[0])).iloc[:, :6]
    try:
        stock_zh_a_daily(symbol=symbol, adjust="qfq")
    except Exception:
        # fall back to the raw minute data if the adjusted daily data cannot be fetched
        return temp_df
if adjust == "":
return temp_df
if adjust == "qfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="qfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df
if adjust == "hfq":
temp_df[["date", "time"]] = temp_df["day"].str.split(" ", expand=True)
need_df = temp_df[temp_df["time"] == "15:00:00"]
need_df.index = need_df["date"]
stock_zh_a_daily_qfq_df = stock_zh_a_daily(symbol=symbol, adjust="hfq")
result_df = stock_zh_a_daily_qfq_df.iloc[-len(need_df):, :]["close"].astype(float) / need_df["close"].astype(float)
temp_df.index = pd.to_datetime(temp_df["date"])
merged_df = pd.merge(temp_df, result_df, left_index=True, right_index=True)
merged_df["open"] = merged_df["open"].astype(float) * merged_df["close_y"]
merged_df["high"] = merged_df["high"].astype(float) * merged_df["close_y"]
merged_df["low"] = merged_df["low"].astype(float) * merged_df["close_y"]
merged_df["close"] = merged_df["close_x"].astype(float) * merged_df["close_y"]
temp_df = merged_df[["day", "open", "high", "low", "close", "volume"]]
temp_df.reset_index(drop=True, inplace=True)
return temp_df | b54f7dce68e102ebfa6c1784de5ebd49fcb405cb | 8,781 |
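# Usage sketch for stock_zh_a_minute above; it hits the Sina endpoint over the network,
# so treat it as illustrative only.
df = stock_zh_a_minute(symbol="sh600751", period="5", adjust="qfq")
print(df.head())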
import random
def randomize_case(s: str) -> str:
"""Randomize string casing.
Parameters
----------
s : str
Original string
Returns
-------
str
String with it's letters in randomized casing.
"""
result = "".join(
[c.upper() if random.randint(0, 1) == 1 else c.lower() for c in s]
)
# If result contains letters and the result is same as original try again.
if UNICODE_LETTERS_RE.search(s) is not None and result == s:
return randomize_case(s)
else:
return result | 5e00ce336e2886a0d3bd52bc033b02560f0fb9ae | 8,782 |
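# Usage sketch for randomize_case above. UNICODE_LETTERS_RE is a module-level constant that is
# not part of this record; the pattern below is a simplified stand-in for illustration only and
# assumes the record is run as a single standalone module.
import re
UNICODE_LETTERS_RE = re.compile(r"\w", re.UNICODE)
print(randomize_case("hello world"))  # e.g. "hElLo WOrld" -- random casing, never the unchanged input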
import speedtest
def _get_results():
"""Run speedtest with speedtest.py"""
s = speedtest.Speedtest()
print("Testing download..")
s.download()
print("Testing upload..")
s.upload()
return s.results.ping, s.results.download, s.results.upload | 7092a5aa7200ebc93e266dbd6b7885095b0433bb | 8,783 |
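# Usage sketch for _get_results above; it needs the speedtest-cli package and a live
# network connection, so treat it as illustrative only.
ping_ms, down_bps, up_bps = _get_results()
print(f"ping={ping_ms:.1f} ms, down={down_bps / 1e6:.1f} Mbit/s, up={up_bps / 1e6:.1f} Mbit/s")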
def findCursor(query, keyname, page_no, page_size):
"""Finds the cursor to use for fetching results from the given page.
We store a mapping of page_no->cursor in memcache. If this result is missing, we look for page_no-1, if that's
missing we look for page_no-2 and so on. Once we've found one (or we get back to page_no=0) then we need to fetch
results from that page forward, storing the results back in memcache as we go.
Args:
query: A query used to fetch data from the data store
      keyname: A string that'll make the keys unique (e.g. all blog posts could have keyname='blog')
page_no: The page number we're after
page_size: The size of pages we're after"""
cursor_page = page_no
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while not cursor:
cursor_page -= 1
if cursor_page == 0:
break
cursor = memcache.get('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size))
while cursor_page < page_no:
# if we have to fast-forward through pages then we'll store the pages in memcache as we go
if cursor_page == 0:
it = query.run()
else:
it = query.with_cursor(cursor)
n = 0
for _ in it:
n += 1
if n >= page_size:
break
cursor = query.cursor()
cursor_page += 1
memcache.set('post-page-cursor:%s:%d:%d' % (keyname, cursor_page, page_size), cursor)
return cursor | 9af3368ef0011d7c6c9758f57bc2c956d540f675 | 8,784 |
def _get_seq(window,variants,ref,genotypeAware):
"""
Using the variation in @variants, construct two haplotypes, one which
contains only homozygous variants, the other which contains both hom and het variants
by placing those variants into the reference base string
    @param window: a (low, high, chromosome) tuple delimiting the region being reconstructed
    @param variants: A vcf_eval.ChromVariants object
    @param ref: a parsers.genome object
    @param genotypeAware: whether to phase hets onto their own sequence to check for genotype accuracy (if there are multiple and they don't overlap, phasing doesn't matter)
@return: a tuple of sequences of bases that comes from modifying the reference sequence with the variants
"""
low = window[0]
high = window[1]
hetChunks = []
homChunks = []
hetOffset = low
homOffset = low
# note: if genotypeAware is False, the het chunks/offset will not be used
def get_ref_bases(start,end):
"""VCF parser is 1-based, but genome is 0-based."""
return ref.ref(window[2],start-1,end-1)
def add_ref_bases_until(chunks,begin,end):
chunks.append(get_ref_bases(begin,end))
def add_alt(chunk,start,var):
add_ref_bases_until(chunk,start,var.pos)
chunk.append(var.alt[0])
for variant in variants:
loc = variant.pos
#print((variant.ref, get_ref_bases(variant.pos,variant.pos+len(variant.ref))))
verifyRefBases = get_ref_bases(variant.pos,variant.pos+len(variant.ref))
if ( variant.ref != verifyRefBases ):
raise RescueError("Variant ref does not match reference at " + window[2] + " " + str(loc) + ": " +variant.ref + " != " + verifyRefBases )
if not ( hetOffset <= loc and homOffset <= loc ):
raise RescueError("Attempted to rescue sequence containing overlapping variants around " + window[2] + " " + str(loc))
assert variant.genotype_type != GENOTYPE_TYPE.HOM_REF
assert variant.genotype_type != GENOTYPE_TYPE.NO_CALL
if ( (not genotypeAware) or variant.genotype_type == GENOTYPE_TYPE.HOM_VAR):
add_alt(homChunks,homOffset,variant)
homOffset = len(variant.ref) + loc
else: # ( variant.genotype_type == GENOTYPE_TYPE.HET )
add_alt(hetChunks,hetOffset,variant)
hetOffset = len(variant.ref) + loc
# NB: this check seems redundant with the assert after it
if ( hetOffset > high or homOffset > high ):
print("-----fail-----")
print(window)
        print(list(map(str, variants)))
print((homOffset,high))
assert hetOffset <= high and homOffset <= high
if ( genotypeAware ):
add_ref_bases_until(hetChunks,hetOffset,high)
add_ref_bases_until(homChunks,homOffset,high)
return (''.join(homChunks),''.join(hetChunks)) | 316c19f964c6ce29d52358070d994f0fdfbcc1b8 | 8,785 |
import numpy as np
import scipy.interpolate
from matplotlib.mlab import specgram  # assumed source of specgram here (returns spectrum, freqs, t)
def interp_logpsd(data, rate, window, noverlap, freqs, interpolation='linear'):
"""Computes linear-frequency power spectral density, then uses interpolation
(linear by default) to estimate the psd at the desired frequencies."""
stft, linfreqs, times = specgram(data, window, Fs=rate, noverlap=noverlap, window = np.hamming(window))
ntimes = len(times)
logpsd = np.log10(np.abs(stft.T)**2)
interps = [scipy.interpolate.interp1d(linfreqs, logpsd[t,:], kind=interpolation) for t in range(ntimes)]
interped_logpsd = np.array([interps[t](freqs) for t in range(ntimes)])
return interped_logpsd, freqs, times | 0822f776063da9f0797aa898b0305fb295d8c0f1 | 8,786 |
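# Usage sketch for interp_logpsd above (assumes the imports shown with the record resolve
# specgram and numpy); estimates the log-PSD of a 440 Hz tone at three chosen frequencies.
import numpy as np
rate = 8000
t = np.arange(0, 1.0, 1.0 / rate)
tone = np.sin(2 * np.pi * 440 * t)
logpsd, freqs, times = interp_logpsd(tone, rate, window=256, noverlap=128,
                                     freqs=np.array([200.0, 440.0, 1000.0]))
print(logpsd.shape)  # (n_frames, 3): log-power at the requested frequencies per frame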
def load_replica_camera_traj(traj_file_path):
"""
the format:
index
"""
camera_traj = []
traj_file_handle = open(traj_file_path, 'r')
for line in traj_file_handle:
split = line.split()
#if blank line, skip
if not len(split):
continue
camera_traj.append(split)
traj_file_handle.close()
return camera_traj | 1879c97ed5ce24834689b156ffdc971b023e67f2 | 8,787 |
def test_model(sess, graph, x_, y_):
"""
    Evaluate the model on the test set.
    :param sess: TensorFlow session holding the restored model
    :param graph: graph to fetch the named tensors from
    :param x_: test inputs
    :param y_: test labels
    :return: average loss and accuracy over the test set
"""
data_len = len(x_)
batch_eval = batch_iter(x_, y_, 64)
total_loss = 0.0
total_acc = 0.0
input_x = graph.get_operation_by_name('input_x').outputs[0]
input_y = graph.get_operation_by_name('input_y').outputs[0]
drop_prob = graph.get_operation_by_name('drop_prob').outputs[0]
loss = graph.get_operation_by_name('loss/loss').outputs[0]
acc = graph.get_operation_by_name('accuracy/acc').outputs[0]
for x_batch, y_batch in batch_eval:
batch_len = len(x_batch)
feed_dict = {input_x: x_batch, input_y: y_batch,
drop_prob: 0}
test_loss, test_acc = sess.run([loss, acc], feed_dict=feed_dict)
total_loss += test_loss * batch_len
total_acc += test_acc * batch_len
return total_loss / data_len, total_acc / data_len | 7c310a7cf979004d9f14fbd1ec57228dbfc81cd2 | 8,788 |
import numpy as np
def epanechnikov(h: np.ndarray, Xi: np.ndarray, x: np.ndarray) -> np.ndarray:
"""Epanechnikov kernel.
Parameters:
h : bandwidth.
Xi : 1-D ndarray, shape (nobs, 1). The value of the training set.
x : 1-D ndarray, shape (1, nbatch). The value at which the kernel density is being estimated.
Returns:
ndarray of shape ``(n_obs, nbatch)``: The kernel_value at each training point for each var.
"""
u = (Xi - x) / h
out = 3 / 4 * (1 - u**2) * (np.abs(u) <= 1)
assert out.shape == (Xi.shape[0], x.shape[1])
return out | 45902e9396661a6c0f8faf9cfc2d017125f6a427 | 8,789 |
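# Shape check for epanechnikov above: Xi is (nobs, 1), x is (1, nbatch), output is (nobs, nbatch).
import numpy as np
Xi = np.linspace(-2.0, 2.0, 50).reshape(-1, 1)   # training points, shape (50, 1)
x = np.array([[0.0, 0.5, 1.0]])                  # evaluation points, shape (1, 3)
k = epanechnikov(np.array([0.5]), Xi, x)
print(k.shape)  # (50, 3); values are zero wherever |Xi - x| > h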
def punctuation(chars=r',.\"!@#\$%\^&*(){}\[\]?/;\'`~:<>+=-'):
"""Finds characters in text. Useful to preprocess text. Do not forget
to escape special characters.
"""
return rf'[{chars}]' | b2fd23d8485c3b6d429723a02a95c981982559b5 | 8,790 |
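# Usage sketch for punctuation above: the returned character class can be passed straight to re.sub.
import re
print(re.sub(punctuation(), "", "Hello, world! (test)"))  # -> "Hello world test"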
import time
import logging
def log_http_request(f):
"""Decorator to enable logging on an HTTP request."""
level = get_log_level()
def new_f(*args, **kwargs):
request = args[1] # Second argument should be request.
object_type = 'Request'
object_id = time.time()
log_name = object_type + '.' + str(object_id)
setattr(request, 'LOG_ID', object_id)
logger = logging.getLogger(log_name)
logger.setLevel(level)
handler = LogModelHandler(object_type, object_id)
logger.addHandler(handler)
return f(*args, **kwargs)
new_f.func_name = f.func_name
return new_f | ecb62d0501307330fc0a56d8eadfbee8e729adf6 | 8,791 |
import numpy as np
import paddle
import paddle.nn.functional as F
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
"""
"Look at" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
place = vertices.place
# if list or tuple convert to numpy array
if isinstance(at, list) or isinstance(at, tuple):
at = paddle.to_tensor(at, dtype=paddle.float32, place=place)
# if numpy array convert to tensor
elif isinstance(at, np.ndarray):
at = paddle.to_tensor(at).to(place)
elif paddle.is_tensor(at):
at = at.to(place)
if isinstance(up, list) or isinstance(up, tuple):
up = paddle.to_tensor(up, dtype=paddle.float32, place=place)
elif isinstance(up, np.ndarray):
up = paddle.to_tensor(up).to(place)
elif paddle.is_tensor(up):
up = up.to(place)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = paddle.to_tensor(eye, dtype=paddle.float32, place=place)
elif isinstance(eye, np.ndarray):
eye = paddle.to_tensor(eye).to(place)
elif paddle.is_tensor(eye):
eye = eye.to(place)
batch_size = vertices.shape[0]
if eye.ndimension() == 1:
eye = eye[None, :].tile([batch_size, 1])
if at.ndimension() == 1:
at = at[None, :].tile([batch_size, 1])
if up.ndimension() == 1:
up = up[None, :].tile([batch_size, 1])
# prevent paddle no grad error
at.stop_gradient = False
eye.stop_gradient = False
up.stop_gradient = False
# create new axes
# eps is chosen as 0.5 to match the chainer version
z_axis = F.normalize(at - eye, epsilon=1e-5)
x_axis = F.normalize(paddle.cross(up, z_axis), epsilon=1e-5)
y_axis = F.normalize(paddle.cross(z_axis, x_axis), epsilon=1e-5)
# create rotation matrix: [bs, 3, 3]
r = paddle.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = paddle.matmul(vertices, r.swapaxes(1,2))
return vertices | 10a6b94ecba08fecd829758f9c94765c718a5add | 8,792 |
import types
def _count(expr, pat, flags=0):
"""
Count occurrences of pattern in each string of the sequence or scalar
:param expr: sequence or scalar
:param pat: valid regular expression
:param flags: re module flags, e.g. re.IGNORECASE
:return:
"""
return _string_op(expr, Count, output_type=types.int64,
_pat=pat, _flags=flags) | c4c387f18ac75977a661662dae7606a066242b57 | 8,793 |
import numpy as np
def simplex3_vertices():
"""
Returns the vertices of the standard 3-simplex. Each column is a vertex.
"""
v = np.array([
[1, 0, 0],
[-1/3, +np.sqrt(8)/3, 0],
[-1/3, -np.sqrt(2)/3, +np.sqrt(2/3)],
[-1/3, -np.sqrt(2)/3, -np.sqrt(2/3)],
])
return v.transpose() | b10c2c781d1f7ed7050e14f069efd3e0e9a80a2b | 8,794 |
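# Quick check for simplex3_vertices above: the four columns are unit vectors whose pairwise
# dot products are -1/3, i.e. a regular tetrahedron centred at the origin.
import numpy as np
v = simplex3_vertices()       # shape (3, 4), one vertex per column
print(np.round(v.T @ v, 3))   # ~1 on the diagonal, ~-0.333 off the diagonal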
def get_output_msg(status, num_logs):
""" Returnes the output message in accordance to the script status """
if status == EXECUTION_STATE_COMPLETED:
return "Retrieved successfully {} logs that triggered the alert".format(num_logs)
else:
return "Failed to retrieve logs. Please check the script's logs to see what went wrong..." | caec8de737251cc7c386a85a098d73d19617e71a | 8,795 |
import numpy as np
def kSEQK(age):
"""Age-dependent organ-specific absorbed dose rate per unit kerma rate,
normalized against the corresponding value for an adult
Parameters
----------
age: float or list
Age(s) when kSEQK is evaluated.
"""
k=[]
if (not isinstance(age,list)) and (not isinstance(age,np.ndarray)):
age=[age]
for a in age:
if a<20: #TODO is that /1017 actually correct?
k.append((0.0124*a**4-0.5364*a**3+7.4882*a**2-44.888*a+1209.8)/1000)
#k.append((0.0015*a**5 - 0.1214*a**4 + 3.473*a**3 - 40.28*a**2 + 136.3*a + 1233)/1017)
else:
k.append(1.0)
if len(k) == 1:
return k[0]
else:
return np.array(k) | 967584edc3be078eab2b8d21338bb5221f9c65ca | 8,796 |
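# Usage sketch for kSEQK above: a scalar age returns a float, a list returns a numpy array.
print(kSEQK(30))          # adults (age >= 20) return exactly 1.0
print(kSEQK([1, 5, 10]))  # age-dependent factors from the polynomial fit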
import re
def insert_channel_links(message: str) -> str:
"""
Takes a message and replaces all of the channel references with
links to those channels in Slack formatting.
:param message: The message to modify
:return: A modified copy of the message
"""
message_with_links = message
matches = re.findall(r'#[a-z0-9\-_(){}\[\]\'\"/]{1,22}', message)
for match in matches:
channel_name = match[1:]
channel = bot.channels.get(channel_name)
if channel is not None:
channel_link_string = f"<#{channel.id}|{channel.name}>"
message_with_links = message_with_links.replace(match, channel_link_string)
return message_with_links | ce56e81e8eb66dc0f2754141bcfc30f42db50c5a | 8,797 |
from uuid import UUID
def check_int_uuid(uuid):
"""Check that the int uuid i pass is valid."""
try:
converted = UUID(int=uuid, version=4)
except ValueError:
return False
return converted.int == uuid | a0ba7447e6c8cc0c35b68024fb4ade25f0802239 | 8,798 |
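# Usage sketch for check_int_uuid above.
from uuid import uuid4
print(check_int_uuid(uuid4().int))  # True: a valid version-4 UUID integer round-trips unchanged
print(check_int_uuid(12345))        # False: setting the version/variant bits changes the value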
def calc_E_E_C_hs_d_t_i(i, device, region, A_A, A_MR, A_OR, L_CS_d_t, L_CL_d_t):
"""暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)を計算する
Args:
i(int): 暖冷房区画の番号
device(dict): 暖冷房機器の仕様
region(int): 省エネルギー地域区分
A_A(float): 床面積の合計 (m2)
A_MR(float): 主たる居室の床面積 (m2)
A_OR(float): その他の居室の床面積 (m2)
L_CS_d_t(ndarray): 冷房区画の冷房顕熱負荷
L_CL_d_t(ndarray): 冷房区画の冷房潜熱負荷
Returns:
ndarray: 暖冷房区画𝑖に設置された冷房設備機器の消費電力量(kWh/h)
"""
    if device['type'] == 'ルームエアコンディショナー':  # room air conditioner
        # Get the equipment specifications
A_HCZ_i = calc_A_HCZ_i(i, A_A, A_MR, A_OR)
q_rtd_C = rac_spec.get_q_rtd_C(A_HCZ_i)
e_rtd_C = rac_spec.get_e_rtd_C(device['e_class'], q_rtd_C)
        # Calculate the electricity consumption
E_E_C_d_t_i = rac.calc_E_E_C_d_t(
region=region,
q_rtd_C=q_rtd_C,
e_rtd_C=e_rtd_C,
dualcompressor=device['dualcompressor'],
L_CS_d_t=L_CS_d_t[i - 1],
L_CL_d_t=L_CL_d_t[i - 1]
)
else:
raise ValueError(device['type'])
print('{} E_E_C_d_t_{} = {} [kWh] (L_H_d_t_{} = {} [MJ])'.format(device['type'], i, np.sum(E_E_C_d_t_i), i,
np.sum(L_CS_d_t + L_CL_d_t)))
return E_E_C_d_t_i | 8dbd0119ac90f3847de1f5af05891583a9bda26b | 8,799 |