content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M)
---|---|---|
async def conversation_steps(month: int = Query(default=1, ge=1, le=6), current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Fetches the number of conversation steps that took place in the chat between the users and the agent
"""
return Utility.trigger_history_server_request(
current_user.get_bot(),
f'/api/history/{current_user.get_bot()}/metrics/conversation/steps',
{'month': month}
) | 9845cf39290f056395351953e3d7accbcb14ae06 | 9,600 |
def off(app: str) -> dict:
"""
Switches the app offline, if it isn't already.
    :param app: The name of the Heroku app whose formation you want to switch offline
:return: dictionary containing information about the app
"""
return Herokron(app).off() | 8aa6cef16d8924ce682fa9a7b886cee87d4e02c5 | 9,601 |
import io
import csv
import logging
logger = logging.getLogger(__name__)
def parse_csv_from_response(response):
    """
    Convenience function for working with CSV responses.
    Parses the CSV rows and returns a list of dicts, keyed by the
    column names from the header row.
    """
file_from_string = io.StringIO(response.content.decode("utf-8"))
parsed_rows = []
reader = csv.DictReader(file_from_string)
for row in reader:
logger.debug(row)
parsed_rows.append(row)
return parsed_rows | 9a2bf99e810c7b4b9ac947ad3ba53e016deb836a | 9,602 |
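# Minimal usage sketch for parse_csv_from_response above; FakeResponse is a
# hypothetical stand-in for any object exposing a `.content` bytes attribute
# (e.g. a requests or test-client response).
class FakeResponse:
    def __init__(self, content: bytes):
        self.content = content
rows = parse_csv_from_response(FakeResponse(b"name,score\nada,10\nbob,7\n"))
assert rows == [{"name": "ada", "score": "10"}, {"name": "bob", "score": "7"}]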
from datetime import timedelta
def count_weekday(start, stop, wd_target=0):
    """
    Returns the number of days between start and stop (inclusive) that are
    both the first day of a month and the specified weekday, with 0 being
    Monday.
    """
counter = 0
while start != stop + timedelta(days=1):
if start.weekday() == wd_target and start.day == 1:
counter += 1
start += timedelta(days=1)
return counter | 27dd8ce6493ac1c24c65c92208767159a6406348 | 9,603 |
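# Minimal usage sketch for count_weekday above: count first-of-month Mondays
# (wd_target=0) in the first half of 2021 (2021-02-01 and 2021-03-01).
from datetime import date
assert count_weekday(date(2021, 1, 1), date(2021, 6, 30), wd_target=0) == 2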
import collections
def _get_ngrams(segment, max_order):
  """Extracts all n-grams up to a given maximum order from an input segment.
  Args:
    segment: text segment from which n-grams will be extracted.
    max_order: maximum length in tokens of the n-grams returned by this
        method.
  Returns:
    The Counter containing all n-grams up to max_order in segment
    with a count of how many times each n-gram occurred.
  """
ngram_counts = collections.Counter()
for order in range(1, max_order + 1):
for i in range(0, len(segment) - order + 1):
ngram = tuple(segment[i:i+order])
ngram_counts[ngram] += 1
return ngram_counts | c4b388d71b2c16e6c324718b8a07db8531c83413 | 9,604 |
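# Minimal usage sketch for _get_ngrams above: extract unigrams and bigrams
# from a whitespace-tokenized segment.
counts = _get_ngrams("the cat sat on the mat".split(), max_order=2)
assert counts[("the",)] == 2
assert counts[("the", "cat")] == 1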
def pkg_topics_list(data_dict):
"""
Get a list of topics
"""
pkg = model.Package.get(data_dict['id'])
vocabulary = model.Vocabulary.get('Topics')
topics = []
if vocabulary:
topics = pkg.get_tags(vocab=vocabulary)
return topics | 7594ea421ade2a530d8e08490b542bbd05d1a962 | 9,605 |
import re
import os
import thornpy
def get_adrill_cdbs(adrill_user_cfg, adrill_shared_cfg=None):
    """Return the names and locations of all user defined MSC Adams Drill databases (cdbs)
    Parameters
    ----------
    adrill_user_cfg : str
        Full path to an Adams Drill user configuration file. This should be in the user's HOME directory.
    adrill_shared_cfg : str
        Full path to an Adams Drill shared configuration file. This should be in the Adams Drill installation directory. (the default is None, which means that only user cdbs will be returned.)
Returns
-------
dict
A dictionary in which the cdb names are keys and the cdb locations are values.
"""
cdbs = {}
with open(adrill_user_cfg,'r') as fid:
for line in fid:
if line.startswith('DATABASE'):
# try:
cdb_name = re.split('[\t ]+',line.lstrip())[1]
cdb_loc = thornpy.utilities.convert_path(re.split('[\t ]+', line, maxsplit=2)[-1].replace('\n','').replace('$HOME', os.path.expanduser('~')))
cdbs[cdb_name] = cdb_loc
# except:
# raise cdbError('The following line in {} could not be interpreted.\n\n{}'.format(adrill_user_cfg,line))
if adrill_shared_cfg:
top_dir = os.path.split(os.path.split(adrill_shared_cfg)[0])[0]
with open(adrill_shared_cfg,'r') as fid:
for line in fid:
if line.startswith('DATABASE'):
# try:
cdb_name = re.split('[\t ]+', line, maxsplit=2)[1]
cdb_loc = thornpy.utilities.convert_path(re.split('[\t ]+', line, maxsplit=2)[-1].replace('\n','').replace('$HOME', os.path.expanduser('~')).replace('$topdir', top_dir))
cdbs[cdb_name] = cdb_loc
# except:
# raise cdbError('The following line in {} could not be interpreted.\n\n{}'.format(adrill_shared_cfg,line))
return cdbs | 08ea6fee48bb06627168bb2eefe2cc0f1ef0cb5d | 9,606 |
def five_five(n):
"""
This checks if n is a power of 2 (or 0).
This is because the only way that n and (n-1) have none of the same bits (the
& check) is when n is a power of 2, or 0.
"""
return ((n & (n-1)) == 0) | 0b1cc310b5d8bd6dab6299b6a999a5dd0720ea80 | 9,607 |
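# Minimal usage sketch for five_five above: the n & (n-1) trick accepts 0 and
# exact powers of two, and rejects everything else.
assert all(five_five(n) for n in (0, 1, 2, 4, 8, 1024))
assert not any(five_five(n) for n in (3, 6, 12, 1023))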
from main import bot
from typing import Optional
import asyncio
import logging
from aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from aiogram.utils import exceptions
log = logging.getLogger(__name__)
async def send_message(user_id: int,
text: str,
buttons: Optional[list[dict[str, str]]] = None,
disable_notification: bool = False) -> bool:
"""
Safe messages sender
:param user_id:
:param text:
:param buttons: List of inline buttons in format [{'text': 'text', 'callback_data': 'callback_data', **kwargs}].
A button can have all the same keys that InlineKeyboardButton() take
:param disable_notification:
:return:
"""
try:
await bot.send_message(user_id, text, reply_markup=InlineKeyboardMarkup(
row_width=2,
resize_keyboard=True,
one_time_keyboard=True, ).add(
*[InlineKeyboardButton(**button) for button in buttons])
if buttons else None,
disable_notification=disable_notification)
log.info(f"Sent message to target [ID:{user_id}]")
except exceptions.BotBlocked:
log.error(f"Target [ID:{user_id}]: blocked by user")
except exceptions.ChatNotFound:
log.error(f"Target [ID:{user_id}]: invalid user ID")
except exceptions.RetryAfter as e:
log.error(f"Target [ID:{user_id}]: Flood limit is exceeded. Sleep {e.timeout} seconds.")
await asyncio.sleep(e.timeout)
return await send_message(user_id, text, buttons) # Recursive call
except exceptions.UserDeactivated:
log.error(f"Target [ID:{user_id}]: user is deactivated")
except exceptions.TelegramAPIError:
log.exception(f"Target [ID:{user_id}]: failed")
else:
log.info(f"Target [ID:{user_id}]: success")
return True
return False | e2cb9879a1eea95d639f6ff3c7b7bf7c5b19ef68 | 9,608 |
def convert_bytes_to_size(some_bytes):
"""
Convert number of bytes to appropriate form for display.
:param some_bytes: A string or integer
:return: A string
"""
some_bytes = int(some_bytes)
suffix_dict = {
'0': 'B',
'1': 'KiB',
'2': 'MiB',
'3': 'GiB',
'4': 'TiB',
'5': 'PiB'
}
counter = 0
    while some_bytes > 1 and counter < 5:
tmp = some_bytes / 1024
if tmp < 1:
break
else:
some_bytes = tmp
counter += 1
return str(format(some_bytes, '.2f')) + ' ' + str(suffix_dict[str(counter)]) | d1579e0fc0850a98145910c056b3fac8be7c66f1 | 9,609 |
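# Minimal usage sketch for convert_bytes_to_size above: a few sample conversions.
assert convert_bytes_to_size(512) == '512.00 B'
assert convert_bytes_to_size(2048) == '2.00 KiB'
assert convert_bytes_to_size(5 * 1024 ** 3) == '5.00 GiB'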
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as util
import nipype.interfaces.fsl as fsl
def create_bbregister_func_to_anat(fieldmap_distortion=False,
name='bbregister_func_to_anat'):
"""
Registers a functional scan in native space to structural. This is meant to be used
    after create_nonlinear_register() has been run and relies on some of its outputs.
Parameters
----------
fieldmap_distortion : bool, optional
If field map-based distortion correction is being run, FLIRT should
take in the appropriate field map-related inputs.
name : string, optional
Name of the workflow.
Returns
-------
register_func_to_anat : nipype.pipeline.engine.Workflow
Notes
-----
Workflow Inputs::
inputspec.func : string (nifti file)
            Input functional scan to be registered to anatomical space
inputspec.anat_skull : string (nifti file)
Corresponding full-head scan of subject
inputspec.linear_reg_matrix : string (mat file)
Affine matrix from linear functional to anatomical registration
inputspec.anat_wm_segmentation : string (nifti file)
White matter segmentation probability mask in anatomical space
inputspec.bbr_schedule : string (.sch file)
Boundary based registration schedule file for flirt command
Workflow Outputs::
outputspec.func_to_anat_linear_xfm : string (mat file)
Affine transformation from functional to anatomical native space
outputspec.anat_func : string (nifti file)
Functional data in anatomical space
"""
register_bbregister_func_to_anat = pe.Workflow(name=name)
inputspec = pe.Node(util.IdentityInterface(fields=['func',
'anat_skull',
'linear_reg_matrix',
'anat_wm_segmentation',
'bbr_schedule',
'fieldmap',
'fieldmapmask'
]),
name='inputspec')
inputNode_echospacing = pe.Node(
util.IdentityInterface(fields=['echospacing']),
name='echospacing_input')
inputNode_pedir = pe.Node(util.IdentityInterface(fields=['pedir']),
name='pedir_input')
outputspec = pe.Node(util.IdentityInterface(fields=['func_to_anat_linear_xfm',
'anat_func']),
name='outputspec')
wm_bb_mask = pe.Node(interface=fsl.ImageMaths(),
name='wm_bb_mask')
wm_bb_mask.inputs.op_string = '-thr 0.5 -bin'
register_bbregister_func_to_anat.connect(inputspec, 'anat_wm_segmentation',
wm_bb_mask, 'in_file')
def bbreg_args(bbreg_target):
return '-cost bbr -wmseg ' + bbreg_target
bbreg_func_to_anat = pe.Node(interface=fsl.FLIRT(),
name='bbreg_func_to_anat')
bbreg_func_to_anat.inputs.dof = 6
register_bbregister_func_to_anat.connect(inputspec, 'bbr_schedule',
bbreg_func_to_anat, 'schedule')
register_bbregister_func_to_anat.connect(wm_bb_mask, ('out_file', bbreg_args),
bbreg_func_to_anat, 'args')
register_bbregister_func_to_anat.connect(inputspec, 'func',
bbreg_func_to_anat, 'in_file')
register_bbregister_func_to_anat.connect(inputspec, 'anat_skull',
bbreg_func_to_anat, 'reference')
register_bbregister_func_to_anat.connect(inputspec, 'linear_reg_matrix',
bbreg_func_to_anat, 'in_matrix_file')
if fieldmap_distortion:
def convert_pedir(pedir):
# FSL Flirt requires pedir input encoded as an int
conv_dct = {'x': 1, 'y': 2, 'z': 3, '-x': -1, '-y': -2, '-z': -3}
if not isinstance(pedir, str):
raise Exception("\n\nPhase-encoding direction must be a "
"string value.\n\n")
if pedir not in conv_dct.keys():
raise Exception("\n\nInvalid phase-encoding direction "
"entered: {0}\n\n".format(pedir))
return conv_dct[pedir]
register_bbregister_func_to_anat.connect(inputNode_pedir, ('pedir', convert_pedir),
bbreg_func_to_anat, 'pedir')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmap',
bbreg_func_to_anat, 'fieldmap')
register_bbregister_func_to_anat.connect(inputspec, 'fieldmapmask',
bbreg_func_to_anat, 'fieldmapmask')
register_bbregister_func_to_anat.connect(inputNode_echospacing, 'echospacing',
bbreg_func_to_anat, 'echospacing')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_matrix_file',
outputspec, 'func_to_anat_linear_xfm')
register_bbregister_func_to_anat.connect(bbreg_func_to_anat, 'out_file',
outputspec, 'anat_func')
return register_bbregister_func_to_anat | 0598cef86fdebe697bfdc1627554c4340303a86b | 9,610 |
def pointShiftFromRange(dataSize, x = all, y = all, z = all, **args):
"""Calculate shift of points given a specific range restriction
Arguments:
dataSize (str): data size of the full image
x,y,z (tuples or all): range specifications
Returns:
tuple: shift of points from original origin of data to origin of range reduced data
"""
if isinstance(dataSize, str):
dataSize = self.dataSize(dataSize)
dataSize = list(dataSize)
d = len(dataSize)
rr = []
if d > 0:
rr.append(toDataRange(dataSize[0], r = x))
if d > 1:
rr.append(toDataRange(dataSize[1], r = y))
if d > 2:
rr.append(toDataRange(dataSize[2], r = z))
if d > 3 or d < 1:
        raise RuntimeError('shiftFromRange: dimension %d too big' % d)
return [r[0] for r in rr] | dbe5c2049c5b76bfdbb839faa2a3e6cb942c8249 | 9,611 |
def callparser():
"""Parses a group of expressions."""
def cull_seps(tokens):
return tokens[0] or tokens[1]
return RepeatParser(exprparser() + OptionParser(dlmparser(',')) ^ cull_seps) | af8fbf81044b90d6a1a9ea769a513109237692d4 | 9,612 |
def write_section(section_name, section, keys, writer) -> bool:
"""
Saves the specified section to the specified writer starting at the current
point in the writer. It will not throw an exception. On error (IO exception
or not being able to write the section) it will return false. WARNING: It can
not scan the destination to see if this section has already been written, so
typically this method is called when writing out an entire configuration with
multiple sections in sequence.
Returns True on success and False on failure.
"""
keys = keys if keys else section.keys()
ret = False
# OBSOLETE with io.TextIOWrapper(writer) as writer2:
try:
writer.write(section_name + ":\n")
for k in keys:
val = section.get(k)
if val:
output = " " + k + _COLONSPACE + val + "\n"
writer.write(output)
ret = True
except OSError as err:
_printerr(err) # Just return false
return ret | 368f0cac04d392b9ea8946d30538a3fb0265c593 | 9,613 |
def _rotation_270(image):
"""Rotate an image with 270 degrees (clockwise).
Parameters
----------
image : np.ndarray
Image to rotate with shape (y, x, channels).
Returns
-------
image_rotated : np.ndarray
Image rotated with shape (y, x, channels).
"""
image_rotated = _flip_v(image)
image_rotated = _transpose(image_rotated)
return image_rotated | 3cd291c9283a32d0bc66902bff7861db855f4420 | 9,614 |
def classification_id_for_objs(object_id: str, url: str, token: str):
"""
Get classification id for a given object
Arguments
----------
object_id : str
Object id to get classification id for
url : str
Skyportal url
token : str
Skyportal token
Returns
----------
status_code : int
HTTP status code
data : list
List of classification ids and their author ids
"""
classifications = api(
"GET",
f"{url}/api/sources/{object_id}/classifications",
token=token,
)
data = {}
if classifications.status_code == 200:
data = {
"id": classifications.json()["data"][0]["id"],
"author_id": classifications.json()["data"][0]["author_id"],
}
return classifications.status_code, data | b03bb7ff18235cafd1b171e5042d64c65c19cffc | 9,615 |
import math
def ciede2000(Lab_1, Lab_2):
"""Calculates CIEDE2000 color distance between two CIE L*a*b* colors."""
C_25_7 = 6103515625 # 25**7
L1, a1, b1 = Lab_1[0], Lab_1[1], Lab_1[2]
L2, a2, b2 = Lab_2[0], Lab_2[1], Lab_2[2]
C1 = math.sqrt(a1**2 + b1**2)
C2 = math.sqrt(a2**2 + b2**2)
C_ave = (C1 + C2) / 2
G = 0.5 * (1 - math.sqrt(C_ave**7 / (C_ave**7 + C_25_7)))
L1_, L2_ = L1, L2
a1_, a2_ = (1 + G) * a1, (1 + G) * a2
b1_, b2_ = b1, b2
C1_ = math.sqrt(a1_**2 + b1_**2)
C2_ = math.sqrt(a2_**2 + b2_**2)
if b1_ == 0 and a1_ == 0: h1_ = 0
elif a1_ >= 0: h1_ = math.atan2(b1_, a1_)
else: h1_ = math.atan2(b1_, a1_) + 2 * math.pi
if b2_ == 0 and a2_ == 0: h2_ = 0
elif a2_ >= 0: h2_ = math.atan2(b2_, a2_)
else: h2_ = math.atan2(b2_, a2_) + 2 * math.pi
dL_ = L2_ - L1_
dC_ = C2_ - C1_
dh_ = h2_ - h1_
if C1_ * C2_ == 0: dh_ = 0
elif dh_ > math.pi: dh_ -= 2 * math.pi
elif dh_ < -math.pi: dh_ += 2 * math.pi
dH_ = 2 * math.sqrt(C1_ * C2_) * math.sin(dh_ / 2)
L_ave = (L1_ + L2_) / 2
C_ave = (C1_ + C2_) / 2
_dh = abs(h1_ - h2_)
_sh = h1_ + h2_
C1C2 = C1_ * C2_
if _dh <= math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2
elif _dh > math.pi and _sh < 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 + math.pi
elif _dh > math.pi and _sh >= 2 * math.pi and C1C2 != 0: h_ave = (h1_ + h2_) / 2 - math.pi
else: h_ave = h1_ + h2_
T = 1 - 0.17 * math.cos(h_ave - math.pi / 6) + 0.24 * math.cos(2 * h_ave) + 0.32 * math.cos(3 * h_ave + math.pi / 30) - 0.2 * math.cos(4 * h_ave - 63 * math.pi / 180)
h_ave_deg = h_ave * 180 / math.pi
if h_ave_deg < 0: h_ave_deg += 360
elif h_ave_deg > 360: h_ave_deg -= 360
dTheta = 30 * math.exp(-(((h_ave_deg - 275) / 25)**2))
R_C = 2 * math.sqrt(C_ave**7 / (C_ave**7 + C_25_7))
S_C = 1 + 0.045 * C_ave
S_H = 1 + 0.015 * C_ave * T
Lm50s = (L_ave - 50)**2
S_L = 1 + 0.015 * Lm50s / math.sqrt(20 + Lm50s)
R_T = -math.sin(dTheta * math.pi / 90) * R_C
k_L, k_C, k_H = 1, 1, 1
f_L = dL_ / k_L / S_L
f_C = dC_ / k_C / S_C
f_H = dH_ / k_H / S_H
dE_00 = math.sqrt(f_L**2 + f_C**2 + f_H**2 + R_T * f_C * f_H)
return dE_00 | f95bc8338fbabe09f2038cea34e7a8fcad87f3bf | 9,616 |
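# Minimal usage sketch for ciede2000 above, using two illustrative L*a*b* triples
# (roughly sRGB red and blue): identical colours give 0 and the measure is symmetric.
lab_red, lab_blue = (53.2, 80.1, 67.2), (32.3, 79.2, -107.9)
assert ciede2000(lab_red, lab_red) == 0.0
assert abs(ciede2000(lab_red, lab_blue) - ciede2000(lab_blue, lab_red)) < 1e-9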
import torch
def ifft2c_new(data: torch.Tensor) -> torch.Tensor:
"""
Apply centered 2-dimensional Inverse Fast Fourier Transform.
Args:
data: Complex valued input data containing at least 3 dimensions:
dimensions -3 & -2 are spatial dimensions and dimension -1 has size
2. All other dimensions are assumed to be batch dimensions.
Returns:
The IFFT of the input.
"""
if not data.shape[-1] == 2:
raise ValueError("Tensor does not have separate complex dim.")
data = ifftshift(data, dim=[-3, -2])
data = torch.view_as_real(
torch.fft.ifftn( # type: ignore
torch.view_as_complex(data), dim=(-2, -1), norm="ortho"
)
)
data = fftshift(data, dim=[-3, -2])
return data | 6752dd94c690d8a8d3d0d625a693cd711c12c9c0 | 9,617 |
def _make_options(context, base):
"""Return pyld options for given context and base."""
options = {}
if context is None:
context = default_context()
options['expandContext'] = context
if base is not None:
options['base'] = base
return options | 8fcd514d9b0d11020ea197a29af6e76a53201306 | 9,618 |
def datatable(table_config: DatatableConfig, table_id: str, class_name: str = ''):
"""
Deprecated, use instead
<table id="{table_id}" data-datatable-url="{url}" class="{class_name}"></table>
"""
return {
"rich_columns": table_config.enabled_columns,
"search_box_enabled": table_config.search_box_enabled,
"table_id": table_id,
"class_name": class_name,
"expand_client_renderer": table_config.expand_client_renderer
} | 777d19f0eaa6f1adbb53cc1fa6042fbec3df4398 | 9,619 |
def npelpt(point, ellipse):
"""npelpt(ConstSpiceDouble [3] point, ConstSpiceDouble [NELLIPSE] ellipse)"""
return _cspyce0.npelpt(point, ellipse) | f81ff9a993f0166ed4899338c66b58e5329382ce | 9,620 |
def register_module():
"""Registers this module in the registry."""
# provide parser to verify
verify.parse_content = content.parse_string_in_scope
# setup routes
courses_routes = [('/faq', utils_faq.FaqHandler),('/allresources', utils_allresources.AllResourcesHandler)]
global custom_module
custom_module = custom_modules.Module(
'Course',
'FAQ Module',
[], courses_routes, notify_module_enabled = notify)
return custom_module | e4fe1ae4d3b05a4c396155ae3b471e941de56f7d | 9,621 |
def degreeList(s):
"""Convert degrees given on command line to a list.
For example, the string '1,2-5,7' is converted to [1,2,3,4,5,7]."""
l = []
for r in s.split(','):
t = r.split('-')
if len(t) == 1:
l.append(int(t[0]))
else:
a = int(t[0])
b = int(t[1])
l.extend(range(a,b+1, (1 if a <= b else -1)))
return sorted(l) | 3b517831ddab47da5cd0e36fa5913d6d59e73715 | 9,622 |
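# Minimal usage sketch for degreeList above, mirroring the docstring example.
assert degreeList('1,2-5,7') == [1, 2, 3, 4, 5, 7]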
import os
def _gen_simple_while_loop(base_dir):
"""Generates a saved model with a while loop."""
class Module(module.Module):
"""A module with a while loop."""
@def_function.function(
input_signature=[tensor_spec.TensorSpec((), dtypes.float32)])
def compute(self, value):
acc, _ = control_flow_ops.while_loop(
cond=lambda acc, i: i > 0,
body=lambda acc, i: (acc + i, i - 1),
loop_vars=(constant_op.constant(0.0), value))
return acc
to_save = Module()
saved_model.save(
to_save, export_dir=os.path.join(base_dir, "SimpleWhileLoop")) | 1b782d14a0a0613a756f7dbf046c01d8a3338d61 | 9,623 |
import pandas as pd
def _get_corrected_msm(msm: pd.DataFrame, elevation: float, ele_target: float):
    """Apply elevation corrections to the temperature, pressure and mixing ratio in an MSM dataframe.
    Args:
        msm(pd.DataFrame): MSM dataframe
        elevation(float): mean elevation of the MSM grid [m]
        ele_target(float): elevation of the target location [m]
    Returns:
        pd.DataFrame: corrected MSM dataframe
    """
    TMP = msm['TMP'].values
    PRES = msm['PRES'].values
    MR = msm['MR'].values
    # elevation difference
    ele_gap = ele_target - elevation
    # temperature correction
    TMP_corr = get_corrected_TMP(TMP, ele_gap)
    # pressure correction
    PRES_corr = get_corrected_PRES(PRES, ele_gap, TMP_corr)
    # mixing ratio (absolute humidity by mass) correction
    MR_corr = get_corrected_mixing_ratio(
        MR=MR,
        TMP=TMP_corr,
        PRES=PRES_corr
    )
    # write the corrected values back into the dataframe
    msm = msm.copy()
    msm['TMP'] = TMP_corr
    msm['PRES'] = PRES_corr
    msm['MR'] = MR_corr
    # why is the pressure column dropped here?
    # msm.drop(['PRES'], axis=1, inplace=True)
return msm | 5cbfafa077c02ff5b7b74e47eff30c99e6201ff8 | 9,624 |
def get_answers_by_qname(sim_reads_sam_file):
"""Get a dictionary of Direction Start CIGAR MDtag by ReadID (qname)."""
answers_by_qname = {}
reads_file = open(sim_reads_sam_file)
    next(reads_file)  # skip header line
for line in reads_file:
id, dir, start, cigar, mdtag = line.strip().split('\t')
answers_by_qname[id] = (dir, start, cigar, mdtag)
reads_file.close()
return answers_by_qname | eae27387f4ac0e20b16392ca699fad7e6489c6e9 | 9,625 |
def post_times(post: Post) -> html_tag:
"""Display time user created post.
If user has edited their post show the timestamp for that as well.
:param post: Post ORM object.
:return: Rendered paragraph tag with post's timestamp information.
"""
p = tags.p(cls="small")
p.add(f"{_('Posted')}: ")
p.add(moment(post.created).fromNow())
if post.edited is not None:
p.add(tags.br(), f"{_('Edited')}: ", moment(post.edited).fromNow())
return p | 8e64d6f49ed5bcf8f9a9ea1f3a5350880bbe7b39 | 9,626 |
import pandas as pd
def read_articles_stat(path):
    """
    Read the articles_stat file and build a list of dicts from which the number of
    negative and positive samples for each law article can be looked up.
    :param path: location of the articles_stat file
    :return: ret: [{'第一条': (num_negatives, num_positives), ...}, {...}, ..., {...}]
    """
df = pd.read_csv(path, header=0, index_col=0)
ret = [{} for i in range(4)]
for index, row in df.iterrows():
ret[row['name']][row['number']] = (row['negatives'], row['positives'])
# print(ret)
return ret | be35e11a508e22241188b4719dc6fa0db14f4395 | 9,627 |
def get_bounding_box(font):
""" Returns max and min bbox of given truetype font """
ymin = 0
ymax = 0
if font.sfntVersion == 'OTTO':
ymin = font['head'].yMin
ymax = font['head'].yMax
else:
for g in font['glyf'].glyphs:
char = font['glyf'][g]
if hasattr(char, 'yMin') and ymin > char.yMin:
ymin = char.yMin
if hasattr(char, 'yMax') and ymax < char.yMax:
ymax = char.yMax
return ymin, ymax | 98161ef3426c2bb9b6dc4079c69f5c1f9d4e93a2 | 9,628 |
def create_user(client, profile, user, resend=False):
""" Creates a new user in the specified user pool """
try:
if resend:
# Resend confirmation email for get back password
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
MessageAction="RESEND",
)
else:
response = client.admin_create_user(
UserPoolId=profile["user_pool_id"],
Username=user.email,
UserAttributes=[
{"Name": "email", "Value": user.email},
{"Name": "email_verified", "Value": "true"},
],
)
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
if resend:
print(f"Resend confirmation to user {user.email} successfully")
else:
print(f"User {user.email} was created successfully")
return response
except client.exceptions.UsernameExistsException as error:
print(f"User {user.email} exists")
return error.response
except client.exceptions.ClientError as error:
print(f"Fail to create user {user.email}: {error.response}")
return error.response | 4c1f83c0ab7fd28dc7b1e2d8f2efa224360dfdb1 | 9,629 |
import numpy as np
def generate_move_probabilities(
in_probs: np.ndarray,
move_dirn: float,
nu_par: float,
dir_bool: np.ndarray
):
""" create move probabilities from a 1d array of values"""
out_probs = np.asarray(in_probs.copy())
if np.isnan(out_probs).any():
print('NANs in move probabilities!')
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs = out_probs.clip(min=0.)
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
#out_probs = np.random.rand(len(out_probs))
out_probs[4] = 0.
out_probs = [ix * float(iy) for ix, iy in zip(out_probs, dir_bool)]
if np.count_nonzero(out_probs) == 0:
out_probs = get_directional_probs(move_dirn * np.pi / 180.)
out_probs /= np.sum(out_probs)
out_probs = np.power(out_probs, nu_par)
out_probs /= np.sum(out_probs)
return out_probs | 4ea9ef914b905b6ab79933ba90a3604b0391f038 | 9,630 |
import tensorflow as tf
def _is_diagonal(x):
"""Helper to identify if `LinearOperator` has only a diagonal component."""
return (isinstance(x, tf.linalg.LinearOperatorIdentity) or
isinstance(x, tf.linalg.LinearOperatorScaledIdentity) or
isinstance(x, tf.linalg.LinearOperatorDiag)) | de3bb0ab2313c5432abab4bf7b0c1e227bc682d7 | 9,631 |
from flask import render_template
def index():
"""Index Controller"""
return render_template('login.html') | 53499d68c734e6315e3f24927d70cb7cddca346a | 9,632 |
def match_twosided(desc1,desc2):
""" Two-sided symmetric version of match(). """
matches_12 = match(desc1,desc2)
matches_21 = match(desc2,desc1)
ndx_12 = matches_12.nonzero()[0]
# remove matches that are not symmetric
for n in ndx_12:
if matches_21[int(matches_12[n])] != n:
matches_12[n] = 0
return matches_12 | a86d1cfb19afa5404d8c4950dd8b24a130a6a003 | 9,633 |
import re
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links | 58e1a73a524333cbd019387866047d434c7de494 | 9,634 |
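# Minimal usage sketch for parse_header_links above: parse an RFC 5988 style
# Link header into url/params dicts (example.com URLs are placeholders).
links = parse_header_links(
    '<https://api.example.com/items?page=2>; rel="next", '
    '<https://api.example.com/items?page=5>; rel="last"'
)
assert links[0] == {'url': 'https://api.example.com/items?page=2', 'rel': 'next'}
assert links[1]['rel'] == 'last'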
import functools
import asyncio
def validate_term(fun):
"""Compares current local (node's) term and request (sender's) term:
- if current local (node's) term is older:
update current local (node's) term and become a follower
- if request (sender's) term is older:
respond with {'success': False}
args:
- data object received from other members
returns:
- True if term validation succeeds, False otherwise
"""
@functools.wraps(fun)
def wrapped(self, data):
logger.debug(f'{self.id} validate_term() start.')
if self.storage['term'] < data['term']:
self.storage.update({'term': data['term']})
if not isinstance(self, Follower):
self.to_follower()
logger.debug(f'{self.id} validate_term() done, bad term, moved to Follower.')
return False
if self.storage['term'] > data['term'] and not data['type'].endswith('_response'):
response = {
'success': False,
'term': self.storage['term'],
'type': f'{data["type"]}_response',
}
sender = self.raft.members.get(data['sender_id'])
host = sender[0]
port = sender[1]
asyncio.ensure_future(
self.raft.send(data=response, dest_host=host, dest_port=port), loop=self.loop
)
logger.debug(f'{self.id} validate_term() done, bad term, responded with False.')
return False
logger.debug(f'{self.id} validate_term() done, good term.')
return fun(self, data)
return wrapped | 326ad6e9d937f9e07c2ac9e774578ddb34c61d04 | 9,635 |
import numpy as np
from scipy import spatial
def _friends_bootstrap_radius(args):
"""Internal method used to compute the radius (half-side-length) for each
ball (cube) used in :class:`RadFriends` (:class:`SupFriends`) using
bootstrapping."""
# Unzipping.
points, ftype = args
rstate = np.random
# Resampling.
npoints, ndim = points.shape
idxs = rstate.randint(npoints, size=npoints) # resample
idx_in = np.unique(idxs) # selected objects
sel = np.ones(npoints, dtype='bool')
sel[idx_in] = False
idx_out = np.where(sel)[0] # "missing" objects
if len(idx_out) < 2: # edge case
idx_out = np.append(idx_out, [0, 1])
points_in, points_out = points[idx_in], points[idx_out]
# Construct KDTree to enable quick nearest-neighbor lookup for
# our resampled objects.
kdtree = spatial.KDTree(points_in)
if ftype == 'balls':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "radius" of n-sphere).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=2)
elif ftype == 'cubes':
# Compute distances from our "missing" points its closest neighbor
# among the resampled points using the Euclidean norm
# (i.e. "half-side-length" of n-cube).
dists, ids = kdtree.query(points_out, k=1, eps=0, p=np.inf)
# Conservative upper-bound on radius.
dist = max(dists)
return dist | 0492f316c53b434faf79445313ec853830f87867 | 9,636 |
def _parametrize_plus(argnames=None, # type: Union[str, Tuple[str], List[str]]
argvalues=None, # type: Iterable[Any]
indirect=False, # type: bool
ids=None, # type: Union[Callable, Iterable[str]]
idstyle=None, # type: Optional[Union[str, Callable]]
idgen=_IDGEN, # type: Union[str, Callable]
auto_refs=True, # type: bool
scope=None, # type: str
hook=None, # type: Callable[[Callable], Callable]
debug=False, # type: bool
**args):
"""
:return: a tuple (decorator, needs_inject) where needs_inject is True if decorator has signature (f, host)
and False if decorator has signature (f)
"""
# first handle argnames / argvalues (new modes of input)
argnames, argvalues = _get_argnames_argvalues(argnames, argvalues, **args)
# argnames related
initial_argnames = ','.join(argnames)
nb_params = len(argnames)
# extract all marks and custom ids.
# Do not check consistency of sizes argname/argvalue as a fixture_ref can stand for several argvalues.
marked_argvalues = argvalues
has_cust_ids = (idgen is not _IDGEN or len(args) > 0) or (ids is not None)
p_ids, p_marks, argvalues, fixture_indices, mod_lvid_indices = \
_process_argvalues(argnames, marked_argvalues, nb_params, has_cust_ids, auto_refs=auto_refs)
# idgen default
if idgen is _IDGEN:
# default: use the new id style only when some keyword **args are provided and there are no fixture refs
idgen = AUTO if (len(args) > 0 and len(fixture_indices) == 0 and ids is None) else None
if idgen is AUTO:
# note: we use a "trick" here with mini_idval to get the appropriate result (argname='', idx=v)
def _make_ids(**args):
for n, v in args.items():
yield "%s=%s" % (n, mini_idval(val=v, argname='', idx=v))
idgen = lambda **args: "-".join(_make_ids(**args)) # noqa
# generate id
if idgen is not None:
if ids is not None:
raise ValueError("Only one of `ids` and `idgen` should be provided")
ids = _gen_ids(argnames, argvalues, idgen)
if len(fixture_indices) == 0:
        # No fixture reference: fall back to a standard pytest.mark.parametrize
if debug:
print("No fixture reference found. Calling @pytest.mark.parametrize...")
print(" - argnames: %s" % initial_argnames)
print(" - argvalues: %s" % marked_argvalues)
print(" - ids: %s" % ids)
# handle infinite iterables like latest pytest, for convenience
ids = resolve_ids(ids, marked_argvalues, full_resolve=False)
# no fixture reference: shortcut, do as usual (note that the hook wont be called since no fixture is created)
_decorator = pytest.mark.parametrize(initial_argnames, marked_argvalues, indirect=indirect,
ids=ids, scope=scope)
if indirect:
return _decorator, False
else:
# wrap the decorator to check if the test function has the parameters as arguments
def _apply(test_func):
if not safe_isclass(test_func):
# a Function: raise a proper error message if improper use
s = signature(test_func)
for p in argnames:
if p not in s.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func.__name__, s))
else:
# a Class: we cannot really perform any check.
pass
return _decorator(test_func)
return _apply, False
else:
# there are fixture references: we will create a specific decorator replacing the params with a "union" fixture
if indirect:
warn("Using `indirect=True` at the same time as fixture references in `@parametrize` is not guaranteed to "
"work and is strongly discouraged for readability reasons. See "
"https://github.com/smarie/python-pytest-cases/issues/150")
# First unset the pytest.param id we have set earlier in _process_argvalues: indeed it is only needed in
# the case above where we were defaulting to legacy @pytest.mark.parametrize .
# Here we have fixture refs so we will create a fixture union with several ParamAlternative, and their id will
# anyway be generated with `mini_idvalset` which tackles the case of lazy_value used for a tuple of args
for i in mod_lvid_indices:
p_ids[i] = None
if p_marks[i]:
marked_argvalues[i] = ParameterSet(values=marked_argvalues[i].values, id=None, marks=p_marks[i])
else:
marked_argvalues[i] = argvalues[i] # we can even remove the pytest.param wrapper
if indirect:
raise ValueError("Setting `indirect=True` is not yet supported when at least a `fixure_ref` is present in "
"the `argvalues`.")
if debug:
print("Fixture references found. Creating references and fixtures...")
param_names_str = '_'.join(argnames).replace(' ', '')
# Are there explicit ids provided ?
explicit_ids_to_use = False
ids = resolve_ids(ids, argvalues, full_resolve=False)
if isinstance(ids, list):
explicit_ids_to_use = True
# First define a few functions that will help us create the various fixtures to use in the final "union"
def _create_params_alt(fh, test_func, union_name, from_i, to_i, hook): # noqa
""" Routine that will be used to create a parameter fixture for argvalues between prev_i and i"""
# is this about a single value or several values ?
if to_i == from_i + 1:
i = from_i
del from_i
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
return SingleParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, i=i,
argvalue=marked_argvalues[i], id=_id,
hook=hook, debug=debug)
else:
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used later
_ids = ids[from_i:to_i] if explicit_ids_to_use else ids
return MultiParamAlternative.create(new_fixture_host=fh, test_func=test_func,
param_union_name=union_name, argnames=argnames, from_i=from_i,
to_i=to_i, argvalues=marked_argvalues[from_i:to_i], ids=_ids,
hook=hook, debug=debug)
def _create_fixture_ref_alt(union_name, test_func, i): # noqa
# If an explicit list of ids was provided, slice it. Otherwise use the provided callable
if ids is not None:
_id = ids[i] if explicit_ids_to_use else ids(argvalues[i])
else:
_id = None
# Get the referenced fixture name
f_fix_name = argvalues[i].fixture
if debug:
print(" - Creating reference to existing fixture %r" % (f_fix_name,))
# Create the alternative
f_fix_alt = FixtureParamAlternative(union_name=union_name, fixture_ref=argvalues[i],
decorated=test_func, argnames=argnames, param_index=i, id=_id)
# Finally copy the custom id/marks on the FixtureParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
f_fix_alt = ParameterSet(values=(f_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return f_fix_alt
def _create_fixture_ref_product(fh, union_name, i, fixture_ref_positions, test_func, hook): # noqa
# If an explicit list of ids was provided, slice it. Otherwise the provided callable will be used
_id = ids[i] if explicit_ids_to_use else ids
# values to use:
param_values = argvalues[i]
# Create a unique fixture name
p_fix_name = "%s_%s_P%s" % (test_func.__name__, param_names_str, i)
p_fix_name = check_name_available(fh, p_fix_name, if_name_exists=CHANGE, caller=parametrize)
if debug:
print(" - Creating new fixture %r to handle parameter %s that is a cross-product" % (p_fix_name, i))
# Create the fixture
_make_fixture_product(fh, name=p_fix_name, hook=hook, caller=parametrize,
fixtures_or_values=param_values, fixture_positions=fixture_ref_positions)
# Create the corresponding alternative
p_fix_alt = ProductParamAlternative(union_name=union_name, alternative_name=p_fix_name, decorated=test_func,
argval=argvalues[i], argnames=argnames, param_index=i, id=_id)
# copy the custom id/marks to the ParamAlternative if any
if is_marked_parameter_value(marked_argvalues[i]):
p_fix_alt = ParameterSet(values=(p_fix_alt,),
id=get_marked_parameter_id(marked_argvalues[i]),
marks=get_marked_parameter_marks(marked_argvalues[i]))
return p_fix_alt
# Then create the decorator per se
def parametrize_plus_decorate(test_func, fixtures_dest):
"""
A decorator that wraps the test function so that instead of receiving the parameter names, it receives the
new fixture. All other decorations are unchanged.
:param test_func:
:return:
"""
test_func_name = test_func.__name__
# first check if the test function has the parameters as arguments
if safe_isclass(test_func):
# a test class: not supported yet
raise NotImplementedError("@parametrize can not be used to decorate a Test class when the argvalues "
"contain at least one reference to a fixture.")
old_sig = signature(test_func)
for p in argnames:
if p not in old_sig.parameters:
raise ValueError("parameter '%s' not found in test function signature '%s%s'"
"" % (p, test_func_name, old_sig))
# The name for the final "union" fixture
# style_template = "%s_param__%s"
main_fixture_style_template = "%s_%s"
fixture_union_name = main_fixture_style_template % (test_func_name, param_names_str)
fixture_union_name = check_name_available(fixtures_dest, fixture_union_name, if_name_exists=CHANGE,
caller=parametrize)
# Retrieve (if ref) or create (for normal argvalues) the fixtures that we will union
fixture_alternatives = []
prev_i = -1
for i, j_list in fixture_indices: # noqa
# A/ Is there any non-empty group of 'normal' parameters before the fixture_ref at <i> ? If so, handle.
if i > prev_i + 1:
# create a new "param" fixture parametrized with all of that consecutive group.
# Important note: we could either wish to create one fixture for parameter value or to create
# one for each consecutive group as shown below. This should not lead to different results but perf
# might differ. Maybe add a parameter in the signature so that users can test it ?
# this would make the ids more readable by removing the "P2toP3"-like ids
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# B/ Now handle the fixture ref at position <i>
if j_list is None:
# argvalues[i] contains a single argvalue that is a fixture_ref : add the referenced fixture
f_fix_alt = _create_fixture_ref_alt(union_name=fixture_union_name, test_func=test_func, i=i)
fixture_alternatives.append(f_fix_alt)
else:
                    # argvalues[i] is a tuple, some of them being fixture_ref. create a fixture referring to all of them
prod_fix_alt = _create_fixture_ref_product(fixtures_dest, union_name=fixture_union_name, i=i,
fixture_ref_positions=j_list,
test_func=test_func, hook=hook)
fixture_alternatives.append(prod_fix_alt)
prev_i = i
# C/ handle last consecutive group of normal parameters, if any
i = len(argvalues) # noqa
if i > prev_i + 1:
p_fix_alt = _create_params_alt(fixtures_dest, test_func=test_func, hook=hook,
union_name=fixture_union_name, from_i=prev_i + 1, to_i=i)
fixture_alternatives.append(p_fix_alt)
# if fixtures_to_union has length 1, simplify ? >> No, we leave such "optimization" to the end user
# Handle the list of alternative names. Duplicates should be removed here
fix_alt_names = []
for alt in fixture_alternatives:
if is_marked_parameter_value(alt):
# wrapped by a pytest.param
alt = get_marked_parameter_values(alt, nbargs=1)
assert len(alt) == 1, "Error with alternative please report"
alt = alt[0]
if alt.alternative_name not in fix_alt_names:
fix_alt_names.append(alt.alternative_name)
else:
# non-unique alt fixture names should only happen when the alternative is a fixture reference
assert isinstance(alt, FixtureParamAlternative), "Created fixture names not unique, please report"
# Finally create a "main" fixture with a unique name for this test function
if debug:
print("Creating final union fixture %r with alternatives %r"
% (fixture_union_name, UnionFixtureAlternative.to_list_of_fixture_names(fixture_alternatives)))
# use the custom subclass of idstyle that was created for ParamAlternatives
if idstyle is None or isinstance(idstyle, string_types):
_idstyle = ParamIdMakers.get(idstyle)
else:
_idstyle = idstyle
# note: the function automatically registers it in the module
_make_fixture_union(fixtures_dest, name=fixture_union_name, hook=hook, caller=parametrize,
fix_alternatives=fixture_alternatives, unique_fix_alt_names=fix_alt_names,
idstyle=_idstyle, scope=scope)
# --create the new test function's signature that we want to expose to pytest
# it is the same than existing, except that we want to replace all parameters with the new fixture
# first check where we should insert the new parameters (where is the first param we remove)
_first_idx = -1
for _first_idx, _n in enumerate(old_sig.parameters):
if _n in argnames:
break
# then remove all parameters that will be replaced by the new fixture
new_sig = remove_signature_parameters(old_sig, *argnames)
# finally insert the new fixture in that position. Indeed we can not insert first or last, because
# 'self' arg (case of test class methods) should stay first and exec order should be preserved when possible
new_sig = add_signature_parameters(new_sig, custom_idx=_first_idx,
custom=Parameter(fixture_union_name,
kind=Parameter.POSITIONAL_OR_KEYWORD))
if debug:
print("Creating final test function wrapper with signature %s%s" % (test_func_name, new_sig))
# --Finally create the fixture function, a wrapper of user-provided fixture with the new signature
def replace_paramfixture_with_values(kwargs): # noqa
# remove the created fixture value
encompassing_fixture = kwargs.pop(fixture_union_name)
# and add instead the parameter values
if nb_params > 1:
for i, p in enumerate(argnames): # noqa
try:
kwargs[p] = encompassing_fixture[i]
except TypeError:
raise Exception("Unable to unpack parameter value to a tuple: %r" % encompassing_fixture)
else:
kwargs[argnames[0]] = encompassing_fixture
# return
return kwargs
if not isgeneratorfunction(test_func):
# normal test or fixture function with return statement
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
return NOT_USED
else:
replace_paramfixture_with_values(kwargs)
return test_func(*args, **kwargs)
else:
# generator test or fixture function (with one or several yield statements)
@wraps(test_func, new_sig=new_sig)
def wrapped_test_func(*args, **kwargs): # noqa
if kwargs.get(fixture_union_name, None) is NOT_USED:
# TODO why this ? it is probably useless: this fixture
# is private and will never end up in another union
yield NOT_USED
else:
replace_paramfixture_with_values(kwargs)
for res in test_func(*args, **kwargs):
yield res
# move all pytest marks from the test function to the wrapper
# not needed because the __dict__ is automatically copied when we use @wraps
# move_all_pytest_marks(test_func, wrapped_test_func)
# With this hack we will be ordered correctly by pytest https://github.com/pytest-dev/pytest/issues/4429
try:
# propagate existing attribute if any
wrapped_test_func.place_as = test_func.place_as
except: # noqa
# position the test at the original function's position
wrapped_test_func.place_as = test_func
# return the new test function
return wrapped_test_func
return parametrize_plus_decorate, True | 200da7befaa0695744c2f339a32711beb25ad69a | 9,637 |
from mindspore import nn, ops
def _clip_grad(clip_value, grad):
"""
Clip gradients.
Inputs:
clip_value (float): Specifies how much to clip.
grad (tuple[Tensor]): Gradients.
Outputs:
tuple[Tensor], clipped gradients.
"""
dt = ops.dtype(grad)
new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
return new_grad | 31cd4693a2bd80af7d3dd4be6a830b2982f8fce8 | 9,638 |
def sample_cast(user, name='David'):
"""Creates a sample Cast"""
return Cast.objects.create(user=user, name=name) | 3e4d03878697dfac931babbeaacaa7687d520189 | 9,639 |
import re
def sort_special_vertex_groups(vgroups,
special_vertex_group_pattern='STYMO:',
global_special_vertex_group_suffix='Character'):
"""
Given a list of special vertex group names, all with the prefix of
special_vertex_group_pattern, selects all that start with global_special_vertex_group_suffix
and puts them at the start of the list. This enables e.g. to easily define
top-level vertex groups that always go first, followed by details that
overwrite top level assignments.
"""
global_vg_name_pattern = special_vertex_group_pattern + \
global_special_vertex_group_suffix
first = []
last = []
for g in vgroups:
if re.match(global_vg_name_pattern, g) is not None:
first.append(g)
else:
last.append(g)
first.sort()
last.sort()
first.extend(last)
return first | 0cc8f0992553e5da5b37ea9a9886996cb9013582 | 9,640 |
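# Minimal usage sketch for sort_special_vertex_groups above: global 'Character'
# groups move to the front, detail groups follow, each block sorted alphabetically.
groups = ['STYMO:Hand_L', 'STYMO:Character', 'STYMO:Arm_R']
assert sort_special_vertex_groups(groups) == ['STYMO:Character', 'STYMO:Arm_R', 'STYMO:Hand_L']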
def _GetFullDesktopName(window_station, desktop) -> str:
"""Returns a full name to a desktop.
Args:
window_station: Handle to window station.
desktop: Handle to desktop.
"""
return "\\".join([
win32service.GetUserObjectInformation(handle, win32service.UOI_NAME)
for handle in [window_station, desktop]
]) | e9a2aeebdb6f705efab1a0c1997ca66f4079cc07 | 9,641 |
import numpy as np
def decrypt(plain_text: str, a: np.ndarray, b: np.ndarray, space: str) -> str:
"""Decrypts the given text with given a, b and space
:param plain_text: Text you want to decrypt
:type plain_text: str
:param a: An integer that corresponds to the A parameter in block cypher
:type a: np.ndarray
:param b: An integer that corresponds to the B parameter in block cypher
:type b: np.ndarray
:param space: Target space
:type space: str
:return: Decrypted text in string form
:rtype: str
"""
result = []
t = math_utils.get_inverse_matrix(a)
pairs = cryption_utils.get_pairs_of_int_two_from_text(plain_text, space)
for pair in pairs:
c = math_utils.create_nested_list_from_flat_list(pair)
subtracted_matrix = math_utils.sub_matrices(c, b)
dot_product = math_utils.dot_product_with_multiple_matrices(
[t, np.array(subtracted_matrix)]
)
result_list = space_utils.convert_nested_ints_to_char(dot_product, space)
result.append("".join(result_list))
return "".join(result) | 642b47d3459c64c5c7b280401aa96bd8f37cfa59 | 9,642 |
from typing import List
from functools import reduce
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components as connected_components_graph
def merge_rois(roi_list: List,
temporal_coefficient: float, original_2d_vol: np.ndarray,
roi_eccentricity_limit=1.0, widefield=False):
# TODO is this the most efficient implementation I can do
"""
Merges rois based on temporal and spacial overlap
Parameters
----------
roi_list
List of Rois in format: [[np.array of pixels roi 1],
[np.array of pixels roi 2] ... ]
temporal_coefficient
The coefficient limiting merging based of temporal information, 0 merge all
1 merge none
original_2d_vol
Volume of each pixel's time trace
Returns
-------
List of new rois in format: [[np.array of pixels roi 1],
[np.array of pixels roi 2] ... ]
"""
A = np.zeros([original_2d_vol.shape[0], len(roi_list)], dtype=int) # create 2d
# matrix of zeros with dims number of pixels in image by number of rois
# Change pixels of each roi to 1
for num, roi in enumerate(roi_list):
A[roi, num] = 1
# Create graph of which rois have pixels which intersect with each other.
A_graph = np.matmul(A.transpose(), A)
connected_rois = np.nonzero(A_graph)
# print(A_graph)
timetraces = [np.mean(original_2d_vol[roi], axis=0) for roi in roi_list]
A_graph_new = np.identity(A_graph.shape[0], dtype=float)
# print(list(zip(*connected_rois)))
for x in list(zip(*connected_rois)):
# applies a 10% overlap condition to the rois.
if x[0] != x[1] and (widefield or (
A_graph[x[0], x[1]] > len(roi_list[x[1]]) * .1 and A_graph[
x[0], x[1]] > len(roi_list[x[0]]) * .1)):
A_graph_new[x[0], x[1]] = compare_time_traces(timetraces[x[0]],
timetraces[x[1]])
# print(A_graph_new[x[0],x[1]])
A_graph_new[x[1], x[0]] = A_graph_new[x[0], x[1]]
A_graph[x[0], x[1]] = False
A_graph[x[1], x[0]] = False
A_components_to_merge = A_graph_new >= temporal_coefficient
A_csr = csr_matrix(A_components_to_merge)
# Use connected components to group these rois together
connected = connected_components_graph(A_csr, False, return_labels=True)
# processes connected components putting each group of rois into roi_groups list
roi_groups = [[] for _ in range(len(roi_list))]
for num in range(len(roi_list)):
roi_groups[connected[1][num]].append(roi_list[num])
new_rois = []
for group in roi_groups:
if len(group) != 0:
# combine those rois that should be merged with first roi.
first_roi = list(reduce(combine_rois, group))
new_rois.append(np.array(first_roi))
return new_rois | 631b953dcbf401f17392cd0cade06dc51d369e11 | 9,643 |
import pandas as pd
def well2D_to_df1D(xlsx_path, sheet, data_col):
"""
Convert new 2D output format (per well) to 1D dataframe
:param str xlsx_path: path to the xlsx file
:param str sheet: sheet name to load
:param str data_col: new column name of the linearized values
:return dataframe df: linearized dataframe
"""
df = pd.read_excel(xlsx_path, sheet_name=sheet, index_col=0)
df = df.unstack().reset_index(name=data_col) # unpivot (linearize) the table
df.rename(columns={'level_1': 'row_id', 'level_0': 'col_id'}, inplace=True)
df['well_id'] = df.row_id + df.col_id.map(str)
df = df[['well_id', data_col]]
return df | 0d8403b311c50cbc7f723e044f3aa93c50f17e80 | 9,644 |
import hashlib
def obtain_file_hash(path, hash_algo="md5"):
"""Obtains the hash of a file using the specified hash algorithm
"""
hash_algo = hashlib.sha256() if hash_algo=="sha256" else hashlib.md5()
block_size = 65535
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(block_size),b''):
hash_algo.update(chunk)
return hash_algo.hexdigest() | daa996339c638eaab4f3d067dcaaa4b865a6f923 | 9,645 |
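# Minimal usage sketch for obtain_file_hash above, assuming it runs from a script
# file so __file__ is defined: md5 digests are 32 hex chars, sha256 digests 64.
md5_digest = obtain_file_hash(__file__)
sha256_digest = obtain_file_hash(__file__, hash_algo="sha256")
assert len(md5_digest) == 32 and len(sha256_digest) == 64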
def b_q_bar(z_c):
"""Result of integrating from z_c to 1/2 of the
hard collinear part of the quark splitting function"""
b_q_zc = CF * (-3. + 6. * z_c + 4.* np.log(2. - 2.*z_c))/2.
return b_q_zc | c7e68a2b4b17e035081fd07784aeef017fcedabc | 9,646 |
def getsize(store, path=None):
"""Compute size of stored items for a given path."""
path = normalize_storage_path(path)
if hasattr(store, 'getsize'):
# pass through
return store.getsize(path)
elif isinstance(store, dict):
# compute from size of values
prefix = _path_to_prefix(path)
size = 0
for k in listdir(store, path):
try:
v = store[prefix + k]
except KeyError:
pass
else:
try:
size += buffer_size(v)
except TypeError:
return -1
return size
else:
return -1 | e537a231c49ac1edb6153d4751bd7f1b01979778 | 9,647 |
def split_2DL5AB(GL, cursor, log):
"""
splits the KIR2DL5 GL-string into 2 separate GL strings for 2DL5A and 2DL5B
:param GL: GL-string for KIR2DL5, combining both A and B
:param cursor: cursor to a connection to the nextype archive
:param log: logger instance
"""
log.info("Splitting 2DL5-alleles...")
proc_name = "GL_STRINGS_MGMT.SPLIT_GL_STRING_2DL5@ngsa"
proc_params = [GL]
proc_params2 = [2, 'KIR', 'J', 'J', '2DL5', 'J', '2DL5', 'N']
success, values = call_procedure(proc_name, proc_params, 2, proc_params2, cursor, log)
if success:
log.info("\t=> Success!")
[part1, part2] = values
if "2DL5A" in part1:
A = part1
B = part2
else:
A = part2
B = part1
A_alleles = A.replace("2DL5A*", "")
B_alleles = B.replace("2DL5B*", "")
else:
log.info("\t=> Procedure call did not work. :-(")
A_alleles = ""
B_alleles = ""
return A_alleles, B_alleles | e4c5eb51927b9e9cd607f95c1e2d1f853f4f2a3e | 9,648 |
def set_system_bios(context, settings, system_id=None, workaround=False):
"""
Finds a system matching the given ID and sets the BIOS settings
Args:
context: The Redfish client object with an open session
settings: The settings to apply to the system
system_id: The system to locate; if None, perform on the only system
workaround: Indicates if workarounds should be attempted for non-conformant services
Returns:
The response of the PATCH
"""
# Locate the system
system = get_system(context, system_id)
# Get the BIOS resource and determine if the settings need to be applied to the resource itself or the settings
# object
if "Bios" not in system.dict:
raise RedfishSystemBiosNotFoundError("System '{}' does not support representing BIOS".format(system.dict["Id"]))
bios_uri = system.dict["Bios"]["@odata.id"]
bios = context.get(bios_uri)
etag = bios.getheader("ETag")
if "@Redfish.Settings" in bios.dict:
bios_settings = get_system_bios_settings(context, bios, system.dict["Id"], workaround)
bios_uri = bios_settings.dict["@odata.id"]
etag = bios_settings.getheader("ETag")
# Update the settings
payload = {"Attributes": settings}
headers = None
if etag is not None:
headers = {"If-Match": etag}
response = context.patch(bios_uri, body=payload, headers=headers)
verify_response(response)
return response | c28f52db53363399df534efacc506a7e25c99930 | 9,649 |
def geometric_augmentation(images,
flow = None,
mask = None,
crop_height = 640,
crop_width = 640,
probability_flip_left_right = 0.5,
probability_flip_up_down = 0.1,
probability_scale = 0.8,
probability_relative_scale = 0.,
probability_stretch = 0.8,
probability_rotation = 0.0,
probability_relative_rotation = 0.0,
probability_crop_offset = 0.0,
min_bound_scale = -0.2,
max_bound_scale = 0.6,
max_strech_scale = 0.2,
min_bound_relative_scale = -0.1,
max_bound_relative_scale = 0.1,
max_rotation_deg = 15,
max_relative_rotation_deg = 3,
max_relative_crop_offset = 5,
return_full_scale=False):
"""Applies geometric augmentations to an image pair and corresponding flow.
Args:
images: Image pair of shape [2, height, width, channels].
flow: Corresponding forward flow field of shape [height, width, 2].
mask: Mask indicating which positions in the flow field hold valid flow
vectors of shape [height, width, 1]. Non-valid poisitions are encoded with
0, valid positions with 1.
crop_height: Height of the final augmented output.
crop_width: Width of the final augmented output.
probability_flip_left_right: Probability of applying left/right flip.
probability_flip_up_down: Probability of applying up/down flip
probability_scale: Probability of applying scale augmentation.
probability_relative_scale: Probability of applying scale augmentation to
      only the second frame of the image pair.
probability_stretch: Probability of applying stretch augmentation (scale
without keeping the aspect ratio).
probability_rotation: Probability of applying rotation augmentation.
probability_relative_rotation: Probability of applying rotation augmentation
      to only the second frame of the image pair.
probability_crop_offset: Probability of applying a relative offset while
cropping.
min_bound_scale: Defines the smallest possible scaling factor as
2**min_bound_scale.
max_bound_scale: Defines the largest possible scaling factor as
2**max_bound_scale.
    max_strech_scale: Defines the smallest and largest possible stretching factor
as 2**-max_strech_scale and 2**max_strech_scale.
min_bound_relative_scale: Defines the smallest possible scaling factor for
the relative scaling as 2**min_bound_relative_scale.
max_bound_relative_scale: Defines the largest possible scaling factor for
the relative scaling as 2**max_bound_relative_scale.
max_rotation_deg: Defines the maximum angle of rotation in degrees.
max_relative_rotation_deg: Defines the maximum angle of rotation in degrees
for the relative rotation.
max_relative_crop_offset: Defines the maximum relative offset in pixels for
cropping.
return_full_scale: bool. If this is passed, the full size images will be
returned in addition to the geometrically augmented (cropped and / or
resized) images. In addition to the resized images, the crop height,
width, and any padding applied will be returned.
Returns:
if return_full_scale is False:
Augmented images, flow and mask (if not None).
if return_full_scale is True:
Augmented images, flow, mask, full_size_images, crop_h, crop_w, pad_h,
and pad_w.
"""
# apply geometric augmentation
if probability_flip_left_right > 0:
images, flow, mask = random_flip_left_right(
images, flow, mask, probability_flip_left_right)
if probability_flip_up_down > 0:
images, flow, mask = random_flip_up_down(
images, flow, mask, probability_flip_up_down)
if probability_scale > 0 or probability_stretch > 0:
images, flow, mask = random_scale(
images,
flow,
mask,
min_scale=min_bound_scale,
max_scale=max_bound_scale,
max_strech=max_strech_scale,
probability_scale=probability_scale,
probability_strech=probability_stretch)
if probability_relative_scale > 0:
images, flow, mask = random_scale_second(
images, flow, mask,
min_scale=min_bound_relative_scale,
max_scale=max_bound_relative_scale,
probability_scale=probability_relative_scale)
if probability_rotation > 0:
images, flow, mask = random_rotation(
images, flow, mask,
probability=probability_rotation,
max_rotation=max_rotation_deg, not_empty_crop=True)
if probability_relative_rotation > 0:
images, flow, mask = random_rotation_second(
images, flow, mask,
probability=probability_relative_rotation,
max_rotation=max_relative_rotation_deg, not_empty_crop=True)
images_uncropped = images
images, flow, mask, offset_h, offset_w = random_crop(
images, flow, mask, crop_height, crop_width,
relative_offset=max_relative_crop_offset,
probability_crop_offset=probability_crop_offset)
# Add 100 / 200 pixels to crop height / width for full scale warp
pad_to_size_h = crop_height + 200
pad_to_size_w = crop_width + 400
if return_full_scale:
if pad_to_size_w:
uncropped_shape = tf.shape(images_uncropped)
if images.shape[1] > uncropped_shape[1] or images.shape[
2] > uncropped_shape[2]:
images_uncropped = images
uncropped_shape = tf.shape(images_uncropped)
offset_h = tf.zeros_like(offset_h)
offset_w = tf.zeros_like(offset_w)
if uncropped_shape[1] > pad_to_size_h:
crop_ht = offset_h - (200 // 2)
crop_hb = offset_h + crop_height + (200 // 2)
crop_hb += tf.maximum(0, -crop_ht)
crop_ht -= tf.maximum(0, -(uncropped_shape[1] - crop_hb))
crop_ht = tf.maximum(crop_ht, 0)
crop_hb = tf.minimum(crop_hb, uncropped_shape[1])
offset_h -= crop_ht
images_uncropped = images_uncropped[:, crop_ht:crop_hb, :, :]
if uncropped_shape[2] > pad_to_size_w:
crop_wt = offset_w - (400 // 2)
crop_wb = offset_w + crop_width + (400 // 2)
crop_wb += tf.maximum(0, -crop_wt)
crop_wt -= tf.maximum(0, -(uncropped_shape[2] - crop_wb))
crop_wt = tf.maximum(crop_wt, 0)
crop_wb = tf.minimum(crop_wb, uncropped_shape[2])
offset_w -= crop_wt
images_uncropped = images_uncropped[:, :, crop_wt:crop_wb, :]
uncropped_shape = tf.shape(images_uncropped)
      # pad the uncropped images up to (pad_to_size_h, pad_to_size_w)
pad_h = pad_to_size_h - uncropped_shape[1]
pad_w = pad_to_size_w - uncropped_shape[2]
with tf.control_dependencies([
tf.compat.v1.assert_greater_equal(pad_h, 0),
tf.compat.v1.assert_greater_equal(pad_w, 0)
]):
images_uncropped = tf.pad(images_uncropped,
[[0, 0], [pad_h, 0], [pad_w, 0], [0, 0]])
images_uncropped = tf.ensure_shape(images_uncropped,
[2, pad_to_size_h, pad_to_size_w, 3])
return images, flow, mask, images_uncropped, offset_h, offset_w, pad_h, pad_w
return images, flow, mask | f1a9ce6983edfd47388360b9d777ad5909c046e7 | 9,650 |
def edit_distance_between_seqs(seq1, seq2):
"""Input is two strings. They are globally aligned
and the edit distance is returned. An indel of any length
is counted as one edit"""
aln1, aln2 = _needleman_wunsch(seq1, seq2)
return edit_distance_from_aln_strings(aln1, aln2) | 88e98475c1652311745af69c6f521bba0497e633 | 9,651 |
import torch
def sentence_prediction(sentence):
"""Predict the grammar score of a sentence.
Parameters
----------
sentence : str
The sentence to be predicted.
Returns
-------
float
The predicted grammar probability.
"""
tokenizer = config.TOKENIZER.from_pretrained(
config.MODEL_PATH, local_files_only=True
)
model = config.MODEL.from_pretrained(config.MODEL_PATH, local_files_only=True)
max_len = config.MAX_LEN
sentence = str(sentence)
sentence = " ".join(sentence.split())
inputs = tokenizer.encode_plus(
sentence,
add_special_tokens=True,
max_length=max_len,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors="pt",
truncation=True,
)
ids = torch.LongTensor(inputs["input_ids"][0]).unsqueeze(0)
mask = torch.LongTensor(inputs["attention_mask"][0]).unsqueeze(0)
ids = ids.to(DEVICE)
mask = mask.to(DEVICE)
model.to(DEVICE)
outputs = model(ids, token_type_ids=None, attention_mask=mask, return_dict=True)
outputs = torch.sigmoid(outputs.logits).cpu().detach().numpy()
return outputs[0][0] | 14d7c8efa76df4727419c2d99d685707ef46eb25 | 9,652 |
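A minimal usage sketch for the function above; the module-level `config` object, `DEVICE`, and the saved model it references are assumed to already exist, so the call is illustrative rather than standalone.

# Hedged usage sketch: `config`, `DEVICE`, and the fine-tuned model are assumed to be set up.
score = sentence_prediction("She don't likes apples.")
print(f"grammar probability: {score:.3f}")  # sigmoid output in [0, 1]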
def DB():
"""Create a DB wrapper object connecting to the test database."""
db = pg.DB(dbname, dbhost, dbport)
if debug:
db.debug = debug
return db | fceb8d15d52cabc1814b42b286c10bbe7470216c | 9,653 |
def build_seq(variants, phased_genotype, ref, pre_start, ref_end=None):
"""
    Build or extend the haplotype according to the provided genotype. We mark the start position iterator of each haplotype and
    update it with the variant's alternative base.
"""
seqs = ""
position = pre_start
for variant, phased in zip(variants, phased_genotype):
if variant.start < pre_start:
            if variant.start == pre_start - 1 and phased != 0:  # this only happens when the previous position is a deletion and the current position is an insertion
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased - 1]
if len(alt_base) > len(ref_base): # is an insertion
# print ('has insertion and deletion overlap'.format(variant.start))
return alt_base[1:], position
            if phased != 0:  # should be impossible, but sometimes happens in a true VCF
return None, None
else:
return "", pre_start # do not do anything if 0 allele
else:
seqs += ref.query(pre_start, variant.start)
allele = variant.reference_bases if phased == 0 else variant.alternate_bases[phased - 1]
if phased == 0:
allele = allele[0]
position = variant.start + 1
seqs += allele # only add one ref base
else:
ref_base = variant.reference_bases
alt_base = variant.alternate_bases[phased-1]
ref_base, alt_base = remove_common_suffix(ref_base, [alt_base])
end = variant.start + len(ref_base)
position = end
seqs += alt_base[0]
return seqs, position | b5f5168603b941fe8a55df5bd3bbf69898db3804 | 9,654 |
def handle_str(x):
"""
handle_str returns a random string of the same length as x.
"""
return random_string(len(x)) | 856341d0e3ff6d41c4c0f14beda5133b7285478c | 9,655 |
def clean_profile(profile, sid, state_final, state_canceled):
"""
This method will prepare a profile for consumption in radical.analytics. It
performs the following actions:
    - makes sure all events have an `ename` entry
    - removes all state transitions to `CANCELLED` if a different final state
      is encountered for the same uid
    - assigns the session uid to all events without a uid
    - makes sure that state transitions have an `ename` set to `state`
"""
entities = dict() # things which have a uid
if not isinstance(state_final, list):
state_final = [state_final]
for event in profile:
uid = event['uid' ]
state = event['state']
time = event['time' ]
name = event['event']
# we derive entity_type from the uid -- but funnel
# some cases into the session
if uid:
event['entity_type'] = uid.split('.',1)[0]
else:
event['entity_type'] = 'session'
event['uid'] = sid
uid = sid
if uid not in entities:
entities[uid] = dict()
entities[uid]['states'] = dict()
entities[uid]['events'] = list()
if name == 'advance':
# this is a state progression
assert(state)
assert(uid)
event['event_name'] = 'state'
if state in state_final and state != state_canceled:
# a final state other than CANCELED will cancel any previous
# CANCELED state.
if state_canceled in entities[uid]['states']:
del(entities[uid]['states'][state_canceled])
if state in entities[uid]['states']:
# ignore duplicated recordings of state transitions
# FIXME: warning?
continue
# raise ValueError('double state (%s) for %s' % (state, uid))
entities[uid]['states'][state] = event
else:
# FIXME: define different event types (we have that somewhere)
event['event_name'] = 'event'
entities[uid]['events'].append(event)
# we have evaluated, cleaned and sorted all events -- now we recreate
# a clean profile out of them
ret = list()
    for uid, entity in entities.items():
ret += entity['events']
        for state, event in entity['states'].items():
ret.append(event)
# sort by time and return
ret = sorted(ret[:], key=lambda k: k['time'])
return ret | d58f8ad53623c6809f12f0ba589afcdaa807cf21 | 9,656 |
def get_process_causality_network_activity_query(endpoint_ids: str, args: dict) -> str:
"""Create the process causality network activity query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
process_causality_id_list = args.get('process_causality_id', '')
if not process_causality_id_list:
raise DemistoException('Please provide a process_causality_id argument.')
process_causality_id_list = wrap_list_items_in_double_quotes(process_causality_id_list)
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = NETWORK
and actor_process_causality_id in ({process_causality_id_list}) | fields agent_hostname, agent_ip_addresses,agent_id,
action_local_ip, action_remote_ip, action_remote_port, dst_action_external_hostname,dns_query_name,
action_app_id_transitions, action_total_download, action_total_upload, action_country,action_as_data,
actor_process_image_sha256, actor_process_image_name , actor_process_image_path,actor_process_signature_vendor,
actor_process_signature_product, actor_causality_id,actor_process_image_command_line, actor_process_instance_id''' | 97330c89f7599cf4096322088ed7e7bad0699d49 | 9,657 |
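A small usage sketch of the query builder above; the endpoint and causality IDs are made up, and `wrap_list_items_in_double_quotes` is assumed to come from the surrounding module.

query = get_process_causality_network_activity_query(
    endpoint_ids='"ep-1","ep-2"',                        # illustrative endpoint IDs
    args={'process_causality_id': 'AAA123,BBB456'})      # comma-separated causality IDs
print(query.splitlines()[0])   # dataset = xdr_data | filter agent_id in ("ep-1","ep-2") ...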
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
client = TickTick()
client.login(data.get("username"), data.get("password"))
except RequestException as exc:
raise CannotConnect from exc
except ValueError as exc:
raise InvalidAuth from exc
# Return some info we want to store in the config entry.
return {"title": "TickTick"} | 7f6989ae0a87579f2270aab479247634b7d1f7e8 | 9,658 |
def _shard_batch(xs):
"""Shards a batch for a pmap, based on the number of devices."""
local_device_count = jax.local_device_count()
def _prepare(x):
return x.reshape((local_device_count, -1) + x.shape[1:])
return jax.tree_map(_prepare, xs) | 5c6fb53a97af3543b9e147abfb896719f83a0a28 | 9,659 |
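A hedged sketch of how the helper above might be used before a pmap step; it assumes `jax` and `jax.numpy` are importable and shows the leading device axis that gets added.

import jax
import jax.numpy as jnp

batch = {"x": jnp.zeros((8, 32)), "y": jnp.zeros((8,))}
sharded = _shard_batch(batch)
# On a single-device host this prints {'x': (1, 8, 32), 'y': (1, 8)}
print(jax.tree_map(lambda a: a.shape, sharded))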
def get_time_limit(component_limit, overall_limit):
"""
Return the minimum time limit imposed by the component and overall limits.
"""
limit = component_limit
if overall_limit is not None:
try:
elapsed_time = util.get_elapsed_time()
except NotImplementedError:
returncodes.exit_with_driver_unsupported_error(CANNOT_LIMIT_TIME_MSG)
else:
remaining_time = max(0, overall_limit - elapsed_time)
if limit is None or remaining_time < limit:
limit = remaining_time
return limit | 4699ff18459a434a93fb50f8ac8bcc569ceb5e63 | 9,660 |
def keras_decay(step, decay=0.0001):
"""Learning rate decay in Keras-style"""
return 1. / (1. + decay * step) | f26f1f100ecf1622d6da9958d0a6cd95a37b8b2a | 9,661 |
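A quick worked example of the decay curve above; the step values are arbitrary.

for step in (0, 1000, 10000):
    print(step, keras_decay(step, decay=0.0001))
# 0 -> 1.0, 1000 -> ~0.909, 10000 -> 0.5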
def get_swagger():
""" Request handler for the /swagger path.
GET: returns the My Cars API spec as a swagger json doc.
"""
try:
return _make_response(response=validator.get_swagger_spec())
except Exception as e:
return _make_error(500, e.message) | a7ce1def456264d180dcb15e6039cd32e4df7597 | 9,662 |
def subtract(value, *args, **kwargs):
"""
Return the difference between ``value`` and a :class:`relativedelta`.
:param value: initial date or datetime.
:param args: positional args to pass directly to :class:`relativedelta`.
:param kwargs: keyword args to pass directly to :class:`relativedelta`.
:return: the resulting date/datetime.
"""
return value - relativedelta(*args, **kwargs) | 9f3c17b07c4010d9b1bfcff93280f0a59247fc5f | 9,663 |
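A hedged usage sketch, assuming `relativedelta` is imported from `dateutil.relativedelta` in the module that defines `subtract`.

from datetime import date

print(subtract(date(2020, 3, 31), months=1))  # 2020-02-29 (month arithmetic clamps the day)
print(subtract(date(2020, 1, 1), days=1))     # 2019-12-31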
def plot_coastline(
axes,
bathymetry,
coords='grid',
isobath=0,
xslice=None,
yslice=None,
color='black',
server='local',
zorder=2,
):
"""Plot the coastline contour line from bathymetry on the axes.
The bathymetry data may be specified either as a file path/name,
or as a :py:class:`netCDF4.Dataset` instance.
If a file path/name is given it is opened and read into a
:py:class:`netCDF4.Dataset` so,
if this function is being called in a loop,
it is best to provide it with a bathymetry dataset to avoid
the overhead of repeated file reads.
:arg axes: Axes instance to plot the coastline contour line on.
:type axes: :py:class:`matplotlib.axes.Axes`
:arg bathymetry: File path/name of a netCDF bathymetry data file
or a dataset object containing the bathymetry data.
:type bathymetry: str or :py:class:`netCDF4.Dataset`
:arg coords: Type of plot coordinates to set the aspect ratio for;
either :kbd:`grid` (the default) or :kbd:`map`.
:type coords: str
:arg isobath: Depth to plot the contour at; defaults to 0.
:type isobath: float
:arg xslice: X dimension slice to defined the region for which the
contour is to be calculated;
defaults to :kbd:`None` which means the whole domain.
If an xslice is given,
a yslice value is also required.
:type xslice: :py:class:`numpy.ndarray`
:arg yslice: Y dimension slice to defined the region for which the
contour is to be calculated;
defaults to :kbd:`None` which means the whole domain.
If a yslice is given,
an xslice value is also required.
:type yslice: :py:class:`numpy.ndarray`
:arg color: Matplotlib colour argument
:type color: str, float, rgb or rgba tuple
:arg zorder: Plotting layer specifier
:type zorder: integer
:returns: Contour line set
:rtype: :py:class:`matplotlib.contour.QuadContourSet`
"""
# Index names based on results server
if server == 'local':
lon_name = 'nav_lon'
lat_name = 'nav_lat'
bathy_name = 'Bathymetry'
elif server == 'ERDDAP':
lon_name = 'longitude'
lat_name = 'latitude'
bathy_name = 'bathymetry'
else:
raise ValueError('Unknown results server name: {}'.format(server))
if any((
xslice is None and yslice is not None,
xslice is not None and yslice is None,
)):
raise ValueError('Both xslice and yslice must be specified')
if not hasattr(bathymetry, 'variables'):
bathy = nc.Dataset(bathymetry)
else:
bathy = bathymetry
depths = bathy.variables[bathy_name]
if coords == 'map':
lats = bathy.variables[lat_name]
lons = bathy.variables[lon_name]
if xslice is None and yslice is None:
contour_lines = axes.contour(
np.array(lons), np.array(lats), np.array(depths),
[isobath], colors=color, zorder=zorder)
else:
contour_lines = axes.contour(
lons[yslice, xslice], lats[yslice, xslice],
depths[yslice, xslice].data, [isobath], colors=color,
zorder=zorder)
else:
if xslice is None and yslice is None:
contour_lines = axes.contour(
np.array(depths), [isobath], colors=color, zorder=zorder)
else:
contour_lines = axes.contour(
xslice, yslice, depths[yslice, xslice].data,
[isobath], colors=color, zorder=zorder)
if not hasattr(bathymetry, 'variables'):
bathy.close()
return contour_lines | 1166ea3e942bf5c9212cf07e69326c38f6e77f96 | 9,664 |
def safelog(func):
"""Version of prism.log that has prism as an optional dependency.
This prevents the sql database, which may not be available, from becoming a strict dependency."""
@wraps(func)
def inner(self, update, context):
try:
self.bot.cores["prism"].log_user(update.effective_user)
if update.effective_user.id != update.effective_chat.id: # If the chat is not a one-to-one chat with the user.
self.bot.cores["prism"].log_chat(update.effective_chat)
except KeyError: # If the prism core is not loaded.
pass
func(self, update, context)
return inner | fbd1ad03417151705640f0fd20c0caa685896496 | 9,665 |
def draw(args):
"""
Draw a GraphML with the tribe draw method.
"""
G = nx.read_graphml(args.graphml[0])
draw_social_network(G, args.write)
return "" | f5347dceaf6f79ab22218eb8838944d4f3e5a8ea | 9,666 |
import re
def extract_stem_voc(x):
"""extract word from predefined vocbulary with stemming and lemmatization
Args:
x ([string]): [a sentence]
Returns:
[list]: [word after stemming and lemmatization]
"""
stem = PorterStemmer()
# wnl = WordNetLemmatizer()
all_words = set(words.words())
# lemma_word = [word for word in map(lambda x: wnl.lemmatize(stem.stem(x)), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
lemma_word = [word for word in map(lambda x: stem.stem(x), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
return lemma_word | 0e882eb8f9b938fc8eb50e69dda2864d2d8a12da | 9,667 |
from typing import Union
from typing import Optional
from typing import List
from typing import Dict
def plot_without_vis_spec(
conditions_df: Union[str, pd.DataFrame],
grouping_list: Optional[List[IdsList]] = None,
group_by: str = 'observable',
measurements_df: Optional[Union[str, pd.DataFrame]] = None,
simulations_df: Optional[Union[str, pd.DataFrame]] = None,
plotted_noise: str = MEAN_AND_SD,
subplot_dir: Optional[str] = None,
plotter_type: str = 'mpl',
format_: str = 'png',
) -> Optional[Dict[str, plt.Subplot]]:
"""
Plot measurements and/or simulations. What exactly should be plotted is
specified in a grouping_list.
If grouping list is not provided, measurements (simulations) will be
grouped by observable, i.e. all measurements for each observable will be
visualized on one plot.
Parameters
----------
grouping_list:
A list of lists. Each sublist corresponds to a plot, each subplot
contains the Ids of datasets or observables or simulation conditions
for this plot.
group_by:
Grouping type.
Possible values: 'dataset', 'observable', 'simulation'
conditions_df:
A condition DataFrame in the PEtab format or path to the condition
file.
measurements_df:
A measurement DataFrame in the PEtab format or path to the data file.
simulations_df:
A simulation DataFrame in the PEtab format or path to the simulation
output data file.
plotted_noise:
A string indicating how noise should be visualized:
['MeanAndSD' (default), 'MeanAndSEM', 'replicate', 'provided']
subplot_dir:
A path to the folder where single subplots should be saved.
PlotIDs will be taken as file names.
plotter_type:
Specifies which library should be used for plot generation. Currently,
only matplotlib is supported
format_:
File format for the generated figure.
(See :py:func:`matplotlib.pyplot.savefig` for supported options).
Returns
-------
ax: Axis object of the created plot.
None: In case subplots are saved to a file.
"""
if measurements_df is None and simulations_df is None:
raise TypeError('Not enough arguments. Either measurements_data '
'or simulations_data should be provided.')
vis_spec_parser = VisSpecParser(conditions_df, measurements_df,
simulations_df)
figure, dataprovider = vis_spec_parser.parse_from_id_list(
grouping_list, group_by, plotted_noise)
if plotter_type == 'mpl':
plotter = MPLPlotter(figure, dataprovider)
else:
raise NotImplementedError('Currently, only visualization with '
'matplotlib is possible.')
return plotter.generate_figure(subplot_dir, format_=format_) | dba21fae889057e83dd8084b727e7c6312c3cd0f | 9,668 |
import torch
def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.Tensor:
"""
Helper function used to find all the faces in Meshes which are
fully outside the view frustum. A face is culled if all 3 vertices are outside
the same axis of the view frustum.
Args:
face_verts: An (F,3,3) tensor, where F is the number of faces in
the packed representation of Meshes. The 2nd dimension represents the 3 vertices
of a triangle, and the 3rd dimension stores the xyz locations of each
vertex.
frustum: An instance of the ClipFrustum class with the information on the
position of the clipping planes.
Returns:
        faces_culled: A boolean tensor of size F specifying whether or not each face should be
culled.
"""
clipping_planes = (
(frustum.left, 0, "<"),
(frustum.right, 0, ">"),
(frustum.top, 1, "<"),
(frustum.bottom, 1, ">"),
(frustum.znear, 2, "<"),
(frustum.zfar, 2, ">"),
)
faces_culled = torch.zeros(
[face_verts.shape[0]], dtype=torch.bool, device=face_verts.device
)
for plane in clipping_planes:
clip_value, axis, op = plane
# If clip_value is None then don't clip along that plane
if frustum.cull and clip_value is not None:
if op == "<":
verts_clipped = face_verts[:, axis] < clip_value
else:
verts_clipped = face_verts[:, axis] > clip_value
# If all verts are clipped then face is outside the frustum
faces_culled |= verts_clipped.sum(1) == 3
return faces_culled | edb9594b4a9d5fe6c3d7fcf24e9b0e312b94d3cb | 9,669 |
import collections
def _build_pep8_output(result):
"""
Build the PEP8 output based on flake8 results.
Results from both tools conform to the following format:
<filename>:<line number>:<column number>: <issue code> <issue desc>
with some issues providing more details in the description within
parentheses.
:param result: output from flake8
:returns: list of flake8 output lines by error
"""
# Aggregate individual errors by error
_dict = collections.defaultdict(list)
for line in str(result).split("\n"):
if line:
# Preserve only the code and brief description for each issue to
# facilitate aggregating the results. For example,
#
# E501 line too long (178 > 79 characters) -> E501 line too long
# E303 too many blank lines (4) -> E303 too many blank lines
parts = line.replace("(", ":").split(":")
line_num, col_num, base_issue = parts[1:4]
# Strip the whitespace around the base <issue code> <description>.
#
# Also restore the missing colon, stripped above, if the issue
# was 'missing whitespace' surrounding a colon.
issue = base_issue.strip()
key = "{}:'".format(issue) if issue.endswith("after '") else issue
_dict[key].append("{} ({})".format(line_num, col_num))
# Build the output as one issue per entry
return ["{}: {}".format(k, ", ".join(_dict[k])) for k in
sorted(_dict.keys())] | a4abda2f9d3a2d9b3524c60429b047cbfe0285d9 | 9,670 |
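A small sketch of the aggregation above on two invented flake8 lines; the path and issue code are illustrative.

fake_result = ("pkg/mod.py:12:80: E501 line too long (88 > 79 characters)\n"
               "pkg/mod.py:30:80: E501 line too long (91 > 79 characters)\n")
# Both issues share the same code/description, so they collapse into one aggregated entry.
print(_build_pep8_output(fake_result))  # ["E501 line too long: 12 (80), 30 (80)"]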
def form_value(request, entity, attribute):
"""
Return value from request params or the given entity.
:param request: Pyramid request.
:param entity: Instance to get attribute from if it isn't found in the request
params.
:param str attribute: Name of attribute to search for in the request params or
on as an attribute of the given entity.
"""
# Check for contains, because we want the request value even if it's empty
if attribute in request.params:
return request.params.get(attribute, '')
if entity:
# Don't provide a default value, because we want to make attribute typos clear
return getattr(entity, attribute)
return '' | 1daea77474dae5a1cb6fdab0b075a5b2f5c40865 | 9,671 |
def process_batch_data(batch_words, batch_tags=None):
"""
Padding batched dataset.
Args:
batch_words: Words in a batch.
batch_tags: Punctuations in a batch.
Returns: Words and punctuations after padding.
"""
b_words, b_words_len = pad_sequences(batch_words)
if batch_tags is None:
return {"words": b_words, "seq_len": b_words_len, "batch_size": len(b_words)}
else:
b_tags, _ = pad_sequences(batch_tags)
return {"words": b_words, "tags": b_tags, "seq_len": b_words_len, "batch_size": len(b_words)} | 2428b1009cfcaf55df8ef5be275d87f1053643fd | 9,672 |
import os
import glob
def grb2nc(glob_str, in_dir='./', out_dir='./'):
"""
Creates netCDF files from grib files.
:param glob_str: (str) - the naming pattern of the files
:param in_dir: (str) - directory of input files
:param out_dir: (str) - directory of output files
:return fo_names: (list) - list of netCDF files' names
"""
fi_url = os.path.join(in_dir, glob_str)
fi_names = sorted(glob.glob('{}'.format(fi_url)))
fo_names = []
for fi_name in fi_names:
fo_name_dir = fi_name.replace(in_dir, out_dir)
if fi_name.endswith('.grb'):
fo_name = fo_name_dir.replace('.grb', '.nc')
elif fi_name.endswith('.grb2'):
fo_name = fo_name_dir.replace('.grb2', '.nc')
elif fi_name.endswith('.grib'):
fo_name = fo_name_dir.replace('.grib', '.nc')
elif fi_name.endswith('.grib2'):
fo_name = fo_name_dir.replace('.grib2', '.nc')
os.system("wgrib2 {fi_name} -netcdf {fo_name}".format(
fi_name=fi_name,
fo_name=fo_name))
fo_names.append(fo_name)
if len(fo_names) == 1:
return fo_names[0]
else:
return fo_names | c06a9496600ff847d4f35128d9c90fe373b32163 | 9,673 |
import uuid
import os
import json
def emit(path_local, path_s3, time, poisson=0.0, ls=None, z_line=None,
actin_permissiveness=None, comment = None, write = True, **kwargs):
"""Produce a structured JSON file that will be consumed to create a run
Import emit into an interactive workspace and populate a directory with
run configurations to be executed by a cluster.
Parameters
----------
path_local: string
The local (absolute or relative) directory to which we save both
emitted files and run output.
path_s3: string
The s3 bucket (and optional folder) to save run output to and to which
the emitted files should be uploaded.
time: iterable
Time trace for run, in ms
poisson: float
poisson ratio of lattice. 0.5 const vol; 0 default const lattice;
negative for auxetic
ls: float, optional
Specifies the initial starting lattice spacing which will act as a
zero or offset for the spacing. If not given, the default lattice
spacing from hs.hs will be used.
z_line: float or iterable, optional
If not given, default distance specified in hs.hs is used. If given as
float, the z-line distance for the run. If given as an iterable, used as
trace for run, timestep by timestep.
actin_permissiveness: float or iterable, optional
Same as for z-line.
comment: string, optional
Space for comment on the purpose or other characteristics of the run
write: bool, optional
True (default) writes file to path_local/name.meta.json. Other values
don't. In both cases the dictionary describing the run is returned.
**kwargs:
Further keyword args will be included in the output dictionary. These
are used to sort the resulting runs by their properties of interest.
For example, where we are varying phase of activation across a series
of runs we would include the argument, e.g. 'phase=0.2', in order to
sort over phase when looking at results.
Returns
-------
rund: dict
Copy of run dictionary saved to disk as json.
Examples
--------
>>> emit('./', None, .1, 100, write=False)
{'actin_permissiveness': None,
... 'actin_permissiveness_func': None,
... 'comment': None,
... 'lattice_spacing': None,
... 'lattice_spacing_func': None,
... 'name': ...,
... 'path_local': './',
... 'path_s3': None,
... 'timestep_length': 0.1,
... 'timestep_number': 100,
... 'z_line': None,
... 'z_line_func': None}
"""
rund = {}
name = str(uuid.uuid1())
## Build dictionary
rund['name'] = name
rund['comment'] = comment
rund['path_local'] = path_local
rund['path_s3'] = path_s3
rund['poisson_ratio'] = poisson
rund['lattice_spacing'] = ls
rund['z_line'] = z_line
rund['actin_permissiveness'] = actin_permissiveness
rund['timestep_length'] = np.diff(time)[0]
rund['timestep_number'] = len(time)
## Include kwargs
for k in kwargs:
rund[k] = kwargs[k]
## Write out the run description
if write is True:
output_filename = os.path.join(path_local, name+'.meta.json')
with open(output_filename , 'w') as metafile:
json.dump(rund, metafile, indent=4)
return rund | 279159bfdd4b6035c439dcefe05d4e351a3f3dce | 9,674 |
import torch
import math
def adjust_learning_rate(
optimizer: torch.optim,
base_lr: float,
iteration: int,
warm_iter: int,
max_iter: int,
) -> float:
""" warmup + cosine lr decay """
start_lr = base_lr / 10
if iteration <= warm_iter:
lr = start_lr + (base_lr - start_lr) * iteration / warm_iter
else:
lr = start_lr + (base_lr - start_lr) * 0.5 * (1 + math.cos((iteration - warm_iter) * math.pi / (max_iter - warm_iter)))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr | 1304e22abb712cfb6c589a2adf199971c058986f | 9,675 |
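A hedged sketch exercising the warmup + cosine schedule above with a throwaway optimizer; the hyperparameters are arbitrary.

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
for it in (0, 500, 1000, 5000, 10000):
    lr = adjust_learning_rate(opt, base_lr=0.1, iteration=it, warm_iter=1000, max_iter=10000)
    print(it, round(lr, 5))
# ramps from base_lr/10 at iteration 0 up to base_lr at warm_iter,
# then decays back down to base_lr/10 by max_iter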
import os
def load_data(region):
"""
Function to read in data according to region
Args:
region (str): valid values are US, JP and EU
Returns:
pd.DataFrame containing factors returns
"""
# region='US'
reg_mapper = {'US': 'USA', 'JP': 'JPN', 'EU': 'Europe'}
if region not in reg_mapper:
raise ValueError('region has to be one of %s'
% (', '.join(reg_mapper.keys())))
data_folder = 'Data'
filename = 'AQR_Data_Daily.xlsx'
filepath = os.path.join(data_folder, filename)
qual_df = pd.read_excel(filepath, sheet_name='QMJ Factors', skiprows=18,
parse_dates=[0], index_col=[0])[reg_mapper[region]]
mkt_df = pd.read_excel(filepath, sheet_name='MKT', skiprows=18,
parse_dates=[0], index_col=[0])[reg_mapper[region]]
mom_df = pd.read_excel(filepath, sheet_name='UMD', skiprows=18,
parse_dates=[0], index_col=[0])[reg_mapper[region]]
val_df = pd.read_excel(filepath, sheet_name='HML FF', skiprows=18,
parse_dates=[0], index_col=[0])[reg_mapper[region]]
rf_df = pd.read_excel(filepath, sheet_name='RF', skiprows=18,
parse_dates=[0], index_col=[0])['Risk Free Rate']
data_df = pd.concat([mkt_df.rename('MKT'), val_df.rename('VAL'),
mom_df.rename('MOM'), qual_df.rename('QUAL'),
rf_df.rename('RF')], axis=1)
# Drop dates with NaN RF
data_df.dropna(subset=['RF'], inplace=True)
# Drop dates with all NaNs
data_df.dropna(how='all', inplace=True)
# Check that returns are all valid after the first valid index
if (data_df.apply(lambda x: x.loc[x.first_valid_index():].isnull().sum(),
axis=0) != 0).any():
raise ValueError('Check the data. It has intermediate NaNs')
# Provide basic data description
print('Basic Description:')
print(data_df.apply(lambda x: pd.Series(
[x.mean(), x.std(ddof=1), x.skew(), x.kurtosis()],
index=['Mean', 'Std Dev', 'Skew', 'Excess Kurtosis'])))
print('\nCorrelations:')
print(data_df.corr())
return data_df | 04de0cf3836d84907ce61fbfc7aea280d2fe92e0 | 9,676 |
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
df = pd.read_csv(filename).drop_duplicates()
df = filter_data(df)
return df.drop("price", axis=1), df.filter(['price']) | a5d044fa5be8ceefdb3cee7fb212608110f8dae5 | 9,677 |
def face_at_link(shape, actives=None, inactive_link_index=BAD_INDEX_VALUE):
"""Array of faces associated with links.
Returns an array that maps link ids to face ids. For inactive links,
which do not have associated faces, set their ids to
*inactive_link_index*. Use the *actives* keyword to specify an array that
contains the ids of all active links in the grid. The default assumes
that only the perimeter nodes are inactive.
Examples
--------
>>> from landlab.utils.structured_grid import face_at_link
>>> faces = face_at_link((3, 4), inactive_link_index=-1)
>>> faces # doctest: +NORMALIZE_WHITESPACE
array([-1, 0, 1, -1, -1, 2, 3,
-1, -1, -1, -1, 4, 5, 6, -1, -1, -1])
"""
if actives is None:
actives = active_links(shape)
num_links = link_count(shape)
    link_faces = np.empty(num_links, dtype=int)
link_faces.fill(inactive_link_index)
link_faces[actives] = np.arange(len(actives))
return link_faces | db7e3e87144354fb850b0741a7531d06e73227f6 | 9,678 |
def snrcat(spec,plugmap):
"""This function calculates the S/N for each fiber.
Parameters
----------
spec : SpecFrame object
        The SpecFrame object that contains the 1D extracted spectra.
plugmap : numpy structured array
The plugmap information for each fiber including which fiber contains
sky or stars.
Returns
-------
cat : numpy structured array
A catalog containing information on each object in the fibers and the
median S/N.
Example
-------
.. code-block:: python
cat = snrcat(spec,plugmap)
"""
    dtype = np.dtype([('apogee_id',str,30),('ra',np.float64),('dec',np.float64),('hmag',float),('objtype',str,30),
                      ('fiberid',int),('fiberindex',int),('flux',float),('err',float),('snr',float)])
cat = np.zeros(300,dtype=dtype)
# Load the spectral data
cat['fiberindex'] = np.arange(300)
cat['flux'] = np.median(spec.flux,axis=1)
cat['err'] = np.median(spec.err,axis=1)
err = cat['err']
bad = (err <= 0.0)
err[bad] = 1.0
cat['snr'] = cat['flux']/err
    # Load the plugmap data
pcat = plugmap['PLUGMAPOBJ']
fibs, = np.where( (pcat['fiberId']>=0) & (pcat['holeType']=='OBJECT') & (pcat['spectrographId']==2) )
fiberindex = 300-pcat[fibs]['fiberId']
cat['apogee_id'][fiberindex] = pcat[fibs]['tmass_style']
cat['ra'][fiberindex] = pcat[fibs]['ra']
cat['dec'][fiberindex] = pcat[fibs]['dec']
cat['hmag'][fiberindex] = pcat[fibs]['mag'][:,1]
cat['objtype'][fiberindex] = pcat[fibs]['objType']
cat['fiberid'][fiberindex] = pcat[fibs]['fiberId']
cat = Table(cat)
return cat | 30b3de8197425b92d0b69c3d49f3a1bca46ca659 | 9,679 |
def forward(S, A, O, obs):
"""Calculates the forward probability matrix F. This is a matrix where each
(i, j) entry represents P(o_1, o_2, ... o_j, X_t = i| A, O). In other words,
each (i, j) entry is the probability that the observed sequence is o_1, ...
o_j and that at position j we are in hidden state i. We build F from the
first observation o_1 up to the entire observed sequence o_1, ... o_M. Thus
F has dimension L x M where L is the number of hidden states and M is the
length of our input sample 'obs'.
@params:
S np.array - state vector for starting distribution.
A np.array - transition matrix, L x L for L hidden states, each (i, j)
entry is P(X_i | X_j), or the probability of transitioning
from start state X_j (column entry) to target state X_i
(row entry).
O np.array - observation matrix, L x M' for L hidden states and M' total
possible observations. each (i, j) entry is P(Y_j | X_i), or
the probability of observing observation Y_j while in state
X_i.
obs np.array, list - the observations. these are assumed to be integers
that index correctly into A and O.
"""
assert np.shape(A)[0] == np.shape(A)[1] # transition matrix should be square
L = np.shape(A)[0] # L is the number of hidden states
M = len(obs) # M is the number of observations in our sample 'obs'
C = [] # the list of coefficients used to normalize each column to 1
    F = np.zeros((L, M)) # the forward algorithm generates an L x M matrix
F[:, 0] = np.multiply(S, O[:, obs[0]]) # initialize the first column of F via S * (obs[0] column of B)
c_0 = np.sum(F[:, 0]) # compute the first normalizing coefficient
C.append(c_0) # record c_0
F[:, 0] = np.divide(F[:, 0], c_0) # normalize the first column so the entries sum to 1
# begin the forward algorithm. generate each subsequent column of F via the previous one,
# normalizing at each step
for j in range(1, M):
F[:, j] = np.dot(np.multiply(A, O[:,obs[j]]), F[:,j - 1]) # compute the new column j
c_j = np.sum(F[:, j]) # compute the jth coeff.
C.append(c_j) # record the jth coeff.
F[:, j] = np.divide(F[:, j], c_j) # normalize column j
    # return the forward matrix F and the list of normalizing coefficients C (these will be used
# to normalize the backward probabilities in the backward step)
return (F, C) | 24c6ddfa053b623a5a56dcc773c8fc4419258df8 | 9,680 |
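A toy run of the scaled forward pass above on a two-state HMM; the numbers are illustrative and `np` is assumed to be NumPy, as in the function body. Note the column convention A[i, j] = P(X_i | X_j), so each column of A sums to 1.

S = np.array([0.6, 0.4])              # start distribution
A = np.array([[0.7, 0.4],             # columns sum to 1 under A[i, j] = P(X_i | X_j)
              [0.3, 0.6]])
O = np.array([[0.9, 0.1],             # O[i, y] = P(Y_y | X_i)
              [0.2, 0.8]])
F, C = forward(S, A, O, obs=[0, 1, 0])
print(F.shape)   # (2, 3): one normalized column per observation
print(C)         # per-column normalizers, later reused in the backward step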
def calculate_state(position, dt):
"""
Sometimes, a data file will include position only. In those cases,
the velocity must be calculated before the regression is run.
If the position is
| position_11 position_21 |
| position_12 position_22 |
| ....................... |
| position_1n position_2n |
The value returned is
| position_11 position_21 velocity_11 velocity_21 |
| position_12 position_22 velocity_12 velocity_22 |
| ....................................................... |
| position_1n-1 position_2n-1 velocity_1n-1 velocity_2n-1 |
The last value of each state is clipped off because given n values,
there are n-1 differences between them.
"""
# velocity is (x1 - x0) * dt
velocity = (position[1:, :] - position[:-1, :]) * dt
state = np.hstack((position[:-1, :], velocity))
return state | 9db6d94c8d80f99ccebbdcb9e0691e85e03c836d | 9,681 |
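A short sketch of the state construction above on a three-sample 2-D position trace; `np` is assumed to be NumPy and `dt` is used exactly as the code does (a multiplier on the finite differences).

position = np.array([[0.0, 0.0],
                     [1.0, 0.5],
                     [2.0, 1.0]])
state = calculate_state(position, dt=10.0)
print(state.shape)   # (2, 4): [x, y, vx, vy] for every sample except the last
print(state)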
from typing import Optional
def get_server(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerResult:
"""
Use this data source to retrieve an auth server from Okta.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
example = okta.auth.get_server(name="Example Auth")
```
:param str name: The name of the auth server to retrieve.
"""
__args__ = dict()
__args__['name'] = name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('okta:auth/getServer:getServer', __args__, opts=opts, typ=GetServerResult).value
return AwaitableGetServerResult(
audiences=__ret__.audiences,
credentials_last_rotated=__ret__.credentials_last_rotated,
credentials_next_rotation=__ret__.credentials_next_rotation,
credentials_rotation_mode=__ret__.credentials_rotation_mode,
description=__ret__.description,
id=__ret__.id,
issuer=__ret__.issuer,
issuer_mode=__ret__.issuer_mode,
kid=__ret__.kid,
name=__ret__.name,
status=__ret__.status) | d1693c032a254397d44f82bea626a54343c2dfef | 9,682 |
import pathos
from functools import partial
from itertools import repeat
from typing import Sequence
from typing import Dict
from typing import Set
def estimate_aps_user_defined(ml, X_c = None, X_d = None, data = None, C: Sequence = None, D: Sequence = None, L: Dict[int, Set] = None,
S: int = 100, delta: float = 0.8, seed: int = None, pandas: bool = False, pandas_cols: Sequence = None,
keep_order: bool = False, reorder: Sequence = None, parallel: bool = False, nprocesses: int = None, ntasks: int = 1, **kwargs):
"""Estimate APS for given dataset and user defined ML function
Approximate propensity score estimation involves taking draws :math:`X_c^1, \\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\delta)`, where :math:`N(X_{ci},\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\delta`.
    :math:`X_c^1, \\ldots,X_c^S` are destandardized before being passed for ML inference. The estimation equation is :math:`p^s(X_i;\\delta) = \\frac{1}{S} \\sum_{s=1}^{S} ML(X_c^s, X_{di})`.
Parameters
-----------
ml: Object
User defined ml function
X_c: array-like, default: None
1D/2D vector of continuous input variables
X_d: array-like, default: None
1D/2D vector of discrete input variables
data: array-like, default: None
Dataset containing ML input variables
C: array-like, default: None
Integer column indices for continous variables
D: array-like, default: None
Integer column indices for discrete variables
L: Dict[int, Set]
Dictionary with keys as indices of X_c and values as sets of discrete values
S: int, default: 100
Number of draws for each APS estimation
delta: float, default: 0.8
Radius of sampling ball
seed: int, default: None
Seed for sampling
pandas: bool, default: False
Whether to cast inputs into pandas dataframe
pandas_cols: Sequence, default: None
Columns names for dataframe input
keep_order: bool, default: False
Whether to maintain the column order if data passed as a single 2D array
reorder: Sequence, default: False
Indices to reorder the data assuming original order [X_c, X_d]
parallel: bool, default: False
Whether to parallelize the APS estimation
nprocesses: int, default: None
Number of processes to parallelize. Defaults to number of processors on machine.
ntasks: int, default: 1
Number of tasks to send to each worker process.
**kwargs: keyword arguments to pass into user function
Returns
-----------
np.ndarray
Array of estimated APS for each observation in sample
Notes
------
X_c, X_d, and data should never have any overlapping variables. This is not checkable through the code, so please double check this when passing in the inputs.
The arguments `keep_order`, `reorder`, and `pandas_cols` are applied sequentially, in that order. This means that if `keep_order` is set, then `reorder` will reorder the columns from the original column order as `data`. `pandas_cols` will then be the names of the new ordered dataset.
The default ordering of inputs is [X_c, X_d], where the continuous variables and discrete variables will be in the original order regardless of how their input is passed. If `reorder` is called without `keep_order`, then the reordering will be performed on this default ordering.
Parallelization uses the `Pool` module from pathos, which will NOT be able to deal with execution on GPU. If the user function enables inference on GPU, then it is recommended to implement parallelization within the user function as well.
The optimal settings for nprocesses and nchunks are specific to each machine, and it is highly recommended that the user pass these arguments to maximize the performance boost. `This SO thread <https://stackoverflow.com/questions/42074501/python-concurrent-futures-processpoolexecutor-performance-of-submit-vs-map>`_ recommends setting nchunks to be 14 * # of workers for optimal performance.
"""
# Set X_c and X_d based on inputs
if X_c is None and data is None:
raise ValueError("APS estimation requires continuous data!")
# Prioritize explicitly passed variables
if X_c is not None:
X_c = np.array(X_c).astype(float)
if X_d is not None:
X_d = np.array(X_d).astype(float)
if data is not None:
data = np.array(data).astype(float)
# If X_c not given, but data is, then we assume all of data is X_c
if X_c is None and X_d is not None and data is not None:
print("`X_c` not given but both `X_d` and `data` given. We will assume that all the variables in `data` are continuous.")
X_c = data
# If X_d not given, but data is, then we assume all of data is X_d
if X_c is not None and X_d is None and data is not None:
print("`X_d` not given but both `X_c` and `data` given. We will assume that all the variables in `data` are discrete.")
X_d = data
# If both X_c and X_d are none, then use indices
order = None
if X_c is None and X_d is None:
# Save original order if keep order in place
if keep_order:
order = _get_og_order(data.shape[1], C, D)
if C is None and D is None:
print("`data` given but no indices passed. We will assume that all the variables in `data` are continuous.")
X_c = data
elif C is None:
if isinstance(D, int):
d_len = 1
else:
d_len = len(D)
X_d = data[:,D]
if d_len >= data.shape[1]:
raise ValueError(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Continuous variables are necessary to conduct APS estimation.")
else:
print(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be continuous variables.")
X_c = np.delete(data, D, axis = 1)
elif D is None:
if isinstance(C, int):
c_len = 1
else:
c_len = len(C)
X_c = data[:,C]
if c_len < data.shape[1]:
print(f"Passed continuous indices of length {c_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be discrete variables.")
X_d = np.delete(data, C, axis = 1)
else:
X_c = data[:,C]
X_d = data[:,D]
# Force X_c to be 2d array
if X_c.ndim == 1:
X_c = X_c[:,np.newaxis]
if X_d is not None:
if X_d.ndim == 1:
X_d = X_d[:,np.newaxis]
# === Preprocess mixed variables ===
if L is not None:
L_keys = np.array(list(L.keys()))
L_vals = np.array(list(L.values()))
X_c, mixed_og_vals, mixed_og_inds = _preprocessMixedVars(X_c, L_keys, L_vals)
mixed_rows, mixed_cols = mixed_og_inds
else:
mixed_og_vals = None
mixed_og_inds = None
# === Standardize continuous variables ===
# Formula: (X_ik - u_k)/o_k; k represents a continuous variable
X_c, mu, sigma = standardize(X_c)
if seed is not None:
np.random.seed(seed)
# If parallelizing, then force inference on CPU
if parallel == True:
cpu = True
computeUserAPS_frozen = partial(_computeUserAPS, ml = ml, S = S, delta = delta, mu = mu, sigma = sigma, pandas = pandas,
pandas_cols = pandas_cols, order = order, reorder = reorder, **kwargs)
mp = pathos.helpers.mp
p = mp.Pool(nprocesses)
if nprocesses is None:
workers = "default (# processors)"
nprocesses = mp.cpu_count()
else:
workers = nprocesses
print(f"Running APS estimation with {workers} workers...")
# Split input arrays into chunked rows
nchunks = ntasks * nprocesses
X_c_split = np.array_split(X_c, nchunks)
iter_c = iter(X_c_split)
if X_d is None:
iter_d = repeat(None)
else:
iter_d = iter(np.array_split(X_d, nchunks))
if L is None:
iter_L_ind = repeat(None)
iter_L_val = repeat(None)
else:
# Split indices depending on which chunk they fall into
chunksizes = np.append([0], np.cumsum([c.shape[0] for c in X_c_split]))
chunked_inds = [(mixed_rows[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] - chunksizes[i],
mixed_cols[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))]) for i in range(len(chunksizes) - 1)]
chunked_vals = [mixed_og_vals[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] for i in range(len(chunksizes) - 1)]
iter_L_ind = iter(chunked_inds)
iter_L_val = iter(chunked_vals)
iter_args = zip(iter_c, iter_d, iter_L_ind, iter_L_val)
p_out = p.starmap(computeUserAPS_frozen, iter_args)
p.close()
p.join()
aps_vec = np.concatenate(p_out)
else:
aps_vec = _computeUserAPS(X_c, X_d, mixed_og_inds, mixed_og_vals, ml, S, delta, mu, sigma, pandas, pandas_cols, order, reorder, **kwargs) # Compute APS for each individual i
aps_vec = np.array(aps_vec)
return aps_vec | 23e94a27800e8cdeca140666d23aace3fd8c5b2d | 9,683 |
def shared_vinchain_instance():
""" This method will initialize ``SharedInstance.instance`` and return it.
        The purpose of this method is to offer a single default
vinchainio instance that can be reused by multiple classes.
"""
if not SharedInstance.instance:
clear_cache()
SharedInstance.instance = vin.VinChain()
return SharedInstance.instance | 8668ec7b3b56353545f7fb35fd834918de4207fc | 9,684 |
import os
import math
import uuid
def export_viewpoint_to_nw(view_point: ViewPoint) -> Element:
"""
Represents current view point as a NavisWorks view point XML structure
:param view_point: ViewPoint instance that should be represented in XML
:return: XML Element instance with inserted view point
"""
path_to_viewpoint_template = os.path.join(
BASE_DIR, 'EasyView', 'static', 'EasyView', 'export', 'view_point_template.xml')
viewpoint_template = ET.parse(path_to_viewpoint_template)
view = viewpoint_template.getroot()
# View point - fov, position and rotation
camera = view[0][0]
pos3f = camera[0][0]
quaternion = camera[1][0]
camera_attributes = (
('height', str(math.radians(view_point.fov))),
)
    # Either a remark description (if present), a view point description (if present), or a generated name
description = view_point.description
if not view_point.description:
description = f'Точка обзора {view_point.pk}'
related_remark = Remark.objects.filter(view_point=view_point)
if related_remark:
description = view_point.remark.description
view_attributes = (
('guid', str(uuid.uuid4())),
('name', description),
)
pos3f_attributes = tuple(zip(('x', 'y', 'z',), map(lambda x: str(x), view_point.position)))
quaternion_attributes = tuple(zip(('a', 'b', 'c', 'd'), map(lambda x: str(x), view_point.quaternion)))
# Clipping planes
clip_plane_set = view[1]
clip_planes = clip_plane_set[1]
clipped = False
clip_counter = 0
for i, status in enumerate(view_point.clip_constants_status):
if status:
if not clipped:
clipped = True
clip_counter += 1
clip_planes[i].set('state', 'enabled')
clip_planes[i][0].set('distance', f'{view_point.clip_constants[i]:.10f}')
if clipped:
clip_plane_set.set('enabled', '1')
clip_plane_set.set('current', str(clip_counter - 1))
element_attribute_pairs = (
(camera, camera_attributes),
(view, view_attributes),
(pos3f, pos3f_attributes),
(quaternion, quaternion_attributes),
)
for element, attributes in element_attribute_pairs:
for attribute, value in attributes:
element.set(attribute, value)
return view | ef50899b94d82300831e0b668fd334fdfc73a8a5 | 9,685 |
def generateMfccFeatures(filepath):
"""
:param filepath:
:return:
"""
y, sr = librosa.load(filepath)
mfcc_features = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
return mfcc_features | 985519827a04d7375e021524d9b8ce6b4fb0482f | 9,686 |
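A hedged usage sketch; `librosa.example("trumpet")` just fetches a small bundled demo recording so there is a real file to load.

import librosa

mfcc = generateMfccFeatures(librosa.example("trumpet"))
print(mfcc.shape)   # (40, n_frames)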
def homography_crop_resize(org_img_size, crop_y, resize_img_size):
"""
compute the homography matrix transform original image to cropped and resized image
:param org_img_size: [org_h, org_w]
:param crop_y:
:param resize_img_size: [resize_h, resize_w]
:return:
"""
# transform original image region to network input region
ratio_x = resize_img_size[1] / org_img_size[1]
ratio_y = resize_img_size[0] / (org_img_size[0] - crop_y)
H_c = np.array([[ratio_x, 0, 0],
[0, ratio_y, -ratio_y*crop_y],
[0, 0, 1]])
return H_c | 85d252627e947306f3ece89eec1a678c2fa86bc9 | 9,687 |
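A small worked example of the crop/resize homography above; the image sizes and the test point are made up, and `np` is assumed to be NumPy.

H_c = homography_crop_resize(org_img_size=[1080, 1920], crop_y=80,
                             resize_img_size=[360, 480])
pt = np.array([960.0, 600.0, 1.0])   # homogeneous pixel (x, y, 1) in the original image
print(H_c @ pt)                       # its location in the 480x360 network input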
def extract_project_info(req_soup, full_name=False):
"""Extract the relevant project info from a request.
Arguments:
req_soup (BS4 soup object):
The soup of the request.
full_name (boolean):
Whether or not to capture the entire project name or just the last
hyphenated element.
Returns:
prj_info (Project):
The required info to post a project.
"""
if full_name:
prj_name = req_soup.find("name").string
else:
prj_name = req_soup.find("name").string.split('-')[-1]
res_name = req_soup.find("owner").find("name").string
email = req_soup.find("owner").find("email").string
# NOTE: Change this line to your own institution's email domain.
if "email.arizona.edu" in email:
res_lab = "internal"
else:
res_lab = "external"
# Replace all not ascii chars with ascii ones, and any symbols with '-'.
prj_res = api_types.Researcher(
extract_custom_forms._sanitize_text(res_name.split()[0]),
extract_custom_forms._sanitize_text(res_name.split()[-1]),
extract_custom_forms._sanitize_text(res_lab),
email,
"")
prj_info = api_types.Project(prj_name, prj_res)
return prj_info | 04064b769cb97688f47133df6c8dca0f806b6544 | 9,688 |
from typing import Optional
def get_underlying_asset_price(token_symbol: str) -> Optional[Price]:
"""Gets the underlying asset price for token symbol, if any
    This function is neither in inquirer.py nor chain/ethereum/defi.py
due to recursive import problems
"""
price = None
if token_symbol == 'yaLINK':
price = Inquirer().find_usd_price(A_ALINK)
elif token_symbol == 'yDAI':
price = Inquirer().find_usd_price(A_DAI)
elif token_symbol == 'yWETH':
price = Inquirer().find_usd_price(A_ETH)
elif token_symbol == 'yYFI':
price = Inquirer().find_usd_price(A_YFI)
elif token_symbol == 'yUSDT':
price = Inquirer().find_usd_price(A_USDT)
elif token_symbol == 'yUSDC':
price = Inquirer().find_usd_price(A_USDC)
elif token_symbol == 'yTUSD':
price = Inquirer().find_usd_price(A_TUSD)
elif token_symbol in ('ycrvRenWSBTC', 'crvRenWBTC', 'crvRenWSBTC'):
price = Inquirer().find_usd_price(A_BTC)
return price | 8b8e7e79e3e77e7e2985d1a7f5aee337da424f87 | 9,689 |
import asyncio
def do_call_async(
fn_name, *args, return_type=None, post_process=None
) -> asyncio.Future:
"""Perform an asynchronous library function call."""
lib_fn = getattr(get_library(), fn_name)
loop = asyncio.get_event_loop()
fut = loop.create_future()
cf_args = [None, c_int64, c_int64]
if return_type:
cf_args.append(return_type)
cb_type = CFUNCTYPE(*cf_args) # could be cached
cb_res = _create_callback(cb_type, fut, post_process)
# keep a reference to the callback function to avoid it being freed
CALLBACKS[fut] = (loop, cb_res)
result = lib_fn(*args, cb_res, c_void_p()) # not making use of callback ID
if result:
# callback will not be executed
if CALLBACKS.pop(fut):
fut.set_exception(get_current_error())
return fut | c3d32f9521a58c81231fe70601a9807b4b9841be | 9,690 |
def prefer_static_value(x):
"""Return static value of tensor `x` if available, else `x`.
Args:
x: `Tensor` (already converted).
Returns:
Numpy array (if static value is obtainable), else `Tensor`.
"""
static_x = tensor_util.constant_value(x)
if static_x is not None:
return static_x
return x | 4bd38e5f3a57314b48c86e37f543e6fb69847d1c | 9,691 |
from psyneulink.core.components.component import Component, ComponentsMeta
import types
import copy
def copy_parameter_value(value, shared_types=None, memo=None):
"""
Returns a copy of **value** used as the value or spec of a
Parameter, with exceptions.
For example, we assume that if we have a Component in an
iterable, it is meant to be a pointer rather than something
used in computation requiring it to be a "real" instance
(like `Component.function`)
e.g. in spec attribute or Parameter `Mechanism.input_ports_spec`
"""
if shared_types is None:
shared_types = (Component, ComponentsMeta, types.MethodType)
else:
shared_types = tuple(shared_types)
try:
return copy_iterable_with_shared(
value,
shared_types=shared_types,
memo=memo
)
except TypeError:
# this will attempt to copy the current object if it
# is referenced in a parameter, such as
# ComparatorMechanism, which does this for input_ports
if not isinstance(value, shared_types):
return copy.deepcopy(value, memo)
else:
return value | 2586cc79524b63d74920f4b262d0b9b63cb8ef02 | 9,692 |
def ajax_login_required(function):
"""
Decorator for views that checks that the user is logged in, resulting in a
403 Unauthorized response if not.
"""
@wraps(function, assigned=available_attrs(function))
def wrapped_function(request, *args, **kwargs):
if request.user.is_authenticated:
return function(request, *args, **kwargs)
else:
return HttpResponseForbidden()
return wrapped_function | 194d97cd4f9897482a27addeca74780316c39083 | 9,693 |
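A hedged usage sketch of the decorator above on an invented Django view; `JsonResponse` is only there to give the view something to return.

from django.http import JsonResponse

@ajax_login_required
def profile_json(request):
    # Anonymous callers get a bare 403 instead of a login redirect.
    return JsonResponse({"username": request.user.username})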
def compile_str_from_parsed(parsed):
"""The (quasi-)inverse of string.Formatter.parse.
Args:
parsed: iterator of (literal_text, field_name, format_spec, conversion) tuples,
as yield by string.Formatter.parse
Returns:
A format string that would produce such a parsed input.
>>> s = "ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL"
>>> assert compile_str_from_parsed(string.Formatter().parse(s)) == s
>>>
>>> # Or, if you want to see more details...
>>> parsed = list(string.Formatter().parse(s))
>>> for p in parsed:
... print(p)
('ROOT/', '', '', None)
('/', '0', '', 'r')
('/', '1', 'format', 'i')
('/hello', '', '0.02f', None)
('TAIL', None, None, None)
>>> compile_str_from_parsed(parsed)
'ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL'
"""
result = ''
for literal_text, field_name, format_spec, conversion in parsed:
# output the literal text
if literal_text:
result += literal_text
# if there's a field, output it
if field_name is not None:
result += '{'
if field_name != '':
result += field_name
if conversion:
result += '!' + conversion
if format_spec:
result += ':' + format_spec
result += '}'
return result | bfc3d39ecee6e07e41690ee8f85f969c110de69b | 9,694 |
def calculate_class_weight(labels):
"""Calculates the inverse of the class cardinalities and
normalizes the weights such that the minimum is equal to 1.
Args:
labels: List of integers representing class labels
Returns:
Numpy array with weight for each class
"""
labels = np.array(labels)
unique = sorted(np.unique(labels))
counts = np.zeros(len(unique))
for i, label in enumerate(unique):
counts[i] = np.sum(labels == label)
weight = 1. / counts
weight = weight / weight.min()
return weight | ff88ac33b49e90f75ac743aec463e87d36023876 | 9,695 |
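A quick worked example of the weighting above on an invented label list.

labels = [0, 0, 0, 0, 1, 1, 2]         # class counts 4, 2, 1
print(calculate_class_weight(labels))  # [1. 2. 4.] -- inverse counts scaled so the minimum is 1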
def CosEnv(length,rft=(0.005),fs=(44100)):
"""
rft : Rise and fall time [s]
length : Total length of window [s]
fs : Sampling freq [Hz]
"""
rfsamp = int(np.round(rft * fs))
windowsamp = int(np.round(length * fs))
flatsamp = windowsamp - (2 * rfsamp)
time_index = np.arange(0, 1, 1 / rfsamp)
r_env = (1 + np.cos(np.pi + np.pi * time_index)) / 2
f_env = (1 + np.cos(np.pi * time_index)) / 2
flat_env = np.ones(flatsamp)
env = np.concatenate((r_env, flat_env, f_env), 0)
return env | e15cf02bfad7f0a1935507d8e394718429df6958 | 9,696 |
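A hedged sketch: gating a 1 kHz sine burst with the raised-cosine envelope above; `np` is assumed to be NumPy and the durations are arbitrary.

fs = 44100
env = CosEnv(0.05, rft=0.005, fs=fs)         # 50 ms window, 5 ms rise/fall
t = np.arange(len(env)) / fs
tone = np.sin(2 * np.pi * 1000.0 * t) * env  # smooth onset/offset, no clicks
print(len(env))                              # 2205 samples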
def flatten(nested_list):
"""
Args:
nested_list (list): list of lists
Returns:
list: flat list
Example:
>>> import ubelt as ub
>>> nested_list = [['a', 'b'], ['c', 'd']]
>>> list(ub.flatten(nested_list))
['a', 'b', 'c', 'd']
"""
return it.chain.from_iterable(nested_list) | 418c8d76ff97991ef26e59d7740df14690655cd5 | 9,697 |
def fetch_reply(query, session_id):
"""
main function to fetch reply for chatbot and
return a reply dict with reply 'type' and 'data'
"""
response = apiai_response(query, session_id)
intent, params = parse_response(response)
reply = {}
if intent == None:
reply['type'] = 'none'
reply['data'] = "I didn't understand"
elif intent == "news":
reply['type'] = 'news'
print(params)
articles = get_news(params)
news_elements = []
for article in articles:
element = {}
element['title'] = article['title']
element['item_url'] = article['link']
element['image_url'] = article['img']
element['buttons'] = [{
"type":"web_url",
"title":"Read more",
"url":article['link']}]
news_elements.append(element)
reply['data'] = news_elements
elif intent.startswith('smalltalk'):
reply['type'] = 'smalltalk'
reply['data'] = response['result']['fulfillment']['speech']
return reply | bfcb2938042b964da15c33c614d37433948b37f2 | 9,698 |
def create_SpatialReference(sr):
""" creates an arcpy.spatial reference object """
return arcpy.SpatialReference(sr) | fdb535d4e1c1acda4da4270d78ceb4db9c114584 | 9,699 |