content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import ast
from astor import to_source  # to_source is used below to render the AST as source code
def get_import_stmt_str(alias_list, import_src=None, max_linechars=88):
"""
Construct an import statement by building an AST, convert it to source using
astor.to_source, and then return the string.
alias_list: List of strings to use as ast.alias `name`, and optionally also
`asname entries. If only one name is listed per item in the
alias_list, the `asname` will be instantiated as None.
import_src: If provided, the import statement will be use the
`ast.ImportFrom` class, otherwise it will use `ast.Import`.
Relative imports are permitted for "import from" statements
(such as `from ..foo import bar`) however absolute imports
(such as `from foo import bar`) are recommended in PEP8.
max_linechars: Maximum linewidth, beyond which the import statement string will
be multilined with `multilinify_import_stmt_str`.
"""
alias_obj_list = []
assert type(alias_list) is list, "alias_list must be a list"
for alias_pair in alias_list:
if type(alias_pair) is str:
alias_pair = [alias_pair]
assert len(alias_pair) > 0, "Cannot import using an empty string"
assert type(alias_pair[0]) is str, "Import alias name must be a string"
if len(alias_pair) < 2:
alias_pair.append(None)
al = ast.alias(*alias_pair[0:2])
alias_obj_list.append(al)
if import_src is None:
ast_imp_stmt = ast.Import(alias_obj_list)
else:
import_level = len(import_src) - len(import_src.lstrip("."))
import_src = import_src.lstrip(".")
ast_imp_stmt = ast.ImportFrom(import_src, alias_obj_list, level=import_level)
import_stmt_str = to_source(ast.Module([ast_imp_stmt]))
    if len(import_stmt_str.rstrip("\n")) > max_linechars:
return multilinify_import_stmt_str(import_stmt_str)
else:
return import_stmt_str
|
f4c7dad79b7c949bd7e9045b9c9ae474a09303d2
| 34,217 |
def convert_into_decimal(non_binary_number: str, base: int) -> int:
"""
Converts a non binary string number into a decimal number
:param non_binary_number:
:param base:
:return: a decimal number
"""
decimal_number = 0
for digit in range(len(non_binary_number)):
decimal_number += int(non_binary_number[digit]) * base ** abs((digit - (len(non_binary_number) - 1)))
return decimal_number
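# Example usage (a minimal sketch):
assert convert_into_decimal("101", 2) == 5
assert convert_into_decimal("777", 8) == 511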
|
fecdd152000399fbc33a259a647118b0607daac9
| 34,218 |
from typing import Sequence
def sgld_sweep() -> Sequence[SGMCMCConfig]:
"""sweep for vanilla sgld."""
sweep = []
for learning_rate in [
1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2
]:
for prior_variance in [0.01, 0.1, 0.5]:
sweep.append(SGMCMCConfig(learning_rate, prior_variance))
return tuple(sweep)
|
9f64505c8aa7efb33f2fbc4a7e0756ef896ee9c3
| 34,219 |
def stringify(num):
"""
Takes a number and returns a string putting a zero in front if it's
single digit.
"""
num_string = str(num)
if len(num_string) == 1:
num_string = '0' + num_string
return num_string
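# Example usage (a minimal sketch):
assert stringify(7) == '07'
assert stringify(42) == '42'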
|
7cf37776bc774d02bce0b2016d41b26b8ab94cf7
| 34,220 |
def euc_distance(vertex, circle_obstacle):
"""
Finds the distance between the point and center of the circle.
vertex: Vertex in question.
circle_obstacle: Circle obstacle in question.
return: Distance between the vertex and the center of the circle.
"""
x = vertex[0] - circle_obstacle.position[0]
y = vertex[1] - circle_obstacle.position[1]
dist = ((x ** 2) + (y ** 2)) ** 0.5
return dist
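# Example usage (a minimal sketch; SimpleNamespace stands in for a circle obstacle
# object, which is only assumed to expose a `position` attribute):
from types import SimpleNamespace
assert euc_distance((3.0, 4.0), SimpleNamespace(position=(0.0, 0.0))) == 5.0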
|
60ed338eb7a81fc282196c38d41cecda8f28efb7
| 34,222 |
import tensorflow as tf
def less(x, y):
"""
Returns the truth value of (x < y) element-wise.
Parameters
----------
x : tensor
Must be one of the following types: float32, float64, int32, uint8,
int16, int8, int64, bfloat16, uint16, half, uint32, uint64.
y : tensor
A Tensor. Must have the same type as x.
Returns
-------
A Tensor of type bool.
Examples
---------
>>> import tensorlayerx as tlx
>>> x = tlx.ops.constant(value=[1, 2, 3])
>>> y = tlx.ops.less(x, x)
"""
return tf.math.less(x, y)
|
6db5cf54e7f23e174050676f7f1e319e8dd6982d
| 34,223 |
def decision_tree_learning(examples: list, attributes: list, parent_examples,
importance_function: callable):
"""
Decision tree learning algorithm as given in figure 18.5 in Artificial
Intelligence A Modern Approach.
:param examples: list of dictionaries containing examples to learn from
:param attributes: list of all attributes in the examples
:param parent_examples: list of all parent examples (can be the same as
`examples` when first running)
:param importance_function: function that takes an attribute (str) and a
list of examples and returns a number representing the importance of
that attribute
:return: DecisionTree, a decision tree
"""
if not examples:
return plurality_value(parent_examples)
elif examples_have_same_classification(examples):
return examples[0]["classification"]
elif not attributes:
return plurality_value(examples)
else:
imp = [importance_function(a, examples) for a in attributes]
A = attributes[imp.index(max(imp))] # essentially like argmax
tree = DecisionTree(attr=A)
for vk in get_attribute_values(A, examples):
exs = [e for e in examples if e.get(A) == vk]
att = [a for a in attributes if a != A]
subtree = decision_tree_learning(exs, att, examples,
importance_function)
tree.add_branch(vk=vk, subtree=subtree)
return tree
|
df9dd631b5df38451e03cfab65d8a8c0f6a218eb
| 34,225 |
import numpy as np
from scipy import special
def modified_bessel(times, bes_A, bes_Omega, bes_s, res_begin, bes_Delta):
""" Not Tested. """
b = np.where(times > res_begin + bes_Delta / 2.,
special.j0(bes_s * bes_Omega *
(- res_begin + times - bes_Delta / 2.) ),
(np.where(times < res_begin - bes_Delta / 2.,
special.j0(bes_Omega *
(res_begin - times - bes_Delta / 2.) ),
1)))
return bes_A * b
|
1afcb27b53b364199289dff2bd7850fe6e5027fa
| 34,226 |
def predict_csr_val(csr_op, rs1_val, csr_val, csr_mask):
"""
Predicts the CSR reference value, based on the current CSR operation.
Args:
csr_op: A string of the CSR operation being performed.
rs1_val: A bitarray containing the value to be written to the CSR.
csr_val: A bitarray containing the current value of the CSR.
csr_mask: A bitarray containing the CSR's mask.
Returns:
A hexadecimal string of the predicted CSR value.
"""
prediction = None
# create a zero bitarray to zero extend immediates
zero = bitarray(uint=0, length=csr_val.len - 5)
if csr_op == 'csrrw':
prediction = csr_read(csr_val, csr_mask)
csr_write(rs1_val, csr_val, csr_mask)
elif csr_op == 'csrrs':
prediction = csr_read(csr_val, csr_mask)
csr_write(rs1_val | prediction, csr_val, csr_mask)
elif csr_op == 'csrrc':
prediction = csr_read(csr_val, csr_mask)
csr_write((~rs1_val) & prediction, csr_val, csr_mask)
elif csr_op == 'csrrwi':
prediction = csr_read(csr_val, csr_mask)
zero.append(rs1_val[-5:])
csr_write(zero, csr_val, csr_mask)
elif csr_op == 'csrrsi':
prediction = csr_read(csr_val, csr_mask)
zero.append(rs1_val[-5:])
csr_write(zero | prediction, csr_val, csr_mask)
elif csr_op == 'csrrci':
prediction = csr_read(csr_val, csr_mask)
zero.append(rs1_val[-5:])
csr_write((~zero) & prediction, csr_val, csr_mask)
return f"0x{prediction.hex}"
|
1e8be22540b89db9719ea078dbb4e1d11e53bdcc
| 34,227 |
def request(url: str, args: dict = None, method: str = 'GET'):
"""
Custom Method that requests data from requests_session
and confirms it has a valid JSON return
"""
response = request_session(url, method, args)
try:
# you only come here if we can understand the API response
return response.json()
except ValueError:
return None
except AttributeError:
return None
|
603712a7aa2978fc00e664eaebf18929108064e3
| 34,228 |
import numpy as np
def create_se3(ori, trans=None):
"""
Args:
ori (np.ndarray): orientation in any following form:
rotation matrix (shape: :math:`[3, 3]`)
quaternion (shape: :math:`[4]`)
euler angles (shape: :math:`[3]`).
trans (np.ndarray): translational vector (shape: :math:`[3]`)
Returns:
np.ndarray: a transformation matrix (shape: :math:`[4, 4]`)
"""
rot = to_rot_mat(ori)
out = np.eye(4)
out[:3, :3] = rot
if trans is not None:
trans = np.array(trans)
out[:3, 3] = trans.flatten()
return out
|
c564b8e3761b737bfc95c94e5134c57cec05805e
| 34,230 |
def build_negative_log_likelihood(
image,
telescope_description,
oversampling,
min_lambda,
max_lambda,
spe_width,
pedestal,
hole_radius=0 * u.m,
):
"""Create an efficient negative log_likelihood function that does
not rely on astropy units internally by defining needed values as closures
in this function
"""
    # get all the needed values and transform them into appropriate units
optics = telescope_description.optics
mirror_area = optics.mirror_area.to_value(u.m ** 2)
mirror_radius = np.sqrt(mirror_area / np.pi)
focal_length = optics.equivalent_focal_length
cam = telescope_description.camera.geometry
camera_frame = CameraFrame(focal_length=focal_length, rotation=cam.cam_rotation)
cam_coords = SkyCoord(x=cam.pix_x, y=cam.pix_y, frame=camera_frame)
tel_coords = cam_coords.transform_to(TelescopeFrame())
pixel_x = tel_coords.fov_lon.to_value(u.rad)
pixel_y = tel_coords.fov_lat.to_value(u.rad)
pixel_diameter = 2 * (
np.sqrt(cam.pix_area[0] / np.pi) / focal_length * u.rad
).to_value(u.rad)
min_lambda = min_lambda.to_value(u.m)
max_lambda = max_lambda.to_value(u.m)
hole_radius_m = hole_radius.to_value(u.m)
def negative_log_likelihood(
impact_parameter,
phi,
center_x,
center_y,
radius,
ring_width,
optical_efficiency_muon,
):
"""
Likelihood function to be called by minimizer
Parameters
----------
impact_parameter: float
Impact distance from telescope center
center_x: float
center of muon ring in the telescope frame
center_y: float
center of muon ring in the telescope frame
radius: float
Radius of muon ring
ring_width: float
Gaussian width of muon ring
optical_efficiency_muon: float
Efficiency of the optical system
Returns
-------
float: Likelihood that model matches data
"""
# center_x *= self.unit
# center_y *= self.unit
# radius *= self.unit
# ring_width *= self.unit
# impact_parameter *= u.m
# phi *= u.rad
# Generate model prediction
prediction = image_prediction_no_units(
mirror_radius_m=mirror_radius,
hole_radius_m=hole_radius_m,
impact_parameter_m=impact_parameter,
phi_rad=phi,
center_x_rad=center_x,
center_y_rad=center_y,
radius_rad=radius,
ring_width_rad=ring_width,
pixel_x_rad=pixel_x,
pixel_y_rad=pixel_y,
pixel_diameter_rad=pixel_diameter,
oversampling=oversampling,
min_lambda_m=min_lambda,
max_lambda_m=max_lambda,
)
# scale prediction by optical efficiency of array
prediction *= optical_efficiency_muon
sq = 1 / np.sqrt(
2 * np.pi * (pedestal ** 2 + prediction * (1 + spe_width ** 2))
)
diff = (image - prediction) ** 2
denom = 2 * (pedestal ** 2 + prediction * (1 + spe_width ** 2))
expo = np.exp(-diff / denom) + 1e-16 # add small epsilon to avoid nans
value = sq * expo
return -2 * np.log(value).sum()
return negative_log_likelihood
|
10c481448b1b2eb2e09eb4d76dda228bc6ed894d
| 34,231 |
import collections
def _ParseParameterType(type_string):
"""Parse a parameter type string into a JSON dict for the DF SQL launcher."""
type_dict = {'type': type_string.upper()}
if type_string.upper().startswith('ARRAY<') and type_string.endswith('>'):
type_dict = collections.OrderedDict([
('arrayType', _ParseParameterType(type_string[6:-1])), ('type', 'ARRAY')
])
  if type_string.upper().startswith('STRUCT<') and type_string.endswith('>'):
type_dict = collections.OrderedDict([('structTypes',
_ParseStructType(type_string[7:-1])),
('type', 'STRUCT')])
if not type_string:
raise exceptions.Error('Query parameter missing type')
return type_dict
|
213a6d6e119c76da7fc6193493e11b57e260f200
| 34,233 |
import time
def get_now_utc_epoch():
"""
    Returns the current time as a Unix epoch timestamp
    :return: integer number of seconds since the epoch
"""
return int(time.time())
|
7e9836908aed9598bab1f01365fe255aa43a94e0
| 34,234 |
from typing import List
def get_top_words(text: str, blacktrie: dict, top_n: int) -> List[str]:
"""Get top words in a string, excluding a predefined list of words.
Args:
text (str): Text to parse
blacktrie (dict): Trie built from a blacklist of words to ignore
top_n (int): Number of top words to return
Returns:
        List[str]: List of top N words sorted from most frequently occurring to least frequently occurring
"""
found_words = {}
lower_text = text.lower()
current_word = ""
current_lower_word = ""
current_node = blacktrie
text_len = len(text)
idx = 0
while idx < text_len:
char = text[idx]
lower_char = lower_text[idx]
if lower_char in current_node:
# char in blacktrie, get rest of word and see if that's still true
current_word += char
current_lower_word += lower_char
current_node = current_node[lower_char]
elif lower_char in VALID_WORD_CHARS:
# char not in blacktrie and is a valid word char
current_word += char
current_lower_word += lower_char
current_node = blacktrie
elif None in current_node:
# Word in blacktrie, ignore
current_word = ""
current_lower_word = ""
current_node = blacktrie
elif current_word:
# Non-word char found and not at an end node of the trie, update found_words
if current_lower_word in found_words:
found_words[current_lower_word]["count"] += 1
elif len(current_lower_word) > 1 or current_lower_word in set("ai"):
found_words[current_lower_word] = {"word": current_word, "count": 1}
current_word = ""
current_lower_word = ""
current_node = blacktrie
if idx + 1 >= text_len and current_word:
if current_lower_word in found_words:
found_words[current_lower_word]["count"] += 1
elif len(current_lower_word) > 1 or current_lower_word in set("ai"):
found_words[current_lower_word] = {"word": current_word, "count": 1}
current_word = ""
current_lower_word = ""
current_node = blacktrie
idx += 1
sorted_word_list = filter(lambda x: not x.isdigit(), [
v["word"] for _, v in sorted(found_words.items(), key=lambda x: x[1]["count"], reverse=True)
])
return list(sorted_word_list)[:top_n]
|
20839162d06447f7651c1051ed88feef9d62db72
| 34,235 |
import numpy as np
def get_fluid_colors(fXs, fVs, normalize=False):
    ## I realized that with limited data, this coloring does not make much sense
    """
    Given the velocity field (Ux, Uy) at positions (X, Y), compute for every x in (X, Y)
    whether the position contributes to the inflow or the outflow. This can be obtained
    by the scalar product u*x: if this quantity is positive it is an outflow, and if it
    is negative it is an inflow.
    """
ret = [np.dot(x,v) for x,v in zip(fXs, fVs)]
if normalize:
mi=min(ret)
mx=max(ret)
assert(mi<0 and mx>0)
ret = [r/mi if r<0 else r/mx for r in ret]
return np.array(ret)
|
cda836658f30c7f8cfd96d774f2654e2a056de51
| 34,236 |
def getitem(x, item):
"""returns specific item of a tensor (Functional).
# Arguments
item: Item list.
# Returns
A new functional object.
"""
validate_functional(x)
res = x.copy()
ys = []
lmbd = [Lambda(lambda xx: xx.__getitem__(item)) for xx in x.outputs]
for l, y in zip(lmbd, x.outputs):
# l.name = "slice/" + l.name.split("_")[-1]
ys.append(l(y))
res.outputs = ys
return res
|
7854bad7ff67aa3d04be7e26b25785832ce04522
| 34,237 |
import torch
from typing import Optional
def norm_range(
data: torch.Tensor, min: float, max: float, per_channel: bool = True, out: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""
Scale range of tensor
Args:
data: input data. Per channel option supports [C,H,W] and [C,H,W,D].
min: minimal value
max: maximal value
per_channel: range is normalized per channel
out: if provided, result is saved in here
Returns:
torch.Tensor: normalized data
"""
if out is None:
out = torch.zeros_like(data)
out = norm_min_max(data, per_channel=per_channel, out=out)
_range = max - min
out = (out * _range) + min
return out
|
52f36f0b9a74d8813ee72bc7a773620e92c4923b
| 34,238 |
def get_mvarg(size_pos, position="full"):
"""Take xrandrs size&pos and prepare it for wmctrl (MVARG) format
MVARG: <G>,<X>,<Y>,<W>,<H>
* <G> - gravity, 0 is default
"""
allowed = ["left", "right", "top", "bottom", "full"]
if position not in allowed:
raise ValueError(f"Position has to be one of {allowed}")
size, x, y = size_pos.split("+")
w, h = size.split("x")
if position == "left":
w = int(w) // 2
if position == "right":
w = int(w) // 2
x = int(x) + w
return f"0,{x},{y},{w},{h}"
|
0b8a9c3f5ca7e24212502a3f2c76b18167deef6e
| 34,239 |
def fr2date(edate, **kwargs):
"""
Wrapper function for :func:`date2date` with French input and
standard output date formats
That means `date2date(edate, fr=True, **kwargs)`;
but *fr* given in call will overwrite *fr=True*.
Examples
--------
>>> edate = ['12/11/2014 12:00', '01/03/2015 17:56:00',
... '01/12/1990', '04/05/1786']
>>> print(", ".join(fr2date(edate)))
12.11.2014 12:00, 01.03.2015 17:56:00, 01.12.1990, 04.05.1786
>>> print(", ".join(fr2date(edate, full=True)))
12.11.2014 12:00:00, 01.03.2015 17:56:00, 01.12.1990 00:00:00, 04.05.1786 00:00:00
>>> print(fr2date(list(edate), format='%Y%m%d%H%M%S'))
['20141112120000', '20150301175600', '19901201000000', '17860504000000']
>>> print(fr2date(tuple(edate)))
('12.11.2014 12:00', '01.03.2015 17:56:00', '01.12.1990', '04.05.1786')
>>> print(fr2date(np.array(edate)))
['12.11.2014 12:00' '01.03.2015 17:56:00' '01.12.1990' '04.05.1786']
>>> print(fr2date(edate[0]))
12.11.2014 12:00
2-digit year
>>> edate = ['12/11/14 12:00', '01/03/15 17:56:00', '01/12/90']
>>> print(", ".join(fr2date(edate)))
12.11.2014 12:00, 01.03.2015 17:56:00, 01.12.1990
>>> print(", ".join(fr2date(edate, full=True)))
12.11.2014 12:00:00, 01.03.2015 17:56:00, 01.12.1990 00:00:00
"""
if 'fr' not in kwargs:
kwargs.update({'fr': True})
return date2date(edate, **kwargs)
|
4bdf32e344585caa159d64bcdaa3e870b41f8633
| 34,240 |
def convert_species_tuple2chianti_str(species):
"""
Convert a species tuple to the ion name format used in `chiantipy`.
Parameters
-----------
species: tuple (atomic_number, ion_number)
Returns
--------
str
ion name in the chiantipy format
Examples
---------
>>> convert_species_tuple2chianti_str((1,0))
'h_1'
>>> convert_species_tuple2chianti_str((14,1))
'si_2'
"""
atomic_number, ion_number = species
chianti_ion_name = convert_atomic_number2symbol(atomic_number).lower() + '_' + str(ion_number + 1)
return chianti_ion_name
|
d37aa9c9a9e186febc4ef6df2f3b53f9110ace7a
| 34,241 |
import hashlib
def vote(request, village_no, day_no):
"""投票処理"""
vote_id = request.POST['vote']
login_id = request.session.get('login_id', False)
participant = VillageParticipant.objects.get(village_no=village_no, pl=login_id, cancel_flg=False)
ability = VillageParticipantExeAbility.objects.get(village_participant_id=participant.id, day_no=day_no)
ability.vote = hashlib.sha256(vote_id.encode('utf-8')).hexdigest()
ability.save()
    # Return to the village main screen
return HttpResponseRedirect(reverse('pywolf:village', args=(village_no, day_no)))
|
4935ee87645d037e9455e61c588168f3f276f8ce
| 34,243 |
def read_header_offsets(file, block_size):
"""Reads the PBDF header (not checking the included file size) and returns the list of offsets adjusted to match
decrypted data positions.
Args:
file: The decrypted input file.
block_size (int): The block size in bytes at which end a checksum is placed.
Returns:
The list of offsets, adjusted to point to decrypted file positions.
"""
_ = read_int32(file)
num_offsets = read_int32(file)
offsets = list(read_int32s(file, num_offsets))
for i in range(num_offsets):
offsets[i] -= offsets[i] // block_size * 4
return offsets
|
cf2d5c89083bebf3dff1b92bd2365e1a62b2f97e
| 34,244 |
def level_2_win_play(x):
"""
Probability that a deck with x SSGs will win on the play using level two reasoning
:param int x: SSGs in deck
:return float:
"""
return sum([
level_2_hand_odds_play(i, x) * mull_to_play(i, x) for i in range(0, 8)
])
|
94834ad0cc603b2923c0f5c441def95196197c25
| 34,245 |
def getfullargspec(func):
"""Get the names and default values of a function's arguments.
A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
'args' is a list of the argument names.
'varargs' and 'varkw' are the names of the * and ** arguments or None.
'defaults' is an n-tuple of the default values of the last n arguments.
'kwonlyargs' is a list of keyword-only argument names.
'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
'annotations' is a dictionary mapping argument names to annotations.
The first four items in the tuple correspond to getargspec().
"""
if ismethod(func):
func = func.__func__
if not isfunction(func):
raise TypeError('{!r} is not a Python function'.format(func))
args, varargs, kwonlyargs, varkw = _getfullargs(func.__code__)
return FullArgSpec(args, varargs, varkw, func.__defaults__,
kwonlyargs, func.__kwdefaults__, func.__annotations__)
|
d239d6d9b7399689157d6a5f025148e287f06b68
| 34,246 |
import json
def get_json(file_path):
"""
Faz a leitura de um arquivo Json com codificacao utf-8,
apenas para arquivos dentro do diretorio folhacerta_settings
:param file_path: (string) nome do arquivo json com extensao
:return: Dicionario com os dados do json
"""
with open(file_path, encoding='utf-8') as data_json:
return json.load(data_json)
|
bd475d7427705026ad17d32d25a1a016d6c6f93d
| 34,247 |
import numpy as np
import matplotlib as mpl
def color_array(arr, alpha=1):
""" take an array of colors and convert to
an RGBA image that can be displayed
with imshow
"""
img = np.zeros(arr.shape + (4,))
for row in range(arr.shape[0]):
for col in range(arr.shape[1]):
c = mpl.colors.to_rgb(arr[row, col])
img[row, col, 0:3] = c
img[row, col, 3] = alpha
return img
|
dbfdf7e78788365f6bc0bb61b7c7607aec79e250
| 34,248 |
def showturtle():
"""Makes the turtle visible.
Aliases: showturtle | st
No argument.
Example (for a Turtle instance named turtle):
>>> turtle.hideturtle()
>>> turtle.showturtle()
"""
return __SingletonTurtle().showturtle()
|
011edcca34c25d42831826c73f8a823d992448b7
| 34,249 |
def get_setting(db, name):
"""
Get a specific setting
@param db The database connection
@param name The setting's name
@return The setting's value
"""
result = _query(db, "SELECT value FROM settings WHERE name=%(name)s;", { "name" : name })
if len(result) > 0:
return getattr(result[0], "value")
return None
|
1a82faf9e2f222328d9deb9c0beb19a8a7b22862
| 34,250 |
def check_table_exists(client: AitoClient, table_name: str) -> bool:
"""check if a table exists in the instance
:param client: the AitoClient instance
:type client: AitoClient
:param table_name: the name of the table
:type table_name: str
:return: True if the table exists
:rtype: bool
"""
existing_tables = get_existing_tables(client)
return table_name in existing_tables
|
b45a7cbe8a896040903af6f5ece177d53773d098
| 34,252 |
def bytes_to_str(s, encoding='latin-1'):
"""Extract null-terminated string from bytes."""
if b'\0' in s:
s, _ = s.split(b'\0', 1)
return s.decode(encoding, errors='replace')
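# Example usage (a minimal sketch):
assert bytes_to_str(b"hello\x00\x00padding") == "hello"
assert bytes_to_str(b"caf\xe9") == "café"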
|
7d98d91443ab16478f1b8ecba39311110e92009c
| 34,253 |
from typing import Pattern
import re
from typing import Optional
from typing import Match
from typing import AnyStr
from typing import Dict
from datetime import timedelta
def get_time_delta(time_string: str) -> timedelta:
"""
Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:type datetime.timedelta
"""
rel_time: Pattern = re.compile(
pattern=r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
# noqa
flags=re.IGNORECASE,
)
parts: Optional[Match[AnyStr]] = rel_time.match(string=time_string)
if not parts:
raise Exception(f"Invalid relative time: {time_string}")
# https://docs.python.org/3/library/re.html#re.Match.groupdict
parts: Dict[str, str] = parts.groupdict()
time_params = {}
    if all(value is None for value in parts.values()):
raise Exception(f"Invalid relative time: {time_string}")
for time_unit, magnitude in parts.items():
if magnitude:
            time_params[time_unit] = int(magnitude)
return timedelta(**time_params)
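# Example usage (a minimal sketch):
assert get_time_delta("2 hours") == timedelta(hours=2)
assert get_time_delta("45 minutes") == timedelta(minutes=45)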
|
8a1df45286f0994e1f9aab4f4209ea14f639e8b9
| 34,254 |
def lens_of(data):
"""Apply len(x) to elemnts in data."""
return len(data)
|
e10cba2bd801afd8f41dd0b15bfc23b7849c06ba
| 34,255 |
from typing import Any
def _format_phone(
phone: Any, output_format: str, fix_missing: str, split: bool, errors: str
) -> Any:
"""
Function to transform a phone number instance into the desired format.
The last component of the returned tuple contains a code indicating how the
input value was changed:
0 := the value is null
1 := the value could not be parsed
2 := the value is cleaned and the cleaned value is DIFFERENT than the input value
3 := the value is cleaned and is THE SAME as the input value (no transformation)
"""
country_code, area_code, office_code, station_code, ext_num, status = _check_phone(phone, True)
if status == "null":
return (np.nan, np.nan, np.nan, np.nan, np.nan, 0) if split else (np.nan, 0)
if status == "unknown":
if errors == "raise":
raise ValueError(f"unable to parse value {phone}")
result = phone if errors == "ignore" else np.nan
return (result, np.nan, np.nan, np.nan, np.nan, 1) if split else (result, 1)
if split:
missing_code = "1" if fix_missing == "auto" and area_code else np.nan
country_code = country_code if country_code else missing_code
area_code = area_code if area_code else np.nan
ext_num = ext_num if ext_num else np.nan
return country_code, area_code, office_code, station_code, ext_num, 2
if output_format == "nanp": # NPA-NXX-XXXX
area_code = f"{area_code}-" if area_code else ""
ext_num = f" ext. {ext_num}" if ext_num else ""
result = f"{area_code}{office_code}-{station_code}{ext_num}"
elif output_format == "e164": # +NPANXXXXXX
if country_code is None and area_code:
country_code = "+1"
else:
country_code = "+" + country_code if area_code else ""
area_code = area_code if area_code else ""
ext_num = f" ext. {ext_num}" if ext_num else ""
result = f"{country_code}{area_code}{office_code}{station_code}{ext_num}"
elif output_format == "national": # (NPA) NXX-XXXX
area_code = f"({area_code}) " if area_code else ""
ext_num = f" ext. {ext_num}" if ext_num else ""
result = f"{area_code}{office_code}-{station_code}{ext_num}"
return result, 2 if phone != result else 3
|
97341088f03f5d276339c83d0061bbfec28933b9
| 34,256 |
def mvg_all_joints(jLs, face_keypoints=True, upper_sternum=True, upper_body=True, lower_body=True, wsize=6):
"""
:param jLs: list of lists with length 54 derived from text file
including x,y,z coordinates for each joint as separately list.
:type jLs: list
:param face_keypoints: 15 face keypoints in total, defaults to True
:type face_keypoints: bool, optional
:param upper_sternum: 3 upper sternum keypoints in total, defaults to True
:type upper_sternum: bool, optional
:param upper_body: 18 upper body keypoints in total, defaults to True
:type upper_body: bool, optional
:param lower_body: 18 lower body keypoints in total, defaults to True
:type lower_body: bool, optional
    :param wsize: window size of the moving average filter, defaults to 6
:type wsize: int, optional
:return: maximum 54 arrays for all body joints
:rtype: list of arrays
"""
if wsize <= 2:
        raise ValueError('Window size is recommended to be at least 3.')
if face_keypoints == False and upper_sternum == False and lower_body == False and upper_body == False:
raise NotImplementedError('There are no keypoints to filter.')
mvg_joints = []
mvg = MovingAverage()
if face_keypoints:
# Nose
mvg_nose_x = mvg.moving_average(jLs[0], wsize)
mvg_nose_y = mvg.moving_average(jLs[1], wsize)
mvg_nose_z = mvg.moving_average(jLs[2], wsize)
# Eyes
mvg_reye_x = mvg.moving_average(jLs[42], wsize)
mvg_reye_y = mvg.moving_average(jLs[43], wsize)
mvg_reye_z = mvg.moving_average(jLs[44], wsize)
mvg_leye_x = mvg.moving_average(jLs[45], wsize)
mvg_leye_y = mvg.moving_average(jLs[46], wsize)
mvg_leye_z = mvg.moving_average(jLs[47], wsize)
# Ears
mvg_rear_x = mvg.moving_average(jLs[48], wsize)
mvg_rear_y = mvg.moving_average(jLs[49], wsize)
mvg_rear_z = mvg.moving_average(jLs[50], wsize)
mvg_lear_x = mvg.moving_average(jLs[51], wsize)
mvg_lear_y = mvg.moving_average(jLs[52], wsize)
mvg_lear_z = mvg.moving_average(jLs[53], wsize)
mvg_joints.extend(
[mvg_nose_x, mvg_nose_y, mvg_nose_z,
mvg_reye_x, mvg_reye_y, mvg_reye_z,
mvg_leye_x, mvg_leye_y, mvg_leye_z,
mvg_rear_x, mvg_rear_y, mvg_rear_z,
mvg_lear_x, mvg_lear_y, mvg_lear_z]
)
if upper_sternum:
# Upper sternum
mvg_upster_x = mvg.moving_average(jLs[3], wsize)
mvg_upster_y = mvg.moving_average(jLs[4], wsize)
mvg_upster_z = mvg.moving_average(jLs[5], wsize)
mvg_joints.extend(
[mvg_upster_x, mvg_upster_y, mvg_upster_z]
)
if upper_body:
# Right upper body side
mvg_rs_x = mvg.moving_average(jLs[6], wsize)
mvg_rs_y = mvg.moving_average(jLs[7], wsize)
mvg_rs_z = mvg.moving_average(jLs[8], wsize)
mvg_rel_x = mvg.moving_average(jLs[9], wsize)
mvg_rel_y = mvg.moving_average(jLs[10], wsize)
mvg_rel_z = mvg.moving_average(jLs[11], wsize)
mvg_rw_x = mvg.moving_average(jLs[12], wsize)
mvg_rw_y = mvg.moving_average(jLs[13], wsize)
mvg_rw_z = mvg.moving_average(jLs[14], wsize)
mvg_ls_x = mvg.moving_average(jLs[15], wsize)
mvg_ls_y = mvg.moving_average(jLs[16], wsize)
mvg_ls_z = mvg.moving_average(jLs[17], wsize)
mvg_lel_x = mvg.moving_average(jLs[18], wsize)
mvg_lel_y = mvg.moving_average(jLs[19], wsize)
mvg_lel_z = mvg.moving_average(jLs[20], wsize)
mvg_lw_x = mvg.moving_average(jLs[21], wsize)
mvg_lw_y = mvg.moving_average(jLs[22], wsize)
mvg_lw_z = mvg.moving_average(jLs[23], wsize)
mvg_joints.extend(
[mvg_rs_x, mvg_rs_y, mvg_rs_z,
mvg_rel_x, mvg_rel_y, mvg_rel_z,
mvg_rw_x, mvg_rw_y, mvg_rw_z,
mvg_ls_x, mvg_ls_y, mvg_ls_z,
mvg_lel_x, mvg_lel_y, mvg_lel_z,
mvg_lw_x, mvg_lw_y, mvg_lw_z]
)
if lower_body:
# Right lower body side
mvg_rh_x = mvg.moving_average(jLs[24], wsize)
mvg_rh_y = mvg.moving_average(jLs[25], wsize)
mvg_rh_z = mvg.moving_average(jLs[26], wsize)
mvg_rk_x = mvg.moving_average(jLs[27], wsize)
mvg_rk_y = mvg.moving_average(jLs[28], wsize)
mvg_rk_z = mvg.moving_average(jLs[29], wsize)
mvg_ra_x = mvg.moving_average(jLs[30], wsize)
mvg_ra_y = mvg.moving_average(jLs[31], wsize)
mvg_ra_z = mvg.moving_average(jLs[32], wsize)
# Left lower body side
mvg_lh_x = mvg.moving_average(jLs[33], wsize)
mvg_lh_y = mvg.moving_average(jLs[34], wsize)
mvg_lh_z = mvg.moving_average(jLs[35], wsize)
mvg_lk_x = mvg.moving_average(jLs[36], wsize)
mvg_lk_y = mvg.moving_average(jLs[37], wsize)
mvg_lk_z = mvg.moving_average(jLs[38], wsize)
mvg_la_x = mvg.moving_average(jLs[39], wsize)
mvg_la_y = mvg.moving_average(jLs[40], wsize)
mvg_la_z = mvg.moving_average(jLs[41], wsize)
mvg_joints.extend(
[mvg_rh_x, mvg_rh_y, mvg_rh_z,
mvg_rk_x, mvg_rk_y, mvg_rk_z,
mvg_ra_x, mvg_ra_y, mvg_ra_z,
mvg_lh_x, mvg_lh_y, mvg_lh_z,
mvg_lk_x, mvg_lk_y, mvg_lk_z,
mvg_la_x, mvg_la_y, mvg_la_z]
)
return mvg_joints
|
cf6b2d5092cbbf902fe33902efbb4ca9dbe32d74
| 34,257 |
def check_in_all_models(models_per_expt):
"""
Check intersection of which models are in all experiments
:param models_per_expt: an ordered dictionary of expts as keys with a list of valid models
:return: list of models that appear in all experiments
"""
in_all = None
for key, items in models_per_expt.items():
if in_all is None:
in_all = set(items)
else:
in_all.intersection_update(items)
return in_all
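# Example usage (a minimal sketch):
per_expt = {"exptA": ["m1", "m2", "m3"], "exptB": ["m2", "m3"], "exptC": ["m3"]}
assert check_in_all_models(per_expt) == {"m3"}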
|
75e17b8558a592471dda8a855959f60c40b06759
| 34,258 |
import torch
def _translate_x(video: torch.Tensor, factor: float, **kwargs):
"""
    Translate the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
        factor (float): How much (relative to the image size) to translate along the
            horizontal axis.
"""
_check_fill_arg(kwargs)
translation_offset = factor * video.size(-1)
return F_t.affine(
video,
[1, 0, translation_offset, 0, 1, 0],
fill=kwargs["fill"],
interpolation="bilinear",
)
|
925a9dc0b67b70330937b38cc1694c8cebd93e1c
| 34,259 |
import numpy
def get_volume_pixeldata(sorted_slices):
"""
    the slope and intercept calculation can cause the slices to have different dtypes,
    so we should get the correct dtype that can cover all of them
    :type sorted_slices: list of slices
    :param sorted_slices: slices sorted in the correct order to create the volume
"""
slices = []
combined_dtype = None
for slice_ in sorted_slices:
slice_data = _get_slice_pixeldata(slice_)
slice_data = slice_data[numpy.newaxis, :, :]
slices.append(slice_data)
if combined_dtype is None:
combined_dtype = slice_data.dtype
else:
combined_dtype = numpy.promote_types(combined_dtype, slice_data.dtype)
    # create the new volume with the correct data
vol = numpy.concatenate(slices, axis=0)
# Done
# if rgb data do separate transpose
if len(vol.shape) == 4 and vol.shape[3] == 3:
vol = numpy.transpose(vol, (2, 1, 0, 3))
else:
vol = numpy.transpose(vol, (2, 1, 0))
return vol
|
f935a3b820211fe1535a95d24f021cc2e8d48fbe
| 34,260 |
import logging
def read_val_dataframe_from_hdf5(input_pattern, val_fold=0):
"""Reads the hdf5 tables for the validation fold only.
Args:
input_pattern: String with the path for the hdf5 with a '%d' for the fold.
val_fold: Integer. The zero-based validation fold num.
Returns:
A pandas dataframe containing the validation data loaded from hdf5
"""
logging.info('The validation path is : %s', input_pattern % val_fold)
return io_utils.read_dataframe_from_hdf5(input_pattern % val_fold)
|
894e59c893b6e2eb7beaa7bd2dd67ac2eeca7a99
| 34,261 |
from pathlib import Path
def hash_file(hash_obj, fout: Path):
"""Create a hash of the file.
Parameters
hash_obj () The hash object
fout (Path) : Path to file to be written.
Return
str : If fout was found.
None : If fout was not found.
"""
try:
with fout.open('rb') as f:
buf = f.read(config.BUF_SIZE)
while len(buf) > 0:
hash_obj.update(buf)
buf = f.read(config.BUF_SIZE)
except FileNotFoundError:
return None
return hash_obj.hexdigest()
|
b57b87b192dbd63a3cd3136ffd7a6b98fd8ed7dd
| 34,262 |
import numpy as np
import scipy.io
def Get_Wav_EMA_PerFile(EMA_file, Wav_file, F, EmaDir, MFCCpath, BeginEnd, XvectorPath, cfg):
"""Return mean and variance normalised ema, mfcc, and x-vectors if required (of the cross-corpus).
Parameters
----------
EMA_file: str
path to ema file
Wav_file: str
path to mfcc file
F: int
sentence number from MOCHA-TIMIT
    EmaDir: str
Directory to EMA data
MFCCpath: str
Directory to MFCC data
BeginEnd: array
Start and end of a speech segment
XvectorPath: str
Path to xvector directory
cfg: main.Configuration
Configuration file
Returns
-------
    If x-vectors are required, return preprocessed ema and mfcc features along with the x-vectors for each speech chunk.
Else, return preprocessed ema and mfcc features.
"""
EmaMat = scipy.io.loadmat(EmaDir + EMA_file)
EMA_temp = EmaMat['EmaData']
EMA_temp = np.transpose(EMA_temp)# time X 18
Ema_temp2 = np.delete(EMA_temp, [4,5,6,7,10,11,14,15,16,17], 1) # time X 12
MeanOfData = np.mean(Ema_temp2, axis=0)
Ema_temp2 -= MeanOfData
C = 0.5*np.sqrt(np.mean(np.square(Ema_temp2), axis=0))
    Ema = np.divide(Ema_temp2, C) # Mean & variance normalized
[aE,bE] = Ema.shape
#print F.type
    EBegin = int(BeginEnd[0,F]*100) # start of a speech segment
    EEnd = int(BeginEnd[1,F]*100) # end of the segment
feats = np.loadtxt(MFCCpath + Wav_file[:-4] + '.txt')#np.asarray(htkfile.data)
MFCC_G = feats
TimeStepsTrack = EEnd - EBegin
## X-vector Embeddings
SPK_emd = np.loadtxt(XvectorPath + Wav_file[:-4] + '.txt') # load x-vectors, given a wav file name
if cfg.x_vectors:
return Ema[EBegin:EEnd,:], MFCC_G[EBegin:EEnd,:cfg.mfcc_dim], SPK_emd # with out
return Ema[EBegin:EEnd,:], MFCC_G[EBegin:EEnd,:cfg.mfcc_dim]
|
91d77fb4dd2a3202f7c94db2fa7a029c9562b63a
| 34,263 |
def is_class_name(class_name):
"""
Check if the given string is a python class.
The criteria to use is the convention that Python classes start with uppercase
:param class_name: The name of class candidate
:type class_name: str
    :return: True if class_name follows the Python class naming convention, otherwise False
"""
return class_name.capitalize()[0] == class_name[0]
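# Example usage (a minimal sketch):
assert is_class_name("MyClass")
assert not is_class_name("my_function")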
|
2b4b6a09f2a112f7e8163f3caf97fdfca0c93e12
| 34,264 |
def is_pesummary_json_file_deprecated(path):
"""Determine if the results file is a deprecated pesummary json file
Parameters
----------
path: str
path to results file
"""
return _is_pesummary_json_file(path, _check_pesummary_file_deprecated)
|
49ddd8f84e259cf26955220013bcfe7003bcca5d
| 34,265 |
def popart(image_array):
"""
Applique la fonction popart 4 fois avec des paramètres un peu différents
:return: une image avec moins de couleurs.
"""
h = get_image_height(image_array)
w = get_image_width(image_array)
small_height = h // 2
small_width = w // 2
small_image = image_array[0:small_height * 2:2, 0:small_width * 2:2, CANAUX_RGB]
new_image = np.zeros(image_array.shape, dtype=image_array.dtype)
hue_delta = 0
for i in range(2):
for j in range(2):
new_image[i * small_height:(i + 1) * small_height, j * small_width:(j + 1) * small_width, CANAUX_RGB] = popart_one(
small_image,
hue_delta)
hue_delta += 45
return new_image
|
77b8af2bc5db976289a94118d8e01789c535b2b0
| 34,266 |
from AugSeg.get_instance_group import extract
from AugSeg.affine_transform import transform_image, transform_annotation
from datasetsAug.roidb import combined_roidb_for_training
def _get_image_blob(roidb, coco):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
# AUG BEGIN--------------------------------
backupim = im
backuproidb = roidb[i]
try:
img_id = roidb[i]['id']
ann_ids = coco.getAnnIds(imgIds=img_id)
anns = coco.loadAnns(ann_ids)
background, instances_list, transforms_list, groupbnds_list, groupidx_list = extract(anns, im)
new_img = transform_image(background, instances_list, transforms_list)
new_ann = transform_annotation(anns, transforms_list, groupbnds_list, groupidx_list,
background.shape[1], background.shape[0])
im = new_img
new_roidb, ratio_list, ratio_index = combined_roidb_for_training( \
('coco_2017_train',), cfg.TRAIN.PROPOSAL_FILES, \
img_id, new_ann, coco
)
if roidb[i]['flipped']:
roidb[i] = new_roidb[1]
else:
roidb[i] = new_roidb[0]
        except Exception:
roidb[i] = backuproidb
im = backupim
# AUG END----------------------------------
# If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
# Create a blob to hold the input images [n, c, h, w]
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales, roidb
|
637983405303c304bdf480feca7ea29f1a06d6a6
| 34,267 |
def solve_lu(A, P, B, options=MATPROP.NONE):
"""
Solve a system of linear equations, using LU decomposition.
Parameters
----------
A: af.Array
- A 2 dimensional arrayfire array representing the coefficients of the system.
- This matrix should be decomposed previously using `lu_inplace(A)`.
P: af.Array
- Permutation array.
- This array is the output of an earlier call to `lu_inplace(A)`
B: af.Array
A 1 or 2 dimensional arrayfire array representing the constants of the system.
Returns
-------
X: af.Array
A 1 or 2 dimensional arrayfire array representing the unknowns in the system.
"""
X = Array()
safe_call(backend.get().af_solve_lu(c_pointer(X.arr), A.arr, P.arr, B.arr, options.value))
return X
|
96eb1f222618c5b17db48a69535204b6246b873f
| 34,268 |
import mimetypes
def guess_type(filename, strict=False, default="application/octet-stream"):
""" Wrap std mimetypes.guess_type to assign a default type """
content_type, encoding = mimetypes.guess_type(filename, strict=strict)
if content_type is None:
content_type = default
return content_type, encoding
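# Example usage (a minimal sketch):
assert guess_type("photo.jpg")[0] == "image/jpeg"
assert guess_type("data.unknown_ext")[0] == "application/octet-stream"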
|
ae3dae1d005797b2dc96cd893f4a79d20922e6a2
| 34,269 |
def _lookup_alias(aliases, value):
"""
Translate to a common name if our value is an alias.
:type aliases: dict of (str, [str])
:type value: str
:rtype: str
>>> _lookup_alias({'name1': ['alias1']}, 'name1')
'name1'
>>> _lookup_alias({'name1': ['alias1', 'alias2']}, 'alias1')
'name1'
>>> _lookup_alias({'name1': ['alias1', 'alias2']}, 'alias2')
'name1'
>>> _lookup_alias({'name1': ['alias1']}, 'name2')
'name2'
"""
better_name = [name for (name, aliases) in aliases.items()
if value in aliases]
return better_name[0] if better_name else value
|
df0641b1f8aca964f76afd2a83fb91a587d52e1d
| 34,270 |
import tensorflow as tf
def triplet_loss(anchor, positive, negative, alpha):
"""Calculate the triplet loss according to the FaceNet paper
Args:
anchor: the embeddings for the anchor images.
positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: margin between the anchor-positive and anchor-negative distances.
Returns:
the triplet loss according to the FaceNet paper as a float tensor.
"""
with tf.name_scope('triplet_loss') as scope:
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)
basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)
return loss
|
7c3d218833e86f62a0de493ff8dc1045fd3a7788
| 34,271 |
import json
def loadConfig() -> list:
"""Loads configuration from CONFIG_PATH
Returns:
[list] -- the CONFIG_VARS as loaded from CONFIG_PATH
"""
configData = json.load(open(CONFIG_PATH, 'r', encoding='utf-8'))
returnValue = []
# make sure we're not missing anything
for configItem in CONFIG_VARS:
if configItem not in configData:
# We can't use core.log() because config.loglevel doesn't exist yet
print(f"E: {configItem} not found in config.json")
else:
returnValue.append(configData[configItem])
return returnValue
|
f115cd8896ca5d52086f4054cc7fb5a05c230d0e
| 34,272 |
import httpx
async def get_corrected_name_api(keyphrase: str) -> str:
"""
Get corrected artist name via Last.fm API method
artist.getCorrection. See: https://last.fm/api/show/artist.getCorrection
:param keyphrase: Name of an artist or a band.
:return: Corrected artist name.
"""
async with httpx.AsyncClient() as client:
res = await client.get(
f"https://ws.audioscrobbler.com/2.0/"
f"?method=artist.getcorrection&artist={_quote(keyphrase)}"
f"&api_key={LASTFM_API_KEY}&format=json"
)
res.raise_for_status()
parsed = res.json()
name = parsed["corrections"]["correction"]["artist"]["name"]
return name
|
befe2f89b233bf0453873d710531483e61832786
| 34,273 |
def missing_columns(df, missing_threshold=0.6):
"""Find missing features
Parameters
----------
df : pd.DataFrame, shape (n_samples, n_features)
Training data, where n_samples is the number of samples and n_features is the number of features.
missing_threshold : float, default=0.6
        Count all features with a missing rate greater than or equal to `missing_threshold`.
Returns
-------
    t : All features with a missing rate greater than or equal to `missing_threshold`
"""
    assert 1 >= missing_threshold >= 0, "`missing_threshold` should be in the range [0, 1]."
t = (1-df.count()/len(df)).reset_index()
t.columns = ['feature_name', 'missing_rate']
t = t[t.missing_rate>=missing_threshold].reset_index(drop=True)
return t
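# Example usage (a minimal sketch, assuming pandas is available):
import pandas as pd
df_demo = pd.DataFrame({"a": [1, None, None, None], "b": [1, 2, 3, 4]})
report = missing_columns(df_demo, missing_threshold=0.6)
# -> a single row reporting feature 'a' with missing_rate 0.75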
|
4d31673670d894556b6571a0233ec36c8452570a
| 34,274 |
import re
import yaml
def read_message(raw_data: bytes):
"""Reads a UDP raw message, split the metadata, and decode data.
:param raw_data: Raw UDP packet
:type raw_data: bytes
:return: Metadata and decoded data required for packet reassembly.
:rtype: tuple
"""
header, data = read_raw_message(raw_data)
    # 'device/endpoint:encoding:id:sequence:max_sequence'
match = re.search("(.*?)/(.*?):(.*?):(.*?):(.*?):(.*)", header)
device = match[1]
endpoint = match[2]
encoding = match[3]
id = match[4]
sequence = match[5]
max_sequence = match[6]
if encoding == "yaml":
decoded_data = yaml.load(data.decode("utf-8"), Loader=yaml.Loader)
else:
decoded_data = data
return (
device,
endpoint,
decoded_data,
encoding,
int(id),
int(sequence),
int(max_sequence),
)
|
bd039b55f579971be6750519bcc79f5ac6670ea4
| 34,276 |
def is_ordered(treap):
""" Utility to check that every node in the given Treap satisfies the following:
Rules:
- if v is a child of u, then v.priority <= u.priority
- if v is a left child of u, then v.key < u.key
- if v is a right child of u, then v.key > u.key
"""
# iterate through all nodes in the heap
for node in treap:
# check parent (if not root)
if node != treap.root and (node.priority > node.parent.priority):
print("Node {} and parent ({}) have mismatched priorities.".format(node, node.parent))
return False
# check left and right. All are optional, technically
if node.left and (node.key < node.left.key):
print("Node {} and left child ({}) have mismatched keys.".format(node, node.left))
return False
if node.right and (node.key > node.right.key):
print("Node {} and right child ({}) have mismatched keys.".format(node, node.right))
return False
return True
|
38b7fd7690931e017e9ece52b6cba09dbb708400
| 34,278 |
def log_prob(x, df, loc, scale):
"""Compute log probability of Student T distribution.
Note that scale can be negative.
Args:
x: Floating-point `Tensor`. Where to compute the log probabilities.
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values.
loc: Floating-point `Tensor`; the location(s) of the distribution(s).
scale: Floating-point `Tensor`; the scale(s) of the distribution(s).
Returns:
A `Tensor` with shape broadcast according to the arguments.
"""
# Writing `y` this way reduces XLA mem copies.
y = (x - loc) * (tf.math.rsqrt(df) / scale)
log_unnormalized_prob = -0.5 * (df + 1.) * log1psquare(y)
log_normalization = (
tf.math.log(tf.abs(scale)) + 0.5 * tf.math.log(df) +
0.5 * np.log(np.pi) + tfp_math.log_gamma_difference(0.5, 0.5 * df))
return log_unnormalized_prob - log_normalization
|
6714c53e1be59aaa8c9194ea30b6e51d665caba9
| 34,279 |
def get_cleaned_query_data_http_error(handler, *args):
"""
同上
:param handler:
:param args:
:return:
"""
data = {}
for k in args:
try:
data[k] = clean_data(handler.get_query_argument(k))
except MissingArgumentError:
raise HTTPError(400)
return data
|
f5513034be8cd9082a3ad632c77ed4d364644819
| 34,281 |
from django import http
import datetime
def CreateOrUpdateActivity(request, program, activity=None):
"""Creates or updates an activity.
Caller must provide the program. Activity is optional when creating a new one.
Args:
request: A request.
program: A models.Program under which we are either creating or updating
the activity.
activity: Optional activity. If available, this method will handle the
update of the given activity. If not available a new one is created.
Returns:
http response.
"""
is_update = activity is not None
if activity:
assert activity.parent() == program
activity_formset = formsets.formset_factory(forms.ActivityScheduleForm,
formset=
forms.ActivityScheduleFormSet,
extra=0, can_delete=True)
if request.method == 'POST':
formset = activity_formset(request.POST, request.FILES)
if formset.PrepareCreateUpdate(request.user, program, activity):
formset.CreateUpdateSchedules(activity)
return http.HttpResponseRedirect(urlresolvers.reverse(
'ShowProgram',
kwargs=dict(program_key=program.key())))
else:
#Some errors are in the forms
if not formset.IsScheduleAvailable():
#We undo the delete in every form we send back. User can not delete
#all forms.
for index in range(len(formset.forms)):
#We need to change the data in the request form because the deletion
#field is a special field added at the formset level, not directly
#accessible at the form level like anyother field/form attribute.
#There is probably a way to set the value of deletion to false, but
#nothing straightforward/obvious, so changing POST data instead.
param_id = 'form-%s-%s' % (index, formsets.DELETION_FIELD_NAME)
request.POST[param_id] = ''
assert not formset.forms[index].IsDeleted()
else: #GET
if is_update:
form_data = forms.ActivityScheduleFormSet.BuildPostData(request.user,
activity)
formset = activity_formset(form_data)
assert formset.is_valid()
else: # User wants to create a new activity.
user_now = request.user.GetLocalTime(datetime.datetime.utcnow())
user_now = user_now.replace(second=0, microsecond=0)
a_start = user_now + datetime.timedelta(minutes=60)
a_end = a_start + datetime.timedelta(minutes=60)
a_start_time = format.FormatTime(a_start)
a_end_time = format.FormatTime(a_end)
formset = activity_formset(initial=[{'start_date': a_start.date(),
'start_time': a_start_time,
'end_date': a_end.date(),
'end_time': a_end_time,
'owner': program.owner.email}])
# We put the access point rooms.
access_points_info = ap_utils.GetAccessPointsInfo(utils.AccessPointType.ROOM)
access_point_names = simplejson.dumps(access_points_info['uris'])
access_point_keys = simplejson.dumps(access_points_info['keys'])
access_point_tzs = simplejson.dumps(access_points_info['timezone_names'])
data = {'formset': formset,
'is_update': is_update,
'program': program,
'access_point_names': access_point_names,
'access_point_keys': access_point_keys,
'access_point_tzs': access_point_tzs}
context = template.RequestContext(request, data)
return shortcuts.render_to_response('manage_activity.html', context)
|
15ddb2a423a82f4a3ebfda43b20a697be54d0d1d
| 34,282 |
import json
def dowork(lon, lat):
""" Actually do stuff"""
pgconn = get_dbconn('postgis')
cursor = pgconn.cursor()
res = dict(mcds=[])
cursor.execute("""
SELECT issue at time zone 'UTC' as i,
expire at time zone 'UTC' as e,
product_num,
product_id
from text_products WHERE pil = 'SWOMCD'
and ST_Contains(geom, ST_GeomFromEWKT('SRID=4326;POINT(%s %s)'))
ORDER by product_id DESC
""", (lon, lat))
for row in cursor:
url = ("http://www.spc.noaa.gov/products/md/%s/md%04i.html"
) % (row[3][:4], row[2])
res['mcds'].append(
dict(spcurl=url,
year=row[0].year,
utc_issue=row[0].strftime(ISO9660),
utc_expire=row[1].strftime(ISO9660),
product_num=row[2],
product_id=row[3]))
return json.dumps(res)
|
4acc671cd3701562866c237b06267f7b5038413b
| 34,283 |
from scdali.models.gp import SparseGP
def run_interpolation(
A, D,
cell_state,
kernel='Linear',
num_inducing=800,
maxiter=2000,
return_prior_mean=False,
n_cores=1):
"""Run scDALI interpolation of allelic rates for each region.
A, D are assumed to be n-by-d, where n is the number of cells and d the
number of regions to model.
Args:
A: Alternative counts for each cell and region.
D: Total counts for each cell and region.
cell_state: Matrix of cell states, e.g. clusters or coordinates
in a low-dimensional cell-state space.
kernel: Kernel function for GP interpolation, e.g. 'Linear' or 'RBF'.
num_inducing: Number of inducing points for the GP model
maxiter: Max iterations for GP optimization.
return_prior_mean: Return the estimated GP prior mean.
n_cores: Number of cores to use.
Returns:
Estimated posterior mean and variances for each region.
"""
D = atleast_2d_column(D)
A = atleast_2d_column(A)
if A.shape != D.shape:
raise ValueError('A and D need to be of the same shape.')
if cell_state is None:
raise ValueError('Interpolation requires cell_state to be specified')
init_kwargs = {}
fit_kwargs = {}
init_kwargs['kernel'] = kernel
init_kwargs['num_inducing'] = num_inducing
fit_kwargs['maxiter'] = maxiter
init_kwargs['E'] = cell_state
n_cores = min(n_cores, D.shape[1])
print('[scdali] Processing %d regions on %d core(s) ... ' % (D.shape[1], n_cores), flush=True)
callbacks = []
callbacks.append(create_method_callback('compute_posterior', E=cell_state))
if return_prior_mean:
callbacks.append(create_method_callback('get_prior_mean'))
show_progress = False if n_cores > 1 else True
f = partial(
run_model,
SparseGP,
init_kwargs=init_kwargs,
fit_kwargs=fit_kwargs,
callbacks=callbacks,
show_progress=show_progress)
results = process_parallel(
f,
mat_dict={'A':A, 'D':D},
n_cores=n_cores)
out = dict()
out['posterior_mean'] = np.asarray([r[0][0].flatten() for r in results]).T
out['posterior_var'] = np.asarray([r[0][1].flatten() for r in results]).T
if return_prior_mean:
out['prior_mean'] = [float(r[1]) for r in results]
return out
|
ccc07014ebae18a37e878f34c968b84b23995a5f
| 34,284 |
def extended_knapsack_dp(value_set, weight_set, total_allowed_weight, max_number_of_elements):
"""Knapsack with limited number of elements allowed.
Notes
-----
The rows are the number of elements in value_set
Columns are the number of weights, ranging from 0 to weight_set.
The third dimension is solution for the `max_number_of_elements` allowed.
    So this differs a little from the normal knapsack: when the current element is selected, we link to the previous third dimension.
Parameters
----------
value_set : [int]
weight_set : [int]
total_allowed_weight : int
max_number_of_elements : int
Returns
-------
int
        Maximum total value achievable.
"""
number_of_elements = len(value_set)
# Just bounding the problem as the user can give ridiculous inputs. This is not an unbounded knapsack problem.
max_number_of_elements = min(max_number_of_elements, number_of_elements)
total_allowed_weight = min(total_allowed_weight, sum(weight_set))
# Create a 3D dp table. 2D internal table is a singular knapsack table, outer dimension for number of elements allowed.
dp_table = [
[
[0 for _ in range(total_allowed_weight + 1)] for _ in range(number_of_elements + 1)
] for _ in range(max_number_of_elements + 1)
]
for dimension in range(1, max_number_of_elements + 1):
for row in range(1, len(value_set) + 1):
for current_weight in range(1, total_allowed_weight + 1):
element_weight = weight_set[row - 1]
if current_weight < element_weight:
dp_table[dimension][row][current_weight] = dp_table[dimension][row -1][current_weight]
else:
#if dp_table[dimension - 1][row][column - element_weight] + value_set[row - 1] > sum(value_set):
dp_table[dimension][row][current_weight] = max(
dp_table[dimension][row -1][current_weight], #Current element is not selected.
dp_table[dimension - 1][row -1][current_weight - element_weight] + value_set[row - 1]
)
    return dp_table[max_number_of_elements][number_of_elements][total_allowed_weight]
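# Example usage (a minimal sketch): with at most 2 items and capacity 50,
# the best choice is the items with values 100 and 120 (weights 20 and 30).
assert extended_knapsack_dp([60, 100, 120], [10, 20, 30], 50, 2) == 220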
|
85527b28706a92eeb822594a0a98e90e4e6f6257
| 34,285 |
import numpy as np
def get_matrix_rbf(X, var, have_var=True):
"""e^-||(x_i - x_j)||/ 2*var^2 RBF"""
(rows, _) = X.shape
K = np.empty((rows, rows))
for i in range(rows):
c = np.sum(np.abs(X - X[i]) ** 2, axis=-1) ** (1. / 2) # norm
if (have_var):
c = c / (2 * (var ** 2))
K[i] = c
w = np.exp(- K.T)
return w
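# Example usage (a minimal sketch):
X_demo = np.array([[0.0, 0.0], [3.0, 4.0]])
K_demo = get_matrix_rbf(X_demo, var=1.0)
# K_demo[0, 1] == exp(-2.5), i.e. exp(-||x0 - x1|| / (2 * var ** 2)) with ||x0 - x1|| == 5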
|
b5260a44a1abca1ddbf9703a68e9a562feb22dc6
| 34,287 |
import time
import logging
def buildDict(filterKeys=None, mapColumns=False, targetDatasets=None):
""" Maps the dataset names, contained tables, and contained
columns in the projects BigQuery.
Args:
filterKeys (LIST of STRING): A list of substrings,
one of which must be included in
the table name for the table to be
included in the final dictionary. If
no filterKeys are pass all tables are
included.
mapColumns (BOOL): A boolean flag that indicates if
the columns of every tables
should be mapped or not
        targetDatasets (LIST of STRING): list of datasets to be mapped
Returns:
DICT: {"table_id": {
"datasets": A list of names of datasets that contain this table,
"columns": A list of lists. Each containing column names of this
for it's respective dataset,
"masterCol": A list containing all the column names present amgonst
instances of this table in various datasets
}}
"""
def identifyCols(build):
""" Condenses lists of lists of column names into one list containing every
unique column name
Args:
Build DICT: {"table_id": {
"datasets": A list of names of datasets that contain this table,
"columns": A list of lists. Each containing column names of this
for it's respective dataset
}}
Returns:
DICT: {"table_id": {
"datasets": A list of names of datasets that contain this table,
"columns": A list of lists. Each containing column names of this
for it's respective dataset,
"masterCol": A list containing all the column names present amgonst
instances of this table in various datasets
}}
"""
def allColumns(colLists):
master_list = []
master_low = []
for formatted_list in colLists:
lows = [name.lower() for name in formatted_list]
for name_low, name_formatted in zip(lows, formatted_list):
if name_low not in master_low and name_low != 'agent':
master_list.append(name_formatted)
master_low.append(name_low)
return master_list
for key in build.keys():
cols = allColumns(build[key]['columns'])
build[key]['masterCol'] = cols
return build
start = time.time()
datasets = list(client.list_datasets())
if targetDatasets:
datasets = [dataset for dataset in datasets
if dataset.dataset_id in targetDatasets]
if stage == 'test':
datasets = [dataset for dataset in datasets if 'test' in dataset.dataset_id]
logging.info("Building Dict with {} datasets".format(len(datasets)))
logging.info("Datasets: {}".format([dataset.dataset_id for
dataset in datasets]))
build = mapDatasets(datasets, mapColumns, filterKeys)
if mapColumns:
build = identifyCols(build)
logging.info("The function buildDict \
needed {} seconds to complete".format(
round(time.time() - start),
0))
return build
|
882ab375eacd946f8d3751055076566d30a1af46
| 34,289 |
# `reduce` and `create_filter` below are assumed to be module-level image-pyramid helpers
# (Gaussian blur + subsample, and a normalized 1D Gaussian kernel), not functools.reduce.
def build_gaussian_pyramid(im, max_levels, filter_size):
"""
Construct a Gaussian pyramid for a given image
:param im: a grayscale image with double values in [0,1]
:param max_levels: the maximal number of levels in the resulting pyramid.
:param filter_size: the size of the Gaussian filter (an odd scalar that represents a squared filter) to be used
in constructing the pyramid filter
:return: tuple(pyr, filter_vec) where:
pyr: the resulting pyramid as a standard python array with maximum length of max_levels,
where each element of the array is a grayscale image.
filter_vec: normalized row vector of shape(1, filter_size) used for the pyramid construction
"""
pyr = [im]
filter_vec = create_filter(filter_size)
i = 1 # counter to keep track of levels
rows, cols = im.shape
while i < max_levels and min(rows, cols) > MIN_RES:
pyr.append(reduce(pyr[i - 1], filter_vec))
rows, cols = pyr[i].shape
i += 1
return pyr, filter_vec
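# A minimal sketch (an assumption, not the original module's helper) of the
# `reduce` step the builder above relies on: blur with the separable row
# filter, then subsample by a factor of two along each axis.
from scipy.ndimage import convolve
def reduce(im, filter_vec):
    blurred = convolve(im, filter_vec, mode='mirror')          # blur along rows
    blurred = convolve(blurred, filter_vec.T, mode='mirror')   # blur along columns
    return blurred[::2, ::2]                                   # keep every other pixel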
|
9e7975695a3ba435644595ea6c2ce1e876d022b1
| 34,290 |
from datetime import timedelta
def broadcast_weeks(start, end):
"""return broadcast weeks with start, end for date range"""
starts = broadcast_week_starts(start, end)
return [(start, start+timedelta(days=6)) for start in starts]
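# Hedged sketch (not the original source) of the `broadcast_week_starts`
# dependency used above, assuming broadcast weeks run Monday through Sunday.
def broadcast_week_starts(start, end):
    first = start - timedelta(days=start.weekday())  # Monday on or before `start`
    starts, current = [], first
    while current <= end:
        starts.append(current)
        current += timedelta(days=7)
    return starts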
|
1a19fb8128104c480d2c2bd2ccd2b3c4e7a2d428
| 34,292 |
import salt.modules.cmdmod
def _aix_iqn():
"""
Return iSCSI IQN from an AIX host.
"""
ret = []
aix_cmd = "lsattr -E -l iscsi0 | grep initiator_name"
aix_ret = salt.modules.cmdmod.run(aix_cmd)
if aix_ret[0].isalpha():
try:
ret.append(aix_ret.split()[1].rstrip())
except IndexError:
pass
return ret
|
f51a1f95bb42db9f8bc42ae26f8409074d3c81f7
| 34,293 |
from flask import current_app
def generate_token(user):
    """ Currently this is a workaround
since the latest version that already has this function
is not published on PyPI yet and we don't want
to install the package directly from GitHub.
See: https://github.com/mattupstate/flask-jwt/blob/9f4f3bc8dce9da5dd8a567dfada0854e0cf656ae/flask_jwt/__init__.py#L145
"""
jwt = current_app.extensions['jwt']
token = jwt.jwt_encode_callback(user)
return token
|
e357d33f0ba8ee5a80f1d45d3576e0676ca6660d
| 34,294 |
from typing import Counter
def calculate_lines_per_gloss(lines):
"""
    Estimates how many corpus lines make up each gloss
Parameters
----------
lines : list
lines in the corpus
Returns
-------
number : int
the count of lines per gloss
"""
line_counts = [len(x[1]) for x in lines]
equaled = list()
number = 1
for i, line in enumerate(line_counts):
if i == 0:
equaled.append(False)
else:
equaled.append(line == line_counts[i - 1])
if False not in equaled[1:]:
# All lines happen to have the same length
for i in range(2, 6):
if len(lines) % i == 0:
number = i
else:
false_intervals = list()
ind = 0
for i, e in enumerate(equaled):
if i == 0:
continue
if not e:
false_intervals.append(i - ind)
ind = i
false_intervals.append(i + 1 - ind)
counter = Counter(false_intervals)
number = max(counter.keys(), key=lambda x: (counter[x], x))
if number > 10:
prev_maxes = set([number])
while number > 10:
prev_maxes.add(number)
number = max(x for x in false_intervals if x not in prev_maxes)
return number
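# Illustrative check with synthetic data (not from the original corpus): each
# gloss spans three corpus lines, so consecutive groups of three lines share
# the same token count.
demo_lines = [("line", ["tok"] * n) for n in (4, 4, 4, 2, 2, 2, 5, 5, 5)]
assert calculate_lines_per_gloss(demo_lines) == 3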
|
5e7a3ab4b819f4a7dd0cc7053eb09a5572e5f448
| 34,296 |
import numpy as np
def random_selection(population: np.ndarray, mut_p: float) -> np.ndarray:
"""Randomly select genes to mutate
Args:
population (np.ndarray): Population of chromosomes
mut_p (float): Mutation probability
Returns:
np.ndarray: Mask of elements from population to mutate
"""
return np.random.rand(*population.shape) < mut_p
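# Illustrative usage: roughly 10% of genes get flagged for mutation on average.
demo_population = np.zeros((4, 8))                # 4 chromosomes, 8 genes each
mutation_mask = random_selection(demo_population, mut_p=0.1)
# `mutation_mask` is a boolean array with the same shape as `demo_population`.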
|
b498bf816d085aa1cd4a2f68d144e4b4df051d77
| 34,297 |
def _combo_runner(fn, combos, constants, split=False, parallel=False,
num_workers=None, executor=None, verbosity=1, pool=None):
"""Core combo runner, i.e. no parsing of arguments.
"""
executor = _choose_executor_depr_pool(executor, pool)
n = prod(len(x) for _, x in combos)
ndim = len(combos)
kws = {'fn': fn, 'combos': combos, 'constants': constants, 'n': n,
'ndim': ndim, 'verbosity': verbosity}
# Custom pool supplied
if executor is not None:
results = _combo_runner_executor(executor=executor, **kws)
    # Else, for parallel runs, default to a process pool executor
elif parallel or num_workers:
results = _combo_runner_parallel(num_workers=num_workers, **kws)
# Evaluate combos sequentially
else:
results = _combo_runner_sequential(**kws)
return tuple(unzip(results, ndim)) if split else results
|
86a57d1800457166395ae2baa4e04ca8ea4c0cdf
| 34,298 |
from typing import Sequence
import fnmatch
def _should_ignore(fd_name: str, patterns: Sequence[str]) -> bool:
"""Return whether `fd_name` should be ignored according to `patterns`.
Examples
--------
>>> fd_name = "google/protobuf/empty.proto"
>>> pattern = "google/protobuf/*"
>>> _should_ignore(fd_name, [pattern])
True
>>> fd_name = "foo/bar"
>>> _should_ignore(fd_name, [pattern])
False
"""
return any(fnmatch.fnmatchcase(fd_name, pattern) for pattern in patterns)
|
8bf698afddbda869e26ebcaa98e1f4e950117c08
| 34,300 |
def unc_inertia_eval(awg, afg, bout, bi, mw, ed, out_xml):
""" Unconventional aircraft Moment of Inertia analysis main function.
It dvides the cases defined and evaluates them calling the
function in the with_fuse_geom subfolder.
Source: An introduction to mechanics, 2nd ed., D. Kleppner
and R. Kolenkow, Cambridge University Press.
ARGUMENTS
(class) awg --Arg.: AircraftWingGeometry class.
(class) afg --Arg.: AircraftFuseGeometry class.
(class) bout --Arg.: BalanceOutputs class.
(class) bi --Arg.: BalanceInputs class.
(class) mw --Arg.: MassesWeights class.
(class) ed --Arg.: EngineData class.
##======= Classes are defined in the InputClasses folder =======##
RETURN
(float_array) fx --Out.: Array containing the x-coordinates
of the fuselage nodes.
(float_array) fy --Out.: Array containing the y-coordinates
of the fuselage nodes.
(float_array) fz --Out.: Array containing the z-coordinates
of the fuselage nodes.
(float_array) wx --Out.: Array containing the x-coordinates
of the wing nodes.
(float_array) wy --Out.: Array containing the y-coordinates
of the wing nodes.
(float_array) wz --Out.: Array containing the z-coordinates
of the wing nodes.
(class) bout --Out.: Updated BalanceOutputs class.
"""
center_of_gravity_seg = []
mass_component = []
log.info('---------- Inertia Evaluation ---------')
if bi.USER_EN_PLACEMENT:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = lumpedmassesinertia.engine_inertia(\
bout.center_of_gravity, ed)
else:
(bout.Ixxen, bout.Iyyen, bout.Izzen, bout.Ixyen, bout.Iyzen,\
bout.Ixzen) = (0, 0, 0, 0, 0, 0)
# Max Payload Configuration
log.info('------------ Lumped mass Inertia ------------')
log.info('--------- Max Payload configuration ---------')
(fx, fy, fz, Ixxf, Iyyf, Izzf, Ixyf, Iyzf, Ixzf)\
= lumpedmassesinertia.fuselage_inertia(\
bi.SPACING_FUSE, bout.center_of_gravity, mw.mass_seg_i,\
afg, out_xml)
(wx, wy, wz, Ixxw, Iyyw, Izzw, Ixyw, Iyzw, Ixzw)\
= lumpedmassesinertia.wing_inertia(\
bi.WPP, bi.SPACING_WING, afg.fuse_nb, bout.center_of_gravity,\
mw.mass_seg_i, awg, out_xml)
rd = check_rounding(Izzf + Izzw + bout.Izzen,\
Ixyf + Ixyw + bout.Ixyen)
bout.Ixx_lump = round(Ixxf + Ixxw + bout.Ixxen,rd)
bout.Iyy_lump = round(Iyyf + Iyyw + bout.Iyyen,rd)
bout.Izz_lump = round(Izzf + Izzw + bout.Izzen,rd)
bout.Ixy_lump = round(Ixyf + Ixyw + bout.Ixyen,rd)
bout.Iyz_lump = round(Iyzf + Iyzw + bout.Iyzen,rd)
bout.Ixz_lump = round(Ixzf + Ixzw + bout.Ixzen,rd)
# Zero Fuel Configuration
log.info('---------- Zero Fuel configuration ----------')
(fx, fy, fz, Ixxf2, Iyyf2, Izzf2, Ixyf2, Iyzf2, Ixzf2)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_zfm, mw.ms_zfm, afg, out_xml)
(wx, wy, wz, Ixxw2, Iyyw2, Izzw2, Ixyw2, Iyzw2, Ixzw2)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_zfm, mw.ms_zfm, awg, out_xml)
bout.Ixx_lump_zfm = round(Ixxf2 + Ixxw2 + bout.Ixxen,rd)
bout.Iyy_lump_zfm = round(Iyyf2 + Iyyw2 + bout.Iyyen,rd)
bout.Izz_lump_zfm = round(Izzf2 + Izzw2 + bout.Izzen,rd)
bout.Ixy_lump_zfm = round(Ixyf2 + Ixyw2 + bout.Ixyen,rd)
bout.Iyz_lump_zfm = round(Iyzf2 + Iyzw2 + bout.Iyzen,rd)
bout.Ixz_lump_zfm = round(Ixzf2 + Ixzw2 + bout.Ixzen,rd)
# Zero Payload Configuration
log.info('--------- Zero Payload configuration --------')
(fx, fy, fz, Ixxf3, Iyyf3, Izzf3, Ixyf3, Iyzf3, Ixzf3)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_zpm, mw.ms_zpm, afg, out_xml)
(wx, wy, wz, Ixxw3, Iyyw3, Izzw3, Ixyw3, Iyzw3, Ixzw3)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_zpm, mw.ms_zpm, awg, out_xml)
bout.Ixx_lump_zpm = round(Ixxf3 + Ixxw3 + bout.Ixxen,rd)
bout.Iyy_lump_zpm = round(Iyyf3 + Iyyw3 + bout.Iyyen,rd)
bout.Izz_lump_zpm = round(Izzf3 + Izzw3 + bout.Izzen,rd)
bout.Ixy_lump_zpm = round(Ixyf3 + Ixyw3 + bout.Ixyen,rd)
bout.Iyz_lump_zpm = round(Iyzf3 + Iyzw3 + bout.Iyzen,rd)
bout.Ixz_lump_zpm = round(Ixzf3 + Ixzw3 + bout.Ixzen,rd)
# OEM Configuration
log.info('------------- OEM configuration -------------')
(fx, fy, fz, Ixxf4, Iyyf4, Izzf4, Ixyf4, Iyzf4, Ixzf4)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_oem, mw.ms_oem, afg, out_xml)
(wx, wy, wz, Ixxw4, Iyyw4, Izzw4, Ixyw4, Iyzw4, Ixzw4)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_oem, mw.ms_oem, awg, out_xml)
bout.Ixx_lump_oem = round(Ixxf4 + Ixxw4 + bout.Ixxen,rd)
bout.Iyy_lump_oem = round(Iyyf4 + Iyyw4 + bout.Iyyen,rd)
bout.Izz_lump_oem = round(Izzf4 + Izzw4 + bout.Izzen,rd)
bout.Ixy_lump_oem = round(Ixyf4 + Ixyw4 + bout.Ixyen,rd)
bout.Iyz_lump_oem = round(Iyzf4 + Iyzw4 + bout.Iyzen,rd)
bout.Ixz_lump_oem = round(Ixzf4 + Ixzw4 + bout.Ixzen,rd)
# User Configuration
if bi.USER_CASE:
log.info('------------- User configuration ------------')
(fx, fy, fz, Ixxfu, Iyyfu, Izzfu, Ixyfu, Iyzfu, Ixzfu)\
= lumpedmassesinertia.fuselage_inertia(bi.SPACING_FUSE,\
bout.cg_user, mw.ms_user, afg, out_xml)
(wx, wy, wz, Ixxwu, Iyywu, Izzwu, Ixywu, Iyzwu, Ixzwu)\
= lumpedmassesinertia.wing_inertia(bi.WPP, bi.SPACING_WING,\
afg.fuse_nb, bout.cg_user, mw.ms_user, awg, out_xml)
bout.Ixx_lump_user = round(Ixxfu + Ixxwu + bout.Ixxen,rd)
bout.Iyy_lump_user = round(Iyyfu + Iyywu + bout.Iyyen,rd)
bout.Izz_lump_user = round(Izzfu + Izzwu + bout.Izzen,rd)
bout.Ixy_lump_user = round(Ixyfu + Ixywu + bout.Ixyen,rd)
bout.Iyz_lump_user = round(Iyzfu + Iyzwu + bout.Iyzen,rd)
bout.Ixz_lump_user = round(Ixzfu + Ixzwu + bout.Ixzen,rd)
bout.Ixxen = round(bout.Ixxen, rd)
bout.Iyyen = round(bout.Iyyen, rd)
bout.Izzen = round(bout.Izzen, rd)
bout.Ixyen = round(bout.Ixyen, rd)
bout.Iyzen = round(bout.Iyzen, rd)
bout.Ixzen = round(bout.Ixzen, rd)
return(bout, fx, fy, fz, wx, wy, wz)
|
cff3ebe8f0e613c5742c3d16bae29dfcc0998d2a
| 34,301 |
import json
def str2body(v):
"""
convert str to json data or keep original string
:param v:
:return:
"""
try:
return json.loads(v)
    except (ValueError, TypeError):
return v
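# Illustrative usage:
# str2body('{"a": 1}')   -> {'a': 1}
# str2body('[1, 2, 3]')  -> [1, 2, 3]
# str2body('plain text') -> 'plain text'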
|
5785d9dca1d1120a4827feb93382d1d0bf68519a
| 34,302 |
from datetime import datetime
def get_default_law_key():
""" Default key needs to be unique timestamp until changed """
x = str(datetime.now())
key = x[5:25]
return key
|
569d1ff7cc9c6d0e95dded80c1e068ff885e95c7
| 34,303 |
import networkx as nx
import numpy as np
def _evaluate_Delaunay_component(
subgraph_RE_comp: nx.Graph,
subgraph_R_comp: nx.Graph,
subgraph_E_comp: nx.Graph,
num_R: int,
):
"""
Phase 3
Evaluates given graph-connected component.
    :param subgraph_RE_comp: Delaunay graph of the component containing R and E points.
:param subgraph_R_comp: Delaunay graph of component restricted to R.
:param subgraph_E_comp: Delaunay graph of component restricted to E.
:param num_R: total number of R points.
:return: indices of R points in the component; indices of E points in the component;
    component consistency score; component quality score; number of heterogeneous edges
in the component; total number of edges in the component
"""
comp_R_idxs = np.array(list(subgraph_R_comp.nodes()))
comp_E_idxs = np.array(list(subgraph_E_comp.nodes())) - num_R
comp_consistency = _get_graph_consistency(len(comp_R_idxs), len(comp_E_idxs))
comp_quality, num_comp_RE_edges, num_total_comp_edges = _get_graph_quality(
subgraph_RE_comp, subgraph_R_comp, subgraph_E_comp
)
return (
comp_R_idxs,
comp_E_idxs,
comp_consistency,
comp_quality,
num_comp_RE_edges,
num_total_comp_edges,
)
|
98f12800201cf89f3a92c07d93d9b446e019ec60
| 34,304 |
def abcd_normalize(A=None, B=None, C=None, D=None):
"""Check state-space matrices and ensure they are two-dimensional.
If enough information on the system is provided, that is, enough
properly-shaped arrays are passed to the function, the missing ones
are built from this information, ensuring the correct number of
rows and columns. Otherwise a ValueError is raised.
Parameters
----------
A, B, C, D : array_like, optional
State-space matrices. All of them are None (missing) by default.
Returns
-------
A, B, C, D : array
Properly shaped state-space matrices.
Raises
------
ValueError
If not enough information on the system was provided.
"""
A, B, C, D = map(_atleast_2d_or_none, (A, B, C, D))
MA, NA = _shape_or_none(A)
MB, NB = _shape_or_none(B)
MC, NC = _shape_or_none(C)
MD, ND = _shape_or_none(D)
p = _choice_not_none(MA, MB, NC)
q = _choice_not_none(NB, ND)
r = _choice_not_none(MC, MD)
if p is None or q is None or r is None:
raise ValueError("Not enough information on the system.")
A, B, C, D = map(_none_to_empty_2d, (A, B, C, D))
A = _restore(A, (p, p))
B = _restore(B, (p, q))
C = _restore(C, (r, p))
D = _restore(D, (r, q))
return A, B, C, D
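# Illustrative usage via SciPy's public version of this helper (the private
# `_shape_or_none`/`_restore` helpers above are assumed to live elsewhere in
# the module):
# >>> from scipy.signal import abcd_normalize
# >>> A, B, C, D = abcd_normalize(A=[[1, 2], [3, 4]], B=[[1], [0]], C=[[1, 0]])
# >>> D.shape   # D is filled in as a properly shaped zero matrix
# (1, 1)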
|
2292347708f93570d96062d6e94058e55c96b112
| 34,307 |
def extract_full_symbol(full_symbol: str):
"""
:return: (symbol, exchange)
"""
tmp = full_symbol.split(' ')
if len(tmp) < 4:
return "unknwonsymbol", Exchange.SHFE
symbol = tmp[2] + tmp[3]
exchange_str = tmp[0]
ex = Exchange(exchange_str)
if ex in [Exchange.SHFE, Exchange.DCE, Exchange.INE]:
symbol = symbol.lower()
return symbol, ex
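# Illustrative parse (the full-symbol layout "<exchange> <type> <root> <expiry>"
# is inferred from the split logic above; the example symbols are hypothetical):
# extract_full_symbol("SHFE F RB 2010")  -> ("rb2010", Exchange.SHFE)
# extract_full_symbol("CFFEX F IF 2006") -> ("IF2006", Exchange.CFFEX)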
|
7e031bfb969215092765a33cdb93377a6c98d922
| 34,309 |
from flask import flash, redirect, url_for
def remove(user_id):
"""User removing"""
user = User.query.get(user_id)
db.session.delete(user)
db.session.commit()
    flash('User "%s" successfully removed' % user.name, 'success')
return redirect(url_for('backend.index'))
|
33ced3d0e54a3edcc04e89440faca6778dd65944
| 34,310 |
from typing import Dict
from typing import List
from typing import Tuple
def phonemes_to_sentences(
arpabet: Dict[str, List[List[str]]],
data: Tuple[List[list], ...],
print_every: int,
of: int,
) -> Tuple[str, ...]:
"""Convert list of phoneme lists to sentences."""
    sentences = []
    for i, d in enumerate(data, 1):
        if i % print_every == 0:
            print("Line:", i, "of", of)
        sentences.append(" ".join(phonetic_to_word(arpabet, [p]) for p in d))
    return tuple(sentences)
|
25f21e79f5029a80adeb4bd277c273db1e0caf31
| 34,311 |
import scipy.signal as sps
def butter_bandpass_filter(flux, lowcut, fs, order=3):
"""
Apply a Butterworth high-pass filter.
Args:
flux (array): The flux array.
lowcut (float): The frequency cut off.
        fs (float): The sampling frequency.
order (Optional[int]): The order of the Butterworth filter. Default
is 3.
"""
nyq = 0.5 * fs
low = lowcut / nyq
    b, a = sps.butter(order, low, btype='highpass')  # use the Nyquist-normalized cutoff
y = sps.lfilter(b, a, flux)
return y
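# Illustrative usage (synthetic data): remove a slow linear drift from a 10 Hz
# tone sampled at 100 Hz.
import numpy as np
t = np.arange(0, 5, 0.01)                        # fs = 100 Hz
flux_demo = np.sin(2 * np.pi * 10 * t) + 0.5 * t  # tone plus drift
flux_filtered = butter_bandpass_filter(flux_demo, lowcut=1.0, fs=100.0)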
|
aed17a9f43292a5c7274e3ad0d708aabf14e4404
| 34,313 |
import hachoir.metadata
import hachoir.parser
def main(filepath=None):
"""execution starts here"""
# print("main():")
    if filepath is None:
filepath = r"C:\Program Files\7-Zip\7z.exe"
print("Getting Metadata From File: " + filepath)
parser = hachoir.parser.createParser(str(filepath))
metadata = hachoir.metadata.extractMetadata(parser)
# See what keys you can extract
# https://stackoverflow.com/questions/14546533/hachoir-retrieving-data-from-a-group
if not metadata:
print("No Metadata Found!")
return None
else:
for k in metadata._Metadata__data:
if k:
# print(k) # print all keys
if metadata.has(k):
print(f"key: `{k}` value:")
for x in metadata.getValues(k):
print(x)
print("\n")
# print(metadata.get("comment", index=0))
# print(metadata.getItem("comment", index=0))
# print(metadata.__iter__)
print(metadata)
metadata_dict = metadata.exportDictionary()["Metadata"]
for k, v in metadata_dict.items():
print(f"key: `{k}` Value: `{v}`")
# print(metadata_dict["Creation date"])
# #array:
# print(metadata.getValues("creation_date"))
# for x in metadata.getValues("creation_date"):
# print(x)
|
eb6f8d3086cd3e527be7a7d2f4c79453fed2f21c
| 34,314 |
import numpy as np
def calculate_background_gradient(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the gradient of the background cost function. For each u, v
this is given as 2*coefficent*(analysis wind - background wind).
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
y: float array
value of gradient of background cost function
"""
the_shape = u.shape
u_grad = np.zeros(the_shape)
v_grad = np.zeros(the_shape)
w_grad = np.zeros(the_shape)
for i in range(the_shape[0]):
u_grad[i] = Cb*2*(u[i]-u_back[i])*(weights[i])
v_grad[i] = Cb*2*(v[i]-v_back[i])*(weights[i])
y = np.stack([u_grad, v_grad, w_grad], axis=0)
return y.flatten()
|
bab0558148faf3694c7b5c4db174af69c70fd526
| 34,315 |
import pandas as pd
def read_embedding_vocabularies(filenames):
"""
Reads every vector embedding file in the given collection of
filenames, and returns the union of their vocabularies. (The
files are assumed to be hdf5 files containing dataframes, and
the vocabularies are their indices.
"""
result = pd.Index([])
for filename in filenames:
vectors = load_hdf(filename)
result = result.union(vectors.index)
return result
|
17c72725cc8fe06ac000a00eb76fc19bb98e6dc5
| 34,316 |
def sorted_keys(lut: dict) -> list:
"""returns sorted lut keys if env_flags[sorted] is set to True"""
if env_flags['sorted'] is True or str(env_flags['sorted']).lower() == 'true':
        return sorted(lut.keys())
    return list(lut.keys())
|
db801d14096af85de679ccf26c7ef3045b992a69
| 34,317 |
def sigmax(dim=2):
"""Qiskit wrapper of sigma-X operator.
"""
if dim == 2:
return ops.sigmax()
else:
        raise ValueError('Invalid level specification of the qubit subspace')
|
a27ec81ad0396167c6207120a031375816b463ec
| 34,318 |
def is_relative_path(p):
""" Return True if path is relative """
p = p.replace('\\', '/')
if p and p[0] == '/':
return False
if sabnzbd.WIN32 and p and len(p) > 2:
if p[0].isalpha() and p[1] == ':' and p[2] == '/':
return False
return True
|
0368ea2588344f90eb915f7144f0b60145b13b74
| 34,319 |
import types
def setting_keybord(
is_admin: bool, language: str, session_timeout: int
) -> types.InlineKeyboardMarkup:
"""انشاء كيبورد الاعدادات
المعطيات:
is_admin (bool): هل الكيبورد مرسل لادمن
language (str): لغة االكيبورد
session_timeout (int): وقت الجلسة
المخرجات:
types.InlineKeyboardMarkup: كيبورد الاعدادات
"""
change_password_button = get_message("change_password_button", language=language)
change_language_button = get_message("change_language_button", language=language)
session_timeout_button = get_message("session_timeout_button", language=language)
bot_configuration_button = get_message(
"bot_configuration_button", language=language
)
bot_messages_button = get_message("bot_messages_button", language=language)
infinity_session_timeout = get_message(
"infinity_session_timeout", language=language
)
back_button = get_message("back_button", language=language)
mod = lambda num: num % max_session_timeout
session_timeout = mod(session_timeout)
session_timeout_word = (
infinity_session_timeout
if session_timeout == 0
else hours2words(session_timeout, language=language)
)
rows = []
if is_admin:
rows.append(
{
bot_configuration_button: {
"callback_data": f"update{callback_split_chr}bot_messages"
},
bot_messages_button: {
"callback_data": f"update{callback_split_chr}bot_configuration"
},
}
)
rows.extend(
[
{
change_password_button: {
"callback_data": f"run{callback_split_chr}change_password"
},
change_language_button: {
"callback_data": f"run{callback_split_chr}change_language"
},
},
{
session_timeout_button
+ " 👇": {
"callback_data": f"print{callback_split_chr}{session_timeout_button}"
},
},
# if session_timeout == 0, in button will be '♾'
{
session_timeout_word: {
"callback_data": f"print{callback_split_chr}{session_timeout_word}"
}
},
{
# use `mod` to return `0` if `session_timeout+1` == `max_session_timeout`
# and return `max_session_timeout` if `session_timeout-1` == '-1'
"⬅️": {
"callback_data": f"updatek{callback_split_chr}setting{callback_split_chr}{mod(session_timeout-1)}"
},
"➡️": {
"callback_data": f"updatek{callback_split_chr}setting{callback_split_chr}{mod(session_timeout+1)}"
},
},
{
back_button: {"callback_data": f"update{callback_split_chr}home_page"},
},
]
)
return _quick_markup(rows)
|
c53f2dd95dae6bb972789ab81d1d15ba6ddb5be4
| 34,320 |
def IoU(bbox1, bbox2):
"""Compute IoU of two bounding boxes
Args:
bbox1 - 4-tuple (x, y, w, h) where (x, y) is the top left corner of
the bounding box, and (w, h) are width and height of the box.
bbox2 - 4-tuple (x, y, w, h) where (x, y) is the top left corner of
the bounding box, and (w, h) are width and height of the box.
Returns:
score - IoU score
"""
x1, y1, w1, h1 = bbox1
x2, y2, w2, h2 = bbox2
score = 0
### YOUR CODE HERE
leftIntBox = max(x1, x2)
rightIntBox = min(x1 + w1, x2 + w2)
topIntBox = max(y1, y2)
bottomIntBox = min(y1 + h1, y2 + h2)
    # Clamp at zero so non-overlapping boxes yield zero intersection area.
    intersection = max(0, rightIntBox - leftIntBox) * max(0, bottomIntBox - topIntBox)
union = w1 * h1 + w2 * h2
score = intersection / (union - intersection)
### END YOUR CODE
return score
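# Quick sanity check: two 2x2 boxes overlapping over half their width share an
# intersection of 2 and a union of 6, so the IoU is 1/3.
assert abs(IoU((0, 0, 2, 2), (1, 0, 2, 2)) - 1 / 3) < 1e-9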
|
dfb7aabac557a4ca71fe989da94e3775233d734b
| 34,321 |
def filter_red_pen(rgb, output_type="bool"):
"""
Create a mask to filter out red pen marks from a slide.
Args:
rgb: RGB image as a NumPy array.
output_type: Type of array to return (bool, float, or uint8).
Returns:
NumPy array representing the mask.
"""
result = (
filter_red(
rgb, red_lower_thresh=150, green_upper_thresh=80, blue_upper_thresh=90
)
& filter_red(
rgb, red_lower_thresh=110, green_upper_thresh=20, blue_upper_thresh=30
)
& filter_red(
rgb, red_lower_thresh=185, green_upper_thresh=65, blue_upper_thresh=105
)
& filter_red(
rgb, red_lower_thresh=195, green_upper_thresh=85, blue_upper_thresh=125
)
& filter_red(
rgb, red_lower_thresh=220, green_upper_thresh=115, blue_upper_thresh=145
)
& filter_red(
rgb, red_lower_thresh=125, green_upper_thresh=40, blue_upper_thresh=70
)
& filter_red(
rgb, red_lower_thresh=200, green_upper_thresh=120, blue_upper_thresh=150
)
& filter_red(
rgb, red_lower_thresh=100, green_upper_thresh=50, blue_upper_thresh=65
)
& filter_red(
rgb, red_lower_thresh=85, green_upper_thresh=25, blue_upper_thresh=45
)
)
if output_type == "bool":
pass
elif output_type == "float":
result = result.astype(float)
else:
result = result.astype("uint8") * 255
return result
|
33f78f9048754e306fbd1a9a783eff79a7442ed8
| 34,323 |
import shutil
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
def project_delete(request, project_slug):
"""
Make a project as deleted on POST, otherwise show a form asking for
confirmation of delete.
"""
project = get_object_or_404(request.user.projects.live(),
slug=project_slug)
if request.method == 'POST':
# Remove the repository checkout
shutil.rmtree(project.doc_path, ignore_errors=True)
# Delete the project and everything related to it
project.delete()
project_dashboard = reverse('projects_dashboard')
return HttpResponseRedirect(project_dashboard)
return render_to_response(
'projects/project_delete.html',
{'project': project},
context_instance=RequestContext(request)
)
|
0bd8bd63a51892438fb321487d8538630fe21854
| 34,324 |
def compose_left(*funcs):
"""Compose sync and async functions to operate in series.
Returns a function that applies other functions in sequence. The returned
function will be an async function iff at least one of the functions in the
sequence is async.
Functions are applied from left to right so that
``compose_left(f, g, h)(x, y)`` is the same as ``h(g(f(x, y)))``.
>>> inc = lambda i: i + 1
>>> compose_left(inc, str)(3)
'4'
See Also:
compose
pipe
"""
return compose(*reversed(funcs))
|
ea02215c46b369cea993dc5565937807e76b584f
| 34,325 |
def squarefree_part(x, timeout_duration=20, use_ecm=True):
"""return the squarefree part of x or 'NO DATA (timed out)'"""
F = squarefree_and_factorization(
x=x, timeout_duration=timeout_duration, use_ecm=use_ecm
)
if isinstance(F, str):
return F
else:
return F[0]
|
3143aa9032b65f22631488a7f8dd96b0ae71cb4c
| 34,326 |
from math import floor
def unix_time_to_id(unix_time):
"""
Converts the given unix time to id.
Parameters
----------
unix_time : `int`, `float`
The unix time to convert to id.
Returns
-------
id_ : `int`
"""
    return (floor(unix_time * 1000.) - DISCORD_EPOCH) << 22
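# Illustrative usage; DISCORD_EPOCH is assumed here to be the usual Discord
# epoch (2015-01-01T00:00:00 UTC, expressed in milliseconds).
# DISCORD_EPOCH = 1420070400000
# unix_time_to_id(1577836800.0)  # 2020-01-01T00:00:00 UTC -> a snowflake-style id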
|
50b40ab1e23971b8ba44cfdd6546158b42415d66
| 34,327 |
def method_withBadName_with_bad_params_on_single_line(myBadlyNamedParam, my_other_Bad_name):
"""Provide parameters with bad names on single line."""
return myBadlyNamedParam + my_other_Bad_name
|
10857c53fb36ef96ebfc4cd10b52b412b600836a
| 34,328 |
async def async_setup_platform(
    hass, config, async_add_devices, discovery_info: object = None
):
"""Set up Loxone Sensor from yaml"""
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
# Devices from yaml
if config != {}:
# Here setup all Sensors in Yaml-File
new_sensor = LoxoneCustomSensor(**config)
async_add_devices([new_sensor])
return True
return True
|
36a896270e448b50121d685aa3048006b7246abe
| 34,329 |
from datetime import datetime
def build_cal_args(args):
"""Determine the year/month. Return list [year] or [year, month]"""
t = datetime.date.today()
m, y = t.month, t.year
x = None
if len(args) == 0:
# no args - print default calendar
x = [y, m]
elif len(args) == 1:
# cal + ==> next month
if args[0] == '+':
x = [y + 1, 1] if (m == 12) else [y, m + 1]
# cal - ==> prev month
elif args[0] == '-':
x = [y - 1, 12] if (m == 1) else [y, m - 1]
# cal jun+ or cal 12+ ==> given month for next year
elif args[0].endswith('+'):
m = month2num(args[0][:-1])
x = [y + 1, int(args[0][:-1])] if (m == 0) else [y + 1, m]
# cal jun- or cal 12- ==> given month for prev year
elif args[0].endswith('-'):
m = month2num(args[0][:-1])
x = [y - 1, int(args[0][:-1])] if (m == 0) else [y - 1, m]
# cal 0nn ==> assume as year
elif args[0].startswith('0'):
x = [int(args[0])]
# cal numeric ==> assume as month if in 1..12
elif args[0].isnumeric():
m = int(args[0])
x = [y, m] if (1 <= m <= 12) else [m]
# cal any ==> convert month if possible or let cal handle it
else:
m = month2num(args[0])
x = [int(args[0])] if (m == 0) else [y, m]
elif len(args) == 2:
# cal month year
m = month2num(args[0])
x = [int(args[1]), int(args[0])] if (m == 0) \
else [int(args[1]), m]
return x
|
241630f6a15f1a8c2ac195740aba8a7307b763ba
| 34,330 |
import numpy as np
def _dice(array):
"""Given an array containing true/false positive/negative columns for the
'shadow' class, calculates the dice coefficient."""
v = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}
for val in v:
v[val] = float(np.sum(array['Shadow ' + val]))
    # Dice coefficient = 2*TP / ((TP + FP) + (TP + FN))
    dice = 2 * v['TP'] / ((v['FP'] + v['TP']) + (v['TP'] + v['FN']))
return dice
|
644f1e5d982b9d5239f2b339ae4bdd14c10ebc73
| 34,335 |
import math
def euclidian_dist(p1, p2):
""" p1 and p2 must both be of len 2 where p1 = (x1,y1); p2 = (x2,y2)"""
return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
|
3bceb46cf311e418cac83370c673772f885587be
| 34,336 |
def colorify_by_name(image, cmap_name, flip_map=False, rescale_type='min_max', limits=None, num_colors=256):
"""
Return 2D image as 3D RGB stack colored with a given colormap.
Parameters
----------
image: 2d array
image to convert to RGB
cmap_name: str
Matplotlib colormap or 'pure_red', 'pure_green', 'pure_blue'
flip_map: bool
invert colormap
rescale_type: str
'min_max': between extrema values of image
'dtype': full range of image dtype
'zero_max': between zero and image max
'limits': between limits given by parameter limits
limits: list
[min, max] limits to use for rescaling
num_colors: int
number of steps in color scale
Returns
-------
image_colored: array
3D RGB float array
cmap: Matplotlib colormap object
Generated colormap from name
"""
image = rescale_image(image, rescale_type=rescale_type, limits=limits)
cmap = cmaps_def(cmap_name, num_colors=num_colors, flip_map=flip_map)
image_colored = cmap(image)
return image_colored, cmap
|
68a034150e327c3fbef2603f422d18e935dec590
| 34,337 |
import numpy
def lazy_matrix_mul(m_a, m_b):
"""
    Multiply the two given matrices.
    Args:
        m_a: the first input matrix
        m_b: the second input matrix
    Returns:
        the matrix product of m_a and m_b
"""
return numpy.matmul(m_a, m_b)
|
3e58214d944d1962260b747af53dc8c82cc79b40
| 34,338 |
import pandas as pd
def denormalize_tags(df: pd.DataFrame) -> pd.DataFrame:
""" For a DataFrame with a column 'tags' that contains comma-space-separated
tags, denormalize the 'tags' column. """
if df.empty:
return add_missing_columns(df, required_columns=['tag'])
df = df.copy()
df['tags'] = df['tags'].str.split(', ')
return df \
.explode('tags', ignore_index=True) \
.rename(columns={'tags': 'tag'})
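# Illustrative usage (synthetic data): one row with two comma-space-separated
# tags explodes into two rows with a singular `tag` column.
demo = pd.DataFrame({"id": [1], "tags": ["red, blue"]})
denormalize_tags(demo)
#    id   tag
# 0   1   red
# 1   1  blue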
|
ce61cd49041a6238fc91bb1256ee0357944dc71e
| 34,339 |