content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
def fslimage_to_qpdata(img, name=None, vol=None, region=None, roi=False):
""" Convert fsl.data.Image to QpData """
if not name: name = img.name
if vol is not None:
data = img.data[..., vol]
else:
data = img.data
if region is not None:
        data = (data == region).astype(int)
return NumpyData(data, grid=DataGrid(img.shape[:3], img.voxToWorldMat), name=name, roi=roi) | 1f383f17196a11f64ab6d95f5cf001dc41346372 | 15,696 |
import gc
import numpy as np
import xgboost as xgb
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.metrics import roc_auc_score
def xgb_cv(
    data_, test_, y_, max_depth, gamma, reg_lambda, reg_alpha,
    subsample, scale_pos_weight, min_child_weight, colsample_bytree,
    test_phase=False, stratify=False,
):
"""XGBoost cross validation.
This function will instantiate a XGBoost classifier with parameters
such as max_depth, subsample etc. Combined with data and
targets this will in turn be used to perform cross validation. The result
of cross validation is returned.
Our goal is to find combinations of parameters that maximizes AUC.
Returns:
if test_phase (and new data for validators, just change the test_ param
to the new data and make sure that the features are processed):
sub_preds : models prediction to get the hold-out score
else:
validation AUC score
Model Notes:
        XGBoost overfits on this dataset, so params should be set accordingly.
Parameter Notes
gamma : Minimum loss reduction required to make a further partition on a leaf \
node of the tree. The larger gamma is, the more conservative the algorithm will be.
min_child_weight : The larger min_child_weight is, the more conservative the algorithm will be.
colsample_bytree : The subsample ratio of columns when constructing each tree.
scale_pos_weight : A typical value to consider: sum(negative instances) / sum(positive instances)
"""
oof_preds = np.zeros(data_.shape[0])
sub_preds = np.zeros(test_.shape[0])
if test_phase:
max_depth = int(np.round(max_depth))
feats = [f for f in data_.columns if f not in ['bookingid', 'label']]
    if stratify:
        folds_ = StratifiedKFold(n_splits=4, shuffle=True, random_state=610)
        splitted = folds_.split(data_, y_)
    else:
        folds_ = KFold(n_splits=4, shuffle=True, random_state=610)  # unstratified split
        splitted = folds_.split(data_)
for n_fold, (trn_idx, val_idx) in enumerate(splitted):
trn_x, trn_y = data_[feats].iloc[trn_idx], y_.iloc[trn_idx]
val_x, val_y = data_[feats].iloc[val_idx], y_.iloc[val_idx]
xg_train = xgb.DMatrix(
trn_x.values, label=trn_y.values
)
xg_valid = xgb.DMatrix(
val_x.values, label=val_y.values
)
watchlist = [(xg_train, 'train'),(xg_valid, 'eval')]
num_round=10000
param = {
'gamma' : gamma,
'max_depth':max_depth,
'colsample_bytree':colsample_bytree,
'subsample':subsample,
'min_child_weight':min_child_weight,
'objective':'binary:logistic',
'random_state':1029,
'n_jobs':8,
'eval_metric':'auc',
'metric': 'auc',
'scale_pos_weight':scale_pos_weight,
'eta':0.05,
'silent':True
}
clf = xgb.train(param, xg_train, num_round, watchlist, verbose_eval=100, early_stopping_rounds = 100)
oof_preds[val_idx] = clf.predict(xgb.DMatrix(data_[feats].iloc[val_idx].values), ntree_limit=clf.best_ntree_limit)
if test_phase:
sub_preds += clf.predict(xgb.DMatrix(test_[feats].values), ntree_limit=clf.best_ntree_limit) / folds_.n_splits
print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(val_y, oof_preds[val_idx])))
del clf, trn_x, trn_y, val_x, val_y
gc.collect()
print('Full AUC score %.6f' % roc_auc_score(y_, oof_preds))
if test_phase:
return sub_preds
else:
return roc_auc_score(y_, oof_preds) | 2ad542c0a6f10835b352ea941a40dfb20b0f02f2 | 15,697 |
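The docstring above frames the goal as finding parameter combinations that maximize AUC, so a natural driver for xgb_cv is a Bayesian optimizer. The sketch below is an assumption for illustration only: the bayes_opt package, the names train_df, test_df and y, and the parameter bounds are not part of the original snippet.

# Hedged sketch: driving xgb_cv with bayes_opt; train_df, test_df and y are
# hypothetical, already-processed inputs.
from bayes_opt import BayesianOptimization

def cv_fn(max_depth, gamma, reg_lambda, reg_alpha, subsample,
          scale_pos_weight, min_child_weight, colsample_bytree):
    # round max_depth here because xgboost expects an integer
    return xgb_cv(train_df, test_df, y, int(round(max_depth)), gamma,
                  reg_lambda, reg_alpha, subsample, scale_pos_weight,
                  min_child_weight, colsample_bytree)

optimizer = BayesianOptimization(
    f=cv_fn,
    pbounds={
        'max_depth': (3, 8), 'gamma': (0, 5),
        'reg_lambda': (0, 5), 'reg_alpha': (0, 5),
        'subsample': (0.5, 1.0), 'scale_pos_weight': (1, 10),
        'min_child_weight': (1, 50), 'colsample_bytree': (0.5, 1.0),
    },
    random_state=610,
)
optimizer.maximize(init_points=5, n_iter=20)
print(optimizer.max)  # best AUC found and the parameters that produced it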
def _infer_elem_type(list_var):
"""
Returns types.tensor. None if failed to infer element type.
Example:
Given:
main(%update: (2,fp32)) {
block0() {
%list: List[unknown] = tf_make_list(...) # unknown elem type
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
Result:
main(%update: (2,fp32)) {
block0() {
%list: List[(2,fp32)] = tf_make_list(...) # Get the elem type from list_write
%while_loop_0:0: (i32), %while_loop_0:1: List[(2,fp32)] = while_loop(loop_vars=(...))
while_loop_0_body(...) {
%list_write_0: List[(2,fp32)] = list_write(index=..., ls=%list, value=%update)
} -> (%add_0, %list_write_0)
"""
    # Search for child ops that have informative element types
for o in list_var.child_ops:
if o.op_type in ["list_write", "list_scatter"]:
return o.outputs[0].elem_type
if o.op_type == "while_loop":
idx = list(o.loop_vars).index(list_var)
block = o.blocks[0]
# the corresponding Var in body block
block_var = block.inputs[idx]
elem_type = _infer_elem_type(block_var)
if elem_type is not None:
def _set_types_for_block_inputs(block):
block_var = block.inputs[idx]
new_block_var = ListVar(name=block_var.name, elem_type=elem_type,
init_length=block_var.sym_type.T[1],
dynamic_length=block_var.sym_type.T[2])
block._replace_var(block_var, new_block_var)
_set_types_for_block_inputs(o.blocks[0]) # condition block
_set_types_for_block_inputs(o.blocks[1]) # body block
return elem_type
# otherwise continue to other block_var (a list_var can be
# passed into while_loop twice).
return None | 207d9ca4bd4f666d867d17756a7cd84110c47e76 | 15,698 |
def plot_hairy_mean_binstat_base(
list_of_pred_true_weight_label_color, key, spec,
is_rel = False, err = 'rms'
):
"""Plot binstats of means of relative energy resolution vs true energy."""
spec = spec.copy()
if spec.title is None:
spec.title = 'MEAN + E[ %s ]' % (err.upper())
else:
spec.title = '(MEAN + E[ %s ]) ( %s )' % (err.upper(), spec.title)
f, ax = plt.subplots()
for pred,true,weights,label,color in list_of_pred_true_weight_label_color:
x = true[key]
y = (pred[key] - true[key])
if is_rel:
y = y / x
plot_hairy_mean_binstat_single(
ax, x, y, weights, spec.bins_x, color, label, err
)
ax.axhline(0, 0, 1, color = 'C2', linestyle = 'dashed')
spec.decorate(ax)
ax.legend()
return f, ax | aebe7d7b4131618c4ca1ef4b78a79258b5f405b7 | 15,699 |
def parse_csv_file(file_contents):
"""
The helper function which converts the csv file into a dictionary where each
item's key is the provided value 'id' and each item's value is another
dictionary.
"""
list_of_contents = file_contents.split('\n')
key, lines = (list_of_contents[0].split(','),
list_of_contents[1:])
objects_dict = {}
# Build a dictionary
for index, line in enumerate(lines):
if line.isspace() or line == u'': continue
values = unicode_csv_reader([line]).next()
line_dict = dict([(key[i], val)
for i, val in enumerate(values)])
media_id = line_dict.get('id') or index
objects_dict[media_id] = (line_dict)
return objects_dict | 32bec37d01a58ff374b28225e4619f3b9cb98480 | 15,700 |
def get_config():
"""
Returns the current bot config.
"""
return BOT_CONFIG | bb9f3a8c5176d31bb32ea7ecd8e334dae2de1ebc | 15,701 |
def SensorLocation_Meta():
"""SensorLocation_Meta() -> MetaObject"""
return _DataModel.SensorLocation_Meta() | f062916300ae9669fa8e22fd6449ec829371a7f4 | 15,702 |
from pathlib import Path
def plot_sample_eval(images: list,
sub_titles=None,
main_title=None,
vmin=None, vmax=None,
label_str=None, pred_str=None,
additional_info=None,
show_plot=False, save_as=None):
"""
Plots one or multiple images in a row, including titles and additional information, if given.
Recommended to use for visualising network input, prediction, label etc. of a data sample or time step
Args:
images (list[2D numpy.ndarray]): Images to display in the plot, e.g. sensor frames, flowfronts etc.
sub_titles (list[str]): list of titles that will be displayed above the corresponding image. Length should match
the number of images
main_title (str): the main title displayed at the top
vmin (list[float or int]): set the min value for each subplot manually (useful for time series plots).
Length should match the number of images
vmax (list[float or int]): set the max value for each subplot manually (useful for time series plots).
Length should match the number of images
label_str: Label as a string (useful if label is a class, not an image)
pred_str: Prediction as a string (useful if prediction is a class, not an image)
additional_info (list[str]): List of strings that will be displayed at the bottom of the plot. Each list entry
is put in a new row.
show_plot: if True, the plot will be shown in a window during runtime
save_as (pathlib.Path or str): full path, including filename and type (e.g. '/cfs/example/output.png')
"""
assert bool(images)
assert sub_titles is None or len(sub_titles) == len(images)
assert vmin is None or len(vmin) == len(images)
    assert vmax is None or len(vmax) == len(images)
plt.rcParams['figure.constrained_layout.use'] = True
# set up figure size and basic structure
ratio = images[0].shape[0] / images[0].shape[1]
base_size = 4
text_space = 0.35 if main_title is not None else 0
text_space += 0.35 if label_str is not None else 0
text_space += 0.35 if pred_str is not None else 0
text_space += 0.35 * len(additional_info) if additional_info is not None else 0
figsize = (base_size * len(images), base_size * ratio + text_space)
fig, axs = plt.subplots(1, len(images), figsize=figsize)
if len(images) == 1:
axs = [axs]
if main_title is not None:
fig.suptitle(main_title)
for i, img in enumerate(images):
axs[i].imshow(img, vmin=None if vmin is None else vmin[i], vmax=None if vmax is None else vmax[i])
axs[i].set(xticks=[], yticks=[], title=None if sub_titles is None else sub_titles[i])
text = ""
color = 'black'
if label_str is not None:
text += f"{'Label: ':8}{label_str}"
if label_str is not None and pred_str is not None:
color = 'green' if label_str == pred_str else 'red'
text += '\n'
if pred_str is not None:
text += f"{'Pred: ':8}{pred_str}"
if additional_info is not None:
for info in additional_info:
text += f"\n{info}"
plt.figtext(0.01, 0.01, text, c=color, ha='left')
if show_plot:
plt.show()
if save_as is not None:
Path(save_as).parent.mkdir(parents=True, exist_ok=True)
plt.savefig(save_as)
return fig | a71d2b84a0c5b236bcf17e82a44dcbaaecd98169 | 15,703 |
def predict():
"""
Get data and do the same processing as when we prototyped,
because we need to normalize based on training data summary stats
:return:
"""
data = pd.read_csv('data.csv')
df = data.drop("Unnamed: 32", axis=1)
df = data.drop("id", axis=1)
df.drop(columns=["Unnamed: 32"], inplace=True)
X = df.drop(labels="diagnosis", axis=1)
input_data = []
for col in cols:
input_data.append(float(request.form[col]))
df_norm = (input_data - X.mean()) / (X.max() - X.min())
pred = loaded_model.predict(df_norm)
if pred == 1:
return "Prediction : Benign Tumor Found"
else:
return "Prediction : Malignant Tumor Found" | 7c0d21b38ce79cb9d2700b79ccb1c47bf27952de | 15,704 |
def format_number(number, num_decimals=2):
"""
Format a number as a string including thousands separators.
:param number: The number to format (a number like an :class:`int`,
:class:`long` or :class:`float`).
:param num_decimals: The number of decimals to render (2 by default). If no
decimal places are required to represent the number
they will be omitted regardless of this argument.
:returns: The formatted number (a string).
This function is intended to make it easier to recognize the order of size
of the number being formatted.
Here's an example:
>>> from humanfriendly import format_number
>>> print(format_number(6000000))
6,000,000
    >>> print(format_number(6000000000.42))
    6,000,000,000.42
    >>> print(format_number(6000000000.42, num_decimals=0))
6,000,000,000
"""
integer_part, _, decimal_part = str(float(number)).partition('.')
reversed_digits = ''.join(reversed(integer_part))
parts = []
while reversed_digits:
parts.append(reversed_digits[:3])
reversed_digits = reversed_digits[3:]
formatted_number = ''.join(reversed(','.join(parts)))
decimals_to_add = decimal_part[:num_decimals].rstrip('0')
if decimals_to_add:
formatted_number += '.' + decimals_to_add
return formatted_number | d898afd3254ee012c94653641ce177eb6e70a842 | 15,705 |
def query_rockets():
"""
request all rockets
"""
query = '''
{
rockets {
id
}
}
'''
return query | 8bf6c912a21bc0250c9a74f7fc26347b50ba1fa8 | 15,706 |
def get_block_len(built_prims, prim_type):
""" Calculates the maximum block length for a given primitive type """
retval = 0
for _, p in built_prims:
if p.prim_type == prim_type:
retval = max(retval, p.block_len)
return retval | 091d96a864abce6d782f3baf832ae508a018d083 | 15,708 |
import torch
def _interpolate_gather(array, x):
"""
Like ``torch.gather(-1, array, x)`` but continuously indexes into the
rightmost dim of an array, linearly interpolating between array values.
"""
with torch.no_grad():
x0 = x.floor().clamp(min=0, max=array.size(-1) - 2)
x1 = x0 + 1
f0 = _gather(array, -1, x0.long())
f1 = _gather(array, -1, x1.long())
return f0 * (x1 - x) + f1 * (x - x0) | 47025fbe25f2d1f5df9ee1423d63d5022b8d280d | 15,709 |
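The _gather helper used above is module-level code that is not shown here. As a self-contained illustration of the same idea, fractional indexing into the last dimension with linear interpolation, the following uses torch.gather directly:

import torch

array = torch.tensor([0.0, 10.0, 20.0, 30.0])
x = torch.tensor([0.5, 2.25])                       # fractional positions
x0 = x.floor().clamp(min=0, max=array.size(-1) - 2)
x1 = x0 + 1
f0 = array.gather(-1, x0.long())                    # values at the lower integer index
f1 = array.gather(-1, x1.long())                    # values at the upper integer index
print(f0 * (x1 - x) + f1 * (x - x0))                # tensor([ 5.0000, 22.5000])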
def modify_column_cell_content(content, value_to_colors):
"""
Function to include colors in the cells containing values.
Also removes the index that was used for bookkeeping.
"""
idx, value = content
if type(value) == int or type(value) == float:
color = value_to_colors[content]
        return ' '.join([r'\cellcolor{{{}}}'.format(color), str(value)])
else:
return value | efbac52eb49efa2054b7b346def96b8e7608bae7 | 15,710 |
def ReversePolishSolver(expression):
"""
Solves a given problem in reverse polish notation
    :param expression: tuple of strings
"""
# Create empty stack
rp_calculator = Stack()
for c in expression:
# Check if next part of expression is an operator or a number
operator = {'+', '-', '*', '/'}
if c in operator:
if rp_calculator.count < 2:
print('Error: Not enough operands')
else:
# Pop two values
right_operand = rp_calculator.pop()
left_operand = rp_calculator.pop()
# Evaluate and push result back to stack
if c == '+':
rp_calculator.push(left_operand + right_operand)
elif c == '-':
rp_calculator.push(left_operand - right_operand)
elif c == '*':
rp_calculator.push(left_operand * right_operand)
elif c == '/':
rp_calculator.push(left_operand / right_operand)
elif c.isnumeric():
# Operand: add to stack
rp_calculator.push(int(c))
else:
print('Error: invalid character')
if rp_calculator.count > 1:
print('Error: too many operands')
return rp_calculator.pop() | 13f916be6a64f15f2d96642e178ef1842df77d24 | 15,711 |
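The Stack class used by the solver is not included in the snippet. Below is a minimal, assumed stand-in with the push/pop/count interface the function relies on, plus a quick sanity check of the evaluator:

# Minimal list-backed Stack (hypothetical; the original Stack class is not shown).
class Stack:
    def __init__(self):
        self._items = []

    @property
    def count(self):
        return len(self._items)

    def push(self, item):
        self._items.append(item)

    def pop(self):
        return self._items.pop()

print(ReversePolishSolver(('3', '4', '+', '2', '*')))  # (3 + 4) * 2 = 14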
import curses
def _make_selection(stdscr, classes, message='(select one)'):
"""
This function was originally branched from https://stackoverflow.com/a/45577262/5009004
:return: option, classes index
:rtype: (str, int)
"""
attributes = {}
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
attributes['normal'] = curses.color_pair(1)
curses.init_pair(2, curses.COLOR_BLACK, curses.COLOR_WHITE)
attributes['highlighted'] = curses.color_pair(2)
c = 0 # last character read
option = 0 # the current option that is marked
while c != 10: # Enter in ascii
stdscr.erase()
stdscr.addstr(f"{message}\n", curses.A_UNDERLINE)
for i in range(len(classes)):
if i == option:
attr = attributes['highlighted']
else:
attr = attributes['normal']
try:
stdscr.addstr(f"{i + 1}. ")
stdscr.addstr(classes[i] + '\n', attr)
except curses.error as e:
print(f"Curses error {classes[i]} {attr}")
print(e)
return None, None
c = stdscr.getch()
if c == curses.KEY_UP and option > 0:
option -= 1
elif c == curses.KEY_DOWN and option < len(classes) - 1:
option += 1
# stdscr.addstr("You chose {0}".format(classes[option]))
# stdscr.getch()
return classes[option], option | 7b2d22e70c84138d4bcfae1d1bc5d6a11d4ce806 | 15,712 |
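Because the function expects a curses screen as its first argument, it would normally be invoked through curses.wrapper, which initialises the terminal and colour support and restores the terminal afterwards. The option strings below are placeholders:

choice, index = curses.wrapper(_make_selection, ["option A", "option B"], "(select one)")
print(choice, index)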
def _check_keys(keys, spec):
"""Check a list of ``keys`` equals ``spec``.
Sorts both keys and spec before checking equality.
Arguments:
keys (``list``): The list of keys to compare to ``spec``
spec (``list``): The list of keys to compare to ``keys``
Returns:
``bool``
Raises:
``exceptions.InvalidListError``: Raised if ``keys`` is not
equal to ``spec``.
"""
if not sorted(keys) == sorted(spec):
raise exceptions.InvalidListError('{} does not equal {}'.format(
keys, spec
))
return True | 83d7662b2e28bb4a5ad959ea04d0ede5ac15458b | 15,713 |
def hangToJamo(hangul: str):
"""한글을 자소 단위(초, 중, 종성)로 분리하는 모듈입니다.
@status `Accepted` \\
@params `"안녕하세요"` \\
@returns `"ㅇㅏㄴㄴㅕㅇㅎㅏ_ㅅㅔ_ㅇㅛ_"` """
result = []
for char in hangul:
char_code = ord(char)
if not 0xAC00 <= char_code <= 0xD7A3:
result.append(char)
continue
initial_idx = int((((char_code - 0xAC00) / 28) / 21) % 19)
midial_idx = int(((char_code - 0xAC00) / 28) % 21)
final_idx = int((char_code - 0xAC00) % 28)
initial = chosung[initial_idx]
midial = jungsung[midial_idx]
final = jongsung[final_idx]
result.append(initial)
result.append(midial)
result.append(final)
return ''.join(result) | 680ff6f873eff03df36a619e221f62c38b69daec | 15,715 |
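The chosung, jungsung and jongsung lookup tables referenced above are not part of the snippet. The definitions below follow the standard Unicode jamo ordering, with '_' marking an empty final consonant; this is an assumption based on the docstring's example output.

# Assumed lookup tables (standard Unicode jamo order); '_' marks an empty final.
chosung = ['ㄱ', 'ㄲ', 'ㄴ', 'ㄷ', 'ㄸ', 'ㄹ', 'ㅁ', 'ㅂ', 'ㅃ', 'ㅅ',
           'ㅆ', 'ㅇ', 'ㅈ', 'ㅉ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']
jungsung = ['ㅏ', 'ㅐ', 'ㅑ', 'ㅒ', 'ㅓ', 'ㅔ', 'ㅕ', 'ㅖ', 'ㅗ', 'ㅘ', 'ㅙ',
            'ㅚ', 'ㅛ', 'ㅜ', 'ㅝ', 'ㅞ', 'ㅟ', 'ㅠ', 'ㅡ', 'ㅢ', 'ㅣ']
jongsung = ['_', 'ㄱ', 'ㄲ', 'ㄳ', 'ㄴ', 'ㄵ', 'ㄶ', 'ㄷ', 'ㄹ', 'ㄺ', 'ㄻ', 'ㄼ', 'ㄽ', 'ㄾ',
            'ㄿ', 'ㅀ', 'ㅁ', 'ㅂ', 'ㅄ', 'ㅅ', 'ㅆ', 'ㅇ', 'ㅈ', 'ㅊ', 'ㅋ', 'ㅌ', 'ㅍ', 'ㅎ']

print(hangToJamo("안녕하세요"))  # ㅇㅏㄴㄴㅕㅇㅎㅏ_ㅅㅔ_ㅇㅛ_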
import json
def get_config(config_path):
""" Open a Tiler config and return it as a dictonary """
with open(config_path) as config_json:
config_dict = json.load(config_json)
return config_dict | 72a2133b44ffc553ad72d6c9515f1f218de6a08c | 15,716 |
from typing import Iterable
from typing import Tuple
from typing import TypeVar
import itertools
T = TypeVar("T")
def groupby_index(iter: Iterable[T],n:int) -> Iterable[Iterable[T]]:
"""group list by index
Args:
iter (Iterable[T]): iterator to group by index
n (int): The size of groups
Returns:
Iterable[Iterable[T]]: iterable object to group by index
>>> [*map(lambda x:[*x],groupby_index([1,2,3,4],2))]
[[1, 2], [3, 4]]
"""
def keyfunc(x: Tuple[int,T]) -> int:
k, _ = x
return (k // n)
def mapper(x: Tuple[int, Tuple[int, T]]):
_, v = x
return map(lambda y: y[1],v)
g = itertools.groupby(enumerate(iter), keyfunc)
return map(mapper,g) | 20172b8d52247228253790150225177a2d65caa3 | 15,717 |
def _build_message_classes(message_name):
"""
Create a new subclass instance of DIMSEMessage for the given DIMSE
`message_name`.
Parameters
----------
message_name : str
The name/type of message class to construct, one of the following:
* C-ECHO-RQ
* C-ECHO-RSP
* C-STORE-RQ
* C-STORE-RSP
* C-FIND-RQ
* C-FIND-RSP
* C-GET-RQ
* C-GET-RSP
* C-MOVE-RQ
* C-MOVE-RSP
* C-CANCEL-RQ
* N-EVENT-REPORT-RQ
* N-EVENT-REPORT-RSP
* N-GET-RQ
* N-GET-RSP
* N-SET-RQ
* N-SET-RSP
* N-ACTION-RQ
* N-ACTION-RSP
* N-CREATE-RQ
* N-CREATE-RSP
* N-DELETE-RQ
* N-DELETE-RSP
"""
def __init__(self):
DIMSEMessage.__init__(self)
# Create new subclass of DIMSE Message using the supplied name
# but replace hyphens with underscores
cls = type(message_name.replace('-', '_'),
(DIMSEMessage,),
{"__init__": __init__})
# Create a new Dataset object for the command_set attributes
d = Dataset()
for elem_tag in command_set_elem[message_name]:
tag = Tag(elem_tag)
vr = dcm_dict[elem_tag][0]
# If the required command set elements are expanded this will need
# to be checked to ensure it functions OK
try:
d.add_new(tag, vr, None)
except:
d.add_new(tag, vr, '')
cls.command_set = d
globals()[cls.__name__] = cls
return cls | bee717a712acb3463811a64ca2e960823bb60cc5 | 15,718 |
def valid_string(s, min_len=None, max_len=None,
allow_blank=False, auto_trim=True, pattern=None):
"""
@param s str/unicode 要校验的字符串
@param min_len None/int
@param max_len None/int
@param allow_blank boolean
@param auto_trim boolean
@:param pattern re.pattern
@return boolean is_ok
@return string/int value 若是ok,返回int值,否则返回错误信息
"""
if s is None:
return False, u'不能为None'
if not isinstance(s, basestring):
return False, u"参数类型需要是字符串"
if auto_trim:
s = s.strip()
str_len = len(s)
if not allow_blank and str_len < 1:
return False, u"参数不允许为空"
if max_len is not None and str_len > max_len:
return False, u"参数长度需小于%d" % max_len
if min_len is not None and str_len < min_len:
return False, u"参数长度需大于 %d" % min_len
if pattern is not None and s and not _match_pattern(pattern, s):
return False, u'参数包含的字符: %s' % pattern
return True, s | 07bb43fd9fc3581330377e16b7e876d5ee051543 | 15,719 |
def update_visitor(visitor_key, session_key=None):
""" update the visitor using the visitor key """
visitor = get_visitor(visitor_key)
if visitor:
visitor.mark_visit()
if session_key:
visitor.last_session_key = session_key
visitor.save()
return visitor | cc73997ff0fdb3b591f4f55f8a00b6d0ab02a8ea | 15,720 |
def PNewUVTable (inUV, access, tabType, tabVer, err):
""" Obsolete use PGetTable
"""
if ('myClass' in inUV.__dict__) and (inUV.myClass=='AIPSUVData'):
raise TypeError("Function unavailable for "+inUV.myClass)
return PGetTable (inUV, access, tabType, tabVer, err) | a7f9c156a9787839a55de10bf524f12a8086e1b0 | 15,721 |
import binascii
def fmt_hex(bytes):
"""Format the bytes as a hex string, return upper-case version.
"""
# This is a separate function so as to not make the mistake of
# using the '%X' format string with an ints, which will not
# guarantee an even-length string.
#
# binascii works on all versions of Python, the hex encoding does not.
hex = binascii.hexlify(bytes)
hex = hex.decode() # Returns bytes, which makes no sense to me
return hex.upper() | d25379ec333a653549c329932e304e61c57f173d | 15,722 |
import requests
def getTracksAudioFeatures(access_token, id_string):
"""
getTracksAudioFeatures() retrieves the track list audio features, this includes danceability, energy, loudness, etc..
"""
# URL to pass a list of tracks to get their audio features
audio_features_url = f"/audio-features"
# Header parameter to allow the application to make requests to the Spotify Web API
header = {
"Authorization" : "Bearer " + access_token,
}
# Query parameters:
# ids: string of song ids separated by a comma
param = {
"ids" : id_string,
}
    # GET request to receive track audio information
response = requests.get(BASE_URL + audio_features_url, headers=header, params=param)
return response | c0f627bf804065826e2e3e47edaba3077dbfad8f | 15,724 |
def tan(x):
"""Element-wise `tangent`."""
return sin(x) / cos(x) | 4854c102c02c0cc32af51eb5f5b47ab39f66f17e | 15,726 |
def scalingImage(img, minVal, maxVal):
"""
Scale image given a range.
Parameters: img, image to be scaled;
minVal, lower value for range;
maxVal, upper value for range.
Returns: imgScaled, image scaled.
"""
imax = np.max(img)
imin = np.min(img)
std = (img - imin) / (imax - imin)
imgScaled = std * (maxVal - minVal) + minVal
return imgScaled | 7e4edb22f464afdf5dbc12f2dd9ab99533a68d54 | 15,727 |
def forcestr(name):
""" returns `name` as string, even if it wasn't before """
return name if isinstance(name, bytes) else name.encode(RAW_ENCODING, ENCODING_ERROR_HANDLING) | 8b2dff5762ebab584b1e578f640d56d0c3af3e1a | 15,728 |
def IsGitSVNDirty(directory):
"""
Checks whether our git-svn tree contains clean trunk or some branch.
Errors are swallowed.
"""
# For git branches the last commit message is either
# some local commit or a merge.
return LookupGitSVNRevision(directory, 1) is None | 903e3827fc3569a9ac67038a9112afe6e2db9842 | 15,729 |
def create_pyfunc_dataset(batch_size=32, repeat_size=1, num_parallel_workers=1, num_samples=None):
"""
Create Cifar10 dataset pipline with Map ops containing only Python functions and Python Multiprocessing enabled
"""
# Define dataset
cifar10_ds = ds.Cifar10Dataset(DATA_DIR, num_samples=num_samples)
cifar10_ds = cifar10_ds.map(operations=[py_vision.ToType(np.int32)], input_columns="label",
num_parallel_workers=num_parallel_workers, python_multiprocessing=True)
# Setup transforms list which include Python ops / Pyfuncs
transforms_list = [
py_vision.ToPIL(),
py_vision.RandomGrayscale(prob=0.2),
np.array] # need to convert PIL image to a NumPy array to pass it to C++ operation
compose_op = py_transforms.Compose(transforms_list)
cifar10_ds = cifar10_ds.map(operations=compose_op, input_columns="image",
num_parallel_workers=num_parallel_workers,
python_multiprocessing=True)
# Apply Dataset Ops
buffer_size = 10000
cifar10_ds = cifar10_ds.shuffle(buffer_size=buffer_size)
cifar10_ds = cifar10_ds.batch(batch_size, drop_remainder=True)
cifar10_ds = cifar10_ds.repeat(repeat_size)
return cifar10_ds | c03b730e58abe09560d073b2abe5ee43e782b5f5 | 15,731 |
from typing import Sequence
from typing import Union
from typing import Tuple
from typing import List
from typing import Type
def _infer_structured_outs(
op_config: LinalgStructuredOpConfig,
in_arg_defs: Sequence[OperandDefConfig], ins: Sequence[Value],
out_arg_defs: Sequence[OperandDefConfig],
outs: Union[Sequence[Value], OpResultList]) -> Tuple[ValueList, List[Type]]:
"""Infers implicit outs and output types.
Respects existing contents of outs if not empty.
Returns:
normalized outs, output types
"""
# If outs were explicitly provided, we accept them verbatim.
if outs:
return outs, [out.type for out in outs]
raise NotImplementedError(f"Output tensor inference not yet supported for "
"structured ops") | 00efb063451c1a6b6d9f451043f162bb0ca91efc | 15,732 |
import functools
def event_source(method: t.Callable, name: t.Optional[str] = None):
"""A decorator which makes the function act as a source of before and after call events.
You can later subscribe to these event with :py:func:`before` and :py:func`after` decorators.
:param method: Target class method
    :param name: Name for the join point. If not given, the function name is used.
"""
# We must use function name instead of function pointer for the registry, because function object changes with unbound vs. bound Python class methods
if not name:
name = method.__name__
@functools.wraps(method)
def _inner(*args, **kwargs):
_self = args[0]
fire_advisor_event(_self, name, AdvisorRole.before)
retval = method(*args, **kwargs)
fire_advisor_event(_self, name, AdvisorRole.after)
return retval
    assert name not in _event_source_hooks, "There already exists an event_source with the same name"
_event_source_hooks.append(name)
method._event_source_name = name
return _inner | 136e25826d508259c95d2b6967b8c5f43a17e2e8 | 15,733 |
def lst_blocks(uvp, blocks=2, lst_range=(0., 2.*np.pi)):
"""
Split a UVPSpec object into multiple objects, each containing spectra
within different contiguous LST ranges. There is no guarantee that each
block will contain the same number of spectra or samples.
N.B. This function uses the `lst_avg_array` property of an input UVPSpec
object to split the LSTs (and not the LSTs of the individual visibilities
that went into creating each delay spectrum).
Parameters
----------
uvp : UVPSpec
Object containing delay spectra.
blocks : int, optional
How many blocks to return. Default: 2.
lst_range : tuple, optional
Tuple containing the minimum and maximum LST to retain. This is the
range that will be split up into blocks. Default: (0., 2*pi)
Returns
-------
uvp_list : list of UVPSpec
List of UVPSpec objects, one for each LST range. Empty blocks will
appear as None in the list.
lst_bins : array_like
Array of LST bin edges. This has dimension (blocks+1,).
"""
# Check validity of inputs
if not isinstance(uvp, hp.UVPSpec):
raise TypeError("uvp must be a single UVPSpec object.")
if not (lst_range[0] >= 0. and lst_range[1] <= 2.*np.pi):
raise ValueError("lst_range must be in the interval (0, 2*pi)")
    if not isinstance(blocks, (int, np.integer)):
raise TypeError("'blocks' must be an integer")
if not blocks > 0:
raise ValueError("Must have blocks >= 1")
# Get LSTs
lsts = np.unique(uvp.lst_avg_array)
# Define bin edges
lst_bins = np.linspace(lst_range[0], lst_range[1], blocks+1)
# Loop over bins and select() the LST ranges required
uvp_list = []
for i in range(lst_bins.size - 1):
idxs = np.where( np.logical_and(lsts >= lst_bins[i],
lsts < lst_bins[i+1]) )[0]
_uvp = None
if idxs.size > 0:
# Select LSTs in this range
_uvp = uvp.select(lsts=lsts[idxs], inplace=False)
uvp_list.append(_uvp)
return uvp_list, lst_bins | 4c2882f7d513dbb920c33aa6e829852ee22e74fb | 15,734 |
def is_completed(book):
"""Determine if the book is completed.
Args:
book: Row instance representing a book.
"""
    return bool(book.status == BOOK_STATUS_ACTIVE
                and not book.complete_in_progress
                and book.release_date)
def compute_encryption_key_AESV3(password : 'str', encryption_dict : 'dict'):
"""
Derives the key to be used with encryption/decryption algorithms from a user-defined password.
Parameters
----------
    password : str
        The password string supplied by the user.
encryption_dict : dict
The dictionary containing all the information about the encryption procedure.
Returns
-------
A bytes sequence representing the encryption key.
"""
U = encryption_dict["U"]
U = U.value if isinstance(U, PDFLiteralString) else unhexlify(U.value)
O = encryption_dict["O"]
O = O.value if isinstance(O, PDFLiteralString) else unhexlify(O.value)
prepped = sals_stringprep(password)
truncated = prepped.encode("utf8")[:127]
digest = sha256(truncated + O[32:32+8] + U).digest()
if digest == O[:32]:
intermediate = sha256(truncated + O[-8:] + U).digest()
OE = encryption_dict["OE"]
OE = OE.value if isinstance(OE, PDFLiteralString) else unhexlify(OE.value)
file_encryption_key = cbc_decrypt(OE, intermediate, b'\x00'*16, padding = False)
else:
digest = sha256(truncated + U[32:32+8]).digest()
if digest == U[:32]:
intermediate = sha256(truncated + U[-8:]).digest()
UE = encryption_dict["UE"]
UE = UE.value if isinstance(UE, PDFLiteralString) else unhexlify(UE.value)
file_encryption_key = cbc_decrypt(UE, intermediate, b'\x00'*16, padding = False)
else:
raise PDFWrongPasswordError()
return file_encryption_key | c6d3ec657e43187f582b414824082f5483145f94 | 15,736 |
def list_group(group_name, recursive=True):
"""Returns all members, all globs and all nested groups in a group.
The returned lists are unordered.
Returns:
GroupListing object.
"""
return get_request_cache().auth_db.list_group(group_name, recursive) | ebd0ddfc7494d2af18057b2adde51cdb0e0b3a76 | 15,737 |
def pi():
"""Compute Pi to the current precision.
>>> print(pi())
3.141592653589793238462643383
"""
getcontext().prec += 2 # extra digits for intermediate steps
three = Decimal(3) # substitute "three=3.0" for regular floats
lasts, t, s, n, na, d, da = 0, three, 3, 1, 0, 0, 24
while s != lasts:
lasts = s
n, na = n + na, na + 8
d, da = d + da, da + 32
t = (t * n) / d
s += t
getcontext().prec -= 2
return +s # unary plus applies the new precision | e9907ef4e437ddbeae34d869d48328ee32bd8d5c | 15,738 |
def load_cifar_data(limit=None) -> np.ndarray:
"""
:param limit:
:return:
"""
# cifar10 data (integrated in TensorFlow, downloaded on first use)
cifar10_data = tf.keras.datasets.cifar10
# split into training and test data
train_data, test_data = cifar10_data.load_data()
    # split data into images and labels (not yet the desired format, cf. PreProc)
x_train, label_train = train_data
x_test, label_test = test_data
if limit is not None: # optional limit to develop/test faster
x_train = x_train[:limit, :, :, :]
label_train = label_train[:limit]
x_test = x_test[:limit, :, :, :]
label_test = label_test[:limit]
# provide some basic information about data
print('Number of images in training set', len(x_train))
print('Number of images in testing set', len(x_test))
print('Input image size', x_train.shape[1], 'x',
x_train.shape[2], 'in', x_train.shape[-1], 'channels')
return x_train, label_train, x_test, label_test | 48bbdc60d614b00fdddd7492f37c5ebb62d21caf | 15,739 |
def set_goal_orientation(delta,
current_orientation,
orientation_limit=None,
set_ori=None):
"""
Calculates and returns the desired goal orientation, clipping the result accordingly to @orientation_limits.
@delta and @current_orientation must be specified if a relative goal is requested, else @set_ori must be
specified to define a global orientation position
"""
# directly set orientation
if set_ori is not None:
goal_orientation = set_ori
# otherwise use delta to set goal orientation
else:
rotation_mat_error = trans.euler2mat(-delta)
goal_orientation = np.dot(rotation_mat_error.T, current_orientation)
#check for orientation limits
if np.array(orientation_limit).any():
if orientation_limit.shape != (2,3):
raise ValueError("Orientation limit should be shaped (2,3) "
"but is instead: {}".format(orientation_limit.shape))
# Convert to euler angles for clipping
euler = trans.mat2euler(goal_orientation)
# Clip euler angles according to specified limits
limited = False
for idx in range(3):
if orientation_limit[0][idx] < orientation_limit[1][idx]: # Normal angle sector meaning
if orientation_limit[0][idx] < euler[idx] < orientation_limit[1][idx]:
continue
else:
limited = True
dist_to_lower = euler[idx] - orientation_limit[0][idx]
if dist_to_lower > np.pi:
dist_to_lower -= 2 * np.pi
elif dist_to_lower < -np.pi:
dist_to_lower += 2 * np.pi
                    dist_to_higher = euler[idx] - orientation_limit[1][idx]
                    if dist_to_higher > np.pi:
                        dist_to_higher -= 2 * np.pi
                    elif dist_to_higher < -np.pi:
                        dist_to_higher += 2 * np.pi
if dist_to_lower < dist_to_higher:
euler[idx] = orientation_limit[0][idx]
else:
euler[idx] = orientation_limit[1][idx]
else: # Inverted angle sector meaning
if (orientation_limit[0][idx] < euler[idx]
or euler[idx] < orientation_limit[1][idx]):
continue
else:
limited = True
dist_to_lower = euler[idx] - orientation_limit[0][idx]
if dist_to_lower > np.pi:
dist_to_lower -= 2 * np.pi
elif dist_to_lower < -np.pi:
dist_to_lower += 2 * np.pi
                    dist_to_higher = euler[idx] - orientation_limit[1][idx]
                    if dist_to_higher > np.pi:
                        dist_to_higher -= 2 * np.pi
                    elif dist_to_higher < -np.pi:
                        dist_to_higher += 2 * np.pi
if dist_to_lower < dist_to_higher:
euler[idx] = orientation_limit[0][idx]
else:
euler[idx] = orientation_limit[1][idx]
if limited:
goal_orientation = trans.euler2mat(np.array([euler[1], euler[0], euler[2]]))
return goal_orientation | 48475c0428f8fa4c179ece5c3d13f5db701256e8 | 15,740 |
def get_fixed_income_index():
"""获取固定收益及中债总财富指数对比走势"""
return get_stg_index('fixed_income', '037.CS') | b04c8a84e0f65c7a88e5e48f2460cd70e57fd394 | 15,741 |
def get_jobid(db):
"""
Ask MongoDB for the a valid jobid.
All processing jobs should have a call to this function at the beginning
of the job script. It simply queries MongoDB for the largest current
value of the key "jobid" in the history collection. If the history
collection is empty it returns 1 under a bias that a jobid of 0 is
illogical.
:param db: database handle
:type db: top level database handle returned by a call to MongoClient.database
"""
hiscol=db.history
hist_size=hiscol.find().count()
if(hist_size<=0):
return 1
else:
maxcur=hiscol.find().sort([('jobid',pymongo.DESCENDING)]).limit(1)
maxcur.rewind() # may not be necessary but near zero cost
maxdoc=maxcur[0]
return maxdoc['jobid']+1 | c21976961c8465b387cdc0d8298a7887563d8233 | 15,742 |
def isA(token, tt=None, tv=None):
"""
function to check if a token meets certain criteria
"""
    # Tokens are (type, value, row, col); row/column info may be useful for error messages
    try:
        tokTT, tokTV, _row, _col = token
    except (TypeError, ValueError):
        return False
if tt is None and tv is None:
return True
elif tv is None:
return tt == tokTT
elif tt is None:
return tv == tokTV
else:
return tv == tokTV and tt == tokTT | d16eb9c963addcdc5eb416dc627c18ee98ddd28c | 15,744 |
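A token here is expected to be a (type, value, row, col) tuple. A quick check with made-up token values:

token = ('NUMBER', '42', 3, 7)            # (type, value, row, col), illustrative only
print(isA(token, tt='NUMBER'))            # True: type matches
print(isA(token, tt='NUMBER', tv='7'))    # False: value does not match
print(isA("not a token"))                 # False: cannot be unpacked into four fields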
from typing import Tuple
from typing import Dict
def authenticate(
*,
token: str,
key: str,
) -> Tuple[bool, Dict]:
"""Authenticate user by token"""
try:
token_header = jwt.get_unverified_header(token)
decoded_token = jwt.decode(token, key, algorithms=token_header.get("alg"))
except JWTError:
return False, {}
else:
return True, decoded_token | 1bb326b4d958ab6e4b8dc11a67f658dae0e8c753 | 15,745 |
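The jwt and JWTError names used above are not imported in the snippet; the calls match the python-jose API, which the usage sketch below assumes:

# Hedged usage sketch, assuming python-jose provides jwt and JWTError.
from jose import jwt
from jose.exceptions import JWTError  # noqa: F401 (needed by authenticate itself)

token = jwt.encode({"sub": "alice"}, "secret", algorithm="HS256")
print(authenticate(token=token, key="secret"))     # (True, {'sub': 'alice'})
print(authenticate(token=token, key="wrong-key"))  # (False, {})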
def backup(source, destination, *, return_wrappers=False):
"""
Backup the selected source(s) into the destination(s) provided.
Source and destination will be converted into ``Source`` and
``Destination`` respectively. If this conversion fails,
an exception will be raised.
:param return_wrappers: If True, the Source and
Destination objects will be returned.
:param source: The source(s) to backup.
:param destination: The destination(s) of backup.
"""
boa = Boa()
_source = get_any_source(source)
_destination = get_any_destination(destination)
boa.backup(_source, _destination)
if return_wrappers:
return _source, _destination | 9ee19caa86a398abbfce2bbe28d0995ebe1e842f | 15,746 |
def _prepare_data(cfg, imgs):
"""Inference image(s) with the detector.
Args:
model (nn.Module): The loaded detector.
imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
Either image files or loaded images.
Returns:
result (dict): Predicted results.
"""
if isinstance(imgs, (list, tuple)):
if not isinstance(imgs[0], (np.ndarray, str)):
raise AssertionError('imgs must be strings or numpy arrays')
elif isinstance(imgs, (np.ndarray, str)):
imgs = [imgs]
else:
raise AssertionError('imgs must be strings or numpy arrays')
is_ndarray = isinstance(imgs[0], np.ndarray)
if is_ndarray:
cfg = cfg.copy()
# set loading pipeline type
cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
test_pipeline = Compose(cfg.data.test.pipeline)
datas = []
for img in imgs:
# prepare data
if is_ndarray:
# directly add img
data = dict(img=img)
else:
# add information into dict
data = dict(img_info=dict(filename=img), img_prefix=None)
# build the data pipeline
data = test_pipeline(data)
# get tensor from list to stack for batch mode (text detection)
datas.append(data)
if isinstance(datas[0]['img'], list) and len(datas) > 1:
raise Exception('aug test does not support '
f'inference with batch size '
f'{len(datas)}')
data = collate(datas, samples_per_gpu=len(imgs))
# process img_metas
if isinstance(data['img_metas'], list):
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
else:
data['img_metas'] = data['img_metas'].data
if isinstance(data['img'], list):
data['img'] = [img.data for img in data['img']]
if isinstance(data['img'][0], list):
data['img'] = [img[0] for img in data['img']]
else:
data['img'] = data['img'].data
return data | 993074d9f789469a38a0050c0ed970b3c86227b8 | 15,747 |
import json
import six
def _HandleJsonList(response, service, method, errors):
"""Extracts data from one *List response page as JSON and stores in dicts.
Args:
response: str, The *List response in JSON
service: The service which responded to *List request
method: str, Method used to list resources. One of 'List' or
'AggregatedList'.
errors: list, Errors from response will be appended to this list.
Returns:
Pair of:
- List of items returned in response as dicts
- Next page token (if present, otherwise None).
"""
items = []
response = json.loads(response)
# If the request is a list call, then yield the items directly.
if method == 'List':
items = response.get('items', [])
# If the request is an aggregatedList call, then do all the
# magic necessary to get the actual resources because the
# aggregatedList responses are very complicated data
# structures...
elif method == 'AggregatedList':
items_field_name = service.GetMethodConfig(
'AggregatedList').relative_path.split('/')[-1]
for scope_result in six.itervalues(response['items']):
# If the given scope is unreachable, record the warning
# message in the errors list.
warning = scope_result.get('warning', None)
if warning and warning['code'] == 'UNREACHABLE':
errors.append((None, warning['message']))
items.extend(scope_result.get(items_field_name, []))
return items, response.get('nextPageToken', None) | db87c9ed87df1268e1187f74c193b5f96f9e10f7 | 15,748 |
import numpy as np
def gray_arrays_to_rgb_sequence_array(arrays, start_rgb, end_rgb, normalise_input=False, normalise_output=True):
"""Returns an RGB array that is mean of grayscale arrays mapped to linearly spaced RGB colors in a range.
:param list arrays: list of numpy.ndarrays of shape (N, M)
:param tuple start_rgb: (R, G, B) mapping of first array in `arrays`
:param tuple end_rgb: (R, G, B) mapping of last array in `arrays`
:param bool normalise_input: if True, input arrays are normalised concurrently to max value of 1. Default is False.
:param bool normalise_output: if True (default), output is normalised to range between 0 and 1.
:return: rgb_sequence_array shape (N, M, 3)
:rtype: numpy.ndarray
"""
if normalise_input:
max_gray_value = max([np.max(array) for array in arrays])
arrays = [array / max_gray_value for array in arrays]
colors = np.array([np.linspace(start, end, len(arrays)) for start, end in zip(start_rgb, end_rgb)]).T
color_arrays = [color[np.newaxis, np.newaxis, :] * array[:, :, np.newaxis] for color, array in zip(colors, arrays)]
rgb_sequence_array = np.mean(np.stack(color_arrays, axis=3), axis=3)
if normalise_output:
rgb_sequence_array = rgb_sequence_array / np.nanmax(rgb_sequence_array)
return rgb_sequence_array | 2b20c524529e195f49348cc56bf2aa2c46a7ab4e | 15,749 |
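A small usage sketch with random grayscale frames mapped from red to blue; the shapes and colours are chosen arbitrarily for illustration:

frames = [np.random.rand(4, 4) for _ in range(3)]   # three grayscale frames
rgb = gray_arrays_to_rgb_sequence_array(frames, start_rgb=(1, 0, 0), end_rgb=(0, 0, 1))
print(rgb.shape)                                    # (4, 4, 3)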
def normalize_inputs(df, metrics):
"""Normalize all inputs around mean and standard deviation.
"""
for m in metrics:
mean = np.mean(df[m])
stdev = np.std(df[m])
def std_normalize(x):
return (x - mean) / stdev
#df[m] = df[m].map(std_normalize)
xmin = min(df[m])
xmax = max(df[m])
def minmax_normalize(x):
return (x - xmin) / (xmax - xmin)
df[m] = df[m].map(minmax_normalize)
return df | 6e861050d4cec7d5d75c3597412df91b175a821f | 15,750 |
def calc_z_rot_from_right(right):
"""
Calculates z rotation of an object based on its right vector, relative to the positive x axis,
which represents a z rotation euler angle of 0. This is used for objects that need to rotate
with the HMD (eg. VrBody), but which need to be robust to changes in orientation in the HMD.
"""
# Project right vector onto xy plane
r = np.array([right[0], right[1], 0])
z_zero_vec = np.array([1, 0, 0])
# Get angle in radians
z = np.arccos(np.dot(r, z_zero_vec))
# Flip sign if on the right side of the xy plane
if r[1] < 0:
z *= -1
# Add pi/2 to get forward direction, but need to deal with jumping
# over quadrant boundaries
if 0 <= z and z <= np.pi / 2:
return z + np.pi / 2
elif np.pi / 2 < z and z <= np.pi:
angle_from_ax = np.pi / 2 - (np.pi - z)
return -np.pi + angle_from_ax
elif -np.pi <= z and z <= -np.pi / 2:
return z + np.pi / 2
else:
return np.pi / 2 + z | 32ee873fe96d5460ac9bfe6d0a3361b9f7e88cc7 | 15,751 |
import numpy as np
from scipy.stats import chi2
def poisson_interval(data, alpha=0.32):
"""Calculates the confidence interval
for the mean of a Poisson distribution.
Parameters
----------
data: array_like
Data giving the mean of the Poisson distributions.
alpha: float
Significance level of interval. Defaults to
one sigma (0.32).
Returns
-------
low, high: array_like
Lower and higher limits for the interval."""
a = alpha
low, high = (chi2.ppf(a / 2, 2 * data) / 2,
chi2.ppf(1 - a / 2, 2 * data + 2) / 2)
low = np.nan_to_num(low)
return low, high | bf5d9071df9cea065af63205c6557d1a9334c236 | 15,752 |
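A quick call on a few example counts, using the default one-sigma significance level:

counts = np.array([1, 5, 100])
low, high = poisson_interval(counts)   # alpha=0.32, roughly a one-sigma interval
print(low, high)                       # lower and upper limits for each count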
def sales_administrative_expense(ticker, frequency):
"""
:param ticker: e.g., 'AAPL' or MULTIPLE SECURITIES
:param frequency: 'A' or 'Q' for annual or quarterly, respectively
    :return: the "Sales, General and administrative" line from the income statement
"""
df = financials_download(ticker, 'is', frequency)
return (df.loc["Sales, General and administrative"]) | 30f5c4a1a1b28ec2c581671964b35c4009c5dffe | 15,753 |
def clip(x, min_, max_):
"""Clip value `x` by [min_, max_]."""
return min_ if x < min_ else (max_ if x > max_ else x) | 3ad7625fa3dc5a0c06bb86dc16698f6129ee9034 | 15,754 |
import random
def generate_code():
"""Generate a URL-compatible short code."""
return ''.join(random.choice(ALPHABET) for _ in range(10)) | 35c8674f39dd1ad6e8f4a238c01fbf5e020513e8 | 15,755 |
def prox_pos(v, t = 1, *args, **kwargs):
"""Proximal operator of :math:`tf(ax-b) + c^Tx + d\\|x\\|_2^2`, where :math:`f(x) = \\max(x,0)` applied
elementwise for scalar t > 0, and the optional arguments are a = scale, b = offset, c = lin_term, and
d = quad_term. We must have t > 0, a = non-zero, and d >= 0. By default, t = 1, a = 1, b = 0, c = 0, and d = 0.
"""
return prox_scale(prox_pos_base, *args, **kwargs)(v, t) | 18136db3b35ef20a03dfe2502930dad648bbc96e | 15,756 |
def delete_mapping(module, sdk, cloud, mapping):
"""
Attempt to delete a Mapping
returns: the "Changed" state
"""
if mapping is None:
return False
if module.check_mode:
return True
try:
cloud.identity.delete_mapping(mapping)
except sdk.exceptions.OpenStackCloudException as ex:
module.fail_json(msg='Failed to delete mapping: {0}'.format(str(ex)))
return True | 2b9a0606775949ed1361010b2ca8ef1257d9db25 | 15,757 |
import numpy as np
def insert_target(x, segment_size):
"""
Creates segments of surrounding words for each word in x.
Inserts a zero token halfway the segment to mark the end of the intended
token.
Parameters
----------
x: list(int)
A list of integers representing the whole data as one long encoded
sentence. Each integer is an encoded subword.
segment_size: int
The size of the output samples.
Returns
-------
np.array:
A numpy matrix representing window of `segment_size` moving over the
input sample `x`.
"""
X = []
#pad the start & end of x
x_pad = x[-((segment_size-1)//2-1):] + x + x[:segment_size//2]
for i in range(len(x_pad)-segment_size+2):
segment = x_pad[i:i+segment_size-1]
#zero at the middle to mark the end of intended token
segment.insert((segment_size-1)//2, 0)
X.append(segment)
return np.array(X) | 7874e26deb13e7872992741dc232ffbdcdaf7d00 | 15,758 |
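A quick shape check of the sliding-window construction, with arbitrary example values: six tokens and segment_size=5 give a padded sequence of nine entries, hence six windows of five positions, each with the 0 marker in the middle.

X = insert_target([1, 2, 3, 4, 5, 6], segment_size=5)
print(X.shape)   # (6, 5)
print(X[0])      # [6 1 0 2 3]; the 0 marks the position of the target token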
import functools
def find_nearest_network(ipa, nets):
"""
    :param ipa: An IP address string
    :param nets:
        A list of str, each giving an IP network with prefix, e.g. 10.0.1.0/24
>>> net1 = "192.168.122.0/24"
>>> net2 = "192.168.0.0/16"
>>> net3 = "192.168.1.0/24"
>>> net4 = "192.168.254.0/24"
>>> net5 = "0.0.0.0/32"
>>> find_nearest_network(net1, [net1, net5])
'192.168.122.0/24'
>>> find_nearest_network(net2, [net1, net5])
'192.168.122.0/24'
>>> find_nearest_network(net1, [net2, net3])
'192.168.0.0/16'
>>> find_nearest_network(net3, [net1, net4])
'192.168.122.0/24'
"""
return sorted(nets, key=functools.partial(distance, ipa))[0] | 43001fb2236a3654ff769e0e7a7eaad37bc74100 | 15,759 |
def set_action_translation(
language_id: int,
action_id: int,
name: str,
description: str = '',
short_description: str = '',
) -> ActionTranslation:
"""
Create or update an action translation.
:param language_id: the ID of an existing language
:param action_id: the ID of an existing action
:param name: the name of the action
:param description: a (possibly empty) description for the action
:param short_description: the new (possibly empty) short description
:return: the created action translation
:raise errors.LanguageDoesNotExistError: if no language with the given ID
exists
:raise errors.ActionDoesNotExistError: if no action with the given ID
exists
"""
action_translation = models.ActionTranslation.query.filter_by(
language_id=language_id,
action_id=action_id
).first()
if action_translation is None:
actions.get_action(action_id)
languages.get_language(language_id)
action_translation = models.ActionTranslation(
language_id=language_id,
action_id=action_id,
name=name,
description=description,
short_description=short_description,
)
else:
action_translation.name = name
action_translation.description = description
action_translation.short_description = short_description
db.session.add(action_translation)
db.session.commit()
return ActionTranslation.from_database(action_translation) | b0fe43b8e0fdee7a6604a4de4a8546763f152498 | 15,761 |
def partialSVD(batch, S, VT, ratio = 1, solver = 'full', tol = None, max_iter = 'auto'):
"""
Fits a partial SVD after given old singular values S
and old components VT.
Note that VT will be used as the number of old components,
so when calling truncated or randomized, will output a
specific number of eigenvectors and singular values.
Checks if new batch's size matches that of the old VT.
Note that PartialSVD has different solvers. Either choose:
1. full
Solves full SVD on the data. This is the most
stable and will guarantee the most robust results.
You can select the number of components to keep
within the model later.
2. truncated
This keeps the top K right eigenvectors and top
k right singular values, as determined by
n_components. Note full SVD is not called for the
truncated case, but rather ARPACK is called.
3. randomized
Same as truncated, but instead of using ARPACK, uses
randomized SVD.
Notice how Batch = U @ S @ VT. However, partialSVD returns
S, VT, and not U. In order to get U, you might consider using
the relation that X = U @ S @ VT, and approximating U by:
X = U @ S @ VT
X @ V = U @ S
(X @ V)/S = U
So, U = (X @ V)/S, so you can output U from (X @ V)/S
You can also get U partially and slowly using reverseU.
"""
data, k, __ = _utilSVD(batch, S, VT, eig = False)
if solver == 'full':
U, S, VT = svd(data)
elif solver == 'truncated':
U, S, VT = truncatedSVD(data, n_components = k, tol = tol)
else:
U, S, VT = randomizedSVD(data, n_components = k, max_iter = max_iter)
return U[k:,:k], S[:k], VT[:k] | 9b50e7f60ea187e7e6553ee1406322dd920e26a3 | 15,762 |
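The docstring's claim that U can be recovered as (X @ V)/S is easy to verify with plain NumPy, independently of the helpers used above; the random matrix below is purely for illustration:

import numpy as np

X = np.random.rand(6, 4)
U, S, VT = np.linalg.svd(X, full_matrices=False)
U_recovered = (X @ VT.T) / S            # S broadcasts across the columns of X @ V
print(np.allclose(U_recovered, U))      # True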
def can_move_in_direction(node: Node, direction: Direction, factory: Factory):
"""If an agent has a neighbour in the specified direction, add a 1,
else 0 to the observation space. If that neighbour is free, add 1,
else 0 (a non-existing neighbour counts as occupied).
"""
has_direction = node.has_neighbour(direction)
is_free = False
if has_direction:
neighbour: Node = node.get_neighbour(direction)
if neighbour.is_rail:
neighbour_rail = factory.get_rail(neighbour)
is_free = neighbour_rail.is_free() or node in neighbour_rail.nodes
else:
is_free = not neighbour.has_table()
return is_free | 7e747b8ba2c5b385b484b16c0c108aafd3c11351 | 15,763 |
def Usable(entity_type,entity_ids_arr):
"""Only for Linux modules"""
filNam = entity_ids_arr[0]
return filNam.endswith(".ko.xz") | d64aebf033fad9d81350b9221368c2208d9a003f | 15,765 |
import numpy as np
import torch
def detection_collate(batch):
"""Custom collate fn for dealing with batches of images that have a different
number of associated object annotations (bounding boxes).
Arguments:
batch: (tuple) A tuple of tensor images and lists of annotations
Return:
A tuple containing:
1) (tensor) batch of images stacked on their 0 dim
2) (list of tensors) annotations for a given image are stacked on 0 dim
"""
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets) | ec2217eb0fcd1c348d3d6f65f07ea20ac33b55ea | 15,766 |
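A minimal call with two fake samples, each an (image tensor, annotation array) pair, shows how the images are stacked while the annotations remain a list; in practice this function would be passed as collate_fn to a DataLoader. The shapes below are arbitrary:

batch = [
    (torch.rand(3, 32, 32), np.array([[0.1, 0.1, 0.5, 0.5, 0.0]])),
    (torch.rand(3, 32, 32), np.array([[0.2, 0.2, 0.6, 0.6, 1.0],
                                      [0.0, 0.0, 0.3, 0.3, 2.0]])),
]
imgs, targets = detection_collate(batch)
print(imgs.shape)                  # torch.Size([2, 3, 32, 32])
print([t.shape for t in targets])  # [torch.Size([1, 5]), torch.Size([2, 5])]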
import scipy
def _apply_fft_high_pass_filter(data, fmin, fs=None, workers=None,
detrend=True, time_name=None):
"""Apply high-pass filter to FFT of given data.
Parameters
----------
data : xarray.DataArray
Data to filter.
fmin : float
Lowest frequency in pass band.
fs : float
Sampling frequency.
workers : int
Number of parallel jobs to use in computing FFT.
detrend : bool
If True, remove linear trend from data before computing FFT.
time_name : str
Name of the time coordinate.
Returns
-------
filtered : xarray.DataArray
Array containing the high-pass filtered data.
"""
data = rdu.ensure_data_array(data)
time_name = time_name if time_name is not None else rdu.get_time_name(data)
feature_dims = [d for d in data.dims if d != time_name]
# Handle case in which data is simply a time-series
if not feature_dims:
original_shape = None
else:
original_shape = [data.sizes[d] for d in feature_dims]
time_dim_pos = data.get_axis_num(time_name)
if time_dim_pos != 0:
data = data.transpose(*([time_name] + feature_dims))
# Convert to 2D array
n_samples = data.sizes[time_name]
if feature_dims:
        n_features = np.prod(original_shape)
else:
n_features = 1
flat_data = data.values.reshape((n_samples, n_features))
rdu.check_fixed_missing_values(flat_data, axis=0)
valid_data, missing_features = rdu.remove_missing_features(flat_data)
valid_features = [d for d in range(n_features)
if d not in missing_features]
valid_data = valid_data.swapaxes(0, 1)
if detrend:
valid_data = scipy.signal.detrend(
valid_data, axis=-1, type='linear')
# Compute spectrum and apply high-pass filter
spectrum = rfft(valid_data, axis=-1, workers=workers)
fft_freqs = rfftfreq(n_samples, d=(1.0 / fs))
filter_mask = fft_freqs < fmin
spectrum[..., filter_mask] = 0.0
filtered_valid_data = irfft(
spectrum, n=n_samples, axis=-1, workers=workers).swapaxes(0, 1)
if rdu.is_dask_array(flat_data):
filtered_cols = [None] * n_features
pos = 0
for j in range(n_features):
if j in valid_features:
filtered_cols[j] = filtered_valid_data[:, pos].reshape(
(n_samples, 1))
pos += 1
else:
filtered_cols[j] = da.full((n_samples, 1), np.NaN)
filtered_data = da.hstack(filtered_cols)
else:
filtered_data = np.full((n_samples, n_features), np.NaN)
filtered_data[:, valid_features] = filtered_valid_data
if original_shape:
filtered_data = filtered_data.reshape([n_samples] + original_shape)
filtered_dims = [time_name] + feature_dims
else:
filtered_data = filtered_data.ravel()
filtered_dims = [time_name]
filtered_coords = deepcopy(data.coords)
result = xr.DataArray(
filtered_data, coords=filtered_coords, dims=filtered_dims)
if time_dim_pos != 0:
result = result.transpose(*data.dims)
return result | a1e345b19c6f39b7c667a03b2a95f824f3c15aa8 | 15,768 |
def truncate(array:np.ndarray, intensity_profile:np.ndarray, seedpos:int, iso_split_level:float)->np.ndarray:
"""Function to truncate an intensity profile around its seedposition.
Args:
array (np.ndarray): Input array.
intensity_profile (np.ndarray): Intensities for the input array.
seedpos (int): Seedposition.
iso_split_level (float): Split level.
Returns:
np.ndarray: Truncated array.
"""
minima = int_list_to_array(get_minpos(intensity_profile, iso_split_level))
if len(minima) > 0:
left_minima = minima[minima < seedpos]
right_minima = minima[minima > seedpos]
# If the minimum is smaller than the seed
if len(left_minima) > 0:
minpos = left_minima[-1]
else:
minpos = 0
if len(right_minima) > 0:
maxpos = right_minima[0]
else:
maxpos = len(array)
array = array[minpos:maxpos+1]
return array | 81d3cd02d9e784bfbdaf5c77041ba1192c72b2dc | 15,769 |
import warnings
def fitting_process_parent(scouseobject, SAA, key, spec, parent_model):
"""
Pyspeckit fitting of an individual spectrum using the parent SAA model
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
"""
# Check the model
happy = False
initfit = True
fit_dud = False
while not happy:
if np.all(np.isfinite(np.array(spec.flux))):
if initfit:
guesses = np.asarray(parent_model.params)
if np.sum(guesses) != 0.0:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
spec.specfit(interactive=False, \
clear_all_connections=True,\
xmin=scouseobject.ppv_vol[0], \
xmax=scouseobject.ppv_vol[1], \
fittype = scouseobject.fittype, \
guesses = guesses,\
verbose=False,\
use_lmfit=True)
log.setLevel(old_log)
modparnames = spec.specfit.fitter.parnames
modncomps = spec.specfit.npeaks
modparams = spec.specfit.modelpars
moderrors = spec.specfit.modelerrs
modrms = spec.error[0]
_inputs = [modparnames, [modncomps], modparams, moderrors, [modrms]]
happy, guesses = check_spec(scouseobject, parent_model, _inputs, happy)
initfit = False
else:
# If no satisfactory model can be found - fit a dud!
fit_dud=True
happy = True
else:
# If no satisfactory model can be found - fit a dud!
fit_dud = True
happy = True
if fit_dud:
bf = fitting_process_duds(scouseobject, SAA, key, spec)
else:
bf = fit(spec, idx=key, scouse=scouseobject)
return bf | 763ac6bca7db2813c0b2d9dc67baa541b9596546 | 15,770 |
def _load_grammar(grammar_path):
"""Lee una gramática libre de contexto almacenada en un archivo .cfg y
la retorna luego de realizar algunas validaciones.
Args:
grammar_path (str): Ruta a un archivo .cfg conteniendo una gramática
libre de contexto en el formato utilizado por NLTK.
Raises:
InvalidGrammarException: en caso de que la gramática no sea válida.
Returns:
nltk.CFG: Gramática libre de contexto leída del archivo.
"""
grammar = nltk.data.load('file:{}'.format(grammar_path))
if grammar.start().symbol() != _START_PRODUCTION:
raise InvalidGrammarException('Start rule must be "{}"'.format(
_START_PRODUCTION))
if not grammar.is_nonempty():
raise InvalidGrammarException('Empty productions are not allowed')
nonterminals = set()
terminals = {token_name for token_name, _ in _TOKEN_TYPES}
for production in grammar.productions():
nonterminals.add(production.lhs().symbol())
for production in grammar.productions():
for element in production.rhs():
symbol = str(element)
if nltk.grammar.is_nonterminal(element):
if symbol not in nonterminals:
raise InvalidGrammarException(
'Invalid nonterminal: {}'.format(symbol))
elif symbol not in terminals:
raise InvalidGrammarException(
'Invalid terminal: {}'.format(symbol))
return grammar | a7432ca9ef4a19b2ec07dcf2434e1dfd140333b9 | 15,771 |
from datetime import datetime
def time_span(ts):
"""计算时间差"""
delta = datetime.now() - ts.replace(tzinfo=None)
if delta.days >= 365:
return '%d年前' % (delta.days / 365)
elif delta.days >= 30:
return '%d个月前' % (delta.days / 30)
elif delta.days > 0:
return '%d天前' % delta.days
elif delta.seconds < 60:
return "%d秒前" % delta.seconds
elif delta.seconds < 60 * 60:
return "%d分钟前" % (delta.seconds / 60)
else:
return "%d小时前" % (delta.seconds / 60 / 60) | b93100a0ac3d7b7f45ea7f26b03a0f0149cce1a3 | 15,772 |
def check_point(point_a, point_b, alpha, mask):
"""
Test the point "alpha" of the way from P1 to P2
See if it is on a face of the cube
Consider only faces in "mask"
"""
plane_point_x = lerp(alpha, point_a[0], point_b[0])
plane_point_y = lerp(alpha, point_a[1], point_b[1])
plane_point_z = lerp(alpha, point_a[2], point_b[2])
plane_point = (plane_point_x, plane_point_y, plane_point_z)
return face_plane(plane_point) & mask | a3848840edd7ab2408197463a7c688954e456a66 | 15,773 |
def gIndex(df, query_txt, coluna_citacoes:str):
"""Calcula índice g"""
df = df.query(query_txt).sort_values(by=[coluna_citacoes],ascending=False)
df = df.reset_index(drop=True)
df.index+= 1
df['g^2'] = df.index**2
df['citações acumuladas'] = df[coluna_citacoes].cumsum()
df['corte'] = abs(df['g^2'] - df['citações acumuladas'])
posicao_g = df['corte'].idxmin()
return df.loc[posicao_g]['g^2'] | 2ef7343026ddf214a5fea7771c1a67887a87f320 | 15,774 |
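# Hypothetical usage sketch for `gIndex` above — the DataFrame, query string and column
# name below are made up for illustration; only pandas and the function above are assumed.
import pandas as pd
papers = pd.DataFrame({"ano": [2020] * 7,
                       "citacoes": [10, 8, 6, 4, 2, 1, 0]})
print(gIndex(papers, "ano == 2020", "citacoes"))  # -> 25, the g^2 value at the g-index position (g = 5)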
from typing import List
def dockerize_cli_args(arg_str: str, container_volume_root="/home/local") -> str:
"""Return a string with all host paths converted to their container equivalents.
Parameters
----------
arg_str : str
The cli arg string to convert
container_volume_root : str, optional
The container directory which is mapped to local working directory,
by default "/home/local"
Returns
-------
str
A string with host paths converted to container paths.
"""
args = arg_str.split(" ")
newargs: List[str] = []
for arg in args:
if uio.file_exists(arg):
newargs.append(_dockerize_path(arg, container_volume_root))
elif "=" in arg:
left, right = arg.split("=")[0], "=".join(arg.split("=")[1:])
if uio.file_exists(right):
newargs.append(
f"{left}={_dockerize_path(right, container_volume_root)}"
)
else:
newargs.append(arg)
return " ".join(newargs) | de4ce13700459089489a8e0d3d10795931d5ba51 | 15,775 |
def _invert_monoms(p1):
"""
Compute ``x**n * p1(1/x)`` for a univariate polynomial ``p1`` in ``x``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> from sympy.polys.ring_series import _invert_monoms
>>> R, x = ring('x', ZZ)
>>> p = x**2 + 2*x + 3
>>> _invert_monoms(p)
3*x**2 + 2*x + 1
See Also
========
sympy.polys.densebasic.dup_reverse
"""
terms = list(p1.items())
terms.sort()
deg = p1.degree()
R = p1.ring
p = R.zero
cv = p1.listcoeffs()
mv = p1.listmonoms()
for i in range(len(mv)):
p[(deg - mv[i][0],)] = cv[i]
return p | 790e308f4ec5b689f1bb89d33f5ecf6aaa858334 | 15,776 |
from typing import Dict
def cleaned_picker_data(date: dt.date) -> Dict:
"""Retrieve and process data about Podcast Picker visits
from Webtrekk API for a specific date.
Args:
date (dt.date): Date to request data for.
Returns:
Dict: Reply from API.
"""
config = AnalysisConfig(
[
AnalysisObject("Seiten"),
],
metrics=[
Metric(
"Visits",
sort_order="desc",
),
Metric(
"Visits",
metric_filter=Filter(
filter_rules=[
FilterRule("Werbemittel", "=", "*"),
]
),
),
Metric(
"Ausstiege",
),
],
analysis_filter=Filter(
filter_rules=[
FilterRule("Seiten", "=", "*Podcast-Picker*"),
],
),
start_time=date,
stop_time=date,
row_limit=10000,
)
webtrekk = Webtrekk()
with webtrekk.session():
analysis = webtrekk.get_analysis_data(dict(config))
data = analysis["analysisData"]
date_start = analysis["timeStart"]
date_end = analysis["timeStop"]
logger.info("Start scraping Webtrekk Data between {} and {}.", date_start, date_end)
data_dict = {}
for element in data[:-1]:
name = normalize_name(element[0].split("_")[-1])
item = dict(
visits=int(element[1]),
visits_campaign=int(element[2]),
exits=int(element[3]),
)
if name in data_dict:
data_dict[name]["visits"] += item["visits"]
data_dict[name]["visits_campaign"] += item["visits_campaign"]
data_dict[name]["exits"] += item["exits"]
else:
data_dict[name] = item
return data_dict | 7b12183ea54a05e20c72a2826bb4adb6e2b6a6ac | 15,777 |
import math
def create_model(args, vocab_size, num_labels, mode='train'):
"""create lac model"""
# model's input data
words = fluid.data(name='words', shape=[-1, 1], dtype='int64', lod_level=1)
targets = fluid.data(
name='targets', shape=[-1, 1], dtype='int64', lod_level=1)
if mode == "train":
print("create model mode: ", mode)
teacher_crf_decode = fluid.data(
name='teacher_crf_decode', shape=[-1, 1], dtype='float32', lod_level=1)
else:
print("create model mode: ", mode)
teacher_crf_decode = None
feed_list = [words, targets]
if teacher_crf_decode:
feed_list.append(teacher_crf_decode)
pyreader = fluid.io.DataLoader.from_generator(
feed_list=feed_list,
capacity=200,
use_double_buffer=True,
iterable=False)
# for test or train process
avg_cost, crf_avg_cost, teacher_cost, crf_decode= nets.lex_net(
words, args, vocab_size, num_labels, teacher_crf_decode,for_infer=False, target=targets)
(precision, recall, f1_score, num_infer_chunks, num_label_chunks,
num_correct_chunks) = fluid.layers.chunk_eval(
input=crf_decode,
label=targets,
chunk_scheme="IOB",
num_chunk_types=int(math.ceil((num_labels - 1) / 2.0)))
chunk_evaluator = fluid.metrics.ChunkEvaluator()
chunk_evaluator.reset()
ret = {
"pyreader": pyreader,
"words": words,
"targets": targets,
"avg_cost": avg_cost,
"crf_avg_cost": crf_avg_cost,
"teacher_cost": teacher_cost,
"crf_decode": crf_decode,
"precision": precision,
"recall": recall,
"f1_score": f1_score,
"chunk_evaluator": chunk_evaluator,
"num_infer_chunks": num_infer_chunks,
"num_label_chunks": num_label_chunks,
"num_correct_chunks": num_correct_chunks
}
return ret | 0996c0a9f8d97463816946b50af112f3738676df | 15,778 |
def escape_string(value):
"""escape_string escapes *value* but not surround it with quotes.
"""
value = value.replace('\\', '\\\\')
value = value.replace('\0', '\\0')
value = value.replace('\n', '\\n')
value = value.replace('\r', '\\r')
value = value.replace('\032', '\\Z')
value = value.replace("'", "\\'")
value = value.replace('"', '\\"')
return value | 1373ea81d22d246c0c0429d6588995e719bd61fb | 15,779 |
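# Hypothetical usage sketch for `escape_string` above — assumes only the function defined above.
print(escape_string("O'Reilly said: \"hi\"\n"))  # O\'Reilly said: \"hi\"\n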
def _supports_masking(remask_kernel: bool):
"""Returns a decorator that turns layers into layers supporting masking.
Specifically:
1) `init_fn` is left unchanged.
2) `apply_fn` is turned from
a function that accepts a `mask=None` keyword argument (which indicates
`inputs[mask]` must be masked), into
a function that accepts a `mask_constant=None` keyword argument (which
indicates `inputs[inputs == mask_constant]` must be masked).
3) `kernel_fn` is modified to
3.a) propagate the `kernel.mask1` and `kernel.mask2` through intermediary
layers, and,
3.b) if `remask_kernel == True`, zeroes-out covariances between entries of
which at least one is masked.
4) If the decorated layers has a `mask_fn`, it is used to propagate masks
forward through the layer, in both `apply_fn` and `kernel_fn`. If not, it is
assumed the mask remains unchanged.
Must be applied before the `layer` decorator.
Args:
remask_kernel: `True` to zero-out kernel covariance entries between masked
inputs after applying `kernel_fn`. Some layers don't need this and setting
`remask_kernel=False` can save compute.
Returns:
A decorator that turns functions returning
`(init_fn, apply_fn, kernel_fn[, mask_fn])`
into functions returning
`(init_fn, apply_fn_with_masking, kernel_fn_with_masking)`.
"""
def supports_masking(layer):
@utils.wraps(layer)
def layer_with_masking(*args, **kwargs):
layer_fns = layer(*args, **kwargs)
init_fn, apply_fn, kernel_fn = layer_fns[:3]
if len(layer_fns) == 3:
# No mask propagation function supplied - use identity.
_mask_fn = lambda mask, input_shape: mask
elif len(layer_fns) == 4:
# Custom mask propagation function supplied.
_mask_fn = layer_fns[3]
else:
raise ValueError(f'Expected 3 (`init_fn`, `apply_fn`, `kernel_fn`) or 4'
f' (..., `mask_fn`) layer functions, '
f'got {len(layer_fns)}.')
@utils.wraps(_mask_fn)
def mask_fn(mask, input_shape):
if mask is None:
return None
return _mask_fn(mask, input_shape)
def apply_fn_with_masking(params, inputs, *,
mask_constant=None, **kwargs):
inputs = utils.get_masked_array(inputs, mask_constant)
inputs, mask = inputs.masked_value, inputs.mask
outputs = apply_fn(params, inputs, mask=mask, **kwargs)
outputs_mask = mask_fn(mask,
inputs.shape if isinstance(inputs, np.ndarray)
else [i.shape for i in inputs])
if outputs_mask is None:
return outputs
return utils.MaskedArray(outputs, outputs_mask)
def kernel_fn_with_masking(k: Kernels, **user_reqs):
if isinstance(k, Kernel):
mask1 = mask_fn(k.mask1, k.shape1)
mask2 = mask_fn(k.mask2, k.shape2)
elif isinstance(k, list):
mask1 = mask_fn([k.mask1 for k in k],
[k.shape1 for k in k])
mask2 = mask_fn([k.mask2 for k in k],
[k.shape2 for k in k])
else:
          raise TypeError(f'Expected a `Kernel` or a list of `Kernel`s, got {type(k)}.')
k = kernel_fn(k, **user_reqs) # type: Kernel
if remask_kernel:
k = k.mask(mask1, mask2)
else:
k = k.replace(mask1=mask1, mask2=mask2)
return k
if hasattr(kernel_fn, _INPUT_REQ):
setattr(kernel_fn_with_masking,
_INPUT_REQ,
getattr(kernel_fn, _INPUT_REQ))
return init_fn, apply_fn_with_masking, kernel_fn_with_masking
return layer_with_masking
return supports_masking | 0fd189ee791edb394fe5fb0efd1f7dd6d944c689 | 15,781 |
def warnings(request: HttpRequest):
"""Adiciona alguns avisos no content"""
warning = list()
if hasattr(request, 'user'):
user: User = request.user
if not user.is_anonymous:
            # Check the user's email
if user.email is None or user.email == "":
warning.append({
'message': 'Você não possui um e-mail registrado, por favor registre um',
'link_page_name': 'escola:self-email-change'
})
else:
logger.info("Não há atributo user")
return {'warnings': warning} | 990c45c03235eca90b1074d8cc1b6a27b8c5c014 | 15,782 |
def _get_raw_key(args, key_field_name):
"""Searches for key values in flags, falling back to a file if necessary.
Args:
args: An object containing flag values from the command surface.
key_field_name (str): Corresponds to a flag name or field name in the key
file.
Returns:
The flag value associated with key_field_name, or the value contained in the
key file.
"""
flag_key = getattr(args, key_field_name, None)
if flag_key is not None:
return flag_key
return _read_key_store_file().get(key_field_name) | 4af0b0c680b4b0642f40f3a08718239da6de552d | 15,783 |
def get_images(headers, name, handler_registry=None,
handler_override=None):
"""
This function is deprecated. Use Header.data instead.
Load images from a detector for given Header(s).
Parameters
----------
headers : Header or list of Headers
name : string
field name (data key) of a detector
handler_registry : dict, optional
mapping spec names (strings) to handlers (callable classes)
handler_override : callable class, optional
overrides registered handlers
Example
-------
>>> header = DataBroker[-1]
>>> images = Images(header, 'my_detector_lightfield')
>>> for image in images:
# do something
"""
res = DataBroker.get_images(headers=headers, name=name,
handler_registry=handler_registry,
handler_override=handler_override)
return res | fcbf887d1a5c71ab4f6c5dcc91df0743497ccefb | 15,784 |
from typing import Union
from typing import Sequence
from typing import Any
from typing import Tuple
def shape_is_ok(sequence: Union[Sequence[Any], Any], expected_shape: Tuple[int, ...]) -> bool:
"""
Check the number of items the array has and compare it with the shape product
"""
try:
sequence_len = len(flatten(sequence))
except Exception as err:
logger.info(f"Error when trying to compare shapes. {err}")
return False
return prod(expected_shape) == sequence_len | f23c0cec12e9038b693e345ccc6909cc2d25c8b1 | 15,785 |
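# Hypothetical usage sketch for `shape_is_ok` above — assumes the module-level helpers
# `flatten`, `prod` and `logger` referenced above are available.
print(shape_is_ok([[1, 2, 3], [4, 5, 6]], (2, 3)))  # True  (6 items == 2 * 3)
print(shape_is_ok([[1, 2], [3]], (2, 2)))           # False (3 items != 4)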
def ChannelSE(reduction=16, **kwargs):
"""
Squeeze and Excitation block, reimplementation inspired by
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Args:
reduction: channels squeeze factor
"""
channels_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def layer(input_tensor):
# get number of channels/filters
channels = backend.int_shape(input_tensor)[channels_axis]
x = input_tensor
        # squeeze and excitation block in PyTorch style with 1x1 convolutions
x = layers.GlobalAveragePooling2D()(x)
x = layers.Lambda(expand_dims, arguments={'channels_axis': channels_axis})(x)
x = layers.Conv2D(channels // reduction, (1, 1), kernel_initializer='he_uniform')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(channels, (1, 1), kernel_initializer='he_uniform')(x)
x = layers.Activation('sigmoid')(x)
# apply attention
x = layers.Multiply()([input_tensor, x])
return x
return layer | 791089f5729c39d4397afae3a18bd76205ab3185 | 15,786 |
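# Hypothetical usage sketch for `ChannelSE` above — assumes the same Keras `layers`/`backend`
# imports and the `expand_dims` helper used by the function are available in the module.
inputs = layers.Input(shape=(32, 32, 64))
outputs = ChannelSE(reduction=8)(inputs)  # same shape as `inputs`, channels reweighted by attention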
def add_trainingset_flag(cam_parquet,
trainingset_pkl_path,
cam=None):
"""
Add to a single-cam parquet the information flags (adding columns)
indicating if a given cam view was used in a training set for
melting, hydro classif or riming degree
Input
cam_parquet: parquet file to add the columns to
trainingset_pkl_path: path where the pickles of the trainingset flags are locally stored
cam = 'cam0', 'cam1' or 'cam2'
"""
print('CAM: '+cam)
# Read the parquet file
table = pd.read_parquet(cam_parquet)
flake_uid = table.datetime.apply(lambda x: x.strftime('%Y.%m.%d_%H.%M.%S'))+'_flake_'+table.flake_number_tmp.apply(str)
# 1 Add hydro columns
add = pd.read_pickle(trainingset_pkl_path+'hydro_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.class_id.iloc[ind2]
table['hl_snowflake'] = is_in
table['hl_snowflake_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for hydro' )
# 2 Add melting columns
add = pd.read_pickle(trainingset_pkl_path+'melting_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.melting.iloc[ind2]
table['hl_melting'] = is_in
table['hl_melting_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for melting' )
# 3 Add riming columns
add = pd.read_pickle(trainingset_pkl_path+'riming_trainingset_'+cam+'.pkl')
is_in = np.asarray([0] * len(table))
value_in = np.asarray([np.nan] * len(table))
# Intersect
intersect = np.intersect1d(flake_uid,add.flake_id,return_indices=True)
ind1=intersect[1]
ind2=intersect[2]
# Fill
is_in[ind1] = 1
value_in[ind1] = add.riming_id.iloc[ind2]
table['hl_riming'] = is_in
table['hl_riming_class_id'] = value_in
print('Found: '+str(len(ind1))+' in training, for riming' )
# Overwrite
table = pa.Table.from_pandas(table)
pq.write_table(table, cam_parquet)
return(None) | be6cfd10ab6e9fb7a71b55c13f343caabafa62da | 15,787 |
def pw_wavy(n_samples=200, n_bkps=3, noise_std=None, seed=None):
"""Return a 1D piecewise wavy signal and the associated changepoints.
Args:
n_samples (int, optional): signal length
n_bkps (int, optional): number of changepoints
noise_std (float, optional): noise std. If None, no noise is added
seed (int): random seed
Returns:
        tuple: signal of shape (n_samples,), list of breakpoints
"""
# breakpoints
bkps = draw_bkps(n_samples, n_bkps, seed=seed)
# we create the signal
f1 = np.array([0.075, 0.1])
f2 = np.array([0.1, 0.125])
freqs = np.zeros((n_samples, 2))
for sub, val in zip(np.split(freqs, bkps[:-1]), cycle([f1, f2])):
sub += val
tt = np.arange(n_samples)
# DeprecationWarning: Calling np.sum(generator) is deprecated
# Use np.sum(np.from_iter(generator)) or the python sum builtin instead.
signal = np.sum([np.sin(2 * np.pi * tt * f) for f in freqs.T], axis=0)
if noise_std is not None:
rng = np.random.default_rng(seed=seed)
noise = rng.normal(scale=noise_std, size=signal.shape)
signal += noise
return signal, bkps | 94a9a681b763a4db36d2186a93e3c5bd0cbd2389 | 15,788 |
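# Hypothetical usage sketch for `pw_wavy` above — assumes the module-level helper
# `draw_bkps` used by the function is available (as in ruptures.datasets).
signal, bkps = pw_wavy(n_samples=500, n_bkps=4, noise_std=0.1, seed=42)
print(signal.shape, bkps)  # expected: (500,) and 5 sorted indices, the last equal to 500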
def date():
"""
    Column type for a date field (returns a SQLAlchemy ``Column`` of type ``Date``).
"""
return Column(Date) | 3953cd155ed03a8cafcc09ee45efcab00c557611 | 15,789 |
import logging
def get_people_urls(gedcom_data, apid_full_map):
"""
Read in all the person URLs for later reference
"""
people = {}
found = False
logging.info("Extracting person specific URL information")
for line in gedcom_data.split("\n"):
if len(line) > 5:
tag = line.split(" ")[1]
if "@P" in tag:
person = tag
found = False
continue
if tag == "_APID" and not found:
apid = line.split(" ")[2]
if apid in apid_full_map:
if "person_url" in apid_full_map[apid]:
if apid_full_map[apid]["person_url"] != "":
people.update({person: apid_full_map[apid]["person_url"]})
found = True
logging.info("Person URL extraction completed")
return people | 2495a39c988d726cf8b4f63a34a963d6a442dc32 | 15,792 |
import torch
def permute_masks(old_masks):
"""
    Function to randomly permute the layer-wise masks of a neural network in a global manner.
    Arguments
    ---------
    old_masks: List containing all the layer-wise masks of the neural network, mandatory. No default.
Returns
-------
new_masks: List containing all the masks permuted globally
"""
layer_wise_flatten = [] # maintain the layerwise flattened tensor
for i in range(len(old_masks)):
layer_wise_flatten.append(old_masks[i].flatten())
global_flatten = []
for i in range(len(layer_wise_flatten)):
if len(global_flatten) == 0:
global_flatten.append(layer_wise_flatten[i].cpu())
else:
global_flatten[-1] = np.append(global_flatten[-1], layer_wise_flatten[i].cpu())
permuted_mask = np.random.permutation(global_flatten[-1])
new_masks = []
idx1 = 0
idx2 = 0
for i in range(len(old_masks)):
till_idx = old_masks[i].numel()
idx2 = idx2 + till_idx
new_masks.append(permuted_mask[idx1:idx2].reshape(old_masks[i].shape))
idx1 = idx2
# Convert to tensor
for i in range(len(new_masks)):
new_masks[i] = torch.tensor(new_masks[i])
return new_masks | 55a45f0e6c651bb4df0a5b8d58f1f50f992cdfb8 | 15,793 |
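# Hypothetical usage sketch for `permute_masks` above — assumes only the function and the
# torch/numpy imports it relies on.
masks = [torch.ones(2, 3), torch.zeros(4)]
shuffled = permute_masks(masks)
print([tuple(m.shape) for m in shuffled])      # shapes preserved: [(2, 3), (4,)]
print(float(sum(m.sum() for m in shuffled)))   # number of ones preserved: 6.0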
import asyncio
async def file_clang_formatted_correctly(filename, semaphore, verbose=False):
"""
Checks if a file is formatted correctly and returns True if so.
"""
ok = True
# -style=file picks up the closest .clang-format
cmd = "{} -style=file {}".format(CLANG_FORMAT_PATH, filename)
async with semaphore:
proc = await asyncio.create_subprocess_shell(cmd, stdout=asyncio.subprocess.PIPE)
# Read back the formatted file.
stdout, _ = await proc.communicate()
formatted_contents = stdout.decode()
# Compare the formatted file to the original file.
with open(filename) as orig:
orig_contents = orig.read()
if formatted_contents != orig_contents:
ok = False
if verbose:
print("{} is not formatted correctly".format(filename))
return ok | 0372fe5da05c3ede36db0bc35d228adde6c0aaa9 | 15,794 |
import json
def service_builder(client: Client, is_for_update: bool, endpoint_tag: str,
name: str, service_type: str, protocol: str = None, source_port: int = None,
destination_port: int = None, protocol_name: str = None,
icmp_type: str = None, icmp_code: str = None,
icmp_v6_type: str = None, icmp_v6_code: str = None) -> dict:
"""Builder for the service object - build the body of the request
Args:
client (Client): Sophos XG Firewall Client
is_for_update (bool): True if the object should be updated
endpoint_tag (str): The endpoint_tag of the object we want to get data from
name (str): The name of the object we want to add/update
service_type (str, optional): Service Type information of the service
protocol (str, optional): Protocol information of the service
source_port (str, optional): Source Port information of the service
destination_port (str, optional): Destination Port information of the service
protocol_name (str, optional): Protocol Name information of the service
icmp_type (str, optional): ICMP Type information of the service
icmp_code (str, optional): ICMP Code information of the service
icmp_v6_type (str, optional): ICMP V6 Type information of the service
icmp_v6_code (str, optional): ICMP V6 Code information of the service
Raises:
Exception: Missing protocol, source port and destination port
Exception: Missing protocol name
Exception: Missing icmp_type and icmp_code
Exception: Missing icmp_v6_type and icmp_v6_code
Returns:
dict: returned dictionary
"""
previous_service_details = []
# if the object need to be updated, merge between old and new information will happen
if is_for_update:
previous_object = client.get_item_by_name(endpoint_tag, name)
previous_object = json.loads(xml2json(previous_object.text))
check_error_on_response(previous_object)
service_type = retrieve_dict_item_recursively(previous_object, 'Type')
previous_service_details = retrieve_dict_item_recursively(previous_object, 'ServiceDetail')
if not previous_service_details:
previous_service_details = []
elif not isinstance(previous_service_details, list):
previous_service_details = [previous_service_details]
json_data = {
'Name': name,
'Type': service_type,
}
if service_type == 'TCPorUDP':
if not (protocol and source_port and destination_port):
raise Exception('Please provide protocol, source_port and destination_port')
service_details = {
'Protocol': protocol,
'SourcePort': source_port,
'DestinationPort': destination_port
}
elif service_type == 'IP':
if not protocol_name:
raise Exception('Please provide protocol_name')
service_details = {
'ProtocolName': protocol_name
}
elif service_type == 'ICMP':
if not (icmp_type and icmp_code):
raise Exception('Please provide icmp_type and icmp_code')
service_details = {
'ICMPType': icmp_type,
'ICMPCode': icmp_code
}
else: # type == 'ICMPv6'
if not (icmp_v6_type and icmp_v6_code):
raise Exception('Please provide icmp_v6_type and icmp_v6_code')
service_details = {
'ICMPv6Type': icmp_v6_type,
'ICMPv6Code': icmp_v6_code
}
previous_service_details.append(service_details)
json_data.update({
'ServiceDetails': {
'ServiceDetail': previous_service_details
}
})
return remove_empty_elements(json_data) | ef51eeff73d7f32f92185ebe6607af525816d13c | 15,795 |
def fkl( angles ):
"""
Convert joint angles and bone lenghts into the 3d points of a person.
Based on expmap2xyz.m, available at
https://github.com/asheshjain399/RNNexp/blob/7fc5a53292dc0f232867beb66c3a9ef845d705cb/structural_rnn/CRFProblems/H3.6m/mhmublv/Motion/exp2xyz.m
Args
angles: 99-long vector with 3d position and 3d joint angles in expmap format
parent: 32-long vector with parent-child relationships in the kinematic tree
offset: 96-long vector with bone lenghts
rotInd: 32-long list with indices into angles
expmapInd: 32-long list with indices into expmap angles
Returns
xyz: 32x3 3d points that represent a person in 3d space
"""
parent, offset, posInd, expmapInd = _some_variables()
assert len(angles) == 117
# Structure that indicates parents for each joint
njoints = 38
xyzStruct = [dict() for x in range(njoints)]
for i in np.arange( njoints ):
# try:
# if not rotInd[i] : # If the list is empty
# xangle, yangle, zangle = 0, 0, 0
# else:
# xangle = angles[ rotInd[i][2]-1 ]
# yangle = angles[ rotInd[i][1]-1 ]
# zangle = angles[ rotInd[i][0]-1 ]
# except:
# print (i)
try:
if not posInd[i] : # If the list is empty
xangle, yangle, zangle = 0, 0, 0
else:
xangle = angles[ posInd[i][2]-1 ]
yangle = angles[ posInd[i][1]-1 ]
zangle = angles[ posInd[i][0]-1 ]
except:
print (i)
r = angles[ expmapInd[i] ]
thisRotation = expmap2rotmat(r)
thisPosition = np.array([zangle, yangle, xangle])
if parent[i] == -1: # Root node
xyzStruct[i]['rotation'] = thisRotation
xyzStruct[i]['xyz'] = np.reshape(offset[i,:], (1,3)) + thisPosition
else:
xyzStruct[i]['xyz'] = (offset[i,:] + thisPosition).dot( xyzStruct[ parent[i] ]['rotation'] ) + xyzStruct[ parent[i] ]['xyz']
xyzStruct[i]['rotation'] = thisRotation.dot( xyzStruct[ parent[i] ]['rotation'] )
xyz = [xyzStruct[i]['xyz'] for i in range(njoints)]
xyz = np.array( xyz ).squeeze()
xyz = xyz[:,[0,2,1]]
return np.reshape( xyz, [-1] ) | 667f1356bf2d56ec5adad7bd723167d4c741faae | 15,796 |
import builtins
def no_matplotlib(monkeypatch):
""" Mock an import error for matplotlib"""
import_orig = builtins.__import__
def mocked_import(name, globals, locals, fromlist, level):
""" """
if name == 'matplotlib.pyplot':
raise ImportError("This is a mocked import error")
return import_orig(name, globals, locals, fromlist, level)
monkeypatch.setattr(builtins, '__import__', mocked_import) | 90182d9dbbde52779109d4d6cf43ae4fbac140d6 | 15,797 |
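# Hypothetical usage sketch for `no_matplotlib` above — assumes the helper is registered
# as a pytest fixture (e.g. decorated with @pytest.fixture), which its use of `monkeypatch` implies.
import pytest
def test_pyplot_import_fails(no_matplotlib):
    with pytest.raises(ImportError):
        import matplotlib.pyplot  # noqa: F401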
from typing import Callable
from typing import Any
from typing import Optional
def profile(func: Callable[..., Any]) -> Callable[..., Any]:
"""
Create a decorator for wrapping a provided function in a LineProfiler context.
Parameters
----------
func : callable
The function that is to be wrapped inside the LineProfiler context.
Returns
-------
wrapper : callable
The context containing the wrapped function.
"""
@wraps(func)
def wrapper(*args: Optional[Any], **kwargs: Optional[Any]) -> LineProfiler:
prof = LineProfiler()
try:
return prof(func)(*args, **kwargs)
finally:
prof.print_stats()
return wrapper | 13db0f994f472b95535a27ab8bb58c01491eb092 | 15,798 |
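# Hypothetical usage sketch for the `profile` decorator above — assumes the
# LineProfiler/wraps imports used by the function; per-line timings are printed on exit.
@profile
def cumulative_sum(n):
    total = 0
    for i in range(n):
        total += i
    return total

cumulative_sum(10_000)  # returns 49995000 and prints line-by-line statistics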
import torch
def phase_comp(psi_comp, uwrap=False, dens=None):
"""Compute the phase (angle) of a single complex wavefunction component.
Parameters
----------
    psi_comp : NumPy :obj:`array` or PyTorch :obj:`Tensor`
        A single wavefunction component.
    uwrap : bool, optional
        If True, unwrap the phase (only implemented for NumPy input).
    dens : NumPy :obj:`array` or PyTorch :obj:`Tensor`, optional
        Density of the component; where it is negligible the returned phase is zeroed.
Returns
-------
angle : NumPy :obj:`array` or PyTorch :obj:`Tensor`
The phase (angle) of the component's wavefunction.
"""
if isinstance(psi_comp, np.ndarray):
ang = np.angle(psi_comp)
if uwrap:
ang = rest.unwrap_phase(ang)
elif isinstance(psi_comp, torch.Tensor):
ang = torch.angle(psi_comp)
if uwrap:
raise NotImplementedError("Unwrapping the complex phase is not "
"implemented for PyTorch tensors.")
if dens is not None:
ang[dens < (dens.max() * 1e-6)] = 0
return ang | 3a27564b2e4ad323bb9d6c5c4d344027219ccd3d | 15,799 |
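# Hypothetical usage sketch for `phase_comp` above — assumes only the function and the
# numpy import it relies on (the torch branch is not exercised here).
x = np.linspace(-1, 1, 64)
psi = np.exp(1j * np.pi * x)   # unit-magnitude, linearly varying phase
ang = phase_comp(psi)          # angles in (-pi, pi]
print(ang.min(), ang.max())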
def EG(d1,d2,P):
"""
    Compute the expected gain of player 1 when they roll d1 dice
    and player 2 rolls d2 dice.
    ----------------------------------------------------
    Args:
        - d1 : number of dice rolled by player 1
        - d2 : number of dice rolled by player 2
        - P : probability matrix
"""
s = 0
L = np.arange(1,6*d2+1)
for k in range(1,6*d1+1):
s += np.sum(P[d1,k]*P[d2,L[L<k]]) - np.sum(P[d1,k]*P[d2,L[L>k]])
return s | 7032bbff4bcf721727c2cb86d6e6f480aa520ee2 | 15,800 |
def roi_max_counts(images_sets, label_array):
"""
Return the brightest pixel in any ROI in any image in the image set.
Parameters
----------
images_sets : array
iterable of 4D arrays
        shape is: (len(images_sets), )
label_array : array
labeled array; 0 is background.
Each ROI is represented by a distinct label (i.e., integer).
Returns
-------
max_counts : int
maximum pixel counts
"""
max_cts = 0
for img_set in images_sets:
for img in img_set:
max_cts = max(max_cts, ndim.maximum(img, label_array))
return max_cts | 2a8993ddb417ac9852ac8a85a4b021cd3db46b66 | 15,802 |
import unicodedata
def normalize_full_width(text):
"""
a function to normalize full width characters
"""
return unicodedata.normalize('NFKC', text) | f8b443089e7083e11f6539f4103ce05f616170c4 | 15,803 |
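# Hypothetical usage sketch for `normalize_full_width` above — assumes only the function above.
print(normalize_full_width("ABC123"))  # "ABC123"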
import random
def make_definitions(acronym, words_by_letter, limit=1):
"""Find definitions an acronym given groupings of words by letters"""
definitions = []
for _ in range(limit):
definition = []
for letter in acronym.lower():
opts = words_by_letter.get(letter.lower(), [])
definition.append(random.choice(opts).title() if opts else "?")
definitions.append(" ".join(definition))
return definitions | bc0af7b4e81a443c0afe62c2d77ace15bd1ab306 | 15,804 |
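# Hypothetical usage sketch for `make_definitions` above — assumes only the function and the
# `random` import shown; the word pools are made up and the output varies between runs.
words_by_letter = {
    "a": ["agile", "atomic"],
    "p": ["python", "portable"],
    "i": ["interface", "index"],
}
print(make_definitions("API", words_by_letter, limit=2))
# e.g. ['Agile Portable Interface', 'Atomic Python Index']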
def plot_effective_area_from_file(file, all_cuts=False, ax=None, **kwargs):
""" """
ax = plt.gca() if ax is None else ax
if all_cuts:
names = ["", "_NO_CUTS", "_ONLY_GH", "_ONLY_THETA"]
else:
names = tuple([""])
label_basename = kwargs["label"] if "label" in kwargs else ""
kwargs.setdefault("ls", "")
for name in names:
area = QTable.read(file, hdu="EFFECTIVE_AREA" + name)[0]
kwargs["label"] = label_basename + name.replace("_", " ")
ax.errorbar(
0.5 * (area["ENERG_LO"] + area["ENERG_HI"]).to_value(u.TeV)[1:-1],
area["EFFAREA"].to_value(u.m ** 2).T[1:-1, 0],
xerr=0.5 * (area["ENERG_LO"] - area["ENERG_HI"]).to_value(u.TeV)[1:-1],
**kwargs,
)
# Style settings
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel("True energy / TeV")
ax.set_ylabel("Effective collection area / m²")
ax.grid(which="both")
ax.legend()
ax.grid(True, which="both")
return ax | b2627c767dfe8abf64eba1b8b1c1f14a4bf52d87 | 15,805 |
def get_spreading_coefficient(dist):
"""Calculate the spreading coefficient.
Args:
dist: A Distribution from a direct (GC) spreading simulation.
Returns:
The dimensionless spreading coefficient (beta*s*A).
"""
potential = -dist.log_probs
valley = np.amin(potential)
split = int(0.5 * len(potential))
plateau = np.mean(potential[split:])
return valley - plateau | 549a0052400466f64f707588e313a9e88829a4d7 | 15,806 |
from pathlib import Path
def get_config_path() -> Path:
"""Returns path to the root of the project"""
return Path(__file__).parent / "config" | b66ece2bc77717b59e88ac65746a2e3b3e8576a2 | 15,807 |
def round(x):
"""
Return ``x`` rounded to an ``Integer``.
"""
return create_RealNumber(x).round() | 403f5f0b4316ef2f06f45885d21fe352f003e193 | 15,808 |