content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import ee
def proxy(values=(0,), names=('constant',), types=('int8',)):
""" Create a proxy image with the given values, names and types
:param values: list of values for every band of the resulting image
:type values: list
:param names: list of names
:type names: list
:param types: list of band types. Options are 'int8', 'int16', 'int32',
'int64', 'uint8', 'uint16', 'uint32', 'byte', 'short', 'int', 'long',
'float' and 'double'
:type types: list
:rtype: ee.Image
"""
values = list(values)
names = list(names)
types = list(types)
tps = dict(zip(names, types))
im = ee.Image(values).rename(names).cast(tps)
# update mask
for v, n in zip(values, names):
if v is None:
band = im.select(n)
masked = band.selfMask()
im = im.addBands(masked, overwrite=True)
return im | b57c4a625d8fa8c7a76bb0f1d2202e0e5cf2d41e | 13,375 |
import six
def _to_versions(raw_ls_remote_lines, version_join, tag_re, tag_filter_re):
"""Converts raw ls-remote output lines to a sorted (descending)
list of (Version, v_str, git_hash) objects.
    This is used by the source:git method to find the latest version and git hash.
"""
ret = []
for line in raw_ls_remote_lines:
git_hash, ref = six.ensure_text(line).split('\t')
if ref.startswith('refs/tags/'):
tag = ref[len('refs/tags/'):]
if tag_filter_re and not tag_filter_re.match(tag):
continue
m = tag_re.match(tag)
if not m:
continue
v_str = m.group(1)
if version_join:
v_str = '.'.join(v_str.split(version_join))
ret.append((parse_version(v_str), v_str, git_hash))
return sorted(ret, reverse=True) | 9113d26dbec144bbc72c89ca41935305a7321a18 | 13,376 |
def arraysum(x: list) -> int:
    """
    Returns the sum of all elements of a list by iterating through it and adding them up.
    Input: list of integers
    Output: integer
    """
    total = 0
    for i in x:
        total += i
    return total | aa14eaf4e2bb800ad5e61a63ab0bc17c56dbd86d | 13,377 |
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def get_sensitivity_scores(model, features, top_n):
"""
Finds the sensitivity of each feature in features for model. Returns the top_n
feature names, features_top, alongside the sensitivity values, scores_top.
"""
# Get just the values of features
x_train = features.values
# Apply min max normalization
scaler = MinMaxScaler().fit(x_train)
x_train = scaler.transform(x_train)
# Find mean and standard deviation of each feature
x_train_avg = np.mean(x_train, axis=0).reshape(1, -1)
x_train_std = np.std(x_train, axis=0).reshape(1, -1)
prediction_mean = model.predict(x_train_avg)
scores_max = []
# Iterate over each feature
for i in range(x_train_avg.shape[1]):
# Copy x_train_avg
x_train_i = x_train_avg.copy()
# Add the standard deviation of i to that column
x_train_i[:, i] = x_train_i[:, i] + x_train_std[:, i]
result_i = model.predict(x_train_i)
# Take the difference and divide by standard deviation
diff = (result_i - prediction_mean) / x_train_std[:, i]
scores_max.append(diff.flatten()[0])
scores_max = np.absolute(scores_max)
indices_top = np.argsort(scores_max)[-top_n:]
features_top = features.iloc[:, indices_top].columns
scores_top = scores_max[indices_top]
return features_top, scores_top | a955c93691b09073be20fc65a2c6958a620f5548 | 13,378 |
import numpy as np
def mad(data):
"""Median absolute deviation"""
m = np.median(np.abs(data - np.median(data)))
return m | 6b32901a94aca256736c1cb936c8b1c1794857d7 | 13,379 |
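A quick worked check of the helper above (an illustrative sketch, assuming the `mad` function defined above is in scope):

import numpy as np

data = np.array([1, 2, 3, 4, 100])
# median is 3, absolute deviations are [2, 1, 0, 1, 97], and their median is 1
print(mad(data))  # 1.0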
async def get_intents(current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
    Fetches the list of existing intents for a particular bot.
"""
return Response(data=mongo_processor.get_intents(current_user.get_bot())).dict() | 2a62bc579f6b392bc0038bf4f555941f764d456c | 13,380 |
def find_offset(
ax: Numbers, ay: Numbers, bx: Numbers, by: Numbers, upscale: bool = True
) -> float:
    """Finds the value by which the second spectrum should be shifted along the x-axis
    to best overlap with the first spectrum. If the resolutions of the spectra are not
    identical, one of them is interpolated to match the resolution of the other. By
    default the interpolation is done on the lower-resolution spectrum; this can be
    changed by passing ``upscale = False`` to the function call.
Parameters
----------
ax
Abscissa of the first spectrum.
ay
Values of the first spectrum.
bx
Abscissa of the second spectrum.
by
Values of the second spectrum.
upscale
If interpolation should be done on more loosely spaced spectrum (default).
When set to False, spectrum with lower resolution will be treated as reference
for density of data points.
Returns
-------
float
Value, by which second spectrum should be shifted, in appropriate units.
"""
ax, ay, bx, by = unify_abscissa(ax, ay, bx, by, upscale=upscale)
shift = idx_offset(ay, by)
if shift < 0:
offset = ax[0] - bx[abs(shift)]
else:
offset = ax[shift] - bx[0]
return offset | 64e0c13a16b3ead30227ab80398fea296674385d | 13,383 |
def And(*xs, simplify=True):
"""Expression conjunction (product, AND) operator
If *simplify* is ``True``, return a simplified expression.
"""
xs = [Expression.box(x).node for x in xs]
y = exprnode.and_(*xs)
if simplify:
y = y.simplify()
return _expr(y) | 5f25e8b2f37a4bbc077f10eee561936e41defefa | 13,384 |
def get_assignment_submissions(course_id, assignment_id):
""" return a list of submissions for an assignment """
return api.get_list('courses/{}/assignments/{}/submissions'.format(course_id, assignment_id)) | eb1a6143b551298efdb6c4181e2356d759c6fd6c | 13,385 |
def send_email(to, content=None, title=None, mail_from=None,
attach=None, cc=None, bcc=None, text=None, html=None, headers=None):
"""
    :param to: recipients, e.g. '[email protected]' or '[email protected], [email protected]' or ['[email protected], [email protected]']
    :param content: mail body, plain text or HTML str
    :param title: mail subject str or list
    :param mail_from: sender address str
    :param attach: attachment list ["@/tmp/test.txt"]
    :param cc: CC recipients, same format as `to`
    :param bcc: BCC recipients, same format as `to`
    :param text: plain-text body str
    :param html: HTML body str
    :param headers: additional MIME header attributes dict
    :return: {} dict on success
"""
arg_dict = dict()
if isinstance(to, list):
to = ', '.join(to)
arg_dict['to'] = to
if isinstance(cc, list):
cc = ', '.join(cc)
arg_dict['cc'] = cc
if isinstance(bcc, list):
bcc = ', '.join(bcc)
arg_dict['bcc'] = bcc
if isinstance(title, list):
title = ''.join(title)
arg_dict['title'] = title
arg_dict['mail_from'] = mail_from
arg_dict['content'] = content
arg_dict['attach'] = attach
arg_dict['text'] = text
arg_dict['html'] = html
arg_dict['headers'] = headers or {}
e = Email()
msg = e.build_email(arg_dict)
return e.send_email(msg) | c68c4db3c96890d1e82f33666b69cd4e1ac4c116 | 13,386 |
def get_abbreviation(res_type, abbr):
"""
Returns abbreviation value from data set
@param res_type: Resource type (html, css, ...)
@type res_type: str
@param abbr: Abbreviation name
@type abbr: str
@return dict, None
"""
return get_settings_resource(res_type, abbr, 'abbreviations') | 91831f10fc2be1d7c7201b02e0d044939ce82e83 | 13,387 |
import time
def get_stock_list(month_before=12, trade_date='20200410', delta_price=(10, 200), total_mv=50, pe_ttm=(10, 200)):
"""
    month_before : fetch the stock list of all companies listed at least n months ago;
                   defaults to companies listed at least one year ago
    delta_price : used to drop stocks whose price is above delta_price; no filtering when None
    TIPS : delta_price is compared against today's price
"""
stock_list = pro.stock_basic(exchange='', list_status='L', fields='ts_code,name,market,list_date')
    # Remove ChiNext and STAR Market stocks
stock_list1 = stock_list[~stock_list['market'].isin(["科创板","创业板"])].reset_index(drop=True)
    # Remove ST, bank, and brokerage stocks
index_list = []
for i in range(len(stock_list1)):
if '银行' in stock_list1.iloc[i]['name'] \
or 'ST' in stock_list1.iloc[i]['name'] \
or '证券' in stock_list1.iloc[i]['name'] :
index_list.append(i)
for i in index_list:
stock_list1 = stock_list1.drop(i)
stock_list1 = stock_list1.reset_index(drop=True)
    # Remove stocks listed for less than the required period (one year by default)
delta_date = date_util.get_date_months_before(month_before)
stock_list2 = stock_list1[stock_list1["list_date"] <= delta_date].reset_index(drop=True)
stock_list = stock_list2.drop(['market', 'list_date'], axis=1)
    # Remove companies with a market value below the threshold (in 100 million CNY)
if total_mv is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,total_mv')
stock_list.loc[i, "total_mv"] = df.loc[0, "total_mv"] if df.empty is False else 0
except:
time.sleep(3)
stock_list = stock_list[stock_list["total_mv"] > total_mv * 10000].reset_index(drop=True)
    # Remove companies whose pe_ttm is None or outside the given range
if pe_ttm is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,pe_ttm')
stock_list.loc[i, "pe_ttm"] = df.loc[0, "pe_ttm"] if df.empty is False else None
except:
time.sleep(3)
stock_list = stock_list[stock_list['pe_ttm'] > pe_ttm[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['pe_ttm'] < pe_ttm[1]].dropna().reset_index(drop=True)
    # Remove stocks whose price at trade_date is outside the given range
if delta_price is not None:
stock_list['price'] = np.zeros(len(stock_list))
for i in range(len(stock_list)):
stock_code = stock_list.iloc[i]["ts_code"]
try:
current_df = ts.pro_bar(ts_code=stock_code, adj='qfq',
start_date=trade_date, end_date=trade_date)
if current_df.empty:
continue
stock_list.loc[i, "price"] = (current_df.loc[0, "close"] + current_df.loc[0, "pre_close"]) / 2
except:
time.sleep(3)
stock_list = stock_list[stock_list['price'] > delta_price[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['price'] < delta_price[1]].reset_index(drop=True)
stock_list.to_csv("./data_pulled/stock_date_delta_price{}.csv".format(delta_price), index=False)
return stock_list | 13f0dd7b31c297ea643ad42efb519a88907bbfd5 | 13,388 |
def dim_axis_label(dimensions, separator=', '):
"""
Returns an axis label for one or more dimensions.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
return separator.join([d.pprint_label for d in dimensions]) | f03e4eb02fc57890421bdcdaa0aea7d6541b8678 | 13,389 |
import numpy as np
def get_random_idx(k: int, size: int) -> np.ndarray:
"""
Get `k` random values of a list of size `size`.
:param k: number or random values
:param size: total number of values
:return: list of `k` random values
"""
return (np.random.rand(k) * size).astype(int) | eedcc9953e878c9b475cc18666eb621de2811dbe | 13,390 |
from typing import Union
def fhir_search_path_meta_info(path: str) -> Union[tuple, None]:
""" """
resource_type = path.split(".")[0]
properties = path.split(".")[1:]
model_cls = resource_type_to_resource_cls(resource_type)
result = None
for prop in properties:
for (
name,
jsname,
typ,
is_list,
of_many,
not_optional,
) in model_cls().elementProperties():
if prop != name:
continue
if typ not in (int, float, bool, str):
model_cls = typ
result = (jsname, is_list, of_many)
break
return result | 2117e9f09c401e2b027d3c3eb7347650eaa03582 | 13,392 |
def _is_camel_case_ab(s, index):
"""Determine if the index is at 'aB', which is the start of a camel token.
For example, with 'workAt', this function detects 'kA'."""
return index >= 1 and s[index - 1].islower() and s[index].isupper() | c21ec7d8aa7e786d1ea523106af6f9426fea01d8 | 13,393 |
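A small usage sketch for the predicate above (assuming `_is_camel_case_ab` is in scope); it only reports whether a lowercase-to-uppercase boundary sits at the given index:

# 'workAt': s[3] == 'k' is lowercase and s[4] == 'A' is uppercase, so index 4 is a camel boundary
print(_is_camel_case_ab('workAt', 4))  # True
# 'workat' has no uppercase letter at index 4, so no boundary is detected
print(_is_camel_case_ab('workat', 4))  # False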
def create_bulleted_tool_list(tools):
"""
Helper function that returns a text-based bulleted list of the given tools.
Args:
tools (OrderedDict): The tools whose names (the keys) will be added to the
text-based list.
Returns:
str: A bulleted list of tool names.
"""
return TOOL_LIST_HEADER + create_bulleted_list(tools.keys()) | d75fb7793c019f2499b549c2af627bb2038876e7 | 13,395 |
def _c3_merge(sequences, cls, context):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
# Show all the remaining bases, which were considered as
# candidates for the next mro sequence.
raise exceptions.InconsistentMroError(
message="Cannot create a consistent method resolution order "
"for MROs {mros} of class {cls!r}.",
mros=sequences, cls=cls, context=context)
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0] | 6453f151fe227226f3fcbc29d4e5fffd800683cb | 13,396 |
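An illustrative call of the merge above, with plain strings standing in for classes (assuming `_c3_merge` is importable; `cls` and `context` only feed the error message, and the input lists are mutated in place):

# Linearize D(B, C) where B(A) and C(A): merge the MROs of B and C plus the base order [B, C].
seqs = [['B', 'A', 'object'], ['C', 'A', 'object'], ['B', 'C']]
print(_c3_merge(seqs, cls='D', context=None))  # ['B', 'C', 'A', 'object']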
def rgb2hex(rgb: tuple) -> str:
"""
Converts RGB tuple format to HEX string
:param rgb:
:return: hex string
"""
return '#%02x%02x%02x' % rgb | 1ecb1ca68fa3dbe7b58f74c2e50f76175e9a0c5a | 13,397 |
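A one-line usage check for the converter above (assuming `rgb2hex` is in scope):

print(rgb2hex((255, 99, 71)))  # prints #ff6347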
import warnings
import numpy as np
import pandas as pd
import cvxopt as opt  # the code below uses cvxopt's matrix/qp API; these aliases are assumed
from cvxopt import solvers as optsolvers
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights | 2efd839b8ca8ea6fe7b26f645630beb78699a8ea | 13,398 |
def relu(fd: DahliaFuncDef) -> str:
"""tvm.apache.org/docs/api/python/relay/nn.html#tvm.relay.nn.relu"""
data, res = fd.args[0], fd.dest
num_dims = get_dims(data.comp)
args = data.comp.args
indices = ""
var_name = CHARACTER_I
for _ in range(num_dims):
indices += f'[{var_name}]'
var_name = next_character(var_name)
data_type = fd.data_type
zero = f'({"0.0" if "fix" in data_type else "0"} as {data_type})'
input = f'{data.id.name}{indices}'
result = f'{res.id.name}{indices}'
loop_body = f"""if ({input} > {zero}) {{ {result} := {input}; }}
else {{ {result} := {zero}; }}"""
return emit_dahlia_definition(
fd,
emit_dahlia_loop(data, loop_body)
) | 30eefb572f632e91993d715ecc70570d38030657 | 13,399 |
import random
def base_hillclimb(base_sol: tuple, neighbor_method: str, max_fevals: int, searchspace: Searchspace, all_results, kernel_options, tuning_options, runner, restart=True, randomize=True, order=None):
""" Hillclimbing search until max_fevals is reached or no improvement is found
Base hillclimber that evaluates neighbouring solutions in a random or fixed order
and possibly immediately moves to the neighbour if it is an improvement.
:params base_sol: Starting position for hillclimbing
:type base_sol: list
:params neighbor_method: Method to use to select neighboring parameter configurations to visit
during hillclimbing, either "Hamming", "strictly-adjacent" or "adjacent" are supported.
:type neighbor_method: string
:params max_fevals: Maximum number of unique function evaluations that is allowed
during the search.
:type max_fevals: int
:params searchspace: The searchspace object.
    :type searchspace: Searchspace
:params all_results: List of dictionaries with all benchmarked configurations
:type all_results: list(dict)
:param kernel_options: A dictionary with all options for the kernel.
:type kernel_options: dict
:param tuning_options: A dictionary with all options regarding the tuning
process.
:type tuning_options: dict
:params runner: A runner from kernel_tuner.runners
:type runner: kernel_tuner.runner
    :params restart: Boolean that controls whether to greedily restart hillclimbing
from a new position as soon as an improved position is found. True by default.
:type restart: bool
:params randomize: Boolean that controls whether the dimensions of the tunable
parameters are randomized.
:type randomize: bool
:params order: Fixed order among the dimensions of the tunable parameters are
to be evaluated by the hillclimber.
:type order: list
:returns: The final position that was reached when hillclimbing halted.
:rtype: list
"""
if randomize and order:
raise ValueError("Using a preset order and randomize at the same time is not supported.")
tune_params = tuning_options.tune_params
# measure start point score
best_score = _cost_func(base_sol, kernel_options, tuning_options, runner, all_results, check_restrictions=False)
found_improved = True
while found_improved:
child = list(base_sol[:])
found_improved = False
current_results = []
vals = list(tune_params.values())
if order is None:
indices = list(range(len(vals)))
else:
indices = order
if randomize:
random.shuffle(indices)
# in each dimension see the possible values
for index in indices:
neighbors = searchspace.get_param_neighbors(tuple(child), index, neighbor_method, randomize)
# for each value in this dimension
for val in neighbors:
orig_val = child[index]
child[index] = val
# get score for this position
score = _cost_func(child, kernel_options, tuning_options, runner, current_results, check_restrictions=False)
# generalize this to other tuning objectives
if score < best_score:
best_score = score
base_sol = child[:]
found_improved = True
if restart:
break
else:
child[index] = orig_val
fevals = len(tuning_options.unique_results)
if fevals >= max_fevals:
all_results += current_results
return base_sol
if found_improved and restart:
break
# append current_results to all_results
all_results += current_results
return base_sol | 4007f66d14d52620b7917fb45a7701a8ec2ae96f | 13,401 |
from datetime import timedelta
def filter_dates(dates):
"""filter near dates"""
j = 0
while j < len(dates):
date = dates[j]
i = 3
j += 1
while True:
date += timedelta(days=1)
if date in dates:
i += 1
else:
if i > 2:
del dates[j:j+i-1]
break
return dates | 447f2e082672c8f37918fce02863bad1f141854b | 13,402 |
def biweight_location(a, c=6.0, M=None, axis=None, eps=1e-8):
"""
Copyright (c) 2011-2016, Astropy Developers
Compute the biweight location for an array.
Returns the biweight location for the array elements.
The biweight is a robust statistic for determining the central
location of a distribution.
The biweight location is given by the following equation
.. math::
C_{bl}= M+\\frac{\Sigma_{\|u_i\|<1} (x_i-M)(1-u_i^2)^2}
{\Sigma_{\|u_i\|<1} (1-u_i^2)^2}
where M is the sample mean or if run iterative the initial guess,
and u_i is given by
.. math::
u_{i} = \\frac{(x_i-M)}{cMAD}
where MAD is the median absolute deviation.
For more details, see Beers, Flynn, and Gebhardt, 1990, AJ, 100, 32B
Parameters
----------
a : array-like
Input array or object that can be converted to an array.
c : float, optional
Tuning constant for the biweight estimator. Default value is 6.0.
M : float, optional
Initial guess for the biweight location.
axis : tuple, optional
        tuple of the integer axis values to calculate over. Should be sorted.
Returns
-------
biweight_location : float
Returns the biweight location for the array elements.
Examples
--------
This will generate random variates from a Gaussian distribution and return
the biweight location of the distribution::
>>> from utils import biweight_location
>>> from numpy.random import randn
>>> randvar = randn(10000)
>>> cbl = biweight_location(randvar)
See Also
--------
median_absolute_deviation, biweight_midvariance
Note
--------
Copy of the astropy function with the "axis" argument added appropriately.
"""
if M is None:
if isinstance(a, np.ma.MaskedArray):
func = np.ma.median
else:
a = np.array(a, copy=False)
func = np.median
M = func(a, axis=axis)
else:
a = np.array(a, copy=False)
N = M*1.
# set up the difference
if axis is not None:
for i in axis:
N = np.expand_dims(N, axis=i)
d = a - N
# set up the weighting
if axis is not None:
MAD = median_absolute_deviation(a, axis=axis)
for i in axis:
MAD = np.expand_dims(MAD, axis=i)
else:
MAD = median_absolute_deviation(a)
u = np.where(MAD < eps, 0., d / c / MAD)
# now remove the outlier points
    if isinstance(a, np.ma.MaskedArray):
        mask = (np.abs(u) < 1).astype(int) * (1 - a.mask.astype(int))
    else:
        mask = (np.abs(u) < 1).astype(int)
u = (1 - u ** 2) ** 2
return M + (d * u * mask).sum(axis=axis) / (u * mask).sum(axis=axis) | 4743b85f01f0d655a22f3b6037aaababcd375c7f | 13,403 |
def model_21(GPUS = 1):
""" one dense: 3000 """
model = Sequential()
model.add(Convolution3D(60, kernel_size = (3, 3, 3), strides = (1, 1, 1), input_shape = (9, 9, 9, 20))) # 32 output nodes, kernel_size is your moving window, activation function, input shape = auto calculated
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Convolution3D(60, (3, 3, 3)))
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Flatten()) # now our layers have been combined to one
    model.add(Dense(3000)) # 3000 nodes in the last hidden layer
model.add(BatchNormalization())
model.add(Activation(activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(20, activation = 'softmax')) # output layer has 20 possible classes (amino acids 0 - 19)
if GPUS >= 2:
model = multi_gpu_model(model, gpus=GPUS)
return model | 990b1f1c8b1271d44cb371733f7cf17c7f288997 | 13,405 |
import datetime
import pandas as pd
def strWeekday(
date: str,
target: int,
after: bool = False,
) -> str:
"""
    Given an ISO string `date`, return the nearest `target` weekday.
**Parameters**
- `date`: The date around which the caller would like target searched.
    - `target`: Weekday number as in the `datetime` Standard Library Module.
    - `after`: If True, search for the nearest target weekday after `date`; otherwise before it.
**Returns**
The ISO YYYY-MM-DD string representation of the nearest given weekday.
"""
dtdate = pd.to_datetime(date)
if datetime.datetime.weekday(dtdate) != target:
if not after:
date = str(dtdate - pd.offsets.Week(weekday=target)).split(" ")[0]
else:
date = str(dtdate + pd.offsets.Week(weekday=target)).split(" ")[0]
return date | d3511212cbbe8935b7acc7e63afff5b454aa039e | 13,406 |
def combine_bincounts_kernelweights(
xcounts, ycounts, gridsize, colx, coly, L, lenkernel, kernelweights, mid, binwidth
):
"""
This function combines the bin counts (xcounts) and bin averages (ycounts) with
kernel weights via a series of direct convolutions. As a result, binned
approximations to X'W X and X'W y, denoted by weigthedx and weigthedy, are computed.
Recall that the local polynomial curve estimator beta_ and its derivatives are
minimizers to a locally weighted least-squares problem. At each grid
point g = 1,..., M in the grid, beta_ is computed as the solution to the
linear matrix equation:
X'W X * beta_ = X'W y,
where W are kernel weights approximated by the Gaussian density function.
X'W X and X'W y are approximated by weigthedx and weigthedy,
which are the result of a direct convolution of bin counts (xcounts) and kernel
weights, and bin averages (ycounts) and kernel weights, respectively.
The terms "kernel" and "kernel function" are used interchangeably
throughout.
For more information see the documentation of the main function locpoly
under KernReg.locpoly.
Parameters
----------
    xcounts: np.ndarray
        1-D array of binned x-values ("bin counts") of length gridsize.
    ycounts: np.ndarray
        1-D array of binned y-values ("bin averages") of length gridsize.
    gridsize: int
        Number of equally-spaced grid points.
    colx: int
        Number of columns of output array weigthedx, i.e. the binned approximation to X'W X.
    coly: int
        Number of columns of output array weigthedy, i.e. the binned approximation to X'W y.
    lenkernel: int
        Length of 1-D array kernelweights.
    kernelweights: np.ndarray
        1-D array of length lenkernel containing
approximated weights for the Gaussian kernel
(W in the notation above).
L: int
Parameter defining the number of times the kernel function
has to be evaluated.
Note that L < N, where N is the total number of observations.
mid: int
Midpoint of kernelweights.
binwidth: float
Bin width.
Returns
-------
    weigthedx: np.ndarray
        Dimensions (M, colx). Binned approximation to X'W X.
    weigthedy: np.ndarray
Dimensions (M, coly). Binned approximation to X'W y.
"""
weigthedx = np.zeros((gridsize, colx))
weigthedy = np.zeros((gridsize, coly))
for g in range(gridsize):
if xcounts[g] != 0:
for i in range(max(0, g - L - 1), min(gridsize, g + L)):
if 0 <= i <= gridsize - 1 and 0 <= g - i + mid - 1 <= lenkernel - 1:
fac_ = 1
weigthedx[i, 0] += xcounts[g] * kernelweights[g - i + mid - 1]
weigthedy[i, 0] += ycounts[g] * kernelweights[g - i + mid - 1]
for j in range(1, colx):
fac_ = fac_ * binwidth * (g - i)
weigthedx[i, j] += (
xcounts[g] * kernelweights[g - i + mid - 1] * fac_
)
if j < coly:
weigthedy[i, j] += (
ycounts[g] * kernelweights[g - i + mid - 1] * fac_
)
return weigthedx, weigthedy | b283d3dd19720e7d5074a39866cb4cf5d55376d8 | 13,407 |
def get_icon_for_group(group):
"""Get the icon for an AOVGroup."""
    # Group has a custom icon path, so use it.
if group.icon is not None:
return QtGui.QIcon(group.icon)
if isinstance(group, IntrinsicAOVGroup):
return QtGui.QIcon(":ht/rsc/icons/aovs/intrinsic_group.png")
return QtGui.QIcon(":ht/rsc/icons/aovs/group.png") | 8e6ea6f22901bc715a7b6ce02c68ca633bf9fe00 | 13,408 |
def unix_to_windows_path(path_to_convert, drive_letter='C'):
"""
For a string representing a POSIX compatible path (usually
starting with either '~' or '/'), returns a string representing an
equivalent Windows compatible path together with a drive letter.
Parameters
----------
path_to_convert : string
A string representing a POSIX path
drive_letter : string (Default : 'C')
A single character string representing the desired drive letter
Returns
-------
string
A string representing a Windows compatible path.
"""
if path_to_convert.startswith('~'):
path_to_convert = path_to_convert[1:]
if path_to_convert.startswith('/'):
path_to_convert = path_to_convert[1:]
path_to_convert = '{}{}{}'.format(drive_letter,
':\\',
path_to_convert).replace('/', '\\')
return path_to_convert | d3c23e2c19be4b81be135ae84760430be852da41 | 13,409 |
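A usage sketch for the path converter above (assuming `unix_to_windows_path` is in scope; the file names are made up for illustration):

print(unix_to_windows_path('~/projects/notes.txt'))                # C:\projects\notes.txt
print(unix_to_windows_path('/var/log/app.log', drive_letter='D'))  # D:\var\log\app.log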
def recordview_create_values(
coll_id="testcoll", view_id="testview", update="RecordView", view_uri=None,
view_entity_type="annal:Test_default",
num_fields=4, field3_placement="small:0,12",
extra_field=None, extra_field_uri=None
):
"""
Entity values used when creating a record view entity
"""
view_values = (
{ 'annal:type': "annal:View"
, 'rdfs:label': "%s %s/%s"%(update, coll_id, view_id)
, 'rdfs:comment': "%s help for %s in collection %s"%(update, view_id, coll_id)
, 'annal:view_entity_type': view_entity_type
, 'annal:open_view': True
, 'annal:view_fields':
[ { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_id"
, 'annal:field_placement': "small:0,12;medium:0,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_type"
, 'annal:field_placement': "small:0,12;medium:6,6"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_label"
, 'annal:field_placement': "small:0,12"
}
, { 'annal:field_id': layout.FIELD_TYPEID+"/Entity_comment"
# , 'annal:field_placement': field3_placement
}
]
})
if view_uri:
view_values['annal:uri'] = view_uri
if field3_placement:
view_values['annal:view_fields'][3]['annal:field_placement'] = field3_placement
if extra_field:
efd = (
{ 'annal:field_id': extra_field
, 'annal:field_placement': "small:0,12"
})
if extra_field_uri:
efd['annal:property_uri'] = extra_field_uri
view_values['annal:view_fields'].append(efd)
if num_fields == 0:
view_values['annal:view_fields'] = []
return view_values | a4b057acefd8f3e7c35b8412f0f0986d0440ab7a | 13,410 |
import math
import numpy as np
def calculateZ(f, t2, a0, a1, a2=0, a3=0):
""" given the frequency array and the filter coefficients,
return Z(s) as a np.array()
"""
    s = np.array(f) * 2 * math.pi * 1j
z = (1 + s*t2)/(s*(a3*s**3 + a2*s**2 + a1*s + a0))
return z | 56b2d349d3c279006c85a9dc9b8742395f1a6114 | 13,411 |
def get_team_project_default_permissions(team, project):
"""
Return team role for given project.
"""
perms = get_perms(team, project)
return get_role(perms, project) or "" | e6c41a1cc56c7ae3e51950508fbc1c514b6ebf7d | 13,412 |
from Carbon.File import FSRef, FSGetResourceForkName
from Carbon.Files import fsRdPerm
from Carbon import Res
def readPlistFromResource(path, restype='plst', resid=0):
"""Read plst resource from the resource fork of path.
"""
fsRef = FSRef(path)
resNum = Res.FSOpenResourceFile(fsRef, FSGetResourceForkName(), fsRdPerm)
Res.UseResFile(resNum)
plistData = Res.Get1Resource(restype, resid).data
Res.CloseResFile(resNum)
return readPlistFromString(plistData) | 36d0387114d548d57f41351355c1fe5d948f70e3 | 13,413 |
def simple_satunet(
input_shape,
kernel=(2, 2),
num_classes=1,
activation="relu",
use_batch_norm=True,
dropout=0.1,
dropout_change_per_layer=0.0,
dropout_type="standard",
use_dropout_on_upsampling=False,
filters=8,
num_layers=4,
strides=(1, 1),
):
"""
Customisable UNet architecture (Ronneberger et al. 2015 https://arxiv.org/abs/1505.04597)
input_shape: shape (x, y, num_channels)
num_classes (int): 1 for binary segmentation
activation (str): A keras.activations.Activation to use. ReLu by default.
use_batch_norm (bool): Whether to use Batch Normalisation across the channel axis between convolutions
    dropout (float between 0. and 1.): dropout after the first convolutional block. 0. = no dropout
    dropout_change_per_layer (float between 0. and 1.): Factor to add to the Dropout after each convolutional block
dropout_type (one of "spatial" or "standard"): Spatial is recommended by https://arxiv.org/pdf/1411.4280.pdf
use_dropout_on_upsampling (bool): Whether to use dropout in the decoder part of the network
filters (int): Convolutional filters in the initial convolutional block. Will be doubled every block
num_layers (int): Number of total layers in the encoder not including the bottleneck layer
"""
upconv_filters = int(1.5 * filters)
# Build U-Net model
inputs = tf.keras.layers.Input(input_shape)
x = inputs
down_layers = []
for l in range(num_layers):
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
down_layers.append(x)
x = tf.keras.layers.MaxPooling2D(kernel)(x)
dropout += dropout_change_per_layer
# filters = filters * 2 # double the number of filters with each layer
x = conv2d_block(
inputs=x,
filters=filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
strides=strides, # (1,1),
)
if not use_dropout_on_upsampling:
dropout = 0.0
dropout_change_per_layer = 0.0
for conv in reversed(down_layers):
filters //= 2 # decreasing number of filters with each layer
dropout -= dropout_change_per_layer
# x = upsample(filters, kernel, strides=(2,2), padding="same")(x)#(2, 2)
x = tf.keras.layers.UpSampling2D(kernel)(x)
x = tf.keras.layers.concatenate([x, conv])
x = conv2d_block(
inputs=x,
filters=upconv_filters,
use_batch_norm=use_batch_norm,
dropout=dropout,
dropout_type=dropout_type,
activation=activation,
)
# outputs = tf.keras.layers.Conv2D(num_classes, (1, 1), activation=output_activation)(x)
# ## classify
if num_classes == 1:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="sigmoid"
)(x)
else:
outputs = tf.keras.layers.Conv2D(
num_classes, (1, 1), padding="same", activation="softmax"
)(x)
model = tf.keras.models.Model(inputs=[inputs], outputs=[outputs])
return model | 1efdfb6dd15782543adb071b081b580ea78cb986 | 13,414 |
from datetime import datetime, timedelta
from fractions import Fraction
import numpy as np
def fracday2datetime(tdata):
"""
Takes an array of dates given in %Y%m%d.%f format and returns a
    corresponding list of datetime objects
"""
dates = [datetime.strptime(str(i).split(".")[0], "%Y%m%d").date()
for i in tdata]
frac_day = [i - np.floor(i) for i in tdata]
ratios = [(Fraction(i).limit_denominator().numerator,
Fraction(i).limit_denominator().denominator) for i in frac_day]
times = [datetime.strptime(
str(timedelta(seconds=timedelta(days=i[0]/i[1]).total_seconds())),
'%H:%M:%S').time() for i in ratios]
date_times = [datetime.combine(d, t) for d, t in zip(dates, times)]
return date_times | 03ca701317a6b80fd8c14eecddc84a380f16b3aa | 13,415 |
def flatten(iterable):
"""
Unpacks nested iterables into the root `iterable`.
Examples:
```python
from flashback.iterating import flatten
for item in flatten(["a", ["b", ["c", "d"]], "e"]):
print(item)
#=> "a"
#=> "b"
#=> "c"
#=> "d"
#=> "e"
assert flatten([1, {2, 3}, (4,), range(5, 6)]) == (1, 2, 3, 4, 5)
```
Params:
iterable (Iterable<Any>): the iterable to flatten
Returns:
tuple<Any>: the flattened iterable
"""
items = []
for item in iterable:
if isinstance(item, (list, tuple, set, frozenset, range)):
for nested_item in flatten(item):
items.append(nested_item)
else:
items.append(item)
return tuple(items) | 8c47de3255906fb114a13ecfec4bf4a1204a0dfd | 13,417 |
def get_file_info(bucket, filename):
"""Returns information about stored file.
Arguments:
bucket: a bucket that contains the file.
filename: path to a file relative to bucket root.
Returns:
FileInfo object or None if no such file.
"""
try:
stat = cloudstorage.stat(
'/%s/%s' % (bucket, filename), retry_params=_make_retry_params())
return FileInfo(size=stat.st_size)
except cloudstorage.errors.NotFoundError:
return None | f06c6c3f29cf15992d6880e6509b8ebe11d4288b | 13,418 |
import random
def generate_tree(depth, max_depth, max_args):
"""Generate tree-like equations.
Args:
depth: current depth of the node, int.
max_depth: maximum depth of the tree, int.
max_args: maximum number of arguments per operator, int.
Returns:
The root node of a tree structure.
"""
if depth < max_depth:
r = random.random()
else:
r = 1
if r > VALUE_P:
value = random.choice(VALUES)
return value, 1
else:
length = 2
num_values = random.randint(2, max_args)
values = []
for _ in range(num_values):
sub_t, sub_l = generate_tree(depth + 1, max_depth, max_args)
values.append(sub_t)
length += sub_l
op = random.choice(OPERATORS)
t = (op, values[0])
for value in values[1:]:
t = (t, value)
t = (t, END)
return t, length | df8c968444d86658d2d6f09fb836b39119998790 | 13,419 |
def Pow_sca(x_e, c_e, g, R0, R1, omega, epM):
"""Calculate the power scattered by an annulus
with a 'circling' electron as exciting source inside and
an electron moving on a slightly curved trajectory outside (vertical)
The trajectory of the electron derives from a straight vertical
trajectory in the ellipse frame.
Output: Resistive losses as a function of omega"""
# epM = 1-64/(omega*(omega+1j*gamma))
# omega = omega*Conv
k0 = omega/3e8
gamma_abs = 1j* np.pi**2 * ep0 * g**2/8 * k0**2
k_n = 1
###Lambda = 4*pi*eps0 in expression for source coefficients
###Calculate lambda according to formula in ELS_slab_crescent.pdf
a_n_s = np.exp(-omega/c_e*x_e)/omega*BesselI(1,omega*g/c_e)
#Calculate expansion coefficients as in ELS_ellipse_annulus.pdf
#This is for the cosine terms
b_c = (a_n_s /((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( (epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
#This is for the sin terms
b_s = (a_n_s/((epM-1)**2 * R0**(2) - (epM+1)**2 * R1**(2))\
*( -(epM**2-1) * (R1**(2)-R0**(2))\
- 4*epM * R1**(2) * R0**(2) ) * R0**(-2)) - 1*a_n_s
return omega/2 * np.imag(gamma_abs * (abs(b_c)**2 + abs(b_s)**2)) | aab79a2653428354d88ada4ded683d8eead6dd1e | 13,421 |
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi"
)
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q"
)[0]
x_y_id_s = read_next_bytes(
fid,
num_bytes=24 * num_points2D,
format_char_sequence="ddq" * num_points2D,
)
xys = np.column_stack(
[
tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3])),
]
)
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id,
qvec=qvec,
tvec=tvec,
camera_id=camera_id,
name=image_name,
xys=xys,
point3D_ids=point3D_ids,
)
return images | 6da7916c0c74c4a9c58a91a5920d32ed8e3bbdf5 | 13,422 |
def index():
"""Render upload page."""
log_cmd('Requested upload.index', 'green')
return render_template('upload.html',
page_title='Upload',
local_css='upload.css',
) | 478f970f54a0c66443fbbfa24f44c86622e0e07f | 13,423 |
from tqdm import tqdm
def partial_to_full(dic1, dic2):
    """This function relates partial curves to full curves, according to the distances between them.
    The inputs are two dictionaries."""
C = []
D = []
F = []
# Calculate the closest full curve for all the partial curves under
# evaluation
for i in tqdm(dic1.keys()):
df = distance_cycle_to_full(i, dic1, dic2)
Distance = df['Distance'][df.index[0]]
Full_cycle = df['Cycle'][df.index[0]]
C.append(i)
D.append(Distance)
F.append(Full_cycle)
D = np.array(D)
C = np.array(C)
F = np.array(F)
return D, C, F | 31229ba4715e7241b205b81a207aae6e8290b93e | 13,425 |
import random
def _sample(probabilities, population_size):
"""Return a random population, drawn with regard to a set of probabilities"""
population = []
for _ in range(population_size):
solution = []
for probability in probabilities:
# probability of 1.0: always 1
# probability of 0.0: always 0
if random.uniform(0.0, 1.0) < probability:
solution.append(1)
else:
solution.append(0)
population.append(solution)
return population | ac781075f8437ea02b2dde3b241c21685c259e0c | 13,426 |
def nodal_scoping(node_ids, server = None):
"""Helper function to create a specific ``ansys.dpf.core.Scoping``
associated to a mesh.
Parameters
----------
node_ids : List of int
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
        ``None``, attempts to use the global server.
Returns
-------
scoping : ansys.dpf.core.Scoping
"""
if not isinstance(node_ids, list):
raise dpf_errors.InvalidTypeError("list", "node_ids")
scoping = Scoping(server = server, ids = node_ids, location = locations.nodal)
return scoping | a8587a2027326e1c88088fac85a54879f28267d6 | 13,427 |
import json
def user_active(request):
"""Prevents auto logout by updating the session's last active time"""
# If auto logout is disabled, just return an empty body.
if not settings.AUTO_LOGOUT_SECONDS:
return HttpResponse(json.dumps({}), content_type="application/json", status=200)
last_active_at = set_session_user_last_active_at(request)
auto_logout_at = last_active_at + timedelta(seconds=settings.AUTO_LOGOUT_SECONDS)
auto_logout_warning_at = auto_logout_at - timedelta(seconds=settings.AUTO_LOGOUT_WARNING_AT_SECONDS_LEFT)
return HttpResponse(
json.dumps(
{
"auto_logout_at": auto_logout_at.isoformat(),
"auto_logout_warning_at": auto_logout_warning_at.isoformat(),
}
),
content_type="application/json",
status=200,
) | 332dd45457ab099a2775587dd357f5ccf9d663f7 | 13,428 |
def decode_labels(labels):
    """Decode labels into strings."""
labels_decode = []
for label in labels:
if not isinstance(label, str):
if isinstance(label, int):
label = str(label)
else:
label = label.decode('utf-8').replace('"', '')
labels_decode.append(label)
return labels_decode | 36b8b10af2cd2868ab1923ccd1e620ccf815d91a | 13,429 |
def indent(text, num=2):
"""Indent a piece of text."""
lines = text.splitlines()
return '\n'.join(indent_iterable(lines, num=num)) | 04b547210463f50c0ddc7ee76547fea199e71bdc | 13,430 |
import random
def random_lever_value(lever_name):
"""Moves a given lever (lever_name) to a random position between 1 and 3.9"""
rand_val = random.randint(10, 39)/10 # Generate random value between 1 and 3.9
return move_lever([lever_name], [round(rand_val, 2)], costs = True) | c39781752b3defe164ad5d451932a71c99d95046 | 13,431 |
def back(update, context):
"""Кнопка назад."""
user = get_user_or_raise(update.effective_user.id)
update.message.reply_text(
messages.MAIN_MENU_MESSAGE, reply_markup=get_start_keyboard(user)
)
return ConversationHandler.END | 827070b4bcefad57afc8847f96a404a9272a0f7b | 13,432 |
def get_norm_residuals(vecs, word):
"""
computes normalized residuals of vectors with respect to a word
Args:
vecs (ndarray):
word (ndarray):
Returns:
tuple : (rvecs_n, rvec_flag)
CommandLine:
python -m ibeis.algo.hots.smk.smk_residuals --test-get_norm_residuals
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs != words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = (hstypes.VEC_MAX * rng.rand(1, 128)).astype(hstypes.VEC_TYPE)
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
Example:
>>> # ENABLE_DOCTEST
>>> # The case where vecs == words
>>> from ibeis.algo.hots.smk.smk_residuals import * # NOQA
>>> rng = np.random.RandomState(0)
>>> vecs = (hstypes.VEC_MAX * rng.rand(4, 128)).astype(hstypes.VEC_TYPE)
>>> word = vecs[1]
>>> rvecs_n = get_norm_residuals(vecs, word)
>>> result = ut.numpy_str2(rvecs_n)
>>> print(result)
IGNORE
rvecs_agg8 = compress_normvec_uint8(arr_float)
rvecs_agg16 = compress_normvec_float16(arr_float)
ut.print_object_size(rvecs_agg16, 'rvecs_agg16: ')
ut.print_object_size(rvecs_agg8, 'rvecs_agg8: ')
ut.print_object_size(rvec_flag, 'rvec_flag: ')
%timeit np.isnan(_rvec_sums)
%timeit _rvec_sums == 0
%timeit np.equal(rvec_sums, 0)
%timeit rvec_sums == 0
%timeit np.logical_or(np.isnan(_rvec_sums), _rvec_sums == 0)
"""
# Compute residuals of assigned vectors
#rvecs_n = word.astype(dtype=FLOAT_TYPE) - vecs.astype(dtype=FLOAT_TYPE)
arr_float = np.subtract(word.astype(hstypes.FLOAT_TYPE), vecs.astype(hstypes.FLOAT_TYPE))
# Faster, but doesnt work with np.norm
#rvecs_n = np.subtract(word.view(hstypes.FLOAT_TYPE), vecs.view(hstypes.FLOAT_TYPE))
vt.normalize_rows(arr_float, out=arr_float)
# Mark null residuals
#_rvec_sums = arr_float.sum(axis=1)
#rvec_flag = np.isnan(_rvec_sums)
# Converts normvec to a smaller type like float16 or int8
rvecs_n = compress_normvec(arr_float)
# IF FLOAT16 WE NEED TO FILL NANS
# (but we should use int8, and in that case it is implicit)
# rvecs_n = np.nan_to_num(rvecs_n)
return rvecs_n | 8514f203907732c2d3175bf25f2803c93166687a | 13,433 |
def user_closed_ticket(request):
"""
Returns all closed tickets opened by user
:return: JsonResponse
"""
columns = _no_priority
if settings.SIMPLE_USER_SHOW_PRIORITY:
columns = _ticket_columns
ticket_list = Ticket.objects.filter(created_by=request.user,
is_closed=True)
dtd = TicketDTD( request, ticket_list, columns )
return JsonResponse(dtd.get_dict()) | 61c2be90937c4d8892dc8756428e3b191adc6d55 | 13,434 |
def get_placekey_from_address(street_address:str, city:str, state:str, postal_code:str, iso_country_code:str='US',
placekey_api_key: str = None) -> str:
"""
Look up the full Placekey for a given address string.
:param street_address: Street address with suite, floor, or apartment.
:param city: The city.
:param state: Two character state identifier.
:param postal_code: Postal code identifier; typically five numbers.
:param iso_country_code: Two character country identifier. Defaults to "US".
:param placekey_api_key: Placekey API key for making requests.
:return: Placekey string.
"""
# check a couple of things for the parameter inputs
assert len(state) == 2, f'state must be two character identifier, not "{state}".'
assert len(iso_country_code) == 2, 'iso_country_code must be two character identifier, not ' \
f'"{iso_country_code}".'
body = {
"query": {
"street_address": street_address,
"city": city,
"region": state,
"postal_code": postal_code,
"iso_country_code": iso_country_code
}
}
pk = _get_placekey(body, placekey_api_key)
return pk | 0f3f911bb66a30138b8b293455d348f618e11486 | 13,436 |
from pathlib import Path
def _path_to_str(var):
"""Make sure var is a string or Path, return string representation."""
if not isinstance(var, (Path, str)):
raise ValueError("All path parameters must be either strings or "
"pathlib.Path objects. Found type %s." % type(var))
else:
return str(var) | c5ae3ed06be31de3220b5400966866ccda29b9fc | 13,438 |
def netconf_edit_config(task: Task, config: str, target: str = "running") -> Result:
"""
Edit configuration of device using Netconf
Arguments:
config: Configuration snippet to apply
target: Target configuration store
Examples:
Simple example::
> nr.run(task=netconf_edit_config, config=desired_config)
"""
manager = task.host.get_connection("netconf", task.nornir.config)
manager.edit_config(config, target=target)
return Result(host=task.host) | 9862199c65ecbdc9eb037a181e5783eb911f76a1 | 13,439 |
def _cve_id_field_name():
""" Key name for a solr field that contains cve_id
"""
return "cve_id" | 68ca6f2585804e63198a20d3f174836a0cbb0841 | 13,440 |
def mapRuntime(dataFrame1, dataFrame2):
"""
Add the scraped runtimes of the titles in the viewing activity dataframe
Parameters:
dataFrame1: string
The name of the dataFrame to which the user wants to add the runtime
dataFrame2: string
        The name of the dataFrame containing the runtimes.
Returns:
a dataFrame
"""
dataFrame1['Runtime'] = dataFrame1.Title.map(
dataFrame2.set_index('Title')['runtime'].to_dict())
return dataFrame1 | 61d4af72c51e61c6f0077b960e4002dd7d272ad8 | 13,441 |
from math import sqrt
def get_circ_center_2pts_r(p1, p2, r):
"""
Find the centers of the two circles that share two points p1/p2 and a radius.
From algorithm at http://mathforum.org/library/drmath/view/53027.html. Adapted from version at
https://rosettacode.org/wiki/Circles_of_given_radius_through_two_points#Python.
:param p1: First point , tuple (x, y)
:param p2: Second point, tuple (x, y)
:param r: Radius of circle
:return: a list of 2 points that are centers of circles of radius r sharing p1/p2
"""
if r == 0.0:
raise ValueError('No solution due to no radius')
(x1, y1), (x2, y2) = tuple(p1), tuple(p2)
if p1 == p2:
        raise ValueError('Infinite number of solutions')
# Distance in x and y between points
dx = x2 - x1
dy = y1 - y2
# Dist between points
q = sqrt(dx ** 2 + dy ** 2)
if q > (2.0 * r):
raise ValueError('Too much distance between points to fit within radius')
# Halfway point
x3 = (x1 + x2) / 2.0
y3 = (y1 + y2) / 2.0
# Distance along the mirror line
d = sqrt(r ** 2 - ((q / 2.0) ** 2))
# First circle center
# c1 = (x3 + ((d * dy) / q), y3 + ((d * dx) / q))
# Second circle center
# c2 = (x3 - ((d * dy) / q), y3 - ((d * dx) / q))
c1x = x3 + sqrt(r ** 2 - (q / 2.0) ** 2) * (y1 - y2) / q
c1y = y3 + sqrt(r ** 2 - (q / 2.0) ** 2) * (x2 - x1) / q
c2x = x3 - sqrt(r ** 2 - (q / 2.0) ** 2) * (y1 - y2) / q
c2y = y3 - sqrt(r ** 2 - (q / 2.0) ** 2) * (x2 - x1) / q
return ((c1x, c1y), (c2x, c2y)) | 5ad9abe858721ad94c5d16cc8ed617dabe9f3336 | 13,442 |
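A worked numeric check of the construction above (an illustrative sketch, assuming `get_circ_center_2pts_r` is in scope): two points a distance 2 apart with radius sqrt(2) give centers offset by one unit on either side of the chord.

c1, c2 = get_circ_center_2pts_r((0, 0), (2, 0), 2 ** 0.5)
print(c1, c2)  # approximately (1.0, 1.0) (1.0, -1.0), up to floating-point rounding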
def crext_MaxFragmentLength(length_exponent):
"""Create a MaxFragmentLength extension.
Allowed lengths are 2^9, 2^10, 2^11, 2^12. (TLS default is 2^14)
`length_exponent` should be 9, 10, 11, or 12, otherwise the extension will
contain an illegal value.
"""
maxlen = (length_exponent-8).to_bytes(1,"big")
return ExtensionType.max_fragment_length.value + lenprefix(maxlen) | 0078d372440dbe4914675efd004b4fb60f73a6d8 | 13,443 |
import torch
def perform_intervention(intervention, model, effect_types=('indirect', 'direct')):
"""Perform intervention and return results for specified effects"""
x = intervention.base_strings_tok[0] # E.g. The doctor asked the nurse a question. She
x_alt = intervention.base_strings_tok[1] # E.g. The doctor asked the nurse a question. He
with torch.no_grad():
candidate1_base_prob, candidate2_base_prob = model.get_probabilities_for_examples_multitoken(
x,
intervention.candidates_tok)
candidate1_alt_prob, candidate2_alt_prob = model.get_probabilities_for_examples_multitoken(
x_alt,
intervention.candidates_tok)
candidate1 = ' '.join(intervention.candidates[0]).replace('Ġ', '')
candidate2 = ' '.join(intervention.candidates[1]).replace('Ġ', '')
odds_base = candidate2_base_prob / candidate1_base_prob
odds_alt = candidate2_alt_prob / candidate1_alt_prob
total_effect = (odds_alt - odds_base) / odds_base
results = {
'base_string1': intervention.base_strings[0],
'base_string2': intervention.base_strings[1],
'candidate1': candidate1,
'candidate2': candidate2,
'candidate1_base_prob': candidate1_base_prob,
'candidate2_base_prob': candidate2_base_prob,
'odds_base': odds_base,
'candidate1_alt_prob': candidate1_alt_prob,
'candidate2_alt_prob': candidate2_alt_prob,
'odds_alt': odds_alt,
'total_effect': total_effect,
}
for effect_type in effect_types:
candidate1_probs_head, candidate2_probs_head, candidate1_probs_layer, candidate2_probs_layer,\
candidate1_probs_model, candidate2_probs_model = model.attention_intervention_experiment(
intervention, effect_type)
odds_intervention_head = candidate2_probs_head / candidate1_probs_head
odds_intervention_layer = candidate2_probs_layer / candidate1_probs_layer
odds_intervention_model = candidate2_probs_model / candidate1_probs_model
effect_head = (odds_intervention_head - odds_base) / odds_base
effect_layer = (odds_intervention_layer - odds_base) / odds_base
effect_model = (odds_intervention_model - odds_base) / odds_base
results[effect_type + "_odds_head"] = odds_intervention_head.tolist()
results[effect_type + "_effect_head"] = effect_head.tolist()
results[effect_type + "_effect_layer"] = effect_layer.tolist()
results[effect_type + "_effect_model"] = effect_model
return results | 3fae717923adda6d4b08c424c24600d578961a2a | 13,444 |
def nice_size(
self: complex,
unit: str = 'bytes',
long: bool = False,
lower: bool = False,
precision: int = 2,
sep: str = '-',
omissions: list = 'mono deca hecto'.split(),
):
"""
This should behave well on int subclasses
"""
mag = magnitude(self, omissions)
precision = sredro[mag] if self < 5 else precision
unit = set_case(set_length(mag, unit, long, sep), lower)
val = round(self * 10 ** -(sredro[mag]), precision)
return lasso(val, unit) | 1361f17e98ce4d5c6f9c094b8a4f1a9e7cf3035b | 13,445 |
from .core.observable.fromcallback import _from_callback
from typing import Callable
from typing import Optional
import typing
def from_callback(func: Callable,
mapper: Optional[typing.Mapper] = None
) -> Callable[[], Observable]:
"""Converts a callback function to an observable sequence.
Args:
func: Function with a callback as the last argument to
convert to an Observable sequence.
mapper: [Optional] A mapper which takes the arguments
from the callback to produce a single item to yield on
next.
Returns:
A function, when executed with the required arguments minus
the callback, produces an Observable sequence with a single
value of the arguments to the callback as a list.
"""
return _from_callback(func, mapper) | b93900f480d5dd851d8e45c00627590ad89fd24c | 13,446 |
import re
def EVLAUVFITS(inUV, filename, outDisk, err, compress=False, \
exclude=["AIPS HI", "AIPS AN", "AIPS FQ", "AIPS SL", "AIPS PL"], \
include=[], headHi=False, logfile=""):
"""
Write UV data as FITS file
Write a UV data set as a FITAB format file
History written to header
* inUV = UV data to copy
* filename = name of FITS file, any whitespace characters replaced with underscore
* outDisk = FITS directory number
* err = Python Obit Error/message stack
* exclude = List of table types NOT to copy
NB: "AIPS HI" isn't really a table and gets copied anyway
* include = List of table types to copy (FQ, AN always done )
Exclude has presidence over include
* headHi = if True move history to header, else leave in History table
returns FITS UV data object
"""
################################################################
mess = "Write Data to FITS UV data "+filename+" on disk "+str(outDisk)
printMess(mess, logfile)
# Checks
if not UV.PIsA(inUV):
raise TypeError("inUV MUST be a Python Obit UV")
if not OErr.OErrIsA(err):
raise TypeError("err MUST be an OErr")
#
# Deblank filename
    fn = re.sub(r'\s', '_', filename)
# Set output
outUV = UV.newPFUV("FITS UV DATA", fn, outDisk, False, err)
if err.isErr:
OErr.printErrMsg(err, "Error creating FITS data")
#Compressed?
if compress:
inInfo = UV.PGetList(outUV) #
dim = [1,1,1,1,1]
InfoList.PAlwaysPutBoolean (inInfo, "Compress", dim, [True])
# Copy
UV.PCopy (inUV, outUV, err)
if err.isErr:
OErr.printErrMsg(err, "Error copying UV data to FITS")
# History
inHistory = History.History("inhistory", outUV.List, err)
outHistory = History.History("outhistory", outUV.List, err)
# Add history
outHistory.Open(History.READWRITE, err)
outHistory.TimeStamp(" Start Obit uvtab",err)
outHistory.WriteRec(-1,"uvtab / FITS file "+fn+" disk "+str(outDisk),err)
outHistory.Close(err)
# History in header?
if headHi:
History.PCopy2Header (inHistory, outHistory, err)
OErr.printErrMsg(err, "Error with history")
# zap table
outHistory.Zap(err)
# Copy Tables
UV.PCopyTables (inUV, outUV, exclude, include, err)
return outUV | 98de4f1422be2281eca539b9c372e8d0b9980aeb | 13,447 |
def form_cleaner(querydict):
"""
Hacky way to transform form data into readable data by the model constructor
:param querydict: QueryDict
:return: dict
"""
r = dict(querydict.copy())
    # Delete the CSRF token
del r['csrfmiddlewaretoken']
for key in list(r):
# Take first element of array
r[key] = r[key][0]
# Delete empty fields
if r[key] == '' or r[key] is None:
del r[key]
return r | 83d61f028748132803555da85f0afe0215be2edd | 13,448 |
def has_1080p(manifest):
"""Return True if any of the video tracks in manifest have a 1080p profile
available, else False"""
return any(video['width'] >= 1920
for video in manifest['videoTracks'][0]['downloadables']) | f187ff7fd8f304c0cfe600c4bed8e809c4c5e105 | 13,449 |
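A minimal sketch of the manifest shape the check above expects (the values here are made up; only videoTracks[0]['downloadables'][*]['width'] matters):

manifest_hd = {'videoTracks': [{'downloadables': [{'width': 1280}, {'width': 1920}]}]}
print(has_1080p(manifest_hd))  # True
manifest_sd = {'videoTracks': [{'downloadables': [{'width': 720}, {'width': 1280}]}]}
print(has_1080p(manifest_sd))  # False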
def visualize_table(filename: str, table: str) -> bool:
"""
Formats the contents of a db table using the texttable package
:param filename: .db file name (String)
:param table: Name of the table to plot (String)
:return: Bool
"""
conn, cursor = get_connection(filename)
table_elements = get_table(filename, table)
if not len(table_elements) > 0:
print("This table is empty")
return False
text_table = Texttable()
allign = ["l" for i in range(len(table_elements[0]))]
vallign = ["m" for i in range(len(table_elements[0]))]
title = eval(query(filename, "tables", "name", table)[0][1])
text_table.set_cols_align(allign)
text_table.set_cols_valign(vallign)
text_table.header(title)
for row in table_elements:
text_table.add_row(row)
print(text_table.draw())
return True | d7ab8125353ac0550a704ba208a8095f82125294 | 13,450 |
def extractIsekaiMahou(item):
"""
# Isekai Mahou Translations!
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'Isekai Mahou Chapter' in item['title'] and 'Release' in item['title']:
return buildReleaseMessageWithType(item, 'Isekai Mahou wa Okureteru!', vol, chp, frag=frag, postfix=postfix)
return False | 8537e5d9374c38aa0218d380013e98c4bfe6eabb | 13,451 |
def delete_version_from_file(fname, par, ctype=gu.PIXEL_MASK, vers=None, cmt=None, verb=False) :
"""Delete specified version from calibration constants.
Parameters
- fname : full path to the hdf5 file
- par : psana.Event | psana.Env | float - tsec event time
- ctype : gu.CTYPE - enumerated calibration type, e.g.: gu.PIXEL_MASK
- vers : int - calibration version
- cmt : str - comment
- verb : bool - verbousity
See :py:class:`DCMethods`
"""
metname = sys._getframe().f_code.co_name
str_ctype = gu.dic_calib_type_to_name[ctype]
if verb : print ' %s.delete_version_from_file: ctype: %s vers: %s'%\
(metname, str_ctype, vers)
if not is_good_fname(fname, verb) : return None
cs = DCStore(fname)
cs.load()
ct = cs.ctypeobj(str_ctype)
if ct is None : return None
#ct.print_obj()
tsec = dcu.par_to_tsec(par)
cr = ct.range_for_tsec(tsec)
if cr is None : return None
v = vers if vers is not None else cr.vnum_last()
vdel = cr.mark_version(vnum=vers, cmt=cmt)
if verb : log.setPrintBits(02) # 0377
cs.save()
if verb :
print 50*'_','\nDCStore.print_obj() after delete version %s' % str(vdel)
cs.print_obj()
return vdel | 2f5e6d180457f140c8195e358ffa6afbae8a227d | 13,452 |
from typing import List
from typing import Tuple
import io
import mido
def create_midi_file(notes: List[Tuple[int, int]]) -> io.BytesIO:
"""Create a MIDI file from the given list of notes.
Notes are played with piano instrument.
"""
byte_stream = io.BytesIO()
mid = mido.MidiFile()
track = mido.MidiTrack()
mid.tracks.append(track)
for note, t in notes:
track.append(mido.Message('note_on', note=note, velocity=64))
track.append(mido.Message('note_off', note=note, time=t))
mid.save(file=byte_stream)
return io.BytesIO(byte_stream.getvalue()) | 1f9443df11f08a76c9d5c472d025fe92f3d459af | 13,454 |
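A hedged usage sketch (assumes mido is installed; notes are MIDI pitch numbers, times are delta ticks, and the output filename is illustrative):
midi_bytes = create_midi_file([(60, 480), (64, 480), (67, 480)])  # C4, E4, G4
with open('chord.mid', 'wb') as fh:
    fh.write(midi_bytes.read())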
async def get_programs(request: Request) -> Response:
"""
description: Get a list of all programs
responses:
200:
description: A list of programs.
"""
ow: "OpenWater" = request.app.ow
return ToDictJSONResponse([p.to_dict() for p in ow.programs.store.all]) | d4a16ec19ba4e095c0479a43f2ef191e9dae84f5 | 13,455 |
def create_dataframe(dictionary_to_convert, cols):
"""
Build a pandas DataFrame from the passed dictionary, using ``cols`` as the column names.
"""
dataframe_converted = pd.DataFrame.from_dict(dictionary_to_convert, orient='index', columns = cols)
dataframe_converted = dataframe_converted.reset_index()
dataframe_converted = dataframe_converted.drop(columns=['index'])
return dataframe_converted | 4f2ad388cd9a12a6aee55e974320c8b7ac7f95a7 | 13,456 |
import pandas
def aggregate_dataframe(mails_per_sender, datetimes_per_sender):
"""Engineer features and aggregate them in a dataframes.
:param dict mails_per_sender: A dictionary with email counts for each sender
:param dict datetimes_per_sender: A dictionary with datetime objects for
each sender
:raises InputError: if at least one of the arguments is an empty dictionary
:returns: A dataframe with aggregated features
:rtype: pandas.DataFrame
"""
try:
if not mails_per_sender or not datetimes_per_sender:
raise exceptions.InputError('At least one of the arguments is an '
'empty dictionary!')
except exceptions.InputError:
raise
average_timestamps = average_timestamps_in_seconds(
datetimes_per_sender)
average_weekdays = weekday_average(datetimes_per_sender)
aggregation = {'Mail Count': mails_per_sender,
'Average Timestamp': average_timestamps,
'Average Weekday': average_weekdays}
return pandas.DataFrame(aggregation) | a584d72fdb2df9148b5ff6a6fe907c8f09b26234 | 13,458 |
def traj2points(traj, npoints, OS):
"""
Transform spoke trajectory to point trajectory
Args:
traj: Trajectory with shape [nspokes, 3]
npoints: Number of readout points along spokes
OS: Oversampling
Returns:
array: Trajectory with shape [nspokes, npoints, 3]
"""
[nspokes, ndim] = np.shape(traj)
r = (np.arange(0, npoints))/OS
Gx, Gy, Gz = np.meshgrid(r, np.arange(nspokes), np.arange(ndim))
traj_p = Gx*np.transpose(np.tile(traj, [npoints, 1, 1]), [1, 0, 2])
return traj_p | f411b91e86943f7ae03f52cf3d6b1005299902ba | 13,462 |
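A quick shape check (numpy assumed imported as np, as in the function body; the trajectory values are arbitrary):
import numpy as np
traj = np.random.randn(4, 3)  # 4 spokes, one 3D direction each
traj_p = traj2points(traj, npoints=16, OS=2)
print(traj_p.shape)  # (4, 16, 3): 16 readout points per spoke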
def model_variable(name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
constraint=None,
trainable=True,
collections=None,
**kwargs):
"""
Get or create a model variable.
When the variable is created, it will be added to both `GLOBAL_VARIABLES`
and `MODEL_VARIABLES` collection.
Args:
name: Name of the variable.
shape: Shape of the variable.
dtype: Data type of the variable.
initializer: Initializer of the variable.
regularizer: Regularizer of the variable.
constraint: Constraint of the variable.
trainable (bool): Whether or not the variable is trainable?
collections: In addition to `GLOBAL_VARIABLES` and `MODEL_VARIABLES`,
also add the variable to these collections.
\\**kwargs: Other named arguments passed to :func:`tf.get_variable`.
Returns:
tf.Variable: The variable.
"""
collections = list(set(
list(collections or ()) +
[tf.GraphKeys.GLOBAL_VARIABLES, tf.GraphKeys.MODEL_VARIABLES]
))
return tf.get_variable(
name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
constraint=constraint,
trainable=trainable,
collections=collections,
**kwargs
) | 7c717234fca10163708abf19057e68124b8fe3e8 | 13,465 |
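A hedged usage sketch, assuming a TF 1.x graph-mode setting where tf.get_variable is available; the names and shapes are illustrative:
kernel = model_variable('kernel', shape=[128, 64],
initializer=tf.glorot_uniform_initializer())
bias = model_variable('bias', shape=[64],
initializer=tf.zeros_initializer(), trainable=True)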
def default_k_pattern(n_pattern):
""" the default number of pattern divisions for crossvalidation
minimum number of patterns is 3*k_pattern. Thus for n_pattern < 12 this
returns 2. From there it grows gradually until 5 groups are made for 40
or more patterns. From this point onwards the number of groups is kept at 5.
bootstrapped crossvalidation also uses this function to set k, but scales
n_rdm to the expected proportion of samples retained when bootstrapping
(1-np.exp(-1))
"""
if n_pattern < 12:
k_pattern = 2
elif n_pattern < 24:
k_pattern = 3
elif n_pattern < 40:
k_pattern = 4
else:
k_pattern = 5
return k_pattern | 60d083ffed24987882fa8074d99e37d06748eaf3 | 13,466 |
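For illustration, the thresholds above give:
print([default_k_pattern(n) for n in (8, 12, 24, 39, 100)])  # [2, 3, 4, 4, 5]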
def resize_basinData():
"""
read in global data and make the new bt with same length
this step can be elimated if we are using ibtracks in the future CHAZ development
"""
basinName = ['atl','wnp','enp','ni','sh']
nd = 0
for iib in range(0,len(basinName),1):
ib = basinName[iib]
f =gv.ipath + 'bt_'+ib+'.nc'
#bt1 = nc.Dataset(f)
bt1 = xr.open_dataset(f)
if iib == 0:
maxCol = bt1['PIslp'].shape[0]
else:
maxCol = np.nanmax([maxCol,bt1['PIslp'].shape[0]])
## in bt1, Time is a datenumber counted from 1800-01-01 00:00, so a value of 0 means there is no data
nd += bt1['PIslp'].shape[1]
bt = {}
for iib in range(0,len(basinName),1):
ib = basinName[iib]
f = gv.ipath + 'bt_'+ib+'.nc'  # re-derive the path per basin; reusing the stale f would read only the last basin
bt1 = xr.open_dataset(f)
for iv in bt1.variables.keys():
if iib == 0:
if np.size(bt1.variables[iv].shape) >1:
bt[iv] = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.float('nan')
bt[iv][:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
else:
bt[iv] = bt1.variables[iv].values
else:
if np.size(bt1.variables[iv].shape) >1:
dummy = np.zeros([maxCol,bt1.variables[iv].shape[1]])*np.float('nan')
dummy[:bt1.variables[iv].shape[0],:] = bt1.variables[iv].values
bt[iv] = np.hstack([bt[iv],dummy])
del dummy
else:
bt[iv] = np.hstack([bt[iv],bt1.variables[iv].values])
del bt1
for iv in bt.keys():
if ((np.size(bt[iv].shape) >1) and ('Time' not in iv)):
bt[iv][bt['Time']==0] = np.float('nan')
bt['Time'][bt['Time']!=bt['Time']]=0
return bt | f80892b79cbe12f00daa0918ccf1ac579c90193d | 13,467 |
def _cast_wf(wf):
"""Cast wf to a list of ints"""
if not isinstance(wf, list):
if str(type(wf)) == "<class 'numpy.ndarray'>":
# see https://stackoverflow.com/questions/2060628/reading-wav-files-in-python
wf = wf.tolist() # list(wf) does not convert int16 to int
else:
wf = list(wf) # fallback
if len(wf) > 0:
assert isinstance(wf[0], int), f"first element of wf wasn't an int, but a {type(wf[0])}"
return wf | cf2bf853b3ac021777a65d5323de6990d8dc4c5c | 13,468 |
def centralize_scene(points):
"""In-place centralize a whole scene"""
assert points.ndim == 2 and points.shape[1] >= 3
points[:, 0:2] -= points[:, 0:2].mean(0)
points[:, 2] -= points[:, 2].min(0)
return points | 3bdbbe5e3e9c1383852afd15910bb23a68e75506 | 13,470 |
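A small numeric sketch (numpy assumed imported as np); the array is modified in place and also returned:
import numpy as np
pts = np.array([[1., 2., 5.],
[3., 4., 7.]])
centralize_scene(pts)
print(pts)  # [[-1. -1.  0.]
            #  [ 1.  1.  2.]]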
def ms(val):
""" Turn a float value into milliseconds as an integer. """
return int(val * 1000) | 97f7d736ead998014a2026a430bf3f0c54042010 | 13,471 |
def render_doc(stig_rule, deployer_notes):
"""Generate documentation RST for each STIG configuration."""
template = JINJA_ENV.get_template('template_doc_rhel7.j2')
return template.render(
rule=stig_rule,
notes=deployer_notes
) | 97167c23b7b9550bac9f8722ac9f9baed21e060e | 13,472 |
def official_evaluate(reference_csv_path, prediction_csv_path):
"""Evaluate metrics with official SED toolbox.
Args:
reference_csv_path: str
prediction_csv_path: str
"""
reference_event_list = sed_eval.io.load_event_list(reference_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
estimated_event_list = sed_eval.io.load_event_list(prediction_csv_path,
delimiter='\t', csv_header=False,
fields=['filename','onset','offset','event_label'])
evaluated_event_labels = reference_event_list.unique_event_labels
files={}
for event in reference_event_list:
files[event['filename']] = event['filename']
evaluated_files = sorted(list(files.keys()))
segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(
event_label_list=evaluated_event_labels,
time_resolution=1.0
)
for file in evaluated_files:
reference_event_list_for_current_file = []
for event in reference_event_list:
if event['filename'] == file:
reference_event_list_for_current_file.append(event)
estimated_event_list_for_current_file = []
for event in estimated_event_list:
if event['filename'] == file:
estimated_event_list_for_current_file.append(event)
segment_based_metrics.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file
)
results = segment_based_metrics.results()
return results | 718da1a97cb73e382b45c43432f4b991eab93732 | 13,473 |
from pathlib import Path
import yaml
def get_notebooks():
"""Read `notebooks.yaml` info."""
path = Path("tutorials") / "notebooks.yaml"
with path.open() as fh:
return yaml.safe_load(fh) | 232ffc1820f29eddc9ded118b69ea8e6857b00c9 | 13,474 |
def getSimData(startDate, endDate, region):
""" Get all boundary condition data needed for a simulation run
Args:
startDate (string): Start date DD.MM.YYYY
(start time is hard coded to 00:00)
endDate (string): End date DD.MM.YYYY
(end day is not in time range, so end date
should be end date + 1 day)
region (string): Location of simulation (determines climate / weather)
Supported regions:
East, West, South, North
Returns:
int / pd.DataFrame / np float (arrays): nSteps, time, SLP,
HWP factor, Weather, SolarPosition
"""
data = getSimData_df(startDate, endDate, region)
return (data.time.size, data.time,
data.SLP,
data.HWPfactor.to_numpy(dtype=np.float32),
data.Weather,
data.SolarPosition
) | 65e7c3a18194eeac4781d57c412cbb079c1078ba | 13,475 |
def get_dag_path(pipeline, module=None):
"""
Gets the DAG path.
:@param pipeline: The Airflow Variable key that has the config.
:@type pipeline: str.
:@param module: The module that belongs to the pipeline.
:@type module: str.
:@return: The DAG path of the pipeline.
"""
if module is None:
module = pipeline
config = Variable.get(pipeline, deserialize_json=True)
return pp.join(config['dag_install_path'], '{}_dag.py'.format(module)) | 4b0a9e5d9692d3c2477e23cb4ba988c589fb9b96 | 13,476 |
def blow_up(polygon):
"""Takes a ``polygon`` as input and adds pixels to it according to the following rule. Consider the line between two
adjacent pixels in the polygon (i.e., if connected via an edge). Then the method adds additional equidistant pixels
lying on that line (if the value is double, convert to int), dependent on the x- and y-distance of the pixels.
:param polygon: input polygon that should be blown up
:type polygon: Polygon
:return: blown up polygon
"""
res = Polygon()
for i in range(1, polygon.n_points, 1):
x1 = polygon.x_points[i - 1]
y1 = polygon.y_points[i - 1]
x2 = polygon.x_points[i]
y2 = polygon.y_points[i]
diff_x = abs(x2 - x1)
diff_y = abs(y2 - y1)
# if (x1,y1) = (x2, y2)
if max(diff_x, diff_y) < 1:
if i == polygon.n_points - 1:
res.add_point(x2, y2)
continue
res.add_point(x1, y1)
if diff_x >= diff_y:
for j in range(1, diff_x, 1):
if x1 < x2:
xn = x1 + j
else:
xn = x1 - j
yn = int(round(y1 + (xn - x1) * (y2 - y1) / (x2 - x1)))
res.add_point(xn, yn)
else:
for j in range(1, diff_y, 1):
if y1 < y2:
yn = y1 + j
else:
yn = y1 - j
xn = int(round(x1 + (yn - y1) * (x2 - x1) / (y2 - y1)))
res.add_point(xn, yn)
if i == polygon.n_points - 1:
res.add_point(x2, y2)
return res | c48005af11b8e1982aa45218159169acca0bd145 | 13,477 |
from typing import Optional
import time
def x_sogs_raw(
s: SigningKey,
B: PublicKey,
method: str,
full_path: str,
body: Optional[bytes] = None,
*,
b64_nonce: bool = True,
blinded: bool = False,
timestamp_off: int = 0,
):
"""
Calculates X-SOGS-* headers.
Returns 4 elements: the headers dict, the nonce bytes, timestamp int, and signature bytes.
Use x_sogs(...) instead if you don't need the nonce/timestamp/signature values.
"""
n = x_sogs_nonce()
ts = int(time.time()) + timestamp_off
if blinded:
a = s.to_curve25519_private_key().encode()
k = sodium.crypto_core_ed25519_scalar_reduce(
blake2b(sogs.crypto.server_pubkey_bytes, digest_size=64)
)
ka = sodium.crypto_core_ed25519_scalar_mul(k, a)
kA = sodium.crypto_scalarmult_ed25519_base_noclamp(ka)
pubkey = '15' + kA.hex()
else:
pubkey = '00' + s.verify_key.encode().hex()
to_sign = [B.encode(), n, str(ts).encode(), method.encode(), full_path.encode()]
if body:
to_sign.append(blake2b(body, digest_size=64))
if blinded:
H_rh = sha512(s.encode())[32:]
r = sodium.crypto_core_ed25519_scalar_reduce(sha512([H_rh, kA, *to_sign]))
sig_R = sodium.crypto_scalarmult_ed25519_base_noclamp(r)
HRAM = sodium.crypto_core_ed25519_scalar_reduce(sha512([sig_R, kA, *to_sign]))
sig_s = sodium.crypto_core_ed25519_scalar_add(
r, sodium.crypto_core_ed25519_scalar_mul(HRAM, ka)
)
sig = sig_R + sig_s
else:
sig = s.sign(b''.join(to_sign)).signature
h = {
'X-SOGS-Pubkey': pubkey,
'X-SOGS-Nonce': sogs.utils.encode_base64(n) if b64_nonce else n.hex(),
'X-SOGS-Timestamp': str(ts),
'X-SOGS-Signature': sogs.utils.encode_base64(sig),
}
return h, n, ts, sig | 6184f7f719c8d1e9e8e14fef56a65cd1d87f9f4f | 13,478 |
import json
def get_param(param, content, num=0):
"""
Get the value of a given parameter from the response content.
:param param: the parameter to extract from the API response
:param content: the API response
:param num: when the response contains a list, the index of the element to take
:return: the extracted parameter value (non-variable)
"""
param_val = None
if "." in param:
patt = param.split('.')
param_val = httprunner_extract(content, patt)
return param_val
else:
if isinstance(content, str):
try:
content = json.loads(content)
except:
content = ""
if isinstance(content, dict):
param_val = get_param_response(param, content, num)
if isinstance(content, list):
dict_data = {}
for i in range(len(content)):
try:
dict_data[str(i)] = eval(content[i])
except:
dict_data[str(i)] = content[i]
param_val = get_param_response(param, dict_data, num)
if param_val is None:
return param_val
else:
if "$" + param == param_val:
param_val = None
return param_val | d912c3ee22c223b4f9a91dc4817fc54a79139c20 | 13,479 |
def make_thebig_df_from_data(strat_df_list, strat_names):
"""Joins strategy data frames into a single df - **The Big DF** -
Signature of The Big DF:
df(strategy, sim_prefix, exec, node)[metrics]
"""
thebig_df = pd.concat(strat_df_list, axis=0, keys=strat_names)
thebig_df.index.set_names("strategy", level=0, inplace=True)
return thebig_df | 8ce67464a18fde5e81e0c8fd0c2a2d7ea016730e | 13,480 |
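A hedged sketch with two toy per-strategy frames (pandas assumed imported as pd, as in the function body; real frames carry the sim_prefix/exec/node index levels):
import pandas as pd
df_a = pd.DataFrame({'runtime': [1.0, 2.0]}, index=['node0', 'node1'])
df_b = pd.DataFrame({'runtime': [1.5, 2.5]}, index=['node0', 'node1'])
thebig_df = make_thebig_df_from_data([df_a, df_b], ['greedy', 'random'])
print(thebig_df.index.names)  # ['strategy', None] for these toy frames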
import datetime
def first_weekday_date(date):
"""
Filter - returns the date of the first weekday for the date
Usage (in template):
{{ some_date|first_weekday_date }}
"""
week_start = date - datetime.timedelta(days=date.weekday())
return week_start.date() | 8c7466040bff9e1924dbe365b92d796afe976fed | 13,481 |
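For example, passing a Wednesday returns the date of that week's Monday:
import datetime
print(first_weekday_date(datetime.datetime(2024, 1, 10)))  # 2024-01-08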
def isLto():
"""*bool* = "--lto" """
return options.lto | ada2d688b1fe84fbcbb585c28e9d6251cce3dcd9 | 13,482 |
def runtime():
"""Get the CumulusCI runtime for the current working directory."""
init_logger()
return CliRuntime() | e4c3ed275f08cc7b982550714fa5c66c78ed1aa4 | 13,483 |
def order_json_objects(obj):
"""
Recursively orders all elements in a JSON object.
Source:
https://stackoverflow.com/questions/25851183/how-to-compare-two-json-objects-with-the-same-elements-in-a-different-order-equa
"""
if isinstance(obj, dict):
return sorted((k, order_json_objects(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(order_json_objects(x) for x in obj)
return obj | 5a0459d227b0a98c536290e3e72b76424d29820c | 13,484 |
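For instance, two payloads with the same members in a different order compare equal once ordered:
a = {'tags': ['x', 'y'], 'id': 1}
b = {'id': 1, 'tags': ['y', 'x']}
print(order_json_objects(a) == order_json_objects(b))  # True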
def CalculatePEOEVSA(mol, bins=None):
"""
#################################################################
MOE-type descriptors using partial charges and surface
area contributions.
chgBins=[-.3,-.25,-.20,-.15,-.10,-.05,0,.05,.10,.15,.20,.25,.30]
You can specify your own bins to compute some descriptors
Usage:
result=CalculatePEOEVSA(mol)
Input: mol is a molecule object
Output: result is a dict form
#################################################################
"""
temp = MOE.PEOE_VSA_(mol, bins, force=1)
res = {}
for i, j in enumerate(temp):
res["PEOEVSA" + str(i)] = round(j, 3)
return res | 2b51c65f70b93bee80be5eba740319ab53eeb992 | 13,485 |
def install_pyheif_from_pip() -> int:
"""
Install the python module pyheif from PIP.
Assumes required libraries already installed
:return: return code from pip
"""
print("Installing Python support for HEIF / HEIC...")
cmd = make_pip_command(
'install {} -U --disable-pip-version-check pyheif'.format(pip_user)
)
return popen_capture_output(cmd) | b4d9a2d8d08e9e6dde4ac828dd34dfc93dd6ca02 | 13,486 |
def adjust_mlb_names(mlb_id, fname, lname):
"""
Adjusts a prospect's first and last name (fname, lname) given their mlb.com player_id for better usage in matching to the professional_prospects table.
"""
player_mapper = {
}
qry = """SELECT wrong_name
, right_fname
, right_lname
FROM NSBL.name_mapper nm
;"""
res = db.query(qry)
for row in res:
wrong, right_fname, right_lname = row
player_mapper[wrong] = [right_fname, right_lname]
if mlb_id in player_mapper:
fname, lname = player_mapper.get(mlb_id)
return fname, lname
else:
return fname, lname | 2570cd47e3875e1c621f6b4c7c8659c6edca1d6e | 13,487 |
from typing import Callable
from typing import Any
def all_predicates(*predicates: Callable[[Any], bool]) -> Callable[[Any], bool]:
"""Takes a set of predicates and returns a function that takes an entity
and checks if it satisfies all the predicates.
>>> even_and_prime = all_predicates(is_even, is_prime)
>>> even_and_prime(2)
True
>>> even_and_prime(4)
False
>>> even_and_prime(3)
False
Added in version: 0.1.0
"""
return lambda entity: all((p(entity) for p in predicates)) | b531e848e3a24851c5bc756beae46bdd14311b1f | 13,488 |
def centered_rand(l):
"""Sample from U(-l, l)"""
return l*(2.*np.random.rand()-1.) | f8cc1a8c6ad190b53061e1e83a410aa5cdcf26ed | 13,489 |
import torch
def compute_rays_length(rays_d):
"""Compute ray length.
Args:
rays_d: [R, 3] float tensor. Ray directions.
Returns:
rays_length: [R, 1] float tensor. Ray lengths.
"""
rays_length = torch.norm(rays_d, dim=-1, keepdim=True) # [N_rays, 1]
return rays_length | 9b43f9ea79708a690282a04eec65dbabf4a7ae36 | 13,490 |
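A tiny check of the expected output (torch is imported with the snippet above):
rays_d = torch.tensor([[1., 0., 0.],
[0., 3., 4.]])
print(compute_rays_length(rays_d))  # tensor([[1.], [5.]])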
import itertools
def _repeat_elements(arr, n):
"""
Repeats the elements int the input array, e.g.
[1, 2, 3] -> [1, 1, 1, 2, 2, 2, 3, 3, 3]
"""
ret = list(itertools.chain(*[list(itertools.repeat(elem, n)) for elem in arr]))
return ret | 95cf8ebb75505d2704cf957cdd709b8fa735973a | 13,491 |
def get_neighbors_table(embeddings, method, ntrees=None):
"""
This is a factory method for cosine distance nearest neighbor methods.
Args:
embeddings (ndarray): The embeddings to index
method (string): The nearest neighbor method to use
ntrees (int): number of trees for annoy
Returns:
Nearest neighbor table
"""
if method == "annoy":
if ntrees is None:
raise ImproperParameterSpecificationException("ntrees must be defined")
table = AnnoyNeighborsTable(embeddings, ntrees)
elif method == "brute":
table = BruteForceNeighborsTable(embeddings)
else:
raise MethodNotImplementedException("{} is not an implemented method".format(method))
return table | ee665e8332bf0f9b4c2e1ed38cf8b328a10cfc9b | 13,492 |
def _compute_positional_encoding(
attention_type,
position_encoding_layer,
hidden_size,
batch_size,
total_length,
seq_length,
clamp_length,
bi_data,
dtype=tf.float32):
"""Computes the relative position encoding.
Args:
attention_type: str, the attention type. Can be "uni" (directional) or
"bi" (directional).
position_encoding_layer: An instance of `RelativePositionEncoding`.
hidden_size: int, the hidden size.
batch_size: int, the batch size.
total_length: int, the sequence length added to the memory length.
seq_length: int, the length of each sequence.
clamp_length: int, clamp all relative distances larger than clamp_length. -1
means no clamping.
bi_data: bool, whether to use bidirectional input pipeline. Usually set to
True during pretraining and False during finetuning.
dtype: the dtype of the encoding.
Returns:
A Tensor, representing the position encoding.
"""
freq_seq = tf.range(0, hidden_size, 2.0)
if dtype is not None and dtype != tf.float32:
freq_seq = tf.cast(freq_seq, dtype=dtype)
if attention_type == "bi":
beg, end = total_length, -seq_length
elif attention_type == "uni":
beg, end = total_length, -1
else:
raise ValueError("Unknown `attention_type` {}.".format(attention_type))
if bi_data:
forward_position_sequence = tf.range(beg, end, -1.0)
backward_position_sequence = tf.range(-beg, -end, 1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(forward_position_sequence,
dtype=dtype)
backward_position_sequence = tf.cast(backward_position_sequence,
dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
backward_position_sequence = tf.clip_by_value(
backward_position_sequence,
-clamp_length,
clamp_length)
if batch_size is not None:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, batch_size // 2)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, batch_size // 2)
else:
forward_positional_encoding = position_encoding_layer(
forward_position_sequence, None)
backward_positional_encoding = position_encoding_layer(
backward_position_sequence, None)
relative_position_encoding = tf.concat(
[forward_positional_encoding, backward_positional_encoding], axis=0)
else:
forward_position_sequence = tf.range(beg, end, -1.0)
if dtype is not None and dtype != tf.float32:
forward_position_sequence = tf.cast(
forward_position_sequence, dtype=dtype)
if clamp_length > 0:
forward_position_sequence = tf.clip_by_value(
forward_position_sequence,
-clamp_length,
clamp_length)
relative_position_encoding = position_encoding_layer(
forward_position_sequence, batch_size)
return relative_position_encoding | fe7a87510745aa2c4b7b5f9e3225464d32e4a00e | 13,493 |