content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64, 0–3.66M) |
---|---|---|
import json
def user_config(filename):
"""user-provided configuration file"""
try:
with open(filename) as file:
return json.load(file)
except FileNotFoundError as fnf:
raise RuntimeError(f"File '{filename}' could not be found") from fnf
except json.JSONDecodeError as jsond:
raise RuntimeError(f"Error while parsing '{filename}'") from jsond
|
a6aa05d76b4aaa12c02ff97e4ab5ba4ba1245324
| 18,400 |
def _decomposed_dilated_conv2d(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False):
"""
Decomposed dilated conv2d without BN or relu.
"""
# padding so that the input dims are multiples of dilation_factor
H = tf.shape(x)[1]
W = tf.shape(x)[2]
pad_bottom = (dilation_factor - H % dilation_factor) if H % dilation_factor != 0 else 0
pad_right = (dilation_factor - W % dilation_factor) if W % dilation_factor != 0 else 0
pad = [[0, pad_bottom], [0, pad_right]]
# decomposition to smaller-sized feature maps
# [N,H,W,C] -> [N*d*d, H/d, W/d, C]
o = tf.space_to_batch(x, paddings=pad, block_size=dilation_factor)
# perform regular conv2d
num_x = x.shape[3].value
with tf.variable_scope(name) as scope:
w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o])
s = [1, 1, 1, 1]
o = tf.nn.conv2d(o, w, s, padding='SAME')
if biased:
b = tf.get_variable('biases', shape=[num_o])
o = tf.nn.bias_add(o, b)
o = tf.batch_to_space(o, crops=pad, block_size=dilation_factor)
return o
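# A minimal usage sketch, assuming TensorFlow 1.x graph mode and that this module
# imports tensorflow as tf; the input shape and scope name are illustrative only.
x_in = tf.placeholder(tf.float32, [None, 64, 64, 3])
y_out = _decomposed_dilated_conv2d(x_in, kernel_size=3, num_o=16, dilation_factor=2, name='dconv1', top_scope=None, biased=True)
# y_out keeps the 64x64 spatial size and has num_o=16 output channels.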
|
578d9308834294a80e778548faba9eb9fe0329c5
| 18,401 |
from datetime import datetime
from typing import Tuple
async def post_autodaily(text_channel: TextChannel, latest_message_id: int, change_mode: bool, current_daily_message: str, current_daily_embed: Embed, utc_now: datetime) -> Tuple[bool, bool, Message]:
"""
Returns (posted, can_post, latest_message)
"""
posted = False
if text_channel and current_daily_message:
error_msg_delete = f'could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
error_msg_edit = f'could not edit message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
error_msg_post = f'could not post a message in channel [{text_channel.id}] on guild [{text_channel.guild.id}]'
post_new = change_mode != server_settings.AutoDailyChangeMode.EDIT
can_post = True
latest_message: Message = None
use_embeds = await server_settings.get_use_embeds(None, bot=BOT, guild=text_channel.guild)
if use_embeds:
colour = utils.discord.get_bot_member_colour(BOT, text_channel.guild)
embed = current_daily_embed.copy()
embed.colour = colour
else:
embed = None
if can_post:
can_post, latest_message = await daily_fetch_latest_message(text_channel, latest_message_id)
if can_post:
if latest_message and latest_message.created_at.day == utc_now.day:
latest_message_id = latest_message.id
if change_mode == server_settings.AutoDailyChangeMode.DELETE_AND_POST_NEW:
try:
deleted = await utils.discord.try_delete_message(latest_message)
if deleted:
latest_message = None
utils.dbg_prnt(f'[post_autodaily] deleted message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
else:
print(f'[post_autodaily] could not delete message [{latest_message_id}] from channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.NotFound:
print(f'[post_autodaily] {error_msg_delete}: the message could not be found')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_delete}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_delete}: {err}')
can_post = False
elif change_mode == server_settings.AutoDailyChangeMode.EDIT:
try:
if use_embeds:
await latest_message.edit(embed=embed)
else:
await latest_message.edit(content=current_daily_message)
posted = True
utils.dbg_prnt(f'[post_autodaily] edited message [{latest_message_id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.NotFound:
print(f'[post_autodaily] {error_msg_edit}: the message could not be found')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_edit}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_edit}: {err}')
can_post = False
else:
post_new = True
if not posted and can_post and post_new:
try:
if use_embeds:
latest_message = await text_channel.send(embed=embed)
else:
latest_message = await text_channel.send(current_daily_message)
posted = True
utils.dbg_prnt(f'[post_autodaily] posted message [{latest_message.id}] in channel [{text_channel.id}] on guild [{text_channel.guild.id}]')
except errors.Forbidden:
print(f'[post_autodaily] {error_msg_post}: the bot doesn\'t have the required permissions.')
can_post = False
except Exception as err:
print(f'[post_autodaily] {error_msg_post}: {err}')
can_post = False
else:
can_post = False
if latest_message:
return posted, can_post, latest_message
else:
return posted, can_post, None
else:
return posted, None, None
|
309ad1c4554b2dcef6b2ddbcfd2d5652ea291488
| 18,402 |
def room_from_loc(env, loc):
"""
Get the room coordinates for a given location
"""
if loc == 'north':
return (1, 0)
if loc == 'south':
return (1, 2)
if loc == 'west':
return (0, 1)
if loc == 'east':
return (2, 1)
if loc == 'left':
return (1, 0)
if loc == 'right':
return (1, 2)
if loc == 'front':
return (2, 1)
if loc == 'behind':
return (0, 1)
# By default, use the central room
return (1, 1)
|
75192c47fd8d4b56332b35ec7c3b355927e50ca2
| 18,403 |
def count_encoder(df, cols):
"""count encoding
Args:
df: DataFrame whose columns are to be count-encoded
cols (list of str): list of categorical columns to encode
Returns:
pd.DataFrame: df with the count-encoded columns appended
"""
out_df = pd.DataFrame()
for c in cols:
series = df[c]
vc = series.value_counts(dropna=False)
_df = pd.DataFrame(df[c].map(vc))
out_df = pd.concat([out_df, _df], axis=1)
out_df = out_df.add_suffix('_count_enc')
return pd.concat([df, out_df], axis=1)
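# Illustrative usage (this snippet assumes pandas is imported as pd, which count_encoder relies on):
demo = pd.DataFrame({'city': ['tokyo', 'osaka', 'tokyo', 'tokyo']})
print(count_encoder(demo, ['city'])['city_count_enc'].tolist())  # [3, 1, 3, 3]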
|
c8e5b0995d2915871e7614099cf3260566f75f05
| 18,404 |
def wrap_response(response):
"""Wrap a tornado response as an open api response"""
mimetype = response.headers.get('Content-Type') or 'application/json'
return OpenAPIResponse(
data=response.body,
status_code=response.code,
mimetype=mimetype,
)
|
38edef05e0d2d0ae80c235ed82f00080b86c6cb1
| 18,405 |
def shifted(x):
"""Shift x values to the range [-0.5, 0.5)"""
return -0.5 + (x + 0.5) % 1
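# Quick check of the wrapping behaviour:
assert shifted(0.75) == -0.25  # 0.75 wraps into [-0.5, 0.5)
assert shifted(-0.5) == -0.5   # the left edge maps to itself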
|
c40585748120af5d0acd85e4fed49f0575a92a3d
| 18,406 |
def computeAlignmentError(pP1, pP2, etype = 2, doPlot = False):
"""
Compute area-based alignment error. Assume that the
warping paths are on the same grid
:param pP1: Mx2 warping path 1
:param pP2: Nx2 warping path 2
:param etype: Error type. 1 is area ratio.
2 (default) is L1 Hausdorff distance
:param doPlot: Whether to plot the results
"""
P1 = rasterizeWarpingPath(pP1)
P2 = rasterizeWarpingPath(pP2)
score = 0
if etype == 1:
M = np.max(P1[:, 0])
N = np.max(P1[:, 1])
A1 = np.zeros((M, N))
A2 = np.zeros((M, N))
for i in range(P1.shape[0]):
[ii, jj] = [P1[i, 0], P1[i, 1]]
[ii, jj] = [min(ii, M-1), min(jj, N-1)]
A1[ii, jj::] = 1.0
for i in range(P2.shape[0]):
[ii, jj] = [P2[i, 0], P2[i, 1]]
[ii, jj] = [min(ii, M-1), min(jj, N-1)]
A2[ii, jj::] = 1.0
A = np.abs(A1 - A2)
score = np.sum(A)/(float(M)*float(N))
if doPlot:
plt.imshow(A)
plt.scatter(pP1[:, 1], pP1[:, 0], 5, 'c', edgecolor = 'none')
plt.scatter(pP2[:, 1], pP2[:, 0], 5, 'r', edgecolor = 'none')
plt.title("Score = %g"%score)
else:
C = getCSM(np.array(P1, dtype = np.float32), np.array(P2, dtype = np.float32))
score = (np.sum(np.min(C, 0)) + np.sum(np.min(C, 1)))/float(P1.shape[0]+P2.shape[0])
if doPlot:
plt.scatter(P1[:, 1], P1[:, 0], 20, 'c', edgecolor = 'none')
plt.scatter(P2[:, 1], P2[:, 0], 20, 'r', edgecolor = 'none')
idx = np.argmin(C, 1)
for i in range(len(idx)):
plt.plot([P1[i, 1], P2[idx[i], 1]], [P1[i, 0], P2[idx[i], 0]], 'k')
plt.title("Score = %g"%score)
return score
|
cce79f3f1fa83475bc18f18004d8e2c79a8e59fa
| 18,407 |
def _cumulative_grad(grad_sum, grad):
"""Apply grad sum to cumulative gradient."""
add = ops.AssignAdd()
return add(grad_sum, grad)
|
cb2b3ab6131fb4e289df29d33876f69c265c6e62
| 18,408 |
def run_node(node):
"""Python multiprocessing works strangely in windows. The pool function needed to be
defined globally
Args:
node (Node): Node to be called
Returns:
rslts: Node's call output
"""
return node.run_with_loaded_inputs()
|
a0f52020db20b4b67e83599bc0fb6c86ec2f9514
| 18,409 |
def getitimer(space, which):
"""getitimer(which)
Returns current value of given itimer.
"""
with lltype.scoped_alloc(itimervalP.TO, 1) as old:
c_getitimer(which, old)
return itimer_retval(space, old[0])
|
86716d3faedab3436bc1fcb5f77b80129884cf2d
| 18,410 |
def substitute_T_and_RH_for_interpolated_dataset(dataset):
"""
Input :
dataset : Dataset interpolated along height
Output :
dataset : Original dataset with new T and RH
Function to remove interpolated values of T and RH in the original dataset and
replace them with new values of T and RH,
calculated from the interpolated values of theta and q, respectively
"""
T = f3.calc_T_from_theta(dataset)
rh = f3.calc_rh_from_q(dataset, T=T)
dataset["ta"] = (dataset.p.dims, T)
dataset["rh"] = (dataset.p.dims, rh.values)
return dataset
|
fe2fca2ea3889fca17d2e676d1d2a95634ac1782
| 18,411 |
def get_base_required_fields():
""" Get required fields for base asset from UI.
Fields required for update only: 'id', 'uid', ['lastModifiedTimestamp', 'location', 'events', 'calibration']
Present in input, not required for output:
'coordinates', 'hasDeploymentEvent', 'augmented', 'deployment_numbers', 'deployment_number',
'Ref Des', 'depth',
2016-08-24: removed 'coordinates'
2016-08-26: removed 'augmented', 'Ref Des', 'remoteDocuments', 'hasDeploymentEvent',
2016-09-26: removed 'tense',
2016-10-11: removed 'tense',
"""
base_required_fields = [
'assetInfo',
'assetType',
'dataSource',
'deployment_numbers',
'deployment_number',
'depth',
'editPhase',
'latitude',
'longitude',
'manufactureInfo',
'mobile',
'notes',
'orbitRadius',
'partData',
'physicalInfo',
'purchaseAndDeliveryInfo',
'ref_des',
'remoteResources',
'uid'
]
return base_required_fields
|
273c539d0c0b0da249e2bb171107aa775ce52ddf
| 18,412 |
def reg_tab_ext(*model):
""" Performs weighted linear regression for various models building upon the model specified in section 4,
while additionally including education levels of a council candidate (university degree, doctoral/PhD degree)
A single model (i.e. function argument) takes on the form:
model=[df,polynomial, bw, dependant variable, bandwidth-type]
df: dataframe containing all relevant data
polynomial (str): "quadratic" includes quadratic values of "margin_1" and "inter_1" in the regression model;
default is "linear"
bw (float): specifying data to be included relative to cut-off point ("margin_1"=0)
dependent variable (str): name of dependent variable
bandwidth-type (str): method used to calculate bandwidth
:return: df containing results of regression
"""
# pd.set_option('mode.chained_assignment', None)
table = pd.DataFrame(
{'Model': [], 'Female Mayor': [], 'Std.err_Female Mayor': [], 'University': [], 'Std.err_University': [],
'PhD': [], 'Std.err_PhD': [], 'Bandwidth type': [], 'Bandwidth size': [], 'Polynomial': [],
'Observations': [], 'Elections': [], 'Municipalities': [],
'Mean': [], 'Std.err (Mean)': []})
table = table.set_index(['Model'])
for counter, i in enumerate(model):
data_i = subset_by_margin(i[0], i[2])
weight(data_i, i[2])
y = data_i[i[3]]
w = data_i["weight" + str(i[2]) + ""]
x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd']]
polynomial_i = str("Linear")
if i[1] == "quadratic":
x = data_i[["female_mayor", "margin_1", "inter_1", 'university', 'phd', "margin_2", "inter_2"]]
polynomial_i = str("Quadratic")
x = sm_api.add_constant(x)
wls_model = sm_api.WLS(y, x, missing='drop', weights=w)
results = wls_model.fit(cov_type='cluster', cov_kwds={'groups': data_i["gkz"]})
betas = [1, 2, 3]
cov = ["female_mayor", 'university', 'phd']
for j in cov:
betas[cov.index(j)] = significance_level(results.pvalues[j], results.params[(cov.index(j) + 1)].round(3))
bw_size_i = str(round(i[2], 2))
bw_type_i = str(i[4])
output = [betas[0], results.bse[1], betas[1], results.bse[4], betas[2], results.bse[5], bw_type_i, bw_size_i,
polynomial_i, results.nobs,
data_i["gkz_jahr"].value_counts().count(),
data_i["gkz"].value_counts().count(), y.mean().round(2), np.std(y)]
table.loc["(" + str(counter + 1) + ")"] = output
table = table.round(3)
return table
|
be61f85e918c2cf91b720b9495968c9c9f4f7b6e
| 18,413 |
def load_pdf(filename: str) -> pd.DataFrame:
""" Read PDF dataset to pandas dataframe """
tables = tabula.read_pdf(basedir + '\\' + filename, pages="all")
merged_tables = pd.concat(tables[1:])
merged_tables.head()
return merged_tables
|
c3d7a1c5b78a62d6d6b822ecc2a90246e1c3a6aa
| 18,414 |
def he_xavier(in_size: int, out_size: int, init_only=False):
"""
Xavier initialization according to Kaiming He in:
*Delving Deep into Rectifiers: Surpassing Human-Level
Performance on ImageNet Classification
(https://arxiv.org/abs/1502.01852)
"""
stddev = tf.cast(tf.sqrt(2 / in_size), tf.float32)
W = tf.random_normal([in_size, out_size], stddev=stddev)
b = tf.zeros([out_size])
if init_only:
return W, b
return tf.Variable(W, name="weights"), tf.Variable(b, name="biases")
|
f76501537a25226f1a3f3fcb0953438dbfaa996f
| 18,415 |
def GET_v1_keyboards_build_log():
"""Returns a dictionary of keyboard/layout pairs. Each entry is a dictionary with the following keys:
* `works`: Boolean indicating whether the compile was successful
* `message`: The compile output for failed builds
"""
json_data = qmk_redis.get('qmk_api_configurator_status')
return jsonify(json_data)
|
b1e2c3f5da654987bdb7530be8f62effbf878613
| 18,416 |
from math import log
def logprod(lst):
"""Computes the logarithm of the product of a list of positive numbers (as a sum of logs)"""
return sum(log(i) for i in lst)
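# The product itself can be recovered by exponentiating; a small illustrative check:
from math import exp, isclose
assert isclose(exp(logprod([2.0, 4.0, 8.0])), 64.0)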
|
fd42df8ca7170f70453ef58d46035ec2ac6b6446
| 18,417 |
import torch
def nms(dets, iou_thr, device_id=None):
"""Dispatch to either CPU or GPU NMS implementations.
The input can be either a torch tensor or numpy array. GPU NMS will be used
if the input is a gpu tensor or device_id is specified, otherwise CPU NMS
will be used. The returned type will always be the same as inputs.
Arguments:
dets (torch.Tensor or np.ndarray): bboxes with scores.
iou_thr (float): IoU threshold for NMS.
device_id (int, optional): when `dets` is a numpy array, if `device_id`
is None, then cpu nms is used, otherwise gpu_nms will be used.
Returns:
tuple: kept bboxes and indice, which is always the same data type as
the input.
"""
# convert dets (tensor or numpy array) to tensor
if isinstance(dets, torch.Tensor):
is_numpy = False
dets_th = dets
elif isinstance(dets, np.ndarray):
is_numpy = True
device = 'cpu' if device_id is None else 'cuda:{}'.format(device_id)
dets_th = torch.from_numpy(dets).to(device)
else:
raise TypeError(
'dets must be either a Tensor or numpy array, but got {}'.format(
type(dets)))
# execute cpu or cuda nms
if dets_th.shape[0] == 0:
inds = dets_th.new_zeros(0, dtype=torch.long)
else:
if dets_th.is_cuda:
if dets_th.shape[1] == 7:
inds = nms_cuda.nms_3d(dets_th, iou_thr)
elif dets_th.shape[1] == 5:
inds = nms_cuda.nms(dets_th, iou_thr)
else:
inds = nms_cpu.nms(dets_th, iou_thr)
if is_numpy:
inds = inds.cpu().numpy()
return dets[inds, :], inds
|
6a00022a6903fc73429cb0f893de3b5f018315e9
| 18,418 |
def render_template(template_name_or_list, **context):
"""Renders a template from the template folder with the given
context.
:param template_name_or_list: the name of the template to be
rendered, or an iterable with template names
the first one existing will be rendered
:param context: the variables that should be available in the
context of the template.
"""
ctx = _app_ctx_stack.top
ctx.app.update_template_context(context)
return _render(
ctx.app.jinja_env.get_or_select_template(template_name_or_list),
context,
ctx.app,
)
|
a712c5db36a0ed512411c592c695ddff8a51e8fb
| 18,419 |
import re
def _ReplaceUrlWithPlaceholder(results):
"""Fix a bug by replacing domain names with placeholders
There was a bug in early dogfood versions of the survey extension
in which URLs were included in questions where they
were supposed to have a placeholder. The fix was to replace text like
"Proceed to www.example.com" with "[CHOSEN]", and "Back to safety."
with "[ALTERNATIVE]."
These questions were the first question asked, so this function will only
do the replacement in the first question in each result.
Args:
results: A list of dicts containing parsed and filtered results.
It is assumed that results have been filtered for a given survey
condition, such that attributes questions should all appear in the
same place.
Returns:
The fixed results. Changes the input results list as well.
"""
for r in results:
q = r['responses'][0]['question'] # Do replacement in first question only
chosenMatch = re.search(r'"Proceed to.*?"', q)
alternateMatch = re.search(r'"Back to safety\."', q)
if chosenMatch:
q = q.replace(chosenMatch.group(0), '\"[CHOSEN]\"')
if alternateMatch:
q = q.replace(alternateMatch.group(0), '\"[ALTERNATIVE].\"')
r['responses'][0]['question'] = q
return results
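# Illustrative call with a synthetic result record (field names follow the docstring above):
sample = [{'responses': [{'question': 'You saw "Proceed to www.example.com" and "Back to safety."'}]}]
print(_ReplaceUrlWithPlaceholder(sample)[0]['responses'][0]['question'])
# -> 'You saw "[CHOSEN]" and "[ALTERNATIVE]."'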
|
f20318bd4338b16940181dad3bc32439a77e0c2f
| 18,420 |
def XCL(code, error, mag=0.0167, propagation='random', NEV=True, **kwargs):
"""
Dummy function to manage the ISCWSA workbook not correctly defining the
weighting functions.
"""
tortuosity = kwargs['tortuosity']
if code == "XCLA":
return XCLA(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
)
else:
return XCLH(
code, error, mag=mag, propagation=propagation, NEV=NEV,
tortuosity=tortuosity
)
|
90efd4d07c66923d2b98739fd7684ac1ee5141e8
| 18,421 |
def to_bgr(image):
"""Convert image to BGR format
Args:
image: Numpy array of uint8
Returns:
bgr: Numpy array of uint8
"""
# gray scale image
if image.ndim == 2:
bgr = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
return bgr
# 4-channel image (converted assuming RGBA channel order)
if image.shape[2] == 4:
bgr = cv2.cvtColor(image, cv2.COLOR_RGBA2BGR)
return bgr
bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return bgr
|
51d8455bb4060ff2db662a34846a4f957d33af81
| 18,422 |
import pathlib
import yaml
def load(path: pathlib.Path) -> dict:
"""Load a YAML file, returning its contents.
:raises: RuntimeError
"""
with path.open() as handle:
try:
return yaml.safe_load(handle)
except yaml.scanner.ScannerError as error:
LOGGER.critical('Failed to parse YAML from %s: %s',
path, error)
raise RuntimeError('YAML parse failure')
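# Illustrative usage; assumes PyYAML is installed (LOGGER and the error path are not exercised here):
import tempfile
demo_path = pathlib.Path(tempfile.mkdtemp()) / 'settings.yaml'
demo_path.write_text('name: demo\nretries: 3\n')
print(load(demo_path))  # {'name': 'demo', 'retries': 3}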
|
d91ba626619ec25e2dbdbe35202b26cd43d27dc3
| 18,423 |
import os
import sys
def find_package(dir):
"""
Given a directory, finds the equivalent package name. If it
is directly in sys.path, returns ''.
"""
dir = os.path.abspath(dir)
orig_dir = dir
path = list(map(os.path.abspath, sys.path))
packages = []
last_dir = None
while 1:
if dir in path:
return '.'.join(packages)
packages.insert(0, os.path.basename(dir))
dir = os.path.dirname(dir)
if last_dir == dir:
raise ValueError(
"%s is not under any path found in sys.path" % orig_dir)
last_dir = dir
|
0bc904165620daa2f408a3f1c526bfe4a34def97
| 18,424 |
import warnings
def validate_settings(raw_settings):
"""Return cleaned settings using schemas collected from INSTALLED_APPS."""
# Perform early validation on Django's INSTALLED_APPS.
installed_apps = raw_settings['INSTALLED_APPS']
schemas_mapping = raw_settings.get('CONFIT_SCHEMAS', {})
# Create schema instance using INSTALLED_APPS.
settings_schema = composite_schema(
installed_apps=installed_apps,
mapping=schemas_mapping)
# Actually validate settings.
cleaned_settings = settings_schema.deserialize(raw_settings)
# Highlight differences between raw and cleaned settings.
# Warn users when raw settings contain directives that are not used in
# schemas.
raw_keys = set(raw_settings.keys())
cleaned_keys = set(cleaned_settings.keys())
unused_keys = raw_keys.difference(cleaned_keys)
if unused_keys:
warnings.warn(
'The following settings are mentioned in your configuration, but '
'are not in cleaned settings. They may be missing in '
'configuration schemas, or you do not need to set them up: \n'
'- {settings}'.format(settings='\n- '.join(unused_keys)),
Warning)
# Return.
return cleaned_settings
|
ba764cc54eee27a8317f21d6bd69ee85b9deadbf
| 18,425 |
from torch.cuda.amp import autocast
def is_autocast_module_decorated(module: nn.Module):
"""
Return `True` if a nn.Module.forward was decorated with
torch.cuda.amp.autocast
"""
try:
decorators = _get_decorators(module.forward)
for d in decorators:
if isinstance(d, autocast):
return True
except Exception:
pass
return False
|
9f4db5438cb7e14ae20fd552a54121d9ce6c0d46
| 18,426 |
def timestamp_format_is_valid(timestamp: str) -> bool:
"""
Determines if the supplied timestamp is valid for usage with Graylog.
:param timestamp: timestamp that is to be checked
:return: whether the timestamp is valid (True) or invalid (False)
"""
try:
get_datetime_from_timestamp(timestamp)
except ValueError:
return False
return True
|
9c1623bede646c9ebbd5cd4200db193437728590
| 18,427 |
def indices(n, dtype):
"""Indices of each element in upper/lower triangle of test matrix."""
size = tri.tri_n(n - 1)
return np.arange(size, dtype=dtype)
|
0ecdcad0d66e268125826e0e415b410a07143b6c
| 18,428 |
def _sample(n, k):
""" Select k number out of n without replacement unless k is greater than n
"""
if k > n:
return np.random.choice(n, k, replace=True)
else:
return np.random.choice(n, k, replace=False)
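# Illustrative behaviour (assumes numpy is imported as np): k <= n draws without replacement.
picks = _sample(10, 3)
assert len(picks) == 3 and len(set(picks)) == 3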
|
cde012459ddb64dc7700ec0238e222fd4d26d3a2
| 18,429 |
def set_price_filter(request, category_slug):
"""Saves the given price filter to session. Redirects to the category with
given slug.
"""
req = request.POST if request.method == 'POST' else request.GET
try:
min_val = lfs.core.utils.atof(req.get("min", "0"))
except (ValueError):
min_val = 0
try:
max_val = lfs.core.utils.atof(req.get("max", "99999"))
except ValueError:
max_val = 0
try:
float(min_val)
except (TypeError, ValueError):
min_val = "0"
try:
float(max_val)
except (TypeError, ValueError):
max_val = "0"
request.session["price-filter"] = {"min": min_val, "max": max_val}
url = reverse("lfs_category", kwargs={"slug": category_slug})
return HttpResponseRedirect(url)
|
1adf1798e7fbf290d98e1b47b94cd9c9038732fe
| 18,430 |
def _get_config():
"""Returns a dictionary with server parameters, or ask them to the user"""
# tries to figure if we can authenticate using a configuration file
data = read_config()
# this does some sort of validation for the "webdav" data...
if "webdav" in data:
if (
"server" not in data["webdav"]
or "username" not in data["webdav"]
or "password" not in data["webdav"]
):
raise KeyError(
'If the configuration file contains a "webdav" '
"section, it should contain 3 variables defined inside: "
'"server", "username", "password".'
)
else:
# ask the user for the information, in case nothing available
logger.warning(
"Requesting server information for webDAV operation. "
"(To create a configuration file, and avoid these, follow "
"the Setup subsection at our Installation manual.)"
)
webdav_data = dict()
webdav_data["server"] = input("The base address of the server: ")
webdav_data["username"] = input("Username: ")
webdav_data["password"] = input("Password: ")
data["webdav"] = webdav_data
return data["webdav"]
|
c2105753cb4ae551bea53c0a2aaf0432dd275422
| 18,431 |
import requests
def lastfmcompare(text, nick, bot,):
"""[user] ([user] optional) - displays the now playing (or last played) track of LastFM user [user]"""
api_key = bot.config.get("api_keys", {}).get("lastfm")
if not api_key:
return "No last.fm API key set."
if not text:
return "please specify a lastfm username to compare"
try:
user1, user2 = text.split()
except ValueError:
user2 = text
user1 = nick
user2_check = get_account(user2)
if user2_check:
user2 = user2_check
user1_check = get_account(user1)
if user1_check:
user1 = user1_check
params = {
'method': 'tasteometer.compare',
'api_key': api_key,
'type1': 'user',
'value1': user1,
'type2': 'user',
'value2': user2
}
request = requests.get(api_url, params=params)
if request.status_code != requests.codes.ok:
return "Failed to fetch info ({})".format(request.status_code)
data = request.json()
if 'error' in data:
return "Error: {}.".format(data["message"])
score = float(data["comparison"]["result"]["score"])
score = float("{:.3f}".format(score * 100))
if score == 0:
return "{} and {} have no common listening history.".format(user2, user1)
level = "Super" if score > 95 else "Very High" if score > 80 else "High" if score > 60 else \
"Medium" if score > 40 else "Low" if score > 10 else "Very Low"
# I'm not even going to try to rewrite this line
artists = [f["name"] for f in data["comparison"]["result"]["artists"]["artist"]] if \
type(data["comparison"]["result"]["artists"]["artist"]) == list else \
[data["comparison"]["result"]["artists"]["artist"]["name"]] if "artist" \
in data["comparison"]["result"]["artists"] else ""
artist_string = "\x02In Common:\x02 " + \
", ".join(artists) if artists else ""
return "Musical compatibility between \x02{}\x02 and \x02{}\x02: {} (\x02{}%\x02) {}".format(user1, user2, level,
score, artist_string)
|
42b23961f210b4004aac987ca1146ee748392949
| 18,432 |
def fourier_ellipsoid(inp, size, n=-1, axis=-1, output=None):
"""
Multidimensional ellipsoid Fourier filter.
The array is multiplied with the fourier transform of a ellipsoid of
given sizes.
Parameters
----------
inp : array_like
The inp array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the inp is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the inp is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : Tensor, optional
If given, the result of filtering the inp is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : Tensor
The filtered inp.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
"""
inp = np.asarray(inp)
output = _get_output_fourier(output, inp)
axis = normalize_axis_index(axis, inp.ndim)
sizes = cndsupport._normalize_sequence(size, inp.ndim)
sizes = np.asarray(sizes, dtype=np.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
cndi.fourier_filter(inp, sizes, n, axis, output, 2)
return output
|
41128d7972bdb0cb6100991c1dc22031b7f3e6b3
| 18,433 |
def gaussian1D(x: np.ndarray, amplitude: Number, center: Number, stdev: Number) -> np.ndarray:
"""A one dimensional gaussian distribution.
= amplitude * exp(-0.5 (x - center)**2 / stdev**2)
"""
return amplitude * np.exp(-0.5 * (x - center)**2 / stdev**2)
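# Quick illustrative check (assumes numpy is imported as np): the peak equals the amplitude at the center.
xs = np.array([-1.0, 0.0, 1.0])
print(gaussian1D(xs, amplitude=2.0, center=0.0, stdev=1.0))  # approx. [1.213 2. 1.213]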
|
5c5c36ea71a08aec3246a2ca1dedf1d62c3fd331
| 18,434 |
def BuildImportLibs(flags, inputs_by_part, deffiles):
"""Runs the linker to generate an import library."""
import_libs = []
Log('building import libs')
for i, (inputs, deffile) in enumerate(zip(inputs_by_part, deffiles)):
libfile = 'part%d.lib' % i
flags_with_implib_and_deffile = flags + ['/IMPLIB:%s' % libfile,
'/DEF:%s' % deffile]
RunLinker(flags_with_implib_and_deffile, i, inputs, 'implib', None)
import_libs.append(libfile)
return import_libs
|
f29bcf16917cf8509662cf44c7881f8c7282b37d
| 18,435 |
def gather_sparse(a, indices, axis=0, mask=None):
"""
SparseTensor equivalent to tf.gather, assuming indices are sorted.
:param a: SparseTensor of rank k and nnz non-zeros.
:param indices: rank-1 int Tensor, rows or columns to keep.
:param axis: int axis to apply gather to.
:param mask: boolean mask corresponding to indices. Computed if not provided.
:return gathered_a: SparseTensor masked along the given axis.
:return values_mask: bool Tensor indicating surviving values, shape [nnz].
"""
in_size = _square_size(a.dense_shape)
out_size = tf.size(indices)
if mask is None:
mask = ops.indices_to_mask(indices, in_size)
inverse_map = _indices_to_inverse_map(indices, in_size)
return _boolean_mask_sparse(
a, mask, axis=axis, inverse_map=inverse_map, out_size=out_size
)
|
87e68a99c660448a11d32fa090ca3921552cd122
| 18,436 |
def Window(node, size=-1, full_only=False):
"""Lazy wrapper to collect a window of values. If a node is executed 3 times,
returning 1, 2, 3, then the window node will collect those values in a list.
Arguments:
node (node): input node
size (int): size of windows to use
full_only (bool): only return if list is full
"""
def foo(node=node, size=size, full_only=full_only):
if size == 0:
return node.value()
if ret._accum is None:
ret._accum = [node.value()]
elif ret.dependencyIsDirty(node):
ret._accum.append(node.value())
if size > 0:
ret._accum = ret._accum[-size:]
if full_only and len(ret._accum) == size:
return ret._accum
elif full_only:
return None
return ret._accum
# make new node
ret = node._gennode("Window[{}]".format(size if size > 0 else "∞"), foo, [node])
ret._accum = None
return ret
|
1f85b576455f3b379e41a7247ff486281bf21f8f
| 18,437 |
def fixed_rate_loan(amount, nrate, life, start, freq='A', grace=0,
dispoints=0, orgpoints=0, prepmt=None, balloonpmt=None):
"""Fixed rate loan.
Args:
amount (float): Loan amount.
nrate (float): nominal interest rate per year.
life (float): life of the loan.
start (int, tuple): init period for the loan.
freq (str): compounding frequency per year (pandas offset alias, e.g. 'A' or 'Q').
grace (int): number of periods of grace (without payment of the principal)
dispoints (float): Discount points of the loan.
orgpoints (float): Origination points of the loan.
prepmt (pandas.Series): generic cashflow representing prepayments.
balloonpmt (pandas.Series): generic cashflow representing balloon payments.
Returns:
A object of the class ``Loan``.
>>> pmt = cashflow(const_value=0, start='2016Q1', periods=11, freq='Q')
>>> pmt['2017Q4'] = 200
>>> fixed_rate_loan(amount=1000, nrate=10, life=10, start='2016Q1', freq='Q',
... grace=0, dispoints=0,
... orgpoints=0, prepmt=pmt, balloonpmt=None) # doctest: +NORMALIZE_WHITESPACE
Amount: 1000.00
Total interest: 129.68
Total payment: 1129.68
Discount points: 0.00
Origination points: 0.00
<BLANKLINE>
Beg_Ppal_Amount Nom_Rate Tot_Payment Int_Payment Ppal_Payment \\
2016Q1 1000.000000 10.0 0.000000 0.000000 0.000000
2016Q2 1000.000000 10.0 114.258763 25.000000 89.258763
2016Q3 910.741237 10.0 114.258763 22.768531 91.490232
2016Q4 819.251005 10.0 114.258763 20.481275 93.777488
2017Q1 725.473517 10.0 114.258763 18.136838 96.121925
2017Q2 629.351591 10.0 114.258763 15.733790 98.524973
2017Q3 530.826618 10.0 114.258763 13.270665 100.988098
2017Q4 429.838520 10.0 314.258763 10.745963 303.512800
2018Q1 126.325720 10.0 114.258763 3.158143 111.100620
2018Q2 15.225100 10.0 15.605727 0.380627 15.225100
2018Q3 0.000000 10.0 0.000000 0.000000 0.000000
<BLANKLINE>
End_Ppal_Amount
2016Q1 1000.000000
2016Q2 910.741237
2016Q3 819.251005
2016Q4 725.473517
2017Q1 629.351591
2017Q2 530.826618
2017Q3 429.838520
2017Q4 126.325720
2018Q1 15.225100
2018Q2 0.000000
2018Q3 0.000000
"""
if not isinstance(float(nrate), float):
raise TypeError('nrate must be a float.')
nrate = interest_rate(const_value=nrate, start=start, periods=life+grace+1, freq=freq)
if prepmt is None:
prepmt = cashflow(const_value=0, start=start, periods=len(nrate), freq=freq)
else:
verify_period_range([nrate, prepmt])
if balloonpmt is None:
balloonpmt = nrate.copy()
balloonpmt[:] = 0
else:
verify_period_range([nrate, balloonpmt])
# present value of the balloon payments
if balloonpmt is not None:
balloonpv = timevalue(cflo=balloonpmt, prate=nrate, base_date=grace)
else:
balloonpv = 0
pyr = getpyr(nrate)
pmt = pvpmt(pmt=None, pval=-amount+balloonpv, nrate=nrate[0], nper=len(nrate)-1, pyr=pyr)
pmts = nrate.copy()
pmts[:] = 0
for time in range(1, life + 1):
pmts[grace + time] = pmt
# balance
begppalbal = nrate.copy()
intpmt = nrate.copy()
ppalpmt = nrate.copy()
totpmt = nrate.copy()
endppalbal = nrate.copy()
begppalbal[:] = 0
intpmt[:] = 0
ppalpmt[:] = 0
totpmt[:] = 0
endppalbal[:] = 0
# payments per period
for time, _ in enumerate(totpmt):
totpmt[time] = pmts[time] + balloonpmt[time] + prepmt[time]
# balance calculation
for time in range(grace + life + 1):
if time == 0:
begppalbal[0] = amount
endppalbal[0] = amount
totpmt[time] = amount * (dispoints + orgpoints) / 100
### intpmt[time] = amount * dispoints / 100
else:
begppalbal[time] = endppalbal[time - 1]
if time <= grace:
intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
totpmt[time] = intpmt[time]
endppalbal[time] = begppalbal[time]
else:
intpmt[time] = begppalbal[time] * nrate[time] / pyr / 100
ppalpmt[time] = totpmt[time] - intpmt[time]
if ppalpmt[time] < 0:
capint = - ppalpmt[time]
ppalpmt[time] = 0
else:
capint = 0
endppalbal[time] = begppalbal[time] - ppalpmt[time] + capint
if endppalbal[time] < 0:
totpmt[time] = begppalbal[time] + intpmt[time]
ppalpmt[time] = begppalbal[time]
endppalbal[time] = begppalbal[time] - ppalpmt[time]
pmts[time] = 0
prepmt[time] = 0
data = {'Beg_Ppal_Amount':begppalbal}
result = Loan(life=life, amount=amount, grace=grace, nrate=nrate,
dispoints=dispoints, orgpoints=orgpoints,
data=data)
result['Nom_Rate'] = nrate
result['Tot_Payment'] = totpmt
result['Int_Payment'] = intpmt
result['Ppal_Payment'] = ppalpmt
result['End_Ppal_Amount'] = endppalbal
return result
|
10681a99ec381ec64517891d8d1101ed5eae78f4
| 18,438 |
import json
def get_results():
"""
Returns the scraped results for a set of inputs.
Inputs:
The URL, the type of content to scrap and class/id name.
This comes from the get_results() function in script.js
Output:
Returns a JSON list of the results
"""
# Decode the json data and turn it into a python dict
post_data = json.loads(request.data.decode())
# Extract the inputs from the JSON data
req_url = post_data.get('url')
req_type = post_data.get('type')
req_selector = post_data.get('selector')
results = []
# Each of the types of extraction is handled here
if req_type == 'head':
results = Webpage(req_url).get_head_tag()
elif req_type == 'content':
results = Webpage(req_url).get_all_contents()
elif req_type == 'class':
results = Webpage(req_url).get_content_by_class(req_selector)
elif req_type == 'id':
results = Webpage(req_url).get_content_by_id(req_selector)
elif req_type == 'images':
results = Webpage(req_url).get_all_images()
# The scraped results are turned into JSON format
# and sent to the frontend
serialized = json.dumps(results)
return serialized
|
59af271fc024854258c488f17489383f424dfae3
| 18,439 |
def _mocked_presets(*args, **kwargs):
"""Return a list of mocked presets."""
return [MockPreset("1")]
|
ebf48fb23dff67b2d1a9faac6e72764f2a5f8f0a
| 18,440 |
def play(context, songpos=None):
"""
*musicpd.org, playback section:*
``play [SONGPOS]``
Begins playing the playlist at song number ``SONGPOS``.
The original MPD server resumes from the paused state on ``play``
without arguments.
*Clarifications:*
- ``play "-1"`` when playing is ignored.
- ``play "-1"`` when paused resumes playback.
- ``play "-1"`` when stopped with a current track starts playback at the
current track.
- ``play "-1"`` when stopped without a current track, e.g. after playlist
replacement, starts playback at the first track.
*BitMPC:*
- issues ``play 6`` without quotes around the argument.
"""
if songpos is None:
return context.core.playback.play().get()
elif songpos == -1:
return _play_minus_one(context)
try:
tl_track = context.core.tracklist.slice(songpos, songpos + 1).get()[0]
return context.core.playback.play(tl_track).get()
except IndexError:
raise exceptions.MpdArgError('Bad song index')
|
fc2cee0b3cca2df33b844004ecaaaa1b0eaa5347
| 18,441 |
from typing import Union
def SMLB(x: Union[float, np.ndarray]) -> Union[float, np.ndarray]:
"""
Taken from Minerbo, G. N. and Levy, M. E., "Inversion of Abel’s integral equation by means of orthogonal polynomials.",
SIAM J. Numer. Anal. 6, 598-616 and swapped to satisfy SMLB(0) = 0.
"""
return (np.where((x > 0.00000000001), 1.241 * np.multiply(np.power(2 * x - x ** 2, -1.5),
np.exp(1.21 * (1 - np.power(2 * x - x ** 2, -1)))),
0)) / 0.9998251040790366
|
ed9b59ccbf99458796d11521825ba6ab0215144d
| 18,442 |
def add_colon(in_str):
"""Add colon after every 4th character."""
return ':'.join([in_str[i:i+4] for i in range(0, len(in_str), 4)])
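# Example: group a hexadecimal string into 4-character fields.
assert add_colon('89abcdef0123') == '89ab:cdef:0123'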
|
fa4258aa9d684a087d2a81ae09a2702d6e58e3e1
| 18,443 |
def fetch_partial_annotations():
""" Returns the partial annotations as an array
Returns:
partial_annotations: array of annotation data - [n_annotations, 5]
row format is [T, L, X, Y, Z]
"""
raw_mat = loadmat(PARTIAL_ANNOTATIONS_PATH)
annotations = raw_mat['divisionAnnotations']
# chop extra mystery column
return annotations[:, :-1]
|
69d57df06576af141dcc0eb9b00c7834e1a4a2c2
| 18,444 |
def get_alt_pos_info(rec):
"""Returns info about the second-most-common nucleotide at a position.
This nucleotide will usually differ from the reference nucleotide, but it
may be the reference (i.e. at positions where the reference disagrees with
the alignment's "consensus").
This breaks ties arbitrarily.
Parameters
==========
rec: dict
pysamstats record for a given position in an alignment produced
by stat_variation().
Returns
=======
(cov, alt nt freq, alt nt): tuple of (int, int, str)
Describes the second-most-common nucleotide at a position.
The first entry in this tuple is the (mis)match coverage at this
position. This is an integer defined as the sum of A, C, G, T
nucleotides at this position (note that this excludes degenerate
nucleotides like N -- we could change this in the future if that'd be
useful, I suppose). Note that this coverage could be zero, if no reads
are aligned to this specific position.
The second entry is the raw frequency of this nucleotide
at this position: this will be an integer greater than or equal to 0.
This is also referred to in the paper, etc. as alt(pos).
The third entry is just the alternate nucleotide (one of A, C, G, T),
represented as a string. This is returned for reference -- as of
writing this isn't actually needed for Pleuk itself, but I have other
code outside of Pleuk that benefits from this!
"""
cov = rec["A"] + rec["C"] + rec["G"] + rec["T"]
ordered_nts = sorted("ACGT", key=rec.get)
# The literal nucleotide used in the numerator of freq(pos): one of A, C,
# G, T
alt_nt = ordered_nts[-2]
# The raw frequency (in counts) of alt_nt. An integer >= 0.
alt_nt_freq = rec[alt_nt]
return (cov, alt_nt_freq, alt_nt)
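# Illustrative call with a hand-made pysamstats-style record:
rec = {'A': 10, 'C': 30, 'G': 2, 'T': 0}
print(get_alt_pos_info(rec))  # (42, 10, 'A')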
|
3abe3fcbbf0ddbccb44025f2e476f77dc3e8abf9
| 18,445 |
import torch
def accuracy(output, target, topk=(1,)):
""" Computes the accuracy over the k top predictions for the specified
values of k.
"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
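# Small illustrative check (uses the torch import above; logits and targets are made up):
logits = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
targets = torch.tensor([1, 1])
print(accuracy(logits, targets, topk=(1,))[0].item())  # 50.0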
|
d2edbbff872670f1637696e63fe448a749138985
| 18,446 |
def grim(n, mu, prec=2, n_items=1):
"""
Test that a mean mu reported with a decimal precision prec is possible, given a number of observations n and a
number of items n_items.
:param n: The number of observations
:param mu: The mean
:param prec: The precision (i.e., number of decimal places) of the mean
:param n_items: The number of scale items that were averaged. Default is 1.
:return: True if the mean is possible, False otherwise.
"""
if n*n_items >= 10**prec:
warn("The effective number of data points is such that GRIM will always find a solution.")
cval = np.round(mu * n * n_items, 0)
valid = np.round(cval/n/n_items, prec) == np.round(mu, prec)
return valid
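# Worked GRIM check (assumes numpy is imported as np): with n=28 single-item observations,
# a mean reported as 5.19 to two decimals is not attainable, while 5.18 is.
print(grim(28, 5.19))  # False
print(grim(28, 5.18))  # True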
|
093fdea1b59157b477642b31afa8388192188020
| 18,447 |
def split_dataframe(df:pd.DataFrame,split_index:np.ndarray):
"""
Split out the continuous variables from a dataframe \n
Params:
df : Pandas dataframe
split_index : Indices of continuous variables
"""
return df.loc[:,split_index].values
|
842f9b04d0d546b8bef28f0b110e7d570eb8f0a0
| 18,448 |
def user_select_columns():
"""
Useful columns from the users table, omitting authentication-related
columns like password.
"""
u = orm.User.__table__
return [
u.c.id,
u.c.user_name,
u.c.email,
u.c.first_name,
u.c.last_name,
u.c.org,
u.c.created_at,
u.c.updated_at,
u.c.sign_in_count,
u.c.last_sign_in_at
]
|
faf7ffd18a2fc6c55c1f8a4c19d176a34f79e19f
| 18,449 |
def remove_query_param(url, key):
"""
Given a URL and a key/val pair, remove an item in the query
parameters of the URL, and return the new URL.
"""
(scheme, netloc, path, query, fragment) = urlparse.urlsplit(url)
query_dict = urlparse.parse_qs(query)
query_dict.pop(key, None)
query = urlparse.urlencode(sorted(list(query_dict.items())), doseq=True)
return urlparse.urlunsplit((scheme, netloc, path, query, fragment))
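# Illustrative usage, assuming urlparse above is a urllib.parse-compatible module:
print(remove_query_param('http://example.com/list?page=2&size=10', 'page'))
# -> 'http://example.com/list?size=10'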
|
4a7ac5b2b1767a6fbc082e7e5b4f2d10dbd87926
| 18,450 |
def test_ap_hs20_sim(dev, apdev):
"""Hotspot 2.0 with simulated SIM and EAP-SIM"""
if not hlr_auc_gw_available():
return "skip"
hs20_simulated_sim(dev[0], apdev[0], "SIM")
dev[0].request("INTERWORKING_SELECT auto freq=2412")
ev = dev[0].wait_event(["INTERWORKING-ALREADY-CONNECTED"], timeout=15)
if ev is None:
raise Exception("Timeout on already-connected event")
|
cbda3f0ebc33c3d0e8ee6a99a088a61c57655480
| 18,451 |
def FreshReal(prefix='b', ctx=None):
"""Return a fresh real constant in the given context using the given prefix.
>>> x = FreshReal()
>>> y = FreshReal()
>>> eq(x, y)
False
>>> x.sort()
Real
"""
ctx = _get_ctx(ctx)
return ArithRef(Z3_mk_fresh_const(ctx.ref(), prefix, RealSort(ctx).ast), ctx)
|
afc312fbd85387adcfe72c81c5af5b06fe0ccee1
| 18,452 |
def ndim_rectangles_integral(
# main args
func,
up_limits,
low_limits,
ndim,
nsamples=10000,
args_func = {},
# demo plot args
verbose=False,
args_subplots = {'sharex':True, 'sharey':True, 'figsize':(10,10)},
args_suptitle = {'fontsize':16},
args_scatter_mesh = {'marker':"+", 'color':"black", 'label':"rectangular mesh"},
args_scatter_func = {'marker':"o", 'label':"computed points"},
args_legend = {},
dim_labels = None
):
"""
Returns the integral of a function in n-dimensions using the textbook rectangle method.
Heavy usage of numpy functions to benefit from parallelization.
Tip: To save RAM, divide integration space into sub-spaces and integrate one at a time.
v0.1
Parameters
----------
func : function
A Python function or method to integrate. The function takes an array of coordinates of shape=(ndim) and/or shape=(ndim, nsamples) to be integrated as first argument.
Other arguments can be passed using the args_func dictionary argument.
up_limits: array_like
Upper bounds of integration. Expected shape = (ndim)
low_limits: array_like
Lower bounds of integration. Expected shape = (ndim)
nsamples: integer or array_like, optional
#Samples of integrations in each dimension. Expected shape = (ndim). If an integer is given, #samples are divided between each dimension by nsamples**(1/ndim).
args_func: dictionary, optional
Supplementary arguments to pass to func.
verbose: boolean, optional
Generates a matplotlib (plt) figure of the integration space meshing and samples. This involves the computation of a histogram which is significantly computationally intensive. verbose=True should be used for verification only, with a low number of samples.
args_subplots: dictionary, optional
Supplementary arguments to pass to the plt.subplot function for pdf sample / space meshing visualisation (for verbose=True).
args_suptitle: dictionary, optional
Supplementary arguments to pass to the plt.suptitle function for pdf sample / space meshing visualisation (for verbose=True).
args_scatter_mesh: dictionary, optional
Supplementary arguments to pass to the plt.scatter function for space meshing visualisation (for verbose=True).
args_scatter_func: dictionary, optional
Supplementary arguments to pass to the plt.scatter function for pdf sample visualisation (for verbose=True).
args_legend: dictionary, optional
Supplementary arguments to pass to the plt.legend function for pdf sample / space meshing visualisation (for verbose=True).
dim_labels = array_like, optional
Label of each dimension for pdf sample / space meshing visualisation (for verbose=True). Expected shape = (ndim)
Returns
-------
result : float
The result of the integration.
Example
--------
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
dim_labels = ["x", "y", "z"]
ndim = len(dim_labels)
df_func = lambda x:stats.multivariate_normal.pdf(x, mean=np.zeros(ndim), cov=np.eye(ndim))
integral = ndim_rectangles_integral (func = df_func,
up_limits = np.full(ndim,4),
low_limits = np.full(ndim,-4),
ndim=ndim,
nsamples = np.full(ndim,11),
verbose = True,
dim_labels = dim_labels,)
print("integral = %f"%(integral))
plt.show()
"""
#---------------------------------------------------------------
# supporting int as n_samples argument
if isinstance(nsamples, int):
nsamples = np.full(ndim,int(nsamples**(1/ndim)))
# checking arguments
if not(len(up_limits)==len(low_limits)==ndim==len(nsamples)):
raise ValueError("Shapes should be len(up_limits)=len(low_limits)=ndim")
#---------------------------------------------------------------
# todo: max_memory argument. automated space division
#---------------------------------------------------------------
# hyperrectangles edge size in each dimension
ndx = np.array([(up_limits[dim] - low_limits[dim])/(nsamples[dim]-1) for dim in range(ndim)])
# hyperrectangle volume
vol = np.prod(ndx)
# hyperrectangles centers: edges
ncenters = np.array([np.linspace(start=low_limits[dim]+ndx[dim]/2, stop=up_limits[dim]-ndx[dim]/2, num=nsamples[dim]-1) for dim in range(ndim)])
del ndx
# hyperrectangles centers: coords
ncoords_centers = np.array(np.meshgrid(*ncenters))
del ncenters
ncoords_centers = ncoords_centers.reshape(ncoords_centers.shape[0],np.prod(ncoords_centers.shape[1:])) # equivalent to ncoords_centers = ncoords_centers.reshape(ndim,np.prod(nsamples-1))
ncoords_centers = ncoords_centers.transpose()
#---------------------------------------------------------------
# integral computation
try: # if func supports array of coords
mapped_func = func(ncoords_centers, **args_func)
except: # if func only supports 1 coord at a time
mapped_func = np.array([func(ncoords_centers[i], **args_func) for i in range (ncoords_centers.shape[0])])
# dividing by volume
integral = np.sum(mapped_func)*vol
#---------------------------------------------------------------
#todo: error computation
# # not sure about this...
# mapped_err = np.abs(mapped_func-np.roll(mapped_func, 1))/2
# err = np.sum(mapped_err)*vol
#---------------------------------------------------------------
# mesh plot for visualisation purposes
if verbose==1:
# meshing edges for display
nedges = np.array([np.linspace(start=low_limits[dim], stop=up_limits[dim], num=nsamples[dim]) for dim in range(ndim)], dtype=object) # nedges.shape = (ndim, nsamples in dim)
ncoords_edges = np.array(np.meshgrid(*nedges))
ncoords_edges = ncoords_edges.reshape(ncoords_edges.shape[0],np.prod(ncoords_edges.shape[1:]))
# plot
fig, ax = plt.subplots(ndim ,ndim, **args_subplots)
#title
args_suptitle_default = {'t':"Mesh and func samples used. Integral = %f"%(integral)} # default title
args_suptitle_default.update(args_suptitle)
fig.suptitle(**args_suptitle_default)
for i in range(ndim):
for j in range (ndim):
# mesh: plot
ax[i,j].scatter(ncoords_edges[i,:], ncoords_edges[j,:], **args_scatter_mesh)
# df sample points: cleaning supperposed values, summing prob along other dimensions
temp_centers_ij = np.append(ncoords_centers[:,[i,j]], mapped_func.reshape(mapped_func.shape[0],1),axis=1)
temp_centers_ij = temp_centers_ij[np.lexsort((temp_centers_ij[:,0], temp_centers_ij[:,1]))]
unique_centers = []
unique_prob = []
counter = -1
for k in range(temp_centers_ij.shape[0]):
if np.sum(temp_centers_ij[k,0:2] != temp_centers_ij[k-1,0:2]):
unique_prob.append(temp_centers_ij[k,2])
unique_centers.append(temp_centers_ij[k,0:2])
counter+=1
else:
unique_prob[counter]+=temp_centers_ij[k,2]
unique_centers = np.array(unique_centers)
unique_prob = np.array(unique_prob)
#todo: use an image instead of points for the sampled pdf
# df sample points: plot
df_plot = ax[i,j].scatter(unique_centers[:,0], unique_centers[:,1], c=unique_prob, **args_scatter_func)
plt.colorbar(df_plot, ax=ax[i,j])
# labels
if dim_labels is not None:
ax[i,j].set_xlabel(dim_labels[i])
ax[i,j].set_ylabel(dim_labels[j])
# legend
handles, labels = plt.gca().get_legend_handles_labels()
by_label = collections_OrderedDict(zip(labels, handles))
fig.legend(by_label.values(), by_label.keys(), **args_legend)
#---------------------------------------------------------------
return integral
|
60a78a14fedc571276520d71c99669829253c062
| 18,453 |
import threading
def handle_readable(client):
"""
Return True: The client is re-registered to the selector object.
Return False: The server disconnects the client.
"""
data = client.recv(1028)
if data == b'':
return False
client.sendall(b'SERVER: ' + data)
print(threading.active_count())
return True
|
9a77bb893a5da4e76df5593feb6ecf49022e6ef3
| 18,454 |
from datetime import datetime
import logging
def fund_wallet():
"""
---
post:
summary: fund a particular wallet
description: sends funds to a particular user given the user id. The amount is
taken from the sender's wallet in the respective currency, or from the sender's default wallet if no such wallet exists.
If the sender is an admin, no money is deducted from any wallet; otherwise the amount is deducted
from the sender's wallet in the respective currency. This means that an admin can generate cash, while
other users can only perform transfers between wallets.
requestBody:
required: true
content:
application/json:
schema: Fund
responses:
'200':
description: success
content:
application/json:
schema: TransactionResponse
tags:
- user
- admin
"""
try:
required = ["currency", "amount", "receiver"]
data = request.get_json()
if not all([rq in data.keys() for rq in required]):
return jsonify(status=error, message="Missing Required JSON Field!")
amount = data["amount"]
currency = data["currency"]
receiver_id = data["receiver"]
if not CurrencyUtils.iscurrency_valid(currency):
return jsonify(status=error, message="Please Enter a valid Currency code"), 400
if g.user.role.name != "Admin":
sender_wallet = g.user.wallet.filter_by(currency=currency).first()
if sender_wallet is None:
sender_wallet = g.user.wallet.filter_by(
currency=g.user.main_currency)
if CurrencyUtils.convert_currency(sender_wallet.currency.upper(), currency.upper(), sender_wallet.balance) < amount:
return jsonify(status=error, message="Insufficient fund!"), 403
amount = CurrencyUtils.convert_currency(
sender_wallet.currency.upper(), currency.upper(), amount)
else:
if sender_wallet.balance < amount:
return jsonify(status=error, message="Insufficient fund!"), 403
receiver = User.query.filter_by(id=receiver_id).first()
if not receiver:
return jsonify(status=error, message=f"Sorry User with id {receiver_id} does not exsits!"), 400
if receiver.role.name == "Admin":
return jsonify(status=unauthorized, message="Sorry Admin account can't be funded!"), 403
receiver_wallet = receiver.wallet.filter_by(currency=currency).first()
if receiver_wallet is None:
if receiver.role.name == "Elite":
new_wallet = Wallet(currency=currency, user_id=receiver.id)
db.session.add(new_wallet)
db.session.commit()
receiver_wallet = new_wallet
elif receiver.role.name == "Noob":
receiver_wallet = receiver.wallet.filter_by(
currency=receiver.main_currency.lower()).first()
if g.user.role.name == "Admin":
tx = Transaction(receiver=receiver_wallet.id, sender=None,
amount=amount, currency=currency, at=datetime.utcnow())
else:
tx = Transaction(receiver=receiver_wallet.id, sender=sender_wallet.id,
amount=amount, currency=currency, at=datetime.utcnow())
if receiver.role.name == "Noob":
tx.isapproved = False
db.session.add(tx)
db.session.commit()
return jsonify(status=ok, data=tx.serialize), 200
except SyntaxError as e:
logging.error(e)
return jsonify(status=error, message=str(e)), 400
|
55955335f4462ad118fb792f3335db8090f7439e
| 18,455 |
import numpy
def create_objective(dist, abscissas):
"""Create objective function."""
abscissas_ = numpy.array(abscissas[1:-1])
def obj(absisa):
"""Local objective function."""
out = -numpy.sqrt(dist.pdf(absisa))
out *= numpy.prod(numpy.abs(abscissas_ - absisa))
return out
return obj
|
c63eeadffd067c2a94470ddbf03fb009265fbbbc
| 18,456 |
def get_party_to_seats(year, group_id, party_to_votes):
"""Give votes by party, compute seats for party."""
eligible_party_list = get_eligible_party_list(
group_id,
party_to_votes,
)
if not eligible_party_list:
return {}
n_seats = YEAR_TO_REGION_TO_SEATS[year][group_id]
n_seats_bonus = 0 if (group_id == COUNTRY_ID) else 1
n_seats_non_bonus = n_seats - n_seats_bonus
winning_party = sorted(party_to_votes.items(), key=lambda x: -x[1],)[
0
][0]
party_to_seats = {winning_party: n_seats_bonus}
relevant_num = sum(
list(
map(
lambda party: party_to_votes[party],
eligible_party_list,
)
)
)
party_r = []
n_seats_r = n_seats_non_bonus
resulting_num = (int)(relevant_num / n_seats_non_bonus)
for party in eligible_party_list:
seats_r = party_to_votes[party] / resulting_num
seats_non_bonus_whole = (int)(seats_r)
party_to_seats[party] = (
party_to_seats.get(party, 0) + seats_non_bonus_whole
)
party_r.append((party, seats_r % 1))
n_seats_r -= seats_non_bonus_whole
party_r = sorted(party_r, key=lambda x: -x[1])
for i in range(0, n_seats_r):
party = party_r[i][0]
party_to_seats[party] = party_to_seats.get(party, 0) + 1
return party_to_seats
|
02270cfeefb87b3da0ec4fa88dfb692a4645df5e
| 18,457 |
def get_product(name, version):
"""Get info about a specific version of a product"""
product = registry.get_product(name, version)
return jsonify(product.to_dict())
|
0c461d672ef4d07578b098b3cb937027ad8946f1
| 18,458 |
def _is_segment_in_block_range(segment, blocks):
"""Return whether the segment is in the range of one of the blocks."""
for block in blocks:
if block.start <= segment.start and segment.end <= block.end:
return True
return False
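# Illustrative check using simple stand-in objects that expose .start and .end:
from collections import namedtuple
Span = namedtuple('Span', 'start end')
print(_is_segment_in_block_range(Span(5, 8), [Span(0, 4), Span(5, 10)]))  # True
print(_is_segment_in_block_range(Span(3, 8), [Span(0, 4), Span(5, 10)]))  # False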
|
e7509f18f0a72cf90fb1aa643c77c2e13154f0d0
| 18,459 |
def acrobatic(m):
"""More power and accuracy at the cost of increased complexity; can stun"""
if 'do_agility_based_dam' in m['functions'] or 'do_strength_based_dam' in m['functions']:
return None
if 'takedown' in m['features']:
return None
m = m.copy()
mult(m, 'stam_cost', 1.25)
add(m, 'complexity', 2)
up_tier(m, 3)
add_fun(m, 'do_agility_based_dam')
add_fun(m, 'do_strength_based_dam')
add(m, 'freq', -2, mn=1)
prefix(m, 'Acrobatic')
return m
|
b15980d1bc68e495c9e86f16a708e80e90245954
| 18,460 |
import configparser
import sys
import os
import subprocess
def systemd(config, args):
""" Build and install systemd scripts for the server """
try:
config = load_config(args.cfg_file, explicit=True)
except (OSError, configparser.ParsingError) as exc:
print(exc, file=sys.stderr)
return 1
print("Building systemd services")
using_venv = sys.prefix != sys.base_prefix
if using_venv:
print(f" - Detected virtualenv: {sys.prefix}")
print(" Service files will be built for this virutalenv")
site_path = None
global_site = config.get("taky", "cfg_path") == "/etc/taky/taky.conf"
if global_site:
print(" - Detected system-wide site install")
svcs = {
"taky": "taky.service",
"cot": "taky-cot.service",
"dps": "taky-dps.service",
}
else:
site_path = os.path.dirname(config.get("taky", "cfg_path"))
hostname = config.get("taky", "hostname")
print(f" - Detected site install: {site_path}")
svcs = {
"taky": f"taky-{hostname}.service",
"cot": f"taky-{hostname}-cot.service",
"dps": f"taky-{hostname}-dps.service",
}
if not args.user:
print(
" - WARNING: taky will run as root! It's strongly recommended",
file=sys.stderr,
)
print(" to create a system user for taky!", file=sys.stderr)
# Do not overwrite files if they exist
for svc in svcs:
path = os.path.join(args.path, svcs[svc])
if os.path.exists(path):
print(f"ERROR: Refusing to overwite service file: {path}", file=sys.stderr)
return 1
print(f" - Writing services to {args.path}")
try:
print(f" - Writing {svcs['cot']}")
write_cot_svc(svcs, config, args, using_venv, site_path)
if args.dps:
print(f" - Writing {svcs['dps']}")
write_dps_svc(svcs, config, args, using_venv, site_path)
print(f" - Writing {svcs['taky']}")
write_uni_svc(svcs, config, args, using_venv, site_path)
except PermissionError as exc:
print(f"ERROR: Unable to write service files to {args.path}", file=sys.stderr)
return 1
except OSError as exc:
print(f"ERROR: {exc}", file=sys.stderr)
return 1
if args.install:
try:
print(" - Reloading systemctl services")
subprocess.check_output(["systemctl", "daemon-reload"])
print(" - Enabling service")
subprocess.check_output(["systemctl", "enable", svcs["taky"]])
print(" - Starting service")
subprocess.check_output(["systemctl", "start", svcs["taky"]])
except subprocess.CalledProcessError as exc:
print(f"ERROR: systemctl calls failed: {exc}")
return 1
return 0
|
1a1a8caad38fedc016f1ef6efea9adc6ab020c95
| 18,461 |
def compute_snes_color_score(img):
""" Returns the ratio of SNES colors to the total number of colors in the image
Parameters:
img (image) -- Pillow image
Returns:
count (float) -- ratio of SNES colors
"""
score = _get_distance_between_palettes(img, util.get_snes_color_palette())
    return score
    # NOTE: everything below this return is unreachable legacy code that
    # computed the ratio described in the docstring; it is kept for reference only.
    # colors, snes_color_count = get_color_count(img, util.get_snes_color_palette())
w, h = img.size
colors = np.array(img.getcolors(maxcolors=w * h))
total_color_count = len(colors)
invalid_color_count = np.sum([((r & 0x03) & (g & 0x03) & (b & 0x03)) for (_, (r, g, b)) in colors]) # zero out valid bits, leaving only invalid bits
snes_color_count = total_color_count - invalid_color_count # count remaining colors with invalid bits
return snes_color_count / total_color_count
|
b52ae8d7d98700f455e126cfb447e41c1762528c
| 18,462 |
def get_assignments_for_team(user, team):
""" Get openassessment XBlocks configured for the current teamset """
# Confirm access
if not has_specific_team_access(user, team):
raise Exception("User {user} is not permitted to access team info for {team}".format(
user=user.username,
team=team.team_id
))
# Limit to team-enabled ORAs for the matching teamset in the course
return modulestore().get_items(
team.course_id,
qualifiers={'category': 'openassessment'},
settings={'teams_enabled': True, 'selected_teamset_id': team.topic_id}
)
|
67b72b34b8549127728c33dfac8599d979d09f6f
| 18,463 |
def is_flexible_uri(uri: Uri_t) -> bool:
"""Judge if specified `uri` has one or more flexible location.
Args:
uri: URI pattern to be judged.
Returns:
True if specified `uri` has one or more flexible location,
False otherwise.
"""
for loc in uri:
if isinstance(loc, FlexibleLocation):
return True
return False
|
fd5138d3dfc44c36e7b5ccfe911f6640e22bc7f2
| 18,464 |
def load_frame_gray(img_path, gray_flag=False):
"""Load image at img_path, and convert the original image to grayscale if gray_flag=True.
Return image and grayscale image if gray_flag=True; otherwise only return original image.
img_path = a string containing the path to an image file readable by cv.imread
"""
    try:
        img = cv.imread(img_path)
    except Exception as err:
        print(f"The following error occurred when reading the image file at {img_path}: \n{err}")
        img = None
    # cv.imread does not raise for unreadable files; it returns None instead
    if img is None:
        print(f"Could not read an image from {img_path}")
    if gray_flag and isinstance(img, np.ndarray):
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
else:
gray = None
return (img, gray) if gray_flag else img
|
8b792f2bf5f22f34e0b880c934019c238a3cc360
| 18,465 |
def read_vocab_file(path):
""" Read voc file.
This reads a .voc file, stripping out empty lines comments and expand
parentheses. It returns each line as a list of all expanded
alternatives.
Args:
path (str): path to vocab file.
Returns:
List of Lists of strings.
"""
LOG.warning("read_vocab_file is deprecated! "
"use SkillResources class instead")
vocab = []
with open(path, 'r', encoding='utf8') as voc_file:
for line in voc_file.readlines():
if line.startswith('#') or line.strip() == '':
continue
vocab.append(expand_options(line.lower()))
return vocab
|
cdf230f1fbeafbcc3839a02ce86b33719dfcf806
| 18,466 |
import re
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)]
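# Minimal usage sketch: using natural_key as a sort key orders embedded numbers
# numerically instead of lexicographically.
if __name__ == "__main__":
    print(sorted(["file10.txt", "file2.txt", "file1.txt"], key=natural_key))
    # ['file1.txt', 'file2.txt', 'file10.txt']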
|
f49aca918e4efc2f5e7f6541df2d5329bc2752f7
| 18,467 |
import subprocess
def _set_wpa_supplicant_config(interface, config, opt):
"""Starts or restarts wpa_supplicant unless doing so would be a no-op.
The no-op case (i.e. wpa_supplicant is already running with an equivalent
config) can be overridden with --force-restart.
Args:
interface: The interface on which to start wpa_supplicant.
config: A wpa_supplicant configuration, as a string.
opt: The OptDict parsed from command line options.
Returns:
Whether wpa_supplicant was started successfully.
Raises:
BinWifiException: On various errors.
"""
tmp_config_filename = utils.get_filename(
'wpa_supplicant', utils.FILENAME_KIND.config, interface, tmp=True)
forced = False
current_config = None
band = opt.band
try:
with open(tmp_config_filename) as tmp_config_file:
current_config = tmp_config_file.read()
except IOError:
pass
already_running = _is_wpa_supplicant_running(interface)
if not already_running:
utils.log('wpa_supplicant not running yet, starting.')
elif current_config != config:
# TODO(rofrankel): Consider using wpa_cli reconfigure here.
utils.log('wpa_supplicant config changed, reconfiguring.')
elif opt.force_restart:
utils.log('Forced restart requested.')
forced = True
else:
utils.log('wpa_supplicant-%s already configured and running', interface)
return True
if not forced:
utils.atomic_write(tmp_config_filename, config)
# TODO(rofrankel): Consider removing all the restart hostapd stuff when
# b/30140131 is resolved. hostapd seems to keep working without being
# restarted, at least on Camaro.
restart_hostapd = False
ap_interface = iw.find_interface_from_band(band, iw.INTERFACE_TYPE.ap,
opt.interface_suffix)
if _is_hostapd_running(ap_interface):
restart_hostapd = True
opt_without_persist = options.OptDict({})
opt_without_persist.persist = False
opt_without_persist.band = opt.band
opt_without_persist.interface_suffix = opt.interface_suffix
if not stop_ap_wifi(opt_without_persist):
raise utils.BinWifiException(
"Couldn't stop hostapd to start wpa_supplicant.")
if already_running:
subprocess.check_call(['ifdown', interface])
subprocess.check_call(['/etc/ifplugd/ifplugd.action', interface, 'down'])
if not _reconfigure_wpa_supplicant(interface):
raise utils.BinWifiException('Failed to reconfigure wpa_supplicant.')
subprocess.check_call(['ifup', interface])
subprocess.check_call(['/etc/ifplugd/ifplugd.action', interface, 'up'])
elif not _start_wpa_supplicant(interface, tmp_config_filename):
raise utils.BinWifiException(
'wpa_supplicant failed to start. Look at wpa_supplicant logs for '
'details.')
if restart_hostapd:
_restart_hostapd(ap_interface)
return True
|
09197b75393182087651d9625483928e2b92d802
| 18,468 |
def generate_episode(sim, policy, horizon=200):
"""
Generate an episode from a policy acting on an simulation.
Returns: sequence of state, action, reward.
"""
obs = sim.reset()
policy.reset() # Reset the policy too so that it knows its the beginning of the episode.
states, actions, rewards = [], [], []
states.append(obs)
for _ in range(horizon):
action = policy.act(obs)
obs, reward, done, _ = sim.step(action)
states.append(obs)
actions.append(action)
rewards.append(reward)
if done:
break
states.pop() # Pop off the terminating state
return states, actions, rewards
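# Hedged usage sketch: generate_episode only assumes a gym-style sim
# (reset/step returning obs, reward, done, info) and a policy with reset/act;
# the _Dummy* classes below are illustrative stand-ins, not part of the source.
class _DummySim:
    def __init__(self):
        self.t = 0
    def reset(self):
        self.t = 0
        return self.t
    def step(self, action):
        self.t += 1
        return self.t, 1.0, self.t >= 3, {}
class _DummyPolicy:
    def reset(self):
        pass
    def act(self, obs):
        return 0
if __name__ == "__main__":
    states, actions, rewards = generate_episode(_DummySim(), _DummyPolicy(), horizon=10)
    print(len(states), len(actions), len(rewards))  # 3 3 3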
|
73a0bbb2703c047d3305e93dd2a340c83db12277
| 18,469 |
import asyncio
async def _ensure_meadowrun_vault(location: str) -> str:
"""
Gets the meadowrun key vault URI if it exists. If it doesn't exist, also creates the
meadowrun key vault, and tries to assign the Key Vault Administrator role to the
current user.
"""
subscription_id = await get_subscription_id()
vault_name = get_meadowrun_vault_name(subscription_id)
resource_group_path = await ensure_meadowrun_resource_group(location)
vault_path = (
f"{resource_group_path}/providers/Microsoft.KeyVault/vaults/{vault_name}"
)
try:
vault = await azure_rest_api("GET", vault_path, "2019-09-01")
return vault["properties"]["vaultUri"]
except ResourceNotFoundError:
# theoretically key_vault_client.vaults.get_deleted() should be faster,
# but that requires specifying a location and there's no way to know what
# location the key vault may have been originally created in.
deleted_vault_found = False
async for page in azure_rest_api_paged(
"GET",
f"/subscriptions/{subscription_id}/providers/Microsoft.KeyVault/"
f"deletedVaults",
"2019-09-01",
):
for vault in page["value"]:
if vault["name"] == vault_name:
deleted_vault_found = True
break
if deleted_vault_found:
break
if deleted_vault_found:
# if we have a deleted vault, then we should try to recover it
create_mode = "recover"
print(f"The meadowrun Key Vault {vault_name} was deleted, recovering")
else:
create_mode = "default"
print(
f"The meadowrun Key Vault {vault_name} does not exist, creating it "
"now"
)
# if we're creating or recreating the Key Vault, assume that we need to add the
# current user to the Key Vault Administrator role so that the current user can
# access secrets.
assign_role_task = asyncio.create_task(
assign_role_to_principal(
"Key Vault Administrator", await get_current_user_id(), location
)
)
# Now we can create/recover the Key Vault.
# https://docs.microsoft.com/en-us/rest/api/keyvault/keyvault/vaults/create-or-update#vaultproperties
vault, _ = await wait_for_poll(
await azure_rest_api_poll(
"PUT",
vault_path,
"2019-09-01",
"AsyncOperationJsonStatus",
json_content={
"location": location,
"properties": {
"tenantId": await get_tenant_id(),
"sku": {"family": "A", "name": "Standard"},
"enableRbacAuthorization": True,
"createMode": create_mode,
},
},
)
)
try:
await assign_role_task
except Exception as e:
print(
"Warning: we were not able to assign the Key Vault Administrator role "
f"to the current user. You may not be able to create/read secrets: {e}"
)
return vault["properties"]["vaultUri"]
|
9e16940a56ae83b47d42d7583ad6efc9c5d63d23
| 18,470 |
from typing import Iterable
from typing import Dict
from typing import List
from typing import Union
from pathlib import Path
from typing import Tuple
import logging
def get_dataset_splits(
datasets: Iterable[HarmonicDataset],
data_dfs: Dict[str, pd.DataFrame] = None,
xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
splits: Iterable[float] = (0.8, 0.1, 0.1),
seed: int = None,
) -> Tuple[List[List[HarmonicDataset]], List[List[int]], List[List[Piece]]]:
"""
Get datasets representing splits of the data in the given DataFrames.
Parameters
----------
datasets : Iterable[HarmonicDataset]
An Iterable of HarmonicDataset class objects, each representing a different type of
HarmonicDataset subclass to make a Dataset from. These are all passed so that they will
have identical splits.
data_dfs : Dict[str, pd.DataFrame]
If using dataframes, a mapping of 'files', 'measures', 'chords', and 'notes' dfs.
xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
If using the MusicXML ('xmls') and label csvs ('csvs'), a list of paths of the
matching xml and csv files.
splits : Iterable[float]
An Iterable of floats representing the proportion of pieces which will go into each split.
This will be normalized to sum to 1.
seed : int
A numpy random seed, if given.
Returns
-------
dataset_splits : List[List[HarmonicDataset]]
An iterable, the length of `dataset` representing the splits for each given dataset type.
Each element is itself an iterable the length of `splits`.
split_ids : List[List[int]]
A list the length of `splits` containing the file_ids for each data point in each split.
split_pieces : List[List[Piece]]
A list of the pieces in each split.
"""
split_ids, split_pieces = get_split_file_ids_and_pieces(
data_dfs=data_dfs,
xml_and_csv_paths=xml_and_csv_paths,
splits=splits,
seed=seed,
)
dataset_splits = np.full((len(datasets), len(splits)), None)
for split_index, (split_prop, pieces) in enumerate(zip(splits, split_pieces)):
if len(pieces) == 0:
logging.warning(
"Split %s with prop %s contains no pieces. Returning None for those.",
split_index,
split_prop,
)
continue
for dataset_index, dataset_class in enumerate(datasets):
dataset_splits[dataset_index][split_index] = dataset_class(pieces)
return dataset_splits, split_ids, split_pieces
|
c1389dad05aa2911735b1e9099acda9e2a8a1c05
| 18,471 |
import os
import traceback
from time import time
from PilotErrors import PilotErrors, PilotException
from movers import JobMover
from movers.trace_report import TraceReport
def put_data_es(job, jobSite, stageoutTries, files, workDir=None, activity=None):
"""
Do jobmover.stageout_outfiles or jobmover.stageout_logfiles (if log_transfer=True)
or jobmover.stageout_logfiles_os (if special_log_transfer=True)
:backward compatible return: (rc, pilotErrorDiag, rf, "", filesNormalStageOut, filesAltStageOut)
"""
tolog("Mover put data started [new implementation]")
si = getSiteInformation(job.experiment)
si.setQueueName(jobSite.computingElement) # WARNING: SiteInformation is singleton: may be used in other functions! FIX me later
workDir = workDir or os.path.dirname(job.workdir)
mover = JobMover(job, si, workDir=workDir, stageoutretry=stageoutTries)
eventType = "put_es"
mover.trace_report = TraceReport(pq=jobSite.sitename, localSite=jobSite.sitename, remoteSite=jobSite.sitename, dataset="", eventType=eventType)
mover.trace_report.init(job)
error = None
storageId = None
try:
if not activity:
activity = "es_events"
file = files[0]
if file.storageId and file.storageId != -1:
storageId = file.storageId
copytools = [('objectstore', {'setup': ''})]
else:
copytools = None
transferred_files, failed_transfers = mover.stageout(activity=activity, files=files, copytools=copytools)
except PilotException, e:
error = e
except Exception, e:
tolog("ERROR: Mover put data failed [stageout]: exception caught: %s" % e)
tolog(traceback.format_exc())
error = PilotException('STAGEOUT FAILED, exception=%s' % e, code=PilotErrors.ERR_STAGEOUTFAILED, state='STAGEOUT_FAILED')
if error:
## send trace
mover.trace_report.update(clientState=error.state or 'STAGEOUT_FAILED', stateReason=error.message, timeEnd=time())
mover.sendTrace(mover.trace_report)
return error.code, error.message, None
tolog("Mover put data finished")
# prepare compatible output
# keep track of which files have been copied
not_transferred = [e.lfn for e in files if e.status not in ['transferred']]
if not_transferred:
err_msg = 'STAGEOUT FAILED: not all output files have been copied: remain files=%s, errors=%s' % ('\n'.join(not_transferred), ';'.join([str(ee) for ee in failed_transfers]))
tolog("Mover put data finished: error_msg=%s" % err_msg)
return PilotErrors.ERR_STAGEOUTFAILED, err_msg, None
return 0, "", storageId
|
b3841dc487e19ca989575e37b95a9e8f2949258b
| 18,472 |
def floor(data):
"""
Returns element-wise largest integer not greater than x.
Args:
data (tvm.tensor.Tensor): Tensor of type float16, and float32
Returns:
tvm.tensor.Tensor, has the same shape as data and type of int32.
"""
vc_util.ops_dtype_check(data.dtype, vc_util.DtypeForDavinci.ALL_FLOAT)
shape = [x.value for x in data.shape]
vc_util.check_shape(shape)
if utils.product_is_mini() and data.dtype == "float32":
# solve the problem of 87==floor(86.9996) when high_precision is needed.
# problem is caused by such as fp16(86.9996)==87.
# detect problem by fp32(86.9996) - fp32(floor(fp16(86.9996))) < 0
# floor could only apply on float16
data_fp16 = akg.lang.cce.cast_to(data, "float16")
floor_data = akg.lang.cce.floor(data_fp16)
floor_fp16 = akg.lang.cce.cast_to(floor_data, "float16")
floor_fp32 = akg.lang.cce.cast(floor_fp16, "float32")
# if diff=1e-7, we cannot get right sign of fp16(diff)
# but we can get right sign of 10000*diff = 1e-3, which has the same
# sign as diff
diff = (data - floor_fp32) * 10000
diff_fp16 = akg.lang.cce.cast_to(diff, "float16")
# if diff < 0 and floor == ceil, then it's 87 = floor(86.99999)
res = akg.tvm.compute(shape,
lambda *i: akg.tvm.expr.Select(
diff_fp16(*i) < tvm.const(0, "float16"),
floor_fp16(*i) - tvm.const(1, "float16"),
floor_fp16(*i)),
name="res")
res = akg.lang.cce.cast_to(res, "int32")
else:
res = akg.lang.cce.floor(data)
return res
|
3d553d54330c3237908b33600fae560a92f20975
| 18,473 |
def pixel_link_model(inputs, config):
""" PixelLink architecture. """
if config['model_type'] == 'mobilenet_v2_ext':
backbone = mobilenet_v2(inputs, original_stride=False,
weights_decay=config['weights_decay'])
elif config['model_type'] == 'ka_resnet50':
backbone = keras_applications_resnet50(inputs)
elif config['model_type'] == 'ka_vgg16':
backbone = keras_applications_vgg16(inputs)
elif config['model_type'] == 'ka_mobilenet_v2_1_0':
backbone = keras_applications_mobilenetv2(inputs, alpha=1.0)
elif config['model_type'] == 'ka_mobilenet_v2_1_4':
backbone = keras_applications_mobilenetv2(inputs, alpha=1.4)
elif config['model_type'] == 'ka_xception':
backbone = keras_applications_xception(inputs)
segm_logits = fcn_head(backbone, num_classes=2, name='segm_logits',
weights_decay=config['weights_decay'])
link_logits = fcn_head(backbone, num_classes=16, name='link_logits_',
weights_decay=config['weights_decay'])
new_shape = tf.shape(link_logits)[1], tf.shape(link_logits)[2], 8, 2
link_logits = tf.keras.layers.Reshape(new_shape, name='link_logits')(link_logits)
return tf.keras.Model(inputs, [segm_logits, link_logits])
|
0bf606e5b06d94bce98865147fb6a1cf45b04560
| 18,474 |
def ingresar_datos():
"""Pide al usuario los datos para calcular el precio de la compra
de boletos.
:return: tipo, cantidad
:rtype: tuple
"""
text_align("Datos de la compra", width=35)
tipo: str = choice_input(tuple(TIPO.keys()))
cantidad: int = int_input("Ingrese el número de boletos: ", min=1, max=12)
return tipo, cantidad
|
eb9b1c90fbc44a639a7760848723f5579eced4df
| 18,475 |
def trim_spectrum(freqs, power_spectra, f_range):
"""Extract a frequency range from power spectra.
Parameters
----------
freqs : 1d array
Frequency values for the power spectrum.
power_spectra : 1d or 2d array
Power spectral density values.
f_range: list of [float, float]
Frequency range to restrict to, as [lowest_freq, highest_freq].
Returns
-------
freqs_ext : 1d array
Extracted frequency values for the power spectrum.
power_spectra_ext : 1d or 2d array
Extracted power spectral density values.
Notes
-----
This function extracts frequency ranges >= f_low and <= f_high.
It does not round to below or above f_low and f_high, respectively.
Examples
--------
Using a simulated spectrum, extract a frequency range:
>>> from fooof.sim import gen_power_spectrum
>>> freqs, powers = gen_power_spectrum([1, 50], [1, 1], [10, 0.5, 1.0])
>>> freqs, powers = trim_spectrum(freqs, powers, [3, 30])
"""
# Create mask to index only requested frequencies
f_mask = np.logical_and(freqs >= f_range[0], freqs <= f_range[1])
# Restrict freqs & spectra to requested range
# The if/else is to cover both 1d or 2d arrays
freqs_ext = freqs[f_mask]
power_spectra_ext = power_spectra[f_mask] if power_spectra.ndim == 1 \
else power_spectra[:, f_mask]
return freqs_ext, power_spectra_ext
|
a522a384033fc38d3bba5e7d91ca8debfdedec68
| 18,476 |
def _get_variable_for(v):
"""Returns the ResourceVariable responsible for v, or v if not necessary."""
if v.op.type == "VarHandleOp":
for var in ops.get_collection(ops.GraphKeys.RESOURCES):
if (isinstance(var, resource_variable_ops.ResourceVariable)
and var.handle.op is v.op):
return var
raise ValueError("Got %s but could not locate source variable." % (str(v)))
return v
|
5e8f4b83495c89f728c30148e9b05e06713d6b82
| 18,477 |
def load_prefixes(filepath):
"""Dado um arquivo txt contendo os prefixos utilizados na SPARQL, é
devolvida uma string contendo os prefixos e uma lista de tuplas contendo
os prefixos.
Parameters
----------
filepath : str
Caminho do arquivo txt contendo o conjunto de prefixos.
Returns
-------
tuple of str
Uma tupla contendo os prefixos carregados na forma de string e uma
lista de tuplas, onde a primeira posição é o nome dado ao URI e a
segunda contém a URI correspondente.
Examples
--------
.. code-block:: python
>>> from QApedia.io import load_prefixes
>>> filename = "prefixes.txt"
>>> prefixes = load_prefixes(filename)
>>> for uri_name, uri in prefixes[1]:
... print(uri_name, uri)
...
owl: http://www.w3.org/2002/07/owl#
xsd: http://www.w3.org/2001/XMLSchema#
rdfs: http://www.w3.org/2000/01/rdf-schema#
rdf: http://www.w3.org/1999/02/22-rdf-syntax-ns#
foaf: http://xmlns.com/foaf/0.1/
dc: http://purl.org/dc/elements/1.1/
dbpedia2: http://dbpedia.org/property/
dbpedia: http://dbpedia.org/
skos: http://www.w3.org/2004/02/skos/core#
"""
    with open(filepath, "r") as f:
        lines = f.readlines()
prefixes = "\n".join(line.rstrip() for line in lines)
list_of_prefixes = convert_prefixes_to_list(prefixes)
return prefixes, list_of_prefixes
|
a6c2f3c014dbfae73718c579da914f840489e701
| 18,478 |
def build_convolutional_box_predictor(is_training,
num_classes,
conv_hyperparams_fn,
min_depth,
max_depth,
num_layers_before_predictor,
use_dropout,
dropout_keep_prob,
kernel_size,
box_code_size,
apply_sigmoid_to_scores=False,
add_background_class=True,
class_prediction_bias_init=0.0,
use_depthwise=False,
box_encodings_clip_range=None):
"""Builds the ConvolutionalBoxPredictor from the arguments.
Args:
is_training: Indicates whether the BoxPredictor is in training mode.
num_classes: number of classes. Note that num_classes *does not*
include the background category, so if groundtruth labels take values
in {0, 1, .., K-1}, num_classes=K (and not K+1, even though the
assigned classification targets can range from {0,... K}).
conv_hyperparams_fn: A function to generate tf-slim arg_scope with
hyperparameters for convolution ops.
min_depth: Minimum feature depth prior to predicting box encodings
and class predictions.
max_depth: Maximum feature depth prior to predicting box encodings
and class predictions. If max_depth is set to 0, no additional
feature map will be inserted before location and class predictions.
num_layers_before_predictor: Number of the additional conv layers before
the predictor.
use_dropout: Option to use dropout or not. Note that a single dropout
op is applied here prior to both box and class predictions, which stands
in contrast to the ConvolutionalBoxPredictor below.
dropout_keep_prob: Keep probability for dropout.
This is only used if use_dropout is True.
kernel_size: Size of final convolution kernel. If the
spatial resolution of the feature map is smaller than the kernel size,
then the kernel size is automatically set to be
min(feature_width, feature_height).
box_code_size: Size of encoding for each box.
apply_sigmoid_to_scores: If True, apply the sigmoid on the output
class_predictions.
add_background_class: Whether to add an implicit background class.
class_prediction_bias_init: Constant value to initialize bias of the last
conv2d layer before class prediction.
use_depthwise: Whether to use depthwise convolutions for prediction
steps. Default is False.
box_encodings_clip_range: Min and max values for clipping the box_encodings.
Returns:
A ConvolutionalBoxPredictor class.
"""
box_prediction_head = box_head.ConvolutionalBoxHead(
is_training=is_training,
box_code_size=box_code_size,
kernel_size=kernel_size,
use_depthwise=use_depthwise,
box_encodings_clip_range=box_encodings_clip_range)
class_prediction_head = class_head.ConvolutionalClassHead(
is_training=is_training,
num_class_slots=num_classes + 1 if add_background_class else num_classes,
use_dropout=use_dropout,
dropout_keep_prob=dropout_keep_prob,
kernel_size=kernel_size,
apply_sigmoid_to_scores=apply_sigmoid_to_scores,
class_prediction_bias_init=class_prediction_bias_init,
use_depthwise=use_depthwise)
other_heads = {}
return convolutional_box_predictor.ConvolutionalBoxPredictor(
is_training=is_training,
num_classes=num_classes,
box_prediction_head=box_prediction_head,
class_prediction_head=class_prediction_head,
other_heads=other_heads,
conv_hyperparams_fn=conv_hyperparams_fn,
num_layers_before_predictor=num_layers_before_predictor,
min_depth=min_depth,
max_depth=max_depth)
|
3e3b79cbd6e99b8b9d3da5ff2545e17a92ef3f38
| 18,479 |
import os
def _get_default_directory():
"""Returns the default directory for the Store. This is intentionally
underscored to indicate that `Store.get_default_directory` is the intended
way to get this information. This is also done so
`Store.get_default_directory` can be mocked in tests and
`_get_default_directory` can be tested.
"""
return os.environ.get('PRE_COMMIT_HOME') or os.path.join(
os.environ.get('XDG_CACHE_HOME') or os.path.expanduser('~/.cache'),
'pre-commit',
)
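# Minimal usage sketch: precedence is PRE_COMMIT_HOME, then $XDG_CACHE_HOME/pre-commit,
# then ~/.cache/pre-commit. The environment values below are illustrative only.
if __name__ == "__main__":
    os.environ.pop('PRE_COMMIT_HOME', None)
    os.environ['XDG_CACHE_HOME'] = '/tmp/xdg-cache'
    print(_get_default_directory())  # /tmp/xdg-cache/pre-commit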
|
28636cc7c6d3d804a7baea4a95a1e40c8247308c
| 18,480 |
import argparse as argp
def parse_command_line():
    """
    Parse the command line arguments for TEPIC/findBackground.py.
    :return: argparse.Namespace holding the parsed arguments
    """
    parser = argp.ArgumentParser(prog='TEPIC/findBackground.py', add_help=True)
ag = parser.add_argument_group('Input/output parameters')
ag.add_argument('--input', '-i', type=str, dest='inputfile', required=True,
help='Path to input file. First three columns in file'
' are expected to be chrom - start - end.')
ag.add_argument('--genome', '-g', type=str, dest='genome', required=True,
help='Path to genome reference file in 2bit format.')
ag.add_argument('--output', '-o', type=str, dest='outputfile', default='stdout',
help='Path to output file or stdout. Default: stdout')
ag = parser.add_argument_group('Runtime parameters')
ag.add_argument('--workers', '-w', type=int, dest='workers', default=1,
help='Number of CPU cores to use. 1 CPU core'
' processes 1 chromosome at a time. Default: 1')
ag.add_argument('--time-out', '-to', type=int, dest='timeout', default=3,
help='Maximal number of minutes to spend searching for'
' background regions per chromosome. Default: 3 minutes')
ag.add_argument('--threshold', '-th', type=int, dest='threshold', default=90,
help='Stop searching after having found more than <THRESHOLD>%%'
' matches per chromosome. Default: 90%%')
ag.add_argument('--eps-init', '-ei', type=float, dest='epsinit', default=1.,
help='Init value for epsilon. Error tolerance in percentage points'
' for similarity matching. Default: 1.0 ppt')
ag.add_argument('--eps-step', '-es', type=float, dest='epsstep', default=0.5,
help='Increment epsilon at each iteration by this value. Default: 0.5')
ag.add_argument('--eps-max', '-em', type=float, dest='epsmax', default=2.,
help='Maximal value for epsilon. After reaching this value, restart'
' search with different starting positions. Default: 2.0')
return parser.parse_args()
|
1f12e08cf4c86f40a84a09303d75a5d3506a3a14
| 18,481 |
import torch
def disparity_to_idepth(K, T_right_in_left, left_disparity):
"""Function athat transforms general (non-rectified) disparities to inverse
depths.
"""
assert(len(T_right_in_left.shape) == 3)
# assert(T_right_in_left.shape[0] == self.batch_size)
assert(T_right_in_left.shape[1] == 4)
assert(T_right_in_left.shape[2] == 4)
assert(len(K.shape) == 3)
# assert(K.shape[0] == self.batch_size)
assert(K.shape[1] == 4)
assert(K.shape[2] == 4)
batch_size = K.shape[0]
rows = left_disparity.shape[-2]
cols = left_disparity.shape[-1]
# Create tensor of homogeneous pixel coordinates of size (batch, 3, rows*cols).
y_grid, x_grid = torch.meshgrid(torch.arange(0, rows, device=left_disparity.device),
torch.arange(0, cols, device=left_disparity.device))
xys = torch.cat([x_grid.reshape(-1, rows * cols).float(),
y_grid.reshape(-1, rows * cols).float()], dim=0)
xys = xys.unsqueeze(0).repeat(batch_size, 1, 1)
ones = torch.ones(batch_size, 1, rows * cols, dtype=torch.float32, device=xys.device)
xyz_pix = torch.cat([xys, ones], 1)
Kinv = torch.inverse(K)
T_left_in_right = torch.inverse(T_right_in_left)
R_left_in_right = T_left_in_right[:, :3, :3]
KRKinv = torch.matmul(K[:, :3, :3], torch.matmul(R_left_in_right, Kinv[:, :3, :3]))
KRKinv3 = KRKinv[:, 2, :] # (batch, 3)
KRKinv3_rep = torch.unsqueeze(KRKinv3, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
KT_left_in_right = torch.matmul(K, T_left_in_right)
Kt = KT_left_in_right[:, :3, 3] # (batch, 3)
Kt_rep = torch.unsqueeze(Kt, dim=2).repeat(1, 1, rows*cols) # (batch, 3, rows*cols)
# (batch, rows*cols)
left_disparity_flat = left_disparity.reshape(batch_size, -1)
# Compute pixels at infinite depth.
pix_inf = torch.matmul(KRKinv, xyz_pix) # (batch, 3, rows*cols)
pix_inf[:, 0, :] /= pix_inf[:, 2, :]
pix_inf[:, 1, :] /= pix_inf[:, 2, :]
pix_inf[:, 2, :] /= pix_inf[:, 2, :]
# Compute epipolar lines (must point from far to near depth).
pix_far = torch.matmul(KRKinv, xyz_pix * 1e2)
pix_far += Kt_rep
pix_far[:, 0, :] /= pix_far[:, 2, :]
pix_far[:, 1, :] /= pix_far[:, 2, :]
pix_far[:, 2, :] /= pix_far[:, 2, :]
epi_diff = pix_far[:, :2, :] - pix_inf[:, :2, :]
epi_norm = torch.sqrt(torch.sum(epi_diff**2, dim=1))
epiline = epi_diff[:, :2, :] # (batch, 2, rows*cols)
epiline[:, 0, :] /= (epi_norm + 1e-6)
epiline[:, 1, :] /= (epi_norm + 1e-6)
mask = epi_norm < 1e-6
mask = mask.reshape(batch_size, 1, rows, cols)
# Convert disparity to idepth.
# (batch, rows*cols)
w = KRKinv3_rep[:, 0, :] * xyz_pix[:, 0, :] + \
KRKinv3_rep[:, 1, :] * xyz_pix[:, 1, :] + \
KRKinv3_rep[:, 2, :]
# (batch, rows*cols)
A0 = Kt_rep[:, 0, :] - Kt_rep[:, 2, :]*(pix_inf[:, 0, :] + left_disparity_flat * epiline[:, 0, :])
A1 = Kt_rep[:, 1, :] - Kt_rep[:, 2, :]*(pix_inf[:, 1, :] + left_disparity_flat * epiline[:, 1, :])
b0 = w * left_disparity_flat * epiline[:, 0, :]
b1 = w * left_disparity_flat * epiline[:, 1, :]
ATA = A0 * A0 + A1 * A1
ATb = A0 * b0 + A1 * b1
left_idepthmap = ATb / ATA
left_idepthmap = left_idepthmap.reshape(batch_size, 1, rows, cols)
# Set bad points to 0 idepth.
left_idepthmap = (~mask).float() * left_idepthmap
return left_idepthmap
|
454bda2fd9ec4e4ef5615dbdb054c2f3b454f31a
| 18,482 |
def processPhoto(photoInfo, panoramioreview=False, reviewer='',
override=u'', addCategory=u'', autonomous=False, site=None):
"""Process a single Panoramio photo."""
if not site:
site = pywikibot.Site('commons', 'commons')
if isAllowedLicense(photoInfo) or override:
# Should download the photo only once
photo = downloadPhoto(photoInfo.get(u'photo_file_url'))
# Don't upload duplicate images, should add override option
duplicates = findDuplicateImages(photo, site=site)
if duplicates:
pywikibot.output(u'Found duplicate image at %s' % duplicates.pop())
else:
filename = getFilename(photoInfo, site=site)
pywikibot.output(filename)
description = getDescription(photoInfo, panoramioreview,
reviewer, override, addCategory)
pywikibot.output(description)
if not autonomous:
(newDescription, newFilename, skip) = Tkdialog(
description, photo, filename).show_dialog()
else:
newDescription = description
newFilename = filename
skip = False
# pywikibot.output(newPhotoDescription)
# if (pywikibot.Page(title=u'File:'+ filename,
# site=pywikibot.Site()).exists()):
# # I should probably check if the hash is the same and if not upload
# # it under a different name
# pywikibot.output(u'File:' + filename + u' already exists!')
# else:
# Do the actual upload
# Would be nice to check before I upload if the file is already at
# Commons
# Not that important for this program, but maybe for derived
# programs
if not skip:
bot = UploadRobot(photoInfo.get(u'photo_file_url'),
description=newDescription,
useFilename=newFilename,
keepFilename=True,
verifyDescription=False, site=site)
bot.upload_image(debug=False)
return 1
return 0
|
e753f411b028caf74ca12b5e7215f161d290afa7
| 18,483 |
def auto_apilado(datos,target,agrupacion,porcentaje=False):
"""
    Receives a DataFrame, a target variable, and the variable used to
    group the data (x axis). Returns a stacked bar chart and the
    underlying table.
    """
    total = datos[[target,agrupacion]].groupby(agrupacion).count()
    tabla = pd.DataFrame([])
    fig = go.Figure()
    # The y-axis title depends on whether counts or percentages are plotted
    y_title = 'Porcentaje (Individuos)' if porcentaje else 'Conteo (Individuos)'
    # Create one trace per value of the target variable
    for value in datos[target].unique():
        trace = datos[[target,agrupacion]].loc[datos[target]==value].groupby(agrupacion).count()
        if porcentaje:
            trace = 100*trace/total
        # Columns must share the same name so they can be concatenated
        trace.rename(columns={target:str(value)},inplace=True)
        tabla = pd.concat([tabla, trace],axis = 1)
        # Add the bar trace for this value to the figure
        fig.add_trace(go.Bar(
            x = tabla.index,
            y = tabla[str(value)],
            name=str(value),
            # marker_color='rgb(26, 118, 255)'
        ))
fig.update_layout(
title='Conteo de '+str(target)+' agrupado por '+str(agrupacion),
xaxis_tickfont_size=14,
yaxis=dict(
title=y_title,
titlefont_size=16,
tickfont_size=14,
),
xaxis=dict(
title=str(agrupacion)
))
fig.update_layout(barmode='stack')
return fig, tabla
|
b3b13e0e5bd56628971004c0d6d5171929ab6de3
| 18,484 |
import datetime
def month_from_string(month_str: str) -> datetime.date:
"""
Accepts year-month strings with hyphens such as "%Y-%m"
"""
return datetime.datetime.strptime(month_str, "%Y-%m").date()
|
cfb901f6676d40398bd6f49c438541f00e5389e3
| 18,485 |
import hashlib
import networkx as nx
import pynauty
from networkx import DiGraph
def get_isomorphic_signature(graph: DiGraph) -> str:
"""
Generate unique isomorphic id with pynauty
"""
nauty_graph = pynauty.Graph(len(graph.nodes), directed=True, adjacency_dict=nx.to_dict_of_lists(graph))
return hashlib.md5(pynauty.certificate(nauty_graph)).hexdigest()
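# Hedged usage sketch: two relabelled (isomorphic) directed 3-cycles should
# produce the same signature; the graphs below are illustrative only.
if __name__ == "__main__":
    g1 = nx.DiGraph([(0, 1), (1, 2), (2, 0)])
    g2 = nx.DiGraph([(0, 2), (2, 1), (1, 0)])
    print(get_isomorphic_signature(g1) == get_isomorphic_signature(g2))  # True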
|
8dfd7dd44409fee7dddd88f21681ed93232f1dba
| 18,486 |
from typing import Callable
from typing import Any
import sys
import time
def watchdog(timeout: int | float, function: Callable, *args, **kwargs) -> Any:
"""Time-limited execution for python function. TimeoutError raised if not finished during defined time.
Args:
timeout (int | float): Max time execution in seconds.
function (Callable): Function that will be evaluated.
*args: Args for the function.
*kwargs: Kwargs for the function.
Raises:
TimeoutError: If defined time runs out.
RuntimeError: If function call with defined params fails.
Returns:
Any: Depends on used function.
Examples:
>>> import time
>>> def sleep(sec):
... for _ in range(sec):
... time.sleep(1)
>>> watchdog(1, sleep, 0)
>>> watchdog(1, sleep, 10)
Traceback (most recent call last):
TimeoutError: ...
"""
old_tracer = sys.gettrace()
def tracer(frame, event, arg, start=time.time()):
"Helper."
now = time.time()
if now > start + timeout:
raise TimeoutError("Time exceeded")
return tracer if event == "call" else None
try:
sys.settrace(tracer)
result = function(*args, **kwargs)
except TimeoutError:
sys.settrace(old_tracer)
raise TimeoutError(
mylogging.return_str(
"Timeout defined in watchdog exceeded.", caption="TimeoutError", level="ERROR",
)
)
except Exception:
sys.settrace(old_tracer)
raise RuntimeError(
mylogging.return_str(
f"Watchdog with function {function.__name__}, args {args} and kwargs {kwargs} failed."
)
)
finally:
sys.settrace(old_tracer)
return result
|
f9a54bee2d444831439d710ef882e4e117b85d62
| 18,487 |
def _encode_raw_string(str):
"""Encodes a string using the above encoding format.
Args:
str (string): The string to be encoded.
Returns:
An encoded version of the input string.
"""
return _replace_all(str, _substitutions)
|
bb33875b276fe822c2b43ec3ebcc57b0d2f4c7b9
| 18,488 |
from typing import Callable
def char_pred(pred: Callable[[int], bool]) -> Parser:
"""Parses a single character passing a given predicate."""
def f(x):
if pred(x):
return value(x)
else:
raise Failure(f"Character '{chr(x)}' fails predicate"
" `{pred.__name__}`")
return item >> f
|
2b7be4f740e7f7afad1ef66c0d544208d679fc5c
| 18,489 |
def convert_bound(bound, coord_max, coord_var):
"""
This function will return a converted bound which which matches the
range of the given input file.
Parameters
----------
bound : np.array
1-dimensional 2-element numpy array which represent the lower
and upper bounding box on this coordinate, respectively.
coord_max : integer
The max value which is possible given this coordinate. For
example, the max for longitude is 360.
coord_var : xarray.DataArray
The xarray variable for some coordinate.
Returns
-------
np.array
1-dimensional 2-element number array which represents the lower
and upper bounding box on this coordinate and has been converted
based on the valid bounds coordinate range of the dataset.
Notes
-----
Assumption that 0 is always on the prime meridian/equator.
"""
scale = coord_var.attrs.get('scale_factor', 1.0)
offset = coord_var.attrs.get('add_offset', 0.0)
valid_min = coord_var.attrs.get('valid_min', None)
if valid_min is None or valid_min > 0:
# If coord var doesn't contain valid min, attempt to find
# manually. Note: Given the perfect storm, this could still fail
# to find the actual bounds.
# Filter out _FillValue from data before calculating min and max
fill_value = coord_var.attrs.get('_FillValue', None)
var_values = coord_var.values
if fill_value:
var_values = np.where(var_values != fill_value, var_values, np.nan)
var_min = np.nanmin(var_values)
var_max = np.nanmax(var_values)
if 0 <= var_min <= var_max <= (coord_max / scale):
valid_min = 0
# If the file coords are 0 --> max
if valid_min == 0:
bound = (bound + coord_max) % coord_max
# If the right/top bound is 0, set to max.
if bound[1] == 0:
bound[1] = coord_max
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([0, coord_max])
# If the file longitude is -coord_max/2 --> coord_max/2
if valid_min != 0:
# If edges are the same, assume it wraps and return all
if bound[0] == bound[1]:
bound = np.array([-(coord_max / 2), coord_max / 2])
# Calculate scale and offset so the bounds match the coord data
return apply_scale_offset(scale, offset, bound)
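# Worked example, not from the original module: for a 0-360 longitude grid
# (valid_min == 0) a requested bound of [-10, 10] wraps to [350, 10]; this is
# the `(bound + coord_max) % coord_max` step above, before apply_scale_offset.
if __name__ == "__main__":
    import numpy as np  # np is also assumed by the snippet above
    print((np.array([-10.0, 10.0]) + 360) % 360)  # [350.  10.]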
|
5784167af65b2f406bfa5c428f1421a8915359f3
| 18,490 |
def tk_window_focus():
"""Return true if focus maintenance under TkAgg on win32 is on.
This currently works only for python.exe and IPython.exe.
Both IDLE and Pythonwin.exe fail badly when tk_window_focus is on."""
if rcParams['backend'] != 'TkAgg':
return False
return rcParams['tk.window_focus']
|
757963dce9d9dc00be54ffbcbf694656f2f770e9
| 18,491 |
from functools import reduce
def cc_filter_set_variables(operator, bk_biz_id, bk_obj_id, bk_obj_value):
"""
通过集群ID、过滤属性ID、过滤属性值,过滤集群
:param operator: 操作者
:param bk_biz_id: 业务ID
:param bk_obj_id: 过滤属性ID
:param bk_obj_value: 过滤属性值
:return:
"""
client = get_client_by_user(operator)
obj_value_list = bk_obj_value.split(",")
results = []
    # When several filter values are given, call the API once per value
for obj_value in obj_value_list:
kwargs = {
"bk_biz_id": int(bk_biz_id),
"condition": {bk_obj_id: obj_value},
}
result = client.cc.search_set(kwargs)
if not result["result"]:
            err_msg = _(
                "[cc_filter_set_variables] call to cc.search_set failed to fetch sets, "
                "kwargs={kwargs}, result={result}"
            ).format(kwargs=kwargs, result=result)
logger.error(err_msg)
raise ApiRequestError(err_msg)
results += result["data"]["info"]
if not results:
return [], set()
bk_attributes = reduce(set.intersection, [set(result.keys()) for result in results])
return results, bk_attributes
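# Minimal sketch of the attribute-intersection step above; `results_demo` is a
# made-up example showing that reduce(set.intersection, ...) keeps only the
# keys present in every returned set dict.
if __name__ == "__main__":
    results_demo = [{"bk_set_name": "a", "extra": 1}, {"bk_set_name": "b"}]
    print(reduce(set.intersection, [set(r.keys()) for r in results_demo]))  # {'bk_set_name'}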
|
3e1f849c59d3e3553f1c8b6f725a36921cae9451
| 18,492 |
def distance(mags, spt, spt_unc):
"""
    Estimate photometric distances from a dictionary of magnitudes
    (value, uncertainty) keyed by filter, given a spectral type and its
    uncertainty, by Monte Carlo sampling the absolute-magnitude relations.
"""
res={}
f110w=mags['F110W']
f140w=mags['F140W']
f160w=mags['F160W']
relations=POLYNOMIAL_RELATIONS['abs_mags']
nsample=1000
for k in mags.keys():
#take the standard deviation
spt=make_spt_number(spt)
absmag_scatter=relations[k][1]
spts=np.random.normal(spt, spt_unc, nsample)
#trim out spectral types outside range of validitiy
mask=(spts<15) & (spts >40)
absmags=(relations[k][0])(spts)[~mask]
#total_uncertainty
mag_unc=(absmag_scatter**2+mags[k][1]**2)**0.5
relmags=np.random.normal(mags[k][0], mag_unc, nsample)[~mask]
dists=get_distance(absmags, relmags)
res[str('dist')+k]=np.nanmedian(dists)
res[str('dist_er')+k]=np.nanstd(dists)
return res
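# Hedged note: assuming get_distance implements the standard distance modulus,
# d = 10 ** ((m - M + 5) / 5) parsec, an apparent magnitude m = 15 with
# absolute magnitude M = 10 corresponds to d = 100 pc.
if __name__ == "__main__":
    print(10 ** ((15 - 10 + 5) / 5))  # 100.0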
|
cece86e80c03ce8d9753fe864bd746e68500fca3
| 18,493 |
import torch
import sys
def loss_function(recon_x, x, mu, logvar, cl, target, natural):
"""
Universally calculates the loss, be it for training or testing. Hardcoded to use mse_loss. Change below to binary_cross_entropy if desired.
@param recon_x: images reconstructed by the decoder(s)
@param x: original images for comparison
@param mu: latent mean
@param logvar: latent log variance
@param cl: cell count predictions for given images
@param target: cell count label for given labeled images
@param natural: bool, true if x is of type natural
@return: float, float, float, the summed loss as well as the Kullback-Leibler divergence and the loss of the regressor in separate
"""
global decoder_nat_loss, decoder_syn_loss, KLD_syn_loss, KLD_nat_loss, regressor_nat, regressor_syn
# decoder_loss = F.binary_cross_entropy(recon_x, x.view(-1, 1, img_size, img_size), reduction='sum') * decoder_l_factor
decoder_loss = F.mse_loss(recon_x, x) * decoder_l_factor
# see Appendix B from VAE paper: Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
beta = 1 / (batch_size * boundary_dim) # disentanglement factor#extremely small
KLD = -0.5 * beta * torch.sum(1 + logvar - mu.pow(2) - logvar.exp()) * KLD_l_factor
regressor_loss = F.mse_loss(cl, target.view(-1, 1)) * regressor_l_factor
if epoch < regressor_start:
regressor_loss *= 0
if natural:
decoder_nat_loss += decoder_loss
KLD_nat_loss += KLD
regressor_nat += regressor_loss
else:
decoder_syn_loss += decoder_loss
KLD_syn_loss += KLD
regressor_syn += regressor_loss
if KLD > 1e10:
playSound()
print('KLD diverged')
# print(KLD)
# print(torch.max(logvar))
# print(logvar)
# print(mu)
# print(beta)
sys.exit('KLD diverged')
return decoder_loss + KLD + regressor_loss, KLD, regressor_loss
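# Hedged sanity check of the KL term alone (beta and the *_l_factor scalings
# above are omitted): mu = 0, logvar = 0 matches the standard-normal prior,
# so the KL divergence is 0.
if __name__ == "__main__":
    _mu = torch.zeros(4, 8)
    _logvar = torch.zeros(4, 8)
    print(float(-0.5 * torch.sum(1 + _logvar - _mu.pow(2) - _logvar.exp())))  # 0.0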
|
8fc5773dbecd3f445ffb849a91ad0a95a93bc45c
| 18,494 |
import math
def foo(X):
"""The function to evaluate"""
ret = []
for x in X:
r = 2*math.sqrt(sum([n*n for n in x]));
if r == 0:
ret.append(0)
else:
ret.append(math.sin(r) / r);
return ret
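# Minimal usage sketch: the origin maps to 0 by the convention above, and a
# point at distance 0.5 from the origin gives sin(1)/1.
if __name__ == "__main__":
    print(foo([[0.0, 0.0], [0.5, 0.0]]))  # [0, 0.8414709848078965]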
|
7b241cf45757cdf9a5a28ee56c59ee41099ccb1e
| 18,495 |
def measure_curv(left_fit, right_fit, plot_points, ym_per_pix, xm_per_pix):
"""
    calculates the curvature (in meters) from the given lane-line polynomials
    Args:
        left_fit: polynomial coefficients [A, B, C] of the left lane line in pixel space
        right_fit: polynomial coefficients [A, B, C] of the right lane line in pixel space
        plot_points: y values (in pixels) along which the polynomials are evaluated
        ym_per_pix: meters per pixel in the y direction
        xm_per_pix: meters per pixel in the x direction
    """
#get the max y value (start of the lane) this is the place we want to calc the curvature
y_curve = np.max(plot_points)
#calculate/defin the new polynom values to get m instead of pixel
cofA_left = xm_per_pix / (ym_per_pix**2) * left_fit[0]
cofB_left = (xm_per_pix/ym_per_pix) * left_fit[1]
cofA_right = xm_per_pix / (ym_per_pix**2) * right_fit[0]
cofB_right = (xm_per_pix/ym_per_pix) * right_fit[1]
    #calculate the curvature using the formula: R = (1+(2Ay+B)^2)^(3/2)/|2A| with x = A*y^2+B*y+C
    left_curv_m = ((1+(2*cofA_left*y_curve*ym_per_pix+cofB_left)**2)**(3/2))/np.absolute(2*cofA_left)
    right_curv_m = ((1+(2*cofA_right*y_curve*ym_per_pix+cofB_right)**2)**(3/2))/np.absolute(2*cofA_right)
#calculate the mean curvature (curvatur from the middle of the lane)
curv_mean = (left_curv_m + right_curv_m) / 2
return curv_mean, left_curv_m, right_curv_m
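# Hedged sanity check: with unit pixel scaling and the fit x = 0.5*y**2
# evaluated at y = 0, R = 1/|2A| = 1.0 for both lines (the values are chosen
# for illustration; np is assumed to be numpy as in the snippet above).
if __name__ == "__main__":
    fit = [0.5, 0.0, 0.0]
    print(measure_curv(fit, fit, np.array([0.0]), 1.0, 1.0))  # (1.0, 1.0, 1.0)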
|
7ae6d1e390906c3011349716aad0d0640a4c3a65
| 18,496 |
import logging
import datetime
def get_external_dns(result):
"""
Function to validate the ip address. Used to extract EXTERNAL_DNS server information
Args:
result(dict): Input result dictionary with all network parameters and boolean flags
Returns:
result(dict): The updated result dictionary with network parameters
Raises:
Exception on Invalid IP addresses
"""
logging.info('[%s] - Collect the external dns.', datetime.datetime.today())
try:
is_answer = False
while not is_answer:
external_dns = case_check(input("Do you have public EXTERNAL DNS IP servers? y or n \n"))
if external_dns == 'n' or external_dns == 'y':
result['external_dns_flag'] = external_dns
is_answer = True
if external_dns == 'y':
is_internal = False
while not is_internal:
external = case_check(
input("Enter the EXTERNAL DNS public IP address(s) comma separated or 's' to skip \n"))
if external == 's':
result['external_dns_flag'] = 's'
logging.info("EXTERNAL_DNS option skipped by user ")
break
if len(external) > 0:
result, is_internal = ip_validation('EXTERNAL_DNS', external, result, is_internal)
else:
print(f'{Style.RED}Wrong value! Please input y or n{Style.RESET}')
return result
except Exception as error:
logging.error(error)
|
14531bcd17dbc036f417ec7eca6d24e9c7931e6f
| 18,497 |
def __process_agent(agent_param):
"""Get the agent id and namespace from an input param."""
if not agent_param.endswith('TEXT'):
param_parts = agent_param.split('@')
if len(param_parts) == 2:
ag, ns = param_parts
elif len(param_parts) == 1:
ag = agent_param
ns = 'HGNC-SYMBOL'
else:
raise DbAPIError('Unrecognized agent spec: \"%s\"' % agent_param)
else:
ag = agent_param[:-5]
ns = 'TEXT'
if ns == 'HGNC-SYMBOL':
original_ag = ag
ag = hgnc_client.get_hgnc_id(original_ag)
if ag is None and 'None' not in agent_param:
raise DbAPIError('Invalid agent name: \"%s\"' % original_ag)
ns = 'HGNC'
return ag, ns
|
49ebaa4c435422066c0e2345e4cf056caebbdc9e
| 18,498 |
def inference(predictions_op, true_labels_op, display, sess):
""" Perform inference per batch on pre-trained model.
This function performs inference and computes the CER per utterance.
Args:
predictions_op: Prediction op
true_labels_op: True Labels op
display: print sample predictions if True
sess: default session to evaluate the ops.
Returns:
char_err_rate: list of CER per utterance.
"""
char_err_rate = []
# Perform inference of batch worth of data at a time.
[predictions, true_labels] = sess.run([predictions_op,
true_labels_op])
pred_label = sparse_to_labels(predictions[0][0])
actual_label = sparse_to_labels(true_labels)
for (label, pred) in zip(actual_label, pred_label):
char_err_rate.append(distance(label, pred)/len(label))
if display:
# Print sample responses
for i in range(ARGS.batch_size):
print(actual_label[i] + ' vs ' + pred_label[i])
return char_err_rate
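# Worked example: assuming `distance` above is Levenshtein edit distance, the
# CER of predicting "hallo" for the label "hello" is 1/5 = 0.2.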
|
5e58ab3fff91a2fb5450b37f0bf41b2681d297d9
| 18,499 |