content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
import numpy as np
from scipy import ndimage
def dilate(data, iterations=1, structure=None):
    """Dilate a binary ND array by a number of iterations."""
    # Convert to binary, just in case (binarise() is an external helper from this module).
    mask = binarise(data).astype(int)
    if structure is None:
        structure = ndimage.generate_binary_structure(3, 1)
    # Check we have positive iterations - no header available here to convert from mm.
    iterations = np.abs(iterations)
    # Slightly awkward as I'm not certain iterations == voxels
    print("  Dilating {0} iterations ...".format(iterations))
    if iterations > 0:
        mask = ndimage.binary_dilation(mask, structure, iterations)
    return mask
# End of dilate() definition | 724b10f0c1d0d417f4ca693a5322349f390da17c | 12,100 |
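A minimal usage sketch for dilate(), assuming numpy and scipy are installed; it exercises ndimage.binary_dilation directly on a hand-made binary mask rather than going through the external binarise() helper.
import numpy as np
from scipy import ndimage
mask = np.zeros((5, 5, 5), dtype=int)
mask[2, 2, 2] = 1
grown = ndimage.binary_dilation(mask, ndimage.generate_binary_structure(3, 1), iterations=1)
print(int(grown.sum()))  # 7: the centre voxel plus its six face neighbours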
from PyPDF4 import PdfFileReader
def text_from_pdf(file_name : str) -> str:
"""
Extract text from PDF file
==========================
Parameters
----------
file_name : str
Name of the file to extract text from.
Returns
-------
str
The extracted text.
"""
text = ''
with open(file_name, 'rb') as instream:
reader = PdfFileReader(instream)
for i in range(reader.numPages):
text += '{}\n'.format(reader.getPage(i).extractText())
return text | a1b0077b143b4fee211dd38b6beabf58c7692177 | 12,101 |
import configparser
def read_section(section, fname):
"""Read the specified section of an .ini file."""
conf = configparser.ConfigParser()
conf.read(fname)
    try:
        return dict(conf.items(section))
except configparser.NoSectionError:
return None | 65d6b81b45fc7b75505dd6ee4dda19d13ebf7095 | 12,102 |
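A small sketch of how read_section() behaves, using a hypothetical settings.ini written on the fly; configparser always returns values as strings.
import configparser
cp = configparser.ConfigParser()
cp['database'] = {'host': 'localhost', 'port': '5432'}
with open('settings.ini', 'w') as fh:
    cp.write(fh)
print(read_section('database', 'settings.ini'))  # {'host': 'localhost', 'port': '5432'}
print(read_section('missing', 'settings.ini'))   # None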
import numpy as np
import torch
def freqz(
b, a=1, worN=512, whole=False, fs=2 * np.pi, log=False, include_nyquist=False
):
"""Compute the frequency response of a digital filter."""
h = None
lastpoint = 2 * np.pi if whole else np.pi
if log:
w = np.logspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
else:
w = np.linspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
w = torch.tensor(w, device=b.device)
    if torch.as_tensor(a).numel() == 1:
n_fft = worN if whole else worN * 2
h = torch.fft.rfft(b, n=n_fft)[:worN]
h /= a
if h is None:
zm1 = torch.exp(-1j * w)
h = polyval(b, zm1) / (polyval(a, zm1) + 1e-16)
# need to catch NaNs here
w = w * fs / (2 * np.pi)
return w, h | d6950acc8535791968d34edf8c4ebd557000b72e | 12,103 |
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results | 30b7e6d48c2f0fa3eb2d2486fee9a87dad609886 | 12,104 |
from itertools import combinations
from scipy.special import comb
def generate_2d_scatter(data, variables, class_data=None, class_names=None,
nrows=None, ncols=None, sharex=False, sharey=False,
show_legend=True, xy_line=False, trendline=False,
cmap_class=None, shorten_variables=False,
**kwargs):
"""Generate 2D scatter plots from the given data and variables.
This method will generate 2D scatter plots for all combinations
of the given variables.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
nrows : integer, optional
The number of rows to use in a figure.
ncols : integer, optional
The number of columns to use in a figure.
sharex : boolean, optional
If True, the scatter plots will share the x-axis.
sharey : boolean, optional
If True, the scatter plots will share the y-axis.
show_legend : boolean, optional
If True, we will create a legend here and show it.
xy_line : boolean, optional
If True, we will add a x=y line to the plot.
trendline : boolean, optional
If True, we will add a trend line to the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
    shorten_variables : boolean, optional
        If True, variable names longer than 5 characters are abbreviated in
        the axis labels.
    kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures containing the plots.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes containing the plots.
"""
nplots = comb(len(variables), 2, exact=True)
figures, axes = create_fig_and_axes(
nplots, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
**kwargs,
)
fig = None
for i, (xvar, yvar) in enumerate(combinations(variables, 2)):
# We do not want to repeat the legend in all subplots:
show_legend_ax = False
if axes[i].figure != fig:
fig = axes[i].figure
show_legend_ax = True
xlabel = None
ylabel = None
if shorten_variables:
if len(xvar) > 5:
xlabel = xvar[:3] + '...'
if len(yvar) > 5:
ylabel = yvar[:3] + '...'
_, _, patches, labels = plot_scatter(
data,
xvar,
yvar,
axi=axes[i],
xlabel=xlabel,
ylabel=ylabel,
class_data=class_data,
class_names=class_names,
cmap_class=cmap_class,
**kwargs,
)
if xy_line:
line_xy = add_xy_line(axes[i], alpha=0.7, color='black')
patches.append(line_xy)
labels.append('x = y')
if trendline:
line_trend = add_trendline(axes[i], data[xvar], data[yvar],
alpha=0.7, ls='--', color='black')
patches.append(line_trend)
labels.append('y = a + bx')
if show_legend and show_legend_ax and patches and labels:
axes[i].legend(patches, labels)
return figures, axes | 9d2a843f07cbfed921831a64ad39b4c13a947500 | 12,105 |
def getOptions(options):
"""translate command line options to PAML options."""
codeml_options = {}
if options.analysis == "branch-specific-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "1"
elif options.analysis == "branch-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
elif options.analysis == "branch-all-but-one-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "2"
if not tree:
raise ValueError("please supply a tree for this mode.")
if not options.filename_output_tree:
raise ValueError(
"please speficy filename-output-tree as location "
"(relative to this script) for trees.")
elif options.analysis == "site-specific-kaks":
codeml_options["ncatG"] = "10"
codeml_options["getSE"] = "1"
codeml_options["seqtype"] = "1"
codeml_options["NSsites"] = "0 3 1 2 7 8"
codeml_options["model"] = "0"
codeml_options["CodonFreq"] = "2"
elif options.analysis == "pairwise":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
codeml_options["runmode"] = "-2"
if options.multiple_genes:
codeml_options["Malpha"] = "0"
codeml_options["Mgene"] = "0"
if options.omega is not None:
codeml_options["omega"] = str(options.omega)
if options.estimate_ancestors:
codeml_options["RateAncestor"] = "1"
if options.codon_frequencies is not None:
c = options.codon_frequencies.upper()
if c in ("UNIFORM", "FEQUAL"):
a = "0"
elif c == "F1X4":
a = "1"
elif c == "F3X4":
a = "2"
elif c == "F61":
a = "3"
else:
a = options.codon_frequencies
codeml_options["CodonFreq"] = a
if options.method is not None:
codeml_options["method"] = str(options.method)
if options.optimization_threshold is not None:
codeml_options["Small_Diff"] = str(options.optimization_threshold)
if options.clean_data:
codeml_options["cleandata"] = options.clean_data
return codeml_options | 86b0fa157e9a9c48d7bf683e57f24f49e32f15e7 | 12,106 |
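A sketch of the option mapping for one analysis mode, substituting a hypothetical SimpleNamespace for the parsed command-line options object.
from types import SimpleNamespace
opts = SimpleNamespace(analysis="pairwise", multiple_genes=False, omega=None,
                       estimate_ancestors=False, codon_frequencies="F3X4",
                       method=None, optimization_threshold=None, clean_data=None)
print(getOptions(opts))
# {'seqtype': '1', 'model': '0', 'runmode': '-2', 'CodonFreq': '2'}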
import json
def deserialize_block_to_json(block: Block) -> str:
"""Deserialize Block object to JSON string
Parameters
----------
block : Block
Block object
Returns
-------
str
JSON string
"""
try:
if block:
return json.dumps(
{
"blockId": block.id,
"blockNumber": block.number,
"timestamp": block.timestamp,
"producer": block.producer,
"unfilteredTransactionCount": block.unfilteredTransactionCount,
"unfilteredTransactionTraceCount": block.unfilteredTransactionTraceCount,
"unfilteredExecutedInputActionCount": block.unfilteredExecutedInputActionCount,
"unfilteredExecutedTotalActionCount": block.unfilteredExecutedTotalActionCount,
"filteringIncludeFilterExpr": block.filteringIncludeFilterExpr,
"filteredTransactionTraceCount": block.filteredTransactionTraceCount,
"filteredExecutedInputActionCount": block.filteredExecutedInputActionCount,
"filteredExecutedTotalActionCount": block.filteredExecutedTotalActionCount,
"filteredTransactionCount": block.filteredTransactionCount,
},
sort_keys=True,
)
else:
raise ValueError("None block made it through")
except ValueError as e:
logger.exception(
BlocktraceLog(
__name__,
"catching_exception",
{"hint": "Check debug logs to see why None Block made it through"},
)
)
return "" | b78f31ef076bcb36011df45c6c5c68563a47e71e | 12,107 |
def get_invocations(benchmark: Benchmark):
"""
Returns a list of invocations that invoke the tool for the given benchmark.
It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part.
    For each track an invocation with default settings has to be provided and in addition, an optimized
    setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified.
    Only information about the model type, the property type and the state space size are allowed to be
    used to tweak the parameters.
If this benchmark is not supported, an empty list has to be returned.
"""
if not is_benchmark_supported(benchmark):
return []
prec = dict()
prec["epsilon-correct"] = "0.000001"
prec["probably-epsilon-correct"] = "0.05"
prec["often-epsilon-correct"] = "0.001"
prec["often-epsilon-correct-10-min"] = "0.001"
result = []
for track in prec.keys():
benchmark_settings = "./pet.sh reachability --precision {} --relative-error --only-result -m {} -p {} --property {}".format(
prec[track],
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
)
if benchmark.get_open_parameter_def_string() != "":
benchmark_settings += " --const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
benchmark_settings = "./fix-syntax " + benchmark_settings
# default settings PET eps-corr
default_inv = Invocation()
default_inv.identifier = "default"
default_inv.note = "Default settings."
default_inv.track_id = track
default_inv.add_command(benchmark_settings)
result += [default_inv]
if (
track == "epsilon-correct"
or benchmark.get_model_type() == "ctmc"
or "haddad" in benchmark.get_prism_program_filename()
or "csma" in benchmark.get_prism_program_filename()
or "wlan" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
# smc is prob eps correct, cannot handle ctmc and haddad monmege cannot be parsed by it
continue
if benchmark.get_num_states_tweak() is None:
# need this info
continue
smc_settings = "./smc.sh {} {} -prop {} -heuristic RTDP_ADJ -RTDP_ADJ_OPTS 1 -colourParams S:{},Av:10,e:{},d:0.05,p:0.05,post:64".format(
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
benchmark.get_num_states_tweak(),
prec[track],
)
if benchmark.get_open_parameter_def_string() != "":
smc_settings += " -const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
smc_settings = "./fix-syntax " + smc_settings
# SMC invocations
SMC_inv = Invocation()
SMC_inv.identifier = "specific"
SMC_inv.note = "Statistical model checking with limited information (no transition probabilities)"
SMC_inv.track_id = track
SMC_inv.add_command(smc_settings)
result += [SMC_inv]
return result | 4afeaa62cecd2ae21f6e112fe5c87dff54310a95 | 12,108 |
def see(node: "Position", move: Move = None) -> float:
"""Static-Exchange-Evaluation
Args:
node: The current position to see
move (Move, optional): The capture move to play. Defaults to None.
Returns:
float: The score associated with this capture. Positive is good.
"""
c = node.state.turn
bitboards = node.boards
if move is None:
return 0
if not move.is_capture:
return 0
i = 0
gain = [0] * 32
target = bitboards.piece_at(move._to)
if target is None:
return 0
occ = bitboards.occupancy
from_bb = Bitboard(1 << move._from)
attack_defend_bb = bitboards.attack_defend_to(move._to, c)
xrays = bitboards.xrays_bb
gain[i] = PIECE_VALUES[target._type]
assert target is not None
pt = (bitboards.piece_at(move._from))._type
while True:
i += 1
gain[i] = PIECE_VALUES[pt] - gain[i-1]
if max(-gain[i-1], gain[i]) < 0:
break
attack_defend_bb ^= from_bb
occ ^= from_bb
from_bb, pt = least_valuable_attacker(~c, bitboards, attack_defend_bb)
if not from_bb:
break
i -= 1
while i:
gain[i-1] = -max(-gain[i-1], gain[i])
i -= 1
return gain[0] | e6062b7cd09e9b2dca514e5be23b7fa870ff923f | 12,109 |
import sys
def new(option):
"""
Create a new message queue object; options must contain the type of
queue (which is the name of the child class), see above.
"""
options = option.copy()
qtype = options.pop("type", "DQS")
try:
__import__("messaging.queue.%s" % (qtype.lower()))
except SyntaxError:
raise SyntaxError("error importing dirq type: %s" % qtype)
except ImportError:
raise ImportError(
"you must install %s dependencies before using this module" %
(qtype, ))
try:
module = sys.modules["messaging.queue.%s" % (qtype.lower())]
return getattr(module, qtype)(**options)
except KeyError:
pass
raise ValueError("queue type not valid: %s" % qtype) | 9e285f4bee5442a41c10b32158595da5e03707de | 12,110 |
import numpy as np
def rk4(f, t0, y0, h, N):
    """Solve IVP given by y' = f(t, y), y(t_0) = y_0 with step size h > 0, for N steps,
using the Runge-Kutta 4 method.
Also works if y is an n-vector and f is a vector-valued function."""
t = t0 + np.array([i * h for i in range(N+1)])
m = len(y0)
y = np.zeros((N+1, m))
y[0] = y0
# Repeatedly approximate next value.
for n in range(N):
k1 = f(t[n], y[n])
k2 = f(t[n] + h/2, y[n] + k1 * h/2)
k3 = f(t[n] + h/2, y[n] + k2 * h/2)
k4 = f(t[n] + h, y[n] + k3 * h)
y[n+1] = y[n] + h * (k1 + 2 * k2 + 2 * k3 + k4) / 6
return t, y | e6b7c3d1ac0ea765a3ac9ebac69159dd2c2eab78 | 12,111 |
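A quick sanity check for rk4(), assuming numpy is imported as above: integrating y' = y from y(0) = 1 over [0, 1] with ten steps should land very close to e.
t, y = rk4(lambda t, y: y, t0=0.0, y0=[1.0], h=0.1, N=10)
print(y[-1, 0], np.exp(1.0))  # both approximately 2.718281828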
import os
def main():
"""
Example entry point; please see Enumeration example for more in-depth
comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool
"""
# Since this application saves images in the current folder
# we must ensure that we have permission to write to this folder.
# If we do not have permission, fail right away.
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
# Get current library version
version = system.GetLibraryVersion()
print('Library version: {}.{}.{}.{}\n'.format(version.major, version.minor, version.type, version.build))
# Retrieve list of cameras from the system
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print('Number of cameras detected: {}\n'.format(num_cameras))
# Finish if there are no cameras
if num_cameras == 0:
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
# Run example on each camera
for i, cam in enumerate(cam_list):
print('Running example for camera {}...\n'.format(i))
result &= run_single_camera(cam)
print('Camera {} example complete...\n'.format(i))
# Release reference to camera
del cam
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result | 62b846f3dd26106ea6dcc01bed841895f3691728 | 12,112 |
import numpy as np
import torch
def parse_predictions(est_data, gt_data, config_dict):
""" Parse predictions to OBB parameters and suppress overlapping boxes
Args:
est_data, gt_data: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
eval_dict = {}
pred_center = est_data['center'] # B,num_proposal,3
pred_heading_class = torch.argmax(est_data['heading_scores'], -1) # B,num_proposal
heading_residuals = est_data['heading_residuals_normalized'] * (
np.pi / config_dict['dataset_config'].num_heading_bin) # Bxnum_proposalxnum_heading_bin
pred_heading_residual = torch.gather(heading_residuals, 2,
pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_residual.squeeze_(2)
pred_size_class = torch.argmax(est_data['size_scores'], -1) # B,num_proposal
size_residuals = est_data['size_residuals_normalized'] * torch.from_numpy(
config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
pred_size_residual = torch.gather(size_residuals, 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
pred_sem_cls = torch.argmax(est_data['sem_cls_scores'], -1) # B,num_proposal
sem_cls_probs = softmax(est_data['sem_cls_scores'].detach().cpu().numpy()) # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
num_proposal = pred_center.shape[1]
# Since we operate in upright_depth coord for points, while util functions
# assume upright_camera coord.
bsize = pred_center.shape[0]
pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3))
pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy())
for i in range(bsize):
for j in range(num_proposal):
heading_angle = config_dict['dataset_config'].class2angle( \
pred_heading_class[i, j].detach().cpu().numpy(), pred_heading_residual[i, j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size( \
int(pred_size_class[i, j].detach().cpu().numpy()), pred_size_residual[i, j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, -heading_angle, pred_center_upright_camera[i, j, :])
pred_corners_3d_upright_camera[i, j] = corners_3d_upright_camera
K = pred_center.shape[1] # K==num_proposal
nonempty_box_mask = np.ones((bsize, K))
if config_dict['remove_empty_box']:
# -------------------------------------
# Remove predicted boxes without any point within them..
batch_pc = gt_data['point_clouds'].cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
# -------------------------------------
obj_logits = est_data['objectness_scores'].detach().cpu().numpy()
obj_prob = softmax(obj_logits)[:, :, 1] # (B,K)
if not config_dict['use_3d_nms']:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 2] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_2d_faster(boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[i, j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
return eval_dict, {'pred_corners_3d_upright_camera': pred_corners_3d_upright_camera,
'sem_cls_probs': sem_cls_probs,
'obj_prob': obj_prob,
'pred_sem_cls': pred_sem_cls} | 9d31b44e37e7af458084b29927eaa29d2e1889af | 12,113 |
import tensorflow as tf
def benchmark_op(op, burn_iters: int = 2, min_iters: int = 10):
"""Final endpoint for all kb.benchmarks functions."""
assert not tf.executing_eagerly()
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
bm = tf.test.Benchmark()
result = bm.run_op_benchmark(
sess, op, burn_iters=burn_iters, min_iters=min_iters
)
summarize(result)
return result | 4b46cbb4f487332b43e1c06daef294e9e01f13a5 | 12,114 |
import numpy as np
import tensorflow as tf
def run_truncated_sprt(list_alpha, list_beta, logits_concat, labels_concat, verbose=False):
""" Calculate confusion matrix, mean hitting time, and truncate rate of a batch.
Args:
list_alpha: A list of floats.
list_beta: A list of floats with the same length as list_alpha's.
logits_concat: A logit Tensor with shape (batch, (duration - order_sprt), order_sprt + 1, 2). This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice)
labels_concat: A binary label Tensor with shape (batch size,) with label = 0 or 1. This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice).
Returns:
dict_confmx_sprt: A dictionary with keys like "thresh=0.2342,-0.2342". Value is a confusion matrix Tensor.
dict_mean_hittimes: A dictionary with keys like "thresh=0.2342,-0.2342". Value is a mean hitting time.
dict_truncate_rates: A dictionary with keys like "thresh=0.2342,-0.2342". Value is an truncate rate.
"""
dict_confmx_sprt = dict()
dict_mean_hittimes = dict()
dict_var_hittimes = dict()
dict_truncate_rates = dict()
batch_size_tmp = labels_concat.shape[0]
for alpha, beta in zip(list_alpha, list_beta):
# Calc thresholds
alpha = float(alpha)
beta = float(beta)
thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
key = "thresh={:6.4f},{:7.4f}".format(thresh[0], thresh[1])
# Run truncated sprt
confmx, mean_hittime, var_hittime, truncate_rate = binary_truncated_sprt(logits_concat, labels_concat, alpha, beta)
dict_confmx_sprt[key] = confmx
dict_mean_hittimes[key] = mean_hittime
dict_var_hittimes[key] = var_hittime
dict_truncate_rates[key] = truncate_rate
if verbose:
print("====================================")
print("SPRT w/ alpha={}, beta={}".format(alpha, beta))
print("Thresholds = {}".format(thresh))
print("Confusion Matrix")
print(confmx)
print("Mean Hitting Time: {} +- {}".format(mean_hittime, tf.sqrt(var_hittime)))
print("truncate: {} / {} = {}".format(tf.round(truncate_rate*batch_size_tmp), batch_size_tmp, truncate_rate))
print("====================================")
return dict_confmx_sprt, dict_mean_hittimes, dict_var_hittimes, dict_truncate_rates | 7934b1de29c60a59df056cbb1e4dce42e76ca540 | 12,115 |
def get_users(metadata):
"""
Pull users, handles hidden user errors
Parameters:
metadata: sheet of metadata from mwclient
Returns:
the list of users
"""
users = []
for rev in metadata:
try:
users.append(rev["user"])
except (KeyError):
users.append(None)
return users | 48dbae6a63019b0e4c2236a97e147102fe4d8758 | 12,116 |
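A sketch of the hidden-user handling, using a hypothetical revision list shaped like mwclient metadata; entries without a "user" key become None.
revisions = [{"user": "Alice"}, {"userhidden": ""}, {"user": "Bob"}]
print(get_users(revisions))  # ['Alice', None, 'Bob']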
import sympy as S
from sympy.abc import t
def solve(FLT_MIN, FLT_MAX):
"""Solving cos(x) <= -0.99, dx/dt=1, x(0) = 0
# Basic steps:
# 1. First compute the n terms for each ode
# 2. Next replace the guard with ode(t), so that it is only in t
# 3. Then compute the number of terms needed for g(t)
# 4. Finally, compute g(t) = 0 and g(t)-2g(0) = 0
# 5. Note that computing number of terms "n" in taylor essentially
# guarantees that tᵣ - t ≤ floating point error only, specified by the
# polynomial solver.
"""
# XXX: This is the theta
def test_multivariate():
# LTI is easy to solve
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + 1)')
# Time varying, takes more time in general,
# with increasing power for t^n
# Xdiff = S.sympify('(5*x(t) + 2*y(t) + t**3)')
# Non linear with periodic functions
# Xdiff = S.sympify('sin(sqrt(x(t)+1))')
# import math
# FLT_MIN = 0
# FLT_MAX = 2*math.pi
# More complex ode
# Xdiff = S.sympify('sin(sin(x(t)+1))')
# The angles can only be between 0 and 2π
# import math
# FLT_MIN = -2*math.pi
# FLT_MAX = 2*math.pi
# A sqrt
# Xdiff = S.sympify('sqrt(x(t)+1)')
# The ones below need to have a reduced search space bound for
# continous variables.
# Another sqrt, does not seem to converge
# Xdiff = S.sympify('x(t)*t') # Does not work
# Now multiplication, seems to not coverge ever.
Xdiff = S.sympify('exp(2*x(t))') # Does not work either
# Using scaling factor, to reduce the bounds of the maximisation
# problem.
FLT_MIN = -1e1
FLT_MAX = 1e1
return FLT_MIN, FLT_MAX, Xdiff
FLT_MIN, FLT_MAX, tomaximize = test_multivariate()
xt = S.sympify('x(t)')
x = S.abc.x
yt = S.sympify('y(t)')
y = S.abc.y
# Coupled ode example
(tokens, nx) = getN({xt.diff(t): ([tomaximize],
{yt.diff(t): (xt,
# args always in
# same order for
# everyone
[x, y, t])},
# Always list all the replacements
{xt: x, yt: y},
[x, y, t])},
FLT_MIN=FLT_MIN, FLT_MAX=FLT_MAX, epsilon=1e-6)
# print(tokens)
print('required terms for θ satisfying Lipschitz constant:', nx)
# Now make the taylor polynomial
taylorxcoeffs = [5*S.pi/2, 1] + [0]*(nx-2)
# These are the smooth tokens
taylorxpoly = sum([t**i*v for i, v in zip(range(nx), taylorxcoeffs)])
# The theta' taylor polynomial
print('θ(t) = ', taylorxpoly)
# The guard function that needs the lipschitz constant
def guard():
gt = (S.cos(taylorxpoly)+0.99)
return gt.diff(t)
gt = S.sympify('g(t)')
tokens, n = getN({gt.diff(t): ([guard()], dict(), dict(), [t])})
# print(tokens)
print('Number of terms for cos(%s)+0.99: %s' % (taylorxpoly, n))
# Now we do the example of the ode with taylor polynomial
cosseries1 = S.fps(S.cos(taylorxpoly)+0.99, x0=0).polynomial(n=n)
print('Guard taylor polynomial:', cosseries1, '\n')
# print(S.simplify(cosseries1))
root = None
try:
root1 = S.nsolve(cosseries1, t, 0, dict=True)[0][t]
root = root1
except ValueError:
print('No root for g(t)=0')
# Now the second one, this one fails
# g(t) - 2*g(0) = 0
cosseries2 = S.fps(S.cos((5*S.pi/2) + t)-1.98, x0=0).polynomial(n=n)
# print(S.simplify(cosseries2))
try:
root2 = S.nsolve(cosseries2, t, 0, dict=True)[0][t]
root = min(root, root2)
except ValueError:
print('No root for g(t)-2*g(0) = 0')
print('guard Δt:', root) | 90288d73717d02b966beb66396e0be16f68f55f5 | 12,117 |
from tvm.contrib import graph_executor
def run_tvm_graph(
coreml_model, target, device, input_data, input_name, output_shape, output_dtype="float32"
):
"""Generic function to compile on relay and execute on tvm"""
if isinstance(input_data, list):
shape_dict = {}
dtype_dict = {}
for i, e in enumerate(input_name):
shape_dict[e] = input_data[i].shape
dtype_dict[e] = input_data[i].dtype
else:
shape_dict = {input_name: input_data.shape}
dtype_dict = {input_name: input_data.dtype}
mod, params = relay.frontend.from_coreml(coreml_model, shape_dict)
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
m = graph_executor.GraphModule(lib["default"](device))
# set inputs
if isinstance(input_data, list):
for i, e in enumerate(input_name):
m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
else:
m.set_input(input_name, tvm.nd.array(input_data.astype(input_data.dtype)))
# execute
m.run()
# get outputs
if isinstance(output_shape, list) and isinstance(output_dtype, list):
tvm_output_list = []
for i, s in enumerate(output_shape):
tvm_output = m.get_output(i, tvm.nd.empty((s), output_dtype[i]))
tvm_output_list.append(tvm_output.numpy())
return tvm_output_list
else:
if not output_shape:
tvm_output = m.get_output(0)
else:
tvm_output = m.get_output(0, tvm.nd.empty((output_shape), output_dtype))
return tvm_output.numpy() | 391b3e0dcca3c1893da69a6ce1ac219f7c56dfa0 | 12,118 |
import math
import numpy as np
def detect_peaks(data, srate):
    """
    Obtain maximum and minimum values from blood pressure or pleth waveform
the minlist is always one less than the maxlist
"""
ret = []
if not isinstance(data, np.ndarray):
data = np.array(data)
raw_data = np.copy(data)
raw_srate = srate
# resampling rate to 100Hz
data = resample_hz(data, srate, 100)
srate = 100
# upper and lower bound of the heart rate (Hz = /sec)
# heart rate = hf * 60;
fh = 200 / 60 # 3.3
fl = 30 / 60 # 0.5
# estimate hr
y1 = band_pass(data, srate, 0.5 * fl, 3 * fh)
# Divide the entire x into four regions and use the median of these
# hf = []
# for(var i = 0; i < 4; i++) {
# var subw = new Wav(srate, y1.vals.copy(data.length / 4 * i, data.length / 4 * (i+1)));
# hf[i] = subw.estimate_heart_rate(fl, fh);
# if(hf[i] == 0) {
# console.log("HR estimation failed, assume 75");
# hf[i] = 75 / 60;
# }
# }
# hf = hf.median();
# Whole heart freq estimation
hf = estimate_heart_freq(y1, srate)
if hf == 0:
print("HR estimation failed, assume 75")
hf = 75 / 60
# band pass filter again with heart freq estimation
y2 = band_pass(data, srate, 0.5 * fl, 2.5 * hf)
d2 = np.diff(y2)
# detect peak in gradient
p2 = detect_maxima(d2, 90)
# detect real peak
y3 = band_pass(data, srate, 0.5 * fl, 10 * hf)
p3 = detect_maxima(y3, 60)
# find closest p3 that follows p2
p4 = []
last_p3 = 0
for idx_p2 in p2:
idx_p3 = 0
for idx_p3 in p3:
if idx_p3 > idx_p2:
break
if idx_p3 != 0:
if last_p3 != idx_p3:
p4.append(idx_p3)
last_p3 = idx_p3
# nearest neighbor and inter beat interval correction
# p: location of detected peaks
pc = []
# find all maxima before preprocessing
m = detect_maxima(data, 0)
m = np.array(m)
# correct peaks location error due to preprocessing
last = -1
for idx_p4 in p4:
cand = find_nearest(m, idx_p4)
if cand != last:
pc.append(cand)
last = cand
ht = 1 / hf # beat interval (sec)
# correct false negatives (FN)
# Make sure if there is rpeak not included in the PC.
i = -1
while i < len(pc):
if i < 0:
idx_from = 0
else:
idx_from = pc[i]
if i >= len(pc) - 1:
idx_to = len(data)-1
else:
idx_to = pc[i+1]
# find false negative and fill it
if idx_to - idx_from < 1.75 * ht * srate:
i += 1
continue
# It can not be within 0.2 of both sides
idx_from += 0.2 * ht * srate
idx_to -= 0.2 * ht * srate
# Find missing peak and add it
# find the maximum value from idx_from to idx_to
idx_max = -1
val_max = 0
for j in range(np.searchsorted(m, idx_from), len(m)):
idx_cand = m[j]
if idx_cand >= idx_to:
break
if idx_max == -1 or val_max < data[idx_cand]:
val_max = data[idx_cand]
idx_max = idx_cand
# There is no candidate to this FN. Overtake
if idx_max != -1: # add idx_max and restart trom there
pc.insert(i+1, idx_max)
i -= 1
i += 1
# correct false positives (FP)
i = 0
while i < len(pc) - 1:
idx1 = pc[i]
idx2 = pc[i+1]
if idx2 - idx1 < 0.75 * ht * srate: # false positive
idx_del = i + 1 # default: delete i+1
if 1 < i < len(pc) - 2:
# minimize heart rate variability
idx_prev = pc[i-1]
idx_next = pc[i+2]
# find center point distance
d1 = abs(idx_next + idx_prev - 2 * idx1)
d2 = abs(idx_next + idx_prev - 2 * idx2)
if d1 > d2:
idx_del = i
else:
idx_del = i+1
elif i == 0:
idx_del = i
elif i == len(pc) - 2:
idx_del = i+1
pc.pop(idx_del)
i -= 1
i += 1
    # remove duplicates
i = 0
for i in range(0, len(pc) - 1):
if pc[i] == pc[i+1]:
pc.pop(i)
i -= 1
i += 1
# find nearest peak in real data
# We downsample x to srate to get maxidxs. ex) 1000 Hz -> 100 Hz
# Therefore, the position found by maxidx may differ by raw_srate / srate.
maxlist = []
ratio = math.ceil(raw_srate / srate)
for maxidx in pc:
        idx = int(maxidx * raw_srate / srate)  # estimated idx -> not precise
maxlist.append(max_idx(raw_data, idx - ratio - 1, idx + ratio + 1))
# get the minlist from maxlist
minlist = []
for i in range(len(maxlist) - 1):
minlist.append(min_idx(raw_data, maxlist[i], maxlist[i+1]))
return [minlist, maxlist] | ad327b10dd6bcecb3036ecfb5cdcf07defecf2ff | 12,119 |
from typing import Tuple
from typing import Dict
def add_entity_to_watchlist(client: Client, args) -> Tuple[str, Dict, Dict]:
"""Adds an entity to a watchlist.
Args:
client: Client object with request.
args: Usually demisto.args()
Returns:
Outputs.
"""
watchlist_name = args.get('watchlist_name')
entity_type = args.get('entity_type')
entity_name = args.get('entity_name')
    expiry_days = args.get('expiry_days', '30')
response = client.add_entity_to_watchlist_request(watchlist_name, entity_type, entity_name, expiry_days)
if 'successfull' not in response:
raise Exception(f'Failed to add entity {entity_name} to the watchlist {watchlist_name}.\n'
f'Error from Securonix is: {response}.')
human_readable = f'Added successfully the entity {entity_name} to the watchlist {watchlist_name}.'
return human_readable, {}, response | 28270c3fa0985458a1fc18f5fd4d2c8661eae1dc | 12,120 |
def is_base255(channels):
"""check if a color is in base 01"""
if isinstance(channels, str):
return False
return all(_test_base255(channels).values()) | e8e6176785303f8f1130c7e99f929ec183e145c5 | 12,121 |
def make_unrestricted_prediction(solution: SolverState) -> tuple[Role, ...]:
"""
Uses a list of true/false statements and possible role sets
to return a rushed list of predictions for all roles.
Does not restrict guesses to the possible sets.
"""
all_role_guesses, curr_role_counts = get_basic_guesses(solution)
solved = recurse_assign(solution, all_role_guesses, curr_role_counts, False)
switch_dict = get_switch_dict(solution)
final_guesses = tuple(solved[switch_dict[i]] for i in range(len(solved)))
if len(final_guesses) != const.NUM_ROLES:
raise RuntimeError("Could not find unrestricted assignment of roles.")
return final_guesses | 2662979b0fdca524dcea368daf7b11283906ecbb | 12,122 |
import argparse
def get_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(description="Expression aggregator")
parser.add_argument(
"-e", "--expressions", nargs="+", help="Expressions", required=True
)
parser.add_argument(
"-d", "--descriptors", nargs="+", help="Descriptors", required=True
)
parser.add_argument("-s", "--source", help="Source", required=True)
parser.add_argument(
"-t", "--expression-type", help="Expression type", required=True
)
parser.add_argument("-g", "--group-by", help="Group by", required=True)
parser.add_argument("-a", "--aggregator", help="Aggregator")
parser.add_argument("-b", "--box-plot-output", help="Box plot output file name")
parser.add_argument(
"-l", "--log-box-plot-output", help="Log box plot output file name"
)
parser.add_argument(
"-x", "--expressions-output", help="Expressions output file name"
)
return parser.parse_args() | a33401b0407ca8538f09918c8ec9074ca21e2438 | 12,123 |
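A sketch of a parse, overriding sys.argv with placeholder file names to show which flags are required.
import sys
sys.argv = ["aggregator", "-e", "expr1.tab", "-d", "desc1", "-s", "UCSC",
            "-t", "TPM", "-g", "species", "-a", "median"]
args = get_args()
print(args.aggregator, args.group_by)  # median species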
def computeFourteenMeVPoint(xs, E14='14.2 MeV', useCovariance=True, covariance=None):
"""
Compute the value of the cross section at 14.2 MeV.
If the covariance is provided, the uncertainty on the 14.2 MeV point will be computed.
:param xs: reference to the cross section
:param E14: the 14 MeV point to use (in case you want to override the default of 14.2 MeV)
:param useCovariance: use this to override covariance usage
:type useCovariance: bool
:param covariance: covariance to use when computing uncertainty on the spectral average.
If None (default: None), no uncertainty is computed.
:type covariance: covariance instance or None
:rtype: PQU
"""
return computeValueAtAPoint(xs, E14, useCovariance=useCovariance, covariance=covariance) | 4d3165518e227f0c1027d45507c6d67e1e27bf0b | 12,124 |
import numpy as np
import xarray as xr
def conn_reshape_directed(da, net=False, sep='-', order=None, rm_missing=False,
fill_value=np.nan, to_dataframe=False,
inplace=False):
"""Reshape a raveled directed array of connectivity.
This function takes a DataArray of shape (n_pairs, n_directions) or
(n_pairs, n_times, n_direction) where n_pairs reflects pairs of roi
(e.g 'roi_1-roi_2') and n_direction usually contains bidirected 'x->y' and
'y->x'. At the end, this function reshape the input array so that rows
contains the sources and columns the targets leading to a non-symmetric
DataArray of shape (n_roi, n_roi, n_times). A typical use case for this
function would be after computing the covariance based granger causality.
Parameters
----------
da : xarray.DataArray
Xarray DataArray of shape (n_pairs, n_times, n_directions) where
actually the roi dimension contains the pairs (roi_1-roi_2, roi_1-roi_3
etc.). The dimension n_directions should contains the dimensions 'x->y'
and 'y->x'
    net : bool | False
        If True, return the net directed connectivity (i.e. 'x->y' minus
        'y->x') instead of keeping both directions.
    sep : string | '-'
Separator used to separate the pairs of roi names.
order : list | None
List of roi names to reorder the output.
rm_missing : bool | False
When reordering the connectivity array, choose if you prefer to reindex
even if there's missing regions (rm_missing=False) or if missing
regions should be removed (rm_missing=True)
fill_value : float | np.nan
Value to use for filling missing pairs (e.g diagonal)
to_dataframe : bool | False
Dataframe conversion. Only possible if the da input does not contains
a time axis.
    inplace : bool | False
        If True, operate directly on the input DataArray instead of a copy.
    Returns
-------
da_out : xarray.DataArray
DataArray of shape (n_roi, n_roi, n_times)
See also
--------
conn_covgc
"""
assert isinstance(da, xr.DataArray)
if not inplace:
da = da.copy()
assert ('roi' in list(da.dims)) and ('direction' in list(da.dims))
if 'times' not in list(da.dims):
da = da.expand_dims("times")
# get sources, targets names and sorted full list
sources, targets, roi_tot = _untangle_roi(da, sep)
# transpose, reindex and reorder (if needed)
da_xy, da_yx = da.sel(direction='x->y'), da.sel(direction='y->x')
if net:
da = xr.concat((da_xy - da_yx, da_xy - da_yx), 'roi')
else:
da = xr.concat((da_xy, da_yx), 'roi')
da, order = _dataarray_unstack(da, sources, targets, roi_tot, fill_value,
order, rm_missing)
# dataframe conversion
if to_dataframe:
da = _dataframe_conversion(da, order)
return da | bb6747cc47b263545fce219f8357d8773fb428bc | 12,125 |
import shlex
import subprocess
def cmd_appetite(manifest, extra_params, num_threads=1, delete_logs=False):
"""Run appetite with defined params
:param manifest: manifest to reference
:param extra_params: extra params if needed
:param num_threads: Number of threads to use
:param delete_logs: Delete logs before running
:return: output from appetite call
"""
if delete_logs:
delete_log_dir()
create_log()
cmd = list(COMMON_CMD) + shlex.split("--num-conns %s --apps-manifest %s %s" % (
num_threads, manifest, extra_params))
return subprocess.check_call(cmd, cwd=SCRIPT_PATH, shell=False) | f22fdd1d436f616cac31517c0594d94416ef6366 | 12,126 |
def create_manager(user):
"""
Return a ManageDNS object associated with user (for history)
"""
if 'REVERSE_ZONE' in app.config:
revzone = app.config['REVERSE_ZONE']
else:
revzone = None
return ManageDNS(nameserver=app.config['SERVER'], forward_zone=app.config['FORWARD_ZONE'],
reverse_zone=revzone, user=user, key_name=key_name,
key_hash=key_hash) | 0832ce4353775a19cc015490f4febf6df6bd8f04 | 12,127 |
from datetime import datetime
import requests
import json
import sys
import time
import operator
def rank_urls(urls, year=None, filename=None):
"""
Takes a list of URLs and searches for them in
Hacker News submissions. Prints or saves each
URL and its total points to a given filename
in descending order of points. Searches for
submissions from all years, unless year is given.
"""
now = datetime.now()
if year:
if year > now.year:
print("Please enter a valid year parameter (example: " + str(now.year) + ") or leave out for all time.")
return None
else:
pass
leaderboard = {}
count = 0
for url in urls:
query = 'http://hn.algolia.com/api/v1/search?query=' + url + '&restrictSearchableAttributes=url'
r = requests.get(query)
if r:
data = json.loads(r.text)
total_score = 0
for item in data['hits']:
date = item['created_at'][:-5]
date = datetime.strptime(date, '%Y-%m-%dT%H:%M:%S')
now = datetime.now()
if not year:
total_score += item['points']
elif date.year != year:
pass
else:
total_score += item['points']
count += 1
progress = (count / len(urls) ) * 100.00
sys.stdout.write(" Progress: %d%% \r" % (progress) )
sys.stdout.flush()
leaderboard[url] = total_score
time.sleep(1) # Limit to 1 api request per second
sorted_leaderboard = reversed(sorted(leaderboard.items(), key=operator.itemgetter(1)))
if filename:
f = open(filename, 'w')
for key, value in sorted_leaderboard:
f.write(str(value) + "," + key + '\n')
f.close()
print('Results saved to ' + filename)
else:
for key, value in sorted_leaderboard:
print(str(value) + "," + key) | 80de668d98cfbcca8ca6aeae269d6d2a683ae2d3 | 12,128 |
import numpy as np
def get_psi_part(v, q):
"""Return the harmonic oscillator wavefunction for level v on grid q."""
Hr = make_Hr(v + 1)
return N(v) * Hr[v](q) * np.exp(-q * q / 2.0) | 9d4d6a62b7ee434d5d92a694a4a2491fd8a94f97 | 12,129 |
import os
def get_tempdir() -> str:
"""Get the directory where temporary files are stored."""
return next((os.environ[var] for var in (
'XDG_RUNTIME_DIR', 'TMPDIR', 'TMP', 'TEMP'
) if var in os.environ), '/tmp') | 95c90d9f297bbd76e1f083d07058db1b46c275ba | 12,130 |
import numpy as np
def get_pij(d, scale, i, optim="fast"):
"""
Compute probabilities conditioned on point i from a row of distances
d and a Gaussian scale (scale = 2*sigma^2). Vectorized and unvectorized
versions available.
"""
if optim == "none":
#
# TO BE DONE
#
return get_pij(d, scale, i, optim = "fast")
else:
d_scaled = -d/scale
d_scaled -= np.max(d_scaled)
exp_D = np.exp(d_scaled)
exp_D[i] = 0
return exp_D/np.sum(exp_D) | 4a1ee1d91ba949789cc96d2ed1873197afbf4b67 | 12,131 |
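A small numeric sketch of get_pij() for a three-point distance row, conditioning on point 0 (numpy assumed imported as above); the probability for the point itself is forced to zero.
d = np.array([0.0, 1.0, 4.0])
print(get_pij(d, scale=2.0, i=0))  # approximately [0.    0.818 0.182]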
import os
import hashlib
def hash_file(path):
"""
Returns the hash of a file.
Based on https://stackoverflow.com/questions/22058048/hashing-a-file-in-python
"""
# Return error as hash, if file does not exist
if not os.path.exists(path):
return f"error hashing file, file does not exist: {path}"
# BUF_SIZE is totally arbitrary, change for your app!
BUF_SIZE = 65536 # lets read stuff in 64kb chunks!
# Use sha1
sha1 = hashlib.sha1()
# Read and hash file (with buffering)
with open(path, "rb") as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha1.update(data)
# Return hash
return sha1.hexdigest() | 722afbe2b6dafa35fbd91b28b4b33d407057b562 | 12,132 |
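A sketch exercising hash_file() on a throwaway temporary file; the expected output is the well-known SHA-1 digest of the string "hello".
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
    tmp.write("hello")
print(hash_file(tmp.name))  # aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
os.remove(tmp.name)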
import os
import glob
import itertools
import re
def get_section_names(target_dir, dir_name, ext="wav"):
"""
Get section name (almost equivalent to machine ID).
target_dir : str
base directory path
dir_name : str
sub directory name
ext : str (default="wav)
file extension of audio files
return :
section_names : list [ str ]
list of section names extracted from the names of audio files
"""
# create test files
query = os.path.abspath(
"{target_dir}/{dir_name}/*.{ext}".format(
target_dir=target_dir, dir_name=dir_name, ext=ext
)
)
file_paths = sorted(glob.glob(query))
# extract section names
section_names = sorted(
list(
set(
itertools.chain.from_iterable(
[re.findall("section_[0-9][0-9]", ext_id) for ext_id in file_paths]
)
)
)
)
return section_names | 1fea77ad452b936019201ff35fe8479a8ad7efd5 | 12,133 |
def get_user_input(prompt: str, current_setting: str):
"""
Get user input
:param prompt: prompt to display
:param current_setting: current value
:return:
"""
if current_setting != '':
print(f'-- Current setting: {current_setting}')
use_current = '/return to use current'
else:
use_current = ''
user_ip = ''
while user_ip == '':
user_ip = input(f'{prompt} [q to quit{use_current}]: ')
if user_ip.lower() == 'q':
break
if user_ip == '' and current_setting != '':
user_ip = current_setting
return user_ip | 358bd937db4ae111eb515385f0f61391a7ae665c | 12,134 |
def class_is_u16_len(cls):
"""
Return True if cls_name is an object which uses initial uint16 length
"""
ofclass = loxi_globals.unified.class_by_name(cls)
if not ofclass:
return False
if len(ofclass.members) < 1:
return False
m = ofclass.members[0]
if not isinstance(m, ir.OFLengthMember):
return False
if m.oftype != "uint16_t":
return False
return True | f45a35e98ff0a2cf6d10ac31e5e31b501f7edcfd | 12,135 |
import zlib
def uploadfile(ticket_id):
"""
    Attach a file to the ticket.
"""
if "file" not in request.files:
return "arquivo inválido"
filename = request.files.get("file").filename
maxfilesize = int(cfg("attachments", "max-size"))
blob = b""
filesize = 0
while True:
chunk = request.files.get("file").file.read(4096)
if not chunk:
break
chunksize = len(chunk)
if filesize + chunksize > maxfilesize:
return "erro: arquivo maior do que máximo permitido"
filesize += chunksize
blob += chunk
log.debug(type(blob))
blob = zlib.compress(blob)
username = current_user()
with db_trans() as c:
c.execute(
"""
insert into files (
ticket_id,
name,
user,
size,
contents
)
values (
:ticket_id,
:filename,
:username,
:filesize,
:blob
)
""",
{
"ticket_id": ticket_id,
"filename": filename,
"username": username,
"filesize": filesize,
"blob": blob,
},
)
c.execute(
"""
update tickets
set datemodified = datetime('now', 'localtime')
where id = :ticket_id
""",
{"ticket_id": ticket_id},
)
return redirect("/ticket/%s" % ticket_id) | d6dae213ff1b6f48b1cd2efb739e52eba6531692 | 12,136 |
def split(data, train_ids, test_ids, valid_ids=None):
"""Split data into train, test (and validation) subsets."""
datasets = {
"train": (
tuple(map(lambda x: x[train_ids], data[0])),
data[1][train_ids],
),
"test": (tuple(map(lambda x: x[test_ids], data[0])), data[1][test_ids]),
}
if valid_ids is not None:
datasets["valid"] = (
tuple(map(lambda x: x[valid_ids], data[0])),
data[1][valid_ids],
)
else:
datasets["valid"] = None
return datasets | 0156d39a5920c5ba7e3ab05a85358b1a960cf239 | 12,137 |
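A toy sketch of split() with numpy arrays: data is a (features_tuple, targets) pair and the id arguments are plain index arrays.
import numpy as np
features = (np.arange(12).reshape(6, 2),)
targets = np.arange(6)
datasets = split((features, targets), train_ids=np.array([0, 1, 2, 3]), test_ids=np.array([4, 5]))
print(datasets["train"][1], datasets["test"][1])  # [0 1 2 3] [4 5]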
def parse_value_file(path):
"""return param: [(value type, value)]"""
data = {}
samples = [x.strip("\n").split("\t") for x in open(path)]
for row in samples:
parameter = row[0]
values = [x for x in row[1:] if x != SKIP_VAL]
if values != []:
if parameter not in data:
data[parameter] = []
data[parameter] += values
return data | 7dd51f21877ec8ce4a8a64c288a5e862ea0e2a52 | 12,138 |
import os
def get_stop_words(stop_text, filename='ChineseStopWords.txt'):
"""读取指定停用词文件"""
_fp = os.path.join(stopwords_path, filename)
with open(_fp, 'r', encoding='utf-8') as f:
lines = f.readlines()
stop_words = [word.strip() for word in lines]
if stop_text:
input_stop_words = stop_text.strip().split('\n')
if input_stop_words:
stop_words.extend(input_stop_words)
return stop_words | 8c5355519e58ddcf82c87c9f357af614b9021188 | 12,139 |
def _is_comments_box(shape):
""" Checks if this shape represents a Comments question; RECTANGLE with a green outline """
if shape.get('shapeType') != 'RECTANGLE':
return False
color = get_dict_nested_value(shape, 'shapeProperties', 'outline', 'outlineFill', 'solidFill', 'color', 'rgbColor')
return 'blue' not in color and 'red' not in color and 'green' in color and color.get('green') == 1 | 5fa4abba6cc0db3552e90bd73ea9aa7659665ffe | 12,140 |
import os
def read_tree(path):
"""Returns a dict with {filepath: content}."""
if not os.path.isdir(path):
return None
out = {}
for root, _, filenames in os.walk(path):
for filename in filenames:
p = os.path.join(root, filename)
with open(p, 'rb') as f:
out[os.path.relpath(p, path)] = f.read()
return out | 1c3c7be9723ac9f60ab570a7287b067dd2c5536e | 12,141 |
def get_config(cfg, name):
"""Given the argument name, read the value from the config file.
The name can be multi-level, like 'optimizer.lr'
"""
name = name.split('.')
suffix = ''
for item in name:
assert item in cfg, f'attribute {item} not cfg{suffix}'
cfg = cfg[item]
suffix += f'.{item}'
return cfg | 4b0a8eedb057a26d67cd5c9f7698c33754b29249 | 12,142 |
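A minimal sketch of the dotted lookup against a hypothetical nested config dict.
cfg = {"optimizer": {"lr": 0.001, "momentum": 0.9}, "epochs": 10}
print(get_config(cfg, "optimizer.lr"))  # 0.001
print(get_config(cfg, "epochs"))        # 10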
def get_current_frame_content_entire_size(driver):
# type: (AnyWebDriver) -> ViewPort
"""
:return: The size of the entire content.
"""
try:
width, height = driver.execute_script(_JS_GET_CONTENT_ENTIRE_SIZE)
except WebDriverException:
raise EyesError('Failed to extract entire size!')
return dict(width=width, height=height) | 6df557ae5f628897a0d178695a13349787532168 | 12,143 |
def conv_slim_capsule(input_tensor,
input_dim,
output_dim,
layer_name,
input_atoms=8,
output_atoms=8,
stride=2,
kernel_size=5,
padding='SAME',
**routing_args):
"""Builds a slim convolutional capsule layer.
This layer performs 2D convolution given 5D input tensor of shape
`[batch, input_dim, input_atoms, input_height, input_width]`. Then refines
the votes with routing and applies Squash non linearity for each capsule.
Each capsule in this layer is a convolutional unit and shares its kernel over
the position grid and different capsules of layer below. Therefore, number
of trainable variables in this layer is:
kernel: [kernel_size, kernel_size, input_atoms, output_dim * output_atoms]
bias: [output_dim, output_atoms]
Output of a conv2d layer is a single capsule with channel number of atoms.
Therefore conv_slim_capsule is suitable to be added on top of a conv2d layer
with num_routing=1, input_dim=1 and input_atoms=conv_channels.
Args:
    input_tensor: tensor, of rank 5. Last two dimensions representing height
and width position grid.
input_dim: scalar, number of capsules in the layer below.
output_dim: scalar, number of capsules in this layer.
layer_name: string, Name of this layer.
input_atoms: scalar, number of units in each capsule of input layer.
output_atoms: scalar, number of units in each capsule of output layer.
stride: scalar, stride of the convolutional kernel.
kernel_size: scalar, convolutional kernels are [kernel_size, kernel_size].
padding: 'SAME' or 'VALID', padding mechanism for convolutional kernels.
**routing_args: dictionary {leaky, num_routing}, args to be passed to the
update_routing function.
Returns:
Tensor of activations for this layer of shape
`[batch, output_dim, output_atoms, out_height, out_width]`. If padding is
'SAME', out_height = in_height and out_width = in_width. Otherwise, height
and width is adjusted with same rules as 'VALID' in tf.nn.conv2d.
"""
with tf.variable_scope(layer_name):
# convolution. return [batch_size, 1, 32, 8, 6, 6]
kernel = variables.weight_variable(shape=[
kernel_size, kernel_size, input_atoms, output_dim * output_atoms
])
biases = variables.bias_variable([output_dim, output_atoms, 1, 1])
votes, votes_shape, input_shape = _depthwise_conv3d(
input_tensor, kernel, input_dim, output_dim, input_atoms, output_atoms,
stride, padding)
# convolution End
with tf.name_scope('routing'):
logit_shape = tf.stack([
input_shape[0], input_dim, output_dim, votes_shape[2], votes_shape[3]
])
biases_replicated = tf.tile(biases,
[1, 1, votes_shape[2], votes_shape[3]])
activations = _update_routing(
votes=votes,
biases=biases_replicated,
logit_shape=logit_shape,
num_dims=6,
input_dim=input_dim,
output_dim=output_dim,
**routing_args)
return activations | 626719fa607c7e02e2315d5082e9536b995ab080 | 12,144 |
import numpy as np
def p_op_mean0_update(prev_p_op_mean0: float, p_op_var0: float, op_choice: int):
"""0-ToM updates mean choice probability estimate"""
# Input variable transforms
p_op_var0 = np.exp(p_op_var0)
# Update
new_p_op_mean0 = prev_p_op_mean0 + p_op_var0 * (
op_choice - inv_logit(prev_p_op_mean0)
)
# For numerical purposes, according to the VBA package
new_p_op_mean0 = logit(inv_logit(new_p_op_mean0))
return new_p_op_mean0 | 2a66e0e6089813c8605e658bb68c103d2b07515d | 12,145 |
from aiida.orm import QueryBuilder, Code
from aiida.common.exceptions import NotExistent
def get_last_code(entry_point_name):
"""Return a `Code` node of the latest code executable of the given entry_point_name in the database.
The database will be queried for the existence of a inpgen node.
If this is not exists and NotExistent error is raised.
:param entry_point_name: string
:return: the uuid of a inpgen `Code` node
:raise: aiida.common.exceptions.NotExistent
"""
filters = {'attributes.input_plugin': {'==': entry_point_name}}
builder = QueryBuilder().append(Code, filters=filters)
builder.order_by({Code: {'ctime': 'asc'}})
results = builder.first()
if not results:
raise NotExistent(f'ERROR: Could not find any Code in the database with entry point: {entry_point_name}!')
else:
inpgen = results[0]
return inpgen.uuid | 801ef01075dccaf2d6d91c8bfcd9f038dc749ba7 | 12,146 |
def digamma(x):
"""Digamma function.
Parameters
----------
x : array-like
Points on the real line
out : ndarray, optional
Output array for the values of `digamma` at `x`
Returns
-------
ndarray
Values of `digamma` at `x`
"""
return _digamma(x) | fb0eb21ee4255851aaa90bea1ad2b8729c7b0137 | 12,147 |
import h5py
def get_time_index_dataset_from_file(the_file: h5py.File) -> h5py.Dataset:
"""Return the dataset for time indices from the H5 file object."""
return the_file[TIME_INDICES] | dfcd2017285ac252becbb2de7d1b4c4eb178534e | 12,148 |
import os
def get_database_connection():
"""Возвращает соединение с базой данных Redis, либо создаёт новый, если он ещё не создан."""
global _database
if _database is None:
database_password = os.getenv("DB_PASSWORD", default=None)
database_host = os.getenv("DB_HOST", default='localhost')
database_port = os.getenv("DB_PORT", default=6379)
_database = Client(
host=database_host,
port=database_port,
password=database_password,
decode_responses=True
)
return _database | 3417bc17a357a7bcd54131301f3963a10d76e027 | 12,149 |
def is_feature_enabled(feature_name):
"""A short-form method for server-side usage. This method evaluates and
returns the values of the feature flag, using context from the server only.
Args:
feature_name: str. The name of the feature flag that needs to
be evaluated.
Returns:
bool. The value of the feature flag, True if it's enabled.
"""
return _evaluate_feature_flag_value_for_server(feature_name) | 65c3b74988d7eb145352d2835b90a60abedfeba1 | 12,150 |
def str_to_size(size_str):
"""
Receives a human size (i.e. 10GB) and converts to an integer size in
mebibytes.
Args:
size_str (str): human size to be converted to integer
Returns:
int: formatted size in mebibytes
Raises:
ValueError: in case size provided in invalid
"""
if size_str is None:
return None
# no unit: assume mebibytes as default and convert directly
if size_str.isnumeric():
return int(size_str)
size_str = size_str.upper()
# check if size is non-negative number
if size_str.startswith('-'):
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# decimal units are converted to bytes and then to mebibytes
dec_units = ('KB', 'MB', 'GB', 'TB')
for index, unit in enumerate(dec_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(size_str[:-2]) * pow(1000, index+1)
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
# result is returned in mebibytes
return int(size_int / pow(1024, 2))
# binary units are just divided/multipled by powers of 2
bin_units = ('KIB', 'MIB', 'GIB', 'TIB')
for index, unit in enumerate(bin_units):
# unit used is different: try next
if not size_str.endswith(unit):
continue
try:
size_int = int(int(size_str[:-3]) * pow(1024, index-1))
except ValueError:
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None
return size_int
raise ValueError(
'Invalid size format: {}'.format(size_str)) from None | 0051b7cf55d295a4fffcc41ed5b0d900243ef2da | 12,151 |
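# Illustrative usage sketch for str_to_size above, not part of the original
# snippet; it assumes the function is in scope and simply exercises the
# conversion rules described in its docstring.
assert str_to_size('512') == 512      # bare number: already mebibytes
assert str_to_size('1GiB') == 1024    # binary unit: 1 GiB = 1024 MiB
assert str_to_size('10GB') == 9536    # decimal unit: 10e9 bytes -> int(1e10 / 1024**2)
try:
    str_to_size('-5GB')
except ValueError:
    pass                              # negative sizes are rejected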
def point_line_distance(point, line):
"""Distance between a point and great circle arc on a sphere."""
start, end = line
if start == end:
dist = great_circle_distance(point, start, r=1)/np.pi*180
else:
dist = cross_track_distance(point, line, r=1)
dist = abs(dist/np.pi*180)
return dist | 472b9134034fb0f0e03ad4964f97c2be7337db56 | 12,152 |
def uniq_by(array, iteratee=None):
"""This method is like :func:`uniq` except that it accepts iteratee which
is invoked for each element in array to generate the criterion by which
uniqueness is computed. The order of result values is determined by the
order they occur in the array. The iteratee is invoked with one argument:
``(value)``.
Args:
array (list): List to process.
iteratee (mixed, optional): Function to transform the elements of the
arrays. Defaults to :func:`.identity`.
Returns:
list: Unique list.
Example:
>>> uniq_by([1, 2, 3, 1, 2, 3], lambda val: val % 2)
[1, 2]
.. versionadded:: 4.0.0
"""
return list(iterunique(array, iteratee=iteratee)) | c4398add1597a447f400bd6c100cc10eda4e63a4 | 12,153 |
def process_lvq_pak(dataset_name='lvq-pak', kind='all', numeric_labels=True, metadata=None):
"""
kind: {'test', 'train', 'all'}, default 'all'
numeric_labels: boolean (default: True)
if set, target is a vector of integers, and label_map is created in the metadata
to reflect the mapping to the string targets
"""
untar_dir = interim_data_path / dataset_name
unpack_dir = untar_dir / 'lvq_pak-3.1'
if kind == 'train':
data, target = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
elif kind == 'test':
data, target = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
elif kind == 'all':
data1, target1 = read_space_delimited(unpack_dir / 'ex1.dat', skiprows=[0,1])
data2, target2 = read_space_delimited(unpack_dir / 'ex2.dat', skiprows=[0])
data = np.vstack((data1, data2))
target = np.append(target1, target2)
else:
raise Exception(f'Unknown kind: {kind}')
if numeric_labels:
if metadata is None:
metadata = {}
mapped_target, label_map = normalize_labels(target)
metadata['label_map'] = label_map
target = mapped_target
dset_opts = {
'dataset_name': dataset_name,
'data': data,
'target': target,
'metadata': metadata
}
return dset_opts | 7d9aaed88fb20dc151c61d23760e05e77965838c | 12,154 |
def StrToPtkns(path_string):
""" The inverse of PtknsToStr(), this function splits a string like
'/usr/local/../bin/awk' into ['usr','local','..','bin','awk'].
For illustrative purposes only. Use text.split('/') directly instead."""
    return path_string.split('/') | c6259c2ae34f987d1e6cd8a23bec963c8cd4b466 | 12,155
def load_key(file, callback=util.passphrase_callback):
# type: (AnyStr, Callable) -> EC
"""
Factory function that instantiates a EC object.
:param file: Names the filename that contains the PEM representation
of the EC key pair.
:param callback: Python callback object that will be invoked
if the EC key pair is passphrase-protected.
"""
with BIO.openfile(file) as bio:
return load_key_bio(bio, callback) | 72d47a88d80141b385f8212134fb682507ce47d4 | 12,156 |
from fnmatch import fnmatch
def glob_path_match(path: str, pattern_list: list) -> bool:
"""
Checks if path is in a list of glob style wildcard paths
:param path: path of file / directory
:param pattern_list: list of wildcard patterns to check for
:return: Boolean
"""
return any(fnmatch(path, pattern) for pattern in pattern_list) | 7c21e8f1c441641990826cf1d6d29d4add40e9ca | 12,157 |
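# Illustrative usage sketch for glob_path_match above, not part of the original
# snippet; the paths and patterns are made-up examples.
assert glob_path_match('/var/log/app.log', ['/var/log/*.log', '/tmp/*'])
assert not glob_path_match('/etc/passwd', ['/var/*', '*.log'])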
def sample_df(df, col_name='family', n_sample_per_class=120, replace = False):
"""
samples the dataframe based on a column, duplicates only if the
number of initial rows < required sample size
"""
samples = df.groupby(col_name)
list_cls = df[col_name].unique()
df_lst = []
for cls in list_cls:
cls_df = samples.get_group(cls)
if (cls_df.shape[0] < n_sample_per_class) and (replace==False):
cls_sample = cls_df
else:
cls_sample = cls_df.sample(n=n_sample_per_class,replace=replace,random_state=42)
df_lst.append(cls_sample)
df_sampled = pd.concat(df_lst, sort=False)
df_sampled = shuffle(df_sampled)
return df_sampled | cc229e9cbd4a094b9a42893f15d99303f1f14c2d | 12,158 |
import base64
def inventory_user_policies_header(encode):
"""generate output header"""
if encode == 'on':
return misc.format_line((
base64.b64encode(str("Account")),
base64.b64encode(str("UserName")),
base64.b64encode(str("PolicyName")),
base64.b64encode(str("Policy"))
))
else:
return misc.format_line((
str("Account"),
str("UserName"),
str("PolicyName"),
str("Policy")
)) | 80d170505c0f05e48c2854c9f5370d161de953fb | 12,159 |
def fields_dict(cls):
"""
Return an ordered dictionary of ``attrs`` attributes for a class, whose
keys are the attribute names.
:param type cls: Class to introspect.
:raise TypeError: If *cls* is not a class.
:raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs``
class.
:rtype: an ordered dict where keys are attribute names and values are
`attr.Attribute`\\ s. This will be a `dict` if it's
naturally ordered like on Python 3.6+ or an
:class:`~collections.OrderedDict` otherwise.
.. versionadded:: 18.1.0
"""
if not isclass(cls):
raise TypeError("Passed object must be a class.")
attrs = getattr(cls, "__attrs_attrs__", None)
if attrs is None:
raise NotAnAttrsClassError(
"{cls!r} is not an attrs-decorated class.".format(cls=cls)
)
return ordered_dict(((a.name, a) for a in attrs)) | 863106132a8ce27c8cb12c8ace8f4204b43484c3 | 12,160 |
def cdlxsidegap3methods(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of upside/downside gap three methods for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLXSIDEGAP3METHODS(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlxsidegap3methods": val,
}
) | f77f8e7404b2be942919a652facbfea412e962d3 | 12,161 |
def finditer(pattern, string, flags=0):
"""Return an iterator over all non-overlapping matches in the
string. For each match, the iterator returns a match object.
Empty matches are included in the result."""
return _pyre().finditer(pattern, string, flags) | ff84f88a200b469bbea010b04d4b3f36fd340c9c | 12,162 |
def nufft_j(x, y, freq = None, period_max=1., period_min=.5/24, window=False, oversamp=10.):
"""
    nufft_j(x, y, freq=None, period_max=1.,
            period_min=.5/24, window=False, oversamp=10.)
    Basic NUFFT amplitude spectrum,
    usable for unevenly sampled data
"""
srt = np.argsort(x)
x = x[srt] # get sorted x, y arrays
y = y[srt]
if freq is None:
# Get a good frequency sampling, based on scargle in IDL
# freq = LombScargle(x,y).autofrequency()
# minimum_frequency=1./period_max,maximum_frequency=1./period_min)
freq = freq_grid(x,fmin=1./period_max,fmax=1./period_min,oversamp=oversamp)
# create array to hold fft results
fft = np.zeros_like(freq)
if window:
np.absolute(nufft.nufft3(x,y/y,freq*np.pi*2),out=fft)
else:
np.absolute(nufft.nufft3(x,y-np.nanmean(y),freq*np.pi*2),out=fft)
return fft,freq | dc690bd294c28d8b70befc1463eaeed018bf98bf | 12,163 |
import logging
def logger(verbosity=levels['error'], log_file=None):
"""Create a logger which streams to the console, and optionally a file."""
# create/get logger for this instance
logger = logging.getLogger(__name__)
logger.setLevel(levels['debug'])
fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
# with stream (console) handle
ch = logging.StreamHandler()
ch.setLevel(verbosity)
ch.setFormatter(fmt)
logger.addHandler(ch)
# optionally with file handle
if log_file:
log_file.setFormatter(fmt)
logger.addHandler(log_file)
return logger | da17765b4d52b388920873df85fadeeed262c2d7 | 12,164 |
import sys
def head(input_file, in_format, nrows):
"""
    Print the first nrows rows of a table stored in PARQUET, TSV/CSV or HDF5 format
"""
# Guess format if not specified:
if in_format.upper() == "AUTO":
in_format = utils.guess_format(input_file)
# Read PARQUET:
if in_format.upper() == "PARQUET":
df = pd.read_parquet(input_file)
print(df.head(nrows), file=sys.stdout)
if in_format.upper() == "TSV" or in_format.upper() == "CSV":
df = pd.read_csv(
input_file, sep="\t" if in_format.upper() == "TSV" else ",", nrows=nrows
)
print(df, file=sys.stdout)
elif in_format.upper() == "HDF5":
h = h5py.File(input_file, "r")
dct = {k: h[k][:nrows] for k in h.keys()}
h.close()
df = pd.DataFrame.from_dict(dct)
print(df, file=sys.stdout)
return 0 | 9ed8b83ea91c48672f3f896cf1c8d1ec12100029 | 12,165 |
def getNamespacePermissions(paths):
"""Get L{Namespace}s and L{NamespacePermission}s for the specified paths.
@param paths: A sequence of L{Namespace.path}s to get L{Namespace}s and
L{NamespacePermission}s for.
@return: A C{ResultSet} yielding C{(Namespace, NamespacePermission)}
2-tuples for the specified L{Namespace.path}s.
"""
store = getMainStore()
return store.find((Namespace, NamespacePermission),
NamespacePermission.namespaceID == Namespace.id,
Namespace.path.is_in(paths)) | cf6c9a898bdc08130702d4aeb6570790a9dc8edc | 12,166 |
def plot(x, y, ey=[], ex=[], frame=[], kind="scatter", marker_option=".",
ls="-", lw=1, label="", color="royalblue", zorder=1, alpha=1.,
output_folder="", filename=""):
"""
    Creates a plot (plot, scatter or errorbar).
    Parameters
    ----------
    x : array-like
        x values
    y : array-like
        y values
    ey : array_like
        Errors on the y values
    ex : array_like
        Errors on the x values
    kind : string
        The kind of plot.
        Possible values are "plot" (default), "scatter", "errorbar" and "bar".
    marker_option : string
        Sets the marker option for the plot kinds "plot" and "scatter" and
        the fmt option for the plot kind "errorbar".
    ls : string
        linestyle
    lw : float
        linewidth
    zorder : int
        The drawing layer (z-order) of the data to plot
    Returns
    -------
    frame : list
        The [fig, plot] pair
"""
#error arrays
if len(ex)==1:
ex = np.ones(len(x))*ex[0]
elif ex==[]:
ex = np.zeros(len(x))
if len(ey)==1:
ey = np.ones(len(y))*ey[0]
#plotting
fig, plot = plt.subplots(1,1) if frame == [] else frame
if kind=="plot":
plot.plot(x, y, color=color, marker=marker_option, ls=ls, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="scatter":
plot.scatter(x, y, color=color, marker=marker_option, lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="errorbar":
plot.errorbar(x, y, ey, ex, color=color, fmt=marker_option, ls="", lw=lw, label=label, zorder=zorder, alpha=alpha)
elif kind=="bar":
plot.bar(x, y, color=color, label=label, zorder=zorder, alpha=alpha)
#saving plot
if filename!="":
fig.savefig(output_folder+filename,bbox_inches='tight',pad_inches=pad_inches)
return [fig,plot] | 038a12ab841a617bf1ca3106d5664f8942c9e259 | 12,167 |
def find_nominal_hv(filename, nominal_gain):
"""
Finds nominal HV of a measured PMT dataset
Parameters
----------
filename: string
    nominal_gain: float
gain for which the nominal HV should be found
Returns
-------
nominal_hv: int
nominal HV
"""
f = h5py.File(filename, "r")
gains = []
hvs = []
keys = f.keys()
for key in keys:
gains.append(f[key]["fit_results"]["gain"][()])
hvs.append(int(key))
f.close()
gains = np.array(gains)
hvs = np.array(hvs)
diff = abs(np.array(gains) - nominal_gain)
nominal_hv = int(hvs[diff == np.min(diff)])
return nominal_hv | 122c5c14314e1ad3521a67a7b9287969a471818d | 12,168 |
def parse_match(field, tokens):
"""Parses a match or match_phrase node
:arg field: the field we're querying on
:arg tokens: list of tokens to consume
:returns: list of match clauses
"""
clauses = []
while tokens and tokens[-1] not in (u'OR', u'AND'):
token = tokens.pop()
if token.startswith(u'"'):
clauses.append(build_match_phrase(field, token[1:-1]))
else:
clauses.append(build_match(field, token))
return clauses | ac970f319b74317637c31265981ecebab6ca9611 | 12,169 |
def get_filesystem(namespace):
"""
Returns a patched pyfilesystem for static module storage based on
`DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
"""
if DJFS_SETTINGS['type'] == 'osfs':
return get_osfs(namespace)
elif DJFS_SETTINGS['type'] == 's3fs':
return get_s3fs(namespace)
else:
raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type'])) | ae00307c0c38a554bebe1bbd940ace0f2d154b47 | 12,170 |
def sym_normalize_adj(adj):
"""symmetrically normalize adjacency matrix"""
adj = sp.coo_matrix(adj)
degree = np.array(adj.sum(1)).flatten()
d_inv_sqrt = np.power(np.maximum(degree, np.finfo(float).eps), -0.5)
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo() | a172ec18cd88ac8a50356453eb159c001b21d9b1 | 12,171 |
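# Illustrative usage sketch for sym_normalize_adj above, not part of the original
# snippet; assumes numpy as np and scipy.sparse as sp, as in the function body.
import numpy as np
import scipy.sparse as sp

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])            # 3-node path graph, degrees [1, 2, 1]
norm = sym_normalize_adj(adj)
print(np.round(norm.toarray(), 4))     # off-diagonal entries become 1/sqrt(2) ~ 0.7071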
def prepare_label(input_batch, new_size):
"""Resize masks and perform one-hot encoding.
Args:
input_batch: input tensor of shape [batch_size H W 1].
new_size: a tensor with new height and width.
Returns:
Outputs a tensor of shape [batch_size h w 21]
with last dimension comprised of 0's and 1's only.
"""
with tf.name_scope('label_encode'):
input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.
input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.
input_batch = tf.one_hot(input_batch, depth=n_classes)
return input_batch | 3cd049b0d610ed2cec79e17464a0b3d18baa0ab2 | 12,172 |
def find_index_halfmax(data1d):
"""
Find the two indices at half maximum for a bell-type curve (non-parametric). Uses center of mass calculation.
:param data1d:
:return: xmin, xmax
"""
# normalize data between 0 and 1
data1d = data1d / float(np.max(data1d))
# loop across elements and stops when found 0.5
for i in range(len(data1d)):
if data1d[i] > 0.5:
break
# compute center of mass to get coordinate at 0.5
xmin = i - 1 + (0.5 - data1d[i - 1]) / float(data1d[i] - data1d[i - 1])
# continue for the descending slope
for i in range(i, len(data1d)):
if data1d[i] < 0.5:
break
# compute center of mass to get coordinate at 0.5
xmax = i - 1 + (0.5 - data1d[i - 1]) / float(data1d[i] - data1d[i - 1])
# display
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(src1d)
# plt.plot(xmin, 0.5, 'o')
# plt.plot(xmax, 0.5, 'o')
# plt.savefig('./normalize1d.png')
return xmin, xmax | 2bc506075218b34d971beebd1ed6d08b0841aec9 | 12,173 |
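# Illustrative worked example for find_index_halfmax above, not part of the
# original snippet; assumes numpy as np and the function are in scope.
import numpy as np

bell = np.array([0., 1., 2., 3., 4., 3., 2., 1., 0.])
xmin, xmax = find_index_halfmax(bell)
print(xmin, xmax)   # 2.0 6.0 -> half-maximum crossings of the normalized curve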
from typing import Optional
def format_autoupdate_jira_msg(
message_body: str, header_body: Optional[str] = None
) -> str:
"""
Format a JIRA message with useful headers.
An "Automated JIRA Update" title will be added,
as well as either a URL link if a ``BUILD_URL`` env variable is present,
or a note indicating a manual run with user id otherwise.
Args:
message_body: the body of the message
header_body: a header to be added with ``h2`` tag
Returns:
a formatted message with headers
"""
message = "h2. {}".format(header_body) if header_body else ""
message += "\n\nAutomated JIRA Update:\n\n{}\n\n{}".format(
_build_source(), message_body
)
return message | 8470987c886c4c696ebd7537369b9baee9883e20 | 12,174 |
def _unescape_token(escaped_token):
"""Inverse of _escape_token().
Args:
escaped_token: a unicode string
Returns:
token: a unicode string
"""
def match(m):
if m.group(1) is None:
return "_" if m.group(0) == "\\u" else "\\"
try:
return chr(int(m.group(1)))
except (ValueError, OverflowError) as _:
return "\u3013" # Unicode for undefined character.
trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
return _UNESCAPE_REGEX.sub(match, trimmed) | 1e596373f64d2163e467dddf4851e60dba6faa00 | 12,175 |
def create_option_learner(action_space: Box) -> _OptionLearnerBase:
"""Create an option learner given its name."""
if CFG.option_learner == "no_learning":
return KnownOptionsOptionLearner()
if CFG.option_learner == "oracle":
return _OracleOptionLearner()
if CFG.option_learner == "direct_bc":
return _DirectBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "implicit_bc":
return _ImplicitBehaviorCloningOptionLearner(action_space)
if CFG.option_learner == "direct_bc_nonparameterized":
return _DirectBehaviorCloningOptionLearner(action_space,
is_parameterized=False)
raise NotImplementedError(f"Unknown option_learner: {CFG.option_learner}") | 5642b5c6713dcf3204a0bf98e4435cfb2874e1c6 | 12,176 |
def parse_foochow_romanized_phrase(phrase, allow_omit_ingbing = True):
"""Parse a dash-separated phrase / word in Foochow Romanized."""
syllables = phrase.strip().split('-')
result = []
for syllable in syllables:
try:
parsed = FoochowRomanizedSyllable.from_string(syllable, allow_omit_ingbing)
result.append(parsed)
        except Exception:
            raise ValueError("%s is not a valid Foochow Romanized syllable." % syllable)
return result | d9b5fa15ab11a596e14c7eecff2ce4fc7ef520ae | 12,177 |
def _update(dict_merged: _DepDict, dict_new: _DepDict) -> _DepDict:
"""
Merge a dictionary `dict_new` into `dict_merged` asserting if there are
conflicting (key, value) pair.
"""
    for k, v in dict_new.items():
if k in dict_merged:
if v != dict_merged[k]:
raise ValueError(
"Key '%s' is assigned to different values '%s' and '%s'"
% (k, v, dict_merged[k])
)
else:
dict_merged[k] = v
return dict_merged | 8c96256dd96f0a75d8e8cde039c7193699bf763f | 12,178 |
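# Illustrative usage sketch for _update above, not part of the original snippet;
# plain dicts stand in for the _DepDict alias.
merged = {'numpy': '1.21'}
merged = _update(merged, {'pandas': '1.3'})        # disjoint keys merge fine
assert merged == {'numpy': '1.21', 'pandas': '1.3'}
try:
    _update(merged, {'numpy': '1.22'})             # conflicting value for 'numpy'
except ValueError as err:
    print(err)                                     # reports the conflicting key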
from datetime import datetime
def date_convert(value):
"""
    Convert a date string to the database date type.
    :param value: date string in '%Y/%m/%d' format
    :return: datetime.date (falls back to today's date on parse failure)
"""
try:
create_date = datetime.strptime(value, '%Y/%m/%d').date()
except Exception as e:
create_date = datetime.now().date()
return create_date | 40d7a213a8aeed692940bbb285fdad1bbb5b65a6 | 12,179 |
def discriminator_txt2img_resnet(input_images, t_txt, is_train=True, reuse=False):
""" 64x64 + (txt) --> real/fake """
# https://github.com/hanzhanggit/StackGAN/blob/master/stageI/model.py
# Discriminator with ResNet : line 197 https://github.com/reedscot/icml2016/blob/master/main_cls.lua
w_init = tf.random_normal_initializer(stddev=0.02)
gamma_init=tf.random_normal_initializer(1., 0.02)
df_dim = 64 # 64 for flower, 196 for MSCOCO
s = 64 # output image size [64]
s2, s4, s8, s16 = int(s / 2), int(s / 4), int(s / 8), int(s / 16)
tl.layers.set_name_reuse(reuse)
net_in = Input(input_images)
net_h0 = Conv2d(df_dim, (4, 4), (2, 2), act=lambda x: tl.act.lrelu(x, 0.2), padding='SAME', W_init=w_init, name='d_h0/conv2d')(net_in)
net_h1 = Conv2d(df_dim * 2, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h1/conv2d')(net_h0)
net_h1 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h1/batchnorm')(net_h1)
net_h2 = Conv2d(df_dim * 4, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h2/conv2d')(net_h1)
net_h2 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h2/batchnorm')(net_h2)
net_h3 = Conv2d(df_dim * 8, (4, 4), (2, 2), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h3/conv2d')(net_h2)
net_h3 = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h3/batchnorm')(net_h3)
net = Conv2d(df_dim * 2, (1, 1), (1, 1), act=None, padding='VALID', W_init=w_init, b_init=None, name='d_h4_res/conv2d')(net_h3)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm')(net)
net = Conv2d(df_dim * 2, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d2')(net)
net = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm2')(net)
net = Conv2d(df_dim * 8, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init, b_init=None, name='d_h4_res/conv2d3')(net)
net = BatchNorm(is_train=is_train, gamma_init=gamma_init, name='d_h4_res/batchnorm3')(net)
net_h4 = Elementwise(act=lambda x: tl.act.lrelu(x, 0.2), combine_fn=tf.add, name='d_h4/add')([net_h3, net])
# net_h4.outputs = tl.act.lrelu(net_h4.outputs, 0.2)
if t_txt is not None:
net_in2 = Input(t_txt)
#net_txt = Dense(n_units=t_dim, act=lambda x: tl.act.lrelu(x, 0.2), W_init=w_init, name='d_reduce_txt/dense')(net_txt)
net_txt = ExpandDims(1, name='d_txt/expanddim1')(net_in2)
net_txt = ExpandDims(1, name='d_txt/expanddim2')(net_txt)
net_txt = Tile([1, 4, 4, 1], name='d_txt/tile')(net_txt)
net_h4_concat = Concat(concat_dim=3, name='d_h3_concat')([net_h4, net_txt])
# 243 (ndf*8 + 128 or 256) x 4 x 4
net_h4 = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='VALID', W_init=w_init, b_init=None, name='d_h3/conv2d_2')(net_h4_concat)
net_h4 = BatchNorm(act=lambda x: tl.act.lrelu(x, 0.2), is_train=is_train, gamma_init=gamma_init, name='d_h3/batch_norm_2')(net_h4)
net_ho = Conv2d(1, (s16, s16), (s16, s16), act=tf.nn.sigmoid, padding='VALID', W_init=w_init, name='d_ho/conv2d')(net_h4)
# 1 x 1 x 1
net_ho = Flatten()(net_ho)
# logits = net_ho.outputs
# net_ho.outputs = tf.nn.sigmoid(net_ho.outputs)
return tl.models.Model(inputs=[net_in,net_in2], outputs=net_ho) | 200d23ccffe631ea9bea2de5afa82a1794192a7b | 12,180 |
import imghdr
def get_img_content(session,
file_url,
extension=None,
max_retry=3,
req_timeout=5):
"""
Returns:
(data, actual_ext)
"""
retry = max_retry
while retry > 0:
try:
response = session.get(file_url, timeout=req_timeout)
except Exception as e:
print(f'Exception caught when downloading file {file_url}, '
f'error: {e}, remaining retry times: {retry - 1}')
else:
if response.status_code != 200:
print(f'Response status code {response.status_code}, '
f'file {file_url}')
break
# get the response byte
data = response.content
if isinstance(data, str):
print('Converting str to byte, later remove it.')
                data = data.encode()
actual_ext = imghdr.what(extension, data)
actual_ext = 'jpg' if actual_ext == 'jpeg' else actual_ext
# do not download original gif
if actual_ext == 'gif' or actual_ext is None:
return None, actual_ext
return data, actual_ext
finally:
retry -= 1
return None, None | 156005420ebc1503d5cf7a194051b93d9fccb8ed | 12,181 |
import urllib
from xml.dom import minidom
def nextbus(a, r, c="vehicleLocations", e=0):
"""Returns the most recent latitude and
longitude of the selected bus line using
the NextBus API (nbapi)"""
nbapi = "http://webservices.nextbus.com"
nbapi += "/service/publicXMLFeed?"
nbapi += "command=%s&a=%s&r=%s&t=%s" % (c,a,r,e)
xml = minidom.parse(urllib.urlopen(nbapi))
    bus = xml.getElementsByTagName("vehicle")
    if bus:
        at = bus[0].attributes
return(at["lat"].value, at["lon"].value)
else: return (False, False) | 13d1d26fcda1ad01e145dc1d1d0d4e5377efa576 | 12,182 |
def xml_translate(callback, value):
""" Translate an XML value (string), using `callback` for translating text
appearing in `value`.
"""
if not value:
return value
try:
root = parse_xml(value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
return serialize_xml(result)
except etree.ParseError:
# fallback for translated terms: use an HTML parser and wrap the term
root = parse_html(u"<div>%s</div>" % value)
result = translate_xml_node(root, callback, parse_xml, serialize_xml)
# remove tags <div> and </div> from result
return serialize_xml(result)[5:-6] | b95f61fd1f78d567f69bdae3f5d0a1599d7b5cdc | 12,183 |
def check_sc_sa_pairs(tb, pr_sc, pr_sa, ):
"""
Check whether pr_sc, pr_sa are allowed pairs or not.
agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
"""
bS = len(pr_sc)
check = [False] * bS
for b, pr_sc1 in enumerate(pr_sc):
pr_sa1 = pr_sa[b]
hd_types1 = tb[b]['types']
hd_types11 = hd_types1[pr_sc1]
if hd_types11 == 'text':
if pr_sa1 == 0 or pr_sa1 == 3: # ''
check[b] = True
else:
check[b] = False
elif hd_types11 == 'real':
check[b] = True
else:
raise Exception("New TYPE!!")
return check | ded05192f26516e54e469bb1fe44ff6170ecea13 | 12,184 |
def translate_resource_args(func):
"""
Decorator that converts Issue and Project resources to their keys when used as arguments.
"""
@wraps(func)
def wrapper(*args, **kwargs):
arg_list = []
for arg in args:
if isinstance(arg, (Issue, Project)):
arg_list.append(arg.key)
else:
arg_list.append(arg)
result = func(*arg_list, **kwargs)
return result
return wrapper | 091c2bbc2875f32c643afe0a88ddb5980ff9f90c | 12,185 |
def nan_jumps_dlc(files, max_jump=200):
"""Nan stretches in between large jumps, assuming most of the trace is correct"""
# copy the data
corrected_trace = files.copy()
# get the column names
column_names = corrected_trace.columns
# run through the columns
for column in column_names:
# skip the index column if it's there
if column == 'index':
continue
# find the jumps
jump_length = np.diff(corrected_trace[column], axis=0)
jump_location = np.argwhere(abs(jump_length) > max_jump)
if jump_location.shape[0] == 0:
continue
jump_location = [el[0] for el in jump_location]
# initialize a flag
pair_flag = True
# go through pairs of jumps
for idx, jump in enumerate(jump_location[:-1]):
# if this is the second member of a pair, skip
if not pair_flag:
# reset the pair flag
pair_flag = True
continue
# if this jump and the next have the same sign, skip
if (jump_length[jump]*jump_length[jump_location[idx+1]]) > 0:
continue
# nan the segment in between
corrected_trace.loc[jump+1:jump_location[idx+1]+1, column] = np.nan
# set the pair flag
pair_flag = False
return corrected_trace | 5c06e6d020c4b85f2749d46f693dc29fd4f8326c | 12,186 |
def index():
"""View: Site Index Page"""
return render_template("pages/index.html") | 045c45082215bbc6888944386ba901af7412b0a6 | 12,187 |
from typing import Optional
import aiohttp
import urllib
import hashlib
async def get_latest_digest_from_registry(
repository: str,
tag: str,
credentials: Optional[meadowrun.credentials.RawCredentials],
) -> str:
"""
Queries the Docker Registry HTTP API to get the current digest of the specified
repository:tag. The output of this function should always match the output of
`docker inspect --format='{{.RepoDigests}}' [repository]:[tag]` AFTER calling
`docker pull [repository]:[tag]`. Example output is something like
sha256:76eaa9e5bd357d6983a88ddc9c4545ef4ad64c50f84f081ba952c7ed08e3bdd6. Note that
this hash is also part of the output when `docker pull` is run.
This function gets the latest version of that hash without pulling the image first.
The docker command line/client does not provide this capability
(https://stackoverflow.com/questions/56178911/how-to-obtain-docker-image-digest-from-tag/56178979#56178979),
so we have to resort to the Docker Registry HTTP API.
Note that we could also use skopeo for this: https://github.com/containers/skopeo
The functionality we're implementing is exactly the same as `skopeo inspect
[repository]:[tag]` and then reading the "digest" field.
"""
# The correct implementation is to read the manifest for a repository/tag and
# compute the sha256 hash of the content of the manifest.
#
# At the time of writing this comment, the accepted answers on the first couple
# Google results on this topic were out of date, incomplete, or incorrect:
#
# https://stackoverflow.com/questions/39375421/can-i-get-an-image-digest-without-downloading-the-image/39376254#39376254
# https://stackoverflow.com/questions/41808763/how-to-determine-the-docker-image-id-for-a-tag-via-docker-hub-api/41830007#41830007
# https://ops.tips/blog/inspecting-docker-image-without-pull/
#
# Part of the confusion seems to be that there are many different digests for the
# sub-parts of the image (e.g. for different architectures, different layers,
# etc.). We only care about the digest that we get from `docker inspect` (as
# described above) because this is what we'll use to figure out if we have the
# latest version of the image or not. Correctness can be easily verified using the
# docker command line as described above.
#
# Reading the docker-content-digest header in the response is an alternative to
# computing the hash ourselves, but this header is not in the responses from AWS
# ECR.
registry_domain, repository = get_registry_domain(repository)
manifests_url = f"https://{registry_domain}/v2/{repository}/manifests/{tag}"
headers = {"Accept": _MANIFEST_ACCEPT_HEADER_FOR_DIGEST}
if credentials is None:
basic_auth = None
elif isinstance(credentials, meadowrun.credentials.UsernamePassword):
basic_auth = aiohttp.BasicAuth(credentials.username, credentials.password)
else:
raise ValueError(f"Unexpected type of credentials {type(credentials)}")
manifests_text: Optional[bytes] = None
# First, try requesting the manifest without any authentication. It might work, and
# if it doesn't, the response will tell us how the authentication should work.
# TODO add logic to "remember" which repositories require what kinds of
# authentication, as well as potentially tokens as well
async with aiohttp.request("GET", manifests_url, headers=headers) as response:
if response.ok:
manifests_text = await response.read()
else:
# Regardless of the type of error, try again with authentication as long as
# we have a www-authenticate header. response.headers is case insensitive:
if "www-authenticate" not in response.headers:
# we don't know how to authenticate, so just propagate the error
response.raise_for_status()
authentication_header = response.headers["www-authenticate"]
authentication_header_error_message = (
"Don't know how to interpret authentication header "
+ authentication_header
)
scheme, space, auth_params = authentication_header.partition(" ")
if not space:
raise ValueError(authentication_header_error_message)
elif scheme.lower() == "basic":
if basic_auth is None:
raise ValueError(
f"Basic auth is required to access {manifests_url} but no "
"username/password was provided"
)
# We've already set the basic_auth variable above, so we'll leave it as
# is so that it gets used directly in the next request for the manifest
# below.
elif scheme.lower() == "bearer":
# For bearer auth, we need to request a token. Parsing the
# www-authenticate header should tell us everything we need to know to
# construct the request for the token. Example of a www-authenticate
# header is
# `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/python:pull"` # noqa E501
auth_params_parsed = urllib.request.parse_keqv_list(
urllib.request.parse_http_list(auth_params)
)
# a bit hacky, but we're going to use auth_params to create the query
# string, so we remove realm from it because that's the only one we
# don't need
# TODO should this be case insensitive?
realm = auth_params_parsed["realm"]
del auth_params_parsed["realm"]
token_request_url = (
f"{realm}?{urllib.parse.urlencode(auth_params_parsed)}"
)
# Even if no username_password was provided (i.e. basic_auth is None)
# it's worth trying this. E.g. DockerHub requires an anonymous token for
# public repositories
async with aiohttp.request(
"GET", token_request_url, auth=basic_auth
) as token_response:
if not token_response.ok:
token_response.raise_for_status()
# TODO should this be case insensitive?
token = (await token_response.json())["token"]
# Now we add the Bearer token to headers which will get used in the
# next request for the manifest. We also need to unset basic_auth as
# we've used that to get the token, and it should not be used in
# subsequent requests.
headers["Authorization"] = f"Bearer {token}"
basic_auth = None
else:
raise ValueError(authentication_header_error_message)
# now exactly one of basic_auth or headers["Authorization"] should be set
async with aiohttp.request(
"GET", manifests_url, headers=headers, auth=basic_auth
) as response_authenticated:
if not response_authenticated.ok:
response_authenticated.raise_for_status()
manifests_text = await response_authenticated.read()
if not manifests_text:
raise ValueError(
"Programming error: manifests_text should not be None/empty string"
)
# compute the digest from the manifest text
digest = hashlib.sha256(manifests_text).hexdigest()
return f"sha256:{digest}"
# TODO test case where image doesn't exist | 95b0044d5c6e744f548891c1f78115eb40ddaf4c | 12,188 |
def histogram(x, bins, bandwidth, epsilon=1e-10):
"""
Function that estimates the histogram of the input tensor.
The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter.
"""
pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon)
return pdf | bee459eb69daf41ccb0f2810d5e88139f57cad87 | 12,189 |
import re
import os
def file_deal(paths, set_list: list, list_search: list, list_enter: list, file_path: dict, clear_list: bool = False,
pattern=r'^[.\n]*$', is_file=True, replace_str: str = '', names: dict = None):
"""
:param clear_list: is need clear the list
:param paths: DirPicker path or FilePicker files
:param set_list: the list save the keys
:param list_search: the list to search
:param list_enter: the list to show
:param file_path: the dict of loaded files
:param pattern: the pattern of the base_filename
:param is_file: a bool of the load type bool True->FilePicker,False->DirPicker
:param replace_str: the need replace string in the base filename
:param names: the Chinese-base_name dict
:return: if do not raise any error and worked bool->True,else bool->False
"""
try:
if names is None:
names = {}
pattern_re = re.compile(pattern)
if not is_file:
dict_path = paths.copy()
paths = paths.keys()
num = len(paths)
else:
dict_path = {}
num = len(set_list)
if clear_list:
set_list.clear()
list_enter.clear()
list_search.clear()
num = 0
if not len(paths) == 0:
path = filter(lambda x: pattern_re.match(os.path.basename(x)) is not None, paths)
path = list(path)
info_write = info_write_builder(is_file, dict_path, replace_str, set_list, file_path, list_enter,
names, list_search)
path_len = len(path)
paths = zip(path, range(path_len))
paths = list(paths)
num += len(list(map(info_write, list(paths))))
if path_len == 0:
                return False, 'Import finished, no new items!'
else:
            return False, 'Import failed, nothing to import!'
    except (TypeError, KeyError, RuntimeError) as info:
        return False, 'Import failed, an error occurred! %s' % info
else:
        return True, 'Import succeeded! Imported %d items!' % num | a34dbfe7c968626aca5c68e6aa2c0287ebd7146f | 12,190
def generate_kam(
kam_path: str
) -> nx.DiGraph:
"""
Generates the knowledge assembly model as a NetworkX graph.
:param kam_path: Path to the file containing the source, relationship and the target nodes of a knowledge
assembly model (KAM).
:return: KAM graph as a NetworkX DiGraph.
"""
# Read the file containing the kam file
kam_df = pd.read_csv(kam_path, sep='\t', header=None)
# Rename the column headers are Source, Relationship and Target
kam_df.columns = ['Source', 'Relationship', 'Target']
# Map relationships between the nodes as either +1 or -1 based on the interaction
rlsp_mapping = {
'activates': 1,
'inhibits': -1
}
# Add the data to a directed graph
kam = nx.DiGraph()
for edge in kam_df.index:
kam.add_edge(
kam_df.at[edge, 'Source'],
kam_df.at[edge, 'Target'],
effect=rlsp_mapping[kam_df.at[edge, 'Relationship']]
)
return kam | 54f6e6cc0440a9b81cb48078030943013e599847 | 12,191 |
def toDrive(collection, folder, namePattern='{id}', scale=30,
dataType="float", region=None, datePattern=None,
extra=None, verbose=False, **kwargs):
""" Upload all images from one collection to Google Drive. You can use
the same arguments as the original function
ee.batch.export.image.toDrive
:param collection: Collection to upload
:type collection: ee.ImageCollection
:param folder: Google Drive folder to export the images to
:type folder: str
:param namePattern: pattern for the name. See make_name function
:type namePattern: str
    :param region: area to upload. Defaults to the footprint of the first
image in the collection
:type region: ee.Geometry.Rectangle or ee.Feature
    :param scale: scale of the image (side of one pixel). Defaults to 30
(Landsat resolution)
:type scale: int
:param dataType: as downloaded images **must** have the same data type
in all bands, you have to set it here. Can be one of: "float",
"double", "int", "Uint8", "Int8" or a casting function like
*ee.Image.toFloat*
:type dataType: str
:param datePattern: pattern for date if specified in namePattern.
Defaults to 'yyyyMMdd'
:type datePattern: str
:return: list of tasks
:rtype: list
"""
# empty tasks list
tasklist = []
# get region
region = tools.geometry.getRegion(region)
# Make a list of images
img_list = collection.toList(collection.size())
n = 0
while True:
try:
img = ee.Image(img_list.get(n))
name = makeName(img, namePattern, datePattern, extra)
name = name.getInfo()
description = utils.matchDescription(name)
# convert data type
img = utils.convertDataType(dataType)(img)
task = ee.batch.Export.image.toDrive(image=img,
description=description,
folder=folder,
fileNamePrefix=name,
region=region,
scale=scale, **kwargs)
task.start()
if verbose:
print("exporting {} to folder '{}' in GDrive".format(name, folder))
tasklist.append(task)
n += 1
except Exception as e:
error = str(e).split(':')
if error[0] == 'List.get':
break
else:
raise e
return tasklist | 2bf55de8894a063d2e74a462a3959753a1396c0a | 12,192 |
def area(box):
"""Computes area of boxes.
B: batch_size
N: number of boxes
Args:
box: a float Tensor with [N, 4], or [B, N, 4].
Returns:
a float Tensor with [N], or [B, N]
"""
with tf.name_scope('Area'):
y_min, x_min, y_max, x_max = tf.split(
value=box, num_or_size_splits=4, axis=-1)
return tf.squeeze((y_max - y_min) * (x_max - x_min), axis=-1) | 64e7a8e530d28a6c88f8a1c7fd2bb1b6d880617c | 12,193 |
def read(fn):
"""
return a list of the operating systems and a list of the groups in
the given fingerbank config file
"""
cfg = parse_config_with_heredocs(fn)
return create_systems_and_groups(cfg) | 900e70093e469527f20f7be6ed091edf58ff3ace | 12,194 |
def decay_value(base_value, decay_rate, decay_steps, step):
""" decay base_value by decay_rate every decay_steps
:param base_value:
:param decay_rate:
:param decay_steps:
:param step:
:return: decayed value
"""
return base_value*decay_rate**(step/decay_steps) | c593f5e46d7687fbdf9760eb10be06dca3fb6f7b | 12,195 |
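# Illustrative worked example for decay_value above, not part of the original
# snippet: with base 0.1, rate 0.5 and 1000-step decay, step 2000 gives
# 0.1 * 0.5**(2000/1000) = 0.025.
assert abs(decay_value(0.1, 0.5, 1000, 2000) - 0.025) < 1e-12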
def setup_flask_app(manager_ip='localhost',
driver='',
hash_salt=None,
secret_key=None):
"""Setup a functioning flask app, when working outside the rest-service
:param manager_ip: The IP of the manager
:param driver: SQLA driver for postgres (e.g. pg8000)
:param hash_salt: The salt to be used when creating user passwords
:param secret_key: Secret key used when hashing flask tokens
:return: A Flask app
"""
app = Flask(__name__)
manager_config.load_configuration(from_db=False)
with app.app_context():
app.config['SQLALCHEMY_DATABASE_URI'] = manager_config.db_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['ENV'] = 'production'
set_flask_security_config(app, hash_salt, secret_key)
Security(app=app, datastore=user_datastore)
Migrate(app=app, db=db)
db.init_app(app)
app.app_context().push()
return app | ca71e94dcfcc4949abc4782a74cd67ce1d089d06 | 12,196 |
import shutil
def download_file(file_id, unique_id, credentials):
"""Downloads a file from google drive if user has been authenticated using oauth2
Args:
file_id (str): [The google drive id of the file]
unique_id (str): [The name of the video that is to be used for stored file]
Returns:
bool: [whether the file has been successfully downloaded or not]
"""
http = credentials.authorize(httplib2.Http())
service = discovery.build("drive", "v3", http=http)
request = service.files().get_media(fileId=file_id)
fh = BytesIO()
# Initialise a downloader object to download the file
# Downloads in chunks of 2MB
downloader = MediaIoBaseDownload(fh, request, chunksize=2048000)
done = False
try:
# Download the data in chunks
while not done:
status, done = downloader.next_chunk()
fh.seek(0)
# Write the received data to the file
with open(f"./{videos_dir}/{unique_id}", "wb") as f:
shutil.copyfileobj(fh, f)
print("File Downloaded")
# Return True if file Downloaded successfully
return True
except Exception as e:
print(str(e))
# Return False if something went wrong
print("Something went wrong.")
return False | ae1f0e9648602737a295ac313f3984d31c51fc7e | 12,197 |
def join_lines(new_lines, txt):
"""Joins lines, adding a trailing return if the original text had one."""
return add_ending('\n'.join(new_lines), txt) | 097fccf3ce6a7a5aab9d4f470c35833af3f63836 | 12,198 |
def get_components_with_metrics(config):
"""
    :type config: mycroft_holmes.config.Config
"""
storage = MetricsStorage(config=config)
components = []
for feature_name, feature_spec in config.get_features().items():
feature_id = config.get_feature_id(feature_name)
metrics = config.get_metrics_for_feature(feature_name)
try:
score = storage.get(feature_id, feature_metric='score')
except MycroftMetricsStorageError:
score = None
component = {
'id': feature_id,
# feature's metadata
'name': feature_name,
'docs': feature_spec.get('url'),
'repo': feature_spec.get('repo'),
# fetch metrics and calculated score
'metrics': metrics,
'score': score or 0, # always an int, as we sort latter on
# link to a feature's dashboard
'url': url_for('dashboard.feature', feature_id=feature_id, _external=True),
}
components.append(component)
# sort components by score (descending)
components = sorted(components, key=lambda item: item['score'], reverse=True)
return components | 478743f29620530d7c4d7ca916ec595fa7a1ab3b | 12,199 |