content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def collapse(board_u):
"""
takes a row/column of the board
and collapses it to the left
"""
i = 1
limit = 0
while i < 4:
if board_u[i]==0:
i += 1
continue
up_index = i-1
curr_index = i
while up_index>=0 and board_u[up_index]==0:
board_u[up_index] = board_u[curr_index]
board_u[curr_index] = 0
up_index -= 1
curr_index -= 1
if up_index >= limit and board_u[up_index]==board_u[curr_index]:
board_u[up_index] *= 2
board_u[curr_index] = 0
limit = curr_index
i += 1
return board_u | a79a3c7b83355f95face09e0ab21ab0b15a81053 | 3,648,701 |
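# A minimal usage sketch for collapse() above, assuming a 4-cell row from a
# 2048-style game where 0 marks an empty cell; note that the merged 4 does not
# merge again with the existing 4 because of the `limit` guard.
row = [2, 2, 4, 0]
print(collapse(row))  # [4, 4, 0, 0]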
import re
def find_match_in_file(search_term, file_location):
"""
This function is used to query a file
search_term = Term to find
file_location = Location of file to query.
"""
try:
with open(file_location) as line:
for search in line:
result = re.match(search_term, search)
if result:
return result
return
except Exception as err:
print(err) | d78776069c8f2b4da5f09bf0ce3e675e215ee584 | 3,648,702 |
def get_cost_function(cost_function_name: str):
"""
Given the name of a cost function, retrieve the corresponding function and its partial derivative wrt Y_circ
:param cost_function_name: the name of the cost function
:return: the corresponding cost function and its partial derivative wrt Y_circ
"""
try:
return cost_functions[cost_function_name]
except KeyError:
raise UnknownCostFunctionName(cost_function_name) | 4f6664d91878e482f1996faa9141c201e2d260d3 | 3,648,703 |
def create_C1(data_set):
"""
Create frequent candidate 1-itemset C1 by scanning the data set.
Args:
data_set: A list of transactions. Each transaction contains several items.
Returns:
C1: A set which contains all frequent candidate 1-itemsets
"""
C1 = set()
for t in data_set:
for item in t:
item_set = frozenset([item])
C1.add(item_set)
return C1 | 9f3deb61c6c3b982976c61c4247102431794daa8 | 3,648,704 |
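# Usage sketch for create_C1() above, assuming transactions are plain Python
# lists of hashable items.
transactions = [[1, 2, 3], [2, 3, 4], [1, 3]]
C1 = create_C1(transactions)
print(sorted(tuple(s) for s in C1))  # [(1,), (2,), (3,), (4,)]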
def timezone(name):
"""
Loads a Timezone instance by name.
:param name: The name of the timezone.
:type name: str or int
:rtype: Timezone
"""
return Timezone.load(name) | c0d0250a30581c4414eb4bb293331127011ae2a3 | 3,648,705 |
def complexDivision(a, b):
"""
Complex division (element-wise); the last axis holds the (real, imaginary) parts.
:param a: complex array a
:param b: complex array b
:return: the result of a / b
"""
res = np.zeros(a.shape, a.dtype)
divisor = 1. / (b[:, :, 0] ** 2 + b[:, :, 1] ** 2)
res[:, :, 0] = (a[:, :, 0] * b[:, :, 0] + a[:, :, 1] * b[:, :, 1]) * divisor
res[:, :, 1] = (a[:, :, 1] * b[:, :, 0] - a[:, :, 0] * b[:, :, 1]) * divisor  # Im(a/b) = (a_i*b_r - a_r*b_i) / |b|^2
return res | b3758021f466d8b56af06698c83077875ff7fdb5 | 3,648,706 |
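# Quick check of complexDivision() above against Python's built-in complex
# arithmetic, assuming numpy is imported as np (as in the original module) and
# the last axis holds the (real, imaginary) parts.
import numpy as np
a = np.zeros((1, 1, 2)); a[0, 0] = [1.0, 2.0]  # 1 + 2j
b = np.zeros((1, 1, 2)); b[0, 0] = [3.0, 4.0]  # 3 + 4j
print(complexDivision(a, b)[0, 0])  # ~[0.44, 0.08], i.e. (1 + 2j) / (3 + 4j)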
def dynamics_RK4(OdeFun, tspan, x, u, v):
"""
# RK4 integrator for a time-invariant dynamical system under a control, u,
and disturbance, v.
# See https://lpsa.swarthmore.edu/NumInt/NumIntFourth.html
This implementation is adapted from unstable-zeros' learning-cbfs example for two airplanes:
https://github.com/unstable-zeros/learning-cbfs/blob/master/airplane_example/learning_cbfs_airplane.ipynb
This function must be called within a loop for a total of N integration
steps; obviously, the smaller the time step, the better.
Inputs:
OdeFun: Right Hand Side of Ode function to be integrated
tspan: A list [start, end] that specifies over what time horizon to integrate the dynamics
x: State, must be a list, initial condition
u: Control, must be a list
v: Disturbance, must be a list
Author: Lekan Molu, August 09, 2021
"""
M = 4 # RK4 steps per interval
h = 0.2 # time step
if onp.any(tspan):
hh = (tspan[1]-tspan[0])/10/M
X = onp.array(x)
U = onp.array(u)
V = onp.array(v)
for j in range(M):
if onp.any(tspan): # integrate for this much time steps
for h in np.arange(tspan[0], tspan[1], hh):
k1 = OdeFun(X, U, V)
k2 = OdeFun(X + h/2 * k1, U, V)
k3 = OdeFun(X + h/2 * k2, U, V)
k4 = OdeFun(X + h * k3, U, V)
X = X+(h/6)*(k1 +2*k2 +2*k3 +k4)
else:
k1 = OdeFun(X, U, V)
k2 = OdeFun(X + h/2 * k1, U, V)
k3 = OdeFun(X + h/2 * k2, U, V)
k4 = OdeFun(X + h * k3, U, V)
X = X+(h/6)*(k1 +2*k2 +2*k3 +k4)
return list(X) | cda0b30b9973e299be4c14e11660d6f2fd9bc0e9 | 3,648,707 |
def random_decay(num_actions=None, decay_type='polynomial_decay', start_decay_at=0,
stop_decay_at=1e9, decay_rate=0., staircase=False, decay_steps=10000,
min_exploration_rate=0):
"""Builds a random decaying exploration.
Decay a random value based on number of states and the decay_type.
Args:
num_actions: `int` or None. If discrete num_action must be None.
decay_type: A decay function name defined in `exploration_decay`
possible Values: exponential_decay, inverse_time_decay, natural_exp_decay,
piecewise_constant, polynomial_decay.
start_decay_at: `int`. When to start the decay.
stop_decay_at: `int`. When to stop the decay.
decay_rate: A Python number. The decay rate.
staircase: Whether to apply decay in a discrete staircase,
as opposed to continuous, fashion.
decay_steps: How often to apply decay.
min_exploration_rate: `float`. Don't decay below this number.
Returns:
`function` the exploration logic operation.
"""
if num_actions is None:
exploration_rate = partial(np.random.randn, 1)
else:
exploration_rate = partial(np.random.randn, num_actions)
exploration_rate = _decay_fn(timestep=get_global_timestep(),
exploration_rate=exploration_rate,
decay_type=decay_type,
start_decay_at=start_decay_at,
stop_decay_at=stop_decay_at,
decay_rate=decay_rate,
staircase=staircase,
decay_steps=decay_steps,
min_exploration_rate=min_exploration_rate)
track(exploration_rate, tf.GraphKeys.EXPLORATION_RATE)
return exploration_rate | 6cdcaac8e6fbdf609ed09dd686cebad0935017aa | 3,648,708 |
def not_found_handler(not_found):
"""Basic not found request handler."""
return render_template('except.html',
http_excep=not_found,
message='No resource found at this URL.',
http_code=404,
http_error="Not Found") | 35e147ea123eba7df7e517279aaf3e0790656d95 | 3,648,709 |
def remove_minor_regions(labeled_img, biggest_reg_lab):
"""
Set all the minor regions to background and the biggest to 1.
Returns:
A numpy array with the new segmentation.
"""
f = np.vectorize(lambda x: 1 if x == biggest_reg_lab else 0)
return f(labeled_img) | 86f015d4bca9dab570fd8ea8684f466d86803b54 | 3,648,710 |
from bs4 import BeautifulSoup
import re
import json
def get_right(html):
"""
Extract the common parsed part (the right-hand page, i.e. the user's detailed profile section).
"""
soup = BeautifulSoup(html, "html.parser")
scripts = soup.find_all('script')
pattern = re.compile(r'FM.view\((.*)\)')
cont = ''
# First determine the identifiers on the right-hand side; enterprise users may have two r_ids
rids = []
for script in scripts:
m = pattern.search(script.string)
if m and 'WB_frame_c' in script.string:
all_info = m.group(1)
cont = json.loads(all_info).get('html', '')
if not cont:
return ''
rsoup = BeautifulSoup(cont, 'html.parser')
r_ids = rsoup.find(attrs={'class': 'WB_frame_c'}).find_all('div')
for r in r_ids:
rids.append(r['id'])
for script in scripts:
for r_id in rids:
m = pattern.search(script.string)
if m and r_id in script.string:
all_info = m.group(1)
cont += json.loads(all_info).get('html', '')
return cont | c76ae4a70904145f34b6fe1a3c2d8d5ef563f342 | 3,648,711 |
def marginal_ln_likelihood_worker(task):
"""
Compute the marginal log-likelihood, i.e. the likelihood integrated over
the linear parameters. This is meant to be ``map``ped using a processing
pool within the functions below and is not supposed to be in the
public API.
Parameters
----------
task : iterable
An array containing the indices of samples to be operated on, the
filename containing the prior samples, and the data.
Returns
-------
ll : `numpy.ndarray`
Array of log-likelihood values.
"""
slice_or_idx, task_id, prior_samples_file, joker_helper = task
# Read the batch of prior samples
batch = read_batch(prior_samples_file, joker_helper.packed_order,
slice_or_idx, units=joker_helper.internal_units)
if batch.dtype != np.float64:
batch = batch.astype(np.float64)
# memoryview is returned
ll = joker_helper.batch_marginal_ln_likelihood(batch)
return np.array(ll) | fd46ef000994c981889fbb440d8ffeacdf0a8b41 | 3,648,712 |
def from_matvec(matrix, vector=None):
""" Combine a matrix and vector into an homogeneous affine
Combine a rotation / scaling / shearing matrix and translation vector into
a transform in homogeneous coordinates.
Parameters
----------
matrix : array-like
An NxM array representing the linear part of the transform.
A transform from an M-dimensional space to an N-dimensional space.
vector : None or array-like, optional
None or an (N,) array representing the translation. None corresponds to
an (N,) array of zeros.
Returns
-------
xform : array
An (N+1, M+1) homogeneous transform matrix.
See Also
--------
to_matvec
Examples
--------
>>> from_matvec(np.diag([2, 3, 4]), [9, 10, 11])
array([[ 2, 0, 0, 9],
[ 0, 3, 0, 10],
[ 0, 0, 4, 11],
[ 0, 0, 0, 1]])
The `vector` argument is optional:
>>> from_matvec(np.diag([2, 3, 4]))
array([[2, 0, 0, 0],
[0, 3, 0, 0],
[0, 0, 4, 0],
[0, 0, 0, 1]])
"""
matrix = np.asarray(matrix)
nin, nout = matrix.shape
t = np.zeros((nin + 1, nout + 1), matrix.dtype)
t[0:nin, 0:nout] = matrix
t[nin, nout] = 1.
if vector is not None:
t[0:nin, nout] = vector
return t | 82aaf8fd72f076baa731163b335b5afd873f6f8c | 3,648,713 |
def read_file(fp, limit=DEFAULT_FILE_READ_SIZE):
"""
Return output of fp.read() limited to `limit` bytes of output from the end of file.
"""
fp.seek(0, 2) # Go to EOF
total = fp.tell()
if total > limit:
fp.seek(total - limit)
else:
fp.seek(0)
return fp.read() | 97fe4bc2b465cdcc562c931c6ffae68ceb616715 | 3,648,715 |
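# Usage sketch for read_file() above with an in-memory file; `limit` is passed
# explicitly here because DEFAULT_FILE_READ_SIZE is defined elsewhere in the
# original module.
import io
fp = io.StringIO("abcdefghij")
print(read_file(fp, limit=4))  # 'ghij' - only the last 4 characters are returned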
def sample_quaternions(shape=None):
"""
Effective Sampling and Distance Metrics for 3D Rigid Body Path Planning, James J. Kuffner (2004)
https://ri.cmu.edu/pub_files/pub4/kuffner_james_2004_1/kuffner_james_2004_1.pdf
"""
s = np.random.random(shape)
sigma1 = np.sqrt(1 - s)
sigma2 = np.sqrt(s)
theta1 = np.random.uniform(0, 2 * np.pi, shape)
theta2 = np.random.uniform(0, 2 * np.pi, shape)
w = np.cos(theta2) * sigma2
x = np.sin(theta1) * sigma1
y = np.cos(theta1) * sigma1
z = np.sin(theta2) * sigma2
return np.stack([w, x, y, z], axis=-1) | b7484c857cea09ade1e15b46ffa102bdeb16aa34 | 3,648,716 |
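# Sanity check for sample_quaternions() above: every sampled quaternion should
# have unit norm, since sigma1^2 + sigma2^2 = 1 by construction.
import numpy as np
q = sample_quaternions((5,))
print(q.shape)                                       # (5, 4)
print(np.allclose(np.linalg.norm(q, axis=-1), 1.0))  # True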
def _select_free_device(existing):
"""
Given a list of allocated devices, return an available device name.
According to
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
all AWS Linux instances have ``/dev/sd[a-z]`` available. However:
- ``sda`` is reserved for the root device (technically only ``sda1``);
- Amazon "strongly recommend that you don't" use instance store names
(usually ``/dev/sd[b-e]``) "because the behavior can be unpredictable";
- some "custom kernels might have restrictions that limit use to
``/dev/sd[f-p]``".
``sd[f-p]`` only allows 11 devices, so to increase this, ignore the
least stringent statement above, and allow ``sd[f-z]`` (21 devices).
To reduce the risk of failing on custom AMIs, select from ``[f-p]`` first.
Any further increase will need to start mining the ``hd[a-z][1-15]``
and ``xvd[b-c][a-z]`` namespaces, but which to use depends on whether
the AMI uses paravirtualization or HVM.
:param Sequence[bytes]: List of allocated device basenames
(e.g. ``[b'sda']``).
:return unicode file_name: available device name for attaching EBS volume.
"""
local_devices = frozenset(existing)
sorted_devices = sorted(existing)
IN_USE_DEVICES(devices=sorted_devices).write()
for suffix in b"fghijklmnopqrstuvwxyz":
next_local_device = b'xvd' + suffix
next_local_sd_device = b'sd' + suffix
file_name = u'/dev/sd' + unicode(suffix)
possible_devices = [
next_local_device, next_local_sd_device
]
if not local_devices.intersection(possible_devices):
return file_name
# Could not find any suitable device that is available
# for attachment. Log to Eliot before giving up.
NO_AVAILABLE_DEVICE(devices=sorted_devices).write()
raise NoAvailableDevice() | 93860df03c9df75c11f31c3017106fbed58bb48e | 3,648,717 |
def create_target_delivery_request(get_offers_opts):
"""Converts dict representation of get_offers options to TargetDeliveryRequest object"""
return TargetDeliveryRequest(request=create_delivery_request(get_offers_opts.get("request")),
target_cookie=get_offers_opts.get("targetCookie"),
target_location_hint=get_offers_opts.get("targetLocationHint"),
consumer_id=get_offers_opts.get("consumerId"),
customer_ids=get_offers_opts.get("customerIds"),
session_id=get_offers_opts.get("sessionId"),
visitor=get_offers_opts.get("visitor")) | cf470d56b32d6fd5eaeee3b549333425607dedbd | 3,648,718 |
def wintype_to_cdata(wintype):
"""
Returns the underlying CFFI cdata object or ffi.NULL if wintype is None.
Used internally in API wrappers to "convert" pywincffi's Python types to
the required CFFI cdata objects when calling CFFI functions. Example:
>>> from pywincffi.core import dist
>>> from pywincffi.kernel32 import CreateEvent
>>> from pywincffi.wintypes import wintype_to_cdata
>>> ffi, lib = dist.load()
>>> # Get an event HANDLE, using the wrapper: it's a Python HANDLE object.
>>> hEvent = CreateEvent(bManualReset=False, bInitialState=False)
>>> # Call ResetEvent directly without going through the wrapper:
>>> hEvent_cdata = wintype_to_cdata(hEvent)
>>> result = lib.ResetEvent(hEvent_cdata)
:param wintype:
A type derived from :class:`pywincffi.core.typesbase.CFFICDataWrapper`
:return:
The underlying CFFI <cdata> object, or ffi.NULL if wintype is None.
"""
ffi, _ = dist.load()
if wintype is None:
return ffi.NULL
if isinstance(wintype, (SOCKET, HANDLE, WSAEVENT)):
return wintype._cdata[0]
return wintype._cdata | e85123a0dc790c9b53f7afb4199c45d573e14310 | 3,648,719 |
import json
def rating_feedback_view(incident: Incident, channel_id: str):
"""Builds all blocks required to rate and provide feedback about an incident."""
modal_template = {
"type": "modal",
"title": {"type": "plain_text", "text": "Incident Feedback"},
"blocks": [
{
"type": "context",
"elements": [
{
"type": "plain_text",
"text": "Use this form to rate your experience and provide feedback about the incident.",
}
],
},
],
"close": {"type": "plain_text", "text": "Cancel"},
"submit": {"type": "plain_text", "text": "Submit"},
"callback_id": RatingFeedbackCallbackId.submit_form,
"private_metadata": json.dumps({"incident_id": str(incident.id), "channel_id": channel_id}),
}
rating_picker_options = []
for rating in FeedbackRating:
rating_picker_options.append(
{"text": {"type": "plain_text", "text": rating}, "value": rating}
)
rating_picker_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.rating,
"label": {"type": "plain_text", "text": "Rate your experience"},
"element": {
"type": "static_select",
"placeholder": {"type": "plain_text", "text": "Select a rating"},
"options": rating_picker_options,
},
"optional": False,
}
modal_template["blocks"].append(rating_picker_block)
feedback_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.feedback,
"label": {"type": "plain_text", "text": "Give us feedback"},
"element": {
"type": "plain_text_input",
"action_id": RatingFeedbackBlockId.feedback,
"placeholder": {
"type": "plain_text",
"text": "How would you describe your experience?",
},
"multiline": True,
},
"optional": False,
}
modal_template["blocks"].append(feedback_block)
anonymous_checkbox_block = {
"type": "input",
"block_id": RatingFeedbackBlockId.anonymous,
"label": {
"type": "plain_text",
"text": "Check the box if you wish to provide your feedback anonymously",
},
"element": {
"type": "checkboxes",
"action_id": RatingFeedbackBlockId.anonymous,
"options": [
{
"value": "anonymous",
"text": {"type": "plain_text", "text": "Anonymize my feedback"},
},
],
},
"optional": True,
}
modal_template["blocks"].append(anonymous_checkbox_block)
return modal_template | 5698c87cc7530decbff083130c0150085ff7871c | 3,648,721 |
def _GetTargetOS():
"""Returns the target os specified in args.gn file.
Returns an empty string if target_os is not specified.
"""
build_args = _GetBuildArgs()
return build_args['target_os'] if 'target_os' in build_args else '' | 930c56d18d55d12728f7aac594096f0697c03bb4 | 3,648,722 |
def generalise_sent_pos(s):
"""
generalise a sentence pattern by POS tags only
:param s: dict with a spaCy-parsed 'sent' and its 'anns' annotations
:return: dict with the sentence text and the generalised token pattern
"""
rets = []
for token in s['sent']:
e = token.idx + len(token.text)
is_matched = False
for ann in s['anns']:
if token.idx >= ann['s'] and e <= ann['e']:
rets.append((token.text, token.pos_, True, ann['signed_label'], ann['gt_label']))
is_matched = True
break
# print '%s-%s, %s: [%s]' % (token.idx, e, token.idx, token.text)
if not is_matched:
rets.append((token.text, token.pos_))
return {"sent": s['sent'].text, 'pattern': rets} | 03092bd253f13739d918438839ff4234f5ef80af | 3,648,723 |
def add_batting_metrics(df):
"""
Adds the following columns to a given DataFrame:
PA
1B
OBP
BA
SLG
OPS
ISO
HR%
K%
BB%
BABIP
wOBA
wRAA
wRC
Args:
df (DataFrame): the DataFrame to append additional stats to
Returns:
DataFrame of stats with additional columns
"""
df.loc[:, 'PA'] = df.apply(_calculate_pa, axis=1)
df = df.loc[df.PA > 0]
try:
df.loc[:, '1B'] = df.apply(_calculate_singles, axis=1)
df.loc[:, 'OBP'] = round((df['H'] + df['BB'] + df['IBB'] + df['HBP']) \
/df['PA'], ROUND_TO)
df.loc[:, 'BA'] = round(df['H'] / df['AB'], ROUND_TO)
df.loc[:, 'SLG'] = round((1 *df['1B'] + 2 * df['2B']+ 3 * df['3B'] \
+ 4 * df['HR']) /df['AB'], ROUND_TO)
df.loc[:, 'OPS'] = round(df['OBP'] + df['SLG'], ROUND_TO)
df.loc[:, 'ISO'] = round(df['SLG'] - df['BA'], ROUND_TO)
df.loc[:, 'HR%'] = round(df['HR'] / df['PA'], ROUND_TO)
df.loc[:, 'K%'] = round(df['K'] / df['PA'], ROUND_TO)
# df.loc[:, 'K%'] = round(df['K'] / df['PA'], ROUND_TO)*100
df.loc[:, 'BB%'] = round(df['BB'] / df['PA'], ROUND_TO)
# df.loc[:, 'BB%'] = round(df['BB'] / df['PA'], ROUND_TO)*100
df.loc[:, 'BABIP'] = round((df['H'] - df['HR']) \
/ (df['AB'] - df['K'] - df['HR'] \
+ df['SF']), ROUND_TO)
df.loc[:, 'wOBA'] = df.apply(_calculate_woba, axis=1)
df.loc[:, 'wRAA'] = df.apply(_calculate_wraa, axis=1)
df.loc[:, 'wRC'] = df.apply(_calculate_wrc, axis=1)
return df.sort_values(by='wOBA', ascending=False)
except:
print('no records found')
return pd.DataFrame() | 50a424696a1db32966b8b240823e326248f092aa | 3,648,724 |
def line_break(text, line_len=79, indent=1):
"""
Split some text into an array of lines.
Enter: text: the text to split.
line_len: the maximum length of a line.
indent: how much to indent all but the first line.
Exit: lines: an array of lines.
"""
lines = [text.rstrip()]
while len(lines[-1]) > line_len:
pos = lines[-1].rfind(' ', 0, line_len)
if pos < 0:
pos = line_len
lines[-1:] = [lines[-1][:pos].rstrip(), ' '*indent+lines[-1][
pos:].strip()]
return lines | 34b866109689796a4d428e7d3a68a34f7152250f | 3,648,725 |
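# Usage sketch for line_break() above, wrapping a long sentence at 20 characters
# with continuation lines indented by one space (the default).
wrapped = line_break("the quick brown fox jumps over the lazy dog", line_len=20)
print(wrapped)  # ['the quick brown fox', ' jumps over the', ' lazy dog']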
from nipype.pipeline import engine as pe
from nibabies.workflows.anatomical.preproc import init_anat_average_wf
from nibabies.workflows.anatomical.registration import init_coregistration_wf
from nibabies.workflows.anatomical.brain_extraction import (
init_infant_brain_extraction_wf,
)
from nibabies.workflows.anatomical.outputs import init_coreg_report_wf
def init_workflow(bids_path, output_path, participant_label, workdir=None):
"""Create the preprocessing workflow."""
wf = pe.Workflow(name="nibabies_anat")
for subid in participant_label:
sub_wf = pe.Workflow(name=f"nibabies_anat_{subid}")
t1w_files = list(
(bids_path / f"sub-{subid}" / "anat").glob(f"sub-{subid}*_T1w.nii.gz")
)
t2w_files = list(
(bids_path / f"sub-{subid}" / "anat").glob(f"sub-{subid}*_T2w.nii.gz")
)
t1w_ref = init_anat_average_wf(
num_maps=len(t1w_files), name="t1w_ref", omp_nthreads=8
)
t2w_ref = init_anat_average_wf(
num_maps=len(t2w_files), name="t2w_ref", omp_nthreads=8
)
t1w_ref.inputs.inputnode.in_files = [str(f) for f in t1w_files]
t2w_ref.inputs.inputnode.in_files = [str(f) for f in t2w_files]
be = init_infant_brain_extraction_wf(omp_nthreads=8, age_months=2)
cr = init_coregistration_wf(omp_nthreads=8, sloppy=True)
rpt = init_coreg_report_wf(output_dir=str(output_path.absolute()))
rpt.inputs.inputnode.source_file = [str(f) for f in t1w_files]
# fmt:off
sub_wf.connect([
(t2w_ref, be, [("outputnode.out_file", "inputnode.in_t2w")]),
(t1w_ref, cr, [("outputnode.out_file", "inputnode.in_t1w")]),
(be, cr, [
("outputnode.t2w_preproc", "inputnode.in_t2w_preproc"),
("outputnode.out_mask", "inputnode.in_mask"),
("outputnode.out_probmap", "inputnode.in_probmap"),
]),
(cr, rpt, [
("outputnode.t1w_preproc", "inputnode.t1w_preproc"),
("outputnode.t2w_preproc", "inputnode.t2w_preproc"),
("outputnode.t1w_mask", "inputnode.in_mask"),
]),
])
# fmt:on
wf.add_nodes([sub_wf])
if workdir:
wf.base_dir = workdir
return wf | 71bc9d3d0d11ebd93f608a66e115110272b23c67 | 3,648,726 |
def visualize_spectrum(y):
"""Effect that maps the Mel filterbank frequencies onto the LED strip"""
global _prev_spectrum
y = np.copy(interpolate(y, config.N_PIXELS // 2))
common_mode.update(y)
diff = y - _prev_spectrum
_prev_spectrum = np.copy(y)
# Color channel mappings
r = r_filt.update(y - common_mode.value)
g = np.abs(diff)
b = b_filt.update(np.copy(y))
# Mirror the color channels for symmetric output
r = np.concatenate((r[::-1], r))
g = np.concatenate((g[::-1], g))
b = np.concatenate((b[::-1], b))
output = np.array([r, g,b]) * 255
return output | ca20151f268f5124d7cf909a64ff1c668740a858 | 3,648,727 |
def testDuplicateContours(glyph):
"""
Contours shouldn't be duplicated on each other.
"""
contours = {}
for index, contour in enumerate(glyph):
contour = contour.copy()
contour.autoStartSegment()
pen = DigestPointPen()
contour.drawPoints(pen)
digest = pen.getDigest()
if digest not in contours:
contours[digest] = []
contours[digest].append(index)
duplicateContours = []
for digest, indexes in contours.items():
if len(indexes) > 1:
duplicateContours.append(indexes[0])
return duplicateContours | ed3f5499dfbf36192f025a0551b252bd2623216f | 3,648,728 |
import re
def expand_strength(row):
"""Extracts information from Strength column and cleans remaining strengths.
Gets Additional Info and Final Volume Quantity columns from Strength.
Reformats any malformed strengths and removes commas from within numbers.
"""
strengths = row['Strength']
# search for additional info marked by double asterisks
if '*' in strengths:
additional_info = ''
for tag in ADDITIONAL_INFO_TAGS:
if tag in strengths:
additional_info = additional_info + tag + '. '
strengths = strengths.replace(tag, '')
row['AdditionalInfo'] = additional_info
# search for final final Reconstituted Solution Volume quantity
if re.match(r"(.*)?\(\d*[.,]?\d+\s*ML\)", strengths):
paren = re.search(r"\(\d*[.,]?\d+\s*ML\)", strengths)
strengths = re.sub(r"\(\d*[.,]?\d+\s*ML\)", '', strengths).strip()
row['FinalVolQty'] = get_qty_format(paren[0].strip('()'))
# replace malformed strings for better formatting
for bad_format, improved_format in ILL_FORMATTED_STRENGTHS.items():
strengths = strengths.replace(bad_format, improved_format)
# determine if there is a semi colon anywhere between two parentheses
paren = re.findall(r'[\(][^)]*;.*?[\)]', strengths)
if paren:
strengths = reformat_paren_with_semi(strengths)
# remove comma from numbers
strengths = re.sub(r'(\d),(\d)', r'\1\2', strengths)
row['CleanStrength'] = strengths.strip()
return row | e6520fa26a4f8a75954db4e957a4ed7f3e52f8a2 | 3,648,729 |
def dem_autoload(geometries, demType, vrt=None, buffer=None, username=None, password=None,
product='dem', nodata=None, hide_nodata=False):
"""
obtain all relevant DEM tiles for selected geometries
Parameters
----------
geometries: list[spatialist.vector.Vector]
a list of :class:`spatialist.vector.Vector` geometries to obtain DEM data for;
CRS must be WGS84 LatLon (EPSG 4326)
demType: str
the type of DEM to be used; current options:
- 'AW3D30' (ALOS Global Digital Surface Model "ALOS World 3D - 30m")
* info: https://www.eorc.jaxa.jp/ALOS/en/aw3d30/index.htm
* url: ftp://ftp.eorc.jaxa.jp/pub/ALOS/ext1/AW3D30/release_v1804
* height reference: EGM96
- 'Copernicus 10m EEA DEM' (Copernicus 10 m DEM available over EEA-39 countries)
* registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration
* url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_EEA-10-DGED/2021_1
* height reference: EGM2008
- 'Copernicus 30m Global DEM'
* info: https://copernicus-dem-30m.s3.amazonaws.com/readme.html
* url: https://copernicus-dem-30m.s3.eu-central-1.amazonaws.com/
* height reference: EGM2008
- 'Copernicus 30m Global DEM II'
* registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration
* url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_GLO-30-DGED/2021_1
* height reference: EGM2008
- 'Copernicus 90m Global DEM'
* info: https://copernicus-dem-90m.s3.amazonaws.com/readme.html
* url: https://copernicus-dem-90m.s3.eu-central-1.amazonaws.com/
* height reference: EGM2008
- 'Copernicus 90m Global DEM II'
* registration: https://spacedata.copernicus.eu/web/cscda/data-access/registration
* url: ftps://cdsdata.copernicus.eu/DEM-datasets/COP-DEM_GLO-90-DGED/2021_1
* height reference: EGM2008
- 'GETASSE30'
* info: https://seadas.gsfc.nasa.gov/help-8.1.0/desktop/GETASSE30ElevationModel.html
* url: https://step.esa.int/auxdata/dem/GETASSE30
* height reference: WGS84
- 'SRTM 1Sec HGT'
* url: https://step.esa.int/auxdata/dem/SRTMGL1
* height reference: EGM96
- 'SRTM 3Sec'
* url: https://srtm.csi.cgiar.org/wp-content/uploads/files/srtm_5x5/TIFF
* height reference: EGM96
- 'TDX90m'
* registration: https://geoservice.dlr.de/web/dataguide/tdm90
* url: ftpes://tandemx-90m.dlr.de
* height reference: WGS84
vrt: str or None
an optional GDAL VRT file created from the obtained DEM tiles
buffer: int, float, None
a buffer in degrees to add around the individual geometries
username: str or None
(optional) the user name for services requiring registration
password: str or None
(optional) the password for the registration account
product: str
the sub-product to extract from the DEM product.
The following options are available for the respective DEM types:
- 'AW3D30'
* 'dem': the actual Digital Elevation Model
* 'msk': mask information for each pixel (Cloud/Snow Mask, Land water and
low correlation mask, Sea mask, Information of elevation dataset used
for the void-filling processing)
* 'stk': number of DSM-scene files which were used to produce the 5 m resolution DSM
- 'Copernicus 10m EEA DEM'
* 'dem': the actual Digital Elevation Model
* 'edm': editing mask
* 'flm': filling mask
* 'hem': height error mask
* 'wbm': water body mask
- 'Copernicus 30m Global DEM'
* 'dem': the actual Digital Elevation Model
- 'Copernicus 30m Global DEM II'
* 'dem': the actual Digital Elevation Model
* 'edm': editing mask
* 'flm': filling mask
* 'hem': height error mask
* 'wbm': water body mask
- 'Copernicus 90m Global DEM'
* 'dem': the actual Digital Elevation Model
- 'Copernicus 90m Global DEM II'
* 'dem': the actual Digital Elevation Model
* 'edm': editing mask
* 'flm': filling mask
* 'hem': height error mask
* 'wbm': water body mask
- 'GETASSE30'
* 'dem': the actual Digital Elevation Model
- 'SRTM 1Sec HGT'
* 'dem': the actual Digital Elevation Model
- 'SRTM 3Sec'
* 'dem': the actual Digital Elevation Model
- 'TDX90m'
* 'dem': the actual Digital Elevation Model
* 'am2': Amplitude Mosaic representing the minimum value
* 'amp': Amplitude Mosaic representing the mean value
* 'com': Consistency Mask
* 'cov': Coverage Map
* 'hem': Height Error Map
* 'lsm': Layover and Shadow Mask, based on SRTM C-band and Globe DEM data
* 'wam': Water Indication Mask
Returns
-------
list or None
the names of the obtained files or None if a VRT file was defined
Examples
--------
download all SRTM 1 arcsec DEMs overlapping with a Sentinel-1 scene and mosaic them to a single GeoTIFF file
.. code-block:: python
from pyroSAR import identify
from pyroSAR.auxdata import dem_autoload
from spatialist import gdalwarp
# identify the SAR scene
filename = 'S1A_IW_SLC__1SDV_20150330T170734_20150330T170801_005264_006A6C_DA69.zip'
scene = identify(filename)
# extract the bounding box as spatialist.Vector object
bbox = scene.bbox()
# download the tiles and virtually combine them in an in-memory
# VRT file subsetted to the extent of the SAR scene plus a buffer of 0.01 degrees
vrt = '/vsimem/srtm1.vrt'
dem_autoload(geometries=[bbox], demType='SRTM 1Sec HGT',
vrt=vrt, buffer=0.01)
# write the final GeoTIFF file
outname = scene.outname_base() + 'srtm1.tif'
gdalwarp(src=vrt, dst=outname, options={'format': 'GTiff'})
# alternatively use function dem_create and warp the DEM to UTM
# including conversion from geoid to ellipsoid heights
from pyroSAR.auxdata import dem_create
outname = scene.outname_base() + 'srtm1_ellp.tif'
dem_create(src=vrt, dst=outname, t_srs=32632, tr=(30, 30),
geoid_convert=True, geoid='EGM96')
"""
with DEMHandler(geometries) as handler:
return handler.load(demType=demType,
username=username,
password=password,
vrt=vrt,
buffer=buffer,
product=product,
nodata=nodata,
hide_nodata=hide_nodata) | 8b95d2ae3a518cd8181c21d138f7754146e9acd1 | 3,648,730 |
import string
import random
def randomString(stringLength=6):
"""Generate a random string of fixed length """
letters = string.ascii_uppercase
return ''.join(random.choice(letters) for i in range(stringLength)) | 8652f4039d7b3ea024001e965e81d4b742b6c2e8 | 3,648,731 |
def create_new_record(account,userName,password):
"""
Function that creates new records for a given user account
"""
new_record = Records(account,userName,password)
return new_record | 7e87347dbb1526168cdb7ebee1268a67381682e0 | 3,648,732 |
def is_bow(vec):
"""
Checks if a vector is in the sparse Gensim BoW format
"""
return matutils.isbow(vec) | 0e180f1cd0319a366492c692b73459302407a9da | 3,648,733 |
import asyncio
def timeout(seconds, loop=None):
"""
Returns a channel that closes itself after `seconds`.
:param seconds: time before the channel is closed
:param loop: you can optionally specify the loop on which the returned channel is intended to be used.
:return: the timeout channel
"""
c = Chan(loop=loop or asyncio.get_event_loop())
c.loop.call_later(seconds, c.close)
return c | c2cc8c362123a5dd9c8b867a78b23cb32c0f4330 | 3,648,734 |
def analyse_df(df, options):
"""
Analyses a dataframe, creating a metadata object
based on the passed options. Metadata objects can be
used to apply dataset preparations across multiple platforms.
"""
_validate_options(options)
metadata = _create_metadata(options)
for proc in options["procs"]:
if proc == "FillMissing":
_analyse_fill_missing(df, options, metadata)
elif proc == "Normalize":
_analyse_standardization(df, options, metadata)
elif proc == "Categorify":
_analyse_categorization(df, options, metadata)
else:
raise ValueError("Unsupported proc type in options " + proc)
metadata["columns"][options["dep_var"]] = {"type": COL_TYPES.DEPVAR.value}
return metadata | 8ccefe92dabca1da71b32bc941ab320f04e1b437 | 3,648,735 |
def base_route():
"""
Base route to any page; currently redirects to the links page.
"""
return redirect(url_for("pages.links")) | e01efc921ac2d55c8de3f8b2b20fa68b10f0d834 | 3,648,736 |
def blend_normal(source, source_mask, target):
"""Blend source on top of target image using weighted alpha blending.
Args:
source (np.ndarray): Array of shape (H, W, C) which contains the source
image (dtype np.uint8).
source_mask (np.ndarray): Array of shape (H, W) which contains the
source foreground mask (dtype np.uint8). Background pixels should be
assigned 0 and foreground 255. Values inbetween are used to interpolate
between source and target.
target (np.ndarray): Array of shape (H, W, C) which contains the target
image.
Returns:
output (np.ndarray): Array of the same shape as target containing the
blended image.
"""
return paste_to(source, source_mask, target, (0, 0)) | af6b4206eafc019aa2075772541cb6cb195f2ad1 | 3,648,737 |
def load_dataset(name):
"""
Load a dataset.
"""
try:
func = loaders[name]
except KeyError:
print(f"Dataset '{name}' is not in available list: {list_datasets()}")
else:
return func() | fd57b34625590a8c0680c6b46d35980245ec6e5c | 3,648,738 |
import tokenize
def split_symbols_implicit_precedence(tokens, local_dict, global_dict): # pragma: no cover
"""Replace the sympy builtin split_symbols with a version respecting implicit multiplication.
By replacing this we can better cope with expressions like 1/xyz being
equivalent to 1/(x*y*z) rather than (y*z)/x as is the default. However it
cannot address issues like 1/2x becoming (1/2)*x rather than 1/(2*x), because
Python's tokeniser does not respect whitespace and so cannot distinguish
between '1/2 x' and '1/2x'.
This transformation is unlikely to be used, but is provided as proof of concept.
"""
result = []
split = False
split_previous = False
for tok in tokens:
if split_previous:
# throw out closing parenthesis of Symbol that was split
split_previous = False
continue
split_previous = False
if tok[0] == tokenize.NAME and tok[1] == 'Symbol':
split = True
elif split and tok[0] == tokenize.NAME:
symbol = tok[1][1:-1]
if sympy_parser._token_splittable(symbol):
# If we're splitting this symbol, wrap it in brackets by adding
# them before the call to Symbol:
result = result[:-2] + [(tokenize.OP, '(')] + result[-2:]
for char in symbol:
if char in local_dict or char in global_dict:
# Get rid of the call to Symbol
del result[-2:]
result.extend([(tokenize.NAME, "{}".format(char)),
(tokenize.NAME, 'Symbol'), (tokenize.OP, '(')])
else:
result.extend([(tokenize.NAME, "'{}'".format(char)), (tokenize.OP, ')'),
(tokenize.NAME, 'Symbol'), (tokenize.OP, '(')])
# Delete the last two tokens: get rid of the extraneous
# Symbol( we just added
# Also, set split_previous=True so will skip
# the closing parenthesis of the original Symbol
del result[-2:]
split = False
split_previous = True
# Then close the extra brackets we added:
result.append((tokenize.OP, ')'))
continue
else:
split = False
result.append(tok)
return result | a26adc14eec622b04e454b0a426ee37ac26e9bef | 3,648,739 |
def altsumma(f, k, p):
"""Return the sum of f(i) for i = k, k+1, ... while p(i) holds true, or 0 if p(k) is false.
This is an implementation of the Summation formula from Kahan,
see Theorem 8 in Goldberg, David 'What Every Computer Scientist
Should Know About Floating-Point Arithmetic', ACM Computer Survey,
Vol. 23, No. 1, March 1991."""
if not p(k):
return 0
else:
S = f(k)
C = 0
j = k + 1
while p(j):
Y = f(j) - C
T = S + Y
C = (T - S) - Y
S = T
j += 1
return S | 952e77fcedfbe01658342126d95b79175c082976 | 3,648,740 |
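# Usage sketch for altsumma() above: compensated (Kahan) summation of 1/i^2
# for i = 1..10000, which should approach pi^2/6.
print(altsumma(lambda i: 1.0 / (i * i), 1, lambda i: i <= 10000))  # ~1.64483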
from re import T
import functools
from datetime import datetime
from typing import cast
def action_logging(f: T) -> T:
"""
Decorates a function so that action logging is submitted around its execution
in a CLI context. It will call the action logger callbacks twice,
one for pre-execution and the other one for post-execution.
Action logger will be called with below keyword parameters:
sub_command : name of sub-command
start_datetime : start datetime instance by utc
end_datetime : end datetime instance by utc
full_command : full command line arguments
user : current user
log : airflow.models.log.Log ORM instance
dag_id : dag id (optional)
task_id : task_id (optional)
execution_date : execution date (optional)
error : exception instance if there's an exception
:param f: function instance
:return: wrapped function
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
A wrapper for CLI functions. It assumes that a Namespace instance
is passed as the first positional argument.
:param args: Positional arguments; the first must be an argparse.Namespace instance
:param kwargs: Passthrough keyword arguments
"""
if not args:
raise ValueError("Args should be set")
if not isinstance(args[0], Namespace):
raise ValueError("1st positional argument should be argparse.Namespace instance,"
f"but is {type(args[0])}")
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
try:
return f(*args, **kwargs)
except Exception as e:
metrics['error'] = e
raise
finally:
metrics['end_datetime'] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return cast(T, wrapper) | 4a524549b6c3c19928e70be460482061b71d2197 | 3,648,741 |
def decode_public_id(str_id):
"""
Make a numeric ID from a 4-character ID.
Args:
str_id (str): ID consisting of one digit followed by 3 letters
Return:
num_id (int): numeric ID
"""
def alpha2num(c):
return encoder.find(c)
def num2num(c):
return 5 if c == '9' else int(c) - 3
alphas = [alpha2num(c) for c in str_id[1:]]
alphas.insert(0, num2num(str_id[0]))
return sum(alphas[i] * 18**(3-i) for i in range(4)) | 8425ffe2d0fd7161507734a3b452d48d78e97246 | 3,648,742 |
def get_sentence_idcs_in_split(datasplit: DataFrame, split_id: int):
"""Given a dataset split id (1 for train, 2 for test, 3 for dev), returns the set of corresponding sentence
indices in sentences_df."""
return set(datasplit[datasplit["splitset_label"] == split_id]["sentence_index"]) | b10d05dec6c70fae6a31eb017afe4d110a6bc23a | 3,648,743 |
def valid_scope_list():
"""List all the oscilloscope types."""
s = "\nValid types are:\n"
s += ", ".join(DS1000C_scopes) + "\n"
s += ", ".join(DS1000E_scopes) + "\n"
s += ", ".join(DS1000Z_scopes) + "\n"
s += ", ".join(DS4000_scopes) + "\n"
s += ", ".join(DS6000_scopes) + "\n"
return s | 2d6dfb7ae6ea5c62ebe674fa1018202aa5f9f0ac | 3,648,744 |
def load():
"""Returns an instance of the plugin"""
return SyslogOutOutputPlugin | 072c827e96e85b04e1109a1788e92502729de09d | 3,648,746 |
def word_sorter(x):
"""
Function to sort the word/frequency pairs by frequency.
Lowest-frequency collocates come first, highest-frequency collocates last.
"""
# getting length of list of word/frequency pairs
lst = len(x)
# sort by frequency
for i in range(0, lst):
for j in range(0, lst-i-1):
if (x[j][1] > x[j + 1][1]):
temp = x[j]
x[j]= x[j + 1]
x[j + 1] = temp
return(x) | 571570bb03d6473b9c6839aa6fdc0b1ba8efbe3c | 3,648,747 |
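# Usage sketch for word_sorter() above: sorts (word, frequency) pairs in place
# by ascending frequency and returns the same list.
pairs = [("the", 5), ("cat", 2), ("sat", 3)]
print(word_sorter(pairs))  # [('cat', 2), ('sat', 3), ('the', 5)]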
def le_assinatura():
"""Reads the values of the model's linguistic traits and returns a signature
to be compared against the provided texts.
Returns:
[list] -- the signature values [wal, ttr, hlr, sal, sac, pal]
"""
print("Bem-vindo ao detector automático de COH-PIAH.")
print("Informe a assinatura típica de um aluno infectado:")
wal = float(input("Entre o tamanho médio de palavra:"))
ttr = float(input("Entre a relação Type-Token:"))
hlr = float(input("Entre a Razão Hapax Legomana:"))
sal = float(input("Entre o tamanho médio de sentença:"))
sac = float(input("Entre a complexidade média da sentença:"))
pal = float(input("Entre o tamanho medio de frase:"))
return [wal, ttr, hlr, sal, sac, pal] | b6a0bacb02f3f878a88a6681d87d11408c292fe2 | 3,648,748 |
def category_task_delete(request, structure_slug, category_slug,
task_id, structure):
"""
Deletes task from a category
:type structure_slug: String
:type category_slug: String
:type task_id: String
:type structure: OrganizationalStructure (from @is_manager)
:param structure_slug: structure slug
:param category_slug: category slug
:param task_id: task code
:param structure: structure object (from @is_manager)
:return: render
"""
category = get_object_or_404(TicketCategory,
organizational_structure=structure,
slug=category_slug)
task = get_object_or_404(TicketCategoryTask,
code=task_id,
category=category)
messages.add_message(request, messages.SUCCESS,
_("Attività {} eliminata correttamente").format(task))
# log action
logger.info('[{}] manager of structure {}'
' {} deleted a task'
' for category {}'.format(timezone.localtime(),
structure,
request.user,
category))
delete_directory(task.get_folder())
task.delete()
return redirect('uni_ticket:manager_category_detail',
structure_slug=structure_slug,
category_slug=category_slug) | 3329c2342f6b115ab33df334df27a26ab8f3fc79 | 3,648,749 |
def lnposterior_selection(lnprobability, sig_fact=3., quantile=75, quantile_walker=50, verbose=1):
"""Return selected walkers based on their lnposterior values.
:param np.array lnprobability: Values of the lnprobability taken by each walker at each iteration
:param float sig_fact: acceptance fraction below quantile - sig_fact * sigma will be rejected
:param float quantile: Quantile to use as reference lnprobability value.
:param float quantile_walker: Quantile used to assess the lnprobability for each walker. 50 is
the median, 100 is the highest lnprobability.
:param int verbose: if 1 speaks otherwise not
:return list_of_int l_selected_walker: list of selected walker
:return int nb_rejected: number of rejected walker
"""
walkers_percentile_lnposterior = percentile(
lnprobability, quantile_walker, axis=1)
percentile_lnposterior = percentile(
walkers_percentile_lnposterior, quantile)
mad_lnposterior = mad(walkers_percentile_lnposterior)
if verbose == 1:
logger.info("lnposterior of the walkers: {}\nquantile {}%: {}, MAD:{}"
"".format(walkers_percentile_lnposterior, quantile, percentile_lnposterior,
mad_lnposterior))
l_selected_walker = where(walkers_percentile_lnposterior > (
percentile_lnposterior - (sig_fact * mad_lnposterior)))[0]
nb_rejected = lnprobability.shape[0] - len(l_selected_walker)
if verbose == 1:
logger.info(
"Number of rejected walkers: {}/{}".format(nb_rejected, lnprobability.shape[0]))
return l_selected_walker, nb_rejected | 4c489ee8b3edaca5baad033101236e4165f37041 | 3,648,750 |
def space_check(board, position):
"""Returns boolean value whether the cell is free or not."""
return board[position] not in PLAYERS_MARKS | d335a2dd441e7b761e6f9680905e993b12b3e8f7 | 3,648,751 |
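# Usage sketch for space_check() above; PLAYERS_MARKS is defined elsewhere in
# the original module, so a plausible value is assumed here for illustration.
PLAYERS_MARKS = ('X', 'O')
board = [' '] * 10
board[5] = 'X'
print(space_check(board, 5), space_check(board, 3))  # False True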
import functools
def validate_params(serializer_cls):
"""Validate request parameters using a Serializer."""
def decorator(func):
@functools.wraps(func)
def wrapper(request: Request):
data = request.query_params if request.method == "GET" else request.data
serializer = serializer_cls(data=data)
serializer.is_valid(raise_exception=True)
setattr(request, "validated_data", serializer.validated_data)
return func(request)
return wrapper
return decorator | c2573c131a01ede9862efac3bf3b280b2f426214 | 3,648,753 |
from typing import Optional
import gc
def get_pairs_observations(kdata: kapture.Kapture,
kdata_query: Optional[kapture.Kapture],
keypoints_type: str,
max_number_of_threads: Optional[int],
iou: bool,
topk: int):
"""
get observations pairs as list
"""
if iou:
individual_observations = get_observation_images(keypoints_type,
kdata, kdata_query,
max_number_of_threads)
gc.collect()
else:
individual_observations = None
all_pairs = get_observation_image_pairs(keypoints_type,
kdata, kdata_query,
max_number_of_threads)
if iou:
assert individual_observations is not None
final_pairs = {}
for img1 in all_pairs.keys():
for img2 in all_pairs[img1].keys():
if img1 not in final_pairs:
final_pairs[img1] = {}
union = individual_observations[img1] + individual_observations[img2] - all_pairs[img1][img2]
if union == 0:
final_pairs[img1][img2] = 0
else:
final_pairs[img1][img2] = all_pairs[img1][img2] / union
all_pairs = final_pairs
getLogger().info('ranking co-observation pairs...')
assert kdata.records_camera is not None
image_pairs = get_topk_observation_pairs(all_pairs, kdata.records_camera, topk)
return image_pairs | cb9b04c88d2dec30c1d782be602c8ab615ddb65f | 3,648,754 |
def col2im_conv(col, input, layer, h_out, w_out):
"""Convert columns back to an image
Args:
col: shape = (k*k, c, h_out*w_out)
input: a dictionary contains input data and shape information
layer: one cnn layer, defined in testLeNet.py
h_out: output height
w_out: output width
Returns:
im: shape = (h_in, w_in, c)
"""
h_in = input['height']
w_in = input['width']
c = input['channel']
k = layer['k']
stride = layer['stride']
im = np.zeros((h_in, w_in, c))
col = np.reshape(col, (k*k*c, h_out*w_out))
for h in range(h_out):
for w in range(w_out):
im[h*stride: h*stride+k, w*stride: w*stride+k, :] = \
im[h*stride: h*stride+k, w*stride: w*stride+k, :] + \
np.reshape(col[:, h*w_out + w], (k, k, c))
return im | 408b1ea43eac3c25cc32909a4f32e8380e2ff0d4 | 3,648,755 |
def fix_missing_period(line):
"""Adds a period to a line that is missing a period"""
if line == "":
return line
if line[-1] in END_TOKENS:
return line
return line + " ." | c886384b634ed9e88e37c539b541c3037029c056 | 3,648,758 |
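# Usage sketch for fix_missing_period() above; END_TOKENS comes from the
# surrounding module, so a representative value is assumed here.
END_TOKENS = ['.', '!', '?', "'", '"', ')']
print(fix_missing_period("hello world"))   # 'hello world .'
print(fix_missing_period("hello world!"))  # 'hello world!' (unchanged)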
async def main() -> int:
"""main async portion of command-line runner. returns status code 0-255"""
try:
await tome.database.connect()
return await migrations.main(
whither=args.whither, conn=tome.database.connection(), dry_run=args.dry_run
)
except KeyboardInterrupt:
logger.critical("keyboard interrupt")
try:
await tome.database.disconnect()
except Exception as e:
logger.error("failed to cleanly disconnect database", exc_info=e)
logger.info("rolled back")
return 130
except Exception as e:
logger.critical("a fatal error occurred!", exc_info=e)
await tome.database.disconnect()
logger.info("rolled back")
return 3
finally:
await tome.database.disconnect() | d72a0e30aac405cc87a39bda8d31e53119162c0f | 3,648,759 |
def getTaskStatus( task_id ) :
"""Get tuple of Instance status and corresponding status string.
'task_id' is the DB record ID."""
_inst = Instance.objects.get( id = task_id )
return ( _inst.status , STATUS2TEXT[_inst.status] ) | 48fc342d16bd6d9f1f1b912f1fef00a4ae9ed932 | 3,648,760 |
def fit_affine_matrix(p1, p2):
""" Fit affine matrix such that p2 * H = p1
Hint:
You can use np.linalg.lstsq function to solve the problem.
Args:
p1: an array of shape (M, P)
p2: an array of shape (M, P)
Return:
H: a matrix of shape (P, P) that transform p2 to p1.
"""
assert (p1.shape[0] == p2.shape[0]),\
'Different number of points in p1 and p2'
p1 = pad(p1)
p2 = pad(p2)
### YOUR CODE HERE
H = np.linalg.lstsq(p2, p1, rcond=None)[0]
### END YOUR CODE
# Sometimes numerical issues cause least-squares to produce the last
# column which is not exactly [0, 0, 1]
H[:,2] = np.array([0, 0, 1])
return H | 302d7a30bfe11f343016de7f42708b71b6df3f3b | 3,648,761 |
def cnn_lstm_nd(pfac,
max_features=NUM_WORDS,
maxlen=SEQUENCE_LENGTH,
lstm_cell_size=CNNLSTM_CELL_SIZE,
embedding_size=EMBEDDING_SIZE):
"""CNN-LSTM model, modified from Keras example."""
# From github.com/keras-team/keras/blob/master/examples/imdb_cnn_lstm.py
filters = 64
kernel_size = 5
pool_size = 4
model = Sequential()
model.add(pfac(Embedding(max_features, embedding_size, input_length=maxlen,
name='embedding')))
model.add(pfac(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1,
name='conv')))
model.add(MaxPooling1D(pool_size=pool_size))
model.add(pfac(LSTM(lstm_cell_size, name='lstm')))
model.add(pfac(Dense(2, name='dense')))
return model | cf5573dc6b9f02549cb40505d0d224c90ddccf90 | 3,648,762 |
def updateUserPassword(userName, newPassword):
""" update the user password """
cursor = conn('open')
if cursor:
try:
cursor.execute("UPDATE user SET u_Pass= %s where u_name= %s ",
(generate_password_hash(newPassword), userName))
conn("commit")
conn('close')
except Exception as e:
return False
else:
conn('close')
return False
return True | 9aac85b6393e60121b3ec4aad943b87613173298 | 3,648,763 |
def mit_b1(**kwargs):
"""
Constructs a mit_b1 model.
Arguments:
"""
model = MixVisionTransformer(embed_dims=64, num_layers=[2, 2, 2, 2], num_heads=[1, 2, 5, 8], **kwargs)
return model | a38e4d09646a29f0648379ba02a42bb079da9917 | 3,648,764 |
def GenerateConfig(_):
"""Returns empty string."""
return '' | ed42eb1c320ca1df25603a53d4abf4a1b14215f3 | 3,648,765 |
def calc_procrustes(points1, points2, return_tform=False):
""" Align the predicted entity in some optimality sense with the ground truth.
Does NOT align scale
https://github.com/shreyashampali/ho3d/blob/master/eval.py """
t1 = points1.mean(0) # Find centroid
t2 = points2.mean(0)
points1_t = points1 - t1 # Zero mean
points2_t = points2 - t2
R, s = orthogonal_procrustes(points1_t, points2_t) # Run procrustes alignment, returns rotation matrix and scale
points2_t = np.dot(points2_t, R.T) # Apply tform to second pointcloud
points2_t = points2_t + t1
if return_tform:
return R, t1 - t2
else:
return points2_t | 01f3957624a764f3d6853a402a9d0d9c8b933766 | 3,648,766 |
from pathlib import Path
import torch
def read_image_pillow(input_filename: Path) -> torch.Tensor:
"""
Read an image file with pillow and return a torch.Tensor.
:param input_filename: Source image file path.
:return: torch.Tensor of shape (C, H, W).
"""
pil_image = Image.open(input_filename)
torch_tensor = TF.to_tensor(pil_image)
return torch_tensor | dec3cdfddaec4e6f50a6c5fc1ba384d217c0637b | 3,648,767 |
def compute_solar_angle(balloon_state: balloon.BalloonState) -> float:
"""Computes the solar angle relative to the balloon's position.
Args:
balloon_state: current state of the balloon.
Returns:
Solar angle at the balloon's position.
"""
el_degree, _, _ = solar.solar_calculator(
balloon_state.latlng,
balloon_state.date_time)
return el_degree | f11f91b282ee4897e444591d8eb14377c8001fa9 | 3,648,768 |
from typing import Optional
from typing import Tuple
from typing import List
def parse_links(source_file: str, root_url: Optional[str]=None) -> Tuple[List[Link], str]:
"""parse a list of URLs with their metadata from an
RSS feed, bookmarks export, or text file
"""
check_url_parsing_invariants()
timer = TimedProgress(TIMEOUT * 4)
with open(source_file, 'r', encoding='utf-8') as file:
links, parser = run_parser_functions(file, timer, root_url=root_url)
timer.end()
if parser is None:
return [], 'Failed to parse'
return links, parser | d13207d1415b02d9f39b15d6b1f2435b8304500b | 3,648,769 |
from typing import Iterable
from typing import Any
from typing import cast
def executemany(c: CursorType, sql: str, args: Iterable[Iterable[Any]]) -> OtherResult:
"""
Call c.executemany with the given SQL statement and arguments, and return c.
The mysql-type-plugin for mysql will special-type this function such
that the number and types of args match what is expected for the query
by analyzing the mysql-schema.sql file in the project root.
"""
c.executemany(sql, args)
return cast(OtherResult, c) | 25f93840189a99e2c90bf5d63cd49558f497e38f | 3,648,770 |
import click
import struct
def validate_host():
"""Ensure that the script is being run on a supported platform."""
supported_opsys = ["darwin", "linux"]
supported_machine = ["amd64"]
opsys, machine = get_platform()
if opsys not in supported_opsys:
click.secho(
f"this application is currently not known to support {opsys}",
fg="red",
)
raise SystemExit(2)
if machine not in supported_machine:
click.secho(
f"this application is currently not known to support running on {machine} machines",
fg="red",
)
if struct.calcsize("P") * 8 != 64:
click.secho(
"this application can only be run on 64 bit hosts, in 64 bit mode", fg="red"
)
raise SystemExit(2)
return True | e0346ebd8d80062a2be7666d1d89d40803dd069f | 3,648,772 |
def EAD_asset(x,RPs,curves,positions):
"""
Calculates the expected annual damage for one road segment (i.e. a row of the DataFrame containing the results)
based on the damage per return period and the flood protection level.
WARNING: THIS FUNCTION PROBABLY REALLY SLOWS DOWN THE OVERALL POST-PROCESSING SCRIPT (COMPARE ITS PERFORMANCE WITH
THE CLIMATE CHANGE SCRIPT) Most likely, the cause is the use of a loop and an if-statement for the manipulation; this is not
smart for a function that is applied on a DataFrame!!!
Arguments:
*x* (Geopandas Series) - A row of the GeoPandas DataFrame containing all road segments, should have dam cols, rps, flood protection level
*RPs* (List) - return periods of the damage data, in descending order e.g. [500,200,100,50,20,10]
*curves* (List) - short names of damage curves for which to calculate the EAD e.g. ["C1","C2","C3","C4","C5","C6","HZ"]
*positions* (List) - tuple positions of available max damage estimates e.g. [0,1,2,3,4]
Returns:
*x* (Geopandas Series) - with added the new columns containing EADs
"""
PL = x["Jongman_FP"]
RPs_copy = [y for y in RPs] #make a new list, or it will be altered in the function and mess up everything!!!
for curve in curves:
damcols = ["dam_{}_rp{}".format(curve,rp) for rp in RPs] #this was RPs; but somehow this did not work
EAD = [0,0,0,0,0] #initialize empty lists
for pos in positions: #iterate over all the max damage estimates
dam = list(x[damcols].apply(lambda y: pick_tuple(y,pos)).values) #creates a numpy array with the damage values of the desired max_dam
EAD[pos] = risk_FP(dam,RPs_copy,PL)
if not curve == "HZ": #save results to the series, which will be returned as a row in the df
x["EAD_{}".format(curve)] = tuple(EAD)
else:
x["EAD_HZ"] = EAD[0]
return x | f2cae0626ff55f81277314d9f45dadcba480c7fd | 3,648,773 |
def procrustes_alignment(data, reference=None, n_iter=10, tol=1e-5,
return_reference=False, verbose=False):
"""Iterative alignment using generalized procrustes analysis.
Parameters
----------
data : list of ndarrays, shape = (n_samples, n_feat)
List of datasets to align.
reference : ndarray, shape = (n_samples, n_feat), optional
Dataset to use as reference in the first iteration. If None, the first
dataset in `data` is used as reference. Default is None.
n_iter : int, optional
Number of iterations. Default is 10.
tol : float, optional
Tolerance for stopping criteria. Default is 1e-5.
return_reference : bool, optional
Whether to return the reference dataset built in the last iteration.
Default is False.
verbose : bool, optional
Verbosity. Default is False.
Returns
-------
aligned : list of ndarray, shape = (n_samples, n_feat)
Aligned datasets.
mean_dataset : ndarray, shape = (n_samples, n_feat)
Reference dataset built in the last iteration. Only if
``return_reference == True``.
"""
if n_iter <= 0:
raise ValueError('A positive number of iterations is required.')
if reference is None:
# Use the first item to build the initial reference
aligned = [data[0]] + [procrustes(d, data[0]) for d in data[1:]]
reference = np.mean(aligned, axis=0)
else:
aligned = [None] * len(data)
reference = reference.copy()
dist = np.inf
for i in range(n_iter):
# Align to reference
aligned = [procrustes(d, reference) for d in data]
# Compute new mean
new_reference = np.mean(aligned, axis=0)
# Compute distance
reference -= new_reference
reference **= 2
new_dist = reference.sum()
# Update reference
reference = new_reference
if verbose:
print('Iteration {0:>3}: {1:.6f}'.format(i, new_dist))
if dist != np.inf and np.abs(new_dist - dist) < tol:
break
dist = new_dist
return (aligned, reference) if return_reference else aligned | 1452c124d88cf00f9c0d45bf7ba0e1db449657d3 | 3,648,774 |
def get_paramsets(args, nuisance_paramset):
"""Make the paramsets for generating the Asimov MC sample and also running
the MCMC.
"""
asimov_paramset = []
llh_paramset = []
gf_nuisance = [x for x in nuisance_paramset.from_tag(ParamTag.NUISANCE)]
llh_paramset.extend(
[x for x in nuisance_paramset.from_tag(ParamTag.SM_ANGLES)]
)
llh_paramset.extend(gf_nuisance)
for parm in llh_paramset:
parm.value = args.__getattribute__(parm.name)
boundaries = fr_utils.SCALE_BOUNDARIES[args.dimension]
tag = ParamTag.SCALE
llh_paramset.append(
Param(
name='logLam', value=np.mean(boundaries), ranges=boundaries, std=3,
tex=r'{\rm log}_{10}\left (\Lambda^{-1}' + \
misc_utils.get_units(args.dimension)+r'\right )',
tag=tag
)
)
llh_paramset = ParamSet(llh_paramset)
tag = ParamTag.BESTFIT
if args.data is not DataType.REAL:
flavor_angles = fr_utils.fr_to_angles(args.injected_ratio)
else:
flavor_angles = fr_utils.fr_to_angles([1, 1, 1])
asimov_paramset.extend(gf_nuisance)
asimov_paramset.extend([
Param(name='astroFlavorAngle1', value=flavor_angles[0], ranges=[ 0., 1.], std=0.2, tag=tag),
Param(name='astroFlavorAngle2', value=flavor_angles[1], ranges=[-1., 1.], std=0.2, tag=tag),
])
asimov_paramset = ParamSet(asimov_paramset)
return asimov_paramset, llh_paramset | d49d2d757a041f1087508810239eac21260ac4b7 | 3,648,775 |
def indgen(*shape):
"""
Create a (multi-dimensional) range of integer values.
Notes
-----
**porting to python**
If ``shape`` is of one dimension only, you can use ``np.arange(n)``.
IDL accepts floats as dimension parameters, but applies ``int()`` before
using them. While ``np.arange()`` also accepts floats, be careful, as the
number of elements do not match any more!
.. code-block:: IDL
INDGEN(5.2) -> [0,1,2,3,4]
INDGEN(5) -> [0,1,2,3,4]
np.arange(5.2) -> [0,1,2,3,4,5] ; !!
np.arange(int(5.2)) -> [0,1,2,3,4]
np.arange(5) -> [0,1,2,3,4]
"""
return findgen(*shape, dtype=int) | 68f703ae0c7863bd8ed3866755d50df97d1c9de9 | 3,648,776 |
def get_bins(values):
"""
Automatically compute the number of bins for discrete variables.
Parameters
----------
values = numpy array
values
Returns
-------
array with the bins
Notes
-----
Computes the width of the bins by taking the maximun of the Sturges and the Freedman-Diaconis
estimators. Acording to numpy `np.histogram` this provides good all around performance.
The Sturges is a very simplistic estimator based on the assumption of normality of the data.
This estimator has poor performance for non-normal data, which becomes especially obvious for
large data sets. The estimate depends only on size of the data.
The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
It is considered a robusts version of the Scott rule as the IQR is less affected by outliers
than the standard deviation. However, the IQR depends on fewer points than the standard
deviation, so it is less accurate, especially for long tailed distributions.
"""
x_min = values.min().astype(int)
x_max = values.max().astype(int)
# Sturges histogram bin estimator
bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
# The Freedman-Diaconis histogram bin estimator.
iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
bins_fd = 2 * iqr * values.size ** (-1 / 3)
width = np.round(np.max([1, bins_sturges, bins_fd])).astype(int)
return np.arange(x_min, x_max + width + 1, width) | 638c07fe6263391ff42d7a96b2d482c4b6dd7c9a | 3,648,778 |
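A minimal usage sketch for get_bins, assuming numpy is imported as np and the function above is in scope; the seeded generator and the 0-20 value range are made up for illustration.

import numpy as np

values = np.random.default_rng(0).integers(0, 20, size=500)
print(get_bins(values))  # evenly spaced integer bin edges covering the data range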
def destination_name(data):
""" Fonction qui permet de récupérer le nom du terminus en fonction de data passé en paramètre
Nom fonction : destination_name
Paramètre : data, un flux xml
Return : un string qui a comme valeur le libellé de la destination final"""
tree = ET.ElementTree(ET.fromstring(data))
root = tree.getroot()
for tag in root.findall("."):
if tag.tag == "PlannedPatternDelivery":
if tag.find("PlannedPattern") is None:
return "Error, 'PlannedPattern' tag not exists"
else:
if tag.find("PlannedPattern/DestinationName") is None:
return "Error, 'DestinationName' tag not exists"
else:
for elem in root.findall("./PlannedPattern/DestinationName"):
if elem.text is None:
return "Error, 'DestinationName' tag is empty"
else:
return elem.text | ee5065523e1ae26104687a97ef0bd41e70670ef2 | 3,648,779 |
def prim_NumToTensor(mapper, graph, node):
""" 构造转为Tensor的PaddleLayer。
TorchScript示例:
%other.2 : Tensor = prim::NumToTensor(%1736)
参数含义:
%other.2 (Tensor): 输出。
%1736 (-): 输入。
"""
scope_name = mapper.normalize_scope_name(node)
output_name = mapper._get_outputs_name(node)[0]
layer_outputs = [output_name]
layer_inputs = {}
layer_attrs = {}
inputs_name, inputs_node = mapper._get_inputs_name(node)
    # Get the list of outputs of the current node
current_outputs = [output_name]
    # Process input 0, i.e. %86
mapper._check_input(graph, inputs_node[0], inputs_name[0], current_outputs, scope_name)
inputs_inputs_name, inputs_inputs_node = mapper._get_inputs_name(inputs_node[0])
if inputs_node[0].kind() == "aten::size" and len(inputs_inputs_name) > 1:
layer_inputs["input"] = inputs_name[0]
        # Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
graph.add_layer(
"prim_equal", inputs=layer_inputs, outputs=layer_outputs, scope_name=scope_name)
else:
layer_inputs["fill_value"] = inputs_name[0]
        # Get the list of inputs of the current node
current_inputs = list(layer_inputs.values())
input_type = list(node.inputs())[0].type()
layer_attrs["dtype"] = input_type
layer_attrs["shape"] = [1]
graph.add_layer(
"paddle.full",
inputs=layer_inputs,
outputs=layer_outputs,
scope_name=scope_name,
**layer_attrs)
return current_inputs, current_outputs | 0fd8881513dc4ee95e7c187b790e8d95d5996ec7 | 3,648,780 |
def import_taskdict(modname):
"""Import user module and return its name and TASKDICT"""
try:
mod = import_module(modname)
except (ImportError, ModuleNotFoundError):
LOGGER.critical('Module %s not found. '
'Check it is along PYTHONPATH', modname)
raise
try:
modtd = getattr(mod, 'TASKDICT')
except AttributeError:
LOGGER.critical('Module %s has no TASKDICT; '
'Please, remove it from input, to continue.',
mod.__name__)
raise
return mod.__name__, modtd | b351389ab92650a5dcc317953e6fe500b324a09d | 3,648,781 |
def nlp_stem(string):
"""
Generates a list of the stem for each word in the original string and returns the joined list of stems as a single string.
"""
ps = nltk.porter.PorterStemmer()
stems = [ps.stem(word) for word in string.split()]
return " ".join(stems) | a0e44b709eb8b53500f9208634b059ff50660710 | 3,648,782 |
def _CreateHostConfigEntityFromHostInventory(lab_name, host):
"""Creates HostConfig from HostInventory.
Args:
lab_name: the lab name.
host: the ansible inventory Host object.
Returns:
the HostConfig entity.
"""
return datastore_entities.HostConfig(
id=host.name,
lab_name=lab_name,
hostname=host.name,
inventory_groups=sorted(set([g.name for g in host.groups]))) | e3de12526f380baa5b6e0cfd7b0cb6ce14236633 | 3,648,783 |
import PIL
import time
def visualize_object_detection_custom(
image, post_processed, config, prev_state, start_time, duration):
"""Draw object detection result boxes to image.
Args:
image (np.ndarray): A inference input RGB image to be draw.
post_processed (np.ndarray): A one batch output of model be
already applied post process. Format is defined at
https://github.com/blue-oil/blueoil/blob/master/docs/specification/output_data.md
config (EasyDict): Inference config.
prev_state (string): A previous state, "NORMAL" or "WARNING" or "CLEAR"
start_time (float): UNIX time when state was changed to current state
duration (float): Duration(sec) for waiting to change status displayed
Returns:
PIL.Image.Image: drawn image object.
String: A current state ("NORMAL" or "WARNING" or "CLEAR")
Float: UNIX time when state was changed to current state
        Bool: A flag indicating whether "WARNING" is displayed or not
"""
colorWarning = (255, 0, 0)
colorClear = (0, 255, 0)
box_font = PIL.ImageFont.truetype(FONT, 10)
state_font = PIL.ImageFont.truetype(FONT, 20)
classes = config.CLASSES
ng_class_id = classes.index("face") if "face" in classes else 0
start_time = start_time or time.time()
center_width = image.shape[1] // 2
predict_boxes = _scale_boxes(
post_processed, image.shape, config.IMAGE_SIZE
)
# Gather and remove duplicate box in different classes
uniq_boxes = _gather_prediction(predict_boxes)
states = [_get_state(box, center_width) for box in uniq_boxes]
total_state = _get_total_state(states, ng_class_id)
image = PIL.Image.fromarray(_mask_image(image))
draw = PIL.ImageDraw.Draw(image)
for uniq_box, state in zip(uniq_boxes, states):
box = uniq_box["box"]
class_id = uniq_box["class_id"]
xy = [box[0], box[1], box[0] + box[2], box[1] + box[3]]
color = colorWarning if class_id == ng_class_id else colorClear
prefix = "[OK]" if state[0] or (state[1] != ng_class_id) else "[NG]"
txt = "{:s} {:s}: {:.3f}".format(
prefix, classes[class_id], float(uniq_box["score"])
)
draw.rectangle(xy, outline=color)
draw.text([box[0], box[1]], txt, fill=color, font=box_font)
if prev_state != total_state:
start_time = time.time()
elapsed_time = float(time.time() - start_time)
right_corner = [center_width + 60, 0]
displayed_waring = False
if total_state == STATE_WARNING and elapsed_time >= duration:
draw.text(right_corner, "WARNING", fill=colorWarning, font=state_font)
displayed_waring = True
elif total_state == STATE_CLEAR and elapsed_time >= duration:
draw.text(right_corner, " CLEAR", fill=colorClear, font=state_font)
return image, total_state, start_time, displayed_waring | 7a325b782ea0dba6bc573d27661545804d68614b | 3,648,784 |
def all_files(directory="..\\raw_data\\"):
""" Return flat list of all csv files in the given directory.
Args:
directory [string] full path to directory with csv files.
Default project layout is used if it is not provided
Returns:
Flat list of csv files as absolute names.
"""
files = list_files(directory)
result = []
for year in files.keys():
result += files[year]
return result | 179e40756b02c7a25f01fb4e0b8b863326256a3a | 3,648,785 |
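all_files relies on a list_files helper that is not shown; below is a hedged sketch of one plausible shape, assuming each year is a subdirectory of the raw-data folder holding csv files.

import glob
import os

def list_files(directory="..\\raw_data\\"):
    # Assumed helper shape: map each year subdirectory to the absolute paths of its csv files.
    files = {}
    for entry in os.listdir(directory):
        year_dir = os.path.join(directory, entry)
        if os.path.isdir(year_dir):
            files[entry] = [os.path.abspath(p)
                            for p in glob.glob(os.path.join(year_dir, "*.csv"))]
    return files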
def connected_user(**params):
"""Returns the connected user."""
if g.context.person:
return g.context.person == request.view_args.get('person_id') | 713f52c8aa090cf8756d49279df72f4f470b4b5c | 3,648,786 |
def remove_plugin(plugin, directory=None):
"""Removes the specified plugin."""
repo = require_repo(directory)
plugins = get_value(repo, 'plugins', expect_type=dict)
if plugin not in plugins:
return False
del plugins[plugin]
set_value(repo, 'plugins', plugins)
return True | d631d0906411782e681cb2deb76cc1e347f856e4 | 3,648,787 |
def command_arg(name, value_type=None, help=''): # noqa; pylint: disable=redefined-builtin
"""
Decorator wrapping functions to add command line arguments to the sub command to be invoked
:param name: Name of the argument
:param value_type: Type of the argument
:param help: Help string for the argument
"""
@_ensure_command
def wrapper(f):
f.command.add_argument(name, value_type=value_type, help=help, # noqa; pylint: disable=redefined-builtin
wrapped=f)
return f
return wrapper | 6d8490e5afaa3345ff9af7d905c67f4bc53e4325 | 3,648,788 |
def ioc_arg_parser(*, desc, default_prefix, argv=None, macros=None,
supported_async_libs=None):
"""
A reusable ArgumentParser for basic example IOCs.
Parameters
----------
    desc : string
        Human-friendly description of what that IOC does
    default_prefix : string
    argv : list, optional
        Defaults to sys.argv
macros : dict, optional
Maps macro names to default value (string) or None (indicating that
this macro parameter is required).
supported_async_libs : list, optional
"White list" of supported server implementations. The first one will
be the default. If None specified, the parser will accept all of the
(hard-coded) choices.
Returns
-------
ioc_options : dict
kwargs to be handed into the IOC init.
run_options : dict
kwargs to be handed to run
"""
parser, split_args = template_arg_parser(desc=desc, default_prefix=default_prefix,
argv=argv, macros=macros,
supported_async_libs=supported_async_libs)
return split_args(parser.parse_args()) | 7b4638afdc04284d5e69bfb48d476a557bc8bcd3 | 3,648,789 |
def count_model_param_and_flops(model):
"""
    Return the number of params and the number of flops of (only) the 2D Convolutional and Dense layers of the model.
:return:
"""
param_by_layer = dict()
flop_by_layer = dict()
nb_param_model, nb_flop_model = 0, 0
for layer in model.layers:
if isinstance(layer, Conv2D):
nb_param_layer, nb_param_layer_bias = count_nb_param_layer(layer)
nb_flop_layer = count_nb_flop_conv_layer(layer, nb_param_layer, nb_param_layer_bias)
elif isinstance(layer, Dense):
nb_param_layer, nb_param_layer_bias = count_nb_param_layer(layer)
nb_flop_layer = count_nb_flop_dense_layer(layer, nb_param_layer, nb_param_layer_bias)
else:
            # if you have other layers you want to compute flops for: put other conditions here and write the necessary functions
nb_param_layer, nb_param_layer_bias, nb_flop_layer = 0, 0, 0
param_by_layer[layer.name] = nb_param_layer + nb_param_layer_bias
flop_by_layer[layer.name] = nb_flop_layer
nb_param_model += nb_param_layer
nb_flop_model += nb_flop_layer
total_nb_param_model = nb_param_model
total_nb_flop_model = nb_flop_model
return total_nb_param_model, total_nb_flop_model | 5fff62167db1d369eb02aae07ef7a23bbd5d7a16 | 3,648,790 |
import OpenSSL
def catch_conn_reset(f):
"""
A decorator to handle connection reset errors even ones from pyOpenSSL
until https://github.com/edsu/twarc/issues/72 is resolved
It also handles ChunkedEncodingError which has been observed in the wild.
"""
try:
ConnectionError = OpenSSL.SSL.SysCallError
except:
ConnectionError = None
@wraps(f)
def new_f(self, *args, **kwargs):
# Only handle if pyOpenSSL is installed.
if ConnectionError:
try:
return f(self, *args, **kwargs)
except (ConnectionError, ChunkedEncodingError) as e:
log.warning("caught connection reset error: %s", e)
self.connect()
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return new_f | a5b3b7d78a03b81b0eb768e816f5bf3d95ea50cc | 3,648,792 |
def prepared(name):
"""Prepare the given volume.
Args:
name (str): Volume name
Returns:
dict: state return value
"""
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
# Idempotence.
if __salt__['metalk8s_volumes.is_prepared'](name):
ret['result'] = True
ret['comment'] = 'Volume {} already prepared.'.format(name)
return ret
# Dry-run.
if __opts__['test']:
ret['changes'][name] = 'Prepared'
ret['result'] = None
ret['comment'] = 'Volume {} is going to be prepared.'.format(name)
return ret
# Let's go for real.
try:
__salt__['metalk8s_volumes.prepare'](name)
except Exception as exn:
ret['result'] = False
ret['comment'] = 'Failed to prepare volume {}: {}.'.format(name, exn)
else:
ret['changes'][name] = 'Prepared'
ret['result'] = True
ret['comment'] = 'Volume {} prepared.'.format(name)
return ret | 443ea25a3bd5797744f2f095b54d40172c57f5ee | 3,648,793 |
def if_statement(env, node):
"""
'If' statement def for AST.
    interpret - runtime function for Evaluator (true or false statement depending on condition).
"""
condition_value = node.condition.interpret(env)
if condition_value:
node.true_stmt.interpret(env)
else:
if node.alternatives_stmt:
for alternative_stmt in node.alternatives_stmt:
alternative_condition_value = alternative_stmt.interpret(env)
if alternative_condition_value:
return True
if node.false_stmt:
node.false_stmt.interpret(env)
return condition_value | 96522698c42d7649d3951f5fd2ffe3bbd992c985 | 3,648,794 |
def volCyl(radius:float, height: float) -> float:
"""Finds volume of a cylinder"""
volume: float = pi * radius * radius * height
return volume | 2d4320bb0a2d802e4308d22593e8c2f1d797ad87 | 3,648,795 |
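A quick numeric check of the cylinder formula above, assuming pi comes from the math module (the original import is not shown).

from math import pi

print(volCyl(2.0, 5.0))  # pi * 2**2 * 5 ≈ 62.832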
from s3 import S3DateFilter, \
               S3LocationFilter, \
               S3OptionsFilter, \
               S3TextFilter, \
               s3_get_filter_opts
from s3db.req import req_status_opts
def req_filter_widgets():
    """
    Filter widgets for requests
    @returns: list of filter widgets
    """
    T = current.T
req_status_opts = OrderedDict(sorted(req_status_opts().items(),
key = lambda i: i[0],
))
filter_widgets = [
S3TextFilter(["req_ref"],
label = T("Order No."),
),
S3DateFilter("date"),
S3OptionsFilter("transit_status",
cols = 3,
options = req_status_opts,
sort = False,
),
S3OptionsFilter("fulfil_status",
cols = 3,
hidden = True,
options = req_status_opts,
sort = False,
),
S3OptionsFilter("req_item.item_id",
hidden = True,
options = lambda: s3_get_filter_opts("supply_item"),
),
]
if current.auth.s3_has_role("SUPPLY_COORDINATOR"):
coordinator_filters = [
S3LocationFilter("site_id$location_id",
levels = ["L3", "L4"],
),
S3TextFilter("site_id$location_id$addr_postcode",
label = T("Postcode"),
),
S3OptionsFilter("site_id",
hidden = True
),
S3OptionsFilter("site_id$organisation_id$delivery.value",
label = T("Delivery##supplying"),
options = delivery_tag_opts(),
),
]
filter_widgets[2:2] = coordinator_filters
return filter_widgets | 13720abfd491609e2d6ac9e4ab702ef744041b61 | 3,648,796 |
from typing import List
def is_minimally_connected(graph: List[List[int]], num_vertices: int) -> bool:
"""
1. Has no cycle.
2. All nodes are connected
"""
visited = set()
has_cycle = is_cyclic(graph, 0, -1, visited)
if has_cycle or len(visited) < num_vertices:
# if num_vertices > len(visited), it means there is a disconnect in the graph.
return False
return True | 8b836f34b6e0c3c5f5651f8c31a4448a8af09d13 | 3,648,797 |
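is_minimally_connected depends on an is_cyclic helper that is not shown; here is a plausible DFS-based sketch plus a usage check, assuming an undirected adjacency-list graph.

def is_cyclic(graph, node, parent, visited):
    # Assumed helper: depth-first search that records visited vertices and
    # reports a back edge (cycle) in an undirected adjacency-list graph.
    visited.add(node)
    for neighbour in graph[node]:
        if neighbour not in visited:
            if is_cyclic(graph, neighbour, node, visited):
                return True
        elif neighbour != parent:
            return True
    return False

tree = [[1, 2], [0, 3], [0], [1]]        # a tree on 4 vertices
print(is_minimally_connected(tree, 4))   # True under these assumptions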
def to_keep(path):
"""
:param path:
    :return: True if height and width >= 512
"""
img = Image.open(path)
    w, h = img.size  # PIL's Image.size is (width, height)
return h >= 512 and w >= 512 | 0a24fe38f52c6441fc9955e2e4890962b7c35fb6 | 3,648,798 |
from typing import Hashable
from typing import Callable
def __register(key, *additional_keys, default_handler_info, registry=None, registering_for_name="", overwrite=False):
"""
Internal decorator to register the non-default handlers for multimethod
registry and key_fn_name are keyword arguments with defaults to make it easy to apply
functools.partial on them
"""
if registry is None:
registry = {}
all_keys = [key] + list(additional_keys)
def decorator(handler):
if not isinstance(key, Hashable):
raise TypeError(
f"Cannot register handler for function {registering_for_name} with key {repr(key)} that is not Hashable"
)
if not isinstance(handler, Callable):
raise TypeError(f"handler function {handler} of type {type(handler)} must be Callable")
for _key in all_keys:
if _key in registry and not overwrite:
raise KeyError(
f"Duplicate registration for key {repr(_key)} for function {registering_for_name}"
)
if __get_default_params(handler):
raise ValueError(
f"Found default params while registering keys {repr(all_keys)} for function {registering_for_name}. "
"Default params are only allowed in the default handler"
)
for _key in all_keys:
registry[_key] = __HandlerInfo(
handler=handler,
default_params={
**default_handler_info.default_params,
**__get_default_params(handler) # default params of explicit registration takes precedence
}
)
@wraps(handler)
def wrapper(*args, **kwargs):
            return handler(*args, **kwargs)
return wrapper
return decorator | 769184841dd2eb87813533bb19bde0bf747ac26f | 3,648,799 |
def download_from_url_if_not_in_cache(cloud_path: str, cache_dir: str = None):
"""
:param cloud_path: e.g., https://public-aristo-processes.s3-us-west-2.amazonaws.com/wiqa-model.tar.gz
:param to_dir: will be regarded as a cache.
:return: the path of file to which the file is downloaded.
"""
return cached_path(url_or_filename=cloud_path, cache_dir=cache_dir) | f0549e14b4219303ce48f992d684330338958370 | 3,648,800 |
from mne.viz.backends.renderer import _get_renderer
from mne_connectivity.base import BaseConnectivity
def plot_sensors_connectivity(info, con, picks=None,
cbar_label='Connectivity'):
"""Visualize the sensor connectivity in 3D.
Parameters
----------
info : dict | None
The measurement info.
con : array, shape (n_channels, n_channels) | Connectivity
The computed connectivity measure(s).
%(picks_good_data)s
Indices of selected channels.
cbar_label : str
Label for the colorbar.
Returns
-------
fig : instance of Renderer
The 3D figure.
"""
_validate_type(info, "info")
if isinstance(con, BaseConnectivity):
con = con.get_data()
renderer = _get_renderer(size=(600, 600), bgcolor=(0.5, 0.5, 0.5))
picks = _picks_to_idx(info, picks)
if len(picks) != len(con):
raise ValueError('The number of channels picked (%s) does not '
'correspond to the size of the connectivity data '
'(%s)' % (len(picks), len(con)))
# Plot the sensor locations
sens_loc = [info['chs'][k]['loc'][:3] for k in picks]
sens_loc = np.array(sens_loc)
renderer.sphere(np.c_[sens_loc[:, 0], sens_loc[:, 1], sens_loc[:, 2]],
color=(1, 1, 1), opacity=1, scale=0.005)
# Get the strongest connections
n_con = 20 # show up to 20 connections
min_dist = 0.05 # exclude sensors that are less than 5cm apart
threshold = np.sort(con, axis=None)[-n_con]
ii, jj = np.where(con >= threshold)
# Remove close connections
con_nodes = list()
con_val = list()
for i, j in zip(ii, jj):
if np.linalg.norm(sens_loc[i] - sens_loc[j]) > min_dist:
con_nodes.append((i, j))
con_val.append(con[i, j])
con_val = np.array(con_val)
# Show the connections as tubes between sensors
vmax = np.max(con_val)
vmin = np.min(con_val)
for val, nodes in zip(con_val, con_nodes):
x1, y1, z1 = sens_loc[nodes[0]]
x2, y2, z2 = sens_loc[nodes[1]]
tube = renderer.tube(origin=np.c_[x1, y1, z1],
destination=np.c_[x2, y2, z2],
scalars=np.c_[val, val],
vmin=vmin, vmax=vmax,
reverse_lut=True)
renderer.scalarbar(source=tube, title=cbar_label)
# Add the sensor names for the connections shown
nodes_shown = list(set([n[0] for n in con_nodes] +
[n[1] for n in con_nodes]))
for node in nodes_shown:
x, y, z = sens_loc[node]
renderer.text3d(x, y, z, text=info['ch_names'][picks[node]],
scale=0.005,
color=(0, 0, 0))
renderer.set_camera(azimuth=-88.7, elevation=40.8,
distance=0.76,
focalpoint=np.array([-3.9e-4, -8.5e-3, -1e-2]))
renderer.show()
return renderer.scene() | 3d236d8e8802f65c6388eeeafa327a252f9a75be | 3,648,801 |
import re
import json
def str_to_list_1(string):
"""
Parameters
----------
string : str
The str of first line in each sample of sample.txt
Returns
---------
final_list: lst
"""
final_list = []
li = re.findall(r'\[.*?\]', string)
for ele in li:
final_list.append(json.loads(ele))
return final_list | 92b4b11a339d2101a0af5408caee58cc9b9668a1 | 3,648,802 |
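A small usage sketch for str_to_list_1 with a made-up first line containing two bracketed JSON lists.

line = "scores [1, 2, 3] labels [4, 5]"
print(str_to_list_1(line))  # [[1, 2, 3], [4, 5]]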
def one_mini_batch(data, batch_indices):
"""
    Generate one mini-batch from the data.
:param data:
:param batch_indices:
:return:
"""
batch_data = {
"raw_data": [data[i] for i in batch_indices],
"word_id_list": [],
"label_vector": []
}
    for sample in batch_data["raw_data"]:
        batch_data["word_id_list"].append(sample["word_id_list"])
        batch_data["label_vector"].append(sample["label_vector"])
return batch_data | 2bbbd62a00422431bb3322ebfce26d7fe95edc09 | 3,648,804 |
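A usage sketch for one_mini_batch with made-up samples; each sample already carries the word_id_list and label_vector keys the function expects.

samples = [
    {"word_id_list": [1, 2, 3], "label_vector": [0, 1]},
    {"word_id_list": [4, 5], "label_vector": [1, 0]},
    {"word_id_list": [6], "label_vector": [0, 1]},
]
batch = one_mini_batch(samples, batch_indices=[0, 2])
print(batch["word_id_list"])   # [[1, 2, 3], [6]]
print(batch["label_vector"])   # [[0, 1], [0, 1]]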
def reset_password(reset_key):
"""Checks the reset key. If successful, displays the password reset prompt."""
username = auth_utils.check_reset_key(reset_key)
if username is None:
flask.flash(
'Invalid request. If your link has expired, then you will need to generate a new one. '
'If you continue to encounter problems, please contact [email protected].'
)
return flask.redirect(flask.url_for('auth.forgot_password'))
return flask.render_template(
'reset_password.html', username=username, reset_key=reset_key) | 4f8e30a1669837c31b3dc2f77df441c50c6439dd | 3,648,805 |
import scipy
def williams_diff_test(corr_func: SummaryCorrFunc,
X: np.ndarray,
Y: np.ndarray,
Z: np.ndarray,
two_tailed: bool) -> float:
"""
Calculates the p-value for the difference in correlations using Williams' Test.
"""
# In the math, Z is metric 1. We take the absolute value of the correlations because
# it does not matter whether they are positively or negatively correlated with each other. The WMT scripts
# do the same before calling r.test
r12 = abs(corr_func(X, Z))
r13 = abs(corr_func(Y, Z))
r23 = abs(corr_func(X, Y))
n = _get_n(corr_func, X)
# Implementation based on https://github.com/cran/psych/blob/master/R/r.test.R
diff = r12 - r13
det = 1 - (r12 ** 2) - (r23 ** 2) - (r13 ** 2) + (2 * r12 * r23 * r13)
av = (r12 + r13) / 2
cube = (1 - r23) ** 3
t2 = diff * np.sqrt((n - 1) * (1 + r23) / (((2 * (n - 1) / (n - 3)) * det + av ** 2 * cube)))
# r.test implicitly assumes that r12 > r13 because it takes the absolute value of the t statistic. Since we don't,
# we have to have special handling for one-tailed tests so we don't map a negative t statistic to a positive one.
if two_tailed:
pvalue = scipy.stats.t.sf(abs(t2), n - 3) * 2
else:
pvalue = scipy.stats.t.sf(t2, n - 3)
return pvalue | afda90296b544233ba34f3abdd87d72b360de832 | 3,648,806 |
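A self-contained numeric check of the Williams t-statistic and two-tailed p-value computed inside williams_diff_test, with made-up correlations r12, r13, r23 and n = 100 (the corr_func/_get_n plumbing is bypassed).

import numpy as np
import scipy.stats

r12, r13, r23, n = 0.55, 0.45, 0.60, 100
diff = r12 - r13
det = 1 - r12 ** 2 - r23 ** 2 - r13 ** 2 + 2 * r12 * r23 * r13
av = (r12 + r13) / 2
cube = (1 - r23) ** 3
t2 = diff * np.sqrt((n - 1) * (1 + r23) / ((2 * (n - 1) / (n - 3)) * det + av ** 2 * cube))
print(t2, scipy.stats.t.sf(abs(t2), n - 3) * 2)  # statistic and two-tailed p-value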
from typing import Tuple
from typing import List
import sqlite3
def load_students(max_meeting_seconds: int) -> Tuple[List[str], int]:
"""Loads student names and wait times from the database."""
try:
with sqlite3.connect("students.db") as conn:
cursor = conn.cursor()
try:
cursor.execute("SELECT name FROM students")
student_names = [row[0] for row in cursor.fetchall()]
cursor.execute("SELECT seconds FROM students")
individual_seconds = cursor.fetchall()[0][0]
return student_names, individual_seconds
except IndexError:
pass
except sqlite3.OperationalError:
create_students_table()
return [], max_meeting_seconds | b5b2a003216507df413cba7bea1171cd4667ee1f | 3,648,807 |
def get_associated_genes(variants_list: list) -> pd.DataFrame:
"""
Get variant gene information from BioMart.
More information on BioMart here: https://www.ensembl.org/info/data/biomart/index.html
:param variants_list: the list with variant ids.
:return: dataframe with variant and gene information
"""
snp_dataset = Dataset(name='hsapiens_snp', host='http://www.ensembl.org')
variant_gene_df = snp_dataset.query(attributes=['refsnp_id', 'ensembl_gene_stable_id'],
filters={'snp_filter': variants_list})
gene_dataset = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
gene_df = gene_dataset.query(attributes=['ensembl_gene_id', 'external_gene_name'], only_unique=False,
filters={'link_ensembl_gene_id': list(variant_gene_df['Gene stable ID'])})
merged_df = pd.merge(variant_gene_df, gene_df, on='Gene stable ID')
interaction = ['association' for ind, row in merged_df.iterrows()]
merged_df['interaction'] = interaction
return merged_df | e267afb387496a99701872db94b46543e8c7406a | 3,648,809 |
def crc16(data) :
"""Compute CRC16 for bytes/bytearray/memoryview data"""
crc = _CRC16_START
for b in data :
crc = ((crc << 8) & 0xFFFF) ^ _CRC16_TABLE[(crc >> 8) ^ b]
return crc | ac7dc27ebc47d1bc444050b9adba81d0ac26167a | 3,648,810 |
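_CRC16_START and _CRC16_TABLE are not shown; this sketch rebuilds them under the assumption of the common CRC-16/CCITT-FALSE parameters (polynomial 0x1021, initial value 0xFFFF).

_CRC16_START = 0xFFFF
_CRC16_TABLE = []
for byte in range(256):
    # Standard MSB-first table construction for polynomial 0x1021 (an assumption).
    crc = byte << 8
    for _ in range(8):
        crc = ((crc << 1) ^ 0x1021) & 0xFFFF if crc & 0x8000 else (crc << 1) & 0xFFFF
    _CRC16_TABLE.append(crc)

print(hex(crc16(b"123456789")))  # 0x29b1 if these parameters match the original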
def sigma(j: int, N: int = 1) -> np.ndarray:
"""
"""
s = [s0, s1, s2, s3]
dims = [4] * N
idx = np.unravel_index(j, dims)
return tensor(s[x] for x in idx) | c312222f5a037723f9b7920a971d93e36e3b3e4b | 3,648,811 |
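The single-site operators s0..s3 and the tensor helper are not shown; a sketch under the assumption that they are the 2x2 identity plus the Pauli matrices and a Kronecker product over an iterable.

import functools
import numpy as np

s0 = np.eye(2, dtype=complex)
s1 = np.array([[0, 1], [1, 0]], dtype=complex)
s2 = np.array([[0, -1j], [1j, 0]], dtype=complex)
s3 = np.array([[1, 0], [0, -1]], dtype=complex)

def tensor(matrices):
    # Assumed helper: fold a Kronecker product over the iterable of matrices.
    return functools.reduce(np.kron, matrices)

print(np.allclose(sigma(1), s1))   # True: j=1, N=1 selects s1
print(sigma(5, N=2).shape)         # (4, 4): digits of 5 in base 4 are (1, 1)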