content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M)
---|---|---
def compare_data_identifiers(a, b):
"""Checks if all the identifiers match, besides those that are not in both lists"""
a = {tuple(key): value for key, value in a}
b = {tuple(key): value for key, value in b}
matching_keys = a.keys() & b.keys()
a = {k: v for k, v in a.items() if k in matching_keys}
b = {k: v for k, v in b.items() if k in matching_keys}
return a == b | f0f5f08e4cc685b62b2af19e0c724561988ed1b9 | 16,397 |
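A minimal usage sketch (not from the original source) with made-up identifier/value pairs; the (key, value) layout is inferred from how the function unpacks its inputs.
a = [(["doc", 1], "sha-aaa"), (["doc", 2], "sha-bbb")]
b = [(["doc", 1], "sha-aaa"), (["doc", 3], "sha-ccc")]
# Only the key ("doc", 1) appears in both lists, and its values match there.
print(compare_data_identifiers(a, b))  # True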
import re
def expand_abbr(abbr, doc_type='html'):
    """
    Expands an abbreviation
    @param abbr: Abbreviation
    @type abbr: str
    @return: str
    """
    tree = parse_into_tree(abbr, doc_type)
    if tree:
        result = tree.to_string(True)
        if result:
            result = re.sub(r'\|', insertion_point, result, 1)
            return re.sub(r'\|', sub_insertion_point, result)
return '' | 23d0edebd9660303d4c361b468c5cb2f6e0e0f03 | 16,398 |
import wx
import h5py
def SaveSettings(event=None, SettingsNotebook=None, filename="settings.hdf5",
                 title="Open HDF5 file to save settings", OpenDialog=True):
    """
    Method for saving settings
    """
if OpenDialog :
# Ask user to select the file
openFileDialog = wx.FileDialog(SettingsNotebook, title, "", filename, "HDF5 files (*.hdf5)|*.hdf5",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL: return None
filename = openFileDialog.GetPath()
with h5py.File (filename, 'a') as file_settings :
        # Create the group if it does not exist
try : parameters_grp = file_settings["settings"]
except KeyError : parameters_grp = file_settings.create_group("settings")
        # Loop over all settings tabs
for SettingsTabName, SettingsTab in SettingsNotebook.settings_to_tabs.items() :
# Save all settings on a given tab
try : del parameters_grp[SettingsTabName]
except KeyError : pass
grp = parameters_grp.create_group(SettingsTabName)
for key, value in SettingsTab.GetSettings().items() : grp[key] = value
# return valid filename
return filename | 7e2a221c78ef78f542877a754034084ed8dd8492 | 16,399 |
import torch
import colorsys
import numpy as np
from PIL import Image, ImageDraw
from torchvision import transforms
def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None):
"""
Function to create the affinity maps,
e.g., vector maps pointing toward the object center.
Args:
        width: image width
height: image height
point: (x,y)
center: (x,y)
radius: pixel radius
img_affinity: tensor to add to
return:
return a tensor
"""
tensor = torch.zeros(2,height,width).float()
    # Create the canvas for the affinity output
imgAffinity = Image.new("RGB", (width,height), "black")
totensor = transforms.Compose([transforms.ToTensor()])
draw = ImageDraw.Draw(imgAffinity)
r1 = radius
p = point
draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),(255,255,255))
del draw
    # Compute the array to add the affinity
array = (np.array(imgAffinity)/255)[:,:,0]
angle_vector = np.array(center) - np.array(point)
angle_vector = normalize(angle_vector)
affinity = np.concatenate([[array*angle_vector[0]],[array*angle_vector[1]]])
# print (tensor)
    if img_affinity is not None:
# Find the angle vector
# print (angle_vector)
if length(angle_vector) >0:
angle=py_ang(angle_vector)
else:
angle = 0
# print(angle)
c = np.array(colorsys.hsv_to_rgb(angle/360,1,1)) * 255
draw = ImageDraw.Draw(img_affinity)
draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),fill=(int(c[0]),int(c[1]),int(c[2])))
del draw
re = torch.from_numpy(affinity).float() + tensor
return re, img_affinity | 5c25274809820f6318b9756680d2967ba8f08a5d | 16,400 |
import numpy as np
def unwrap_phase_iterative_fft(mat, iteration=4, win_for=None, win_back=None,
weight_map=None):
"""
Unwrap a phase image using an iterative FFT-based method as described in
Ref. [1].
Parameters
----------
mat : array_like
2D array. Wrapped phase-image in the range of [-Pi; Pi].
iteration : int
        Number of iterations.
win_for : array_like
2D array. FFT-window for the forward transform. Generated if None.
win_back : array_like
        2D array. FFT-window for the backward transform; it must contain no
        zero values. Generated if None.
weight_map : array_like
        2D array. Weight map to apply if provided.
Returns
-------
array_like
2D array. Unwrapped phase-image.
References
----------
.. [1] https://doi.org/10.1364/AO.56.007079
"""
height, width = mat.shape
if win_for is None:
win_for = _make_window(2 * height, 2 * width, direction="forward")
if win_back is None:
win_back = _make_window(2 * height, 2 * width, direction="backward")
if weight_map is None:
weight_map = np.ones_like(mat)
mat_unwrap = unwrap_phase_based_fft(mat * weight_map, win_for, win_back)
for i in range(iteration):
mat_wrap = _wrap_to_pi(mat_unwrap)
mat_diff = mat - mat_wrap
nmean = np.mean(mat_diff)
mat_diff = _wrap_to_pi(mat_diff - nmean)
phase_diff = unwrap_phase_based_fft(mat_diff * weight_map, win_for,
win_back)
mat_unwrap = mat_unwrap + phase_diff
return mat_unwrap | e79bb72441379531f70d32d07c4e6e9299a39062 | 16,401 |
def wallunderground(idf, bsdobject, deletebsd=True, setto000=False):
"""return a wall:underground if bsdobject (buildingsurface:detailed) is an
underground wall"""
# ('WALL:UNDERGROUND', Wall, s.startswith('Ground'))
# test if it is an underground wall
if bsdobject.Surface_Type.upper() == 'WALL': # Surface_Type == wall
if bsdobject.Outside_Boundary_Condition.upper().startswith('GROUND'): # Outside_Boundary_Condition startswith 'ground'
simpleobject = idf.newidfobject('WALL:UNDERGROUND')
simpleobject.Name = bsdobject.Name
simpleobject.Construction_Name = bsdobject.Construction_Name
simpleobject.Zone_Name = bsdobject.Zone_Name
simpleobject.Azimuth_Angle = bsdobject.azimuth
simpleobject.Tilt_Angle = bsdobject.tilt
surforigin = bsdorigin(bsdobject, setto000=setto000)
simpleobject.Starting_X_Coordinate = surforigin[0]
simpleobject.Starting_Y_Coordinate = surforigin[1]
simpleobject.Starting_Z_Coordinate = surforigin[2]
simpleobject.Length = bsdobject.width
simpleobject.Height = bsdobject.height
if deletebsd:
idf.removeidfobject(bsdobject)
return simpleobject
return None | 739ca431088f049323b64e80649e6b23930d1318 | 16,402 |
def canonical_order(match):
"""
It does not make sense to define a separate bond between atoms 1 and 2,
and between atoms 2 and 1. This function will swap the atoms in the bond
if the first atom > second atom.
"""
# match[0][0:2] contains the ID numbers for the 2 atoms in the match
atom0 = match[0][0]
atom1 = match[0][1]
# match[1][0:1] contains the ID numbers for the 1 bond
bond0 = match[1][0]
if atom0 < atom1:
# return ((atom0, atom1), (bond0)) same thing as:
return match
    else:
        # Swap the atoms; keep the bond IDs as a 1-tuple so the shape matches the input
        return ((atom1, atom0), (bond0,)) | ea268fedaa365e0fad3ea49944cc1d1bb5fa7a51 | 16,403 |
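A small usage sketch (hypothetical data), assuming a match is a pair of (atom IDs, bond IDs):
match = ((5, 2), (7,))
atoms, bonds = canonical_order(match)
print(atoms)  # (2, 5) -- the atoms are swapped into ascending order
print(canonical_order(((2, 5), (7,))) == ((2, 5), (7,)))  # True: already canonical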
def grant_db_access_to_role(role, db): # pylint: disable=invalid-name
"""Grant the role 'database_name', returns grant permission."""
return grant_obj_permission_to_role(role, db, 'database_access') | 5adb5f8f06473b20d6e7f386acb889631e042dde | 16,404 |
import json
def execute_compute_job():
"""Call the execution of a workflow.
---
tags:
- services
consumes:
- application/json
parameters:
- name: consumerAddress
in: query
description: The consumer address.
required: true
type: string
- name: serviceAgreementId
in: query
description: The ID of the service agreement.
required: true
type: string
- name: signature
in: query
description: Signature of the documentId to verify that the consumer has rights to download the asset.
type: string
- name: workflowDID
in: query
description: DID of the workflow that is going to start to be executed.
type: string
responses:
200:
description: Call to the operator-service was successful.
400:
description: One of the required attributes is missing.
401:
description: Invalid asset data.
500:
description: Error
"""
data = request.args
required_attributes = [
'serviceAgreementId',
'consumerAddress',
'signature',
'workflowDID'
]
msg, status = check_required_attributes(required_attributes, data, 'consume')
if msg:
return msg, status
    if not data.get('signature'):
        return '`signature` is required in the call to "consume".', 400
try:
agreement_id = data.get('serviceAgreementId')
consumer_address = data.get('consumerAddress')
asset_id = keeper_instance().agreement_manager.get_agreement(agreement_id).did
did = id_to_did(asset_id)
if not was_compute_triggered(agreement_id, did, consumer_address, keeper_instance()):
msg = (
'Checking if the compute was triggered failed. Either consumer address does not '
                'have permission to execute this workflow or consumer address and/or service '
'agreement id is invalid.')
logger.warning(msg)
return msg, 401
workflow = DIDResolver(keeper_instance().did_registry).resolve(data.get('workflowDID'))
body = {"serviceAgreementId": agreement_id, "workflow": workflow.as_dictionary()}
response = requests_session.post(
get_config().operator_service_url + '/api/v1/operator/init',
data=json.dumps(body),
headers={'content-type': 'application/json'})
return jsonify({"workflowId": response.content.decode('utf-8')})
except Exception as e:
logger.error(f'Error- {str(e)}', exc_info=1)
return f'Error : {str(e)}', 500 | 3601cc1f9001d23d07ded604f9fa241fe11cebd3 | 16,405 |
from typing import List
from pathlib import Path
def files_filter_ext(files: List[Path], ext: str) -> List[Path]:
"""Filter files from a list matching a extension.
Args:
files: List of files.
ext: Extension to filter.
Returns:
List of files that have the extension.
"""
return [f for f in files if f.suffix == ext] | 0ed134583f9fa4868111d1475b8be4d67ba4feb7 | 16,406 |
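A quick usage sketch with made-up paths; note that the extension must include the leading dot.
from pathlib import Path
paths = [Path("a.csv"), Path("b.txt"), Path("c.csv")]
print(files_filter_ext(paths, ".csv"))  # keeps a.csv and c.csv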
import six
import numpy as np
import xarray as xr
def interp_to_grid(tran,v,expand_x=True,expand_y=True):
"""
Return dense matrix for X,Y and V (from v, or tran[v] if v is str)
expand_x: defaults to 1 more value in the X dimension than in V, suitable for
passing to pcolormesh.
expand_y: defaults to 1 more value in the Y dimension than in V, for pcolormesh
"""
if isinstance(v,six.string_types):
v=tran[v]
x,y,scal,dz=xr.broadcast(get_d_sample(tran),tran.z_ctr,v,get_z_dz(tran))
# important to use .values, as xarray will otherwise muck with
# the indexing
# coll_u=plot_utils.pad_pcolormesh(x.values,y.values,scal.values,ax=ax)
# But we have some additional information on how to pad Y, so do that
# here.
# Move to numpy land
X=x.values
Y=y.values
Dz=dz.values
if expand_y:
# Expands the vertical coordinate in the vertical
Ybot=Y-0.5*Dz
Yexpand=np.concatenate( (Ybot,Ybot[:,-1:]), axis=1)
Yexpand[:,-1]=np.nan
Yexpand[:,1:]=np.where( np.isfinite(Yexpand[:,1:]),
Yexpand[:,1:],
Y+0.5*Dz)
# Expands the horizontal coordinate in the vertical
Xexpand=np.concatenate( (X,X[:,-1:]), axis=1)
else:
Yexpand=Y
Xexpand=X
# And expand in the horizontal
def safe_midpnt(a,b):
ab=0.5*(a+b)
invalid=np.isnan(ab)
ab[invalid]=a[invalid]
invalid=np.isnan(ab)
ab[invalid]=b[invalid]
return ab
if expand_x:
dx=utils.center_to_interval(X[:,0])
Xexpand2=np.concatenate( (Xexpand-0.5*dx[:,None], Xexpand[-1:,:]+0.5*dx[-1:,None]), axis=0)
Yexpand2=np.concatenate( (Yexpand[:1,:],
safe_midpnt(Yexpand[:-1],Yexpand[1:]),
Yexpand[-1:,:]), axis=0)
else:
Xexpand2=Xexpand
Yexpand2=Yexpand
return Xexpand2,Yexpand2,scal.values | 324e42329588860d6fd45cfa06988f49e56ca504 | 16,408 |
import numpy as np
import EoN
def simulation(G, tau, gamma, rho, max_time, number_infected_before_release, release_number, background_inmate_turnover,
stop_inflow_at_intervention, p, death_rate, percent_infected, percent_recovered, social_distance,
social_distance_tau, initial_infected_list):
"""Runs a simulation on SIR model.
Args:
G: Networkx graph
tau: transmission rate
gamma: recovery rate
rho: percent of inmates that are initially infected
max_time: # of time steps to run simulation
number_infected_before_release: number of infected at which to perform release on next integer time
release_number: # of inmates to release at release intervention
background_inmate_turnover: background # of inmates added/released at each time step
stop_inflow_at_intervention: should we stop the background inflow of inmates at intervention time?
p: probability of contact between inmate and other inmates
death_rate: percent of recovered inmates that die
percent_infected: percent of general population that is infected
percent_recovered: percent of general population that is recovered
social_distance: boolean flag, if we lower transmission rate after major release
social_distance_tau: new transmission rate after major release
initial_infected_list: sets node numbers of initial infected (default is 0, this parameter is arbitrary)
Returns:
t: array of times at which events occur
S: # of susceptible inmates at each time
I: # of infected inmates at each time
R: # of recovered inmates at each time
D: # of dead inmates at each time step
"""
print('Starting simulation...')
release_occurred = False
background_release_number = background_inmate_turnover
data_list = []
recovered_list = []
delta_recovered_list = []
# Check we are using initial_infected_list
if initial_infected_list is not None:
print('Using initial infected list to set initial infected.')
infected_list = initial_infected_list.copy()
else: # Choose random initial infections based on rho
print('Using rho to set initial infected.')
infected_list = list(np.random.choice(list(G.nodes), int(np.ceil(rho * len(G.nodes))), replace=False))
# Loop over time
for i in range(max_time):
# Run 1 time unit of simulation
data = EoN.fast_SIR(G, tau, gamma, initial_infecteds=infected_list, initial_recovereds=recovered_list,
tmin=i, tmax=i + 1, return_full_data=True)
data_list.append(data)
# Update infected and recovered inmate lists
infected_list, recovered_list = get_infected(data, i + 1), get_recovered(data, i + 1)
# Check if release condition has been met
if not release_occurred and len(infected_list) >= number_infected_before_release:
background_inmate_turnover, r_n, tau = enact_interventions(background_inmate_turnover,
background_release_number, i + 1,
infected_list, release_number,
social_distance,
social_distance_tau,
stop_inflow_at_intervention,
tau)
release_occurred = True
else: # If not, use background release rate
r_n = background_release_number
# Add and release inmates
G, infected_list, recovered_list, delta_recovered = recalibrate_graph(G, infected_list, recovered_list,
background_inmate_turnover, r_n, p,
percent_infected, percent_recovered,
death_rate)
# Track the number of recovered inmates added or released at each time step
delta_recovered_list.append(delta_recovered)
# Process raw data into t, S, I, R, D arrays
t, S, I, R, D = process_data(data_list, delta_recovered_list, death_rate)
print('Simulation completed.\n')
return t, S, I, R, D | 1f9ab389ce2f301266ce6d796c303cfbb5ab4b44 | 16,409 |
def relative(link : str):
"""Convert relative link to absolute"""
return f"#{document.URL.split('#')[1]}/{link}" | 5f00da06f5277b4a85512b49e9348d8d22949058 | 16,410 |
from typing import Tuple
from typing import List
def _get_axes_names(ndim: int) -> Tuple[List[str], List[str]]:
"""Get needed axes names given the number of dimensions.
Parameters
----------
ndim : int
Number of dimensions.
Returns
-------
axes : List[str]
Axes names.
coords : List[str]
Coordinates names.
"""
if ndim == 2:
axes = [axis for axis in AXES if axis != Axes.ZPLANE.value]
coords = [coord for coord in COORDS if coord != Coordinates.Z.value]
elif ndim == 3:
axes = AXES
coords = COORDS
else:
raise TypeError('expected 2- or 3-D image')
return axes, coords | 4f9dc40131443520a2f43c287b7d0ab1428a878f | 16,411 |
def multi_replace(text, replace_dict):
"""Perform multiple replacements in one go using the replace dictionary
in format: { 'search' : 'replace' }
    :param text: Text in which to perform the replacements
    :type text: `str`
    :param replace_dict: The replacement strings in a dict
    :type replace_dict: `dict`
    :return: The text with all replacements applied
    :rtype: `str`
"""
new_text = text
for search, replace in list(replace_dict.items()):
new_text = new_text.replace(search, str(replace))
return new_text | dc902c988fa57cd9a3d7f4def6089b78d36664c8 | 16,412 |
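A short usage sketch (hypothetical strings); replacements are applied one after another in dict order.
print(multi_replace("GET /api/v1/users", {"GET": "POST", "v1": "v2"}))
# -> "POST /api/v2/users"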
def function_expr(fn: str, args_expr: str = "") -> str:
"""
DEPRECATED. Please do not add anything else here. In order to manipulate the
query, create a QueryProcessor and register it into your dataset.
Generate an expression for a given function name and an already-evaluated
args expression. This is a place to define convenience functions that evaluate
to more complex expressions.
"""
if fn.startswith("apdex("):
match = APDEX_FUNCTION_RE.match(fn)
if match:
return "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format(
col=escape_identifier(match.group(1)),
satisfied=match.group(2),
tolerated=int(match.group(2)) * 4,
)
raise ValueError("Invalid format for apdex()")
elif fn.startswith("impact("):
match = IMPACT_FUNCTION_RE.match(fn)
if match:
apdex = "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format(
col=escape_identifier(match.group(1)),
satisfied=match.group(2),
tolerated=int(match.group(2)) * 4,
)
return "(1 - {apdex}) + ((1 - (1 / sqrt(uniq({user_col})))) * 3)".format(
apdex=apdex, user_col=escape_identifier(match.group(3)),
)
raise ValueError("Invalid format for impact()")
elif fn.startswith("failure_rate("):
match = FAILURE_RATE_FUNCTION_RE.match(fn)
if match:
return "countIf(notIn(transaction_status, tuple({ok}, {cancelled}, {unknown}))) / count()".format(
ok=SPAN_STATUS_NAME_TO_CODE["ok"],
cancelled=SPAN_STATUS_NAME_TO_CODE["cancelled"],
unknown=SPAN_STATUS_NAME_TO_CODE["unknown"],
)
raise ValueError("Invalid format for failure_rate()")
# For functions with no args, (or static args) we allow them to already
# include them as part of the function name, eg, "count()" or "sleep(1)"
if not args_expr and fn.endswith(")"):
return fn
# Convenience topK function eg "top10", "top3" etc.
topk = TOPK_FUNCTION_RE.match(fn)
if topk:
return "topK({})({})".format(topk.group(1), args_expr)
# turn uniq() into ifNull(uniq(), 0) so it doesn't return null where
# a number was expected.
if fn == "uniq":
return "ifNull({}({}), 0)".format(fn, args_expr)
# emptyIfNull(col) is a simple pseudo function supported by Snuba that expands
# to the actual clickhouse function ifNull(col, '') Until we figure out the best
# way to disambiguate column names from string literals in complex functions.
if fn == "emptyIfNull" and args_expr:
return "ifNull({}, '')".format(args_expr)
# Workaround for https://github.com/ClickHouse/ClickHouse/issues/11622
# Some distributed queries fail when arrays are passed as array(1,2,3)
# and work when they are passed as [1, 2, 3]
if get_config("format_clickhouse_arrays", 1) and fn == "array":
return f"[{args_expr}]"
# default: just return fn(args_expr)
return "{}({})".format(fn, args_expr) | 81fc9dc55c7602722303c2623d20aa88ce12f532 | 16,413 |
from typing import Sequence
import numpy as np
import jax
from jax.interpreters import pxla
def distribute(tensor: np.ndarray,
grid_shape: Sequence[int],
pmap: bool = True) -> pxla.ShardedDeviceArray:
"""
Convert a numpy array into a ShardedDeviceArray (distributed according to
`grid_shape`). It is assumed that the dimensions of `tensor`
    are evenly divided by `grid_shape`.
    Args:
      tensor: A local numpy array to be distributed according to
        `grid_shape`.
grid_shape: The shape of the processor grid
according to which `tensor` is distributed.
Returns:
ShardedDeviceArray: The distributed tensor
Raises:
ValueError: If `tensor.shape` is not evenly divisible by `grid_shape`
"""
if not np.all([s % p == 0 for s, p in zip(tensor.shape, grid_shape)]):
raise ValueError(f"tensor.shape = {tensor.shape} not evenly divisible "
f"by grid_shape = {grid_shape}.")
ndim = tensor.ndim
pshape = np.asarray(grid_shape)
shape = misc.flatten(
[p, s] for s, p in zip(np.array(tensor.shape) // pshape, pshape))
perm = list(range(0, 2 * ndim, 2)) + list(range(1, 2 * ndim, 2))
reshaped = tensor.reshape(shape).transpose(perm)
final_shape = (np.prod(reshaped.shape[:ndim]), *reshaped.shape[ndim:])
A = reshaped.reshape(final_shape)
if not pmap:
return A
return jax.pmap(lambda x: x, devices=jax.local_devices())(A) | 5e0ca59a23f1cde027769334a938e5855b17bf62 | 16,415 |
from scipy.special import expit
def predict(w, X):
"""
Returns a vector of predictions.
"""
return expit(X.dot(w)) | c3bcb56cdd700ddf96124792b3f356644680e356 | 16,417 |
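A usage sketch assuming w is a weight vector and X a design matrix, giving logistic-regression style probabilities.
import numpy as np
w = np.array([0.5, -0.25])
X = np.array([[1.0, 2.0],
              [4.0, 0.0]])
print(predict(w, X))  # expit of [0.0, 2.0] -> approximately [0.5, 0.881]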
import numpy as np
def maskrgb_to_class(mask, class_map):
""" decode rgb mask to classes using class map"""
h, w, channels = mask.shape[0], mask.shape[1], mask.shape[2]
mask_out = -1 * np.ones((h, w), dtype=int)
for k in class_map:
matches = np.zeros((h, w, channels), dtype=bool)
for c in range(channels):
matches[:, :, c] = mask[:, :, c] == k[c]
matches_total = np.sum(matches, axis=2)
valid_idx = matches_total == channels
mask_out[valid_idx] = class_map[k]
return mask_out | 0af4d42fc2dfba4d56bf990df222895b94b3002d | 16,419 |
def translate_error_code(error_code):
"""
Return the related Cloud error code for a given device error code
"""
    return CLOUD_ERROR_CODES.get(error_code, error_code) | f6cc38b296b330811e932d3e7227d201ed09fe80 | 16,420 |
def generate_oi_quads():
"""Return a list of quads representing a single OI, OLDInstance.
"""
old_instance, err = domain.construct_old_instance(
slug='oka',
name='Okanagan OLD',
url='http://127.0.0.1:5679/oka',
leader='',
state=domain.NOT_SYNCED_STATE,
is_auto_syncing=False)
old_instance_quads = aol_mod.instance_to_quads(
old_instance, domain.OLD_INSTANCE_TYPE)
aol = []
for quad in old_instance_quads:
aol = aol_mod.append_to_aol(aol, quad)
return aol | 6b3466e81014d14f88f17e855d726a608afda946 | 16,421 |
import torch
import numpy as np
import scipy as sp
import scipy.sparse
def graph_intersection(pred_graph, truth_graph):
"""
Use sparse representation to compare the predicted graph
and the truth graph so as to label the edges in the predicted graph
to be 1 as true and 0 as false.
"""
array_size = max(pred_graph.max().item(), truth_graph.max().item()) + 1
l1 = pred_graph.cpu().numpy()
l2 = truth_graph.cpu().numpy()
e_1 = sp.sparse.coo_matrix((np.ones(l1.shape[1]), l1), shape=(array_size, array_size)).tocsr()
e_2 = sp.sparse.coo_matrix((np.ones(l2.shape[1]), l2), shape=(array_size, array_size)).tocsr()
e_intersection = (e_1.multiply(e_2) - ((e_1 - e_2)>0)).tocoo()
new_pred_graph = torch.from_numpy(np.vstack([e_intersection.row, e_intersection.col])).long().to(device)
y = e_intersection.data > 0
return new_pred_graph, y | c63ae9cb52c9a55d54bfb5237c43b1998c51c482 | 16,422 |
import requests
def get_qid_for_title(title):
"""
Gets the best Wikidata candidate from the title of the paper.
"""
api_call = f"https://www.wikidata.org/w/api.php?action=wbsearchentities&search={title}&language=en&format=json"
api_result = requests.get(api_call).json()
if api_result["success"] == 1:
return(api_result["search"][0]["id"]) | 663db71c7a1bbf1617941ba81c5fa3b7d359e00b | 16,423 |
import numpy as np
def experiment(L, T, dL, dT, dLsystm=0):
"""
Performs a g-measurement experiment
Args:
L: A vector of length measurements of the pendulum
T: A vector of period measurements of the pendulum
dL: The error in length measurement
dT: The error in period measurement
dLsystm: Systematic error of length measurement, default value 0
Returns:
A dictionary with the mean values of g,
the g-error values and the measured period
values, for each length
"""
L = L + dLsystm # Add systematic error, if it exists
g = np.power(2*np.pi, 2) * L / np.power(T, 2) # Indirect g measurement from
# length and period
dg = gError(L, T, dL, dT) # g measurement error
gMean = np.sum(g)/g.size # Mean value of g measurements
dgMean = np.sqrt(np.sum(dg*dg))/dg.size # Error of mean value of g
return {'g':gMean, 'dg':dgMean} | cdf7384518fb92295675eb1b15bec883b50a450f | 16,424 |
import htcondor
def find_jobs(schedd=None, attr_list=None, **constraints):
"""Query the condor queue for jobs matching the constraints
Parameters
----------
schedd : `htcondor.Schedd`, optional
open scheduler connection
attr_list : `list` of `str`
list of attributes to return for each job, defaults to all
all other keyword arguments should be ClassAd == value constraints to
apply to the scheduler query
Returns
-------
jobs : `list` of `classad.ClassAd`
the job listing for each job found
"""
if schedd is None:
schedd = htcondor.Schedd()
qstr = ' && '.join(['%s == %r' % (k, v) for
k, v in constraints.items()]).replace("'", '"')
if not attr_list:
attr_list = []
return list(schedd.query(qstr, attr_list)) | 9a6a32002a945d186ea40c534dbc28805458cec2 | 16,425 |
def create_vocab(sequences, min_count, counts):
"""Generate character-to-idx mapping from list of sequences."""
vocab = {const.SOS: const.SOS_IDX, const.EOS: const.EOS_IDX,
const.SEP: const.SEP_IDX}
for seq in sequences:
for token in seq:
for char in token:
if char not in vocab and counts[char] >= min_count:
vocab[char] = len(vocab)
vocab[const.UNK] = len(vocab)
return vocab | 40ca7b3ed88d4134c2949223ec93ef871a18a8fb | 16,426 |
def get_ind_sphere(mesh, ind_active, origin, radius):
"""Retreives the indices of a sphere object coordintes in a mesh."""
return (
(mesh.gridCC[ind_active, 0] <= origin[0] + radius)
& (mesh.gridCC[ind_active, 0] >= origin[0] - radius)
& (mesh.gridCC[ind_active, 1] <= origin[1] + radius)
& (mesh.gridCC[ind_active, 1] >= origin[1] - radius)
& (mesh.gridCC[ind_active, 2] <= origin[2] + radius)
& (mesh.gridCC[ind_active, 2] >= origin[2] - radius)
) | 9e246c3c0d3d7750a668476f0d0d90b28c46fc27 | 16,427 |
import numpy as np
from scipy.signal import argrelextrema
def find_frame_times(eegFile, signal_idx=-1, min_interval=40, every_n=1):
"""Find imaging frame times in LFP data using the pockels blanking signal.
Due to inconsistencies in the fame signal, we look for local maxima. This
avoids an arbitrary threshold that misses small spikes or includes two
nearby time points that are part of the same frame pulse.
Parameters
----------
eegFile : str
Path to eeg data file
signal_idx : int
Index of the pockels signal, e.g. eeg[signal_idx, :], default -1
min_interval : int
Minimum radius around local maxima to enforce, default 40
every_n : int
Return every nth frame time, useful for multiplane data, default 1
Returns
-------
frame times : array, shape (n_frame_times, )
"""
pc_signal = loadEEG(eegFile.replace('.eeg', ''))['EEG'][signal_idx, :]
# break ties for local maxima by increasing first point by 1
same_idx = np.where(np.diff(pc_signal) == 0)[0]
pc_signal[same_idx] += 1
pc_signal = np.abs(np.diff(pc_signal))
frame_times = argrelextrema(pc_signal, np.greater, order=min_interval)[0]
return frame_times[::every_n] | 7ed6a6a5b3d873132575ed5af1d9132d22e3898b | 16,428 |
import numpy as np
import quantities as pq
def _interpolate_signals(signals, sampling_times, verbose=False):
"""
Interpolate signals at given sampling times.
"""
# Reshape all signals to one-dimensional array object (e.g. AnalogSignal)
for i, signal in enumerate(signals):
if signal.ndim == 2:
signals[i] = signal.flatten()
elif signal.ndim > 2:
            raise ValueError('elements in fir_rates must have at most 2 dimensions')
if verbose:
print('create time slices of the rates...')
# Interpolate in the time bins
interpolated_signal = np.vstack([_analog_signal_step_interp(
signal, sampling_times).rescale('Hz').magnitude
for signal in signals]) * pq.Hz
return interpolated_signal | b72d8b5bbd55fb70107e36c551cb558953baed50 | 16,429 |
def mean_standard_error_residuals(A, b):
"""
Mean squared error of the residuals. The sum of squared residuals
divided by the residual degrees of freedom.
"""
n, k = A.shape
ssr = sum_of_squared_residuals(A, b)
return ssr / (n - k) | 6860ea11b2f2af29c9b519ef692ee990d2aef149 | 16,430 |
import numpy as np
def cel2gal(ra, dec):
"""
Convert celestial coordinates (J2000) to Galactic
coordinates. (Much faster than astropy for small arrays.)
Parameters
----------
ra : `numpy.array`
dec : `numpy.array`
Celestical Coordinates (in degrees)
Returns
-------
glon : `numpy.array`
glat : `numpy.array`
Galactic Coordinates (in degrees)
"""
dec = np.radians(dec)
sin_dec = np.sin(dec)
cos_dec = np.cos(dec)
ra = np.radians(ra)
ra_gp = np.radians(192.85948)
de_gp = np.radians(27.12825)
sin_ra_gp = np.sin(ra - ra_gp)
cos_ra_gp = np.cos(ra - ra_gp)
lcp = np.radians(122.932)
sin_b = (np.sin(de_gp) * sin_dec) + (np.cos(de_gp) * cos_dec * cos_ra_gp)
lcpml = np.arctan2(cos_dec * sin_ra_gp,
(np.cos(de_gp) * sin_dec)
- (np.sin(de_gp) * cos_dec * cos_ra_gp))
glat = np.arcsin(sin_b)
glon = (lcp - lcpml + (2. * np.pi)) % (2. * np.pi)
return np.degrees(glon), np.degrees(glat) | b1185ce199c0f929c3395c452e619b93e2ee66a9 | 16,431 |
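A sanity-check sketch: the celestial coordinates of the north Galactic pole (RA 192.85948, Dec 27.12825, the same constants hard-coded in the function) should map to a Galactic latitude of 90 degrees.
import numpy as np
glon, glat = cel2gal(np.array([192.85948]), np.array([27.12825]))
print(glat)  # approximately [90.] -- the north Galactic pole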
def index(request):
"""Renders main website with welcome message"""
return render(request, 'mapper/welcome.html', {}) | 37194ef3ccc415c6db39f664bc819d7df1b9665a | 16,432 |
def tidy_output(differences):
"""Format the output given by other functions properly."""
out = []
if differences:
out.append("--ACLS--")
out.append("User Path Port Protocol")
for item in differences:
            # if item[2] != None:  # In some cases, processes with port None showed up
            out.append("%s %s %s %s" % item)
    # item still contains one more element, the protocol,
    # which is not used in the normal output
    return out | 2a7007ae16e91b111f556ea95eedc466a8606494 | 16,433 |
from typing import Dict
from typing import Collection
def get_issues_overview_for(db_user: User, app_url: str) -> Dict[str, Collection]:
"""
    Returns a dictionary with the keys 'user' and 'other', each holding a list of issue-info dicts.
IMPORTANT: URL's are generated for the frontend!
:param db_user: User
:param app_url: current applications url
:return: dict
"""
if not db_user or db_user.nickname == nick_of_anonymous_user:
return {
'user': [],
'other': []
}
if db_user.is_admin():
db_issues_other_users = DBDiscussionSession.query(Issue).filter(Issue.author != db_user).all()
else:
db_issues_other_users = [issue for issue in db_user.accessible_issues if issue.author != db_user]
db_issues_of_user = DBDiscussionSession.query(Issue).filter_by(author=db_user).order_by(
Issue.uid.asc()).all()
return {
'user': [__create_issue_dict(issue, app_url) for issue in db_issues_of_user],
'other': [__create_issue_dict(issue, app_url) for issue in db_issues_other_users]
} | 02ab5314a961a7fa398df2d43792fed1321939c6 | 16,434 |
from adblockparser import AdblockRules
def load_file(file):
"""Returns an AdblockRules object using the rules specified in file."""
with open(file) as f:
rules = f.readlines()
return AdblockRules(rules) | a9783ec4e8a195af688456af1949e33fd17d3cb7 | 16,435 |
import numpy as np
def isSV0_QSO(gflux=None, rflux=None, zflux=None, w1flux=None, w2flux=None,
gsnr=None, rsnr=None, zsnr=None, w1snr=None, w2snr=None,
dchisq=None, maskbits=None, objtype=None, primary=None):
"""Target Definition of an SV0-like QSO. Returns a boolean array.
Parameters
----------
See :func:`~desitarget.cuts.set_target_bits`.
Returns
-------
:class:`array_like` or :class:`float`
``True`` if and only if the object is an SV-like QSO target.
If `floats` are passed, a `float` is returned.
Notes
-----
- Current version (10/14/19) is version 112 on `the SV wiki`_.
- Hardcoded for south=False.
- Combines all QSO-like SV classes into one bit.
"""
if primary is None:
primary = np.ones_like(rflux, dtype='?')
qsocolor_north = isQSO_cuts(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, w1snr=w1snr, w2snr=w2snr,
south=False
)
qsorf_north = isQSO_randomforest(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, south=False
)
qsohizf_north = isQSO_highz_faint(
primary=primary, zflux=zflux, rflux=rflux, gflux=gflux,
w1flux=w1flux, w2flux=w2flux,
dchisq=dchisq, maskbits=maskbits,
objtype=objtype, south=False
)
qsocolor_high_z_north = isQSO_color_high_z(
gflux=gflux, rflux=rflux, zflux=zflux,
w1flux=w1flux, w2flux=w2flux, south=False
)
qsoz5_north = isQSOz5_cuts(
primary=primary, gflux=gflux, rflux=rflux, zflux=zflux,
gsnr=gsnr, rsnr=rsnr, zsnr=zsnr,
w1flux=w1flux, w2flux=w2flux, w1snr=w1snr, w2snr=w2snr,
dchisq=dchisq, maskbits=maskbits, objtype=objtype,
south=False
)
qsocolor_highz_north = (qsocolor_north & qsocolor_high_z_north)
qsorf_highz_north = (qsorf_north & qsocolor_high_z_north)
qsocolor_lowz_north = (qsocolor_north & ~qsocolor_high_z_north)
qsorf_lowz_north = (qsorf_north & ~qsocolor_high_z_north)
qso_north = (qsocolor_lowz_north | qsorf_lowz_north | qsocolor_highz_north
| qsorf_highz_north | qsohizf_north | qsoz5_north)
# ADM The individual routines return arrays, so we need
# ADM a check to preserve the single-object case.
if _is_row(rflux):
return qso_north[0]
return qso_north | f8be10f2d5d52ed0afd06a23cf7a9f1a98af1f25 | 16,436 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def show_df_nans(df, columns=None, plot_width=10, plot_height=8):
    """
    Input: df (pandas dataframe), columns (list)
Output: seaborn heatmap plot
Description: Create a data frame for features which may be nan. Set NaN values be 1 and
numeric values to 0. Plots a heat map where dark squares/lines show where
data is missing. The columns to plot can be specified with an input param. Otherwise
all columns will be plotted -- which appear crowded.
"""
if not columns:
plot_cols = df.columns
else:
plot_cols = columns
df_viznan = pd.DataFrame(data=1, index=df.index, columns=plot_cols)
df_viznan[~pd.isnull(df[plot_cols])] = 0
plt.figure(figsize=(plot_width, plot_height))
plt.title('Dark values are nans')
return sns.heatmap(df_viznan.astype(float)) | 5f93f78eee905c81c7178a2a6ed7167597d4964c | 16,437 |
def evaluate_accuracy(file1, file2):
    """
    evaluate accuracy
    """
    count = 0
    same_count = 0
    # Use context managers so both files are closed properly
    with open(file1, 'r') as f1, open(file2, 'r') as f2:
        while 1:
            line1 = f1.readline().strip('\n')
            line2 = f2.readline().strip('\n')
            if (not line1) or (not line2):
                break
            count += 1
            if int(float(line1)) == int(1 if float(line2) > 0.5 else 0):
                same_count += 1
    logger.info("evaluate accuracy: ")
    logger.info(float(same_count) / count)
    return float(same_count) / count | 52fdb8054de07fe53f77dd74317f18ed1dfbbb36 | 16,439 |
def add_hp_label(merged_annotations_column, label_type):
"""Adds prefix to annotation labels that identify the annotation as
belonging to the provided label_type (e.g. 'h@' for host proteins).
Parameters
----------
merged_annotations_column : array-like (pandas Series))
An array containing sets of annotations that need to be labeled.
e.g.
0 {GO:0010008, GO:0070062, IPR036865, GO:0048471...
1 {GO:0006351, GO:0070062, GO:0007623, GO:004851...
2 {GO:0019888, GO:0006470, GO:0001754, GO:009024...
label_type : str
The prefix to be appended (without the "@" separator).
Returns
-------
labeled_annotations : array-like (pandas Series)
A new pandas Series where all annotations have received a prefix.
"""
labeled_annotations = merged_annotations_column.map(
lambda x: set([label_type + '@' + i for i in x]))
return labeled_annotations | 648f548931a1fae5d19291d81f2355a0a00877c3 | 16,441 |
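A usage sketch with made-up annotation sets: every annotation in each set gains the 'h@' prefix.
import pandas as pd
annotations = pd.Series([{"GO:0010008", "IPR036865"}, {"GO:0006351"}])
labeled = add_hp_label(annotations, "h")
print(labeled[1])  # {'h@GO:0006351'}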
def _serialize_value(
target_expr: str, value_expr: str, a_type: mapry.Type,
auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str:
"""
Generate the code to serialize a value.
The code serializes the ``value_expr`` into the ``target_expr``.
:param target_expr: Python expression of the JSONable to be set
:param value_expr: Python expression of the value to be serialized
:param a_type: the mapry type of the value
:param auto_id: generator of unique identifiers
:param py: Python settings
:return: generated serialization code
"""
result = ''
serialization_expr = _serialization_expr(
value_expr=value_expr, a_type=a_type, py=py)
if serialization_expr is not None:
result = '{} = {}'.format(target_expr, serialization_expr)
elif isinstance(a_type, mapry.Array):
result = _serialize_array(
target_expr=target_expr,
value_expr=value_expr,
a_type=a_type,
auto_id=auto_id,
py=py)
elif isinstance(a_type, mapry.Map):
result = _serialize_map(
target_expr=target_expr,
value_expr=value_expr,
a_type=a_type,
auto_id=auto_id,
py=py)
else:
raise NotImplementedError(
"Unhandled serialization of type: {}".format(a_type))
return result | 6ec6051715ca34771bf32582bb86280d58af27d3 | 16,443 |
def return_union_close():
"""union of statements, close statement"""
return " return __result" | c1a1b6b6b1164a641a7f9e598eec346af13f2aa7 | 16,444 |
from typing import Union
from typing import List
from typing import Tuple
def parse_unchanged(value: Union[str, List[str]]) -> Tuple[bool, Union[str, List[str]]]:
"""Determine if a value is 'unchanged'.
Args:
value: value supplied by user
"""
unchanges = [
SETTING_UNCHANGED,
str(SETTING_UNCHANGED),
SETTING_UNCHANGED[0],
str(SETTING_UNCHANGED[0]),
]
if value in unchanges:
return True, SETTING_UNCHANGED
return False, value | b4f1e155064c053fa1df65d242e920ae4ecf2fe5 | 16,445 |
def get_val(tup):
"""Get the value from an index-value pair"""
return tup[1] | 5966bbbb28006c46eaf11afaef152573aaaa8d2a | 16,446 |
def lnprior_d(d,L=default_L):
""" Expotentially declining prior. d, L in kpc (default L=0.5) """
if d < 0: return -np.inf
return -np.log(2) - 3*np.log(L) + 2*np.log(d) - d/L | fd7cf591c5095fe8129662794b5c05235eda8941 | 16,449 |
def textile(text, **args):
"""This is Textile.
Generates XHTML from a simple markup developed by Dean Allen.
This function should be called like this:
textile(text, head_offset=0, validate=0, sanitize=0,
encoding='latin-1', output='ASCII')
"""
return Textiler(text).process(**args) | 051f2aa254c2c24e80640b0779712d2154a1b67d | 16,450 |
def binary_info_gain(df, feature, y):
"""
:param df: input dataframe
:param feature: column to investigate
:param y: column to predict
:return: information gain from binary feature column
"""
return float(sum(np.logical_and(df[feature], df[y])))/len(df[feature]) | 8aa4bbb6997b913001074e15fcdefb5f6047cab3 | 16,451 |
def get_all_instances(region):
"""
Returns a list of all the type of instances, and their instances, managed
by the scheduler
"""
ec2 = boto3.resource('ec2', region_name=region)
rds = boto3.client('rds', region_name=region)
return {
'EC2': [EC2Schedulable(ec2, i) for i in ec2.instances.all()],
'RDS': [RDSSchedulable(rds, i) for i in rds.describe_db_instances()['DBInstances']]
} | daf6c4cc71f19c7b94f625a283eeb59f4fcae10f | 16,452 |
import warnings
import asyncio
def futures_navigating(urls: list, amap: bool = True) -> dict:
    """
    Asynchronously obtain route-planning results from the AMap (Gaode) API for a
    list of drive urls.
    :param urls:
    :param amap: switch that enables/disables the AMap requests
    :return:
    """
data_collections = [None] * len(urls)
pack_data_result = {}
all_tasks = []
    # Preparation
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
event_loop = asyncio.get_event_loop()
except Exception as _:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
event_loop = asyncio.get_event_loop()
    # Thread-pool alternative (kept for reference)
# for idx in range(len(urls)):
# all_tasks.append(event_loop.run_in_executor(register.pool, request_navigating, urls[idx], idx, data_collections))
# event_loop.run_until_complete(asyncio.wait(all_tasks))
    # Asynchronous IO
if amap:
event_loop.run_until_complete(async_request_navigating(urls, data_collections))
    # Collect results; only take ['route']['paths'][0], i.e. only the first routing strategy
for idx in range(len(urls)):
        # If the request for this new url failed
if not data_collections[idx]:
if amap:
register.logger.error(f"futures_navigating request failed,new url:{urls[idx]},url_idx:{idx}")
data_collections[idx] = default_data_with_navigating_url(urls[idx], idx, data_collections)
api_data_result = data_collections[idx]
if not pack_data_result:
pack_data_result = api_data_result
pack_data_result['route']['paths'] = [pack_data_result['route']['paths'][0]]
else:
pack_data_result['route']['destination'] = api_data_result['route']['destination']
pack_data_result['route']['taxi_cost'] = str(
float(pack_data_result['route']['taxi_cost']) + float(api_data_result['route']['taxi_cost']))
pack_data_result['route']['paths'][0]['distance'] = str(
float(pack_data_result['route']['paths'][0]['distance']) + float(api_data_result['route']['paths'][0]['distance']))
pack_data_result['route']['paths'][0]['duration'] = str(
float(pack_data_result['route']['paths'][0]['duration']) + float(api_data_result['route']['paths'][0]['duration']))
pack_data_result['route']['paths'][0]['tolls'] = str(
float(pack_data_result['route']['paths'][0]['tolls']) + float(api_data_result['route']['paths'][0]['tolls']))
pack_data_result['route']['paths'][0]['toll_distance'] = str(
float(pack_data_result['route']['paths'][0]['toll_distance']) + float(
api_data_result['route']['paths'][0]['toll_distance']))
pack_data_result['route']['paths'][0]['steps'].extend(api_data_result['route']['paths'][0]['steps'])
return pack_data_result | 4b65488f2c3ba7ac7ecc46a2da949c15bffbbf9b | 16,453 |
def getdim(s):
"""If s is a representation of a vector, return the dimension."""
if len(s) > 4 and s[0] == "[" and s[-1] == "]":
return len(splitargs(s[1:-1], ["(", "["], [")", "]"]))
else:
return 0 | 7ad62245ad2ebf7a262f6442ff209b819b1fa36e | 16,454 |
def numToString(num: int) -> str:
"""Write a number in base 36 and return it as a string
:param num: number to encode
:return: number encoded as a base-36 string
"""
base36 = ''
while num:
num, i = divmod(num, 36)
base36 = BASE36_ALPHABET[i] + base36
return base36 or BASE36_ALPHABET[0] | 419759cdbe7b4e0dcf38c3f79f0de6dec4f84131 | 16,457 |
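A usage sketch; BASE36_ALPHABET is not shown in the snippet, so it is assumed here to be digits followed by lowercase letters.
BASE36_ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"  # assumed alphabet
print(numToString(0))     # '0'
print(numToString(1295))  # 'zz' (35*36 + 35)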
def parse_enumeration(enumeration_bytes):
"""Parse enumeration_bytes into a list of test_ids."""
# If subunit v2 is available, use it.
if bytestream_to_streamresult is not None:
return _v2(enumeration_bytes)
else:
return _v1(enumeration_bytes) | ce7104f30eda416a59cb1397736886422af866fd | 16,459 |
import re
def register_user(username, passwd, email): # type: (str, str, str) -> Optional[str]
"""Returns an error message or None on success."""
if passwd == "":
return "The password can't be empty!"
if email: # validate the email only if it is provided
result = validate_email_address(email)
if result:
return result
username = username.strip()
if not re.match(config.get('nick_regex'), username):
return "Invalid username!"
crypted_pw = encrypt_pw(passwd)
with crawl_db(config.get('password_db')) as db:
db.c.execute("select username from dglusers where username=? collate nocase",
(username,))
result = db.c.fetchone()
if result:
return "User already exists!"
with crawl_db(config.get('password_db')) as db:
query = """
INSERT INTO dglusers
(username, email, password, flags, env)
VALUES
(?, ?, ?, 0, '')
"""
db.c.execute(query, (username, email, crypted_pw))
db.conn.commit()
return None | d7960dc9c66ee584787b9a2b25f367fcbc7455e8 | 16,460 |
def _train_n_hmm(data: _Array, m_states: int, n_trails: int):
"""Trains ``n_trails`` HMMs each initialized with a random tpm.
Args:
        data: Possibly unprocessed input data set.
m_states: Number of states.
n_trails: Number of trails.
Returns:
        Best model with respect to log-likelihood.
"""
feat = data.round().astype(int)
trails = []
for i in range(n_trails):
hmm = PoissonHmm(feat, m_states, init_gamma='softmax')
hmm.fit(feat)
if hmm.success:
trails.append(hmm)
if len(trails) == 0:
return None
return min(trails, key=lambda hmm: abs(hmm.quality.nll)) | db5864ca45ac6fb939a0d1fd63fcb0b61e0ce6b9 | 16,461 |
def get_metric_function(name):
"""
Get a metric from the supported_sklearn_metric_functions dictionary.
Parameters
----------
name : str
The name of the metric to get.
Returns
-------
metric : function
The metric function.
"""
if name in supported_sklearn_metric_functions:
return supported_sklearn_metric_functions[name]
raise ValueError(
"The metric {} is not supported. Supported metrics are: {}".format(
name, list(supported_sklearn_metrics)
)
) | ba490650f55fd5d9a480fc9b9b94c5e71fefe23c | 16,462 |
from ipaddress import ip_address
from re import match
def ping(enode, count, destination, interval=None, quiet=False, shell=None):
"""
Perform a ping and parse the result.
:param enode: Engine node to communicate with.
:type enode: topology.platforms.base.BaseNode
:param int count: Number of packets to send.
:param str destination: The destination host.
:param float interval: The wait interval in seconds between each packet.
:param str shell: Shell name to execute commands. If ``None``, use the
Engine Node default shell.
:rtype: dict
:return: The parsed result of the ping command in a dictionary of the form:
::
{
'transmitted': 0,
'received': 0,
'errors': 0,
'loss_pc': 0,
'time_ms': 0
}
"""
assert count > 0
assert destination
addr = ip_address(destination)
cmd = 'ping'
if addr.version == 6:
cmd = 'ping6'
cmd = [cmd, '-c', str(count), destination]
if interval is not None:
assert interval > 0
cmd.append('-i')
cmd.append(str(interval))
if quiet:
cmd.append('-q')
ping_raw = enode(' '.join(cmd), shell=shell)
assert ping_raw
for line in ping_raw.splitlines():
m = match(PING_RE, line)
if m:
return {
k: (int(v) if v is not None else 0)
for k, v in m.groupdict().items()
}
raise Exception('Could not parse ping result') | e9fce1819ea21ad801468653e534350e863e123b | 16,463 |
import xarray as xr
def calculate_EHF_severity(
T,
T_p95_file=None,
EHF_p85_file=None,
T_p95_period=None,
T_p95_dim=None,
EHF_p85_period=None,
EHF_p85_dim=None,
rolling_dim="time",
T_name="t_ref",
):
"""
Calculate the severity of the Excess Heat Factor index, defined as:
EHF_severity = EHF / EHF_p85
where "_p85" denotes the 85th percentile of all positive values using all days in the
year and the Excess Heat Factor (EHF) is defined as:
EHF = max(0, EHI_sig) * max(1, EHI_accl)
with
        EHI_sig  = (T_i + T_i+1 + T_i+2) / 3 - T_p95
        EHI_accl = (T_i + T_i+1 + T_i+2) / 3 - (T_i-1 + ... + T_i-30) / 30
T is the daily mean temperature (commonly calculated as the mean of the min and max
daily temperatures, usually with daily maximum typically preceding the daily minimum,
and the two observations relate to the same 9am-to-9am 24-h period) and T_p95 is the 95th
percentile of T using all days in the year.
Parameters
----------
T : xarray DataArray
Array of daily mean temperature
T_p95_file : xarray DataArray, optional
Path to a file with the 95th percentiles of T using all days in the year. This should be
relative to the project directory. If not provided, T_p95_period and T_p95_dim must be
provided
EHF_p85_file : xarray DataArray, optional
Path to a file with the 85th percentiles of positive EHF using all days in the year. This
should be relative to the project directory. If not provided, EHF_p85_period and
EHF_p85_dim must be provided
T_p95_period : list of str, optional
Size 2 iterable containing strings indicating the start and end dates of the period over
which to calculate T_p95. Only used if T_p95 is None
T_p95_dim : str or list of str, optional
The dimension(s) over which to calculate T_p95. Only used if T_p95 is None
EHF_p85_period : list of str, optional
Size 2 iterable containing strings indicating the start and end dates of the period over
which to calculate EHF_p85. Only used if EHF_p85 is None
EHF_p85_dim : str or list of str, optional
The dimension(s) over which to calculate EHF_p85. Only used if EHF_p85 is None
rolling_dim : str, optional
The dimension over which to compute the rolling averages in the definition of EHF
T_name : str, optional
The name of the temperature variable in T
References
----------
Nairn et al. 2015: https://doi.org/10.3390/ijerph120100227
"""
if EHF_p85_file is None:
if (EHF_p85_period is not None) & (EHF_p85_dim is not None):
calculate_EHF_p85 = True
else:
raise ValueError(
(
"Must provide either thresholds of the 85th percentile of EHF (E_p85) or details "
"of the climatological period and dimensions to use to calculate these thresholds "
"(EHF_p85_period and EHF_p85_dim)"
)
)
else:
EHF_p85_file = PROJECT_DIR / EHF_p85_file
EHF_p85 = xr.open_zarr(EHF_p85_file)
calculate_EHF_p85 = False
EHF = calculate_EHF(T, T_p95_file, T_p95_period, T_p95_dim, rolling_dim, T_name)
if calculate_EHF_p85:
EHF_p85 = calculate_percentile_thresholds(
EHF.where(EHF > 0), 0.85, EHF_p85_period, EHF_p85_dim, frequency=None
)
EHF_sev = EHF / EHF_p85
EHF_sev = EHF_sev.rename({"ehf": "ehf_severity"})
EHF_sev["ehf_severity"].attrs["long_name"] = "Severity of the Excess Heat Factor"
EHF_sev["ehf_severity"].attrs["standard_name"] = "excess_heat_factor_severity"
EHF_sev["ehf_severity"].attrs["units"] = "-"
return EHF_sev | e82d54f0bd67c5cd4c938dbdd335aed70fe3c521 | 16,464 |
from typing import Dict
from typing import List
from typing import Tuple
def arrange_train_data(keypoints: Dict, beg_end_times: List[Tuple], fps: float, MAX_PERSONS: int) -> Dict:
"""
Arrange data into frames. Add gestures present or not based on time ranges. Generate each frame and also, add dummy when necessary.
"""
data = {}
for key in keypoints.keys():
persons = list(keypoints[key].keys())
persons.remove("start_frame")
persons.remove("end_frame")
count_persons = len(persons)
gestures_xy = []
start_frame, end_frame = keypoints[key]["start_frame"], keypoints[key]["end_frame"]
start_time_ms = start_frame/fps*1000
end_time_ms = end_frame/fps*1000
for per_ind in range(1, count_persons+1):
per_str = str(per_ind)
gestures_xy.append(keypoints[key][per_str]["person_keypoints"])
# dummy to always have MAX_PERSONS (training to be done in matrices (Required_keypoints x Max_persons x window))
dummy = generate_dummy_keypoints()
dummy_frames_list = []
for _ in range(start_frame, end_frame+1):
dummy_frames_list.append(dummy)
for i in range(MAX_PERSONS - count_persons):
gestures_xy.append(dummy_frames_list)
frame_division_gestures = list(zip(*gestures_xy))
frames_dict = {}
for i, frame in enumerate(frame_division_gestures):
frames_dict[str(start_frame + i)] = {
"frames": frame,
"gesture": False
}
data[key] = frames_dict
for be_time in beg_end_times:
if be_time[0] > end_time_ms or be_time[1] < start_time_ms:
continue
elif be_time[0] < start_time_ms and be_time[1] < end_time_ms:
bt = start_time_ms
et = be_time[1]
elif be_time[0] > start_time_ms and be_time[1] < end_time_ms:
bt = be_time[0]
et = be_time[1]
elif be_time[0] < start_time_ms and be_time[1] > end_time_ms:
bt = start_time_ms
et = end_time_ms
elif be_time[0] > start_time_ms and be_time[1] > end_time_ms:
bt = be_time[0]
et = end_time_ms
# Now using bt and et, find the frame indices with gesture
begin_at_frame_ind = int(bt*fps/1000+0.5)
no_of_frames = int((et-bt)*fps/1000+0.5)
end_at_frame_ind = begin_at_frame_ind + no_of_frames
if end_at_frame_ind > int((list(data[key].keys()))[-1]):
end_at_frame_ind = int((list(data[key].keys()))[-1])
for frame_no in range(begin_at_frame_ind, end_at_frame_ind+1):
data[key][str(frame_no)]["gesture"] = True
return data | ef433809c5e59d2b870a84c9cfce9e31faa4659a | 16,466 |
def length(list):
"""Return the number of items in the list."""
if list == ():
return 0
else:
_, tail = list
return 1 + length(tail) | 35864cd8cdd065463592d3737077a4d06b38aad1 | 16,467 |
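A usage sketch: lists here are nested (head, tail) pairs terminated by the empty tuple.
lst = (1, (2, (3, ())))
print(length(lst))  # 3
print(length(()))   # 0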
from numbers import Number
def buzz(x):
"""
Takes an input `x` and checks to see if x is a
number, and if so, also a multiple of 5.
If it is both, return 'Buzz'.
Otherwise, return the input.
"""
return 'Buzz' if isinstance(x, Number) and x % 5 == 0 else x | b24a37816d218a6cc1d960bfd767cb1a2052067d | 16,468 |
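A quick usage sketch of the three cases:
print(buzz(10))     # 'Buzz'  (a number and a multiple of 5)
print(buzz(7))      # 7       (a number but not a multiple of 5)
print(buzz("ten"))  # 'ten'   (not a number, returned unchanged)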
from typing import Iterable
from typing import Any
from typing import Tuple
def _tuple_of_big_endian_int(bit_groups: Iterable[Any]) -> Tuple[int, ...]:
"""Returns the big-endian integers specified by groups of bits.
Args:
bit_groups: Groups of descending bits, each specifying a big endian
integer with the 1s bit at the end.
Returns:
A tuple containing the integer for each group.
"""
return tuple(value.big_endian_bits_to_int(bits) for bits in bit_groups) | 78d3b739a8d8a3f724dca2b226e976cde93426dd | 16,469 |
def make_simple_server(service, handler,
host="localhost",
port=9090):
"""Return a server of type TSimple Server.
Based on thriftpy's make_server(), but return TSimpleServer instead of
TThreadedServer.
Since TSimpleServer's constructor doesn't accept kwargs, some arguments of
make_server can't be used here. By default:
client_timeout: None
protocol: TBinaryProtocolFactory
transport: TBufferedTransportFactory
"""
processor = TProcessor(service, handler)
if host and port:
server_socket = TServerSocket(
host=host, port=port, client_timeout=None)
else:
raise ValueError("Either host/port or unix_socket must be provided.")
server = TSimpleServer(processor, server_socket)
return server | 42fc9f0bdcfbe4a509d5a682821ea3e71386f699 | 16,470 |
import numpy as np
from numpy import sin, cos
def morph(clm1, clm2, t, lmax):
    """Linearly interpolate between the two sets of spherical harmonic coefficients."""
clm = (1 - t) * clm1 + t * clm2
grid_reco = clm.expand(lmax=lmax) # cut "high frequency" components
agrid_reco = grid_reco.to_array()
pts = []
for i, longs in enumerate(agrid_reco):
ilat = grid_reco.lats()[i]
for j, value in enumerate(longs):
ilong = grid_reco.lons()[j]
th = np.deg2rad(90 - ilat)
ph = np.deg2rad(ilong)
r = value + rbias
p = np.array([sin(th) * cos(ph), sin(th) * sin(ph), cos(th)]) * r
pts.append(p)
return pts | a6e6ca0070cc38b54f2bfd41b0fe69e2a5bb21f8 | 16,471 |
import re
from typing import OrderedDict
def read_avg_residuemap(infile):
""" Read sequence definition from PSN avg file, returning sequence Map
:param infile: File handle pointing to WORDOM avgpsn output file
:return: Returns an internal.map.Map object mapping the .pdb
residues to WORDOM id's from "Seq" section of the avgpsn-file
"""
    m_start = re.compile(r"^\*\*\* Seq \*\*\*")
    m_end = re.compile(r"^============")
    m_entry = re.compile(r"^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$")
residuemap = OrderedDict()
reading = False
for line in infile:
if reading:
# Stop reading if end of interaction strength section
if m_end.search(line):
break
else:
if m_entry.search(line):
[num, resname, normfact] = line.split()
residuemap[resname] = int(num)
# Start reading when header found
elif m_start.search(line):
reading = True
return residuemap | 92c4cbe53edcd3d894a038d7cb9308c653e37146 | 16,472 |
def schedule_remove(retval=None):
"""
schedule(retval=stackless.current) -- switch to the next runnable tasklet.
The return value for this call is retval, with the current
tasklet as default.
schedule_remove(retval=stackless.current) -- ditto, and remove self.
"""
_scheduler_remove(getcurrent())
r = schedule(retval)
return r | 8ba10819d4de5cc676583e7b05036be49e6958cf | 16,474 |
def check_region(read, pair, region):
"""
determine whether or not reads map to specific region of scaffold
"""
if region is False:
return True
for mapping in read, pair:
if mapping is False:
continue
start, length = int(mapping[3]), len(mapping[9])
r = [start, start + length - 1]
if get_overlap(r, region) > 0:
return True
return False | c71378d9ee674a117635634e3153f3cb7650fab3 | 16,475 |
from gi.repository import Gst
def millisecond_to_clocktime(value):
"""Convert a millisecond time to internal GStreamer time."""
return value * Gst.MSECOND | 8359b65a015febedba8bb6b68d310d70b1b8e1a6 | 16,477 |
import numpy as np
def SE2_exp(v):
"""
SE2 matrix exponential
"""
theta, x, y = v
if np.abs(theta) < 1e-6:
A = 1 - theta**2/6 + theta**4/120
B = theta/2 - theta**3/24 + theta**5/720
else:
A = np.sin(theta)/theta
B = (1 - np.cos(theta))/theta
V = np.array([[A, -B], [B, A]])
R = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
u = np.array([[x, y]]).T
return np.block([
[R, V.dot(u)],
[0, 0, 1]]) | f94454bf4b134fac7b45d89d4c3798b1d6c201fa | 16,478 |
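A usage sketch: a pure translation (theta = 0) exponentiates to a plain translation matrix.
import numpy as np
print(SE2_exp(np.array([0.0, 1.0, 2.0])))
# [[1. 0. 1.]
#  [0. 1. 2.]
#  [0. 0. 1.]]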
def skip_on_hw(func):
"""Test decorator for skipping tests which should not be run on HW."""
def decorator(f):
def decorated(self, *args, **kwargs):
if has_ci_ipus():
self.skipTest("Skipping test on HW")
return f(self, *args, **kwargs)
return decorated
return decorator(func) | 6cbde5ce9b70c9d9f71330470f5f2ae913f56021 | 16,479 |
def rxns4tag(tag, rdict=None, ver='1.7', wd=None):
"""
Get a list of all reactions with a given p/l tag
Notes
-----
- This function is useful, but update to GEOS-Chem flexchem ( in >v11)
will make it redundent and therefore this is not being maintained.
"""
# --- get reaction dictionary
if isinstance(rdict, type(None)):
rdict = rxn_dict_from_smvlog(wd, ver=ver)
# --- Caveats -
# to adapt for long line errors in fortran written output
errs = ['LO3_36'] # + ['LO3_87']
cerrs = ['RD95'] # + ['LR48']
# To account for reaction where not all channels result in Ox loss
errs += ['RD48']
cerrs += ['LO3_87']
if any([(tag == i) for i in errs]):
tag = cerrs[errs.index(tag)]
# -- loop reactions, if tag in reaction return reaction
rxns = []
for n, rxn in enumerate(rdict.values()):
expanded_rxn_str = [i.split('+') for i in rxn]
expanded_rxn_str = [
item for sublist in expanded_rxn_str for item in sublist]
# ( Issue) Why endswith? Restore to use if contains any tag
# if any( [ (i.endswith(tag) ) for i in rxn]):
# This is because otherwise 'LR10' would be read as 'LR100'
# if any( [tag in i for i in rxn]): # <= This will lead to false +ve
# However, fortran print statment err for ( LO3_87 )
if any([i.endswith(tag) for i in expanded_rxn_str]):
rxns.append([list(rdict.keys())[n]] + rxn)
return rxns | 32243bcdb66c9320679d580da2b6f9ee086179d2 | 16,480 |
def DateTime_GetCurrentYear(*args, **kwargs):
"""DateTime_GetCurrentYear(int cal=Gregorian) -> int"""
return _misc_.DateTime_GetCurrentYear(*args, **kwargs) | 5f25e4387e72497673ea49d6f67f06e9894e29af | 16,481 |
def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):
    """Decay the learning rate by a factor of 0.8 every lr_decay_epoch epochs."""
    # model_out_path = "./model/W_epoch_{}.pth".format(epoch)
    # torch.save(model_W, model_out_path)
    lr = init_lr * (0.8**(epoch // lr_decay_epoch))
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer | 2095eda8493e0e53bca5e6f1b9149b544c34da61 | 16,482 |
def wait(duration):
"""
Waits the duration, in seconds, you specify.
Args:
duration (:any:`DoubleValue`): time, in seconds, this function waits. You may specify fractions of seconds.
Returns:
float: actual seconds waited.
This wait is non-blocking, so other tasks will run while this wait executes.
"""
init_time = DoubleValue(0)
init_time.value = seqtime()
while seqtime() - init_time.value < duration.value:
nivs_yield()
    init_time.value = seqtime()
return init_time.value | 0a40028bbe88d290cc309dc94ef7be48522ded02 | 16,483 |
import datetime
def yesterday_handler(update: Update, context: CallbackContext):
"""
Diary content upload handler. Uploads incoming messages to db as a note for yesterday.
"""
# get user timezone
user_timezone = Dao.get_user_timezone(update.effective_user)
# calculate time at user's
user_datetime = update.effective_message.date.astimezone(user_timezone)
# get yesterday
user_yesterday = user_datetime - datetime.timedelta(days=1)
# save message content
save_message_content_by_date(update, context, user_yesterday)
return ConversationHandler.END | 7b37d1157fc40ec01703aac92a5b176d14ab2f27 | 16,484 |
def get_lens_pos(sequence):
"""
Calculate positions of lenses.
Returns
-------
List of tuples with index and position of OPE in sequence.
"""
d = 0.0
d_ = []
for idx, ope in enumerate(sequence):
if ope.is_lens():
d_.append((idx, d))
else:
d += ope.get_travel_length()
return d_ | f1f1ebb4212406e78e48613584f67f5e2b6f2265 | 16,485 |
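# Illustrative usage of get_lens_pos (added sketch). The optical-path-element API
# (`is_lens`, `get_travel_length`) is inferred from the function body; these
# stand-in classes are hypothetical, not from the original library.
class _Gap:
    def __init__(self, d):
        self._d = d
    def is_lens(self):
        return False
    def get_travel_length(self):
        return self._d

class _Lens:
    def is_lens(self):
        return True

assert get_lens_pos([_Gap(10.0), _Lens(), _Gap(5.0), _Lens()]) == [(1, 10.0), (3, 15.0)]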
import numpy as np
def disorientation(orientation_matrix, orientation_matrix1, crystal_structure=None):
"""Compute the disorientation another crystal orientation.
Considering all the possible crystal symmetries, the disorientation
is defined as the combination of the minimum misorientation angle
and the misorientation axis lying in the fundamental zone, which
can be used to bring the two lattices into coincidence.
.. note::
Both orientations are supposed to have the same symmetry. This is not
necessarily the case in multi-phase materials.
    :param orientation_matrix: orientation matrix of the first crystal.
    :param orientation_matrix1: orientation matrix of the other crystal
        orientation from which to compute the misorientation angle.
    :param crystal_structure: an instance of the `Symmetry` class
        describing the crystal symmetry (assumed identical for both
        orientations).
:returns tuple: the misorientation angle in radians, the axis as a
numpy vector (crystal coordinates), the axis as a numpy vector
(sample coordinates).
"""
the_angle = np.pi
symmetries = crystal_structure.symmetry_operators()
(gA, gB) = (orientation_matrix, orientation_matrix1) # nicknames
for (g1, g2) in [(gA, gB), (gB, gA)]:
for j in range(symmetries.shape[0]):
sym_j = symmetries[j]
oj = np.dot(sym_j, g1) # the crystal symmetry operator is left applied
for i in range(symmetries.shape[0]):
sym_i = symmetries[i]
oi = np.dot(sym_i, g2)
delta = np.dot(oi, oj.T)
mis_angle = misorientation_angle_from_delta(delta)
if mis_angle < the_angle:
# now compute the misorientation axis, should check if it lies in the fundamental zone
mis_axis = misorientation_axis_from_delta(delta)
the_angle = mis_angle
the_axis = mis_axis
the_axis_xyz = np.dot(oi.T, the_axis)
return the_angle, the_axis, the_axis_xyz | eefca78d7736de073646c97190f736bedb302136 | 16,486 |
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
label_map = {label : i for i, label in enumerate(label_list,1)}
features = []
for (ex_index, example) in enumerate(examples): # example : InputExample obj
text_spc_tokens = example.text_a
aspect_tokens = example.text_b
sentence_label = example.sentence_label
aspect_label = example.aspect_label
        polaritiylist = example.polarity  # marks aspect vs. non-aspect tokens
tokens = []
labels = []
polarities = []
valid = []
label_mask = []
text_spc_tokens.extend(['[SEP]'])
text_spc_tokens.extend(aspect_tokens)
        enum_tokens = text_spc_tokens  # text_spc_tokens: sentence tokens + [SEP] + aspect tokens; note this is not yet the standard BERT-SPC format
sentence_label.extend(['[SEP]'])
# sentence_label.extend(['O'])
sentence_label.extend(aspect_label)
label_lists = sentence_label
# if len(enum_tokens) != len(label_lists):
# print(enum_tokens)
# print(label_lists)
        for i, word in enumerate(enum_tokens):  # SPC tokens; enum_tokens is not yet in standard BERT-SPC format, special tokens are added below to make it conform
            token = tokenizer.tokenize(word)  # tokenize the word with the BERT tokenizer
tokens.extend(token)
label_1 = label_lists[i]
polarity_1 = polaritiylist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
polarities.append(polarity_1)
valid.append(1)
label_mask.append(1)
                else:  # reached when the BERT tokenizer splits a word into multiple sub-tokens
valid.append(0)
        if len(tokens) >= max_seq_length - 1:  # truncate to leave room for [CLS] and [SEP]; why drop 2 and not more?
tokens = tokens[0:(max_seq_length - 2)]
polarities = polarities[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
valid = valid[0:(max_seq_length - 2)]
label_mask = label_mask[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
valid.insert(0,1)
label_mask.insert(0,1)
label_ids.append(label_map["[CLS]"])
# label_ids.append(label_map["O"])
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
            if len(labels) > i:  # this check seems redundant
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]") # 得到标准的bert spc格式
segment_ids.append(0)
valid.append(1)
label_mask.append(1)
label_ids.append(label_map["[SEP]"])
# label_ids.append(label_map["O"])
input_ids_spc = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids_spc)
label_mask = [1] * len(label_ids)
# import numpy as np
while len(input_ids_spc) < max_seq_length: # pad
input_ids_spc.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
valid.append(1)
label_mask.append(0)
while len(label_ids) < max_seq_length:
label_ids.append(0)
label_mask.append(0)
while len(polarities) < max_seq_length:
polarities.append(-1)
assert len(input_ids_spc) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(valid) == max_seq_length
assert len(label_mask) == max_seq_length
# if ex_index < 5:
# print("*** Example ***")
# print("guid: %s" % (example.guid))
# print("tokens: %s" % " ".join(
# [str(x) for x in ntokens]))
# print("input_ids: %s" % " ".join([str(x) for x in input_ids_spc]))
# print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
# print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
# # print("label: %s (id = %d)" % (example.label, label_ids))
#
# input_ids_spc = np.array(input_ids_spc)
# label_ids = np.array(label_ids)
# labels = np.array(labels)
# valid = np.array(valid)
features.append(
InputFeatures(input_ids_spc=input_ids_spc,
                          input_mask=input_mask,  # attention mask over the non-padded SPC tokens
                          segment_ids=segment_ids,  # all zeros; BERT token_type_ids
                          label_id=label_ids,  # labels for aspect extraction
                          polarities=polarities,  # sentiment polarity of each aspect token; non-aspect tokens are marked -1
                          valid_ids=valid,
                          label_mask=label_mask))  # label_mask is effectively the same as input_mask
return features | 5d844b9d88fa7bbd5532547b772fef9c1811e039 | 16,488 |
def to_json_compatible_object(obj):
"""
This function returns a representation of a UAVCAN structure (message, request, or response), or
a DSDL entity (array or primitive), or a UAVCAN transfer, as a structure easily able to be
transformed into json or json-like serialization
Args:
obj: Object to convert.
Returns: structure which can easily be transformed into a json-like serialization
"""
if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'):
output = dict()
if hasattr(obj, 'message'):
payload = obj.message
output['transfer_type'] = 'Message'
elif hasattr(obj, 'request'):
payload = obj.request
output['transfer_type'] = 'Request'
elif hasattr(obj, 'response'):
payload = obj.response
output['transfer_type'] = 'Response'
else:
raise ValueError('Cannot generate JSON-compatible object representation for %r' % type(obj))
output['source_node_id'] = obj.transfer.source_node_id
output['dest_node_id'] = obj.transfer.dest_node_id
output['ts_monotonic'] = obj.transfer.ts_monotonic
output['ts_real'] = obj.transfer.ts_real
output['transfer_priority'] = obj.transfer.transfer_priority
output['datatype'] = '{}'.format(payload._type)
output['fields'] = _to_json_compatible_object_impl(payload)
return output
else:
return _to_json_compatible_object_impl(obj) | 131fa3a43abf55fd0f51b1e3160ba2f1486d2e25 | 16,489 |
import numpy as np
from scipy.linalg import block_diag
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
def rot_box_kalman_filter(initial_state, Q_std, R_std):
"""
Tracks a 2D rectangular object (e.g. a bounding box) whose state includes
position, centroid velocity, dimensions, and rotation angle.
Parameters
----------
initial_state : sequence of floats
[x, vx, y, vy, w, h, phi]
Q_std : float
Standard deviation to use for process noise covariance matrix
R_std : float
Standard deviation to use for measurement noise covariance matrix
Returns
-------
kf : filterpy.kalman.KalmanFilter instance
"""
kf = KalmanFilter(dim_x=7, dim_z=5)
dt = 1.0 # time step
# state mean and covariance
kf.x = np.array([initial_state]).T
kf.P = np.eye(kf.dim_x) * 500.
# no control inputs
kf.u = 0.
# state transition matrix
kf.F = np.eye(kf.dim_x)
kf.F[0, 1] = kf.F[2, 3] = dt
# measurement matrix - maps from state space to observation space, so
# shape is dim_z x dim_x.
kf.H = np.zeros([kf.dim_z, kf.dim_x])
# z = Hx. H has nonzero coefficients for the following components of kf.x:
# x y w h phi
kf.H[0, 0] = kf.H[1, 2] = kf.H[2, 4] = kf.H[3, 5] = kf.H[4, 6] = 1.0
# measurement noise covariance
kf.R = np.eye(kf.dim_z) * R_std**2
# process noise covariance for x-vx or y-vy pairs
q = Q_discrete_white_noise(dim=2, dt=dt, var=Q_std**2)
# diagonal process noise sub-matrix for width, height, and phi
qq = Q_std**2*np.eye(3)
# process noise covariance matrix for full state
kf.Q = block_diag(q, q, qq)
return kf | ac0bca07b6d7b08c3b27439855fac93bddffcb91 | 16,490 |
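# Illustrative usage of rot_box_kalman_filter (added sketch, assuming filterpy and
# scipy are installed): track a 10x20 box starting at the origin at rest.
_kf = rot_box_kalman_filter([0., 0., 0., 0., 10., 20., 0.], Q_std=0.1, R_std=1.0)
_kf.predict()
_kf.update(np.array([[1.0, 0.5, 10.2, 19.8, 0.05]]).T)  # z = [x, y, w, h, phi]
assert _kf.x.shape == (7, 1)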
def validate_schema(path, schema_type):
"""Validate a single file against its schema"""
if schema_type not in _VALID_SCHEMA_TYPES.keys():
raise ValueError(f"No validation schema found for '{schema_type}'")
return globals()["validate_" + schema_type](path) | 8883226eff948de2b05d442157818ab0b3904e47 | 16,491 |
def import_vote_internal(vote, principal, file, mimetype):
""" Tries to import the given csv, xls or xlsx file.
This is the format used by onegov.ballot.Vote.export().
This function is typically called automatically every few minutes during
an election day - we use bulk inserts to speed up the import.
:return:
A list containing errors.
"""
csv, error = load_csv(
file, mimetype, expected_headers=INTERNAL_VOTE_HEADERS, dialect='excel'
)
if error:
return [error]
ballot_results = {}
errors = []
added_entity_ids = {}
ballot_types = set()
status = 'unknown'
entities = principal.entities[vote.date.year]
for line in csv.lines:
line_errors = []
status = line.status or 'unknown'
if status not in STATI:
line_errors.append(_("Invalid status"))
ballot_type = line.type
if ballot_type not in BALLOT_TYPES:
line_errors.append(_("Invalid ballot type"))
ballot_types.add(ballot_type)
added_entity_ids.setdefault(ballot_type, set())
ballot_results.setdefault(ballot_type, [])
# the id of the entity
entity_id = None
try:
entity_id = validate_integer(line, 'entity_id')
except ValueError as e:
line_errors.append(e.args[0])
else:
if entity_id not in entities and entity_id in EXPATS:
entity_id = 0
if entity_id in added_entity_ids[ballot_type]:
line_errors.append(
_("${name} was found twice", mapping={
'name': entity_id
}))
if entity_id and entity_id not in entities:
line_errors.append(
_("${name} is unknown", mapping={
'name': entity_id
}))
else:
added_entity_ids[ballot_type].add(entity_id)
# Skip expats if not enabled
if entity_id == 0 and not vote.expats:
continue
# Counted
counted = line.counted.strip().lower() == 'true'
# the yeas
try:
yeas = validate_integer(line, 'yeas')
except ValueError as e:
line_errors.append(e.args[0])
# the nays
try:
nays = validate_integer(line, 'nays')
except ValueError as e:
line_errors.append(e.args[0])
# the eligible voters
try:
eligible_voters = validate_integer(line, 'eligible_voters')
except ValueError as e:
line_errors.append(e.args[0])
# the empty votes
try:
empty = validate_integer(line, 'empty')
except ValueError as e:
line_errors.append(e.args[0])
# the invalid votes
try:
invalid = validate_integer(line, 'invalid')
except ValueError as e:
line_errors.append(e.args[0])
# now let's do some sanity checks
try:
if not eligible_voters:
line_errors.append(_("No eligible voters"))
if (yeas + nays + empty + invalid) > eligible_voters:
line_errors.append(_("More cast votes than eligible voters"))
except UnboundLocalError:
pass
# pass the errors
if line_errors:
errors.extend(
FileImportError(error=err, line=line.rownumber)
for err in line_errors
)
continue
# all went well (only keep doing this as long as there are no errors)
if not errors:
entity = entities.get(entity_id, {})
ballot_results[ballot_type].append(
dict(
name=entity.get('name', ''),
district=entity.get('district', ''),
counted=counted,
yeas=yeas,
nays=nays,
eligible_voters=eligible_voters,
entity_id=entity_id,
empty=empty,
invalid=invalid
)
)
if errors:
return errors
if not any((len(results) for results in ballot_results.values())):
return [FileImportError(_("No data found"))]
# Add the missing entities
for ballot_type in ballot_types:
remaining = set(entities.keys())
if vote.expats:
remaining.add(0)
remaining -= added_entity_ids[ballot_type]
for entity_id in remaining:
entity = entities.get(entity_id, {})
ballot_results[ballot_type].append(
dict(
name=entity.get('name', ''),
district=entity.get('district', ''),
counted=False,
entity_id=entity_id
)
)
# Add the results to the DB
vote.clear_results()
vote.status = status
ballot_ids = {b: vote.ballot(b, create=True).id for b in ballot_types}
session = object_session(vote)
session.flush()
session.bulk_insert_mappings(
BallotResult,
(
dict(**result, ballot_id=ballot_ids[ballot_type])
for ballot_type in ballot_types
for result in ballot_results[ballot_type]
)
)
return [] | 03eacf90418fd68bcf24c0a731f2d1216beb786b | 16,492 |
def get_mail_count(imap, mailbox_list):
""" Gets the total number of emails on specified account.
Args:
imap <imaplib.IMAP4_SSL>: the account to check
mailbox_list [<str>]: a list of mailboxes
Must be surrounded by double quotes
Returns:
<int>: total emails
"""
total = 0
num_mailboxes = len(mailbox_list)
for idx, mailbox in enumerate(mailbox_list):
print("Counting mail: %d (Mailbox %d of %d) " \
% (total, idx+1, num_mailboxes), end='\r')
total += int(imap.select(mailbox)[1][0])
imap.close()
print("Counting mail: %d (Mailbox %d of %d) " \
% (total, idx+1, num_mailboxes))
return total | 8c8fd2d6849d58860f3bd6c20335e7a399bee99d | 16,493 |
def get_bdb_path_by_shoulder_model(shoulder_model, root_path=None):
"""Get the path to a BerkeleyDB minter file in a minter directory hierarchy.
The path may or may not exist. The caller may be obtaining the path in which to
create a new minter, so the path is not checked.
Args:
shoulder_model (Shoulder): The Django ORM model for the shoulder to use for
the minting. The model may be a legacy record for N2T based minting, or
a record from a minter created in EZID.
root_path (str, optional):
Path to the root of the minter directory hierarchy. If not provided, the
default for EZID is used.
Returns:
pathlib2.Path
"""
m = shoulder_model
minter_uri = m.minter.strip()
if not minter_uri:
raise nog.exc.MinterNotSpecified(
'A minter has not been specified (minter field in the database is empty)'
)
return pathlib2.Path(
_get_bdb_root(root_path), '/'.join(minter_uri.split('/')[-2:]), 'nog.bdb',
).resolve() | c694306d18ed940cf229a46dae6fb72d2207418e | 16,494 |
def getDefuzzificationMethod(name):
"""Get an instance of a defuzzification method with given name.
Normally looks into the fuzzy.defuzzify package for a suitable class.
"""
m = __import__("fuzzy.defuzzify."+name, fromlist=[name])
c = m.__dict__[name]
return c() | c3306ba9fc4ce21eae9adb1bde1b04505dd6b24f | 16,495 |
def celestial(func):
"""
    Transform a point x from Cartesian to celestial coordinates and return the wrapped function evaluated at the transformed point y.
"""
def f_transf(ref, x, *args, **kwargs):
y = cartesian_to_celestial(x)
return func(ref, y, *args)
return f_transf | e6980abfbc0833639b9d1eb716633d1c6d6dcda2 | 16,496 |
from PIL import Image
def _rescale_to_width(
img: Image,
target_width: int):
"""Helper function to rescale image to `target_width`.
Parameters
----------
img : PIL.Image
Input image object to be rescaled.
target_width : int
Target width (in pixels) for rescaling.
Returns
-------
PIL.Image
Rescaled image object
"""
w, h = img.size
rescaled_img = img.resize(_scale_wh_by_target_width(w, h, target_width))
return rescaled_img | 38efe7bbbd1681abfd2d48bcf4765b817a28fa27 | 16,497 |
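# Illustrative usage of _rescale_to_width (added sketch). `_scale_wh_by_target_width`
# is an external helper not shown in this snippet, so a minimal stand-in that
# preserves the aspect ratio is defined here.
def _scale_wh_by_target_width(w, h, target_width):  # hypothetical stand-in
    return target_width, max(1, round(h * target_width / w))

assert _rescale_to_width(Image.new("RGB", (800, 600)), 400).size == (400, 300)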
def compute_exact_R_P(final_patterns, centones_tab):
"""
    Function that computes Recall and Precision with exact matches
"""
true = 0
for tab in final_patterns:
for centon in centones_tab[tab]:
check = False
for p in final_patterns[tab]:
if centon == p:
check = True
if check:
true += 1
all_centones = len([x for y in centones_tab.values() for x in y])
all_ours = len([x for y in final_patterns.values() for x in y])
overall_recall = true / all_centones
overall_precision = true / all_ours
return overall_recall, overall_precision | d82e6fccacdc09bb078d6e954150c143994df1ca | 16,498 |
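# Illustrative usage of compute_exact_R_P (added sketch with toy data): one of the
# two mined patterns for 'taba' exactly matches one of the two reference centones,
# so recall = precision = 0.5.
_final_patterns = {"taba": ["do re mi", "fa sol la"]}
_centones_tab = {"taba": ["do re mi", "si do re"]}
assert compute_exact_R_P(_final_patterns, _centones_tab) == (0.5, 0.5)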
def build_embedding(embedding_matrix, max_len, name):
"""
Build embedding by lda
:param max_len:
:param name:
:return:
"""
# build embedding with initial weights
topic_emmd = Embedding(embedding_matrix.shape[0],
embedding_matrix.shape[1],
weights=[embedding_matrix],
input_length=max_len,
trainable=True,
name=name)
return topic_emmd | 87a89462bfb2eee285099353f78e151394c7c74a | 16,499 |
import re
def check_playlist_url(playlist_url):
"""Check if a playlist URL is well-formated.
Parameters
----------
playlist_url : str
URL to a YouTube playlist.
Returns
-------
str
If the URL is well-formated, return the playlist ID. Else return `None`.
"""
match = re.match(
r"https?://www\.youtube\.com/playlist\?list=(.+)",
playlist_url.strip()
)
if match is None:
raise ValueError("Incorrect URL: %s" % playlist_url)
return match.group(1) | b14808e3dc25fcb7f91e9b66ec5f31ae869c6ae5 | 16,500 |
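# Illustrative usage of check_playlist_url (added sketch): a well-formed playlist
# URL yields its ID, anything else raises ValueError.
assert check_playlist_url(
    "https://www.youtube.com/playlist?list=PLexampleListId123") == "PLexampleListId123"
try:
    check_playlist_url("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
except ValueError:
    pass  # expected for a non-playlist URL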
import multiprocessing
import asyncio
def test_PipeJsonRpcSendAsync_2(method, params, result, notification):
"""
Test of basic functionality. Here we don't test for timeout case (it raises an exception).
"""
value_nonlocal = None
def method_handler1():
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return 5
def method_handler2(value=2):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 10
def method_handler3(*, value=3):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
class SomeClass:
def method_handler4(self, *, value=4):
nonlocal value_nonlocal
value_nonlocal = "function_was_called"
return value + 15
some_class = SomeClass()
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1) # No name is specified, default name is "method_handler1"
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.add_method(method_handler3, "method3")
pc.add_method(some_class.method_handler4, "method4")
pc.start()
async def send_messages():
nonlocal value_nonlocal
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
for n in range(3):
value_nonlocal = None
response = await p_send.send_msg(method, params, notification=notification)
if not notification:
assert response == result, f"Result does not match the expected: {response}"
assert value_nonlocal == "function_was_called", "Non-local variable has incorrect value"
elif response is not None:
assert False, "Response was received for notification."
p_send.stop()
asyncio.run(send_messages())
pc.stop() | 3311c1be5013eae986b5c8e358ef08eafff6420c | 16,501 |
def f(t, T):
"""
    Returns 1 for 0 < t < T/2, 0 at t == T/2, and -1 for T/2 < t < T.
    Raises IndexError if t lies outside the open interval (0, T).
"""
if(t > 0 and t < float(T/2)):
return 1
elif(t == float(T/2)):
return 0
elif(t > float(T/2) and t < T):
return -1
raise IndexError("Out of function domain") | f2365094d41d2a151322ad640dcf4b290dd1de79 | 16,502 |
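# Illustrative usage of f (added sketch): a square-wave-like sign over one period T.
assert f(1, 4) == 1    # first half of the period
assert f(2, 4) == 0    # exactly T/2
assert f(3, 4) == -1   # second half of the period
try:
    f(5, 4)            # outside (0, T)
except IndexError:
    pass  # expected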
import http
import json
def auth0_token():
"""
Token for Auth0 API
"""
auth = settings["auth0"]
conn = http.client.HTTPSConnection(auth['domain'])
payload = '{' + f"\"client_id\":\"{auth['client']}\"," \
f"\"client_secret\":\"{auth['client-secret']}\"," \
f"\"audience\":\"https://{auth['domain']}/api/v2/\",\"grant_type\":\"client_credentials\"" + '}'
headers = {'content-type': "application/json"}
conn.request("POST", "/oauth/token", payload, headers)
res = conn.getresponse()
data = res.read()
return json.loads(data.decode("utf-8"))["access_token"] | ba27798d4af79999f1c37d5b356a54cd427a0681 | 16,503 |
def get_ami(region, instance_type):
"""Returns the appropriate AMI to use for a given region + instance type
HVM is always used except for instance types which cannot use it. Based
on matrix here:
http://aws.amazon.com/amazon-linux-ami/instance-type-matrix/
.. note::
:func:`populate_ami_ids` must be called first to populate the available
AMI's.
"""
if not _POPULATED:
raise KeyError('populate_ami_ids must be called first')
instances = AWS_AMI_IDS[region]
inst_type = "hvm"
if instance_type[:2] in ["m1", "m2", "c1", "t1"]:
inst_type = "paravirtual"
if inst_type not in instances:
msg = "Could not find instance type %r in %s for region %s"
raise KeyError(msg % (inst_type, list(instances.keys()), region))
return instances[inst_type].id | 7ea60dbda0dabb05d9f7509ddb4c567560d681eb | 16,504 |
def convert_config(cfg):
""" Convert some configuration values to different values
Args:
cfg (dict): dict of sub-dicts, each sub-dict containing configuration
keys and values pertinent to a process or algorithm
Returns:
dict: configuration dict with some items converted to different objects
Raises:
KeyError: raise KeyError if configuration file is not specified
correctly
"""
# Parse dataset:
cfg = _parse_dataset_config(cfg)
# Parse YATSM:
cfg = _parse_YATSM_config(cfg)
return cfg | f84ae8f5b90f364eb378b48071c7ea4a99370076 | 16,505 |
from flask import signals
def _signals_exist(names):
""" Return true if all of the given signals exist in this version of flask.
"""
return all(getattr(signals, n, False) for n in names) | 3f62e5a6309d792843947c3aa6f5ad687b6debf5 | 16,506 |
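# Illustrative usage of _signals_exist (added sketch, assuming Flask is installed):
# the core request signals exist, a made-up name does not.
assert _signals_exist(["request_started", "request_finished"])
assert not _signals_exist(["definitely_not_a_flask_signal"])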
def login():
"""Route for logging the user in."""
try:
if request.method == 'POST':
return do_the_login()
if session.get('logged_in'):
return redirect(url_for('home'))
return render_template('login.html')
except Exception as e:
abort(500, {'message': str(e)}) | 1f610bcfd450de5de576eef797ed9d26f726ec72 | 16,507 |
from typing import Dict, List
def __screen_info_to_dict():
"""
筛查
:return:
"""
screen_dict: {str: List[GitLogObject]} = {}
for info in git.get_all_commit_info():
if not authors.__contains__(info.name):
continue
if not info.check_today_time():
continue
if screen_dict.__contains__(info.name):
screen_dict.get(info.name).append(info.msg)
else:
screen_dict[info.name] = [info.msg]
pass
pass
return screen_dict | 6fb5519ab746b8918f18ec0c0a1769b5dca3558c | 16,509 |
from typing import Type
from typing import Optional
def bind_prop_arr(
prop_name: str,
elem_type: Type[Variable],
doc: Optional[str] = None,
doc_add_type=True,
) -> property:
"""Convenience wrapper around bind_prop for array properties
:meta private:
"""
if doc is None:
doc = f"Wrapper around `variables['{prop_name}']` of type `VariableArray[{elem_type.__name__}]`."
if doc_add_type:
doc = f"MutableSequence[{_get_python_prop_type(elem_type)}]: {doc}"
return bind_prop(
prop_name,
VariableArray,
lambda: VariableArray(elem_type),
doc=doc,
doc_add_type=False,
objtype=True,
) | f8e610011f096013c3976ee22f43eb58472c0513 | 16,510 |
from typing import Dict
from typing import Any
import json
def generate_profile_yaml_file(destination_type: DestinationType, test_root_dir: str) -> Dict[str, Any]:
"""
Each destination requires different settings to connect to. This step generates the adequate profiles.yml
as described here: https://docs.getdbt.com/reference/profiles.yml
"""
config_generator = TransformConfig()
profiles_config = config_generator.read_json_config(f"../secrets/{destination_type.value.lower()}.json")
# Adapt credential file to look like destination config.json
if destination_type.value == DestinationType.BIGQUERY.value:
profiles_config["credentials_json"] = json.dumps(profiles_config)
profiles_config["dataset_id"] = target_schema
else:
profiles_config["schema"] = target_schema
profiles_yaml = config_generator.transform(destination_type, profiles_config)
config_generator.write_yaml_config(test_root_dir, profiles_yaml)
return profiles_config | f66b8df469b00d8aa11e6aba7d6efb5c2af4e21f | 16,511 |
def get_foreign_trips(db_connection):
"""
Gets the time series data for all Foreign visitors from the database
Args:
db_connection (Psycopg.connection): The database connection
Returns:
Pandas.DataFrame: The time series data for each unique Foreign visitor.
It has the columns cust_id, date, date_diff, calls,
calls_in_florence, calls_near_airport
"""
counts = get_daily_call_counts(db_connection,
'optourism.foreigners_timeseries_daily')
return get_trips(counts) | bd5f0d308681286c615d60b0bf2fcb88bfd2a806 | 16,512 |
from typing import List
def get_diagonal_sums(square: Square) -> List[int]:
""" Returns a list of the sum of each diagonal. """
topleft = 0
bottomleft = 0
# Seems like this could be more compact
i = 0
for row in square.rows:
topleft += row[i]
i += 1
    i = 0
    for col in square.columns:
        # anti-diagonal: row index decreases as the column index increases
        bottomleft += col[len(square.columns) - 1 - i]
        i += 1
return [topleft, bottomleft] | 37a5e167b3170feece19b963c21eae1359df14ec | 16,513 |
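# Illustrative usage of get_diagonal_sums (added sketch). `Square` is assumed to
# expose `rows` and `columns` as lists of lists; this stand-in class is
# hypothetical, not from the original codebase. For a 3x3 magic square both
# diagonals sum to 15.
class _Square:
    def __init__(self, rows):
        self.rows = rows
        self.columns = [list(col) for col in zip(*rows)]

assert get_diagonal_sums(_Square([[2, 7, 6], [9, 5, 1], [4, 3, 8]])) == [15, 15]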