content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import warnings
def disable_warnings_temporarily(func):
"""Helper to disable warnings for specific functions (used mainly during testing of old functions)."""
    def inner(*args, **kwargs):
        warnings.filterwarnings("ignore")
        try:
            return func(*args, **kwargs)
        finally:
            warnings.filterwarnings("default")
return inner | 5e19b8f51ca092709a1e1a5d6ff0b2543a41e5e1 | 17,200 |
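A minimal usage sketch for the decorator above (after the fix so it propagates the wrapped function's return value); the function name `legacy_call` is illustrative:

import warnings

@disable_warnings_temporarily
def legacy_call():
    warnings.warn("legacy_call is deprecated", DeprecationWarning)
    return 42

print(legacy_call())      # 42, and the warning is suppressed
warnings.warn("visible")  # filters are back to "default" after the call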
from math import ceil
def progress_bar(progress):
"""
Generates a light bar matrix to display volume / brightness level.
:param progress: value between 0..1
"""
dots = list(" " * 81)
num_dots = ceil(round(progress, 3) * 9)
while num_dots > 0:
dots[81 - ((num_dots - 1) * 9 + 5)] = "*"
num_dots -= 1
return "".join(dots) | 88986ecc505cf786e197d8ad55cd70b21fa3aa27 | 17,201 |
def get_context_command_parameter_converters(func):
"""
Parses the given `func`'s parameters.
Parameters
----------
func : `async-callable`
The function used by a ``SlasherApplicationCommand``.
Returns
-------
func : `async-callable`
The converted function.
parameter_converters : `tuple` of ``ParameterConverter``
Parameter converters for the given `func` in order.
Raises
------
TypeError
        - If `func` is not async callable, nor can it be instanced to async.
- If `func` accepts keyword only parameters.
- If `func` accepts `*args`.
- If `func` accepts `**kwargs`.
ValueError
- If any parameter is not internal.
"""
analyzer, real_analyzer, should_instance = check_command_coroutine(func)
parameters = real_analyzer.get_non_reserved_positional_parameters()
parameter_converters = []
target_converter_detected = False
for parameter in parameters:
parameter_converter = create_internal_parameter_converter(parameter)
if (parameter_converter is None):
if target_converter_detected:
                raise TypeError(f'`{real_analyzer.real_function!r}`\'s `{parameter.name}` does not refer to any of the '
                    f'expected internal parameters. Context commands do not accept any additional parameters.')
else:
parameter_converter = create_target_parameter_converter(parameter)
target_converter_detected = True
parameter_converters.append(parameter_converter)
parameter_converters = tuple(parameter_converters)
if should_instance:
func = analyzer.instance()
return func, parameter_converters | 294706230f95745dbd50681cafc066a5d226880d | 17,202 |
def norm(x):
"""Normalize 1D tensor to unit norm"""
mu = x.mean()
std = x.std()
y = (x - mu)/std
return y | ea8546da2ea478edb0727614323bba69f6af288d | 17,203 |
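A short check of the standardization above; a sketch assuming a PyTorch tensor (a NumPy array works the same way, since it exposes the same `mean`/`std` methods):

import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
y = norm(x)
print(float(y.mean()), float(y.std()))  # ~0.0 and ~1.0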
def honest_propose(validator, known_items):
"""
Returns an honest `SignedBeaconBlock` as soon as the slot where
the validator is supposed to propose starts.
Checks whether a block was proposed for the same slot to avoid slashing.
Args:
validator: Validator
known_items (Dict): Known blocks and attestations received over-the-wire (but perhaps not included yet in `validator.store`)
Returns:
Optional[SignedBeaconBlock]: Either `None` if the validator decides not to propose,
otherwise a `SignedBeaconBlock` containing attestations
"""
# Not supposed to propose for current slot
if not validator.data.current_proposer_duties[validator.data.slot % SLOTS_PER_EPOCH]:
return None
# Already proposed for this slot
if validator.data.last_slot_proposed == validator.data.slot:
return None
# honest propose
return honest_propose_base(validator, known_items) | c6b0403b15154e3e3b19547770a162e2ac05501b | 17,204 |
import re
def formatKwargsKey(key):
"""
'fooBar_baz' -> 'foo-bar-baz'
"""
    # insert a hyphen at camelCase boundaries, then convert underscores to hyphens
    key = re.sub(r'([a-z0-9])([A-Z])', r'\1-\2', key)
    key = key.replace('_', '-')
    return key.lower() | 24c79b37fdd1cd6d73ab41b0d2234b1ed2ffb448 | 17,205 |
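A quick check of the formatter above (with the camelCase handling added so it matches the documented example):

print(formatKwargsKey("fooBar_baz"))  # foo-bar-baz
print(formatKwargsKey("maxRetries"))  # max-retries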
import dateutil.tz
def mktimestamp(dt):
"""
Prepares a datetime for sending to HipChat.
"""
if dt.tzinfo is None:
dt = dt.replace(tzinfo=dateutil.tz.tzutc())
return dt.isoformat(), dt.tzinfo.tzname(dt) | 2f444d0ea27a3afbed68742bade8833a49e191e4 | 17,206 |
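A usage sketch for the timestamp helper above (requires the `python-dateutil` package); a naive datetime is treated as UTC:

import datetime

iso, tzname = mktimestamp(datetime.datetime(2021, 6, 1, 12, 30))
print(iso)     # 2021-06-01T12:30:00+00:00
print(tzname)  # UTC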
import os
import numpy as np
import tensorflow as tf
def load_and_assign_npz_dict(name='model.npz', sess=None):
"""Restore the parameters saved by ``tl.files.save_npz_dict()``.
Parameters
----------
name : a string
The name of the .npz file.
sess : Session
"""
assert sess is not None
if not os.path.exists(name):
print("[!] Load {} failed!".format(name))
return False
params = np.load(name)
if len(params.keys()) != len(set(params.keys())):
raise Exception("Duplication in model npz_dict %s" % name)
ops = list()
for key in params.keys():
try:
# tensor = tf.get_default_graph().get_tensor_by_name(key)
# varlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=key)
varlist = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=key)
if len(varlist) > 1:
raise Exception("[!] Multiple candidate variables to be assigned for name %s" % key)
elif len(varlist) == 0:
raise KeyError
else:
ops.append(varlist[0].assign(params[key]))
print("[*] params restored: %s" % key)
except KeyError:
print("[!] Warning: Tensor named %s not found in network." % key)
sess.run(ops)
print("[*] Model restored from npz_dict %s" % name) | b5c2a0d1878117cb7e6461e0615457ee92501823 | 17,207 |
import tensorflow as tf
def build_accuracy(logits, labels, name_scope='accuracy'):
"""
Builds a graph node to compute accuracy given 'logits' a probability distribution over the output and 'labels' a
one-hot vector.
"""
with tf.name_scope(name_scope):
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
correct_prediction = tf.cast(correct_prediction, tf.float32)
return tf.reduce_mean(correct_prediction) | 53f5f78a8c07c691e20d14c416c1fe21a2547bc6 | 17,208 |
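A small sanity check of the accuracy node above. This sketch assumes TensorFlow 2 eager execution, where the returned tensor can be read directly; under graph-mode TF1 it would be evaluated inside a session instead:

import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
labels = tf.constant([[0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])  # one-hot targets
acc = build_accuracy(logits, labels)
print(float(acc))  # 0.666..., two of the three argmax predictions match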
import json
import os
def generate_markdown_files(domain, mitigations, side_nav_data, side_nav_mobile_data, notes):
"""Responsible for generating shared data between all mitigation pages
and begins mitigation markdown generation
"""
data = {}
if mitigations:
data['domain'] = domain.split("-")[0]
data['mitigation_list_len'] = str(len(mitigations))
data['side_menu_data'] = side_nav_data
data['side_menu_mobile_view_data'] = side_nav_mobile_data
data['mitigation_table'] = get_mitigation_table_data(mitigations)
subs = mitigations_config.mitigation_domain_md.substitute(data)
subs = subs + json.dumps(data)
with open(os.path.join(mitigations_config.mitigation_markdown_path, data['domain'] + "-mitigations.md"), "w", encoding='utf8') as md_file:
md_file.write(subs)
# Generates the markdown files to be used for page generation
for mitigation in mitigations:
generate_mitigation_md(mitigation, domain, side_nav_data, side_nav_mobile_data, notes)
return True
else:
return False | 8a30af0868b541c8771019c57d42d33a842050bc | 17,209 |
import numpy as np
import matrixprofile as mpf
def compute_mp_av(mp, index, m, df, k):
"""
Given a matrix profile, a matrix profile index, the window size and the DataFrame that contains the timeseries.
Create a matrix profile object and add the corrected matrix profile after applying the complexity av.
Uses an extended version of the apply_av function from matrixprofile foundation that is compatible with
multi-dimensional timeseries. The implementation can be found here
(https://github.com/MORE-EU/matrixprofile/blob/master/matrixprofile/transform.py)
Args:
mp: A matrix profile.
index: The matrix profile index that accompanies the matrix profile.
        m: The subsequence window size.
df: The timeseries that was used to calculate the matrix profile.
Return:
Updated profile with an annotation vector
"""
# Apply the annotation vector
m = m # window size
    mp = np.nan_to_num(mp, nan=np.nanmax(mp))  # replace NaN values with the profile maximum
profile = to_mpf(mp, index, m, df)
av_type = 'complexity'
profile = mpf.transform.apply_av(profile, av_type)
return profile | cc89d34dd145339c99d1ded8ced9af853c061124 | 17,210 |
import datetime as dt
import logging
def read_burris(fh):
"""
Read Burris formatted file, from given open file handle.
Accepts comma or tab-separated files.
Parameters
----------
fh : TextIOWrapper
open file handle
Returns
-------
ChannelList
"""
all_survey_data = ChannelList()
for i, orig_line in enumerate(fh, 1):
try:
line = orig_line.strip()
if line.find(",") != -1:
vals_temp = line.split(",")
if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
continue
elif line.find("\t") != -1:
vals_temp = line.split("\t")
else:
vals_temp = line.split()
if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
continue
if len(vals_temp) == 15: # no meter operator specified
(
c_station,
c_meter,
c_date,
c_time,
c_grav,
c_dial,
c_feedback,
c_tide,
c_tilt,
_,
_,
c_height,
c_elev,
c_lat,
c_long,
) = range(
15
                )  # 0 - 14
all_survey_data.oper.append("None")
else: # 16 values, includes meter operator.
# Numbers are columns in the imported file
(
c_station,
c_oper,
c_meter,
c_date,
c_time,
c_grav,
c_dial,
c_feedback,
c_tide,
c_tilt,
_,
_,
c_height,
c_elev,
c_lat,
c_long,
) = range(
16
                )  # 0 - 15
all_survey_data.oper.append(vals_temp[c_oper])
if line.find("/") != -1:
date_temp = vals_temp[c_date].split("/")
elif line.find("-") != -1:
date_temp = vals_temp[c_date].split("-")
else:
date_temp = []
if int(date_temp[2]) > 999:
date_temp = [date_temp[2], date_temp[0], date_temp[1]]
elif int(date_temp[0]) > 999:
date_temp = [date_temp[0], date_temp[1], date_temp[2]]
# Else raise date error
time_temp = vals_temp[c_time].split(":")
# fill object properties:
all_survey_data.station.append(vals_temp[c_station].strip())
all_survey_data.elev.append(float(vals_temp[c_elev]))
all_survey_data.height.append(float(vals_temp[c_height]))
all_survey_data.lat.append(float(vals_temp[c_lat]))
all_survey_data.long.append(float(vals_temp[c_long]))
# remove Earth tide correction; it's added in using the @grav property
all_survey_data.raw_grav.append(
float(vals_temp[c_grav]) * 1000.0 - float(vals_temp[c_tide]) * 1000.0
)
all_survey_data.tare.append(0)
all_survey_data.etc.append(float(vals_temp[c_tide]) * 1000.0)
all_survey_data.meter_etc.append(float(vals_temp[c_tide]) * 1000.0)
all_survey_data.dial.append(float(vals_temp[c_dial]))
all_survey_data.feedback.append(float(vals_temp[c_feedback]))
            all_survey_data.sd.append(-999)  # Burris doesn't output SD, tiltx, tilty
all_survey_data.meter.append(vals_temp[c_meter])
all_survey_data.tiltx.append(float(vals_temp[c_tilt]) * 1000.0)
all_survey_data.tilty.append(0.0)
all_survey_data.temp.append(0.0)
all_survey_data.dur.append(5)
all_survey_data.rej.append(5)
all_survey_data.t.append(
date2num(
dt.datetime(
int(date_temp[0]),
int(date_temp[1]),
int(date_temp[2]),
int(time_temp[0]),
int(time_temp[1]),
int(time_temp[2]),
)
)
)
all_survey_data.keepdata.append(1)
except (IndexError, ValueError) as e:
logging.exception("Error loading Burris file at line %d", i)
logging.info("LINE: %s", line)
e.i = i
e.line = orig_line
raise e
all_survey_data.meter_type = "Burris"
return all_survey_data | 4519cf73be5663a70e76e083aa9f735f427248a6 | 17,211 |
import re
def _remove_invalid_characters(file_name):
"""Removes invalid characters from the given file name."""
return re.sub(r'[/\x00-\x1f]', '', file_name) | 49a9f668e8142855ca4411921c0180977afe0370 | 17,212 |
def get_ops():
""" Builds an opcode name <-> value dictionary """
li = ["EOF","ADD","SUB","MUL","DIV","POW","BITAND","BITOR","CMP","GET", \
"SET","NUMBER","STRING","GGET","GSET","MOVE","DEF","PASS", \
"JUMP","CALL","RETURN","IF","DEBUG","EQ","LE","LT","DICT", \
"LIST","NONE","LEN","LINE","PARAMS","IGET","FILE","NAME", \
"NE","HAS","RAISE","SETJMP","MOD","LSH","RSH","ITER","DEL", \
"REGS","BITXOR", "IFN", "NOT", "BITNOT"]
dic = {}
for i in li:
dic[i] = li.index(i)
    return dic | 6cd9e4014a124faa6be6dfc36b9f77a22df1ccfb | 17,213 |
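Usage of the opcode table above. Note that `li.index(i)` makes the build quadratic in the list length; an equivalent and faster construction is `{name: i for i, name in enumerate(li)}`:

ops = get_ops()
print(ops["EOF"], ops["ADD"], ops["BITNOT"])  # 0 1 48
print(len(ops))                               # 49 opcodes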
def load_actions(
file_pointer, file_metadata, target_adim, action_mismatch, impute_autograsp_action
):
"""Load states from a file given metadata and hyperparameters
Inputs:
file_pointer : file object
file_metadata : file metadata row (Pandas)
target_adim : dimensionality of action vector
action_mismatch : indicator variable (ACTION_MISMATCH) to handle action length mismatches
        impute_autograsp_action : boolean flag indicating to impute action dim if missing in primitive:"autograsp"
Outputs:
actions : np.array((T-1, action_dims))
"""
a_T, adim = file_metadata["action_T"], file_metadata["adim"]
if target_adim == adim:
return file_pointer["policy"]["actions"][:]
elif (
target_adim == adim + 1
and impute_autograsp_action
and file_metadata["primitives"] == "autograsp"
):
action_append, old_actions = (
np.zeros((a_T, 1)),
file_pointer["policy"]["actions"][:],
)
next_state = file_pointer["env"]["state"][:][1:, -1]
high_val, low_val = (
file_metadata["high_bound"][-1],
file_metadata["low_bound"][-1],
)
midpoint = (high_val + low_val) / 2.0
for t, s in enumerate(next_state):
if s > midpoint:
action_append[t, 0] = high_val
else:
action_append[t, 0] = low_val
return np.concatenate((old_actions, action_append), axis=-1)
elif adim < target_adim and (action_mismatch & ACTION_MISMATCH.PAD_ZERO):
pad = np.zeros((a_T, target_adim - adim), dtype=np.float32)
return np.concatenate((file_pointer["policy"]["actions"][:], pad), axis=-1)
elif adim > target_adim and (action_mismatch & ACTION_MISMATCH.CLEAVE):
return file_pointer["policy"]["actions"][:][:, :target_adim]
else:
raise ValueError(
"file adim - {}, target adim - {}, pad behavior - {}".format(
adim, target_adim, action_mismatch
)
) | 0b8b5dc259fa0645069cc57b4510355ec6897ab6 | 17,214 |
import pymsteams
def send_message(hookurl: str, text: str) -> int:
"""
Send a message on the channel of the Teams.
The HTTP status is returned.
parameters
----------
hookurl : str
URL for the hook to the Teams' channel.
text : str
text to send.
returns
-------
int
HTTP status from the sent message.
"""
msg = pymsteams.connectorcard(hookurl)
msg.text(text)
msg.send()
return msg.last_http_status.status_code | 8ffef50d745fafd125b556e9fb1ceff2cb438a4e | 17,215 |
def np_fft_irfftn(a, *args, **kwargs):
"""Numpy fft.irfftn wrapper for Quantity objects.
Drop dimension, compute result and add it back."""
res = np.fft.irfftn(a.value, *args, **kwargs)
return Quantity(res, a.dimension) | fbfdfe470f09106e9589709ebae5fa19ba8a2732 | 17,216 |
def get_codec_options() -> CodecOptions:
"""
Register all flag type registry and get the :class:`CodecOptions` to be used on ``pymongo``.
:return: `CodecOptions` to be used from `pymongo`
"""
return CodecOptions(type_registry=TypeRegistry(type_registry)) | a0acd3e719ae0a4be463c71cba5eb86914348248 | 17,217 |
import numpy as np
def get_frame_lims(x_eye, y_eye, x_nose, y_nose, view, vertical_align='eye'):
"""Automatically compute the crop parameters of a view using the eye and nose and reference.
Note that horizontal/vertical proportions are currently hard-coded.
Parameters
----------
x_eye : float
x position of the eye
y_eye : float
y position of the eye
x_nose : float
x position of the nose
y_nose : float
y position of the nose
view : str
'left' | 'right'
vertical_align : str
defines which feature controls the vertical alignment
'eye' | 'nose'
Returns
-------
tuple
- xmin (float)
- xmax (float)
- ymin (float)
- ymax (float)
"""
# horizontal proportions
edge2nose = 0.02
nose2eye = 0.33
eye2edge = 0.65
# vertical proportions
eye2top = 0.10
eye2bot = 0.90
nose2top = 0.25
nose2bot = 0.75
# horizontal calc
nose2eye_pix = np.abs(x_eye - x_nose)
edge2nose_pix = edge2nose / nose2eye * nose2eye_pix
eye2edge_pix = eye2edge / nose2eye * nose2eye_pix
total_x_pix = np.round(nose2eye_pix + edge2nose_pix + eye2edge_pix)
if view == 'left':
xmin = int(x_nose - edge2nose_pix)
xmax = int(x_eye + eye2edge_pix)
elif view == 'right':
xmin = int(x_eye - eye2edge_pix)
xmax = int(x_nose + edge2nose_pix)
else:
        raise ValueError(f"view must be 'left' or 'right', got {view!r}")
# vertical calc (assume we want a square image out)
if vertical_align == 'eye':
# based on eye
eye2top_pix = eye2top * total_x_pix
eye2bot_pix = eye2bot * total_x_pix
ymin = int(y_eye - eye2top_pix)
ymax = int(y_eye + eye2bot_pix)
else:
# based on nose
nose2top_pix = nose2top * total_x_pix
nose2bot_pix = nose2bot * total_x_pix
ymin = int(y_nose - nose2top_pix)
ymax = int(y_nose + nose2bot_pix)
return xmin, xmax, ymin, ymax | 20b3c5d74b7d4dd4b2b63c9d32f7325a199d3dee | 17,218 |
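A usage sketch with illustrative eye/nose coordinates; the crop comes out square because the vertical extent is derived from the horizontal one:

xmin, xmax, ymin, ymax = get_frame_lims(
    x_eye=320.0, y_eye=180.0, x_nose=200.0, y_nose=210.0, view='left')
print(xmin, xmax, ymin, ymax)    # 192 556 143 507
print(xmax - xmin, ymax - ymin)  # 364 364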
import os
def write_curies(filepaths: dict, ontoid: str, prefix_map: dict, pref_prefix_map: dict) -> bool:
"""
Update node id field in an edgefile
and each corresponding subject/object
node in the corresponding edges
to have a CURIE, where the prefix is
the ontology ID and the class is
inferred from the IRI.
    :param filepaths: dict with paths to the node list ("nodelist") and edge list ("edgelist") files
    :param ontoid: the Bioportal ID of the ontology
    :param prefix_map: dict mapping ontology IDs to their IRI prefixes
    :param pref_prefix_map: dict mapping ontology IDs to preferred prefixes
:return: True if complete, False otherwise
"""
success = False
nodepath = filepaths["nodelist"]
edgepath = filepaths["edgelist"]
outnodepath = nodepath + ".tmp"
outedgepath = edgepath + ".tmp"
update_these_nodes = {}
try:
with open(nodepath,'r') as innodefile, \
open(edgepath, 'r') as inedgefile:
with open(outnodepath,'w') as outnodefile, \
open(outedgepath, 'w') as outedgefile:
for line in innodefile:
updated_node = False
line_split = (line.rstrip()).split("\t")
node_iri = line_split[0]
if ontoid in prefix_map:
for prefix in prefix_map[ontoid]["prefixes"]:
if node_iri.startswith(prefix[0]):
split_iri = node_iri.rsplit(prefix[1],1)
if ontoid in pref_prefix_map:
ontoid = pref_prefix_map[ontoid]
if len(split_iri) == 2:
new_curie = f"{ontoid}:{split_iri[1]}"
else:
new_curie = f"{ontoid}:"
line_split[0] = new_curie
update_these_nodes[node_iri] = new_curie
updated_node = True
continue
# If we don't have a native prefix OR this is a foreign prefix
# then look at other ontologies
if ontoid not in prefix_map or not updated_node:
for prefix_set in prefix_map:
for prefix in prefix_map[prefix_set]["prefixes"]:
if node_iri.startswith(prefix[0]):
split_iri = node_iri.rsplit(prefix[1],1)
if prefix_set in pref_prefix_map:
prefix_set = pref_prefix_map[prefix_set]
if len(split_iri) == 2:
new_curie = f"{prefix_set}:{split_iri[1]}"
else:
new_curie = f"{prefix_set}:"
line_split[0] = new_curie
update_these_nodes[node_iri] = new_curie
continue
outnodefile.write("\t".join(line_split) + "\n")
for line in inedgefile:
line_split = (line.rstrip()).split("\t")
# Check for edges containing nodes to be updated
if line_split[1] in update_these_nodes:
line_split[1] = update_these_nodes[line_split[1]]
if line_split[3] in update_these_nodes:
line_split[3] = update_these_nodes[line_split[3]]
outedgefile.write("\t".join(line_split) + "\n")
os.replace(outnodepath,nodepath)
os.replace(outedgepath,edgepath)
success = True
except (IOError, KeyError) as e:
print(f"Failed to write CURIES for {nodepath} and/or {edgepath}: {e}")
success = False
return success | d574142a634b9a4998f8b887e4bc309661c5625e | 17,219 |
from collections import deque
import numpy as np
def split(time: list, value: list, step, group_hours, region=None, whole_group=False):
"""
Split and group 'step' number of averaged values 'hours' apart
:param time: time per value (hour apart)
:param value: values corresponding to time
:param step: number of group times set for each index
:param group_hours: group times into 'hours' hours
:param region: region of indices to be considered
:param whole_group: include the aggregated value of
whole time group for each of its members not just until that member
:return:
"""
splits = list() # step group times per index
size = len(time)
if size != len(value):
return -1
# direction is the sign of step
direction = np.sign(step)
# indices to be considered
region = (0, size - 1) if region is None else region
region = (max(region[0], 0), size - 1 if region[1] < 0 else region[1])
# Running group average of each index either forward (when step < 0)
# or backward (when step > 0), when whole_group = False
if not whole_group:
run_average = running_average(time, value, group_hours=group_hours,
direction=-np.sign(step), whole_group=False)
else:
run_average = []
group_time, average, group_lookup, _ = group_average(time, value, group_hours=group_hours)
group_size = len(group_time)
# init first 'steps' (for forward)
    # or duplication of first (for backward) [whole/partial] group average as array of step values
group_time = pre_group_time = round_hour(time[region[0]], group_hours)
group_index = group_lookup[group_time]
last_index = group_index + step - direction
if step > 0:
initial_values = average[group_index:min(last_index + 1, group_size)]
        if len(initial_values) != abs(step):  # duplicate the last group average to reach 'step' values
            initial_values += [average[-1]] * (last_index + 1 - group_size)
else:
initial_values = average[max(last_index, 0):group_index + 1]
if len(initial_values) != abs(step): # duplicate the first group average to reach 'step' values
initial_values = ([average[0]] * (-last_index)) + initial_values
step_values = deque(initial_values)
cur_step = 0
for i in range(region[0], region[1] + 1):
group_time = round_hour(time[i], group_hours)
if group_time != pre_group_time:
group_index = group_lookup[group_time]
last_index = group_index + step - direction
cur_step = min(step, cur_step + 1)
step_values.rotate(-1) # shift right to go toward end of groups
# duplicate the second to last value if group size is passed
# otherwise set the last value from group averages
if step > 0:
step_values[-1] = average[last_index] if last_index < group_size else step_values[-2]
else:
step_values[-1] = average[group_index]
pre_group_time = group_time
# replace the group average with partial average if the whole group is not required
if not whole_group:
if cur_step == step or step > 0:
step_values[0 if step > 0 else -1] = run_average[i]
elif group_index == 0:
# this branch is executed only for the first group for backward (few times)
step_values = deque([run_average[i]] * abs(step))
splits.append(list(step_values))
return splits | a8f8cf51d241a532e6a925d4323abb281215f543 | 17,220 |
def launch_ebs_affinity_process(instanceid, instance_infos, ebs_configs):
""" Manage the ebs affinity process.
:param instanceid string The instance id
:param instance_infos dict Informations about the instance
:param ebs_config dict The EBS parameters
:return None
"""
if not check_if_ebs_already_attached(instanceid,
ebs_configs['mount_point'],
instance_infos):
if manage_ebs_volume(ebs_configs, instanceid, instance_infos):
logger.info("EBS: {0} has been attached on the Instance-id: {1}" .format(ebs_configs['mount_point'], instanceid))
else:
logger.error("Error during the management of the EBS volume: {0}. Disk not attached to the instance: {1} " .format(ebs_configs['mount_point'], instanceid))
return False
return True
else:
logger.info("A disk is already attached on the target mount point: {0}" .format(ebs_configs['mount_point']))
return True | ec30f4748417cee8f9fe96c2c47cf78dd10be59f | 17,221 |
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieve a list of mapping rules
"""
return isamAppliance.invoke_get("Retrieve a list of mapping rules",
"/iam/access/v8/mapping-rules") | e48aa65f5212ea32e84c40e326633cf2971d378a | 17,222 |
def get_oyente(test_subject=None, mutation=None):
"""
Run the Oyente test suite on a provided script
"""
is_request = False
if not test_subject:
test_subject = request.form.get('data')
is_request = True
o = Oyente(test_subject)
info, errors = o.oyente(test_subject)
if len(errors) > 0:
errors = [{'lineno':e[0].split(':')[1],'code':"\n".join(e[1].split('\n')[1:]),'description':e[1].split('\n')[0]} for e in errors]
if len(info) > 0:
info = [{x[0]:x[1] for x in info}]
output = {"info":info, "issues": errors, 'error':[]}
if mutation:
output['mutation'] = mutation
if is_request:
return jsonify(output)
return output | f264262c22314ac26b56369f4d7741effb4cf09e | 17,223 |
def search_for_example(search_string: str) -> tuple:
"""Get the Example for a Particular Function"""
function = match_string(search_string)
if function:
function = function.strip()
sql = f"SELECT example, comment FROM example WHERE function='{function}'"
data = execute(sql)
return function, data
else:
return None, (()) | 16eb034369954017b1b51a206d48af40f5768ef6 | 17,224 |
import numpy as np
def WildZumba(x, c1=20, c2=0.2, c3=2*np.pi):
""" A separable R**n==>R function, assumes a real-valued numpy vector as input """
return -c1 * np.exp(-c2*np.sqrt(np.mean(x**2))) - np.exp(np.mean(np.cos(c3*x))) + c1 + np.exp(1) | 589f90f174d61269c2c019ef678f51c498c68ff8 | 17,225 |
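A quick check of the test function above, which is the classic Ackley benchmark in separable form: at the global minimum x = 0 both exponentials reach their maxima and the value is exactly 0 (sketch assuming NumPy, as imported above):

print(WildZumba(np.zeros(5)))                 # 0.0, the global minimum
print(WildZumba(np.array([1.5, -2.0, 0.3])))  # strictly positive elsewhere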
import numpy as np
import openpyxl as ox
def import_xlsx(filename, skip_variation=False):
    """Import peak parameters, the chromatogram and, if present, the varied parameters.
    Parameters
    ----------
    filename : str
        Name of the xlsx file.
    skip_variation : bool, default = False
        Skip the Variation block even if it is present.
    Returns
    -------
    Tuple[List[Peak], Chromatogram, dict, int, np.ndarray]
        If the xlsx file contains a Variation block, the returned tuple consists of a list of
        Peak instances, a Chromatogram instance, a dictionary of the varied parameters with
        lists of sigmas, the number of files, and an array with the fraction of files in which
        peaks will be missing.
    Tuple[List[Peak], Chromatogram]
        If the Variation block is absent, the returned tuple contains only the list of Peak
        instances and the Chromatogram instance.
    """
wb = ox.load_workbook(filename, data_only=True)
sheet_ranges = wb['input']
max_row = sheet_ranges.max_row
rows = list(sheet_ranges.rows)
wb.close()
def get_row(row, key):
return list(map(lambda x: x.value, rows[row][d_xl[key]['start_idx']:
d_xl[key]['end_idx']]))
def get_col(col, start_row, nn):
res = []
for i_cell in range(start_row, start_row + nn):
res.append(sheet_ranges.cell(i_cell, col).value)
return res
d_xl = {}
    # read the first row
for cell in rows[0]:
cell_value = cell.value
if cell_value is not None:
d_xl.update({cell_value: {}})
    # handle merged cells (Chromatogram, Peaks, Variation)
mcr = sheet_ranges.merged_cells.ranges
for cr in mcr:
name = cr.start_cell.value
if name in d_xl:
start_idx = cr.start_cell.col_idx - 1
cols = cr.size['columns']
end_idx = start_idx + cols
d_xl[name].update({'start_idx': start_idx, 'cols': cols, 'end_idx': end_idx})
# Chromatogram
names, values = map(lambda x: get_row(x, 'Chromatogram'), (1, 2))
d_xl['Chromatogram'].update(zip(names, values))
chrom = Chromatogram(**d_xl['Chromatogram'])
# Peaks
head_peaks = get_row(1, 'Peaks')
params_peak = {}
sep_mz_i = ';'
sep_into_mz_i = ' '
peak_list = []
for i in range(2, max_row):
params_peak.update(zip(head_peaks, get_row(i, 'Peaks')))
mz_i = np.fromstring(params_peak['mass_spect'].replace('\n', '').
replace(sep_mz_i, ''), sep=sep_into_mz_i).reshape((-1, 2))
del params_peak['mass_spect']
mz_list = mz_i[:, 0].astype(np.int16)
peak_list.append(Peak(mz_list=mz_list, intensity_list=mz_i[:, 1], **params_peak))
# Variation
if 'Variation' in d_xl and not skip_variation:
head_variation = get_row(1, 'Variation')
params_variation = {}
for par in head_variation:
params_variation.update({par: []})
for i in range(2, max_row):
for key, value in zip(head_variation, get_row(i, 'Variation')):
params_variation[key].append(value)
num_files = 0
for n, i in enumerate(rows[0]):
if i.value in ('Num_files', 'Num files'):
num_files = rows[1][n].value
break
# Missing
miss = np.zeros(max_row)
for n, i in enumerate(rows[0]):
if i.value in ('Missing', 'missing', 'miss'):
miss = np.array(get_col(n + 1, 3, len(peak_list)))
break
return peak_list, chrom, params_variation, num_files, miss
return peak_list, chrom | 75b32618274fb2ab7ede9f525856fdc13e8c97ee | 17,226 |
from typing import Union
from typing import Optional
def _get_dataset_builder(
dataset: Union[str, tfds.core.DatasetBuilder],
data_dir: Optional[str] = None) -> tfds.core.DatasetBuilder:
"""Returns a dataset builder."""
if isinstance(dataset, str):
dataset_builder = tfds.builder(dataset, data_dir=data_dir)
elif isinstance(dataset, tfds.core.DatasetBuilder):
dataset_builder = dataset
else:
raise ValueError("`dataset` must be a string or tfds.core.DatasetBuilder. "
f"Received {dataset} instead.")
return dataset_builder | 0f17169541604e69a614ddfeee4c8a963834ed8e | 17,227 |
import numpy as np
import trimesh
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return | c7504260306495e6252569a3cb83f61ca084de26 | 17,228 |
import numpy as np
def label_rotate(annot, rotate):
"""
anti-clockwise rotate the occ order annotation by rotate*90 degrees
:param annot: (H, W, 9) ; [-1, 0, 1]
:param rotate: value in [0, 1, 2, 3]
:return:
"""
rotate = int(rotate)
if rotate == 0:
return annot
else:
annot_rot = np.rot90(annot, rotate)
orientation = annot_rot[:, :, 1:].copy()
if rotate == 1:
mapping = [2, 4, 7, 1, 6, 0, 3, 5]
elif rotate == 2:
mapping = [7, 6, 5, 4, 3, 2, 1, 0]
else:
mapping = [5, 3, 0, 6, 1, 7, 4, 2]
annot_rot[:, :, 1:] = orientation[:, :, mapping]
return annot_rot | e37a2e9dddc5f19898691fe22d02978d1954d435 | 17,229 |
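A shape-level sanity check for the rotation helper above (values are illustrative; the annotation follows the docstring's (H, W, 9) layout):

import numpy as np

annot = np.zeros((6, 4, 9), dtype=np.int8)     # (H, W, 9) occlusion annotation
rotated = label_rotate(annot, rotate=1)        # anti-clockwise 90 degrees
print(rotated.shape)                           # (4, 6, 9)
print(label_rotate(annot, rotate=0) is annot)  # True: rotate=0 is a no-op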
def allocate_available_excess(region):
"""
Allocate available excess capital (if any).
"""
difference = region['total_revenue'] - region['total_cost']
if difference > 0:
region['available_cross_subsidy'] = difference
region['deficit'] = 0
else:
region['available_cross_subsidy'] = 0
region['deficit'] = abs(difference)
return region | 19a3d7fbc776ae5b5b47ecfc32db14bf4abd949e | 17,230 |
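A toy usage of the allocation rule above; the field names follow the function and the numbers are illustrative:

surplus_region = {'total_revenue': 120.0, 'total_cost': 100.0}
print(allocate_available_excess(surplus_region))
# -> available_cross_subsidy == 20.0, deficit == 0

deficit_region = {'total_revenue': 80.0, 'total_cost': 100.0}
print(allocate_available_excess(deficit_region))
# -> available_cross_subsidy == 0, deficit == 20.0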
def items(dic):
"""Py 2/3 compatible way of getting the items of a dictionary."""
try:
return dic.iteritems()
except AttributeError:
return iter(dic.items()) | 2664567765efe172591fafb49a0efa36ab9fcca8 | 17,231 |
import json
import binascii
def new_settingsresponse_message(loaded_json, origin):
"""
takes in a request - executes search for settings and creates a response as bytes
:param loaded_json:
:param origin: is this a response of drone or groundstation
:return: a complete response packet as bytes
"""
complete_response = {}
complete_response['destination'] = 4
complete_response['type'] = DBCommProt.DB_TYPE_SETTINGS_RESPONSE.value
complete_response['response'] = loaded_json['request']
complete_response['origin'] = origin
complete_response['id'] = loaded_json['id']
if loaded_json['request'] == DBCommProt.DB_REQUEST_TYPE_DB.value:
if 'settings' in loaded_json:
complete_response = read_dronebridge_settings(complete_response, origin, True, loaded_json['settings'])
else:
complete_response = read_dronebridge_settings(complete_response, origin, False, None)
elif loaded_json['request'] == DBCommProt.DB_REQUEST_TYPE_WBC.value:
if 'settings' in loaded_json:
complete_response = read_wbc_settings(complete_response, True, loaded_json['settings'])
else:
complete_response = read_wbc_settings(complete_response, False, None)
response = json.dumps(complete_response)
crc32 = binascii.crc32(str.encode(response))
return response.encode() + crc32.to_bytes(4, byteorder='little', signed=False) | 812444353a50ffeb468398d8681e81a74cb9d7e9 | 17,232 |
def list_icmp_block(zone, permanent=True):
"""
List ICMP blocks on a zone
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
        salt '*' firewalld.list_icmp_block zone
"""
cmd = "--zone={0} --list-icmp-blocks".format(zone)
if permanent:
cmd += " --permanent"
return __firewall_cmd(cmd).split() | 9f0f8c2e7a688263ddd31c8384babba25e2300e6 | 17,233 |
def dependencies_found(analysis_id, execution_id):
"""
Installation data from buildbot.
Requires a JSON list of objects with the following keys:
* installer: The system used to install the dependency.
* spec: The full specification used by the user to request the
package.
* source: Entity providing the artifact.
* name: The real package name.
* version: The installed version of the package.
.. note:: Internal API
"""
installations = bottle.request.json
if installations:
# Create database objects returning a list of scanneable artifacts.
artifacts = register_installations(analysis_id, execution_id,
installations)
analysis_needed = {a for a in artifacts if a.analysis_needed()}
# Launch dependency scan and mark done when finished.
analysis_task = (
providers.analyze_artifacts(analysis_needed) # <- group of tasks
| tasks.mark_task_done.si(analysis_id)).delay()
return {'task_id': analysis_task.id, 'scanning': len(analysis_needed)}
else:
return {'task_id': None, 'scanning': 0} | 8983e53dd558b42272d4886412e918e7b192754e | 17,234 |
def set_augmentor():
"""
Set the augmentor.
1. Select the operations and create the config dictionary
2. Pass it to the Augmentor class with any other information that requires
3. Return the instance of the class.
:return:
"""
config = {'blur': {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3},
'brightness': {'values': (0.6, 1.0), 'prob': 0.1},
'brightness1': {'values': (1.0, 1.5), 'prob': 0.1},
'flip': {'values': ('hor',), 'prob': 0.5},
'grid_mask': {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4},
'illumination': {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2},
'noise': {'values': (2, 10), 'use_gray_noise': True, 'prob': 1},
'rotate': {'values': (-45, 45), 'prob': 0.4},
'translate': {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True},
'zoom': {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}}
augmentor = Augmentor(config, no_repetition=True)
return augmentor | 77c64cec87af2d41a4cc6dc55600ab5eaedad247 | 17,235 |
def get_global_event_logger_instance():
"""Get an event logger with prefilled fields for the collection.
This returns an options configured event logger (proxy) with prefilled
fields. This is almost CERTAINLY the event logger that you want to use in
zaza test functions.
:returns: a configured LoggerInstance with prefilled collection and unit
fields.
:rtype: LoggerInstance
"""
return get_global_events_logging_manager().get_event_logger_instance() | 66228b15dd4d1ac9468834124e4ba073a846580f | 17,236 |
def plot_market_entry(cat_entry_and_exit_df, cat_entry_and_exit_df_2):
"""
returns a plot with the entry and exit of firms per category
"""
# get the limits so everything is on the same scale
df = pd.concat([cat_entry_and_exit_df, cat_entry_and_exit_df_2])
limits = [-df.exit.max() - 0.3, df.entry.max() + 0.3]
fig = tools.make_subplots(rows=1, cols=2)
xs = cat_entry_and_exit_df.index
new_per_cat = cat_entry_and_exit_df.entry.astype(int)
dead_per_cat = cat_entry_and_exit_df.exit.astype(int)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} entries in category {}'.format(x, y)
for x, y in zip(new_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[0]}), 1, 1)
fig.append_trace(
go.Bar(y=xs, x=-dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} exits in category {}'.format(x, y)
for x, y in zip(dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[0]}), 1, 1)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat - dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} net entries in category {}'.format(x, y)
for x, y in zip(new_per_cat - dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': dark_scen_colours[0]}), 1, 1)
xs = cat_entry_and_exit_df_2.index
new_per_cat = cat_entry_and_exit_df_2.entry.astype(int)
dead_per_cat = cat_entry_and_exit_df_2.exit.astype(int)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} entries in category {}'.format(x, y)
for x, y in zip(new_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[1]}), 1, 2)
fig.append_trace(
go.Bar(y=xs, x=-dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} exits in category {}'.format(x, y)
for x, y in zip(dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[1]}), 1, 2)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat - dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} net entries in category {}'.format(x, y)
for x, y in zip(new_per_cat - dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': dark_scen_colours[1]}), 1, 2)
fig['layout']['xaxis2'].update(title="Number of companies", range=limits)
fig['layout']['xaxis1'].update(title="Number of companies", range=limits)
fig['layout']['yaxis1'].update(title="Product category")
fig['layout'].update(title='Market entry and exit per product category')
fig['layout']['font'].update(family='HelveticaNeue')
fig['layout'].update(barmode='overlay')
return fig | c1b1ad00c1dbdde804e4d594dda4ae6525c7779f | 17,237 |
import io
import cv2
import numpy as np
from PIL import Image
def find_elements_by_image(self, filename):
"""
    Locate all occurrences of an image in the webpage.
:Args:
- filename: The path to the image to search (image shall be in PNG format).
:Returns:
A list of ImageElement.
"""
template = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
template_height, template_width, _ = template.shape
webpage_png = self.get_screenshot_as_png()
webpage_img = Image.open(io.BytesIO(webpage_png))
webpage = np.asarray(webpage_img, dtype=np.float32).astype(np.uint8)
webpage = cv2.cvtColor(webpage, cv2.COLOR_BGR2RGB)
return [
ImageElement(self, loc[0], loc[1], template_width, template_height)
for loc in match_template(webpage, template)
] | 23137766b68068c8cb78bb57127bfa6040bace70 | 17,238 |
from typing import Set
from typing import Sequence
def compile_tf_signature_def_saved_model(
saved_model_dir: str, saved_model_tags: Set[str], module_name: str,
exported_name: str, input_names: Sequence[str],
output_names: Sequence[str]) -> Modules:
"""Compiles a SignatureDef SavedModel to each backend that we test.
Args:
saved_model_dir: Directory of the saved model.
saved_model_tags: Optional set of tags to use when loading the model.
module_name: A name for this compiled module.
backend_info: BackendInfo with the details for compiling the saved model.
exported_name: A str representing the signature on the saved model to
compile.
input_names: A sequence of kwargs to feed to the saved model.
output_names: A sequence of named outputs to extract from the saved model.
Returns:
A 'Modules' dataclass containing the reference module, target modules and
artifacts directory.
"""
global _global_modules
if _global_modules is not None:
return _global_modules
# Setup the directory for saving compilation artifacts and traces.
artifacts_dir = _setup_artifacts_dir(module_name)
# Get the backend information for this test.
ref_backend_info = module_utils.BackendInfo(FLAGS.reference_backend,
f"{FLAGS.reference_backend}_ref")
tar_backend_infos = get_target_backends()
compile_backend = (
lambda backend_info: backend_info.compile_signature_def_saved_model(
saved_model_dir, saved_model_tags, module_name, exported_name,
input_names, output_names, artifacts_dir))
ref_module = compile_backend(ref_backend_info)
tar_modules = [
compile_backend(backend_info) for backend_info in tar_backend_infos
]
_global_modules = Modules(ref_module, tar_modules, artifacts_dir)
return _global_modules | ed1a1efc28c9ae473d76c700ab7781f141fc3765 | 17,239 |
def origtime2float(time):
""" converts current datetime to float
>>> import datetime
>>> t = datetime.datetime(2010, 8, 5, 14, 45, 41, 778877)
>>> origtime2float(t)
53141.778876999997
"""
t3fmt = time.strftime("%H:%M:%S:%f")
return time2float(t3fmt) | 03cadf1f686fde1dd46cbb52fd71adcc2f06585c | 17,240 |
import numpy as np
def discrete_fourier_transform1(freq, tvec, dvec, log=False):
"""
Calculate the Discrete Fourier transform (slow scales with N^2)
The DFT is normalised to have the mean value of the data at zero frequency
:param freq: numpy array, frequency grid calculated from the time vector
:param tvec: numpy array or list, input time(independent) vector, normalised
by the mean of the time vector
:param dvec: numpy array or list, input dependent vector, normalised by the
mean of the data vector
:return wfn: numpy array of complex numbers, spectral window function
:return dft: numpy array of complex numbers, "dirty" discrete Fourier
transform
:param log: boolean, if True prints progress to standard output
if False silent
"""
# deal with logging
if log:
print('\n\t Calculating Discrete Fourier Transform...')
# -------------------------------------------------------------------------
# Code starts here
# -------------------------------------------------------------------------
wfn = np.zeros(len(freq), dtype=complex)
dft = np.zeros(int(len(freq)/2), dtype=complex)
for i in __tqdmlog__(range(len(freq)), log):
phase = -2*np.pi*freq[i]*tvec
phvec = np.array(np.cos(phase) + 1j * np.sin(phase))
if i < int(len(freq)/2):
wfn[i] = np.sum(phvec)/len(tvec)
dft[i] = np.sum(dvec*phvec)/len(tvec)
# complete the spectral window function
else:
wfn[i] = np.sum(phvec)/len(tvec)
return wfn, dft | b5e1bafe1ba2b8863ac97bb95c204ca84877b8fd | 17,241 |
from typing import List
def ngram_overlaps(a: List[str], b: List[str], threshold: int = 3) -> List[int]:
"""
Compute the set over overlapping strings in each set based on n-gram
overlap where 'n' is defined by the passed in threshold.
"""
def get_ngrams(text):
"""
Get a set of all the ngrams in the text
"""
return set(" ".join(g) for g in grouper(text.split(), threshold))
overlaps = []
remaining = set(range(len(b)))
for text in a:
best_idx = -1
best_overlap = 0
ngrams = get_ngrams(text)
for idx in remaining:
ngram_overlap = len(ngrams & get_ngrams(b[idx]))
if ngram_overlap > best_overlap:
best_idx = idx
best_overlap = ngram_overlap
if best_idx >= 0:
overlaps.append(best_idx)
remaining.remove(best_idx)
return overlaps | 87621e28a4a5d2cba5bb66c6bfa9834c711a7ecf | 17,242 |
def ssq_cwt(x, wavelet='morlet', scales='log', nv=None, fs=None, t=None,
ssq_freqs=None, padtype='symmetric', squeezing='sum',
difftype='direct', difforder=None, gamma=None):
"""Calculates the synchrosqueezed Continuous Wavelet Transform of `x`.
Implements the algorithm described in Sec. III of [1].
# Arguments:
x: np.ndarray
Vector of signal samples (e.g. x = np.cos(20 * np.pi * t))
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
scales: str['log', 'linear'] / np.ndarray
CWT scales.
- 'log': exponentially distributed scales, as pow of 2:
`[2^(1/nv), 2^(2/nv), ...]`
- 'linear': linearly distributed scales.
!!! EXPERIMENTAL; default scheme for len(x)>2048 performs
poorly (and there may not be a good non-piecewise scheme).
nv: int / None
Number of voices (CWT only). Suggested >= 32 (default=32).
fs: float / None
Sampling frequency of `x`. Defaults to 1, which makes ssq
frequencies range from 1/dT to 0.5, i.e. as fraction of reference
sampling rate up to Nyquist limit; dT = total duration (N/fs).
Overridden by `t`, if provided.
Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Overrides `fs` if not None.
ssq_freqs: str['log', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
padtype: str
Pad scheme to apply on input. One of:
('zero', 'symmetric', 'replicate').
'zero' is most naive, while 'symmetric' (default) partly mitigates
boundary effects. See `padsignal`.
squeezing: str['sum', 'lebesgue']
- 'sum' = standard synchrosqueezing using `Wx`.
- 'lebesgue' = as in [4], setting `Wx=ones()/len(Wx)`, which is
not invertible but has better robustness properties in some cases.
Not recommended unless you know what you're doing.
difftype: str['direct', 'phase', 'numerical']
Method by which to differentiate Wx (default='direct') to obtain
instantaneous frequencies:
w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
- 'direct': use `dWx`, obtained via frequency-domain
differentiation (see `cwt`, `phase_cwt`).
- 'phase': differentiate by taking forward finite-difference of
unwrapped angle of `Wx` (see `phase_cwt`).
- 'numerical': first-, second-, or fourth-order (set by
`difforder`) numeric differentiation (see `phase_cwt_num`).
difforder: int[1, 2, 4]
Order of differentiation for difftype='numerical' (default=4).
gamma: float / None
CWT phase threshold. Sets `w=inf` for small values of `Wx` where
phase computation is unstable and inaccurate (like in DFT):
w[abs(Wx) < beta] = inf
This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
contributions from points with indeterminate phase.
Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
Wx: np.ndarray [na x n]
Continuous Wavelet Transform of `x` L2-normed (see `cwt`);
to L1-norm, `Wx /= np.sqrt(scales)`
scales: np.ndarray [na]
Scales associated with rows of `Wx`.
w: np.ndarray [na x n]
Phase transform for each element of `Wx`.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
def _process_args(N, fs, t, nv, difftype, difforder, squeezing):
if difftype not in ('direct', 'phase', 'numerical'):
raise ValueError("`difftype` must be one of: direct, phase, numerical"
" (got %s)" % difftype)
if difforder is not None:
if difftype != 'numerical':
WARN("`difforder` is ignored if `difftype != 'numerical'")
elif difforder not in (1, 2, 4):
raise ValueError("`difforder` must be one of: 1, 2, 4 "
"(got %s)" % difforder)
elif difftype == 'numerical':
difforder = 4
if squeezing not in ('sum', 'lebesgue'):
raise ValueError("`squeezing` must be one of: sum, lebesgue "
"(got %s)" % squeezing)
dt, fs, t = _process_fs_and_t(fs, t, N)
nv = nv or 32
return dt, fs, difforder, nv
def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
if difftype == 'direct':
# calculate instantaneous frequency directly from the
# frequency-domain derivative
w = phase_cwt(Wx, dWx, difftype, gamma)
elif difftype == 'phase':
# !!! bad; yields negatives, and forcing abs(w) doesn't help
# calculate inst. freq. from unwrapped phase of CWT
w = phase_cwt(Wx, None, difftype, gamma)
elif difftype == 'numerical':
# !!! tested to be very inaccurate for small `a`
# calculate derivative numerically
_, n1, _ = p2up(N)
Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
w = phase_cwt_num(Wx, dt, difforder, gamma)
return Wx, w
N = len(x)
dt, fs, difforder, nv = _process_args(N, fs, t, nv, difftype, difforder,
squeezing)
scales, cwt_scaletype, *_ = process_scales(scales, N, nv=nv, get_params=True)
# l1_norm=False to spare a multiplication; for SSWT L1 & L2 are exactly same
# anyway since we're inverting CWT over time-frequency plane
rpadded = (difftype == 'numerical')
Wx, scales, _, dWx = cwt(x, wavelet, scales=scales, fs=fs, l1_norm=False,
derivative=True, padtype=padtype, rpadded=rpadded)
gamma = gamma or np.sqrt(EPS)
Wx, w = _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder)
if ssq_freqs is None:
# default to same scheme used by `scales`
ssq_freqs = cwt_scaletype
Tx, ssq_freqs = ssqueeze(Wx, w, scales=scales, fs=fs, ssq_freqs=ssq_freqs,
transform='cwt', squeezing=squeezing)
if difftype == 'numerical':
Wx = Wx[:, 4:-4]
w = w[:, 4:-4]
Tx = Tx[:, 4:-4]
return Tx, ssq_freqs, Wx, scales, w | 8af5caea64e9a861f7702f52c50681e61322658c | 17,243 |
import random
import logging
import time
def request_retry_decorator(fn_to_call, exc_handler):
"""A generic decorator for retrying cloud API operations with consistent repeatable failure
patterns. This can be API rate limiting errors, connection timeouts, transient SSL errors, etc.
Args:
fn_to_call: the function to call and wrap around
exc_handler: a bool return function to check if the passed in exception is retriable
"""
def wrapper(*args, **kwargs):
MAX_ATTEMPTS = 10
SLEEP_SEC_MIN = 5
SLEEP_SEC_MAX = 15
for i in range(1, MAX_ATTEMPTS + 1):
try:
return fn_to_call(*args, **kwargs)
except Exception as e:
if i < MAX_ATTEMPTS and exc_handler(e):
sleep_duration_sec = \
SLEEP_SEC_MIN + random.random() * (SLEEP_SEC_MAX - SLEEP_SEC_MIN)
logging.warn(
"API call failed, waiting for {} seconds before re-trying (this was attempt"
" {} out of {}).".format(sleep_duration_sec, i, MAX_ATTEMPTS))
time.sleep(sleep_duration_sec)
continue
raise e
return wrapper | 0813cc19d9826275917c9eb701683a73bfe597f9 | 17,244 |
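A usage sketch of the retry wrapper above, treating a hypothetical rate-limit error as retriable; all names here are illustrative:

class RateLimitError(Exception):
    """Illustrative stand-in for a cloud API rate-limit error."""

attempts = {"n": 0}

def flaky_api_call():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise RateLimitError("429: slow down")
    return "ok"

safe_call = request_retry_decorator(flaky_api_call,
                                    lambda e: isinstance(e, RateLimitError))
print(safe_call())  # "ok"; the two failures are retried (each retry sleeps 5-15 s)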
import logging
import os
def lambda_handler(event, context):
"""
    1. Receive form data from the lambda event.
    2. DynamoDB: Stores incoming form data
    3. Discord: Posts notification to a channel
    4. Mailgun: sends notification
    args:
    - event: Event data that has triggered the lambda function
- context:
"""
logging.info(f'OS.environ: {os.environ}')
logging.info(f'lambda_handler: event {event}')
# store form data
for key, value in event["data"].items():
logging.info(f'lambda_handler: {key}: {value}')
data = event["data"]
db_put_success = dynamo_put(data=data)
if not db_put_success:
payload = create_payload(
is_success=False, data=data, method="DynamoDB")
notification.post_message_to_channel(payload=payload)
return {
'statusCode': 500,
'body': 'There was a problem uploading your data to DynamoDB.',
}
em_send_success = send_mailgun_message(data=data)
if not em_send_success:
payload = create_payload(
is_success=False, data=data, method="Mailgun")
notification.post_message_to_channel(payload=payload)
return {
'statusCode': 500,
'body': 'There was a problem sending your email via the Mailgun API.',
}
payload = create_payload(is_success=True, data=data, method="Lambda")
notification.post_message_to_channel(payload=payload)
return {
'statusCode': 200,
'headers': {
'Access-Control-Allow-Headers': 'Content-Type',
'Access-Control-Allow-Origin': f'{settings.company_url}',
'Access-Control-Allow-Methods': 'OPTIONS,POST'
},
'body': 'success',
} | 1cecfe77c6d845976372003d1f465c5d9e5ad2a1 | 17,245 |
import json
async def establish_async_connection(config: json, logger: AirbyteLogger) -> AsyncConnection:
"""
Creates an async connection to Firebolt database using the parameters provided.
This connection can be used for parallel operations.
:param config: Json object containing db credentials.
:param logger: AirbyteLogger instance to print logs.
:return: PEP-249 compliant database Connection object.
"""
logger.debug("Connecting to Firebolt.")
connection = await async_connect(**parse_config(config, logger))
logger.debug("Connection to Firebolt established.")
return connection | 696334cdce9e04ef8f5d42ceafbf1031b063b074 | 17,246 |
def _wrap_with_before(action, responder, resource=None, is_method=False):
"""Execute the given action function before a responder method.
Args:
action: A function with a similar signature to a resource responder
method, taking (req, resp, resource, params)
responder: The responder method to wrap
resource: The resource affected by `action` (default None). If None,
`is_method` MUST BE True, so that the resource can be
derived from the `self` param that is passed into the wrapper
is_method: Whether or not `responder` is an unbound method
(default False)
"""
# NOTE(swistakm): introspect action function to guess if it can handle
# additional resource argument without breaking backwards compatibility
action_spec = _get_argspec(action)
# NOTE(swistakm): create shim before checking what will be actually
# decorated. This allows to avoid excessive nesting
if len(action_spec.args) == (5 if _has_self(action_spec) else 4):
shim = action
else:
# TODO(kgriffs): This decorator does not work on callable
        # classes in Python versions prior to 3.4.
#
# @wraps(action)
def shim(req, resp, resource, kwargs):
# NOTE(kgriffs): Don't have to pass "self" even if has_self,
# since method is assumed to be bound.
action(req, resp, kwargs)
# NOTE(swistakm): method must be decorated differently than
# normal function
if is_method:
@wraps(responder)
def do_before(self, req, resp, **kwargs):
shim(req, resp, self, kwargs)
responder(self, req, resp, **kwargs)
else:
assert resource is not None
@wraps(responder)
def do_before(req, resp, **kwargs):
shim(req, resp, resource, kwargs)
responder(req, resp, **kwargs)
return do_before | 96d4e6640f85bf920a5cc35ed02926ca5f3be7e9 | 17,247 |
import datetime
def last_day_of_month(d):
""" From: https://stackoverflow.com/a/43088/6929343 """
if d.month == 12:
return d.replace(day=31)
return d.replace(month=d.month+1, day=1) - datetime.timedelta(days=1) | a97ce3bdbcd9d5cb707919750ecc818de04deb7e | 17,248 |
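Usage check for the month-end helper above (with the corrected `datetime` import):

import datetime

print(last_day_of_month(datetime.date(2024, 2, 10)))  # 2024-02-29
print(last_day_of_month(datetime.date(2021, 12, 5)))  # 2021-12-31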
import matplotlib.pyplot as plt
def plot_landscape(landscape):
"""
Plot all landscapes for a given (set of) diagrams
Inputs:
-------
landscape (list): Output of one iteration of persim.to_landscape()
Outputs:
--------
Plots for each landscape in the list
Returns:
--------
None
"""
# TODO: Integrate more complex plotting args and kwargs for more precise control
for i in range(len(landscape)):
pts, ls = landscape[i]
plt.figure()
for j in range(len(ls)):
plt.plot(pts, ls[j], label = f'$\lambda_{{{j}}}$')
plt.title(f'$H_{{{i}}}$ Landscape')
plt.legend()
plt.show()
return None | 0583f3d8c25079720e586d297396d04ab39dc7f5 | 17,249 |
import google.auth
import google.auth.transport.requests
def get_access_token():
"""Return access token for use in API request.
Raises:
requests.exceptions.ConnectionError.
"""
credentials, _ = google.auth.default(scopes=[
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only'
])
request = google.auth.transport.requests.Request()
credentials.refresh(request)
return credentials.token | 907aacd6d9976390b2896149179c84ea9bd3d0fc | 17,250 |
from typing import List
from typing import Dict
def _singleInstrumentParametersToJson(instrument: InstrumentBase,
get: bool = False,
addPrefix: str = '',
includeMeta: List[str] = [],
excludeParameters: List[str] = [],
simpleFormat: bool = True) -> Dict:
"""Create a dictionary that holds the parameters of an instrument."""
if "IDN" not in excludeParameters:
excludeParameters.append("IDN")
ret = {}
snap = instrument.snapshot(update=get)
for name, param in instrument.parameters.items():
if name not in excludeParameters:
if len(includeMeta) == 0 and simpleFormat:
ret[addPrefix + name] = snap['parameters'][name].get('value', None)
else:
ret[addPrefix + name] = dict()
for k, v in snap['parameters'][name].items():
if k in (['value'] + includeMeta):
ret[addPrefix + name][k] = v
else:
logger.debug(f"excluded: {addPrefix + name}")
for name, submod in instrument.submodules.items():
ret.update(_singleInstrumentParametersToJson(
submod, get=get, addPrefix=f"{addPrefix + name}.",
simpleFormat=simpleFormat, includeMeta=includeMeta))
return ret | 44b5b77f261a5774438aaeda25b7576d9b9b9274 | 17,251 |
def unknown_action(player: Player, table: dynamodb.Table) -> ActionResponse:
"""
Do nothing because the action could not be resolved.
In the message list, returns a message saying the action was bad.
:return: Original inputs matching updated inputs, and a message
"""
message = ["Action could not be resolved, type better next time"]
return player, player, {}, {}, message | ea2a03d140eea2853b77da492ea0f403fc9c6ad9 | 17,252 |
def forecast_marginal_bindglm(mod, n, k, X=None, nsamps=1, mean_only=False):
"""
Marginal forecast function k steps ahead for a binomial DGLM
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# F = np.copy(mod.F)
# if mod.nregn > 0:
# F[mod.iregn] = X.reshape(mod.nregn,1)
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
if mean_only:
return mod.get_mean(n, param1, param2)
# Simulate from the forecast distribution
return mod.simulate(n, param1, param2, nsamps) | dc8c82fc17465c4a22e7bc5d46cf9b5abd9abd54 | 17,253 |
def import_file(isamAppliance, id, filename, check_mode=False, force=False):
"""
Importing a file in the runtime template files directory.
"""
warnings = []
check_file = _check(isamAppliance, id)
if check_file != None and force == False:
warnings.append("File {0} exist.".format(id))
if force is True or _check_import(isamAppliance, id, filename, check_mode=check_mode):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post_files(
"Importing a file in the runtime template files directory",
"/mga/template_files/{0}".format(id),
[
{
'file_formfield': 'file',
'filename': filename,
'mimetype': 'application/octet-stream'
}
],
{
'type': 'file',
'force': force
})
return isamAppliance.create_return_object(warnings=warnings) | 3978e5476a7344ef4c92d2cec3852a57850380df | 17,254 |
def get_func_from_attrdict(func_name : str, attrdict : AttrDict) -> ObjectiveFunction1D:
"""
Given a string func_name, attempts to find the corresponding entry from attrdict.
:param func_name
:param attrdict
:returns Objective Function
"""
for key, val in attrdict.items():
if val.name == func_name:
return val | bb4b03371d5fbb642864d7c9e77d4172fee92315 | 17,255 |
from fdk import runner
import io
def event_handle(handle_code):
"""
Performs HTTP request-response procedure
:param handle_code: customer's code
:type handle_code: fdk.customer_code.Function
:return: None
"""
async def pure_handler(request):
log.log("in pure_handler")
headers = dict(request.headers)
log_frame_header(headers)
func_response = await runner.handle_request(
handle_code, constants.HTTPSTREAM,
headers=headers, data=io.BytesIO(request.body))
log.log("request execution completed")
headers = func_response.context().GetResponseHeaders()
status = func_response.status()
if status not in constants.FN_ENFORCED_RESPONSE_CODES:
status = constants.FN_DEFAULT_RESPONSE_CODE
return response.HTTPResponse(
headers=headers,
status=status,
content_type=headers.get(constants.CONTENT_TYPE),
body_bytes=func_response.body_bytes(),
)
return pure_handler | 03ca9cff4b7993e92c146565cb697a080d40c5ef | 17,256 |
import os.path as osp

import numpy as np
import torch
def load_precomputed_embeddings(det_df, seq_info_dict, embeddings_dir, use_cuda):
"""
Given a sequence's detections, it loads from disk embeddings that have already been computed and stored for its
detections
Args:
det_df: pd.DataFrame with detection coordinates
seq_info_dict: dict with sequence meta info (we need frame dims)
embeddings_dir: name of the directory where embeddings are stored
Returns:
torch.Tensor with shape (num_detects, embeddings_dim)
"""
# Retrieve the embeddings we need from their corresponding locations
embeddings_path = osp.join(
seq_info_dict["seq_path"],
"processed_data",
"embeddings",
seq_info_dict["det_file_name"],
embeddings_dir,
)
# print("EMBEDDINGS PATH IS ", embeddings_path)
frames_to_retrieve = sorted(det_df.frame.unique())
embeddings_list = [
torch.load(osp.join(embeddings_path, f"{frame_num}.pt"))
for frame_num in frames_to_retrieve
]
embeddings = torch.cat(embeddings_list, dim=0)
# First column in embeddings is the index. Drop the rows of those that are not present in det_df
ixs_to_drop = list(
set(embeddings[:, 0].int().numpy()) - set(det_df["detection_id"])
)
embeddings = embeddings[
~np.isin(embeddings[:, 0], ixs_to_drop)
] # Not so clean, but faster than a join
assert_str = "Problems loading embeddings. Indices between query and stored embeddings do not match. BOTH SHOULD BE SORTED!"
assert (embeddings[:, 0].numpy() == det_df["detection_id"].values).all(), assert_str
embeddings = embeddings[:, 1:] # Get rid of the detection index
return embeddings.to(
torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
) | 685eb63ca23d634345304575a148e72a8172567e | 17,257 |
def _model_gpt(size=0, dropout_rate=0.0, attention_dropout_rate=0.0):
"""Configs for a variety of Transformer model sizes."""
num_layers = [1, 3, 6, 12, 24, 36, 48][size]
dim = [64, 128, 512, 768, 1024, 1280, 1600][size]
num_heads = int(dim / 64) # Always dim 64 per head
return _transformer(
emb_dim=dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=dim,
mlp_dim=dim * 4,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate) | 7bc9eab929b8c48ca4b8ff671e9e0885c0d2bc44 | 17,258 |
def test_module(client, demisto_args: dict):
"""
Test the OMWS Client connection by attempting to query a common username
"""
d = client.query_profile_data("maneenus")
if d:
return 'ok'
else:
raise DemistoException("Incorrect or empty API response") | 59e653c8fb5c40ee84a5945e5e4b0410418248ec | 17,259 |
def as_string(raw_data):
"""Converts the given raw bytes to a string (removes NULL)"""
return bytearray(raw_data[:-1]) | 6610291bb5b71ffc0be18b4505c95653bdac4c55 | 17,260 |
import numpy as np
import seaborn
import matplotlib
import matplotlib.patches
def plot_series_statistics(observed=None,
expected=None,
total_stdev=None,
explained_stdev=None,
color_set='Set2',
xscale="linear",
yscale="linear",
xlabel="feature",
ylabel="value",
y_cutoff=None,
sort_by='expected',
sort_ascending=True,
despine=True,
legend_enable=True,
legend_title=None,
legend_loc='best',
alpha=None,
markersize=1.0,
linewdith=1.2,
fontsize=8,
ax=None,
title=None,
return_handles=False,
return_indices=False):
""" This function can plot 2 comparable series, and the
scale are represented in 2 y-axes (major axis - left) and
the right one
Parameters
----------
xcale, yscale : {"linear", "log", "symlog", "logit", ...}
text or instance in `matplotlib.scale`
despine : bool (default: True)
if True, remove the top and right spines from plot,
otherwise, only remove the top spine
Example
-------
>>> import numpy as np
>>> from matplotlib import pyplot as plt
>>> np.random.seed(1234)
>>> x = np.random.randn(8000)
>>> y = np.random.randn(8000)
...
>>> z = np.random.rand(8000) + 3
>>> w = np.random.rand(8000) + 3
...
>>> ax, handles1 = V.plot_series_statistics(observed=x, expected=y,
... explained_stdev=np.std(x),
... total_stdev=np.std(y),
... color_set='Set1',
... legend_enable=False, legend_title="Series_1",
... return_handles=True)
>>> _, handles2 = V.plot_series_statistics(observed=z, expected=w,
... explained_stdev=np.std(z),
... total_stdev=np.std(w),
... color_set='Set2',
... legend_enable=False, legend_title="Series_2",
... return_handles=True,
... ax=ax.twinx(), alpha=0.2)
>>> plt.legend(handles=handles1 + handles2, loc='best', fontsize=8)
"""
ax = to_axis2D(ax)
observed, expected, total_stdev, explained_stdev = _preprocess_series(
observed, expected, total_stdev, explained_stdev)
# ====== color palette ====== #
if isinstance(color_set, (tuple, list)):
observed_color, expected_color, \
expected_total_standard_deviations_color, \
expected_explained_standard_deviations_color = color_set
else:
standard_palette = seaborn.color_palette(color_set, 8)
observed_color = standard_palette[0]
expected_palette = seaborn.light_palette(standard_palette[1], 5)
expected_color = expected_palette[-1]
expected_total_standard_deviations_color = expected_palette[1]
expected_explained_standard_deviations_color = expected_palette[3]
# ====== prepare ====== #
sort_indices = _get_sort_indices(observed, expected, sort_by, sort_ascending)
# ====== plotting expected and observed ====== #
indices = np.arange(
len(observed) if observed is not None else len(expected)) + 1
handles = []
# ====== series title ====== #
if legend_title is not None:
_, = ax.plot([],
marker='None',
linestyle='None',
label="$%s$" % legend_title)
handles.append(_)
# ====== plotting expected and observed ====== #
if observed is not None:
_, = ax.plot(indices,
observed[sort_indices],
label="Observations",
color=observed_color,
linestyle="",
marker="o",
zorder=2,
markersize=markersize)
handles.append(_)
if expected is not None:
_, = ax.plot(indices,
expected[sort_indices],
label="Expectation",
color=expected_color,
linestyle="-",
marker="",
zorder=3,
linewidth=linewdith)
handles.append(_)
# ====== plotting stdev ====== #
if total_stdev is not None:
lower = expected - total_stdev
upper = expected + total_stdev
ax.fill_between(
indices,
lower[sort_indices],
upper[sort_indices],
color=expected_total_standard_deviations_color,
zorder=0,
alpha=alpha,
)
_ = matplotlib.patches.Patch(label="Stdev(Total)",
color=expected_total_standard_deviations_color)
handles.append(_)
if explained_stdev is not None:
lower = expected - explained_stdev
upper = expected + explained_stdev
ax.fill_between(
indices,
lower[sort_indices],
upper[sort_indices],
color=expected_explained_standard_deviations_color,
zorder=1,
alpha=alpha,
)
_ = matplotlib.patches.Patch(
label="Stdev(Explained)",
color=expected_explained_standard_deviations_color)
handles.append(_)
# ====== legend ====== #
if legend_enable:
ax.legend(handles=handles, loc=legend_loc, fontsize=fontsize)
# ====== adjusting ====== #
if bool(despine):
seaborn.despine(top=True, right=True)
else:
seaborn.despine(top=True, right=False)
ax.set_yscale(yscale, nonposy="clip")
ax.set_ylabel('[%s]%s' % (yscale, ylabel), fontsize=fontsize)
ax.set_xscale(xscale)
ax.set_xlabel('[%s]%s%s' %
(xscale, xlabel, ' (sorted by "%s")' %
str(sort_by).lower() if sort_by is not None else ''),
fontsize=fontsize)
# ====== set y-cutoff ====== #
y_min, y_max = ax.get_ylim()
if y_cutoff is not None:
if yscale == "linear":
y_max = y_cutoff
elif yscale == "log":
y_min = y_cutoff
ax.set_ylim(y_min, y_max)
ax.tick_params(axis='both', labelsize=fontsize)
# ====== title ====== #
if title is not None:
ax.set_title(title, fontsize=fontsize, fontweight='bold')
ret = [ax]
if return_handles:
ret.append(handles)
if return_indices:
ret.append(sort_indices)
return ax if len(ret) == 1 else tuple(ret) | 0e940011e2a00b41bf1303604ff13c6d8c152327 | 17,261 |
import math
def generate_trapezoid_profile(max_v, time_to_max_v, dt, goal):
"""Creates a trapezoid profile with the given constraints.
Returns:
t_rec -- list of timestamps
x_rec -- list of positions at each timestep
v_rec -- list of velocities at each timestep
a_rec -- list of accelerations at each timestep
Keyword arguments:
max_v -- maximum velocity of profile
time_to_max_v -- time from rest to maximum velocity
dt -- timestep
goal -- final position when the profile is at rest
"""
t_rec = [0.0]
x_rec = [0.0]
v_rec = [0.0]
a_rec = [0.0]
a = max_v / time_to_max_v
time_at_max_v = goal / max_v - time_to_max_v
# If profile is short
if max_v * time_to_max_v > goal:
time_to_max_v = math.sqrt(goal / a)
time_from_max_v = time_to_max_v
time_total = 2.0 * time_to_max_v
profile_max_v = a * time_to_max_v
else:
time_from_max_v = time_to_max_v + time_at_max_v
time_total = time_from_max_v + time_to_max_v
profile_max_v = max_v
while t_rec[-1] < time_total:
t = t_rec[-1] + dt
t_rec.append(t)
if t < time_to_max_v:
# Accelerate up
a_rec.append(a)
v_rec.append(a * t)
elif t < time_from_max_v:
# Maintain max velocity
a_rec.append(0.0)
v_rec.append(profile_max_v)
elif t < time_total:
# Accelerate down
decel_time = t - time_from_max_v
a_rec.append(-a)
v_rec.append(profile_max_v - a * decel_time)
else:
a_rec.append(0.0)
v_rec.append(0.0)
x_rec.append(x_rec[-1] + v_rec[-1] * dt)
return t_rec, x_rec, v_rec, a_rec | 5851cfab06e20a9e79c3a321bad510d33639aaca | 17,262 |
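
A minimal usage sketch for generate_trapezoid_profile; the constraint values below (2 m/s cruise, 1 s ramp, 5 ms step, 10 m goal) are illustrative assumptions, not taken from the source.

# Illustrative constraints: 2 m/s cruise velocity reached in 1 s, 5 ms timestep, 10 m goal.
t, x, v, a = generate_trapezoid_profile(max_v=2.0, time_to_max_v=1.0, dt=0.005, goal=10.0)

# The profile ramps up, cruises, then ramps down, ending (approximately) at rest at the goal.
print(f"samples: {len(t)}, final position: {x[-1]:.3f} m, final velocity: {v[-1]:.3f} m/s")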
import logging
def main(args):
"""
Main function of PyGalGen generator
Parameters
----------
args : list of command line arguments:
Returns
-------
Error code
"""
logging.basicConfig(level=logging.DEBUG)
parser = define_default_params()
pipeline = PipelineExecutor(parser)
logging.info("Created pipeline executor")
path_to_default = res.files(pygalgen.generator.default_plugins)
default_plugins = discover_plugins(path_to_default)
logging.info(f"Discovered {len(default_plugins)} default"
f" plugin{'' if len(default_plugins) == 1 else 's'}")
plugin_path = obtain_plugins_path(args)
custom_plugins = discover_plugins(plugin_path)
logging.info(f"Discovered {len(custom_plugins)} custom"
f" plugin{'' if len(default_plugins) == 1 else 's'}")
result = pipeline.execute_pipeline(default_plugins +
custom_plugins)
return result | 4ee6dcfb0b2897b34e36966256e8b7990dbc2760 | 17,263 |
def query_pypi(spec_pk):
""" Query one spec of package on PyPI"""
spec = Spec.objects.get(pk=spec_pk)
logger.debug('[PYPI] Fetching data for %s' % spec)
pkg_data = PyPI().get_info(spec.name, spec.version)
if not pkg_data:
logger.debug('[PYPI] Errored %s ' % spec)
spec.status = 'error'
spec.save(update_fields=['status', 'updated_at'])
return {}
spec.release_date = pkg_data['last_release_date']
spec.python_versions = pkg_data['py3_versions']
spec.save(update_fields=['release_date', 'python_versions', 'updated_at'])
logger.debug('[PYPI] Finished %s ' % spec)
return pkg_data | e306ac77792653a3c106d8094c00e06994329952 | 17,264 |
import logging
def get_logger(name: str,
format_str: str = aps_logger_format,
date_format: str = aps_time_format,
file: bool = False) -> logging.Logger:
"""
Get logger instance
Args:
name: logger name
format_str|date_format: to configure logging format
file: if true, treat name as the name of the logging file
"""
def get_handler(handler):
handler.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=format_str, datefmt=date_format)
handler.setFormatter(formatter)
return handler
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
# both stdout & file
if file:
logger.addHandler(get_handler(logging.FileHandler(name)))
return logger | 06d673473c7014d6373003bf924fdf2dc9965baf | 17,265 |
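
A short usage sketch for get_logger, assuming the module-level aps_logger_format and aps_time_format constants referenced by the defaults are defined elsewhere in the source; "run.log" is an illustrative file name.

# Console logger; records go to stderr via the StreamHandler added above.
logger = get_logger("example")
logger.info("pipeline started")

# With file=True the name is treated as a log-file path and a formatted FileHandler is added.
file_logger = get_logger("run.log", file=True)
file_logger.info("this line is also written to ./run.log")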
import numpy as np
import torch
def to_tensor(args, device=None):
"""Convert an arg or sequence of args to torch Tensors
"""
singleton = not isinstance(args, (list, tuple))
if singleton:
args = [args]
tensor_args = []
for arg in args:
if isinstance(arg, torch.Tensor):
tensor_args.append(arg)
elif _is_numeric(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.float64:
tensor_args.append(
torch.tensor(arg, dtype=torch.float32, device=device)
)
else:
tensor_args.append(torch.tensor(arg, device=device))
else:
raise ValueError("Received non-numeric argument ", arg)
return tensor_args[0] if singleton else tensor_args | d85d842c095aa3c942f94e61c79d2bbeb49bc41d | 17,266 |
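
A brief usage sketch for to_tensor, assuming the module's _is_numeric helper recognizes Python scalars and NumPy arrays as numeric.

import numpy as np
import torch

# A single argument comes back as a single tensor (float64 arrays are downcast to float32).
x = to_tensor(np.arange(4, dtype=np.float64))
# A list of arguments comes back as a list of tensors; existing tensors pass through unchanged.
a, b = to_tensor([np.ones((2, 2)), torch.zeros(3)], device="cpu")
print(x.dtype, a.shape, b.shape)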
def main(self):
"""
to run:
kosmos 'j.data.bcdb.test(name="meta_test")'
"""
bcdb, _ = self._load_test_model()
assert len(bcdb.get_all()) == 0
assert len(bcdb.meta._data["url"]) == 7
s = list(j.data.schema._url_to_md5.keys())
assert "despiegk.test" in s
m = bcdb.model_get(url="despiegk.test")
schema_text = """
@url = jumpscale.schema.test.a
category**= ""
txt = ""
i = 0
"""
s = bcdb.schema_get(schema=schema_text)
assert s.properties_unique == []
bcdb.meta._schema_set(s)
assert len(bcdb.meta._data["url"]) == 8
assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
schema = bcdb.model_get(url="jumpscale.schema.test.a")
o = schema.new()
assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
s0 = bcdb.schema_get(url="jumpscale.schema.test.a")
s0md5 = s0._md5 + ""
model = bcdb.model_get(schema=s0)
assert bcdb.get_all() == [] # just to make sure its empty
assert len(bcdb.meta._data["url"]) == 8
a = model.new()
a.category = "acat"
a.txt = "data1"
a.i = 1
a.save()
a2 = model.new()
a2.category = "acat2"
a2.txt = "data2"
a2.i = 2
a2.save()
assert len([i for i in model.index.model.find()]) == 2
myid = a.id + 0
assert a._model.schema._md5 == s0md5
# lets upgrade schema to float
s_temp = bcdb.schema_get(schema=schema_text)
assert len(bcdb.meta._data["url"]) == 8 # should be same because is same schema, should be same md5
assert s_temp._md5 == s0._md5
# lets upgrade schema to float
s2 = bcdb.schema_get(schema=schema_text)
model2 = bcdb.model_get(schema=s2)
assert len(bcdb.meta._data["url"]) == 8 # acl, user, circle, despiegktest and the 1 new one
a3 = model2.new()
a3.category = "acat3"
a3.txt = "data3"
a3.i = 3
a3.save()
assert a3.i == 3.0
assert a2.i == 2 # int
assert len(model2.find()) == 3 # needs to be 3 because model is for all of them
assert len(model.find()) == 3 # needs to be 3 because model is for all of them
all = model2.find()
print(all)
a4 = model2.get(all[0].id)
a4_ = model.get(all[0].id)
assert a4_ == a4
a5 = model2.get(all[1].id)
a6 = model.get(all[2].id)
a6_ = model.get(all[2].id)
assert a6_ == a6
assert a6.id == a3.id
assert a6.i == a3.i
self._log_info("TEST META DONE")
return "OK" | 7f88d33bd6cc2df5284201d859718a8a06e6a4e4 | 17,267 |
def snake_head_only():
"""
|===========|
|···········|
|···········|
|···········|
|···········|
|···········|
|···········|
|·······o···|
|···········|
|···········|
|···········|
|···········|
|===========|
"""
return Snake.from_dict(
**{
"body": [
{"x": 7, "y": 4},
],
}
) | c08ffd0a86ec9d5a40d2649dd63a2b60019a6791 | 17,268 |
import six
def str_to_bool(s):
"""Convert a string value to its corresponding boolean value."""
if isinstance(s, bool):
return s
elif not isinstance(s, six.string_types):
raise TypeError('argument must be a string')
true_values = ('true', 'on', '1')
false_values = ('false', 'off', '0')
if s.lower() in true_values:
return True
elif s.lower() in false_values:
return False
else:
        raise ValueError('not a recognized boolean value: {}'.format(s)) | c228321872f253ce3e05c6af9284ec496dea8dcf | 17,269
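
A few illustrative calls to str_to_bool:

assert str_to_bool("True") is True
assert str_to_bool("off") is False
assert str_to_bool(True) is True      # bools are returned unchanged
try:
    str_to_bool("maybe")              # unrecognized strings raise ValueError
except ValueError as err:
    print(err)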
def id_feat_pred_mz_rt(cursor, mz, rt, ccs, tol_mz, tol_rt, tol_ccs, esi_mode, norm='l2'):
"""
id_feat_pred_mz_rt
description:
identifies a feature on the basis of predicted m/z and retention time
parameters:
cursor (sqlite3.Cursor) -- cursor for querying lipids.db
mz (float) -- m/z to match
rt (float) -- retention time to match
ccs (float) -- CCS to match
tol_mz (float) -- tolerance for m/z
tol_rt (float) -- tolerance for retention time
tol_ccs (float) -- tolerance for CCS
esi_mode (str) -- filter results by ionization mode: 'neg', 'pos', or None for unspecified
[norm (str)] -- specify l1 or l2 norm for computing scores [optional, default='l2']
returns:
(str or list(str)), (str) -- putative identification(s) (or '' for no matches), identification level
"""
qry = 'SELECT name, adduct, mz, rt FROM predicted_mz JOIN predicted_rt ON ' \
+ 'predicted_mz.t_id=predicted_rt.t_id WHERE mz BETWEEN ? AND ? AND rt BETWEEN ? and ?'
if esi_mode == 'pos':
qry += ' AND adduct LIKE "%+"'
elif esi_mode == 'neg':
qry += ' AND adduct LIKE "%-"'
mz_min = mz - tol_mz
mz_max = mz + tol_mz
rt_min = rt - tol_rt
rt_max = rt + tol_rt
putative_ids, putative_scores = [], []
for name, adduct, mz_x, rt_x in cursor.execute(qry, (mz_min, mz_max, rt_min, rt_max)).fetchall():
putative_ids.append('{}_{}'.format(name, adduct))
putative_scores.append(get_score(tol_mz, tol_rt, tol_ccs, mz_q=mz, rt_q=rt, mz_x=mz_x, rt_x=rt_x))
if putative_ids:
return putative_ids, 'pred_mz_rt', putative_scores
else:
return '', '', [] | 72818a631b155e1c50d53b26c1749bf8f68767f7 | 17,270 |
import os
import re
import sys
def help(user_display_name, module_file_fullpath, module_name):
"""Generate help message for all actions can be used in the job"""
my_path = os.path.dirname(module_file_fullpath)
my_fname = os.path.basename(module_file_fullpath)
my_package = module_name.rsplit(u'.')[-2] # ex: sayhello
my_package_path = module_name.rsplit(u'.', 1)[-2] # ex: wechatbot.sayhello
help_msg = u'Actions in "%s":\n========\n' % (my_package)
for action_py in os.listdir(my_path):
action_name = u''
action_desc = u''
# Skip non-file
if not os.path.isfile(os.path.join(my_path, action_py)):
continue
# Skip self
if action_py == my_fname:
continue
# Folders start with "__"
if re.findall(u'^__.+', action_py):
continue
# Folders start with "."
        if re.findall(u'^\\..*', action_py):
            continue
        action_name = re.sub(u'\\.py$', u'', action_py)
# Load action module
action_module_path = u'%s.%s' % (my_package_path, action_name)
action_from_path = my_package_path
# Import the "help" module
try:
action_module = __import__(
action_module_path, fromlist = [action_from_path])
except:
print(u"Cannot import %s." % (action_module_path), file = sys.stderr)
continue
# Get Job description
try:
action_desc = action_module._help_desc
except:
action_desc = u'[no description]'
print(u"No _help_desc for %s." % (action_module_path), file = sys.stderr)
# Arrange action_name and action_desc in help_msg
help_msg += u'> %s\n\t%s\n' % (action_name, action_desc)
# Tail messages
help_msg += u'========\nTo get detailed usage for\neach action, try:\n'
if user_display_name:
help_msg += u'@%s\u2005%s <action> -h' % (user_display_name, my_package)
else:
help_msg += u'%s <action> -h' % (my_package)
return help_msg | d76d0a2c07f7d60d1f9409281dc699d402fa1dd7 | 17,271 |
import pygame
def Arrow_bg(self):
"""
The function that will create the background for the dropdown arrow button.
For internal use only. This function is therefore also not imported by __init__.py
"""
#Just leave the making of the buttons background to the default function. Not gonna bother re-doing that here (because why would I?)
if not self.func_data:
surface = self.Make_background_surface(None)
elif self.value:
surface = self.Make_background_surface(self.func_data["__accent_bg"])
else:
surface = self.Make_background_surface(self.func_data["__bg"])
#Draw the arrow so characteristic to dropdown boxes
if not self.value:
arrow_coords = (
(self.scaled(self.width * 1/6), self.scaled(self.height * 1/3)), #Top left
(self.scaled(self.width * 1/2), self.scaled(self.height * 2/3)), #Bottom
(self.scaled(self.width * 5/6), self.scaled(self.height * 1/3)), #Top right
)
else:
arrow_coords = (
(self.scaled(self.width * 1/6), self.scaled(self.height * 2/3)), #Bottom left
(self.scaled(self.width * 1/2), self.scaled(self.height * 1/3)), #Top
(self.scaled(self.width * 5/6), self.scaled(self.height * 2/3)), #Bottom right
)
pygame.draw.polygon(surface, self.border[0] if self.border else (63, 63, 63), arrow_coords)
return surface | 4b09c197666aa5ea15713d98ae7c38e1b0ffa0e0 | 17,272 |
def _is_debugging(ctx):
"""Returns `True` if the current compilation mode produces debug info.
rules_apple specific implementation of rules_swift's `is_debugging`, which
is not currently exported.
See: https://github.com/bazelbuild/rules_swift/blob/44146fccd9e56fe1dc650a4e0f21420a503d301c/swift/internal/api.bzl#L315-L326
"""
return ctx.var["COMPILATION_MODE"] in ("dbg", "fastbuild") | f2468f394d4cb6ef545df06af5e78e0f4f1c1525 | 17,273 |
def get_bounds_5km_to_1km( itk_5km, isc_5km ) :
"""
return the 1km pixel indexes limits in the 5km pixel [ itk_5km, isc_5km ] footprint
"""
# set the (track,scan) indexes of the 5km pixel in the 5km grid
itk_1km = itk_5km_to_1km ( itk_5km )
isc_1km = isc_5km_to_1km ( isc_5km )
# set the 1km indexes of pixels to interpolate along track
itk_1km_min = itk_1km - 2
itk_1km_max = itk_1km + 2
# general case : 2 interpolations done along scan : [isc-1, isc] then [isc, isc+1]
isc_1km_min = isc_1km - 2
isc_1km_max = isc_1km + 2
# if last 5km pixel along scan, only 4 1km pixels in the 5km footprint in this direction
if ( isc_5km == sz_sc_5km - 1 ) :
isc_1km_max = isc_1km + 6
return itk_1km_min, itk_1km_max, isc_1km_min, isc_1km_max | 7fd175787f075d7ed9b3e8ed04565f38877de1e4 | 17,274 |
import torch
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
triplet_loss: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = _get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = torch.multiply(mask_anchor_positive, pairwise_dist)
# shape (batch_size, 1)
hardest_positive_dist = torch.max(anchor_positive_dist, dim=1, keepdim=True).values
# print("hardest_positive_dist", hardest_positive_dist.mean())
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = _get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist = torch.max(pairwise_dist, dim=1, keepdim=True).values
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist = torch.min(anchor_negative_dist, dim=1, keepdim=True).values
# print("hardest_negative_dist", hardest_negative_dist.mean())
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
triplet_loss = torch.relu(hardest_positive_dist - hardest_negative_dist + margin)
# Get final mean triplet loss
triplet_loss = torch.mean(triplet_loss)
return triplet_loss | 37d20237580463e668cee77c96f732f2d0211aef | 17,275 |
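
A minimal usage sketch for batch_hard_triplet_loss, assuming the helper functions it relies on (_pairwise_distances and the two triplet-mask builders) are available in the same module; batch size, embedding dimension, and margin below are illustrative.

import torch

torch.manual_seed(0)
embeddings = torch.randn(32, 128)        # batch of 32 embeddings
labels = torch.randint(0, 5, (32,))      # 5 identity classes
loss = batch_hard_triplet_loss(labels, embeddings, margin=0.2, squared=False)
print(loss.item())                       # scalar batch-hard triplet loss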
import tensorflow as tf
def categorical_sample_logits(logits):
"""
Samples (symbolically) from categorical distribution, where logits is a NxK
matrix specifying N categorical distributions with K categories
specifically, exp(logits) / sum( exp(logits), axis=1 ) is the
probabilities of the different classes
Cleverly uses gumbell trick, based on
https://github.com/tensorflow/tensorflow/issues/456
"""
U = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1, name='sample_once') | f93752b11de02b1b61f60b3ff5c12dd9c15f7d8f | 17,276 |
def sort_results(boxes):
    """Returns the boxes sorted by score (descending) given DenseCap
    results.json output
    Parameters
    ----------
    boxes : dictionary
        output from load_output_json
    Returns
    -------
    list of boxes sorted by score
    """
    return sorted(boxes, key=lambda x: x['score'], reverse=True) | 20f30e5846de4ce46073c3d32573d283576489e0 | 17,277
import datetime
def get_date(d : str) -> datetime.datetime:
"""A helper function that takes a ModDB string representation of time and returns an equivalent
datetime.datetime object. This can range from a datetime with the full year to
second to just a year and a month.
Parameters
-----------
d : str
String representation of a datetime
Returns
-------
datetime.datetime
The datetime object for the given string
"""
try:
return datetime.datetime.strptime(d[:-3] + d[-2:], '%Y-%m-%dT%H:%M:%S%z')
except ValueError:
pass
try:
return datetime.datetime.strptime(d, '%Y-%m-%d')
except ValueError:
pass
return datetime.datetime.strptime(d, '%Y-%m') | 44fb951ecb96102c631f88dc888aac11d11c8bad | 17,278 |
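
Illustrative inputs covering the three formats get_date handles (full timestamp with timezone offset, date only, year and month only):

full = get_date("2019-02-24T13:45:06+00:00")   # timezone colon is stripped for %z parsing
day = get_date("2019-02-24")
month = get_date("2019-02")
print(full.isoformat(), day.date(), month.strftime("%Y-%m"))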
def var_policer(*args):
"""Returns a variable policer object built from args."""
return VarPolicer(args) | a346e041118f1be2ed6b0acd2c9e3d04603031df | 17,279 |
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
    # looking for a horizontal winner
    i = 0
    while i < len(board):
        j = 1
        while j < len(board):
            # guard against empty cells (assumed to be None) so that an empty
            # line does not end the search early with a spurious None result
            if board[i][j] is not None and board[i][j-1] == board[i][j] and board[i][j] == board[i][j+1]:
                return board[i][j]
            j += 2
        i += 1
    # looking for a vertical winner
    i = 1
    while i < len(board):
        j = 0
        while j < len(board):
            if board[i][j] is not None and board[i-1][j] == board[i][j] and board[i][j] == board[i+1][j]:
                return board[i][j]
            j += 1
        i += 2
    # looking for a diagonal winner
    if board[1][1] is not None and board[1][1] == board[0][0] and board[1][1] == board[2][2]:
        return board[1][1]
    elif board[1][1] is not None and board[1][1] == board[0][2] and board[1][1] == board[2][0]:
        return board[1][1]
    else:
        return None | 31ab2cf04dfe269598efdd073762505643563a96 | 17,280
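
An illustrative 3x3 board; empty squares are assumed to be None, matching the guards above.

X, O, EMPTY = "X", "O", None
board = [[X,     X,     X],
         [O,     O,     EMPTY],
         [EMPTY, EMPTY, EMPTY]]
print(winner(board))                              # -> "X" (top row)
print(winner([[EMPTY] * 3 for _ in range(3)]))    # -> None (no winner yet)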
import logging

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

log = logging.getLogger(__name__)
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
A method for displaying digital filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freq_resp(self,mode = 'dB',Npts = 1024)
A method for displaying the filter frequency response magnitude,
phase, and group delay. A plot is produced using matplotlib
freqz_resp(b,a=[1],mode = 'dB',Npts = 1024,fsize=(6,4))
Parameters
----------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficents
mode : display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
n_pts : number of points to plot; default is 1024
    fsize : figure size; default is (6,4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
else:
return None
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w, H = signal.freqz(b[n], a[n], 2 * np.pi * f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f * fs, 20 * np.log10(np.abs(H)))
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f * fs, np.angle(H))
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2 * theta) / 2.
theta_dif = np.diff(theta2)
f_diff = np.diff(f)
Tg = -np.diff(theta2) / np.diff(w)
# For gain almost zero set groupdelay = 0
            # pylab.find was removed from matplotlib; use the NumPy equivalent on the gain in dB
            idx = np.nonzero(np.ravel(20 * np.log10(np.abs(H[:-1])) < -400))[0]
Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
# print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1] * fs, Tg / fs)
plt.ylim([0, 1.2 * max_Tg])
else:
plt.plot(f[:-1] * fs, Tg)
plt.ylim([0, 1.2 * max_Tg])
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2) | 207ad7ad59a3260df9d5df80c1b8e1bee4c33a3e | 17,281 |
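
A short usage sketch comparing two FIR lowpass designs; the filter specifications below are illustrative, and the helper only needs matching lists of numerator and denominator coefficients.

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 8000.0
b_short = signal.firwin(31, 1000.0, fs=fs)     # 31-tap lowpass, 1 kHz cutoff
b_long = signal.firwin(63, 1000.0, fs=fs)      # sharper 63-tap version
freqz_resp_list([b_short, b_long],
                a=[np.array([1]), np.array([1])],
                mode='dB', fs=fs, n_pts=1024)
plt.show()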
import socket
def _nslookup(ipv4):
"""Lookup the hostname of an IPv4 address.
Args:
ipv4: IPv4 address
Returns:
hostname: Name of host
"""
# Initialize key variables
hostname = None
# Return result
try:
ip_results = socket.gethostbyaddr(ipv4)
if len(ip_results) > 1:
hostname = ip_results[0]
except:
hostname = None
return (ipv4, hostname) | 7771887dbfcd60e73b8fce0ce4029fcd7058a7d1 | 17,282 |
def get_service_node(service):
""" Returns the name of the node that is providing the given service, or empty string """
node = rosservice_get_service_node(service)
    if node is None:
node = ""
return node | 7a8df548e119e8197f92340d228fdc7855494670 | 17,283 |
import warnings
from ftplib import FTP, all_errors
from io import StringIO

from pandas import read_csv
def _download_nasdaq_symbols(timeout):
"""
@param timeout: the time to wait for the FTP connection
"""
try:
ftp_session = FTP(_NASDAQ_FTP_SERVER, timeout=timeout)
ftp_session.login()
except all_errors as err:
        raise RemoteDataError('Error connecting to %r: %s' %
(_NASDAQ_FTP_SERVER, err))
lines = []
try:
ftp_session.retrlines('RETR ' + _NASDAQ_TICKER_LOC, lines.append)
except all_errors as err:
        raise RemoteDataError('Error downloading from %r: %s' %
(_NASDAQ_FTP_SERVER, err))
finally:
ftp_session.close()
# Sanity Checking
if not lines[-1].startswith('File Creation Time:'):
raise RemoteDataError('Missing expected footer. Found %r' % lines[-1])
# Convert Y/N to True/False.
converter_map = dict((col, _bool_converter) for col, t in _TICKER_DTYPE
if t is bool)
# For pandas >= 0.20.0, the Python parser issues a warning if
# both a converter and dtype are specified for the same column.
# However, this measure is probably temporary until the read_csv
# behavior is better formalized.
with warnings.catch_warnings(record=True):
data = read_csv(StringIO('\n'.join(lines[:-1])), '|',
dtype=_TICKER_DTYPE, converters=converter_map,
index_col=1)
# Properly cast enumerations
for cat in _CATEGORICAL:
data[cat] = data[cat].astype('category')
return data | 9b34571086ac3e738e29b3ed130ab2d0c7303657 | 17,284 |
def sessions(request):
"""
Cookies prepeocessor
"""
context = {}
return context | 562f4e9da57d3871ce780dc1a0661a34b3279ec5 | 17,285 |
import logging

import numba as nb
import numpy as np
def dynamax_mnn(src: nb.typed.Dict, trg: nb.typed.Dict,
src_emb: np.ndarray, trg_emb: np.ndarray,
src_k: np.ndarray, trg_k: np.ndarray) -> np.ndarray:
"""
Run Dynamax-Jaccard in both directions and infer mutual neighbors.
:param src nb.typed.Dict: src_id2pointers dictionary
:param trg nb.typed.Dict: trg_id2pointers dictionary
:param src_emb np.ndarray: unnormalized word embeddings matrix for src lang
:param trg_emb np.ndarray: unnormalized word embeddings matrix for trg lang
    :param src_k np.ndarray: preranked target candidates for source language
    :param trg_k np.ndarray: preranked source candidates for target language
"""
logging.info('DynaMax: commencing first loop')
src_argmax = dynamax_loop(src, trg, src_emb, trg_emb, src_k)
logging.info('DynaMax: commencing second loop')
trg_argmax = dynamax_loop(trg, src, trg_emb, src_emb, trg_k)
logging.info('DynaMax: inferring mutual nearest neighbors')
mnn = mutual_nn(src_argmax, trg_argmax)
return mnn | 174f603df09cbe7a8ee91de29de48ccaf2573b31 | 17,286 |
def resnet152(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', DistillerBottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs) | 5e3435eea1a18c028d422abef57d9e88d88e609f | 17,287 |
import torch
def _load_model(featurizer_path):
"""Load the featurization model
Parameters
----------
featurizer_path: str
Path to the saved model file
Returns
-------
The loaded PyTorch model
"""
# load in saved model
pth = torch.load(featurizer_path)
model_args = pth['model_args']
model_state = pth['model_state']
model = UNet(**model_args)
model.load_state_dict(model_state)
# remove last layer and activation
model.segment = layers.Identity()
model.activate = layers.Identity()
model.eval()
return model | 21c29c9320b482bb33e82f97723d04bb53364ba1 | 17,288 |
def grid0_baseline(num_runs, render=True):
"""Run script for the grid0 baseline.
Parameters
----------
num_runs : int
number of rollouts the performance of the environment is evaluated
over
render : bool, optional
specifies whether to use the gui during execution
Returns
-------
flow.core.experiment.Experiment
class needed to run simulations
"""
exp_tag = flow_params['exp_tag']
sim_params = flow_params['sim']
vehicles = flow_params['veh']
env_params = flow_params['env']
net_params = flow_params['net']
initial_config = flow_params.get('initial', InitialConfig())
# define the traffic light logic
tl_logic = TrafficLightParams(baseline=False)
phases = [{"duration": "31", "minDur": "8", "maxDur": "45",
"state": "GrGr"},
{"duration": "6", "minDur": "3", "maxDur": "6",
"state": "yryr"},
{"duration": "31", "minDur": "8", "maxDur": "45",
"state": "rGrG"},
{"duration": "6", "minDur": "3", "maxDur": "6",
"state": "ryry"}]
for i in range(N_ROWS * N_COLUMNS):
tl_logic.add('center'+str(i), tls_type='actuated', phases=phases,
programID=1)
# modify the rendering to match what is requested
sim_params.render = render
# set the evaluation flag to True
env_params.evaluate = True
# import the network class
module = __import__('flow.networks', fromlist=[flow_params['network']])
network_class = getattr(module, flow_params['network'])
# create the network object
network = network_class(
name=exp_tag,
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=tl_logic
)
# import the environment class
module = __import__('flow.envs', fromlist=[flow_params['env_name']])
env_class = getattr(module, flow_params['env_name'])
# create the environment object
env = env_class(env_params, sim_params, network)
exp = Experiment(env)
results = exp.run(num_runs, env_params.horizon)
total_delay = np.mean(results['returns'])
return total_delay | 8cb8a01309bb8ea3aae467d8a6c8a81ce295ab94 | 17,289 |
def read_aims(filename):
"""Method to read FHI-aims geometry files in phonopy context."""
lines = open(filename, 'r').readlines()
cell = []
is_frac = []
positions = []
symbols = []
magmoms = []
for line in lines:
fields = line.split()
if not len(fields):
continue
if fields[0] == "lattice_vector":
vec = lmap(float, fields[1:4])
cell.append(vec)
elif fields[0][0:4] == "atom":
if fields[0] == "atom":
frac = False
elif fields[0] == "atom_frac":
frac = True
pos = lmap(float, fields[1:4])
sym = fields[4]
is_frac.append(frac)
positions.append(pos)
symbols.append(sym)
magmoms.append(None)
# implicitly assuming that initial_moments line adhere to FHI-aims geometry.in specification,
# i.e. two subsequent initial_moments lines do not occur
# if they do, the value specified in the last line is taken here - without any warning
elif fields[0] == "initial_moment":
magmoms[-1] = float(fields[1])
for (n,frac) in enumerate(is_frac):
if frac:
pos = [ sum( [ positions[n][l] * cell[l][i] for l in range(3) ] ) for i in range(3) ]
positions[n] = pos
if None in magmoms:
atoms = Atoms(cell=cell, symbols=symbols, positions=positions)
else:
atoms = Atoms(cell=cell, symbols=symbols, positions=positions, magmoms=magmoms)
return atoms | bcf5f00e57ed249c10667ad0b883986cb1b36865 | 17,290 |
import numpy as np
import tensorflow as tf
def test(model, issue_batches):
"""
return accuracy on test set
"""
session = tf.get_default_session()
num_correct = 0
num_predict = 0
for epoch, step, eigens, labels in issue_batches:
feeds = {
model['eigens']: eigens,
}
guess = session.run(model['guess'], feed_dict=feeds)
num_predict += guess.shape[0]
num_correct += \
np.sum(np.argmax(labels, axis=1) == np.argmax(guess, axis=1))
return float(num_correct) / float(num_predict) | 0d8ae672766567a6665089c2d7d5004e25d80755 | 17,291 |
def evaluate_sample(ResNet50_model, X_train, Y_train, X_val_b,Y_val_b,X_data,Y_data,checkpoint_path):
"""
A function that accepts a labeled-unlabeled data split and trains the relevant model on the labeled data, returning
the model and it's accuracy on the test set.
"""
# shuffle the training set:
perm = np.random.permutation(X_train.shape[0])
X_train = X_train[perm]
Y_train = Y_train[perm]
X_validation = X_val_b
Y_validation=to_categorical(Y_val_b)
# train and evaluate the model:
model = train_disease_classification_model(ResNet50_model,X_train, Y_train, X_validation, Y_validation, checkpoint_path)
acc = model.evaluate(X_data, Y_data, verbose=0)
return acc, model | c5f86feede372f078e9c88bac688c790de6578d6 | 17,292 |
import six
import base64
def Base64WSEncode(s):
"""
Return Base64 web safe encoding of s. Suppress padding characters (=).
Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
unicode to string type first.
@param s: string to encode as Base64
@type s: string
@return: Base64 representation of s.
@rtype: string
NOTE: Taken from keyczar (Apache 2.0 license)
"""
if isinstance(s, six.text_type):
# Make sure input string is always converted to bytes (if not already)
s = s.encode("utf-8")
return base64.urlsafe_b64encode(s).decode("utf-8").replace("=", "") | cb28001bddec215b763936fde4652289cf6480c0 | 17,293 |
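
An illustrative round trip; note that decoding requires re-adding the padding that Base64WSEncode strips.

import base64

token = Base64WSEncode("key material: \u00ff/=?")
print(token)                                   # URL-safe alphabet ('-', '_'), no '=' padding

padded = token + "=" * (-len(token) % 4)       # restore padding before decoding
print(base64.urlsafe_b64decode(padded).decode("utf-8"))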
def onlyWikipediaURLS(urls):
"""Some example HTML page data is from wikipedia. This function converts
relative wikipedia links to full wikipedia URLs"""
wikiURLs = [url for url in urls if url.startswith('/wiki/')]
return ["https://en.wikipedia.org"+url for url in wikiURLs] | df9ecbb73dfc9a764e4129069a4317517830307a | 17,294 |
import os
import os.path as osp
import pickle
def get_image_filename_index():
"""
Obtain a mapping of filename -> filepath for images
:return:
"""
index_path = osp.join(SEG_ROOT, 'privacy_filters', 'cache', 'fname_index.pkl')
if osp.exists(index_path):
        print('Found cached index. Loading it...')
return pickle.load(open(index_path, 'rb'))
else:
        print('Creating filename index ...')
fname_index = dict()
images_dir = osp.join(SEG_ROOT, 'images')
for fold in os.listdir(images_dir):
for img_filename in os.listdir(osp.join(images_dir, fold)):
image_path = osp.join(images_dir, fold, img_filename)
fname_index[img_filename] = image_path
pickle.dump(fname_index, open(index_path, 'wb'))
return fname_index | 002b6fd4dea1b00bb758377e71de0e67f5d979d3 | 17,295 |
def merge_coordinates(coordinates, capture_size):
"""Merge overlapping coordinates for MIP targets.
Parameters
----------
coordinates: python dictionary
Coordinates to be merged in the form {target-name: {chrom: chrx,
begin: start-coordinate, end: end-coordinate}, ..}
capture_size: int
Anticipated MIP capture size. If two regions are as close as 2 times
this value, they will be merged.
Returns
-------
target_coordinates: python dictionary
merged coordinates dictionary
target_names: python dictionary
names of included targets in each merged region.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
chroms = {}
for c in coordinates:
chrom = coordinates[c]["chrom"]
try:
chroms[chrom].append([coordinates[c]["begin"],
coordinates[c]["end"]])
except KeyError:
chroms[chrom] = [[coordinates[c]["begin"],
coordinates[c]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_chroms = {}
for c in chroms:
merged_chroms[c] = merge_overlap(chroms[c], 2 * capture_size)
# create regions for alignment
# create target coordinate for each region
target_coordinates = {}
target_names = {}
for c in merged_chroms:
regions = merged_chroms[c]
for reg in regions:
targets_in_region = []
for co in coordinates:
if (coordinates[co]["chrom"] == c
and reg[0] <= coordinates[co]["begin"]
<= coordinates[co]["end"] <= reg[1]):
targets_in_region.append(co)
region_name = targets_in_region[0]
target_names[region_name] = targets_in_region
r_start = reg[0]
r_end = reg[1]
target_coordinates[region_name] = [c, r_start, r_end]
return target_coordinates, target_names | 8af4a34fc8ce1a01ddcd4d4f257815ef5f852911 | 17,296 |
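
A small worked example, assuming the merge_overlap helper used above is importable from the same module; coordinates and capture size are illustrative. The two chr1 targets sit within 2 * capture_size of each other and are merged, while the chr2 target stays on its own.

coordinates = {
    "snp_A": {"chrom": "chr1", "begin": 1000, "end": 1001},
    "snp_B": {"chrom": "chr1", "begin": 1150, "end": 1151},
    "snp_C": {"chrom": "chr2", "begin": 5000, "end": 5001},
}
target_coordinates, target_names = merge_coordinates(coordinates, capture_size=100)
print(target_coordinates)   # expected: {'snp_A': ['chr1', 1000, 1151], 'snp_C': ['chr2', 5000, 5001]}
print(target_names)         # expected: {'snp_A': ['snp_A', 'snp_B'], 'snp_C': ['snp_C']}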
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
"""
Function creating the Emojify-v2 model's graph.
Arguments:
input_shape -- shape of the input, usually (max_len,)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its
50-dimensional vector representation word_to_index -- dictionary mapping from
words to their indices in the vocabulary (400,001 words)
Returns:
model -- a model instance in Keras
"""
# Define sentence_indices as the input of the graph,
# it should be of shape input_shape and dtype 'int32' (as it contains indices).
sentence_indices = Input(shape=input_shape, dtype=np.int32)
# Create the embedding layer pretrained with GloVe Vectors (≈1 line)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer, you get back the embeddings
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a batch of sequences.
X = LSTM(128, return_sequences=True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X trough another LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a single hidden state, not a batch of sequences.
X = LSTM(128)(X)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
    # Propagate X through a Dense layer to get back a batch of 5-dimensional scores
    # (the softmax is applied once, by the Activation layer below).
    X = Dense(5)(X)
# Add a softmax activation
X = Activation('softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(sentence_indices, X)
return model | 111a0c97e9a8c75aa6a7191ee89cbda69a84794c | 17,297 |
import fcntl
import os
def SetFdBlocking(fd, is_blocking):
"""Set a file descriptor blocking or nonblocking.
Please note that this may affect more than expected, for example it may
affect sys.stderr when called for sys.stdout.
Returns:
The old blocking value (True or False).
"""
if hasattr(fd, 'fileno'):
fd = fd.fileno()
old = fcntl.fcntl(fd, fcntl.F_GETFL)
if is_blocking:
value = old & ~os.O_NONBLOCK
else:
value = old | os.O_NONBLOCK
if old != value:
fcntl.fcntl(fd, fcntl.F_SETFL, value)
return bool(old & os.O_NONBLOCK) | 58d1496152cc752b59e8abc0ae3b42387a2f8926 | 17,298 |
def de_parser(lines):
"""return a dict of {OfficalName: str, Synonyms: str, Fragment: bool,
Contains: [itemdict,], Includes: [itemdict,]} from DE lines
The DE (DEscription) lines contain general descriptive information about
the sequence stored. This information is generally sufficient to identify
the protein precisely.
The description always starts with the proposed official name of the
protein. Synonyms are indicated between brackets. Examples below
If a protein is known to be cleaved into multiple functional components,
the description starts with the name of the precursor protein, followed by
a section delimited by '[Contains: ...]'. All the individual components are
listed in that section and are separated by semi-colons (';'). Synonyms are
allowed at the level of the precursor and for each individual component.
If a protein is known to include multiple functional domains each of which
is described by a different name, the description starts with the name of
the overall protein, followed by a section delimited by '[Includes: ]'. All
the domains are listed in that section and are separated by semi-colons
(';'). Synonyms are allowed at the level of the protein and for each
individual domain.
In rare cases, the functional domains of an enzyme are cleaved, but the
catalytic activity can only be observed, when the individual chains
reorganize in a complex. Such proteins are described in the DE line by a
combination of both '[Includes:...]' and '[Contains:...]', in the order
given in the following example:
If the complete sequence is not determined, the last information given on
the DE lines is '(Fragment)' or '(Fragments)'. Example:
DE Dihydrodipicolinate reductase (EC 1.3.1.26) (DHPR) (Fragment).
DE Arginine biosynthesis bifunctional protein argJ [Includes: Glutamate
DE N-acetyltransferase (EC 2.3.1.35) (Ornithine acetyltransferase)
DE (Ornithine transacetylase) (OATase); Amino-acid acetyltransferase
DE (EC 2.3.1.1) (N-acetylglutamate synthase) (AGS)] [Contains: Arginine
DE biosynthesis bifunctional protein argJ alpha chain; Arginine
DE biosynthesis bifunctional protein argJ beta chain] (Fragment).
Trouble maker:
DE Amiloride-sensitive amine oxidase [copper-containing] precursor(EC
DE 1.4.3.6) (Diamine oxidase) (DAO).
"""
labeloff_lines = labeloff(lines)
joined = join_parser(labeloff_lines, chars_to_strip="). ")
keys = ["Includes", "Contains", "Fragment"]
fragment_label = "(Fragment"
contains_label = "[Contains:"
includes_label = "[Includes:"
# Process Fragment
fragment = False
if joined.endswith(fragment_label):
fragment = True
joined = joined.rsplit("(", 1)[0]
# Process Contains
contains = []
if contains_label in joined:
joined, contains_str = joined.split(contains_label)
contains_str = contains_str.strip(" ]")
contains = list(map(de_itemparser, contains_str.split("; ")))
# Process Includes
includes = []
if includes_label in joined:
joined, includes_str = joined.split(includes_label)
includes_str = includes_str.strip(" ]")
includes = list(map(de_itemparser, includes_str.split("; ")))
# Process Primary
primary = de_itemparser(joined)
result = dict(list(zip(keys, (includes, contains, fragment))))
result.update(primary)
return result | d22158e365c52976ed638c27a0a85d8a047d743d | 17,299 |
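
An illustrative call using the fragment example from the docstring, assuming the helper parsers referenced above (labeloff, join_parser, de_itemparser) are available; the printed keys follow the dictionary layout described in the docstring.

de_lines = ["DE   Dihydrodipicolinate reductase (EC 1.3.1.26) (DHPR) (Fragment)."]
record = de_parser(de_lines)
print(record["Fragment"])      # expected: True
print(record["Contains"])      # expected: [] (no '[Contains: ...]' section)
print(record["OfficalName"])   # expected: 'Dihydrodipicolinate reductase'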