| content | sha1 | id |
|---|---|---|
| stringlengths 35-762k | stringlengths 40-40 | int64 0-3.66M |
import datetime as dt
import logging
def read_burris(fh):
"""
Read Burris formatted file, from given open file handle.
Accepts comma or tab-separated files.
Parameters
----------
fh : TextIOWrapper
open file handle
Returns
-------
ChannelList
"""
all_survey_data = ChannelList()
for i, orig_line in enumerate(fh, 1):
try:
line = orig_line.strip()
if line.find(",") != -1:
vals_temp = line.split(",")
if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
continue
elif line.find("\t") != -1:
vals_temp = line.split("\t")
else:
vals_temp = line.split()
if vals_temp[0] == "Station ID" or vals_temp[0] == "Station":
continue
if len(vals_temp) == 15: # no meter operator specified
(
c_station,
c_meter,
c_date,
c_time,
c_grav,
c_dial,
c_feedback,
c_tide,
c_tilt,
_,
_,
c_height,
c_elev,
c_lat,
c_long,
                ) = range(15)  # columns 0 - 14
all_survey_data.oper.append("None")
else: # 16 values, includes meter operator.
# Numbers are columns in the imported file
(
c_station,
c_oper,
c_meter,
c_date,
c_time,
c_grav,
c_dial,
c_feedback,
c_tide,
c_tilt,
_,
_,
c_height,
c_elev,
c_lat,
c_long,
                ) = range(16)  # columns 0 - 15
all_survey_data.oper.append(vals_temp[c_oper])
if line.find("/") != -1:
date_temp = vals_temp[c_date].split("/")
elif line.find("-") != -1:
date_temp = vals_temp[c_date].split("-")
else:
date_temp = []
if int(date_temp[2]) > 999:
date_temp = [date_temp[2], date_temp[0], date_temp[1]]
elif int(date_temp[0]) > 999:
date_temp = [date_temp[0], date_temp[1], date_temp[2]]
# Else raise date error
time_temp = vals_temp[c_time].split(":")
# fill object properties:
all_survey_data.station.append(vals_temp[c_station].strip())
all_survey_data.elev.append(float(vals_temp[c_elev]))
all_survey_data.height.append(float(vals_temp[c_height]))
all_survey_data.lat.append(float(vals_temp[c_lat]))
all_survey_data.long.append(float(vals_temp[c_long]))
# remove Earth tide correction; it's added in using the @grav property
all_survey_data.raw_grav.append(
float(vals_temp[c_grav]) * 1000.0 - float(vals_temp[c_tide]) * 1000.0
)
all_survey_data.tare.append(0)
all_survey_data.etc.append(float(vals_temp[c_tide]) * 1000.0)
all_survey_data.meter_etc.append(float(vals_temp[c_tide]) * 1000.0)
all_survey_data.dial.append(float(vals_temp[c_dial]))
all_survey_data.feedback.append(float(vals_temp[c_feedback]))
            all_survey_data.sd.append(-999)  # Burris doesn't output SD, tiltx, tilty
all_survey_data.meter.append(vals_temp[c_meter])
all_survey_data.tiltx.append(float(vals_temp[c_tilt]) * 1000.0)
all_survey_data.tilty.append(0.0)
all_survey_data.temp.append(0.0)
all_survey_data.dur.append(5)
all_survey_data.rej.append(5)
all_survey_data.t.append(
date2num(
dt.datetime(
int(date_temp[0]),
int(date_temp[1]),
int(date_temp[2]),
int(time_temp[0]),
int(time_temp[1]),
int(time_temp[2]),
)
)
)
all_survey_data.keepdata.append(1)
except (IndexError, ValueError) as e:
logging.exception("Error loading Burris file at line %d", i)
logging.info("LINE: %s", line)
e.i = i
e.line = orig_line
raise e
all_survey_data.meter_type = "Burris"
return all_survey_data | 4519cf73be5663a70e76e083aa9f735f427248a6 | 17,211 |
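
A minimal usage sketch for read_burris; the file name below is a placeholder, and ChannelList (plus date2num) is assumed to come from the surrounding module:

# Hypothetical usage; "survey_burris.txt" is a made-up file name.
with open("survey_burris.txt", "r") as fh:
    survey = read_burris(fh)
print(survey.meter_type)      # "Burris"
print(len(survey.station))    # number of observations parsed
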
import re
def _remove_invalid_characters(file_name):
"""Removes invalid characters from the given file name."""
return re.sub(r'[/\x00-\x1f]', '', file_name) | 49a9f668e8142855ca4411921c0180977afe0370 | 17,212 |
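
A quick check of the sanitiser on an invented name containing a slash and a control character:

assert _remove_invalid_characters("logs/2021\x01report.txt") == "logs2021report.txt"
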
def load_actions(
file_pointer, file_metadata, target_adim, action_mismatch, impute_autograsp_action
):
"""Load states from a file given metadata and hyperparameters
Inputs:
file_pointer : file object
file_metadata : file metadata row (Pandas)
target_adim : dimensionality of action vector
action_mismatch : indicator variable (ACTION_MISMATCH) to handle action length mismatches
        impute_autograsp_action : boolean flag indicating to impute action dim if missing in primitive:"autograsp"
Outputs:
actions : np.array((T-1, action_dims))
"""
a_T, adim = file_metadata["action_T"], file_metadata["adim"]
if target_adim == adim:
return file_pointer["policy"]["actions"][:]
elif (
target_adim == adim + 1
and impute_autograsp_action
and file_metadata["primitives"] == "autograsp"
):
action_append, old_actions = (
np.zeros((a_T, 1)),
file_pointer["policy"]["actions"][:],
)
next_state = file_pointer["env"]["state"][:][1:, -1]
high_val, low_val = (
file_metadata["high_bound"][-1],
file_metadata["low_bound"][-1],
)
midpoint = (high_val + low_val) / 2.0
for t, s in enumerate(next_state):
if s > midpoint:
action_append[t, 0] = high_val
else:
action_append[t, 0] = low_val
return np.concatenate((old_actions, action_append), axis=-1)
elif adim < target_adim and (action_mismatch & ACTION_MISMATCH.PAD_ZERO):
pad = np.zeros((a_T, target_adim - adim), dtype=np.float32)
return np.concatenate((file_pointer["policy"]["actions"][:], pad), axis=-1)
elif adim > target_adim and (action_mismatch & ACTION_MISMATCH.CLEAVE):
return file_pointer["policy"]["actions"][:][:, :target_adim]
else:
raise ValueError(
"file adim - {}, target adim - {}, pad behavior - {}".format(
adim, target_adim, action_mismatch
)
) | 0b8b5dc259fa0645069cc57b4510355ec6897ab6 | 17,214 |
import pymsteams
def send_message(hookurl: str, text: str) -> int:
"""
    Send a message to the Teams channel.
    The HTTP status code is returned.
    Parameters
    ----------
    hookurl : str
        URL for the hook to the Teams channel.
    text : str
        text to send.
    Returns
    -------
    int
        HTTP status code from the sent message.
"""
msg = pymsteams.connectorcard(hookurl)
msg.text(text)
msg.send()
return msg.last_http_status.status_code | 8ffef50d745fafd125b556e9fb1ceff2cb438a4e | 17,215 |
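
A hedged usage sketch; the webhook URL below is a placeholder, and a real Teams channel must sit behind it for the call to succeed:

status = send_message(
    "https://outlook.office.com/webhook/your-hook-id",  # placeholder URL
    "Nightly build finished",
)
print(status)  # e.g. 200 on success
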
def np_fft_irfftn(a, *args, **kwargs):
"""Numpy fft.irfftn wrapper for Quantity objects.
Drop dimension, compute result and add it back."""
res = np.fft.irfftn(a.value, *args, **kwargs)
return Quantity(res, a.dimension) | fbfdfe470f09106e9589709ebae5fa19ba8a2732 | 17,216 |
def get_codec_options() -> CodecOptions:
"""
    Register all flag types in the type registry and get the :class:`CodecOptions` to be used with ``pymongo``.
:return: `CodecOptions` to be used from `pymongo`
"""
return CodecOptions(type_registry=TypeRegistry(type_registry)) | a0acd3e719ae0a4be463c71cba5eb86914348248 | 17,217 |
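
A short sketch of attaching the returned options to a pymongo collection; `type_registry` is assumed to be a module-level list of TypeCodec instances, and the connection string is a placeholder:

from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder connection string
flags = client["mydb"].get_collection("flags", codec_options=get_codec_options())
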
def get_frame_lims(x_eye, y_eye, x_nose, y_nose, view, vertical_align='eye'):
"""Automatically compute the crop parameters of a view using the eye and nose and reference.
Note that horizontal/vertical proportions are currently hard-coded.
Parameters
----------
x_eye : float
x position of the eye
y_eye : float
y position of the eye
x_nose : float
x position of the nose
y_nose : float
y position of the nose
view : str
'left' | 'right'
vertical_align : str
defines which feature controls the vertical alignment
'eye' | 'nose'
Returns
-------
tuple
- xmin (float)
- xmax (float)
- ymin (float)
- ymax (float)
"""
# horizontal proportions
edge2nose = 0.02
nose2eye = 0.33
eye2edge = 0.65
# vertical proportions
eye2top = 0.10
eye2bot = 0.90
nose2top = 0.25
nose2bot = 0.75
# horizontal calc
nose2eye_pix = np.abs(x_eye - x_nose)
edge2nose_pix = edge2nose / nose2eye * nose2eye_pix
eye2edge_pix = eye2edge / nose2eye * nose2eye_pix
total_x_pix = np.round(nose2eye_pix + edge2nose_pix + eye2edge_pix)
if view == 'left':
xmin = int(x_nose - edge2nose_pix)
xmax = int(x_eye + eye2edge_pix)
elif view == 'right':
xmin = int(x_eye - eye2edge_pix)
xmax = int(x_nose + edge2nose_pix)
    else:
        raise ValueError("view must be 'left' or 'right', got {}".format(view))
# vertical calc (assume we want a square image out)
if vertical_align == 'eye':
# based on eye
eye2top_pix = eye2top * total_x_pix
eye2bot_pix = eye2bot * total_x_pix
ymin = int(y_eye - eye2top_pix)
ymax = int(y_eye + eye2bot_pix)
else:
# based on nose
nose2top_pix = nose2top * total_x_pix
nose2bot_pix = nose2bot * total_x_pix
ymin = int(y_nose - nose2top_pix)
ymax = int(y_nose + nose2bot_pix)
return xmin, xmax, ymin, ymax | 20b3c5d74b7d4dd4b2b63c9d32f7325a199d3dee | 17,218 |
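
A worked call with invented pixel coordinates (eye to the right of the nose, 'left' view); with the hard-coded proportions above this yields (193, 496, 119, 422):

import numpy as np  # required by get_frame_lims

xmin, xmax, ymin, ymax = get_frame_lims(
    x_eye=300, y_eye=150, x_nose=200, y_nose=180, view='left')
print(xmin, xmax, ymin, ymax)  # 193 496 119 422
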
def split(time: list, value: list, step, group_hours, region=None, whole_group=False):
"""
Split and group 'step' number of averaged values 'hours' apart
:param time: time per value (hour apart)
:param value: values corresponding to time
:param step: number of group times set for each index
:param group_hours: group times into 'hours' hours
:param region: region of indices to be considered
:param whole_group: include the aggregated value of
whole time group for each of its members not just until that member
:return:
"""
splits = list() # step group times per index
size = len(time)
if size != len(value):
return -1
# direction is the sign of step
direction = np.sign(step)
# indices to be considered
region = (0, size - 1) if region is None else region
region = (max(region[0], 0), size - 1 if region[1] < 0 else region[1])
# Running group average of each index either forward (when step < 0)
# or backward (when step > 0), when whole_group = False
if not whole_group:
run_average = running_average(time, value, group_hours=group_hours,
direction=-np.sign(step), whole_group=False)
else:
run_average = []
group_time, average, group_lookup, _ = group_average(time, value, group_hours=group_hours)
group_size = len(group_time)
# init first 'steps' (for forward)
    # or duplication of the first (for backward) [whole/partial] group average as array of step values
group_time = pre_group_time = round_hour(time[region[0]], group_hours)
group_index = group_lookup[group_time]
last_index = group_index + step - direction
if step > 0:
initial_values = average[group_index:min(last_index + 1, group_size)]
if len(initial_values) != abs(step): # duplicate the last group average to reach 'step' values
initial_values += [[average[-1] * (group_size - last_index)]]
else:
initial_values = average[max(last_index, 0):group_index + 1]
if len(initial_values) != abs(step): # duplicate the first group average to reach 'step' values
initial_values = ([average[0]] * (-last_index)) + initial_values
step_values = deque(initial_values)
cur_step = 0
for i in range(region[0], region[1] + 1):
group_time = round_hour(time[i], group_hours)
if group_time != pre_group_time:
group_index = group_lookup[group_time]
last_index = group_index + step - direction
cur_step = min(step, cur_step + 1)
step_values.rotate(-1) # shift right to go toward end of groups
# duplicate the second to last value if group size is passed
# otherwise set the last value from group averages
if step > 0:
step_values[-1] = average[last_index] if last_index < group_size else step_values[-2]
else:
step_values[-1] = average[group_index]
pre_group_time = group_time
# replace the group average with partial average if the whole group is not required
if not whole_group:
if cur_step == step or step > 0:
step_values[0 if step > 0 else -1] = run_average[i]
elif group_index == 0:
# this branch is executed only for the first group for backward (few times)
step_values = deque([run_average[i]] * abs(step))
splits.append(list(step_values))
return splits | a8f8cf51d241a532e6a925d4323abb281215f543 | 17,220 |
def launch_ebs_affinity_process(instanceid, instance_infos, ebs_configs):
""" Manage the ebs affinity process.
:param instanceid string The instance id
:param instance_infos dict Informations about the instance
:param ebs_config dict The EBS parameters
:return None
"""
if not check_if_ebs_already_attached(instanceid,
ebs_configs['mount_point'],
instance_infos):
if manage_ebs_volume(ebs_configs, instanceid, instance_infos):
logger.info("EBS: {0} has been attached on the Instance-id: {1}" .format(ebs_configs['mount_point'], instanceid))
else:
logger.error("Error during the management of the EBS volume: {0}. Disk not attached to the instance: {1} " .format(ebs_configs['mount_point'], instanceid))
return False
return True
else:
logger.info("A disk is already attached on the target mount point: {0}" .format(ebs_configs['mount_point']))
return True | ec30f4748417cee8f9fe96c2c47cf78dd10be59f | 17,221 |
def get_all(isamAppliance, check_mode=False, force=False):
"""
Retrieve a list of mapping rules
"""
return isamAppliance.invoke_get("Retrieve a list of mapping rules",
"/iam/access/v8/mapping-rules") | e48aa65f5212ea32e84c40e326633cf2971d378a | 17,222 |
def get_oyente(test_subject=None, mutation=None):
"""
Run the Oyente test suite on a provided script
"""
is_request = False
if not test_subject:
test_subject = request.form.get('data')
is_request = True
o = Oyente(test_subject)
info, errors = o.oyente(test_subject)
if len(errors) > 0:
errors = [{'lineno':e[0].split(':')[1],'code':"\n".join(e[1].split('\n')[1:]),'description':e[1].split('\n')[0]} for e in errors]
if len(info) > 0:
info = [{x[0]:x[1] for x in info}]
output = {"info":info, "issues": errors, 'error':[]}
if mutation:
output['mutation'] = mutation
if is_request:
return jsonify(output)
return output | f264262c22314ac26b56369f4d7741effb4cf09e | 17,223 |
def search_for_example(search_string: str) -> tuple:
"""Get the Example for a Particular Function"""
function = match_string(search_string)
if function:
function = function.strip()
sql = f"SELECT example, comment FROM example WHERE function='{function}'"
data = execute(sql)
return function, data
else:
        return None, () | 16eb034369954017b1b51a206d48af40f5768ef6 | 17,224 |
def WildZumba(x,c1=20,c2=0.2,c3=2*np.pi) :
""" A separable R**n==>R function, assumes a real-valued numpy vector as input """
return -c1 * np.exp(-c2*np.sqrt(np.mean(x**2))) - np.exp(np.mean(np.cos(c3*x))) + c1 + np.exp(1) | 589f90f174d61269c2c019ef678f51c498c68ff8 | 17,225 |
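
As a sanity check, both exponential terms peak at the origin, so the function value there is exactly zero:

import numpy as np  # required by WildZumba

x0 = np.zeros(5)
print(WildZumba(x0))  # -20*exp(0) - exp(1) + 20 + exp(1) = 0.0
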
def import_xlsx(filename, skip_variation=False):
"""Импортирует параметры пиков, хроматограммы и варьируемых параметров, если они указаны.
Parameters
----------
filename : str
Имя xlsx файла.
skip_variation : bool, default = False
Пропустить блок Variation даже если он есть.
Returns
-------
Tuple[List[Peak], Chromatogram, dict, int, np.ndarray]
Если в xlsx файле есть блок Variation, то вернется кортеж, в который входит список из
экземпляров класса Peak, экземпляр класса Chromatogram, словарь варьируемых параметров
со списками сигм, количество файлов и массив с долями файлов в которых будут пропущенны пики.
Tuple[List[Peak], Chromatogram]
Если же блок Variation отсутствует, то вернется кортеж только из списка экземпляров класса
Peak и экземпляра класса Chromatogram.
"""
wb = ox.load_workbook(filename, data_only=True)
sheet_ranges = wb['input']
max_row = sheet_ranges.max_row
rows = list(sheet_ranges.rows)
wb.close()
def get_row(row, key):
return list(map(lambda x: x.value, rows[row][d_xl[key]['start_idx']:
d_xl[key]['end_idx']]))
def get_col(col, start_row, nn):
res = []
for i_cell in range(start_row, start_row + nn):
res.append(sheet_ranges.cell(i_cell, col).value)
return res
d_xl = {}
    # read the first row
for cell in rows[0]:
cell_value = cell.value
if cell_value is not None:
d_xl.update({cell_value: {}})
    # handle merged cells (Chromatogram, Peaks, Variation)
mcr = sheet_ranges.merged_cells.ranges
for cr in mcr:
name = cr.start_cell.value
if name in d_xl:
start_idx = cr.start_cell.col_idx - 1
cols = cr.size['columns']
end_idx = start_idx + cols
d_xl[name].update({'start_idx': start_idx, 'cols': cols, 'end_idx': end_idx})
# Chromatogram
names, values = map(lambda x: get_row(x, 'Chromatogram'), (1, 2))
d_xl['Chromatogram'].update(zip(names, values))
chrom = Chromatogram(**d_xl['Chromatogram'])
# Peaks
head_peaks = get_row(1, 'Peaks')
params_peak = {}
sep_mz_i = ';'
sep_into_mz_i = ' '
peak_list = []
for i in range(2, max_row):
params_peak.update(zip(head_peaks, get_row(i, 'Peaks')))
mz_i = np.fromstring(params_peak['mass_spect'].replace('\n', '').
replace(sep_mz_i, ''), sep=sep_into_mz_i).reshape((-1, 2))
del params_peak['mass_spect']
mz_list = mz_i[:, 0].astype(np.int16)
peak_list.append(Peak(mz_list=mz_list, intensity_list=mz_i[:, 1], **params_peak))
# Variation
if 'Variation' in d_xl and not skip_variation:
head_variation = get_row(1, 'Variation')
params_variation = {}
for par in head_variation:
params_variation.update({par: []})
for i in range(2, max_row):
for key, value in zip(head_variation, get_row(i, 'Variation')):
params_variation[key].append(value)
num_files = 0
for n, i in enumerate(rows[0]):
if i.value in ('Num_files', 'Num files'):
num_files = rows[1][n].value
break
# Missing
miss = np.zeros(max_row)
for n, i in enumerate(rows[0]):
if i.value in ('Missing', 'missing', 'miss'):
miss = np.array(get_col(n + 1, 3, len(peak_list)))
break
return peak_list, chrom, params_variation, num_files, miss
return peak_list, chrom | 75b32618274fb2ab7ede9f525856fdc13e8c97ee | 17,226 |
from typing import Union
from typing import Optional
def _get_dataset_builder(
dataset: Union[str, tfds.core.DatasetBuilder],
data_dir: Optional[str] = None) -> tfds.core.DatasetBuilder:
"""Returns a dataset builder."""
if isinstance(dataset, str):
dataset_builder = tfds.builder(dataset, data_dir=data_dir)
elif isinstance(dataset, tfds.core.DatasetBuilder):
dataset_builder = dataset
else:
raise ValueError("`dataset` must be a string or tfds.core.DatasetBuilder. "
f"Received {dataset} instead.")
return dataset_builder | 0f17169541604e69a614ddfeee4c8a963834ed8e | 17,227 |
def write_bbox(scene_bbox, out_filename):
"""Export scene bbox to meshes
Args:
scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
out_filename: (string) filename
Note:
To visualize the boxes in MeshLab.
1. Select the objects (the boxes)
2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
3. Select Wireframe view.
"""
def convert_box_to_trimesh_fmt(box):
ctr = box[:3]
lengths = box[3:]
trns = np.eye(4)
trns[0:3, 3] = ctr
trns[3, 3] = 1.0
box_trimesh_fmt = trimesh.creation.box(lengths, trns)
return box_trimesh_fmt
scene = trimesh.scene.Scene()
for box in scene_bbox:
scene.add_geometry(convert_box_to_trimesh_fmt(box))
mesh_list = trimesh.util.concatenate(scene.dump())
# save to ply file
trimesh.io.export.export_mesh(mesh_list, out_filename, file_type="ply")
return | c7504260306495e6252569a3cb83f61ca084de26 | 17,228 |
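
A minimal sketch writing two boxes to a PLY file; the file name is arbitrary, and it assumes a trimesh version that still exposes the trimesh.io.export path used above:

import numpy as np
import trimesh  # required by write_bbox

boxes = np.array([
    [0.0, 0.0, 0.0, 1.0, 1.0, 1.0],   # unit cube centred at the origin
    [2.0, 0.0, 0.5, 0.5, 0.5, 2.0],
])
write_bbox(boxes, "scene_bboxes.ply")
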
def label_rotate(annot, rotate):
"""
    Rotate the occlusion-order annotation anti-clockwise by rotate*90 degrees.
:param annot: (H, W, 9) ; [-1, 0, 1]
:param rotate: value in [0, 1, 2, 3]
:return:
"""
rotate = int(rotate)
if rotate == 0:
return annot
else:
annot_rot = np.rot90(annot, rotate)
orientation = annot_rot[:, :, 1:].copy()
if rotate == 1:
mapping = [2, 4, 7, 1, 6, 0, 3, 5]
elif rotate == 2:
mapping = [7, 6, 5, 4, 3, 2, 1, 0]
else:
mapping = [5, 3, 0, 6, 1, 7, 4, 2]
annot_rot[:, :, 1:] = orientation[:, :, mapping]
return annot_rot | e37a2e9dddc5f19898691fe22d02978d1954d435 | 17,229 |
def allocate_available_excess(region):
"""
Allocate available excess capital (if any).
"""
difference = region['total_revenue'] - region['total_cost']
if difference > 0:
region['available_cross_subsidy'] = difference
region['deficit'] = 0
else:
region['available_cross_subsidy'] = 0
region['deficit'] = abs(difference)
return region | 19a3d7fbc776ae5b5b47ecfc32db14bf4abd949e | 17,230 |
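
For example, a region whose revenue exceeds its cost has the surplus recorded as available cross-subsidy and a zero deficit (numbers invented):

region = {'total_revenue': 120, 'total_cost': 100}
print(allocate_available_excess(region))
# {'total_revenue': 120, 'total_cost': 100, 'available_cross_subsidy': 20, 'deficit': 0}
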
def items(dic):
"""Py 2/3 compatible way of getting the items of a dictionary."""
try:
return dic.iteritems()
except AttributeError:
return iter(dic.items()) | 2664567765efe172591fafb49a0efa36ab9fcca8 | 17,231 |
import json
import binascii
def new_settingsresponse_message(loaded_json, origin):
"""
takes in a request - executes search for settings and creates a response as bytes
:param loaded_json:
:param origin: is this a response of drone or groundstation
:return: a complete response packet as bytes
"""
complete_response = {}
complete_response['destination'] = 4
complete_response['type'] = DBCommProt.DB_TYPE_SETTINGS_RESPONSE.value
complete_response['response'] = loaded_json['request']
complete_response['origin'] = origin
complete_response['id'] = loaded_json['id']
if loaded_json['request'] == DBCommProt.DB_REQUEST_TYPE_DB.value:
if 'settings' in loaded_json:
complete_response = read_dronebridge_settings(complete_response, origin, True, loaded_json['settings'])
else:
complete_response = read_dronebridge_settings(complete_response, origin, False, None)
elif loaded_json['request'] == DBCommProt.DB_REQUEST_TYPE_WBC.value:
if 'settings' in loaded_json:
complete_response = read_wbc_settings(complete_response, True, loaded_json['settings'])
else:
complete_response = read_wbc_settings(complete_response, False, None)
response = json.dumps(complete_response)
crc32 = binascii.crc32(str.encode(response))
return response.encode() + crc32.to_bytes(4, byteorder='little', signed=False) | 812444353a50ffeb468398d8681e81a74cb9d7e9 | 17,232 |
def list_icmp_block(zone, permanent=True):
"""
List ICMP blocks on a zone
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
        salt '*' firewalld.list_icmp_block zone
"""
cmd = "--zone={0} --list-icmp-blocks".format(zone)
if permanent:
cmd += " --permanent"
return __firewall_cmd(cmd).split() | 9f0f8c2e7a688263ddd31c8384babba25e2300e6 | 17,233 |
def dependencies_found(analysis_id, execution_id):
"""
Installation data from buildbot.
Requires a JSON list of objects with the following keys:
* installer: The system used to install the dependency.
* spec: The full specification used by the user to request the
package.
* source: Entity providing the artifact.
* name: The real package name.
* version: The installed version of the package.
.. note:: Internal API
"""
installations = bottle.request.json
if installations:
# Create database objects returning a list of scanneable artifacts.
artifacts = register_installations(analysis_id, execution_id,
installations)
analysis_needed = {a for a in artifacts if a.analysis_needed()}
# Launch dependency scan and mark done when finished.
analysis_task = (
providers.analyze_artifacts(analysis_needed) # <- group of tasks
| tasks.mark_task_done.si(analysis_id)).delay()
return {'task_id': analysis_task.id, 'scanning': len(analysis_needed)}
else:
return {'task_id': None, 'scanning': 0} | 8983e53dd558b42272d4886412e918e7b192754e | 17,234 |
def set_augmentor():
"""
Set the augmentor.
1. Select the operations and create the config dictionary
2. Pass it to the Augmentor class with any other information that requires
3. Return the instance of the class.
:return:
"""
config = {'blur': {'values': ('gaussian', 0.7, 1.0), 'prob': 0.3},
'brightness': {'values': (0.6, 1.0), 'prob': 0.1},
'brightness1': {'values': (1.0, 1.5), 'prob': 0.1},
'flip': {'values': ('hor',), 'prob': 0.5},
'grid_mask': {'values': (0, 0.2, 0, 0.2, 0.01, 0.1, 0.01, 0.1, 0.1, 0.2, 0.1, 0.2), 'prob': 0.4},
'illumination': {'values': ('blob_negative', 0.1, 0.2, 100, 150), 'prob': 0.2},
'noise': {'values': (2, 10), 'use_gray_noise': True, 'prob': 1},
'rotate': {'values': (-45, 45), 'prob': 0.4},
'translate': {'values': ('RANDOM', -0.2, 0.2), 'prob': 0.2, 'use_replication': True},
'zoom': {'values': (0.5, 1.5), 'prob': 0.9, 'use_replication': True}}
augmentor = Augmentor(config, no_repetition=True)
return augmentor | 77c64cec87af2d41a4cc6dc55600ab5eaedad247 | 17,235 |
def get_global_event_logger_instance():
"""Get an event logger with prefilled fields for the collection.
This returns an options configured event logger (proxy) with prefilled
fields. This is almost CERTAINLY the event logger that you want to use in
zaza test functions.
:returns: a configured LoggerInstance with prefilled collection and unit
fields.
:rtype: LoggerInstance
"""
return get_global_events_logging_manager().get_event_logger_instance() | 66228b15dd4d1ac9468834124e4ba073a846580f | 17,236 |
def plot_market_entry(cat_entry_and_exit_df, cat_entry_and_exit_df_2):
"""
returns a plot with the entry and exit of firms per category
"""
# get the limits so everything is on the same scale
df = pd.concat([cat_entry_and_exit_df, cat_entry_and_exit_df_2])
limits = [-df.exit.max() - 0.3, df.entry.max() + 0.3]
fig = tools.make_subplots(rows=1, cols=2)
xs = cat_entry_and_exit_df.index
new_per_cat = cat_entry_and_exit_df.entry.astype(int)
dead_per_cat = cat_entry_and_exit_df.exit.astype(int)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} entries in category {}'.format(x, y)
for x, y in zip(new_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[0]}), 1, 1)
fig.append_trace(
go.Bar(y=xs, x=-dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} exits in category {}'.format(x, y)
for x, y in zip(dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[0]}), 1, 1)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat - dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} net entries in category {}'.format(x, y)
for x, y in zip(new_per_cat - dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': dark_scen_colours[0]}), 1, 1)
xs = cat_entry_and_exit_df_2.index
new_per_cat = cat_entry_and_exit_df_2.entry.astype(int)
dead_per_cat = cat_entry_and_exit_df_2.exit.astype(int)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} entries in category {}'.format(x, y)
for x, y in zip(new_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[1]}), 1, 2)
fig.append_trace(
go.Bar(y=xs, x=-dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} exits in category {}'.format(x, y)
for x, y in zip(dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': scen_colours[1]}), 1, 2)
fig.append_trace(
go.Bar(y=xs, x=new_per_cat - dead_per_cat, orientation='h', showlegend=False, hoverinfo='text',
hovertext=['{} net entries in category {}'.format(x, y)
for x, y in zip(new_per_cat - dead_per_cat, np.arange(len(new_per_cat)))],
marker={'color': dark_scen_colours[1]}), 1, 2)
fig['layout']['xaxis2'].update(title="Number of companies", range=limits)
fig['layout']['xaxis1'].update(title="Number of companies", range=limits)
fig['layout']['yaxis1'].update(title="Product category")
fig['layout'].update(title='Market entry and exit per product category')
fig['layout']['font'].update(family='HelveticaNeue')
fig['layout'].update(barmode='overlay')
return fig | c1b1ad00c1dbdde804e4d594dda4ae6525c7779f | 17,237 |
import io
import cv2
import numpy as np
from PIL import Image
def find_elements_by_image(self, filename):
"""
    Locate all occurrences of an image in the webpage.
:Args:
- filename: The path to the image to search (image shall be in PNG format).
:Returns:
A list of ImageElement.
"""
template = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
template_height, template_width, _ = template.shape
webpage_png = self.get_screenshot_as_png()
webpage_img = Image.open(io.BytesIO(webpage_png))
webpage = np.asarray(webpage_img, dtype=np.float32).astype(np.uint8)
webpage = cv2.cvtColor(webpage, cv2.COLOR_BGR2RGB)
return [
ImageElement(self, loc[0], loc[1], template_width, template_height)
for loc in match_template(webpage, template)
] | 23137766b68068c8cb78bb57127bfa6040bace70 | 17,238 |
from typing import Set
from typing import Sequence
def compile_tf_signature_def_saved_model(
saved_model_dir: str, saved_model_tags: Set[str], module_name: str,
exported_name: str, input_names: Sequence[str],
output_names: Sequence[str]) -> Modules:
"""Compiles a SignatureDef SavedModel to each backend that we test.
Args:
saved_model_dir: Directory of the saved model.
saved_model_tags: Optional set of tags to use when loading the model.
module_name: A name for this compiled module.
exported_name: A str representing the signature on the saved model to
compile.
input_names: A sequence of kwargs to feed to the saved model.
output_names: A sequence of named outputs to extract from the saved model.
Returns:
A 'Modules' dataclass containing the reference module, target modules and
artifacts directory.
"""
global _global_modules
if _global_modules is not None:
return _global_modules
# Setup the directory for saving compilation artifacts and traces.
artifacts_dir = _setup_artifacts_dir(module_name)
# Get the backend information for this test.
ref_backend_info = module_utils.BackendInfo(FLAGS.reference_backend,
f"{FLAGS.reference_backend}_ref")
tar_backend_infos = get_target_backends()
compile_backend = (
lambda backend_info: backend_info.compile_signature_def_saved_model(
saved_model_dir, saved_model_tags, module_name, exported_name,
input_names, output_names, artifacts_dir))
ref_module = compile_backend(ref_backend_info)
tar_modules = [
compile_backend(backend_info) for backend_info in tar_backend_infos
]
_global_modules = Modules(ref_module, tar_modules, artifacts_dir)
return _global_modules | ed1a1efc28c9ae473d76c700ab7781f141fc3765 | 17,239 |
def origtime2float(time):
""" converts current datetime to float
>>> import datetime
>>> t = datetime.datetime(2010, 8, 5, 14, 45, 41, 778877)
>>> origtime2float(t)
53141.778876999997
"""
t3fmt = time.strftime("%H:%M:%S:%f")
return time2float(t3fmt) | 03cadf1f686fde1dd46cbb52fd71adcc2f06585c | 17,240 |
def discrete_fourier_transform1(freq, tvec, dvec, log=False):
"""
Calculate the Discrete Fourier transform (slow scales with N^2)
The DFT is normalised to have the mean value of the data at zero frequency
    :param freq: numpy array, frequency grid calculated from the time vector
    :param tvec: numpy array or list, input time (independent) vector, normalised
                 by the mean of the time vector
    :param dvec: numpy array or list, input dependent vector, normalised by the
                 mean of the data vector
    :param log: boolean, if True prints progress to standard output,
                if False silent
    :return wfn: numpy array of complex numbers, spectral window function
    :return dft: numpy array of complex numbers, "dirty" discrete Fourier
                 transform
"""
# deal with logging
if log:
print('\n\t Calculating Discrete Fourier Transform...')
# -------------------------------------------------------------------------
# Code starts here
# -------------------------------------------------------------------------
wfn = np.zeros(len(freq), dtype=complex)
dft = np.zeros(int(len(freq)/2), dtype=complex)
for i in __tqdmlog__(range(len(freq)), log):
phase = -2*np.pi*freq[i]*tvec
phvec = np.array(np.cos(phase) + 1j * np.sin(phase))
if i < int(len(freq)/2):
wfn[i] = np.sum(phvec)/len(tvec)
dft[i] = np.sum(dvec*phvec)/len(tvec)
# complete the spectral window function
else:
wfn[i] = np.sum(phvec)/len(tvec)
return wfn, dft | b5e1bafe1ba2b8863ac97bb95c204ca84877b8fd | 17,241 |
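
A usage sketch on a synthetic sinusoid; it only runs alongside the module's __tqdmlog__ helper, and the frequency grid is built twice as long as the returned DFT, as the loop above expects:

import numpy as np

N = 200
tvec = np.arange(N) / N                  # one unit of time, uniformly sampled
dvec = np.sin(2 * np.pi * 10 * tvec)     # 10-cycle sinusoid
freq = np.arange(2 * N) / 2.0            # grid; only the first half (0-99.5) maps to dft
wfn, dft = discrete_fourier_transform1(freq, tvec, dvec)
print(freq[np.argmax(np.abs(dft))])      # 10.0, the injected frequency
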
from typing import List
def ngram_overlaps(a: List[str], b: List[str], threshold: int = 3) -> List[int]:
"""
    Compute, for each string in `a`, the index of the best-matching string in `b`,
    based on n-gram overlap where 'n' is defined by the passed-in threshold.
"""
def get_ngrams(text):
"""
Get a set of all the ngrams in the text
"""
return set(" ".join(g) for g in grouper(text.split(), threshold))
overlaps = []
remaining = set(range(len(b)))
for text in a:
best_idx = -1
best_overlap = 0
ngrams = get_ngrams(text)
for idx in remaining:
ngram_overlap = len(ngrams & get_ngrams(b[idx]))
if ngram_overlap > best_overlap:
best_idx = idx
best_overlap = ngram_overlap
if best_idx >= 0:
overlaps.append(best_idx)
remaining.remove(best_idx)
return overlaps | 87621e28a4a5d2cba5bb66c6bfa9834c711a7ecf | 17,242 |
def ssq_cwt(x, wavelet='morlet', scales='log', nv=None, fs=None, t=None,
ssq_freqs=None, padtype='symmetric', squeezing='sum',
difftype='direct', difforder=None, gamma=None):
"""Calculates the synchrosqueezed Continuous Wavelet Transform of `x`.
Implements the algorithm described in Sec. III of [1].
# Arguments:
x: np.ndarray
Vector of signal samples (e.g. x = np.cos(20 * np.pi * t))
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
scales: str['log', 'linear'] / np.ndarray
CWT scales.
- 'log': exponentially distributed scales, as pow of 2:
`[2^(1/nv), 2^(2/nv), ...]`
- 'linear': linearly distributed scales.
!!! EXPERIMENTAL; default scheme for len(x)>2048 performs
poorly (and there may not be a good non-piecewise scheme).
nv: int / None
Number of voices (CWT only). Suggested >= 32 (default=32).
fs: float / None
Sampling frequency of `x`. Defaults to 1, which makes ssq
frequencies range from 1/dT to 0.5, i.e. as fraction of reference
sampling rate up to Nyquist limit; dT = total duration (N/fs).
Overridden by `t`, if provided.
Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Overrides `fs` if not None.
ssq_freqs: str['log', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
padtype: str
Pad scheme to apply on input. One of:
('zero', 'symmetric', 'replicate').
'zero' is most naive, while 'symmetric' (default) partly mitigates
boundary effects. See `padsignal`.
squeezing: str['sum', 'lebesgue']
- 'sum' = standard synchrosqueezing using `Wx`.
- 'lebesgue' = as in [4], setting `Wx=ones()/len(Wx)`, which is
not invertible but has better robustness properties in some cases.
Not recommended unless you know what you're doing.
difftype: str['direct', 'phase', 'numerical']
Method by which to differentiate Wx (default='direct') to obtain
instantaneous frequencies:
w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
- 'direct': use `dWx`, obtained via frequency-domain
differentiation (see `cwt`, `phase_cwt`).
- 'phase': differentiate by taking forward finite-difference of
unwrapped angle of `Wx` (see `phase_cwt`).
- 'numerical': first-, second-, or fourth-order (set by
`difforder`) numeric differentiation (see `phase_cwt_num`).
difforder: int[1, 2, 4]
Order of differentiation for difftype='numerical' (default=4).
gamma: float / None
CWT phase threshold. Sets `w=inf` for small values of `Wx` where
phase computation is unstable and inaccurate (like in DFT):
w[abs(Wx) < beta] = inf
This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
contributions from points with indeterminate phase.
Default = sqrt(machine epsilon) = np.sqrt(np.finfo(np.float64).eps)
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
Wx: np.ndarray [na x n]
Continuous Wavelet Transform of `x` L2-normed (see `cwt`);
to L1-norm, `Wx /= np.sqrt(scales)`
scales: np.ndarray [na]
Scales associated with rows of `Wx`.
w: np.ndarray [na x n]
Phase transform for each element of `Wx`.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
def _process_args(N, fs, t, nv, difftype, difforder, squeezing):
if difftype not in ('direct', 'phase', 'numerical'):
raise ValueError("`difftype` must be one of: direct, phase, numerical"
" (got %s)" % difftype)
if difforder is not None:
if difftype != 'numerical':
WARN("`difforder` is ignored if `difftype != 'numerical'")
elif difforder not in (1, 2, 4):
raise ValueError("`difforder` must be one of: 1, 2, 4 "
"(got %s)" % difforder)
elif difftype == 'numerical':
difforder = 4
if squeezing not in ('sum', 'lebesgue'):
raise ValueError("`squeezing` must be one of: sum, lebesgue "
"(got %s)" % squeezing)
dt, fs, t = _process_fs_and_t(fs, t, N)
nv = nv or 32
return dt, fs, difforder, nv
def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
if difftype == 'direct':
# calculate instantaneous frequency directly from the
# frequency-domain derivative
w = phase_cwt(Wx, dWx, difftype, gamma)
elif difftype == 'phase':
# !!! bad; yields negatives, and forcing abs(w) doesn't help
# calculate inst. freq. from unwrapped phase of CWT
w = phase_cwt(Wx, None, difftype, gamma)
elif difftype == 'numerical':
# !!! tested to be very inaccurate for small `a`
# calculate derivative numerically
_, n1, _ = p2up(N)
Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
w = phase_cwt_num(Wx, dt, difforder, gamma)
return Wx, w
N = len(x)
dt, fs, difforder, nv = _process_args(N, fs, t, nv, difftype, difforder,
squeezing)
scales, cwt_scaletype, *_ = process_scales(scales, N, nv=nv, get_params=True)
# l1_norm=False to spare a multiplication; for SSWT L1 & L2 are exactly same
# anyway since we're inverting CWT over time-frequency plane
rpadded = (difftype == 'numerical')
Wx, scales, _, dWx = cwt(x, wavelet, scales=scales, fs=fs, l1_norm=False,
derivative=True, padtype=padtype, rpadded=rpadded)
gamma = gamma or np.sqrt(EPS)
Wx, w = _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder)
if ssq_freqs is None:
# default to same scheme used by `scales`
ssq_freqs = cwt_scaletype
Tx, ssq_freqs = ssqueeze(Wx, w, scales=scales, fs=fs, ssq_freqs=ssq_freqs,
transform='cwt', squeezing=squeezing)
if difftype == 'numerical':
Wx = Wx[:, 4:-4]
w = w[:, 4:-4]
Tx = Tx[:, 4:-4]
return Tx, ssq_freqs, Wx, scales, w | 8af5caea64e9a861f7702f52c50681e61322658c | 17,243 |
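
A hedged usage sketch in the ssqueezepy style documented above; the test signal is invented, and the call relies on the module's cwt/ssqueeze helpers being importable next to this function:

import numpy as np

fs = 1024
t = np.arange(2048) / fs
x = np.cos(2 * np.pi * (32 + 16 * t) * t)        # gently frequency-ramping tone
Tx, ssq_freqs, Wx, scales, w = ssq_cwt(x, wavelet='morlet', nv=32, fs=fs)
print(Tx.shape, Wx.shape)                        # (n_freqs, 2048), (n_scales, 2048)
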
import random
import logging
import time
def request_retry_decorator(fn_to_call, exc_handler):
"""A generic decorator for retrying cloud API operations with consistent repeatable failure
patterns. This can be API rate limiting errors, connection timeouts, transient SSL errors, etc.
Args:
fn_to_call: the function to call and wrap around
exc_handler: a bool return function to check if the passed in exception is retriable
"""
def wrapper(*args, **kwargs):
MAX_ATTEMPTS = 10
SLEEP_SEC_MIN = 5
SLEEP_SEC_MAX = 15
for i in range(1, MAX_ATTEMPTS + 1):
try:
return fn_to_call(*args, **kwargs)
except Exception as e:
if i < MAX_ATTEMPTS and exc_handler(e):
sleep_duration_sec = \
SLEEP_SEC_MIN + random.random() * (SLEEP_SEC_MAX - SLEEP_SEC_MIN)
                    logging.warning(
"API call failed, waiting for {} seconds before re-trying (this was attempt"
" {} out of {}).".format(sleep_duration_sec, i, MAX_ATTEMPTS))
time.sleep(sleep_duration_sec)
continue
raise e
return wrapper | 0813cc19d9826275917c9eb701683a73bfe597f9 | 17,244 |
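
A usage sketch wrapping a flaky cloud call; RateLimitError and list_instances are placeholders standing in for a provider-specific exception and API function:

class RateLimitError(Exception):
    """Placeholder for a provider-specific rate-limit exception."""

def list_instances(project):
    """Placeholder for a real cloud API call."""
    ...

list_instances_with_retries = request_retry_decorator(
    list_instances, lambda e: isinstance(e, RateLimitError))
# list_instances_with_retries("my-project") now retries up to 10 times on RateLimitError.
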
import datetime
def last_day_of_month(d):
""" From: https://stackoverflow.com/a/43088/6929343 """
if d.month == 12:
return d.replace(day=31)
return d.replace(month=d.month+1, day=1) - datetime.timedelta(days=1) | a97ce3bdbcd9d5cb707919750ecc818de04deb7e | 17,248 |
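
Two quick checks with arbitrary dates (one mid-year, one in December):

import datetime

print(last_day_of_month(datetime.date(2021, 2, 10)))   # 2021-02-28
print(last_day_of_month(datetime.date(2020, 12, 5)))   # 2020-12-31
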
import google.auth
import google.auth.transport.requests
def get_access_token():
"""Return access token for use in API request.
Raises:
requests.exceptions.ConnectionError.
"""
credentials, _ = google.auth.default(scopes=[
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/cloud-platform.read-only'
])
request = google.auth.transport.requests.Request()
credentials.refresh(request)
return credentials.token | 907aacd6d9976390b2896149179c84ea9bd3d0fc | 17,250 |
def unknown_action(player: Player, table: dynamodb.Table) -> ActionResponse:
"""
Do nothing because the action could not be resolved.
In the message list, returns a message saying the action was bad.
:return: Original inputs matching updated inputs, and a message
"""
message = ["Action could not be resolved, type better next time"]
return player, player, {}, {}, message | ea2a03d140eea2853b77da492ea0f403fc9c6ad9 | 17,252 |
def forecast_marginal_bindglm(mod, n, k, X=None, nsamps=1, mean_only=False):
"""
Marginal forecast function k steps ahead for a binomial DGLM
"""
# Plug in the correct F values
F = update_F(mod, X, F=mod.F.copy())
# F = np.copy(mod.F)
# if mod.nregn > 0:
# F[mod.iregn] = X.reshape(mod.nregn,1)
# Evolve to the prior for time t + k
a, R = forecast_aR(mod, k)
# Mean and variance
ft, qt = mod.get_mean_and_var(F, a, R)
# Choose conjugate prior, match mean and variance
param1, param2 = mod.get_conjugate_params(ft, qt, mod.param1, mod.param2)
if mean_only:
return mod.get_mean(n, param1, param2)
# Simulate from the forecast distribution
return mod.simulate(n, param1, param2, nsamps) | dc8c82fc17465c4a22e7bc5d46cf9b5abd9abd54 | 17,253 |
def import_file(isamAppliance, id, filename, check_mode=False, force=False):
"""
Importing a file in the runtime template files directory.
"""
warnings = []
check_file = _check(isamAppliance, id)
    if check_file is not None and force is False:
        warnings.append("File {0} exists.".format(id))
if force is True or _check_import(isamAppliance, id, filename, check_mode=check_mode):
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post_files(
"Importing a file in the runtime template files directory",
"/mga/template_files/{0}".format(id),
[
{
'file_formfield': 'file',
'filename': filename,
'mimetype': 'application/octet-stream'
}
],
{
'type': 'file',
'force': force
})
return isamAppliance.create_return_object(warnings=warnings) | 3978e5476a7344ef4c92d2cec3852a57850380df | 17,254 |
def get_func_from_attrdict(func_name : str, attrdict : AttrDict) -> ObjectiveFunction1D:
"""
Given a string func_name, attempts to find the corresponding entry from attrdict.
    :param func_name: name of the objective function to look up
    :param attrdict: AttrDict mapping keys to objective functions
    :returns: the matching ObjectiveFunction1D, or None if no entry matches
"""
for key, val in attrdict.items():
if val.name == func_name:
return val | bb4b03371d5fbb642864d7c9e77d4172fee92315 | 17,255 |
from fdk import runner
import io
def event_handle(handle_code):
"""
Performs HTTP request-response procedure
:param handle_code: customer's code
:type handle_code: fdk.customer_code.Function
:return: None
"""
async def pure_handler(request):
log.log("in pure_handler")
headers = dict(request.headers)
log_frame_header(headers)
func_response = await runner.handle_request(
handle_code, constants.HTTPSTREAM,
headers=headers, data=io.BytesIO(request.body))
log.log("request execution completed")
headers = func_response.context().GetResponseHeaders()
status = func_response.status()
if status not in constants.FN_ENFORCED_RESPONSE_CODES:
status = constants.FN_DEFAULT_RESPONSE_CODE
return response.HTTPResponse(
headers=headers,
status=status,
content_type=headers.get(constants.CONTENT_TYPE),
body_bytes=func_response.body_bytes(),
)
return pure_handler | 03ca9cff4b7993e92c146565cb697a080d40c5ef | 17,256 |
import torch
def load_precomputed_embeddings(det_df, seq_info_dict, embeddings_dir, use_cuda):
"""
Given a sequence's detections, it loads from disk embeddings that have already been computed and stored for its
detections
Args:
det_df: pd.DataFrame with detection coordinates
seq_info_dict: dict with sequence meta info (we need frame dims)
embeddings_dir: name of the directory where embeddings are stored
Returns:
torch.Tensor with shape (num_detects, embeddings_dim)
"""
# Retrieve the embeddings we need from their corresponding locations
embeddings_path = osp.join(
seq_info_dict["seq_path"],
"processed_data",
"embeddings",
seq_info_dict["det_file_name"],
embeddings_dir,
)
# print("EMBEDDINGS PATH IS ", embeddings_path)
frames_to_retrieve = sorted(det_df.frame.unique())
embeddings_list = [
torch.load(osp.join(embeddings_path, f"{frame_num}.pt"))
for frame_num in frames_to_retrieve
]
embeddings = torch.cat(embeddings_list, dim=0)
# First column in embeddings is the index. Drop the rows of those that are not present in det_df
ixs_to_drop = list(
set(embeddings[:, 0].int().numpy()) - set(det_df["detection_id"])
)
embeddings = embeddings[
~np.isin(embeddings[:, 0], ixs_to_drop)
] # Not so clean, but faster than a join
assert_str = "Problems loading embeddings. Indices between query and stored embeddings do not match. BOTH SHOULD BE SORTED!"
assert (embeddings[:, 0].numpy() == det_df["detection_id"].values).all(), assert_str
embeddings = embeddings[:, 1:] # Get rid of the detection index
return embeddings.to(
torch.device("cuda" if torch.cuda.is_available() and use_cuda else "cpu")
) | 685eb63ca23d634345304575a148e72a8172567e | 17,257 |
def _model_gpt(size=0, dropout_rate=0.0, attention_dropout_rate=0.0):
"""Configs for a variety of Transformer model sizes."""
num_layers = [1, 3, 6, 12, 24, 36, 48][size]
dim = [64, 128, 512, 768, 1024, 1280, 1600][size]
num_heads = int(dim / 64) # Always dim 64 per head
return _transformer(
emb_dim=dim,
num_heads=num_heads,
num_layers=num_layers,
qkv_dim=dim,
mlp_dim=dim * 4,
dropout_rate=dropout_rate,
attention_dropout_rate=attention_dropout_rate) | 7bc9eab929b8c48ca4b8ff671e9e0885c0d2bc44 | 17,258 |
def test_module(client, demisto_args: dict):
"""
Test the OMWS Client connection by attempting to query a common username
"""
d = client.query_profile_data("maneenus")
if d:
return 'ok'
else:
raise DemistoException("Incorrect or empty API response") | 59e653c8fb5c40ee84a5945e5e4b0410418248ec | 17,259 |
def as_string(raw_data):
"""Converts the given raw bytes to a string (removes NULL)"""
return bytearray(raw_data[:-1]) | 6610291bb5b71ffc0be18b4505c95653bdac4c55 | 17,260 |
import math
def generate_trapezoid_profile(max_v, time_to_max_v, dt, goal):
"""Creates a trapezoid profile with the given constraints.
Returns:
t_rec -- list of timestamps
x_rec -- list of positions at each timestep
v_rec -- list of velocities at each timestep
a_rec -- list of accelerations at each timestep
Keyword arguments:
max_v -- maximum velocity of profile
time_to_max_v -- time from rest to maximum velocity
dt -- timestep
goal -- final position when the profile is at rest
"""
t_rec = [0.0]
x_rec = [0.0]
v_rec = [0.0]
a_rec = [0.0]
a = max_v / time_to_max_v
time_at_max_v = goal / max_v - time_to_max_v
# If profile is short
if max_v * time_to_max_v > goal:
time_to_max_v = math.sqrt(goal / a)
time_from_max_v = time_to_max_v
time_total = 2.0 * time_to_max_v
profile_max_v = a * time_to_max_v
else:
time_from_max_v = time_to_max_v + time_at_max_v
time_total = time_from_max_v + time_to_max_v
profile_max_v = max_v
while t_rec[-1] < time_total:
t = t_rec[-1] + dt
t_rec.append(t)
if t < time_to_max_v:
# Accelerate up
a_rec.append(a)
v_rec.append(a * t)
elif t < time_from_max_v:
# Maintain max velocity
a_rec.append(0.0)
v_rec.append(profile_max_v)
elif t < time_total:
# Accelerate down
decel_time = t - time_from_max_v
a_rec.append(-a)
v_rec.append(profile_max_v - a * decel_time)
else:
a_rec.append(0.0)
v_rec.append(0.0)
x_rec.append(x_rec[-1] + v_rec[-1] * dt)
return t_rec, x_rec, v_rec, a_rec | 5851cfab06e20a9e79c3a321bad510d33639aaca | 17,262 |
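
For instance, a profile that reaches 2 units/s after 0.5 s and travels 3 units in total:

t, x, v, a = generate_trapezoid_profile(max_v=2.0, time_to_max_v=0.5, dt=0.01, goal=3.0)
print(round(x[-1], 2), max(v))   # final position ~3.0, peak velocity 2.0
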
import logging
def get_logger(name: str,
format_str: str = aps_logger_format,
date_format: str = aps_time_format,
file: bool = False) -> logging.Logger:
"""
Get logger instance
Args:
name: logger name
format_str|date_format: to configure logging format
file: if true, treat name as the name of the logging file
"""
def get_handler(handler):
handler.setLevel(logging.INFO)
formatter = logging.Formatter(fmt=format_str, datefmt=date_format)
handler.setFormatter(formatter)
return handler
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
    logger.addHandler(get_handler(logging.StreamHandler()))
# both stdout & file
if file:
logger.addHandler(get_handler(logging.FileHandler(name)))
return logger | 06d673473c7014d6373003bf924fdf2dc9965baf | 17,265 |
import torch
def to_tensor(args, device=None):
"""Convert an arg or sequence of args to torch Tensors
"""
singleton = not isinstance(args, (list, tuple))
if singleton:
args = [args]
tensor_args = []
for arg in args:
if isinstance(arg, torch.Tensor):
tensor_args.append(arg)
elif _is_numeric(arg):
if isinstance(arg, np.ndarray) and arg.dtype == np.float64:
tensor_args.append(
torch.tensor(arg, dtype=torch.float32, device=device)
)
else:
tensor_args.append(torch.tensor(arg, device=device))
else:
raise ValueError("Received non-numeric argument ", arg)
return tensor_args[0] if singleton else tensor_args | d85d842c095aa3c942f94e61c79d2bbeb49bc41d | 17,266 |
def main(self):
"""
to run:
kosmos 'j.data.bcdb.test(name="meta_test")'
"""
bcdb, _ = self._load_test_model()
assert len(bcdb.get_all()) == 0
assert len(bcdb.meta._data["url"]) == 7
s = list(j.data.schema._url_to_md5.keys())
assert "despiegk.test" in s
m = bcdb.model_get(url="despiegk.test")
schema_text = """
@url = jumpscale.schema.test.a
category**= ""
txt = ""
i = 0
"""
s = bcdb.schema_get(schema=schema_text)
assert s.properties_unique == []
bcdb.meta._schema_set(s)
assert len(bcdb.meta._data["url"]) == 8
assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
schema = bcdb.model_get(url="jumpscale.schema.test.a")
o = schema.new()
assert "jumpscale.schema.test.a" in j.data.schema._url_to_md5
assert "jumpscale.bcdb.circle.2" in j.data.schema._url_to_md5
s0 = bcdb.schema_get(url="jumpscale.schema.test.a")
s0md5 = s0._md5 + ""
model = bcdb.model_get(schema=s0)
assert bcdb.get_all() == [] # just to make sure its empty
assert len(bcdb.meta._data["url"]) == 8
a = model.new()
a.category = "acat"
a.txt = "data1"
a.i = 1
a.save()
a2 = model.new()
a2.category = "acat2"
a2.txt = "data2"
a2.i = 2
a2.save()
assert len([i for i in model.index.model.find()]) == 2
myid = a.id + 0
assert a._model.schema._md5 == s0md5
# lets upgrade schema to float
s_temp = bcdb.schema_get(schema=schema_text)
assert len(bcdb.meta._data["url"]) == 8 # should be same because is same schema, should be same md5
assert s_temp._md5 == s0._md5
# lets upgrade schema to float
s2 = bcdb.schema_get(schema=schema_text)
model2 = bcdb.model_get(schema=s2)
assert len(bcdb.meta._data["url"]) == 8 # acl, user, circle, despiegktest and the 1 new one
a3 = model2.new()
a3.category = "acat3"
a3.txt = "data3"
a3.i = 3
a3.save()
assert a3.i == 3.0
assert a2.i == 2 # int
assert len(model2.find()) == 3 # needs to be 3 because model is for all of them
assert len(model.find()) == 3 # needs to be 3 because model is for all of them
all = model2.find()
print(all)
a4 = model2.get(all[0].id)
a4_ = model.get(all[0].id)
assert a4_ == a4
a5 = model2.get(all[1].id)
a6 = model.get(all[2].id)
a6_ = model.get(all[2].id)
assert a6_ == a6
assert a6.id == a3.id
assert a6.i == a3.i
self._log_info("TEST META DONE")
return "OK" | 7f88d33bd6cc2df5284201d859718a8a06e6a4e4 | 17,267 |
def snake_head_only():
"""
|===========|
|···········|
|···········|
|···········|
|···········|
|···········|
|···········|
|·······o···|
|···········|
|···········|
|···········|
|···········|
|===========|
"""
return Snake.from_dict(
**{
"body": [
{"x": 7, "y": 4},
],
}
) | c08ffd0a86ec9d5a40d2649dd63a2b60019a6791 | 17,268 |
import six
def str_to_bool(s):
"""Convert a string value to its corresponding boolean value."""
if isinstance(s, bool):
return s
elif not isinstance(s, six.string_types):
raise TypeError('argument must be a string')
true_values = ('true', 'on', '1')
false_values = ('false', 'off', '0')
if s.lower() in true_values:
return True
elif s.lower() in false_values:
return False
else:
        raise ValueError('not a recognized boolean value: {0}'.format(s)) | c228321872f253ce3e05c6af9284ec496dea8dcf | 17,269 |
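
A few illustrative calls:

print(str_to_bool("On"))    # True
print(str_to_bool("0"))     # False
str_to_bool("maybe")        # raises ValueError
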
def id_feat_pred_mz_rt(cursor, mz, rt, ccs, tol_mz, tol_rt, tol_ccs, esi_mode, norm='l2'):
"""
id_feat_pred_mz_rt
description:
identifies a feature on the basis of predicted m/z and retention time
parameters:
cursor (sqlite3.Cursor) -- cursor for querying lipids.db
mz (float) -- m/z to match
rt (float) -- retention time to match
ccs (float) -- CCS to match
tol_mz (float) -- tolerance for m/z
tol_rt (float) -- tolerance for retention time
tol_ccs (float) -- tolerance for CCS
esi_mode (str) -- filter results by ionization mode: 'neg', 'pos', or None for unspecified
[norm (str)] -- specify l1 or l2 norm for computing scores [optional, default='l2']
returns:
(str or list(str)), (str) -- putative identification(s) (or '' for no matches), identification level
"""
qry = 'SELECT name, adduct, mz, rt FROM predicted_mz JOIN predicted_rt ON ' \
+ 'predicted_mz.t_id=predicted_rt.t_id WHERE mz BETWEEN ? AND ? AND rt BETWEEN ? and ?'
if esi_mode == 'pos':
qry += ' AND adduct LIKE "%+"'
elif esi_mode == 'neg':
qry += ' AND adduct LIKE "%-"'
mz_min = mz - tol_mz
mz_max = mz + tol_mz
rt_min = rt - tol_rt
rt_max = rt + tol_rt
putative_ids, putative_scores = [], []
for name, adduct, mz_x, rt_x in cursor.execute(qry, (mz_min, mz_max, rt_min, rt_max)).fetchall():
putative_ids.append('{}_{}'.format(name, adduct))
putative_scores.append(get_score(tol_mz, tol_rt, tol_ccs, mz_q=mz, rt_q=rt, mz_x=mz_x, rt_x=rt_x))
if putative_ids:
return putative_ids, 'pred_mz_rt', putative_scores
else:
return '', '', [] | 72818a631b155e1c50d53b26c1749bf8f68767f7 | 17,270 |
def Arrow_bg(self):
"""
The function that will create the background for the dropdown arrow button.
For internal use only. This function is therefore also not imported by __init__.py
"""
#Just leave the making of the buttons background to the default function. Not gonna bother re-doing that here (because why would I?)
if not self.func_data:
surface = self.Make_background_surface(None)
elif self.value:
surface = self.Make_background_surface(self.func_data["__accent_bg"])
else:
surface = self.Make_background_surface(self.func_data["__bg"])
#Draw the arrow so characteristic to dropdown boxes
if not self.value:
arrow_coords = (
(self.scaled(self.width * 1/6), self.scaled(self.height * 1/3)), #Top left
(self.scaled(self.width * 1/2), self.scaled(self.height * 2/3)), #Bottom
(self.scaled(self.width * 5/6), self.scaled(self.height * 1/3)), #Top right
)
else:
arrow_coords = (
(self.scaled(self.width * 1/6), self.scaled(self.height * 2/3)), #Bottom left
(self.scaled(self.width * 1/2), self.scaled(self.height * 1/3)), #Top
(self.scaled(self.width * 5/6), self.scaled(self.height * 2/3)), #Bottom right
)
pygame.draw.polygon(surface, self.border[0] if self.border else (63, 63, 63), arrow_coords)
return surface | 4b09c197666aa5ea15713d98ae7c38e1b0ffa0e0 | 17,272 |
def get_bounds_5km_to_1km( itk_5km, isc_5km ) :
"""
return the 1km pixel indexes limits in the 5km pixel [ itk_5km, isc_5km ] footprint
"""
# set the (track,scan) indexes of the 5km pixel in the 5km grid
itk_1km = itk_5km_to_1km ( itk_5km )
isc_1km = isc_5km_to_1km ( isc_5km )
# set the 1km indexes of pixels to interpolate along track
itk_1km_min = itk_1km - 2
itk_1km_max = itk_1km + 2
# general case : 2 interpolations done along scan : [isc-1, isc] then [isc, isc+1]
isc_1km_min = isc_1km - 2
isc_1km_max = isc_1km + 2
# if last 5km pixel along scan, only 4 1km pixels in the 5km footprint in this direction
if ( isc_5km == sz_sc_5km - 1 ) :
isc_1km_max = isc_1km + 6
return itk_1km_min, itk_1km_max, isc_1km_min, isc_1km_max | 7fd175787f075d7ed9b3e8ed04565f38877de1e4 | 17,274 |
import torch
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
"""Build the triplet loss over a batch of embeddings.
For each anchor, we get the hardest positive and hardest negative to form a triplet.
Args:
labels: labels of the batch, of size (batch_size,)
embeddings: tensor of shape (batch_size, embed_dim)
margin: margin for triplet loss
squared: Boolean. If true, output is the pairwise squared euclidean distance matrix.
If false, output is the pairwise euclidean distance matrix.
Returns:
triplet_loss: scalar tensor containing the triplet loss
"""
# Get the pairwise distance matrix
pairwise_dist = _pairwise_distances(embeddings, squared=squared)
# For each anchor, get the hardest positive
# First, we need to get a mask for every valid positive (they should have same label)
mask_anchor_positive = _get_anchor_positive_triplet_mask(labels).float()
# We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
anchor_positive_dist = torch.multiply(mask_anchor_positive, pairwise_dist)
# shape (batch_size, 1)
hardest_positive_dist = torch.max(anchor_positive_dist, dim=1, keepdim=True).values
# print("hardest_positive_dist", hardest_positive_dist.mean())
# For each anchor, get the hardest negative
# First, we need to get a mask for every valid negative (they should have different labels)
mask_anchor_negative = _get_anchor_negative_triplet_mask(labels).float()
# We add the maximum value in each row to the invalid negatives (label(a) == label(n))
max_anchor_negative_dist = torch.max(pairwise_dist, dim=1, keepdim=True).values
anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)
# shape (batch_size,)
hardest_negative_dist = torch.min(anchor_negative_dist, dim=1, keepdim=True).values
# print("hardest_negative_dist", hardest_negative_dist.mean())
# Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
triplet_loss = torch.relu(hardest_positive_dist - hardest_negative_dist + margin)
# Get final mean triplet loss
triplet_loss = torch.mean(triplet_loss)
return triplet_loss | 37d20237580463e668cee77c96f732f2d0211aef | 17,275 |
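The three helpers referenced above are not part of this snippet; a minimal sketch of what they are assumed to compute, following the usual batch-hard recipe:

import torch

def _pairwise_distances(embeddings, squared=False):
    # Pairwise (squared) Euclidean distances between all rows of `embeddings`.
    dot_product = embeddings @ embeddings.t()
    square_norm = torch.diagonal(dot_product)
    distances = square_norm.unsqueeze(0) - 2.0 * dot_product + square_norm.unsqueeze(1)
    distances = torch.clamp(distances, min=0.0)
    if not squared:
        distances = torch.sqrt(distances + 1e-16)  # epsilon keeps the gradient finite at 0
    return distances

def _get_anchor_positive_triplet_mask(labels):
    # mask[i, j] is True iff i != j and labels[i] == labels[j]
    indices_not_equal = ~torch.eye(labels.size(0), dtype=torch.bool, device=labels.device)
    labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
    return indices_not_equal & labels_equal

def _get_anchor_negative_triplet_mask(labels):
    # mask[i, j] is True iff labels[i] != labels[j]
    return labels.unsqueeze(0) != labels.unsqueeze(1)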
import tensorflow as tf
def categorical_sample_logits(logits):
"""
Samples (symbolically) from categorical distribution, where logits is a NxK
matrix specifying N categorical distributions with K categories
specifically, exp(logits) / sum( exp(logits), axis=1 ) is the
probabilities of the different classes
    Cleverly uses the Gumbel-max trick, based on
https://github.com/tensorflow/tensorflow/issues/456
"""
U = tf.random_uniform(tf.shape(logits))
return tf.argmax(logits - tf.log(-tf.log(U)), dimension=1, name='sample_once') | f93752b11de02b1b61f60b3ff5c12dd9c15f7d8f | 17,276 |
def sort_results(boxes, n=None):
    """Returns the top n boxes based on score given DenseCap
    results.json output
    Parameters
    ----------
    boxes : list of dictionaries
        output from load_output_json, each with a 'score' key
    n : integer, optional
        number of boxes to return; all boxes are returned if omitted
    Returns
    -------
    list of boxes sorted by descending score
    """
    sorted_boxes = sorted(boxes, key=lambda x: x['score'], reverse=True)
    return sorted_boxes if n is None else sorted_boxes[:n] | 20f30e5846de4ce46073c3d32573d283576489e0 | 17,277
import datetime
def get_date(d : str) -> datetime.datetime:
"""A helper function that takes a ModDB string representation of time and returns an equivalent
datetime.datetime object. This can range from a datetime with the full year to
second to just a year and a month.
Parameters
-----------
d : str
String representation of a datetime
Returns
-------
datetime.datetime
The datetime object for the given string
"""
try:
return datetime.datetime.strptime(d[:-3] + d[-2:], '%Y-%m-%dT%H:%M:%S%z')
except ValueError:
pass
try:
return datetime.datetime.strptime(d, '%Y-%m-%d')
except ValueError:
pass
return datetime.datetime.strptime(d, '%Y-%m') | 44fb951ecb96102c631f88dc888aac11d11c8bad | 17,278 |
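A short usage sketch (the input strings are made up) showing the three formats the fallback chain accepts:

get_date("2021-03-05T14:30:00-04:00")  # full timestamp with timezone offset
get_date("2021-03-05")                  # date only
get_date("2021-03")                     # year and month only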
def var_policer(*args):
"""Returns a variable policer object built from args."""
return VarPolicer(args) | a346e041118f1be2ed6b0acd2c9e3d04603031df | 17,279 |
def winner(board):
"""
Returns the winner of the game, if there is one.
"""
    # look for a horizontal winner
i = 0
while i < len(board):
j = 1
while j <len(board):
if board[i][j-1]==board[i][j] and board[i][j] == board[i][j+1]:
return board[i][j]
j += 2
i += 1
    # look for a vertical winner
i = 1
while i < len(board):
j = 0
while j <len(board):
if board[i-1][j]==board[i][j] and board[i][j] == board[i+1][j]:
return board[i][j]
j += 1
i += 2
    # look for a diagonal winner
if board[1][1] ==board[0][0] and board[1][1] == board[2][2]:
return board[1][1]
elif board[1][1] ==board[0][2] and board[1][1] == board[2][0]:
return board[1][1]
else:
return None | 31ab2cf04dfe269598efdd073762505643563a96 | 17,280 |
import logging
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
log = logging.getLogger(__name__)
def freqz_resp_list(b, a=np.array([1]), mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4)):
"""
    A method for displaying digital filter frequency response magnitude,
    phase, and group delay. A plot is produced using matplotlib.
    freqz_resp_list(b, a=[1], mode='dB', fs=1.0, n_pts=1024, fsize=(6, 4))
Parameters
----------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficents
mode : display mode: 'dB' magnitude, 'phase' in radians, or
'groupdelay_s' in samples and 'groupdelay_t' in sec,
all versus frequency in Hz
n_pts : number of points to plot; default is 1024
    fsize : figure size; default is (6, 4) inches
Mark Wickert, January 2015
"""
if type(b) == list:
# We have a list of filters
N_filt = len(b)
else:
return None
f = np.arange(0, n_pts) / (2.0 * n_pts)
for n in range(N_filt):
w, H = signal.freqz(b[n], a[n], 2 * np.pi * f)
if n == 0:
plt.figure(figsize=fsize)
if mode.lower() == 'db':
plt.plot(f * fs, 20 * np.log10(np.abs(H)))
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.title('Frequency Response - Magnitude')
elif mode.lower() == 'phase':
plt.plot(f * fs, np.angle(H))
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
plt.ylabel('Phase (rad)')
plt.title('Frequency Response - Phase')
elif (mode.lower() == 'groupdelay_s') or (mode.lower() == 'groupdelay_t'):
"""
Notes
-----
Since this calculation involves finding the derivative of the
phase response, care must be taken at phase wrapping points
and when the phase jumps by +/-pi, which occurs when the
amplitude response changes sign. Since the amplitude response
is zero when the sign changes, the jumps do not alter the group
delay results.
"""
theta = np.unwrap(np.angle(H))
# Since theta for an FIR filter is likely to have many pi phase
# jumps too, we unwrap a second time 2*theta and divide by 2
theta2 = np.unwrap(2 * theta) / 2.
            Tg = -np.diff(theta2) / np.diff(w)
            # For gain almost zero set groupdelay = 0
            idx = np.where(20 * np.log10(np.abs(H[:-1])) < -400)[0]
            Tg[idx] = np.zeros(len(idx))
max_Tg = np.max(Tg)
# print(max_Tg)
if mode.lower() == 'groupdelay_t':
max_Tg /= fs
plt.plot(f[:-1] * fs, Tg / fs)
plt.ylim([0, 1.2 * max_Tg])
else:
plt.plot(f[:-1] * fs, Tg)
plt.ylim([0, 1.2 * max_Tg])
if n == N_filt - 1:
plt.xlabel('Frequency (Hz)')
if mode.lower() == 'groupdelay_t':
plt.ylabel('Group Delay (s)')
else:
plt.ylabel('Group Delay (samples)')
plt.title('Frequency Response - Group Delay')
else:
s1 = 'Error, mode must be "dB", "phase, '
s2 = '"groupdelay_s", or "groupdelay_t"'
log.info(s1 + s2) | 207ad7ad59a3260df9d5df80c1b8e1bee4c33a3e | 17,281 |
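A quick usage sketch (the filter designs are arbitrary examples) comparing the magnitude response of two FIR filters:

import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

fs = 8000
b_lp = signal.firwin(64, 0.2)                          # lowpass FIR
b_bp = signal.firwin(64, [0.3, 0.6], pass_zero=False)  # bandpass FIR
freqz_resp_list([b_lp, b_bp], [np.array([1]), np.array([1])], mode='dB', fs=fs)
plt.ylim([-80, 5])
plt.show()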
import socket
def _nslookup(ipv4):
"""Lookup the hostname of an IPv4 address.
Args:
ipv4: IPv4 address
    Returns:
        tuple: (ipv4, hostname), where hostname is None if the lookup fails
    """
# Initialize key variables
hostname = None
# Return result
try:
ip_results = socket.gethostbyaddr(ipv4)
if len(ip_results) > 1:
hostname = ip_results[0]
    except OSError:
hostname = None
return (ipv4, hostname) | 7771887dbfcd60e73b8fce0ce4029fcd7058a7d1 | 17,282 |
def get_service_node(service):
""" Returns the name of the node that is providing the given service, or empty string """
node = rosservice_get_service_node(service)
    if node is None:
node = ""
return node | 7a8df548e119e8197f92340d228fdc7855494670 | 17,283 |
import warnings
from ftplib import FTP, all_errors
from io import StringIO
from pandas import read_csv
def _download_nasdaq_symbols(timeout):
"""
@param timeout: the time to wait for the FTP connection
"""
try:
ftp_session = FTP(_NASDAQ_FTP_SERVER, timeout=timeout)
ftp_session.login()
except all_errors as err:
        raise RemoteDataError('Error connecting to %r: %s' %
(_NASDAQ_FTP_SERVER, err))
lines = []
try:
ftp_session.retrlines('RETR ' + _NASDAQ_TICKER_LOC, lines.append)
except all_errors as err:
        raise RemoteDataError('Error downloading from %r: %s' %
(_NASDAQ_FTP_SERVER, err))
finally:
ftp_session.close()
# Sanity Checking
if not lines[-1].startswith('File Creation Time:'):
raise RemoteDataError('Missing expected footer. Found %r' % lines[-1])
# Convert Y/N to True/False.
converter_map = dict((col, _bool_converter) for col, t in _TICKER_DTYPE
if t is bool)
# For pandas >= 0.20.0, the Python parser issues a warning if
# both a converter and dtype are specified for the same column.
# However, this measure is probably temporary until the read_csv
# behavior is better formalized.
with warnings.catch_warnings(record=True):
        data = read_csv(StringIO('\n'.join(lines[:-1])), sep='|',
dtype=_TICKER_DTYPE, converters=converter_map,
index_col=1)
# Properly cast enumerations
for cat in _CATEGORICAL:
data[cat] = data[cat].astype('category')
return data | 9b34571086ac3e738e29b3ed130ab2d0c7303657 | 17,284 |
def sessions(request):
"""
    Cookies preprocessor
"""
context = {}
return context | 562f4e9da57d3871ce780dc1a0661a34b3279ec5 | 17,285 |
import logging
import numba as nb
import numpy as np
def dynamax_mnn(src: nb.typed.Dict, trg: nb.typed.Dict,
src_emb: np.ndarray, trg_emb: np.ndarray,
src_k: np.ndarray, trg_k: np.ndarray) -> np.ndarray:
"""
Run Dynamax-Jaccard in both directions and infer mutual neighbors.
:param src nb.typed.Dict: src_id2pointers dictionary
:param trg nb.typed.Dict: trg_id2pointers dictionary
:param src_emb np.ndarray: unnormalized word embeddings matrix for src lang
:param trg_emb np.ndarray: unnormalized word embeddings matrix for trg lang
    :param src_k np.ndarray: preranked target candidates for the source language
    :param trg_k np.ndarray: preranked source candidates for the target language
"""
logging.info('DynaMax: commencing first loop')
src_argmax = dynamax_loop(src, trg, src_emb, trg_emb, src_k)
logging.info('DynaMax: commencing second loop')
trg_argmax = dynamax_loop(trg, src, trg_emb, src_emb, trg_k)
logging.info('DynaMax: inferring mutual nearest neighbors')
mnn = mutual_nn(src_argmax, trg_argmax)
return mnn | 174f603df09cbe7a8ee91de29de48ccaf2573b31 | 17,286 |
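A minimal sketch of the mutual_nn helper assumed above (dynamax_loop, the fuzzy-Jaccard scoring loop, is not reproduced here); a pair (i, j) is kept only when i's best target is j and j's best source is i:

def mutual_nn(src_argmax: np.ndarray, trg_argmax: np.ndarray) -> np.ndarray:
    # src_argmax[i] = best target index for source i; trg_argmax[j] = best source index for target j
    pairs = [(i, j) for i, j in enumerate(src_argmax) if trg_argmax[j] == i]
    return np.array(pairs, dtype=np.int64)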
def resnet152(pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', DistillerBottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs) | 5e3435eea1a18c028d422abef57d9e88d88e609f | 17,287 |
import torch
def _load_model(featurizer_path):
"""Load the featurization model
Parameters
----------
featurizer_path: str
Path to the saved model file
Returns
-------
The loaded PyTorch model
"""
# load in saved model
pth = torch.load(featurizer_path)
model_args = pth['model_args']
model_state = pth['model_state']
model = UNet(**model_args)
model.load_state_dict(model_state)
# remove last layer and activation
model.segment = layers.Identity()
model.activate = layers.Identity()
model.eval()
return model | 21c29c9320b482bb33e82f97723d04bb53364ba1 | 17,288 |
def grid0_baseline(num_runs, render=True):
"""Run script for the grid0 baseline.
Parameters
----------
num_runs : int
number of rollouts the performance of the environment is evaluated
over
render : bool, optional
specifies whether to use the gui during execution
Returns
-------
flow.core.experiment.Experiment
class needed to run simulations
"""
exp_tag = flow_params['exp_tag']
sim_params = flow_params['sim']
vehicles = flow_params['veh']
env_params = flow_params['env']
net_params = flow_params['net']
initial_config = flow_params.get('initial', InitialConfig())
# define the traffic light logic
tl_logic = TrafficLightParams(baseline=False)
phases = [{"duration": "31", "minDur": "8", "maxDur": "45",
"state": "GrGr"},
{"duration": "6", "minDur": "3", "maxDur": "6",
"state": "yryr"},
{"duration": "31", "minDur": "8", "maxDur": "45",
"state": "rGrG"},
{"duration": "6", "minDur": "3", "maxDur": "6",
"state": "ryry"}]
for i in range(N_ROWS * N_COLUMNS):
tl_logic.add('center'+str(i), tls_type='actuated', phases=phases,
programID=1)
# modify the rendering to match what is requested
sim_params.render = render
# set the evaluation flag to True
env_params.evaluate = True
# import the network class
module = __import__('flow.networks', fromlist=[flow_params['network']])
network_class = getattr(module, flow_params['network'])
# create the network object
network = network_class(
name=exp_tag,
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config,
traffic_lights=tl_logic
)
# import the environment class
module = __import__('flow.envs', fromlist=[flow_params['env_name']])
env_class = getattr(module, flow_params['env_name'])
# create the environment object
env = env_class(env_params, sim_params, network)
exp = Experiment(env)
results = exp.run(num_runs, env_params.horizon)
total_delay = np.mean(results['returns'])
return total_delay | 8cb8a01309bb8ea3aae467d8a6c8a81ce295ab94 | 17,289 |
def read_aims(filename):
"""Method to read FHI-aims geometry files in phonopy context."""
    with open(filename, 'r') as f:
        lines = f.readlines()
cell = []
is_frac = []
positions = []
symbols = []
magmoms = []
for line in lines:
fields = line.split()
if not len(fields):
continue
if fields[0] == "lattice_vector":
vec = lmap(float, fields[1:4])
cell.append(vec)
elif fields[0][0:4] == "atom":
if fields[0] == "atom":
frac = False
elif fields[0] == "atom_frac":
frac = True
pos = lmap(float, fields[1:4])
sym = fields[4]
is_frac.append(frac)
positions.append(pos)
symbols.append(sym)
magmoms.append(None)
    # implicitly assuming that initial_moment lines adhere to the FHI-aims geometry.in specification,
    # i.e. two subsequent initial_moment lines do not occur;
    # if they do, the value specified in the last line is taken here - without any warning
elif fields[0] == "initial_moment":
magmoms[-1] = float(fields[1])
for (n,frac) in enumerate(is_frac):
if frac:
pos = [ sum( [ positions[n][l] * cell[l][i] for l in range(3) ] ) for i in range(3) ]
positions[n] = pos
if None in magmoms:
atoms = Atoms(cell=cell, symbols=symbols, positions=positions)
else:
atoms = Atoms(cell=cell, symbols=symbols, positions=positions, magmoms=magmoms)
return atoms | bcf5f00e57ed249c10667ad0b883986cb1b36865 | 17,290 |
def test(model, issue_batches):
"""
return accuracy on test set
"""
session = tf.get_default_session()
num_correct = 0
num_predict = 0
for epoch, step, eigens, labels in issue_batches:
feeds = {
model['eigens']: eigens,
}
guess = session.run(model['guess'], feed_dict=feeds)
num_predict += guess.shape[0]
num_correct += \
np.sum(np.argmax(labels, axis=1) == np.argmax(guess, axis=1))
return float(num_correct) / float(num_predict) | 0d8ae672766567a6665089c2d7d5004e25d80755 | 17,291 |
def evaluate_sample(ResNet50_model, X_train, Y_train, X_val_b, Y_val_b, X_data, Y_data, checkpoint_path):
"""
A function that accepts a labeled-unlabeled data split and trains the relevant model on the labeled data, returning
the model and it's accuracy on the test set.
"""
# shuffle the training set:
perm = np.random.permutation(X_train.shape[0])
X_train = X_train[perm]
Y_train = Y_train[perm]
X_validation = X_val_b
Y_validation=to_categorical(Y_val_b)
# train and evaluate the model:
model = train_disease_classification_model(ResNet50_model,X_train, Y_train, X_validation, Y_validation, checkpoint_path)
acc = model.evaluate(X_data, Y_data, verbose=0)
return acc, model | c5f86feede372f078e9c88bac688c790de6578d6 | 17,292 |
import six
import base64
def Base64WSEncode(s):
"""
Return Base64 web safe encoding of s. Suppress padding characters (=).
Uses URL-safe alphabet: - replaces +, _ replaces /. Will convert s of type
unicode to string type first.
@param s: string to encode as Base64
@type s: string
@return: Base64 representation of s.
@rtype: string
NOTE: Taken from keyczar (Apache 2.0 license)
"""
if isinstance(s, six.text_type):
# Make sure input string is always converted to bytes (if not already)
s = s.encode("utf-8")
return base64.urlsafe_b64encode(s).decode("utf-8").replace("=", "") | cb28001bddec215b763936fde4652289cf6480c0 | 17,293 |
def onlyWikipediaURLS(urls):
"""Some example HTML page data is from wikipedia. This function converts
relative wikipedia links to full wikipedia URLs"""
wikiURLs = [url for url in urls if url.startswith('/wiki/')]
return ["https://en.wikipedia.org"+url for url in wikiURLs] | df9ecbb73dfc9a764e4129069a4317517830307a | 17,294 |
import os
import os.path as osp
import pickle
def get_image_filename_index():
"""
Obtain a mapping of filename -> filepath for images
:return:
"""
index_path = osp.join(SEG_ROOT, 'privacy_filters', 'cache', 'fname_index.pkl')
if osp.exists(index_path):
        print('Found cached index. Loading it...')
return pickle.load(open(index_path, 'rb'))
else:
        print('Creating filename index ...')
fname_index = dict()
images_dir = osp.join(SEG_ROOT, 'images')
for fold in os.listdir(images_dir):
for img_filename in os.listdir(osp.join(images_dir, fold)):
image_path = osp.join(images_dir, fold, img_filename)
fname_index[img_filename] = image_path
pickle.dump(fname_index, open(index_path, 'wb'))
return fname_index | 002b6fd4dea1b00bb758377e71de0e67f5d979d3 | 17,295 |
def merge_coordinates(coordinates, capture_size):
"""Merge overlapping coordinates for MIP targets.
Parameters
----------
coordinates: python dictionary
Coordinates to be merged in the form {target-name: {chrom: chrx,
begin: start-coordinate, end: end-coordinate}, ..}
capture_size: int
Anticipated MIP capture size. If two regions are as close as 2 times
this value, they will be merged.
Returns
-------
target_coordinates: python dictionary
merged coordinates dictionary
target_names: python dictionary
names of included targets in each merged region.
"""
# create target regions to cover all snps
# start by getting snps on same chromosome together
chroms = {}
for c in coordinates:
chrom = coordinates[c]["chrom"]
try:
chroms[chrom].append([coordinates[c]["begin"],
coordinates[c]["end"]])
except KeyError:
chroms[chrom] = [[coordinates[c]["begin"],
coordinates[c]["end"]]]
# merge snps that are too close to get separate regions
# the length should be twice the capture size
merged_chroms = {}
for c in chroms:
merged_chroms[c] = merge_overlap(chroms[c], 2 * capture_size)
# create regions for alignment
# create target coordinate for each region
target_coordinates = {}
target_names = {}
for c in merged_chroms:
regions = merged_chroms[c]
for reg in regions:
targets_in_region = []
for co in coordinates:
if (coordinates[co]["chrom"] == c
and reg[0] <= coordinates[co]["begin"]
<= coordinates[co]["end"] <= reg[1]):
targets_in_region.append(co)
region_name = targets_in_region[0]
target_names[region_name] = targets_in_region
r_start = reg[0]
r_end = reg[1]
target_coordinates[region_name] = [c, r_start, r_end]
return target_coordinates, target_names | 8af4a34fc8ce1a01ddcd4d4f257815ef5f852911 | 17,296 |
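A minimal sketch of the merge_overlap helper used above (an assumption, not the original implementation): it merges [begin, end] intervals that overlap or whose gap is no larger than the given spacer.

def merge_overlap(intervals, spacer=0):
    # Merge [begin, end] intervals that overlap or lie within `spacer` of each other.
    merged = []
    for begin, end in sorted(intervals):
        if merged and begin <= merged[-1][1] + spacer:
            merged[-1][1] = max(merged[-1][1], end)
        else:
            merged.append([begin, end])
    return merged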
import numpy as np
from keras.layers import Input, LSTM, Dropout, Dense, Activation
from keras.models import Model
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
"""
Function creating the Emojify-v2 model's graph.
Arguments:
input_shape -- shape of the input, usually (max_len,)
word_to_vec_map -- dictionary mapping every word in a vocabulary into its
50-dimensional vector representation word_to_index -- dictionary mapping from
words to their indices in the vocabulary (400,001 words)
Returns:
model -- a model instance in Keras
"""
# Define sentence_indices as the input of the graph,
# it should be of shape input_shape and dtype 'int32' (as it contains indices).
sentence_indices = Input(shape=input_shape, dtype=np.int32)
# Create the embedding layer pretrained with GloVe Vectors (≈1 line)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)
# Propagate sentence_indices through your embedding layer, you get back the embeddings
embeddings = embedding_layer(sentence_indices)
# Propagate the embeddings through an LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a batch of sequences.
X = LSTM(128, return_sequences=True)(embeddings)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
# Propagate X trough another LSTM layer with 128-dimensional hidden state
# Be careful, the returned output should be a single hidden state, not a batch of sequences.
X = LSTM(128)(X)
# Add dropout with a probability of 0.5
X = Dropout(0.5)(X)
    # Propagate X through a Dense layer to get back a batch of 5-dimensional vectors.
    X = Dense(5)(X)
# Add a softmax activation
X = Activation('softmax')(X)
# Create Model instance which converts sentence_indices into X.
model = Model(sentence_indices, X)
return model | 111a0c97e9a8c75aa6a7191ee89cbda69a84794c | 17,297 |
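The pretrained_embedding_layer helper is not included in this snippet; a sketch of the usual construction, assuming word_to_vec_map maps words to fixed-length numpy vectors and a Keras 2-style API:

from keras.layers import Embedding
import numpy as np

def pretrained_embedding_layer(word_to_vec_map, word_to_index):
    # Build a frozen Embedding layer initialized with the pretrained vectors.
    vocab_len = len(word_to_index) + 1                       # +1 for the Keras Embedding convention
    emb_dim = next(iter(word_to_vec_map.values())).shape[0]  # e.g. 50 for GloVe-50d
    emb_matrix = np.zeros((vocab_len, emb_dim))
    for word, idx in word_to_index.items():
        emb_matrix[idx, :] = word_to_vec_map[word]
    embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)
    embedding_layer.build((None,))                           # build before set_weights
    embedding_layer.set_weights([emb_matrix])
    return embedding_layer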
def de_parser(lines):
"""return a dict of {OfficalName: str, Synonyms: str, Fragment: bool,
Contains: [itemdict,], Includes: [itemdict,]} from DE lines
The DE (DEscription) lines contain general descriptive information about
the sequence stored. This information is generally sufficient to identify
the protein precisely.
The description always starts with the proposed official name of the
protein. Synonyms are indicated between brackets. Examples below
If a protein is known to be cleaved into multiple functional components,
the description starts with the name of the precursor protein, followed by
a section delimited by '[Contains: ...]'. All the individual components are
listed in that section and are separated by semi-colons (';'). Synonyms are
allowed at the level of the precursor and for each individual component.
If a protein is known to include multiple functional domains each of which
is described by a different name, the description starts with the name of
the overall protein, followed by a section delimited by '[Includes: ]'. All
the domains are listed in that section and are separated by semi-colons
(';'). Synonyms are allowed at the level of the protein and for each
individual domain.
In rare cases, the functional domains of an enzyme are cleaved, but the
catalytic activity can only be observed, when the individual chains
reorganize in a complex. Such proteins are described in the DE line by a
combination of both '[Includes:...]' and '[Contains:...]', in the order
given in the following example:
If the complete sequence is not determined, the last information given on
the DE lines is '(Fragment)' or '(Fragments)'. Example:
DE Dihydrodipicolinate reductase (EC 1.3.1.26) (DHPR) (Fragment).
DE Arginine biosynthesis bifunctional protein argJ [Includes: Glutamate
DE N-acetyltransferase (EC 2.3.1.35) (Ornithine acetyltransferase)
DE (Ornithine transacetylase) (OATase); Amino-acid acetyltransferase
DE (EC 2.3.1.1) (N-acetylglutamate synthase) (AGS)] [Contains: Arginine
DE biosynthesis bifunctional protein argJ alpha chain; Arginine
DE biosynthesis bifunctional protein argJ beta chain] (Fragment).
Trouble maker:
DE Amiloride-sensitive amine oxidase [copper-containing] precursor(EC
DE 1.4.3.6) (Diamine oxidase) (DAO).
"""
labeloff_lines = labeloff(lines)
joined = join_parser(labeloff_lines, chars_to_strip="). ")
keys = ["Includes", "Contains", "Fragment"]
fragment_label = "(Fragment"
contains_label = "[Contains:"
includes_label = "[Includes:"
# Process Fragment
fragment = False
if joined.endswith(fragment_label):
fragment = True
joined = joined.rsplit("(", 1)[0]
# Process Contains
contains = []
if contains_label in joined:
joined, contains_str = joined.split(contains_label)
contains_str = contains_str.strip(" ]")
contains = list(map(de_itemparser, contains_str.split("; ")))
# Process Includes
includes = []
if includes_label in joined:
joined, includes_str = joined.split(includes_label)
includes_str = includes_str.strip(" ]")
includes = list(map(de_itemparser, includes_str.split("; ")))
# Process Primary
primary = de_itemparser(joined)
result = dict(list(zip(keys, (includes, contains, fragment))))
result.update(primary)
return result | d22158e365c52976ed638c27a0a85d8a047d743d | 17,299 |
def roundPrecision(number, precision=4):
""" Rounds the given floating point number to a certain precision, for output."""
return float(('{:.' + str(precision) + 'E}').format(number)) | 3bac0b54f1f8320c158ce0ddc14db7bbd092d2ff | 17,300 |
def stringToNumbers(string, separators=[","], commentSymbol="#"):
""" Return a list of splitted string and numbers from string "string". Numbers will be converted into floats. Text after "#" will be skipped.
--- string: the string to be converted.
--- separators: a list of additional separators other than whitespace to be used.
--- commentSymbol: text after which will be ignored.
"""
if "#" in string: string = string[:string.index("#")].strip(); # take everything before "#" symbol, then strip
splitted = [string];
for separator in flatten(separators): splitted = FLI([x.split(separator) for x in splitted]);
splitted = FLI([x.split() for x in splitted]); # clean up empty strings
if splitted == []: return [];
lineData = [];
for piece in splitted:
if isFloat(piece):
lineData.append(float(piece));
else:
lineData.append(piece);
return lineData; | dea1fb1d3257d00eaa637e1b80f23ad0e6475c38 | 17,302 |
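The helpers above (flatten, FLI, isFloat) are not part of this snippet; a rough sketch of what they are assumed to do, with FLI read as "flatten list of iterables":

def flatten(x):
    # Recursively flatten nested lists/tuples into a flat list.
    result = []
    for item in x:
        if isinstance(item, (list, tuple)):
            result.extend(flatten(item))
        else:
            result.append(item)
    return result

def FLI(list_of_lists):
    # Flatten a list of iterables by one level.
    return [item for sub in list_of_lists for item in sub]

def isFloat(s):
    # True if s can be parsed as a float.
    try:
        float(s)
        return True
    except (TypeError, ValueError):
        return False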
import json
def friend_invitation_by_facebook_send_view(request): # friendInvitationByFacebookSend
"""
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
recipients_facebook_id_array = request.GET.getlist('recipients_facebook_id_array[]', "")
recipients_facebook_name_array = request.GET.getlist('recipients_facebook_name_array[]', "")
facebook_request_id = request.GET.get('facebook_request_id', "")
results = friend_invitation_by_facebook_send_for_api(voter_device_id, recipients_facebook_id_array,
recipients_facebook_name_array, facebook_request_id)
json_data = {
'status': results['status'],
'success': results['success'],
'voter_device_id': voter_device_id,
'all_friends_facebook_link_created_results': results['all_friends_facebook_link_created_results'],
}
return HttpResponse(json.dumps(json_data), content_type='application/json') | 6aeb2852b9e299bc8ddd5d03fbff2d0200e5c4a0 | 17,303 |
import json
def inv_send_received(r, **attr):
"""
Confirm a Shipment has been Received
- called via POST from inv_send_rheader
- called via JSON method to reduce request overheads
"""
if r.http != "POST":
r.error(405, current.ERROR.BAD_METHOD,
next = URL(),
)
T = current.T
send_id = r.id
if not send_id:
r.error(405, "Can only confirm a single shipment.")
auth = current.auth
s3db = current.s3db
stable = s3db.inv_send
if not auth.s3_has_permission("update", stable,
record_id = send_id,
):
r.unauthorised()
db = current.db
tracktable = s3db.inv_track_item
db(stable.id == send_id).update(status = SHIP_STATUS_RECEIVED)
db(tracktable.send_id == send_id).update(status = TRACK_STATUS_ARRIVED)
if current.deployment_settings.get_inv_send_req():
rtable = s3db.inv_req
srtable = s3db.inv_send_req
reqs = db(srtable.send_id == send_id).select(srtable.req_id)
if reqs:
req_ids = [row.req_id for row in reqs]
# Get the full list of items in the request(s)
ritable = s3db.inv_req_item
for req_id in req_ids:
query = (ritable.req_id == req_id)
ritems = db(query).select(ritable.id,
ritable.item_pack_id,
ritable.quantity,
# Virtual Field
#ritable.pack_quantity,
)
# Get all Received Shipments in-system for this request
query = (stable.status == SHIP_STATUS_RECEIVED) & \
(tracktable.send_id == send_id) & \
(stable.id == srtable.send_id) & \
(srtable.req_id == req_id)
sitems = db(query).select(tracktable.item_pack_id,
tracktable.quantity,
# Virtual Field
#tracktable.pack_quantity,
)
fulfil_qty = {}
for item in sitems:
item_pack_id = item.item_pack_id
if item_pack_id in fulfil_qty:
fulfil_qty[item_pack_id] += (item.quantity * item.pack_quantity())
else:
fulfil_qty[item_pack_id] = (item.quantity * item.pack_quantity())
complete = False
for item in ritems:
if item.item_pack_id in fulfil_qty:
quantity_fulfil = fulfil_qty[item.item_pack_id]
db(ritable.id == item.id).update(quantity_fulfil = quantity_fulfil)
req_quantity = item.quantity * item.pack_quantity()
complete = quantity_fulfil >= req_quantity
# Update overall Request Status
if complete:
# REQ_STATUS_COMPLETE
db(rtable.id == req_id).update(fulfil_status = 2)
else:
# REQ_STATUS_PARTIAL
db(rtable.id == req_id).update(fulfil_status = 1)
message = T("Shipment received")
current.session.confirmation = message
current.response.headers["Content-Type"] = "application/json"
return json.dumps({"message": s3_str(message),
"tree": URL(args = [send_id, "track_item"]),
}, separators=SEPARATORS) | 645281e0e2023bd454021058e0c0ed79a61223b2 | 17,304 |
import numbers
def filter_table(table, filter_series, ignore=None):
"""
Filter a table based on a set of restrictions given in
Series of column name / filter parameter pairs. The column
names can have suffixes `_min` and `_max` to indicate
"less than" and "greater than" constraints.
Parameters
----------
table : pandas.DataFrame
Table to filter.
filter_series : pandas.Series
Series of column name / value pairs of filter constraints.
Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
ignore : sequence of str, optional
List of column names that should not be used for filtering.
Returns
-------
filtered : pandas.DataFrame
"""
with log_start_finish('filter table', logger):
ignore = ignore if ignore else set()
filters = [_filterize(name, val)
                   for name, val in filter_series.items()
if not (name in ignore or
(isinstance(val, numbers.Number) and
np.isnan(val)))]
return apply_filter_query(table, filters) | 5e5692c46e2dd207eca8d752912dff2b712cce18 | 17,305 |
def analogy_computation_2d(f_first_enc,
f_first_frame,
f_current_enc,
first_depth):
"""Implements the deep analogy computation."""
with tf.variable_scope('analogy_computation'):
frame_enc_diff = f_first_frame - f_first_enc
frame_enc_diff_enc = tf.layers.conv2d(
frame_enc_diff,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
f_current_enc_enc = tf.layers.conv2d(
f_current_enc,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.concat([frame_enc_diff_enc, f_current_enc_enc], 3)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
analogy = tf.contrib.layers.layer_norm(analogy)
analogy = tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1)
return tf.layers.conv2d(
analogy,
first_depth * 4,
3,
padding='same',
activation=tf.nn.relu,
strides=1) | 376176b8c17cf9e2f9611943b1fb18da4359748d | 17,306 |
def format_to_TeX(elements):
"""returns BeautifulSoup elements in LaTeX.
"""
accum = []
for el in elements:
if isinstance(el, NavigableString):
accum.append(escape_LaTeX(el.string))
else:
accum.append(format_el(el))
return "".join(accum) | 2df2c4979fc65656b8ef7f4b514a9c4e036b3fa1 | 17,307 |
from typing import OrderedDict
def namedlist(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of list with named fields.
>>> Point = namedlist('Point', ['x', 'y'])
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with pos args or keywords
>>> p[0] + p[1] # indexable like a plain list
33
>>> x, y = p # unpack like a regular list
>>> x, y
(11, 22)
>>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Validate the field names. At the user's option, either generate an error
# message or automatically replace the field name with a valid name.
if isinstance(field_names, basestring):
field_names = field_names.replace(',', ' ').split()
field_names = [str(x) for x in field_names]
if rename:
seen = set()
for index, name in enumerate(field_names):
if (not all(c.isalnum() or c == '_' for c in name)
or _iskeyword(name)
or not name
or name[0].isdigit()
or name.startswith('_')
or name in seen):
field_names[index] = '_%d' % index
seen.add(name)
for name in [typename] + field_names:
if not all(c.isalnum() or c == '_' for c in name):
raise ValueError('Type names and field names can only contain '
'alphanumeric characters and underscores: %r'
% name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a '
'keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with '
'a number: %r' % name)
seen = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: '
'%r' % name)
if name in seen:
raise ValueError('Encountered duplicate field name: %r' % name)
seen.add(name)
# Fill-in the class template
fmt_kw = {'typename': typename}
fmt_kw['field_names'] = tuple(field_names)
fmt_kw['num_fields'] = len(field_names)
fmt_kw['arg_list'] = repr(tuple(field_names)).replace("'", "")[1:-1]
fmt_kw['repr_fmt'] = ', '.join(_repr_tmpl.format(name=name)
for name in field_names)
fmt_kw['field_defs'] = '\n'.join(_m_field_tmpl.format(index=index, name=name)
for index, name in enumerate(field_names))
class_definition = _namedlist_tmpl.format(**fmt_kw)
if verbose:
print(class_definition)
def _itemsetter(key):
def _itemsetter(obj, value):
obj[key] = value
return _itemsetter
# Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(_itemgetter=_itemgetter,
_itemsetter=_itemsetter,
__name__='namedlist_%s' % typename,
OrderedDict=OrderedDict,
_property=property,
_list=list)
try:
exec_(class_definition, namespace)
except SyntaxError as e:
raise SyntaxError(e.message + ':\n' + class_definition)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to
# the frame where the named list is created. Bypass this step in
# environments where sys._getframe is not defined (Jython for
# example) or sys._getframe is not defined for arguments greater
# than 0 (IronPython).
try:
frame = _sys._getframe(1)
result.__module__ = frame.f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result | 0d7e567e69c9d5c0038f4258b37f17ff1e6fb5b1 | 17,308 |
from typing import List
def has_good_frames(frames: List[MonitoredFrame]) -> bool:
"""
Find a frame with a score larger than X
"""
    return any(frame.score and frame.score > 3 for frame in frames) | 16e9e2bce53ae957254121438c2c4e4f8add2142 | 17,309
def updateHistory(conn, author, message_id, backer):
"""
Updates the history
Returns success
"""
c = conn.cursor()
c.execute(prepareQuery("INSERT INTO votes_history (user_id, message_id, backer) VALUES (?,?,?)"), (int(author), int(message_id), int(backer), ))
conn.commit()
return c.rowcount > 0 | 6e0f06ace0e3600c307fe3f5848da583c930bbe8 | 17,310 |
import six
def pyc_loads(data):
"""
Load a .pyc file from a bytestring.
Arguments:
data(bytes): The content of the .pyc file.
Returns:
PycFile: The parsed representation of the .pyc file.
"""
return pyc_load(six.BytesIO(data)) | 99b4b7d07d00a0c5098f1a3ded7c1929e2a4b231 | 17,311 |
import numpy
def time_series_figure(time_series, polynomial, drift, snr):
""" Return a matplotlib figure containing the time series and its
polynomial model.
"""
figure = plt.figure()
plot = figure.add_subplot(111)
plot.grid()
plt.title("Drift: {0: .1f}% - SNR: {1: .1f}dB".format(
drift * 100, 10 * numpy.log10(snr)))
x = numpy.arange(2, 2 + len(time_series))
model = numpy.polyval(polynomial, x)
plot.plot(x, time_series, "k-")
plot.plot(x, model, "k-")
plot.axes.set_xlabel("Volume number")
plot.axes.set_ylabel("Intensity")
return figure | 132aaf22108999e75ec6ca797753724d3198b2c8 | 17,312 |
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {}) | 0f20f8414adf1d324fbe8541a27cad2219e87794 | 17,313 |
def log(cm_uuid: UUID):
"""
:GET: returns the most recent logs for the specified control module. accepts the following url parameters
- limit: the number of logs that should be returned
- offset: offset the number of logs that should be returned
- log_type: the type of log that should be returned
:POST: inserts log with data into the database
"""
if request.method == 'GET':
limit = 20
offset = 0
log_type = "%"
if limit_arg := request.args.get('limit'):
limit = limit_arg
if offset_arg := request.args.get('offset'):
offset = offset_arg
if log_type_arg := request.args.get('log_type'):
log_type = log_type_arg
logs = CMLog.query.filter_by(cm_uuid=cm_uuid, log_type=log_type)\
.order_by(CMLog.timestamp.desc())\
.limit(limit)\
.offset(offset*limit)\
.all()
returnval = dict()
        returnval['cm_uuid'] = logs[0].cm_uuid if logs else str(cm_uuid)
returnval['status'] = 'success'
returnval['data'] = []
for current_log in logs:
log_data = {
'id': current_log.id,
'log_type': current_log.log_type,
'timestamp': current_log.timestamp,
'data': current_log.data
}
returnval['data'].append(log_data)
return jsonify(returnval), 200
if request.method == 'POST':
if not request.is_json:
return jsonify({
"status": "error",
"message": "missing json"
}), 415
if not CMMeta.query.filter_by(uuid=cm_uuid).first():
return jsonify({
'status': 'error',
'message': 'invalid control module uuid'
}), 404
log_type = request.json.get('log_type')
data = request.json.get('data')
error = False
missing = None
if not log_type:
error = True
missing = "log_type"
if not data:
error = True
missing = "data"
if error:
return jsonify({
"status": "error",
"message": "missing " + missing
}), 422
if not CMLogTypes.query.filter_by(cm_uuid=cm_uuid, log_type=log_type).first():
CMLogTypes.create(cm_uuid, log_type)
return jsonify(CMLog.create(cm_uuid, log_type, request.json.get("data"))), 201 | fa129b78497f44a4781e5fa2103abbb232294a7a | 17,314 |
def RetrieveResiduesNumbers(ResiduesInfo):
"""Retrieve residue numbers."""
# Setup residue IDs sorted by residue numbers...
ResNumMap = {}
for ResName in ResiduesInfo["ResNames"]:
for ResNum in ResiduesInfo["ResNum"][ResName]:
ResNumMap[ResNum] = ResName
ResNumsList = []
if len(ResNumMap):
ResNumsList = sorted(ResNumMap, key = int)
return ResNumsList | e9f522af368a8a058792b26f9cf53b1114e241ef | 17,315 |
def get_data_with_station(station_id):
"""
*** Returns Pandas DataFrame ***
Please Input Station ID: (String)"""
print("\nGETTING DATA FOR STATION: ",station_id)
ftp = FTP('ftp.ncdc.noaa.gov')
ftp.login()
ftp.cwd('pub/data/ghcn/daily/all')
ftp.retrbinary('RETR '+station_id+'.dly', open(station_id+'.dly', 'wb').write)
ftp.quit()
outfile=station_id+".dly"
dt = read_ghcn_data_file(filename=outfile)
dt = dt.rename_axis("DATE", axis="columns")
print('{} STATION DATA IS TAKEN'.format(station_id))
return dt | 093b6f7d88335e3ef591cedee7c362bf3b1468d6 | 17,316 |
def _canonicalize(path):
"""Makes all paths start at top left, and go clockwise first."""
# convert args to floats
path = [[x[0]] + list(map(float, x[1:])) for x in path]
# _canonicalize each subpath separately
new_substructures = []
for subpath in _separate_substructures(path):
leftmost_point, leftmost_idx = _get_leftmost_point(subpath)
reordered = ([['M', leftmost_point[0], leftmost_point[1]]] + subpath[leftmost_idx + 1:] + subpath[1:leftmost_idx + 1])
new_substructures.append((reordered, leftmost_point))
new_path = []
first_substructure_done = False
should_flip_cardinality = False
for sp, _ in sorted(new_substructures, key=lambda x: (x[1][1], x[1][0])):
if not first_substructure_done:
# we're looking at the first substructure now, we can determine whether we
# will flip the cardniality of the whole icon or not
should_flip_cardinality = not _is_clockwise(sp)
first_substructure_done = True
if should_flip_cardinality:
sp = _make_clockwise(sp)
new_path.extend(sp)
# convert args to strs
path = [[x[0]] + list(map(str, x[1:])) for x in new_path]
return path | 3f5aa9a4ac75417935415b5dcc561a1057b465e5 | 17,317 |
from typing import Tuple
def extract_meta(src: bytes) -> Tuple[int, int]:
"""
Return a 2-tuple:
- the length of the decoded block
- the number of bytes that the length header occupied.
"""
v, n = uvarint(src)
if n <= 0 or v > 0xFFFFFFFF:
raise CorruptError
if v > 0x7FFFFFFF:
raise TooLargeError
return v, n | 4bb02fd1c8b9870b450fcbca790fa94870a82cf2 | 17,318 |
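A rough sketch of the uvarint helper assumed above (little-endian base-128 varint, as used by the Snappy framing format); CorruptError and TooLargeError are exception classes defined elsewhere:

from typing import Tuple

def uvarint(src: bytes) -> Tuple[int, int]:
    # Decode an unsigned varint; return (value, number of bytes consumed),
    # or (0, 0) if src ends before the varint is complete.
    result, shift = 0, 0
    for n, byte in enumerate(src, start=1):
        result |= (byte & 0x7F) << shift
        if byte & 0x80 == 0:
            return result, n
        shift += 7
    return 0, 0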
def metric_source_configuration_table(data_model, metric_key, source_key) -> str:
"""Return the metric source combination's configuration as Markdown table."""
configurations = data_model["sources"][source_key].get("configuration", {}).values()
relevant_configurations = [config for config in configurations if metric_key in config["metrics"]]
if not relevant_configurations:
return ""
markdown = markdown_table_header("Configuration", "Value")
for configuration in sorted(relevant_configurations, key=lambda config: str(config["name"])):
name = configuration["name"]
values = ", ".join(sorted(configuration["value"], key=lambda value: value.lower()))
markdown += markdown_table_row(name, values)
markdown += "\n"
return markdown | 718a69df60272b7cdfafdbfeff3136a1aac49707 | 17,319 |
import requests
import json
def search(keyword, limit=20):
"""
Search is the iTunes podcast directory for the given keywords.
Parameter:
keyword = A string containing the keyword to search.
limit: the maximum results to return,
The default is 20 results.
returns:
    A Python dict parsed from the JSON response.
"""
keyword = keyword.replace(' ', '+') # Replace white space with +.
# Set user agent.
user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'
headers = {'User-Agent': user_agent}
# ITunes podcast search URL.
itunesurl = 'https://itunes.apple.com/search?term=%s&country=us&limit=%d&entity=podcast' % (keyword, limit)
req = requests.get(itunesurl, headers=headers)
return json.loads(req.text) | 922cd7dfaea30e7254c459588d28c33673281dac | 17,320 |
def job_checks(name: str):
"""
Check if the job has parameters
and ask to insert them printing
the default value
"""
p = job_parameters(name)
new_param = {}
if p:
ask = Confirm.ask(
f"Job [bold green] {name} [/bold green] has parameters, do you want to insert them?", default=True
)
if ask:
for k, v in p.items():
t = Prompt.ask(f"{k}", default=f"{v}")
new_param[k] = t
return new_param
else:
ask = Confirm.ask(
f"Job [bold green] {name} [/bold green] has no parameters, do you want to proceed?", default=True
)
if ask:
return new_param
else:
exit(0) | 2f64820ec6b180cc6c626fab0616774d7e9086b2 | 17,321 |
import re
def post():
"""Post new message"""
error = None
if request.method == 'POST'\
and request.form['message'] != '' and request.form['message'] is not None:
user_zid = session['logged_in']
post_message = request.form['message']
post_privacy = request.form['post_privacy']
# print('post_privacy: "{}"'.format(post_privacy))
cur_time_txt = time_date2txt()
db = get_db()
db.execute('INSERT INTO POST (zid, time, message, privacy) values (?, ?, ?, ?)',
[user_zid, cur_time_txt, post_message, post_privacy])
db.commit()
for m_zid in set(re.findall(r'z[0-9]{7}', post_message)):
m_user = get_user(zid=m_zid)
if m_user and m_user['email']:
email_subj = '{} Mentioned you in his post!!'.format(g.user['full_name'])
path = url_for('search', _external=True)+'?suggestion={}'.format(m_zid)
print(path)
email_body = 'Check the link to check the post: <a href="{0}">{0}</a>'.format(path)
send_email(m_user['email'], email_subj, email_body)
elif request.form['message'] == '' or request.form['message'] is None:
error = "Post cannot be empty"
return redirect(url_for('index', new_post_error=error)) | b59c5fb30d4b6ce499d0199fb794be38c5c2dfdf | 17,322 |
def data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get(uuid, local_id, topology_uuid, node_uuid, node_edge_point_uuid, connection_end_point_uuid): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_end_pointlocal_id_connection_end_pointtopology_uuidnode_uuidnode_edge_point_uuidconnection_end_point_uuid_get
returns tapi.connectivity.ConnectionEndPointRef # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param local_id: Id of end-point
:type local_id: str
:param topology_uuid: Id of connection-end-point
:type topology_uuid: str
:param node_uuid: Id of connection-end-point
:type node_uuid: str
:param node_edge_point_uuid: Id of connection-end-point
:type node_edge_point_uuid: str
:param connection_end_point_uuid: Id of connection-end-point
:type connection_end_point_uuid: str
:rtype: TapiConnectivityConnectionEndPointRef
"""
return 'do some magic!' | 24fe9a977542f52d8bc8cc765c63ce32882d9f76 | 17,323 |
def softmax(logits):
"""Take the softmax over a set of logit scores.
Args:
logits (np.array): a 1D numpy array
Returns:
a 1D numpy array of probabilities, of the same shape.
"""
if not isinstance(logits, np.ndarray):
logits = np.array(logits) # 1D array
logits = logits - np.max(logits) # re-center
exp_logits = np.exp(logits)
probs = exp_logits / np.sum(exp_logits)
return probs | 7e1897748172e095ac58ce7111bed73caa4e2cb6 | 17,324 |
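A quick check of the expected output (values rounded):

probs = softmax([1.0, 2.0, 3.0])
# probs ≈ [0.0900, 0.2447, 0.6652]; probs.sum() is 1.0 up to floating-point error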