content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import logging
import re
def split_if_then(source_file: str) -> dict:
"""Split a script file into component pieces"""
logging.debug("Splitting '{}' into IF/THEN blocks".format(source_file))
with open(source_file) as f:
source_text = f.read()
logging.debug("Read {} bytes".format(len(source_text)))
r = re.compile(_if_then_regex, flags=re.MULTILINE)
r_or = re.compile(r"OR\((\d+)\)")
r_resp = re.compile(r"RESPONSE #(\d+)")
# Replace all double quotes outside comments with single quotes.
source_text = replace_double_quotes_with_single_outside_comment(
source_text)
count = 0
triggers = []
actions = []
for m in r.finditer(source_text):
count = count + 1
or_count = 0
# Break if conditions into separate lines.
for line in m.group("IF").split('\n'):
line = line.strip()
or_check = r_or.match(line)
if 0 == len(line):
pass
# elif 0 == or_count and "ActionListEmpty()" == line:
# output["ActionListEmpty"] = True
elif or_check:
or_count = int(or_check.group(1))
triggers.append([])
elif or_count > 0:
triggers[-1].append(line)
or_count = or_count - 1
else:
triggers.append(line)
# Break then conditions into separate lines.
action_list = []
response_value = None
for line in m.group("THEN").split('\n'):
line = line.strip()
response_check = r_resp.match(line)
if 0 == len(line):
pass
elif response_check:
if response_value:
actions.append({response_value: action_list})
response_value = response_check.group(1)
action_list = []
else:
action_list.append(line)
if response_value:
actions.append({response_value: action_list})
if count > 1:
raise RuntimeError("IF/THEN Parse found multiple matches in '{}'"
.format(source_file))
# triggers = promote_trigger(triggers, "^HaveSpell")
# triggers = promote_trigger(triggers, "^ActionListEmpty")
result = {"IF": triggers, "THEN": actions}
name = get_name(actions)
if name:
result["name"] = name
return result | 585f5ecfec144116840fa0a0be228cc279482fda | 10,300 |
import re
def id_label_to_project(id_label):
"""
Given a project's id_label, return the project.
"""
match = re.match(r"direct-sharing-(?P<id>\d+)", id_label)
if match:
project = DataRequestProject.objects.get(id=int(match.group("id")))
return project | bd5f322b986776b95a3b3b9203e67c2caaa81c8a | 10,301 |
def payoff_blotto_sign(x, y):
"""
Returns:
(0, 0, 1) -- x wins, y loses;
(0, 1, 0) -- draw;
(1, 0, 0) -- x loses, y wins.
"""
wins, losses = 0, 0
for x_i, y_i in zip(x, y):
if x_i > y_i:
wins += 1
elif x_i < y_i:
losses += 1
if wins > losses:
return (0, 0, 1)
elif wins < losses:
return (1, 0, 0)
return (0, 1, 0) | 5a34ce81fdff8f90ee715d9c82fc55abf7eb2904 | 10,302 |
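A quick usage sketch for `payoff_blotto_sign` with hypothetical Colonel Blotto allocations (the tuples below are illustrative, not from the source):
```python
x = (3, 1, 1)   # player x piles most resources on the first battlefield
y = (1, 2, 2)   # player y spreads across the last two
# x wins battlefield 0 but loses 1 and 2, so y wins overall.
assert payoff_blotto_sign(x, y) == (1, 0, 0)
# Swapping the players mirrors the outcome.
assert payoff_blotto_sign(y, x) == (0, 0, 1)
```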
import pandas as pd
def import_trips(url_path, dl_dir, db_path, taxi_type, nrows=None, usecols=None,
overwrite=False, verbose=0):
"""Downloads, cleans, and imports nyc tlc taxi record files for the
specified taxi type into a sqlite database.
Parameters
----------
url_path : str or None
Path to text file containing nyc tlc taxi record file urls to
download from. Set to None to skip download.
dl_dir : str
Path of directory to download files to or load files from.
db_path : str
Path to sqlite database.
taxi_type : str
Taxi type to create regex for ('fhv', 'green', 'yellow', or 'all').
nrows : int or None
Number of rows to read. Set to None to read all rows.
usecols : list
List of column names to include. Specify columns names as strings.
Column names can be entered based on names found in original tables
for the year specified or names found in the trips table. Set to None to
read all columns.
overwrite : bool
Defines whether or not to overwrite existing database tables.
verbose : int
Defines verbosity for output statements.
Returns
-------
dl_num : int
Number of files downloaded.
import_num : int
Number of files imported into database.
"""
# download taxi record files
if url_path:
dl_num = dl_urls(url_path, dl_dir, taxi_type, verbose=verbose)
else:
dl_num = 0
# get taxi record files
files = get_regex_files(dl_dir, taxi_regex_patterns(taxi_type),
verbose=verbose)
# create trips table (if needed)
create_sql = """
CREATE TABLE IF NOT EXISTS trips (
trip_id INTEGER PRIMARY KEY,
taxi_type INTEGER,
vendor_id INTEGER,
pickup_datetime TEXT,
dropoff_datetime TEXT,
passenger_count INTEGER,
trip_distance REAL,
pickup_longitude REAL,
pickup_latitude REAL,
pickup_location_id INTEGER,
dropoff_longitude REAL,
dropoff_latitude REAL,
dropoff_location_id INTEGER,
trip_duration REAL,
trip_pace REAL,
trip_straightline REAL,
trip_windingfactor REAL
); """
indexes = ['CREATE INDEX IF NOT EXISTS trips_pickup_datetime ON trips '
'(pickup_datetime);']
create_table(db_path, 'trips', create_sql, indexes=indexes,
overwrite=overwrite, verbose=verbose)
# load, clean, and import taxi files into table
import_num = 0
for file in files:
if verbose >= 1:
output('Started importing ' + file + '.')
if taxi_type == 'fhv':
df = pd.DataFrame({'taxi_type': []})
elif taxi_type == 'green':
df = pd.DataFrame({'taxi_type': []})
elif taxi_type == 'yellow':
df, year, month = load_yellow(dl_dir + file, nrows=nrows,
usecols=usecols, verbose=verbose)
df = clean_yellow(df, year, month, verbose=verbose)
import_num += 1
else:
output('Unknown taxi_type.', fn_str='import_trips')
df = pd.DataFrame({'taxi_type': []})
df_to_table(db_path, df, table='trips', overwrite=False,
verbose=verbose)
if verbose >= 1:
output('Imported ' + file + '.')
output('Finished importing ' + str(import_num) + ' files.')
return dl_num, import_num | 8e017873a1b17f493ee5b2df4457690a2fbc7b63 | 10,303 |
def householder(h_v: Vector) -> Matrix:
"""Get Householder transformation Matrix"""
return Matrix.identity(h_v.size()).subtract(2 * h_v * h_v.transpose() / (h_v * h_v)) | 686e8088eff5bf1b14e1438f0668a865a59a13d4 | 10,304 |
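For reference, the reflector this one-liner builds is the standard Householder matrix (written here as plain math, independent of the custom `Matrix`/`Vector` classes):
```latex
H = I - \frac{2\, v v^{\mathsf{T}}}{v^{\mathsf{T}} v}
```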
import tensorflow as tf
def _suppression_loop_body(boxes, iou_threshold, output_size, idx):
"""Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).
Args:
boxes: a tensor with a shape of [batch_size, anchors, 4].
iou_threshold: a float representing the threshold for deciding whether boxes
overlap too much with respect to IOU.
output_size: an int32 tensor of size [batch_size]. Representing the number
of selected boxes for each batch.
idx: an integer scalar representing induction variable.
Returns:
boxes: updated boxes.
iou_threshold: pass down iou_threshold to the next iteration.
output_size: the updated output_size.
idx: the updated induction variable.
"""
num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE
batch_size = tf.shape(boxes)[0]
# Iterates over tiles that can possibly suppress the current tile.
box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],
[batch_size, _NMS_TILE_SIZE, 4])
_, box_slice, _, _ = tf.while_loop(
lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,
_cross_suppression, [boxes, box_slice, iou_threshold,
tf.constant(0)])
# Iterates over the current tile to compute self-suppression.
iou = box_utils.bbox_overlap(box_slice, box_slice)
mask = tf.expand_dims(
tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(
tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)
iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)
suppressed_iou, _, _, _ = tf.while_loop(
lambda _iou, loop_condition, _iou_sum, _: loop_condition,
_self_suppression,
[iou, tf.constant(True), tf.reduce_sum(iou, [1, 2]), iou_threshold])
suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0
box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)
# Uses box_slice to update the input boxes.
mask = tf.reshape(
tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])
boxes = tf.tile(tf.expand_dims(
box_slice, [1]), [1, num_tiles, 1, 1]) * mask + tf.reshape(
boxes, [batch_size, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)
boxes = tf.reshape(boxes, [batch_size, -1, 4])
# Updates output_size.
output_size += tf.reduce_sum(
tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])
return boxes, iou_threshold, output_size, idx + 1 | 4adcb176d8d7269bded2b684d830a90a371d0df5 | 10,305 |
from typing import Iterable
import hashlib
import json
def calculate_invalidation_digest(requirements: Iterable[str]) -> str:
"""Returns an invalidation digest for the given requirements."""
m = hashlib.sha256()
inputs = {
# `FrozenOrderedSet` deduplicates while keeping ordering, which speeds up the sorting if
# the input was already sorted.
"requirements": sorted(FrozenOrderedSet(requirements)),
}
m.update(json.dumps(inputs).encode("utf-8"))
return m.hexdigest() | 1fb2f014ad0b0ea98031d022ed02942e1b6ac1d0 | 10,306 |
def to_base_str(n, base):
"""Converts a number n into base `base`."""
convert_string = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
if n < base:
return convert_string[n]
else:
return to_base_str(n // base, base) + convert_string[n % base] | bc137d41c9543ef1a201f4bb14234fa277067a77 | 10,307 |
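A few illustrative calls (digits beyond 9 map to uppercase letters, so the largest supported base is 36):
```python
assert to_base_str(255, 16) == "FF"
assert to_base_str(10, 2) == "1010"
assert to_base_str(35, 36) == "Z"
```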
def number_of_photons(i,n=6):
"""Check if number of photons in a sample is higher than n (default value is 6)"""
bitstring = tuple(i)
if sum(bitstring) > n:
return True
else:
return False | 6c7cfea354aa4948d2c94469708f250e6d5b659d | 10,308 |
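A short usage sketch with made-up bitstring samples:
```python
# Seven photons exceeds the default threshold of six.
assert number_of_photons((1, 1, 1, 1, 1, 1, 1)) is True
# Three photons exceeds a lowered threshold of two ...
assert number_of_photons((1, 0, 1, 0, 1, 0), n=2) is True
# ... but not the default of six.
assert number_of_photons([1, 1, 1]) is False
```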
from typing import List
from typing import Optional
from typing import Dict
from typing import Text
def _build_conflicts_from_states(
trackers: List[TrackerWithCachedStates],
domain: Domain,
max_history: Optional[int],
conflicting_state_action_mapping: Dict[int, Optional[List[Text]]],
tokenizer: Optional[Tokenizer] = None,
) -> List["StoryConflict"]:
"""Builds a list of `StoryConflict` objects for each given conflict.
Args:
trackers: Trackers that contain the states.
domain: The domain object.
max_history: Number of turns to take into account for the state descriptions.
conflicting_state_action_mapping: A dictionary mapping state-hashes to a list
of actions that follow from each state.
tokenizer: A tokenizer to tokenize the user messages.
Returns:
A list of `StoryConflict` objects that describe inconsistencies in the story
structure. These objects also contain the history that leads up to the conflict.
"""
# Iterate once more over all states and note the (unhashed) state,
# for which a conflict occurs
conflicts = {}
for element in _sliced_states_iterator(trackers, domain, max_history, tokenizer):
hashed_state = element.sliced_states_hash
if hashed_state in conflicting_state_action_mapping:
if hashed_state not in conflicts:
conflicts[hashed_state] = StoryConflict(element.sliced_states)
conflicts[hashed_state].add_conflicting_action(
action=str(element.event), story_name=element.tracker.sender_id
)
# Return list of conflicts that arise from unpredictable actions
# (actions that start the conversation)
return [
conflict
for (hashed_state, conflict) in conflicts.items()
if conflict.conflict_has_prior_events
] | 651a200dfd94518b4eb9da76d915794a761c9777 | 10,309 |
def inception_d(input_layer, nfilt):
# Corresponds to a modified version of figure 10 in the paper
"""
Parameters
----------
input_layer :
nfilt :
Returns
-------
"""
l1 = bn_conv(input_layer, num_filters=nfilt[0][0], filter_size=1)
l1 = bn_conv(l1, num_filters=nfilt[0][1], filter_size=3, stride=2)
l2 = bn_conv(input_layer, num_filters=nfilt[1][0], filter_size=1)
l2 = bn_conv(l2, num_filters=nfilt[1][1], filter_size=(1, 7), pad=(0, 3))
l2 = bn_conv(l2, num_filters=nfilt[1][2], filter_size=(7, 1), pad=(3, 0))
l2 = bn_conv(l2, num_filters=nfilt[1][3], filter_size=3, stride=2)
l3 = Pool2DLayer(input_layer, pool_size=3, stride=2)
return ConcatLayer([l1, l2, l3]) | cb1a192ef210239c0eafe944126eb965dbcf6357 | 10,310 |
def generate_response(response, output):
"""
:param response: command response object or dict
:param output: raw command output to include in the result
:return: dictionary with status, command and output
"""
status, command = None, None
if isinstance(response, dict):
status = response.get('ok', None)
command = response.get('command', None)
elif isinstance(response, object):
status = getattr(response, 'ok', None)
command = getattr(response, 'command', None)
return {
'status': 'successful' if status else 'failed',
'command': command,
'output': output
} | ea8764dd3e8f0205a0ec1dd278164140a414dadc | 10,311 |
def check_add_predecessor(data_predecessor_str_set, xml_data_list, xml_chain_list, output_xml):
"""
Check if each string in data_predecessor_str_set is corresponding to an actual Data object,
create new [Data, predecessor] objects lists for object's type : Data.
Send lists to add_predecessor() to write them within xml and then returns update_list from it.
Parameters:
data_predecessor_str_set ([str]) : Lists of string from jarvis cell
xml_data_list ([Data]) : Data list from xml parsing
xml_chain_list ([View]) : View list from xml parsing
output_xml (GenerateXML object) : XML's file object
Returns:
update ([0/1]) : 1 if update, else 0
"""
data_predecessor_list = []
allocated_item_list = []
# Filter input string
data_predecessor_str_list = shared_orchestrator.cut_string_list(data_predecessor_str_set)
# Create data names list already in xml
xml_data_name_list = get_objects_names(xml_data_list)
is_elem_found = False
for elem in data_predecessor_str_list:
is_elem_found = True
if elem[0] not in xml_data_name_list:
is_elem_found = False
if elem[1] not in xml_data_name_list:
print(f"{elem[0]} and {elem[1]} do not exist")
else:
print(f"{elem[0]} does not exist")
if elem[0] in xml_data_name_list:
if elem[1] not in xml_data_name_list:
is_elem_found = False
print(f"{elem[1]} does not exist")
if is_elem_found:
for d, p in data_predecessor_str_list:
predecessor = None
selected_data = None
existing_predecessor_id_list = []
for data in xml_data_list:
if d == data.name:
selected_data = data
for existing_predecessor in data.predecessor_list:
existing_predecessor_id_list.append(existing_predecessor.id)
for da in xml_data_list:
if p == da.name and da.id not in existing_predecessor_id_list:
predecessor = da
if predecessor is not None and selected_data is not None:
data_predecessor_list.append([selected_data, predecessor])
allocation_chain_1 = shared_orchestrator.check_add_allocated_item(d,
xml_data_list,
xml_chain_list)
if allocation_chain_1:
allocated_item_list.append(allocation_chain_1)
allocation_chain_2 = shared_orchestrator.check_add_allocated_item(p,
xml_data_list,
xml_chain_list)
if allocation_chain_2:
allocated_item_list.append(allocation_chain_2)
update = add_predecessor(data_predecessor_list, xml_data_list, output_xml)
shared_orchestrator.add_allocation({5: allocated_item_list}, output_xml)
return update | d6bef7af0e32202825705ac36c3b4f09b3aa62ce | 10,312 |
def try_wrapper(func, *args, ret_=None, msg_="", verbose_=True, **kwargs):
"""Wrap ``func(*args, **kwargs)`` with ``try-`` and ``except`` blocks.
Args:
func (functions) : functions.
args (tuple) : ``*args`` for ``func``.
kwargs (kwargs) : ``*kwargs`` for ``func``.
ret_ (any) : default ret val.
msg_ (str) : message to print.
verbose_ (bool) : Whether to print message or not. (default= ``True``)
Examples:
>>> from gummy.utils import try_wrapper
>>> ret = try_wrapper(lambda x,y: x/y, 1, 2, msg_="divide")
* Succeeded to divide
>>> ret
0.5
>>> ret = try_wrapper(lambda x,y: x/y, 1, 0, msg_="divide")
* Failed to divide (ZeroDivisionError: division by zero)
>>> ret is None
True
>>> ret = try_wrapper(lambda x,y: x/y, 1, 0, ret_=1, msg_="divide")
* Failed to divide (ZeroDivisionError: division by zero)
>>> ret is None
False
>>> ret
1
"""
try:
ret_ = func(*args, **kwargs)
prefix = toGREEN("Succeeded to ")
suffix = ""
except Exception as e:
e.__class__.__name__
prefix = toRED("Failed to ")
suffix = f" ({toRED(e.__class__.__name__)}: {toACCENT(e)})"
if verbose_: print("* " + prefix + msg_ + suffix)
return ret_ | 6098c966deffd5ac5ae4bba84219088b2052d878 | 10,313 |
import tensorflow as tf
def hnet_bsd(args, x, train_phase):
"""High frequency convolutions are unstable, so get rid of them"""
# Sure layers weight & bias
order = 1
nf = int(args.n_filters)
nf2 = int((args.filter_gain)*nf)
nf3 = int((args.filter_gain**2)*nf)
nf4 = int((args.filter_gain**3)*nf)
bs = args.batch_size
fs = args.filter_size
nch = args.n_channels
nr = args.n_rings
tp = train_phase
std = args.std_mult
x = tf.reshape(x, shape=[bs,args.height,args.width,1,1,3])
fm = {}
# Convolutional Layers
with tf.name_scope('stage1') as scope:
cv1 = hl.conv2d(x, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_1')
cv1 = hl.non_linearity(cv1, name='1_1')
cv2 = hl.conv2d(cv1, nf, fs, stddev=std, padding='SAME', n_rings=nr, name='1_2')
cv2 = hl.batch_norm(cv2, tp, name='bn1')
mags = to_4d(hl.stack_magnitudes(cv2))
fm[1] = linear(mags, 1, 1, name='sw1')
with tf.name_scope('stage2') as scope:
cv3 = hl.mean_pooling(cv2, ksize=(1,2,2,1), strides=(1,2,2,1))
cv3 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_1')
cv3 = hl.non_linearity(cv3, name='2_1')
cv4 = hl.conv2d(cv3, nf2, fs, stddev=std, padding='SAME', n_rings=nr, name='2_2')
cv4 = hl.batch_norm(cv4, train_phase, name='bn2')
mags = to_4d(hl.stack_magnitudes(cv4))
fm[2] = linear(mags, 1, 1, name='sw2')
with tf.name_scope('stage3') as scope:
cv5 = hl.mean_pooling(cv4, ksize=(1,2,2,1), strides=(1,2,2,1))
cv5 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_1')
cv5 = hl.non_linearity(cv5, name='3_1')
cv6 = hl.conv2d(cv5, nf3, fs, stddev=std, padding='SAME', n_rings=nr, name='3_2')
cv6 = hl.batch_norm(cv6, train_phase, name='bn3')
mags = to_4d(hl.stack_magnitudes(cv6))
fm[3] = linear(mags, 1, 1, name='sw3')
with tf.name_scope('stage4') as scope:
cv7 = hl.mean_pooling(cv6, ksize=(1,2,2,1), strides=(1,2,2,1))
cv7 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_1')
cv7 = hl.non_linearity(cv7, name='4_1')
cv8 = hl.conv2d(cv7, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='4_2')
cv8 = hl.batch_norm(cv8, train_phase, name='bn4')
mags = to_4d(hl.stack_magnitudes(cv8))
fm[4] = linear(mags, 1, 1, name='sw4')
with tf.name_scope('stage5') as scope:
cv9 = hl.mean_pooling(cv8, ksize=(1,2,2,1), strides=(1,2,2,1))
cv9 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_1')
cv9 = hl.non_linearity(cv9, name='5_1')
cv10 = hl.conv2d(cv9, nf4, fs, stddev=std, padding='SAME', n_rings=nr, name='5_2')
cv10 = hl.batch_norm(cv10, train_phase, name='bn5')
mags = to_4d(hl.stack_magnitudes(cv10))
fm[5] = linear(mags, 1, 1, name='sw5')
fms = {}
side_preds = []
xsh = tf.shape(x)
with tf.name_scope('fusion') as scope:
for key in fm.keys():
fms[key] = tf.image.resize_images(fm[key], tf.stack([xsh[1], xsh[2]]))
side_preds.append(fms[key])
side_preds = tf.concat(axis=3, values=side_preds)
fms['fuse'] = linear(side_preds, 1, 1, bias_init=0.01, name='side_preds')
return fms | 0c44775d5d342b73975cd7ffd00971d4f98fb4aa | 10,314 |
import numpy
def hdrValFilesToTrainingData(input_filebase: str, target_varname: str):
"""Extracts useful info from input_filebase.hdr and input_filebase.val
Args:
input_filebase -- points to two files
target_varname -- this will be the y, and the rest will be the X
Returns:
Xy: 2d array [#vars][#samples] -- transpose of the data from .val file
X: 2d array [#full_input_vars][#samples] -- Xy, except y
y: 1d array [#samples] -- the vector in Xy corr. to target_varname
all_varnames: List[str] -- essentially what .hdr file holds
input_varnames: List[str] -- all_varnames, minus target_varname
"""
# retrieve varnames
all_varnames = asciiRowToStrings(input_filebase + ".hdr")
# split apart input and output labels
x_rows, y_rows, input_varnames = [], [], []
for (row, varname) in enumerate(all_varnames):
if varname == target_varname:
y_rows.append(row)
else:
x_rows.append(row)
input_varnames.append(varname)
assert len(y_rows) == 1, "expected to find one and only one '%s', not: %s" % (
target_varname,
all_varnames,
)
# split apart input and output data
Xy_tr = asciiTo2dArray(input_filebase + ".val")
Xy = numpy.transpose(Xy_tr)
X = numpy.take(Xy, x_rows, 0)
y = numpy.take(Xy, y_rows, 0)[0]
assert X.shape[0] + 1 == Xy.shape[0] == len(input_varnames) + 1 == len(all_varnames)
assert X.shape[1] == Xy.shape[1] == len(y)
return Xy, X, y, all_varnames, input_varnames | de1427340dfed2dc36cdd05f43841399f54ac6a0 | 10,315 |
import pandas as pd
def create_column(number_rows: int, column_type: ColumnType) -> pd.Series:
"""Creates a column with either duplicated values or not, and either of string or int type.
:param number_rows: the number of rows in the data-frame.
:param column_type: the type of the column.
:returns: the data-frame.
"""
if column_type == ColumnType.UNIQUE_STRING:
return pd.Series(range(number_rows)).astype(str)
elif column_type == ColumnType.UNIQUE_INT:
return pd.Series(range(number_rows))
elif column_type == ColumnType.WITH_DUPLICATES_STRING:
return pd.Series(["a"] * number_rows)
elif column_type == ColumnType.WITH_DUPLICATES_INT:
return pd.Series([2] * number_rows)
else:
raise ValueError(f"Unknown column-type: {column_type}") | 0280f914960b222d589ae13b648339f1ff7ae562 | 10,316 |
import os
def abspath(url):
"""
Get a full path to a file or file URL
See os.abspath
"""
if url.startswith('file://'):
url = url[len('file://'):]
return os.path.abspath(url) | 5c739b7894b4b6d3aabbce9813ce27c72eea6f5d | 10,317 |
import ctypes
def PumpEvents(timeout=-1, hevt=None, cb=None):
"""This following code waits for 'timeout' seconds in the way
required for COM, internally doing the correct things depending
on the COM appartment of the current thread. It is possible to
terminate the message loop by pressing CTRL+C, which will raise
a KeyboardInterrupt.
"""
# XXX Should there be a way to pass additional event handles which
# can terminate this function?
# XXX XXX XXX
#
# It may be that I misunderstood the CoWaitForMultipleHandles
# function. Is a message loop required in a STA? Seems so...
#
# MSDN says:
#
# If the caller resides in a single-thread apartment,
# CoWaitForMultipleHandles enters the COM modal loop, and the
# thread's message loop will continue to dispatch messages using
# the thread's message filter. If no message filter is registered
# for the thread, the default COM message processing is used.
#
# If the calling thread resides in a multithread apartment (MTA),
# CoWaitForMultipleHandles calls the Win32 function
# MsgWaitForMultipleObjects.
# Timeout expected as float in seconds - *1000 to milliseconds
# timeout = -1 -> INFINITE 0xFFFFFFFF;
# It can also be a callable which should return an amount in seconds
if hevt is None:
hevt = ctypes.windll.kernel32.CreateEventA(None, True, False, None)
handles = _handles_type(hevt)
RPC_S_CALLPENDING = -2147417835
# @ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)
def HandlerRoutine(dwCtrlType):
if dwCtrlType == 0: # CTRL+C
ctypes.windll.kernel32.SetEvent(hevt)
return 1
return 0
HandlerRoutine = (
ctypes.WINFUNCTYPE(ctypes.c_int, ctypes.c_uint)(HandlerRoutine)
)
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 1)
while True:
try:
tmout = timeout() # check if it's a callable
except TypeError:
tmout = timeout # it seems to be a number
if tmout > 0:
tmout *= 1000
tmout = int(tmout)
try:
res = ctypes.oledll.ole32.CoWaitForMultipleHandles(
0, # COWAIT_FLAGS
int(tmout), # dwtimeout
len(handles), # number of handles in handles
handles, # handles array
# pointer to indicate which handle was signaled
ctypes.byref(ctypes.c_ulong())
)
except WindowsError as details:
if details.args[0] == RPC_S_CALLPENDING: # timeout expired
if cb is not None:
cb()
continue
else:
ctypes.windll.kernel32.CloseHandle(hevt)
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
raise # something else happened
else:
ctypes.windll.kernel32.CloseHandle(hevt)
ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
raise KeyboardInterrupt
# finally:
# if False:
# ctypes.windll.kernel32.CloseHandle(hevt)
# ctypes.windll.kernel32.SetConsoleCtrlHandler(HandlerRoutine, 0)
# break | 4b24bfc7e68b0953c98b507e6ba5176e4b060011 | 10,318 |
def check_mix_up(method):
"""Wrapper method to check the parameters of mix up."""
@wraps(method)
def new_method(self, *args, **kwargs):
[batch_size, alpha, is_single], _ = parse_user_args(method, *args, **kwargs)
check_value(batch_size, (1, FLOAT_MAX_INTEGER))
check_positive(alpha, "alpha")
type_check(is_single, (bool,), "is_single")
return method(self, *args, **kwargs)
return new_method | 528c30d73df3a26d8badc6df17508c6c2e6f69ae | 10,319 |
import os
import pybedtools
def generate_motif_distances(cluster_regions, region_sizes, motifs, motif_location, species):
"""
Generates all motif distances for a lsit of motifs
returns list[motif_distances]
motif_location - str location that motifs are stored
species - str species (for finding stored motifs)
motifs - list of motifs to analyze
cluster_regions - dict from parse clusters
"""
motif_distance_list = []
#given a specific motif in a motif file generate distances from that motif...?
for motif in motifs:
mf = "motif_" + motif + ".BED"
mfgz = "motif_" + motif + ".BED.gz"
motif_tool = None
if os.path.exists(os.path.join(motif_location, species, mf)):
motif_tool = pybedtools.BedTool(os.path.join(motif_location, species, mf))
elif os.path.exists(os.path.join(motif_location, species, mfgz)):
motif_tool = pybedtools.BedTool(os.path.join(motif_location, species, mfgz))
else:
print "MOTIF BED FILE for motif: %s is not available, please build it" % (mf)
if motif_tool is not None:
motif_distance_list.append(calculate_motif_distance(cluster_regions, region_sizes, motif_tool))
return motif_distance_list | 4694db72aaf550cac210387e50187fe910e21bf3 | 10,320 |
def sig_to_vrs(sig):
""" Split a signature into r, s, v components """
r = sig[:32]
s = sig[32:64]
v = int(encode_hex(sig[64:66]), 16)
# Ethereum magic number
if v in (0, 1):
v += 27
return [r, s, v] | 48884e4718de7bdda0527470d4e608e8f6b563b8 | 10,321 |
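A rough usage sketch, assuming a 65-byte `r || s || v` signature and that `encode_hex` is an Ethereum-style helper that hex-encodes bytes; the signature bytes here are dummies:
```python
sig = bytes(32) + bytes(32) + b"\x00"   # dummy r, s and a one-byte recovery id of 0
r, s, v = sig_to_vrs(sig)
# r and s are the raw 32-byte halves; a v of 0 or 1 is shifted to 27/28 by the Ethereum magic number.
```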
def index_to_string_table_from_tensor(mapping, default_value="UNK", name=None):
"""Returns a lookup table that maps a `Tensor` of indices into strings.
This operation constructs a lookup table to map int64 indices into string
values. The mapping is initialized from a string `mapping` 1-D `Tensor` where
each element is a value and the corresponding index within the tensor is the
key.
Any input which does not have a corresponding index in 'mapping'
(an out-of-vocabulary entry) is assigned the `default_value`
The underlying table must be initialized by calling
`tf.tables_initializer.run()` or `table.init.run()` once.
Elements in `mapping` cannot have duplicates, otherwise when executing the
table initializer op, it will throw a `FailedPreconditionError`.
Sample Usages:
```python
mapping_string = tf.constant(["emerson", "lake", "palmer"])
indices = tf.constant([1, 5], tf.int64)
table = tf.contrib.lookup.index_to_string_table_from_tensor(
mapping_string, default_value="UNKNOWN")
values = table.lookup(indices)
...
tf.tables_initializer().run()
values.eval() ==> ["lake", "UNKNOWN"]
```
Args:
mapping: A 1-D string `Tensor` that specifies the strings to map from
indices.
default_value: The value to use for out-of-vocabulary indices.
name: A name for this op (optional).
Returns:
The lookup table to map a string values associated to a given index `int64`
`Tensors`.
Raises:
ValueError: when `mapping` is not set.
"""
if mapping is None:
raise ValueError("mapping must be specified.")
return lookup_ops.index_to_string_table_from_tensor(
vocabulary_list=mapping, default_value=default_value, name=name) | 22ba72e1fe6ab28c5eb16a32152de4094d8d2e73 | 10,322 |
def test_profile_reader_no_aws_config(monkeypatch, tmp_path, capsys):
"""Test profile reader without aws config file."""
fake_get_path_called = 0
def fake_get_path():
nonlocal fake_get_path_called
fake_get_path_called += 1
return tmp_path
monkeypatch.setattr(awsswitch, "get_path", fake_get_path)
awsswitch.app()
assert fake_get_path_called == 1
captured = capsys.readouterr()
output = captured.out.split("\n")
assert output[0] == "AWS profile switcher"
err = captured.err.split("\n")
assert err[0] == "AWS config path does not exist." | 549e2f08cfac1f3f579906a80f3853cba8d2501e | 10,323 |
def get_exclusion_type(exclusion):
"""
Utility function to get an exclusion's type object by finding the exclusion type that has the given
exclusion's code.
:param exclusion: The exclusion to find the type for.
:return: The exclusion type if found, None otherwise.
"""
for exclusion_type in EXCLUSION_TYPES:
if exclusion_type.code == exclusion.code:
return exclusion_type
return None | 778efc4cbd6481ae25f76985f30aa593e1e786fa | 10,324 |
def generatePlans(update):
"""
For an update object provided this function references the updateModuleList which lets all exc
modules determine if they need to add functions to change the state of the system when new
chutes are added to the OS.
Returns: True on error, meaning we should stop with this update plan
"""
out.header('Generating %r\n' % (update))
# Iterate through the list provided for this update type
for mod in update.updateModuleList:
if(mod.generatePlans(update)):
return True | d764b2b18cebb81c450ef54aaa1a8b6893ec16c8 | 10,325 |
from typing import Dict
def get_str_by_path(payload: Dict, path: str) -> str:
"""Return the string value from the dict for the path using dpath library."""
if payload is None:
return None
try:
raw = dpath_util.get(payload, path)
return str(raw) if raw is not None else raw
except (IndexError, KeyError, TypeError):
return None | bf8077838c2dd2278cd9209b6c560271faaa78cb | 10,326 |
import requests
import json
def get_token_from_code(request):
"""
Get authorization code the provider sent back to you
Find out what URL to hit to get tokens that allow you to ask for
things on behalf of a user.
Prepare and send a request to get tokens.
Parse the tokens using the OAuth 2 client
"""
code = request.args.get("code")
redirect_uri = request.args.get("redirect_uri")
provider_cfg = requests.get(DISCOVERY_URL).json()
token_endpoint = provider_cfg["token_endpoint"]
token_url, headers, body = client.prepare_token_request(
token_endpoint,
authorization_response=request.url,
redirect_url=redirect_uri,
code=code,
include_client_id=False,
)
token_response = requests.post(
token_url,
headers=headers,
data=body,
auth=(CLIENT_ID, SECRET),
)
token_response = token_response.json()
client.parse_request_body_response(json.dumps(token_response))
return token_response | 00edc369150ddb18023799768c4843333079944e | 10,327 |
def DeWeStartCAN(nBoardNo, nChannelNo):
"""Dewe start CAN"""
if f_dewe_start_can is not None:
return f_dewe_start_can(c_int(nBoardNo), c_int(nChannelNo))
else:
return -1 | f810bb73152899fbfbb89caccec469a647c90223 | 10,328 |
def mw_wo_sw(mol, ndigits=2):
"""Molecular weight without salt and water
:param ndigits: number of digits
"""
cp = clone(mol) # Avoid modification of original object
remover.remove_water(cp)
remover.remove_salt(cp)
return round(sum(a.mw() for _, a in cp.atoms_iter()), ndigits) | 32b83d5e74eec3fdc4d18012dc29ed5e3d85edf3 | 10,329 |
def get_contact_list_info(contact_list):
"""
Get contact list info out of contact list
In rgsummary, this looks like:
<ContactLists>
<ContactList>
<ContactType>Administrative Contact</ContactType>
<Contacts>
<Contact>
<Name>Matyas Selmeci</Name>
...
</Contact>
</Contacts>
</ContactList>
...
</ContactLists>
and the arg `contact_list` is the contents of a single <ContactList>
If vosummary, this looks like:
<ContactTypes>
<ContactType>
<Type>Miscellaneous Contact</Type>
<Contacts>
<Contact>
<Name>...</Name>
...
</Contact>
...
</Contacts>
</ContactType>
...
</ContactTypes>
and the arg `contact_list` is the contents of <ContactTypes>
Returns: a list of dicts that each look like:
{ 'ContactType': 'Administrative Contact',
'Name': 'Matyas Selmeci',
'Email': '...',
...
}
"""
contact_list_info = []
for contact in contact_list:
if contact.tag == 'ContactType' or contact.tag == 'Type':
contact_list_type = contact.text.lower()
if contact.tag == 'Contacts':
for con in contact:
contact_info = { 'ContactType' : contact_list_type }
for contact_contents in con:
contact_info[contact_contents.tag] = contact_contents.text
contact_list_info.append(contact_info)
return contact_list_info | 18d82190ad971b2a2cabb60706fc1486a91a32a5 | 10,330 |
import requests
def enableLegacyLDAP(host, args, session):
"""
Called by the ldap function. Configures LDAP on legacy systems.
@param host: string, the hostname or IP address of the bmc
@param args: contains additional arguments used by the ldap subcommand
@param session: the active session to use
@param args.json: boolean, if this flag is set to true, the output will
be provided in json format for programmatic consumption
"""
url='https://'+host+'/xyz/openbmc_project/user/ldap/action/CreateConfig'
scope = {
'sub' : 'xyz.openbmc_project.User.Ldap.Create.SearchScope.sub',
'one' : 'xyz.openbmc_project.User.Ldap.Create.SearchScope.one',
'base': 'xyz.openbmc_project.User.Ldap.Create.SearchScope.base'
}
serverType = {
'ActiveDirectory' : 'xyz.openbmc_project.User.Ldap.Create.Type.ActiveDirectory',
'OpenLDAP' : 'xyz.openbmc_project.User.Ldap.Create.Type.OpenLdap'
}
data = {"data": [args.uri, args.bindDN, args.baseDN, args.bindPassword, scope[args.scope], serverType[args.serverType]]}
try:
res = session.post(url, headers=jsonHeader, json=data, verify=False, timeout=baseTimeout)
except(requests.exceptions.Timeout):
return(connectionErrHandler(args.json, "Timeout", None))
except(requests.exceptions.ConnectionError) as err:
return connectionErrHandler(args.json, "ConnectionError", err)
return res.text | 7ae9763930c6a0de29f8eac032b3e58d5cd64791 | 10,331 |
from typing import List
from typing import Dict
from typing import OrderedDict
import mysql.connector
def retrieve_panelist_appearance_counts(panelist_id: int,
database_connection: mysql.connector.connect
) -> List[Dict]:
"""Retrieve yearly apperance count for the requested panelist ID"""
cursor = database_connection.cursor()
query = ("SELECT YEAR(s.showdate) AS year, COUNT(p.panelist) AS count "
"FROM ww_showpnlmap pm "
"JOIN ww_shows s ON s.showid = pm.showid "
"JOIN ww_panelists p ON p.panelistid = pm.panelistid "
"WHERE pm.panelistid = %s AND s.bestof = 0 "
"AND s.repeatshowid IS NULL "
"GROUP BY p.panelist, YEAR(s.showdate) "
"ORDER BY p.panelist ASC, YEAR(s.showdate) ASC")
cursor.execute(query, (panelist_id, ))
result = cursor.fetchall()
cursor.close()
if not result:
return None
appearances = OrderedDict()
total_appearances = 0
for row in result:
appearances[row[0]] = row[1]
total_appearances += row[1]
appearances["total"] = total_appearances
return appearances | 27a5ccb192cf55714fed8316d647f53bce0ffbb2 | 10,332 |
import datetime
import matplotlib.pyplot as plt
def chart(
symbols=("AAPL", "GLD", "GOOG", "$SPX", "XOM", "msft"),
start=datetime.datetime(2008, 1, 1),
end=datetime.datetime(2009, 12, 31), # data stops at 2013/1/1
normalize=True,
):
"""Display a graph of the price history for the list of ticker symbols provided
Arguments:
symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
start (datetime): The date at the start of the period being analyzed.
end (datetime): The date at the end of the period being analyzed.
normalize (bool): Whether to normalize prices to 1 at the start of the time series.
"""
start = util.normalize_date(start or datetime.date(2008, 1, 1))
end = util.normalize_date(end or datetime.date(2009, 12, 31))
symbols = [s.upper() for s in symbols]
timeofday = datetime.timedelta(hours=16)
timestamps = du.getNYSEdays(start, end, timeofday)
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = da.get_data(timestamps, symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
na_price = d_data['close'].values
if normalize:
na_price /= na_price[0, :]
plt.clf()
plt.plot(timestamps, na_price)
plt.legend(symbols)
plt.ylabel('Adjusted Close')
plt.xlabel('Date')
plt.savefig('chart.pdf', format='pdf')
plt.grid(True)
plt.show()
return na_price | 2b4272daa353e21c7f1927e1d8ad68b6c184d97b | 10,333 |
import numpy as np
def create_table(peak_id, chrom, pstart, pend, p_center, min_dist_hit, attrib_keys, min_pos, genom_loc, ovl_pf, ovl_fp, i):
"""Saves info of the hit in a tabular form to be written in the output table. """
if attrib_keys != ["None"]:
# extract min_dist_content
[dist, [feat, fstart, fend, strand, attrib_val]] = min_dist_hit
# attrib_val.strip("\r").strip("\t").strip("\n")
dist = max(dist) if isinstance(dist, list) else dist
dist = '%d' % round(dist, 1)
best_res = "\t".join(np.hstack([peak_id, chrom, pstart, p_center, pend, feat, fstart,
fend, strand, min_pos, dist, genom_loc, str(ovl_pf), str(ovl_fp), attrib_val, str(i)]))
return best_res + "\n"
elif attrib_keys == ["None"]:
[dist, [feat, fstart, fend, strand]] = min_dist_hit
dist = max(dist) if isinstance(dist, list) else dist
dist = '%d' % round(dist, 1)
best_res = "\t".join([peak_id, chrom, pstart, p_center, pend, feat, fstart,
fend, strand, min_pos, dist, genom_loc, str(ovl_pf), str(ovl_fp), str(i)])
return best_res + "\n" | 476f0e272fe604c5254b68e93fa059f2ae942b2d | 10,334 |
def _get_tests(tier):
"""Return a generator of test functions."""
return TEST_TIERS[tier] | 364b263f2dc64b375092de6f2de9e771dbc020c2 | 10,335 |
from itertools import chain
def get_first_of_iterable(iterable):
"""
Return the first element of the given sequence.
Most useful on generator types.
:param iterable iterable: input iterable
:returns: tuple(iterable, first_element). If a generator is passed,
a new generator will be returned preserving the original values.
:raises: IndexError
Example
_______
>>> a = [1,2,3]
>>> b = (str(i) for i in range(3))
>>> a, first_element = get_first_of_iterable(a)
>>> a, first_element
([1, 2, 3], 1)
When the generator ``b`` is given, a new generator is returned by ``get_first_of_iterable``
to preserve original values of ``b``:
>>> b, first_element = get_first_of_iterable(b)
>>> next(b), first_element
('0', '0')
"""
if hasattr(iterable, '__getitem__'):
return iterable, iterable[0]
iterable = iter(iterable)
try:
first = next(iterable)
except StopIteration:
raise IndexError('`iterable` is empty')
return chain([first], iterable), first | a16ae6795eb656a98ad6c620ae4f177d9cfc2387 | 10,336 |
import sqlite3
def getTiers(connection=None):
"""
"""
# Open the master database if it is not supplied.
flag = False
if connection is None:
connection = sqlite3.connect(MASTER)
flag = True
# Create a cursor from the connection.
cursor = connection.cursor()
# Execute the statement to select the distinct tiers from the database.
cursor.execute("""SELECT DISTINCT tier FROM hierarchy""")
# Fetch the returned data.
tiers = [tier[0] for tier in cursor.fetchall()]
# Close the cursor.
cursor.close()
# Commit the change to the database and close the connection.
if flag:
connection.close()
return tiers | 1689f9aec4f84f5427e04352e1ce546e548eb505 | 10,337 |
from typing import Any
from typing import get_type_hints
from typing import get_origin
from typing import Union
from typing import get_args
def get_repr_type(type_: Any) -> Any:
"""Parse a type and return an representative type.
Example:
All of the following expressions will be ``True``::
get_repr_type(A) == A
get_repr_type(Annotated[A, ...]) == A
get_repr_type(Union[A, B, ...]) == A
get_repr_type(Optional[A]) == A
"""
class Temporary:
__annotations__ = dict(type=type_)
unannotated = get_type_hints(Temporary)["type"]
if get_origin(unannotated) is Union:
return get_args(unannotated)[0]
return unannotated | fe74d79c1fcc74ff86d0c41db3f8f9da37dbf69a | 10,338 |
import datetime
import calendar
def get_month_range_from_dict(source):
"""
:param source: dictionary with keys 'start' and 'end'
:return: a tuple of datatime objects in the form (start, end)
"""
now = timezone.now()
start = source.get('start')
end = source.get('end', datetime.datetime(now.year, now.month, calendar.monthrange(now.year, now.month)[1]))
if not start:
start = datetime.datetime(end.year-1, end.month+1, 1) if end.month != 12 else datetime.datetime(end.year, 1, 1)
return start, end | af68ad0ebcd63444c627fe5b408e1b59dc54e985 | 10,339 |
def softmax_ad_set_dim_func(head, data, axis):
"""Look up the softmax_ad_set_dim_map, and return hash_value, hash_key."""
key = []
key.append(tuple(data.shape))
key.append(data.dtype)
key.append(axis)
hash_key = str(tuple(key))
if hash_key in softmax_ad_set_dim_map.keys():
return ct_util.set_dims(softmax_ad_set_dim_map[hash_key]), hash_key
return "", hash_key | c2077a70c47bb45dcbc79325e7620d4c63324560 | 10,340 |
import csv
def parse_latency_stats(fp):
"""
Parse latency statistics.
:param fp: the file path that stores the statistics
:returns an average latency in milliseconds to connect a pair of initiator and responder clients
"""
latency = []
with open(fp) as csvfile:
csvreader = csv.DictReader(csvfile, delimiter=' ', fieldnames=['title', 'time'])
for row in csvreader:
latency.append(float(row['time']) * 1000)
return sum(latency) / len(latency) | c50c730b5c5bea704bd682d003baa0addfd7ee89 | 10,341 |
import time
import os
import twint
def get_tweets(input, out_dir, ext):
"""
This function takes the list of individuals with the periods list and runs twint for each period. It stores the result in a csv file called c.Output and returns the dictionary of uncollected names and periods.
"""
counter = 0
uncollected = {}
total_uncollected = 0
l = len(list(input.keys()))
c = twint.Config()
c.Store_csv = True
for name in input:
c.Search = name
for p in input[name]:
start = p[0].strftime("%Y-%m-%d")
end = p[1].strftime("%Y-%m-%d")
c.Output = f"{out_dir}{name}_{start}_{end}{ext}"
c.Since = str(p[0])
c.Until = str(p[1])
try:
twint.run.Search(c)
counter += 1
if counter < (l - 1):
time.sleep(7)
except Exception as e:
print(e)
if name not in uncollected:
uncollected[name] = [p]
total_uncollected += 1
else:
uncollected[name].append(p)
total_uncollected += 1
try:
os.remove(c.Output)
except OSError as e:
print(f"Error: {c.Output} --> {e.strerror}")
continue
return uncollected, total_uncollected | 3f8e7793eb95610d8bf549a40b63727782eed178 | 10,342 |
from sklearn.metrics import jaccard_score
def micro_jaccard(y_true, y_pred):
"""
Calculate the micro Jaccard-score, i.e. TP / (TP + FP + FN).
:param y_true: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. True labels or class assignments.
:param y_pred: `numpy.array` of shape `(n_samples,)` or `(n_samples, n_classes)`. Predicted labels or
class assignments.
:return: The micro Jaccard-score.
"""
return jaccard_score(y_true, y_pred, average='micro') | b2be25baa6b161dabd676bcb4e58b2682485725f | 10,343 |
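A small multilabel check, assuming the `jaccard_score` used above is scikit-learn's `sklearn.metrics.jaccard_score` (the signature and `average='micro'` argument match it):
```python
import numpy as np

y_true = np.array([[1, 0, 1], [0, 1, 0]])
y_pred = np.array([[1, 0, 0], [0, 1, 1]])
# Pooled over all entries: TP=2, FP=1, FN=1, so micro Jaccard = 2 / (2 + 1 + 1) = 0.5
print(micro_jaccard(y_true, y_pred))  # 0.5
```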
def round_to_nreads(number_set, n_reads, digit_after_decimal=0):
"""
This function takes a list of numbers and splits n_reads among them in proportion to each number's share of the total,
so that the rounded counts always add up exactly to n_reads.
Notice: the algorithm used here is 'Largest Remainder'.
The downside is that the individual counts are rounded approximations, but the total is exact.
"""
unround_numbers = [
x / float(sum(number_set)) * n_reads * 10 ** digit_after_decimal
for x in number_set
]
decimal_part_with_index = sorted(
[(index, unround_numbers[index] % 1) for index in range(len(unround_numbers))],
key=lambda y: y[1],
reverse=True,
)
remainder = n_reads * 10 ** digit_after_decimal - sum(
[int(x) for x in unround_numbers]
)
index = 0
while remainder > 0:
unround_numbers[decimal_part_with_index[index][0]] += 1
remainder -= 1
index = (index + 1) % len(number_set)
return [int(x) / float(10 ** digit_after_decimal) for x in unround_numbers] | c7a50b5caffb072b3fb6de9478b4acf83f701780 | 10,344 |
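An illustrative call: three equal weights share ten reads, and the single leftover read goes to the entry with the largest fractional remainder (ties resolve in input order):
```python
print(round_to_nreads([1, 1, 1], 10))   # [4.0, 3.0, 3.0] -- counts sum exactly to 10
```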
def _get_raster_extent(src):
"""
extract projected extent from a raster dataset
(min_x, max_x, min_y, max_y)
Parameters
----------
src : gdal raster
Returns
-------
(min_x, max_x, min_y, max_y)
"""
ulx, xres, xskew, uly, yskew, yres = src.GetGeoTransform()
lrx = ulx + (src.RasterXSize * xres)
lry = uly + (src.RasterYSize * yres)
return ulx, lrx, lry, uly | 49ed0b3c583cbfa5b9ecbc96d94aec42aeba3a32 | 10,345 |
def joined_table_table_join_args(joined_table: SQLParser.JoinedTableContext) -> dict:
"""
Resolve a joinedTable ParseTree node into relevant keyword arguments for TableJoin.
These will be pushed down and applied to the child TableRef.
"""
assert isinstance(joined_table, SQLParser.JoinedTableContext)
on_clauses = None
if joined_table.expr() is not None:
on_clauses = sql_ast_clauses_from_expr(joined_table.expr())
using_columns = None
if joined_table.identifierListWithParentheses() is not None:
using_columns = sql_ast_identifiers_from_list(
joined_table.identifierListWithParentheses().identifierList()
)
return {
"on_clauses": on_clauses,
"using_columns": using_columns,
**join_type_table_join_args(joined_table),
} | 7419e63d28a4a34a49fe50e917faa478b246cb09 | 10,346 |
def find_by_name(name):
"""
Find and return a format by name.
:param name: A string describing the name of the format.
"""
for format in FORMATS:
if name == format.name:
return format
raise UnknownFormat('No format found with name "%s"' % name) | 3626316b961d913217036ddc58eaa71dbdaea1a7 | 10,347 |
import sys
import re
def test_endpoints(host, port, use_ssl, endpoints):
"""
Test each endpoint with its associated method and compile lists of endpoints that
can and cannot be accessed without prior authentication
"""
conn = get_conn(host, port, use_ssl)
if not conn:
sys.exit("Failed to connect to host {}, port {}".format(host, port))
headers = {"Content-type": "application/json"}
results = []
for entry in endpoints:
method, endpoint = entry
try_endpoint = endpoint
if ":" in endpoint:
try_endpoint = re.sub(r":[a-zA-Z]+", "1", endpoint)
try_endpoints = []
if "(s)?" in try_endpoint:
try_endpoints.append(try_endpoint.replace("(s)?","s"))
try_endpoints.append(try_endpoint.replace("(s)?",""))
else:
try_endpoints = [try_endpoint]
for try_endpoint in try_endpoints:
status, reason, body = test_endpoint(conn, headers, method, try_endpoint)
results.append({
"status":status,
"reason":reason,
"body":body,
"method":method,
"endpoint":endpoint,
"actual_endpoint":try_endpoint
})
conn.close()
return results | fcc33766039e522046a492cd0dcbc8df50970481 | 10,348 |
def create_neighborhood_polygons(gdf):
""" an attempt to muild neighborhoods polygons from asset points"""
gdf = gdf.reset_index()
neis = gdf['Neighborhood'].unique()
gdf['neighborhood_shape'] = gdf.geometry
# Must be a geodataframe:
for nei in neis:
gdf1 = gdf[gdf['Neighborhood'] == nei]
inds = gdf1.index
polygon = gdf1.geometry.unary_union.convex_hull
# gdf.loc[inds, 'neighborhood_shape'] = [polygon for x in range(len(inds))]
gdf.loc[inds, 'neighborhood_shape'] = polygon
return gdf | 7ca77acfd73a4b13f9088e3839121076d1a70730 | 10,349 |
def custom_gradient(f=None):
"""Decorator to define a function with a custom gradient.
This decorator allows fine grained control over the gradients of a sequence
for operations. This may be useful for multiple reasons, including providing
a more efficient or numerically stable gradient for a sequence of operations.
For example, consider the following function that commonly occurs in the
computation of cross entropy and log likelihoods:
```python
def log1pexp(x):
return tf.math.log(1 + tf.exp(x))
```
Due to numerical instability, the gradient of this function evaluated at x=100
is NaN. For example:
```python
x = tf.constant(100.)
y = log1pexp(x)
dy = tf.gradients(y, x) # Will be NaN when evaluated.
```
The gradient expression can be analytically simplified to provide numerical
stability:
```python
@tf.custom_gradient
def log1pexp(x):
e = tf.exp(x)
def grad(dy):
return dy * (1 - 1 / (1 + e))
return tf.math.log(1 + e), grad
```
With this definition, the gradient at x=100 will be correctly evaluated as
1.0.
Nesting custom gradients can lead to unintuitive results. The default
behavior does not correspond to n-th order derivatives. For example
```python
@tf.custom_gradient
def op(x):
y = op1(x)
@tf.custom_gradient
def grad_fn(dy):
gdy = op2(x, y, dy)
def grad_grad_fn(ddy): # Not the 2nd order gradient of op w.r.t. x.
return op3(x, y, dy, ddy)
return gdy, grad_grad_fn
return y, grad_fn
```
The function `grad_grad_fn` will be calculating the first order gradient
of `grad_fn` with respect to `dy`, which is used to generate forward-mode
gradient graphs from backward-mode gradient graphs, but is not the same as
the second order gradient of `op` with respect to `x`.
Instead, wrap nested `@tf.custom_gradients` in another function:
```python
@tf.custom_gradient
def op_with_fused_backprop(x):
y, x_grad = fused_op(x)
def first_order_gradient(dy):
@tf.custom_gradient
def first_order_custom(unused_x):
def second_order_and_transpose(ddy):
return second_order_for_x(...), gradient_wrt_dy(...)
return x_grad, second_order_and_transpose
return dy * first_order_custom(x)
return y, first_order_gradient
```
Additional arguments to the inner `@tf.custom_gradient`-decorated function
control the expected return values of the innermost function.
See also `tf.RegisterGradient` which registers a gradient function for a
primitive TensorFlow operation. `tf.custom_gradient` on the other hand allows
for fine grained control over the gradient computation of a sequence of
operations.
Note that if the decorated function uses `Variable`s, the enclosing variable
scope must be using `ResourceVariable`s.
Args:
f: function `f(*x)` that returns a tuple `(y, grad_fn)` where:
- `x` is a sequence of (nested structures of) `Tensor` inputs to the
function.
- `y` is a (nested structure of) `Tensor` outputs of applying TensorFlow
operations in `f` to `x`.
- `grad_fn` is a function with the signature `g(*grad_ys)` which returns
a list of `Tensor`s the same size as (flattened) `x` - the derivatives
of `Tensor`s in `y` with respect to the `Tensor`s in `x`. `grad_ys` is
a sequence of `Tensor`s the same size as (flattened) `y` holding the
initial value gradients for each `Tensor` in `y`.
In a pure mathematical sense, a vector-argument vector-valued function
`f`'s derivatives should be its Jacobian matrix `J`. Here we are
expressing the Jacobian `J` as a function `grad_fn` which defines how
`J` will transform a vector `grad_ys` when left-multiplied with it
(`grad_ys * J`, the vector-Jacobian product, or VJP). This functional
representation of a matrix is convenient to use for chain-rule
calculation (in e.g. the back-propagation algorithm).
If `f` uses `Variable`s (that are not part of the
inputs), i.e. through `get_variable`, then `grad_fn` should have
signature `g(*grad_ys, variables=None)`, where `variables` is a list of
the `Variable`s, and return a 2-tuple `(grad_xs, grad_vars)`, where
`grad_xs` is the same as above, and `grad_vars` is a `list<Tensor>`
with the derivatives of `Tensor`s in `y` with respect to the variables
(that is, grad_vars has one Tensor per variable in variables).
Returns:
A function `h(x)` which returns the same value as `f(x)[0]` and whose
gradient (as calculated by `tf.gradients`) is determined by `f(x)[1]`.
"""
if f is None:
return lambda f: custom_gradient(f=f)
@Bind.decorator
def decorated(wrapped, args, kwargs):
"""Decorated function with custom gradient."""
# raise ValueError("PW: trap")
if context.executing_eagerly():
return _eager_mode_decorator(wrapped, args, kwargs)
else:
return _graph_mode_decorator(wrapped, args, kwargs)
return tf_decorator.make_decorator(f, decorated(f)) # pylint: disable=no-value-for-parameter | 6c6432ab9a10c219d811651db2cbbf2321e43b95 | 10,350 |
def Field(name,
ctype,
field_loader=FieldLoaderMethod.OPTIONAL,
comment=None,
gen_setters_and_getters=True):
"""Make a field to put in a node class.
Args:
name: field name
ctype: c++ type for this field
Should be a ScalarType like an int, string or enum type,
or the name of a node class type (e.g. ASTExpression).
Cannot be a pointer type, and should not include modifiers like
const.
field_loader: FieldLoaderMethod enum specifies which FieldLoader method
to use for this field.
comment: Comment text for this field. Text will be stripped and
de-indented.
gen_setters_and_getters: When False, suppress generation of default
template-based get and set methods. Non-standard alternatives
may be supplied via extra_defs.
Returns:
The newly created field.
Raises:
RuntimeError: If an error is detected in one or more arguments.
"""
if field_loader == FieldLoaderMethod.REST_AS_REPEATED:
is_vector = True
else:
is_vector = False
member_name = name + '_'
if isinstance(ctype, ScalarType):
member_type = ctype.ctype
cpp_default = ctype.cpp_default
is_node_ptr = False
enum_name = None
element_storage_type = None
else:
element_storage_type = 'const %s*' % ctype
if is_vector:
member_type = 'absl::Span<%s const>' % element_storage_type
cpp_default = ''
is_node_ptr = False
enum_name = None
else:
member_type = 'const %s*' % ctype
cpp_default = 'nullptr'
is_node_ptr = True
enum_name = NameToEnumName(ctype)
return {
'ctype': ctype,
'cpp_default': cpp_default,
'member_name': member_name, # member variable name
'name': name, # name without trailing underscore
'comment': CleanComment(comment, prefix=' // '),
'member_type': member_type,
'is_node_ptr': is_node_ptr,
'field_loader': field_loader.name,
'enum_name': enum_name,
'is_vector': is_vector,
'element_storage_type': element_storage_type,
'gen_setters_and_getters': gen_setters_and_getters,
} | 9f3f84be56213640ca9d7368d35bdca8eb9958b2 | 10,351 |
from typing import Tuple
from typing import List
import pandas as pd
def sql(dataframe: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, List[str], str]:
"""Infer best fit data types using dataframe values. May be an object converted to a better type,
or numeric values downcast to a smaller data type.
Parameters
----------
dataframe (pandas.DataFrame) : contains unconverted and non-downcasted columns
Returns
-------
dataframe (pandas.DataFrame) : contains columns converted to best fit pandas data type
schema (pandas.DataFrame) : derived SQL schema
not_nullable (list[str]) : columns that should not be null
pk (str) : name of column that best fits as the primary key
"""
# numeric like: bit, tinyint, smallint, int, bigint, float
dataframe = convert_numeric(dataframe)
# datetime like: time, date, datetime2
dataframe = convert_date(dataframe)
# string like: varchar, nvarchar
dataframe = convert_string(dataframe)
# determine SQL properties
schema = sql_schema(dataframe)
not_nullable, pk = sql_unique(dataframe, schema)
return dataframe, schema, not_nullable, pk | d64a2d44cb3a89896aa3b7c19ec1a11bb8fcc2ff | 10,352 |
import numpy as np
def calculateDeviation(img, lineLeft, lineRight):
"""This function calculates
the deviation of the vehicle from the center of the
image
"""
frameCenter = np.mean([lineLeft.bestx,lineRight.bestx] , dtype=np.int32)
imgCenter = img.shape[1]//2
dev = frameCenter - imgCenter
xm_per_pix = 3.7/450 # meters per pixel in x dimension
result = dev*xm_per_pix
# Moving average deviation (Not needed as applied to bestx)
#x = np.append(lineLeft.center_deviation, [dev])
#result = moving_average(x, movingAvg)[-1]
#lineLeft.center_deviation = np.append(lineLeft.center_deviation, result)
if dev > 0.01:
text = "Vehicle is {:.2f} m -->".format(abs(result))
elif dev < -0.01:
text = "Vehicle is {:.2f} m <--".format(abs(result))
else:
text = "Vehicle is spot on center!"
return result , text | 79d16240a2d606cb25360532ac77e4fbe834e23d | 10,353 |
import requests
def post_new_tracker_story(message, project_id, user):
"""Posts message contents as a story to the bound project."""
if ";" in message:
name, description = message.split(";", maxsplit=1)
else:
name, description = (message, "")
story_name = "{name} (from {user})".format(
name=name.strip(), user=user)
response = requests.post(
story_post_url.format(project_id=project_id),
headers=pivotal_headers,
json={"name": story_name,
"description": description.strip()})
story_url = response.json()["url"]
return name, story_url | b26391ad07159df3087cf22c136e5959a7fb6f4b | 10,354 |
def nz2epsmu(N, Z):#{{{
""" Accepts index of refraction and impedance, returns effective permittivity and permeability"""
return N/Z, N*Z | 3173df57ab5ad573baab87cd4fd6f353fcf69e2c | 10,355 |
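The relations used, written out (the effective-parameter interpretation is implied by the function's docstring):
```latex
\varepsilon = \frac{N}{Z}, \qquad \mu = N Z
```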
import numpy as np
import scipy.linalg
def logdet_symm(m, check_symm=False):
"""
Return log(det(m)) asserting positive definiteness of m.
Parameters
----------
m : array-like
2d array that is positive-definite (and symmetric)
Returns
-------
logdet : float
The log-determinant of m.
"""
if check_symm:
if not np.all(m == m.T): # would be nice to short-circuit check
raise ValueError("m is not symmetric.")
c, _ = scipy.linalg.cho_factor(m, lower=True)
return 2 * np.sum(np.log(c.diagonal())) | 4e4358cb9094d671ba393d653adf685a916c4fa3 | 10,356 |
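A quick numerical check of the identity the function relies on, log det(M) = 2 * sum(log(diag(L))) for the Cholesky factor M = L L^T; the matrix is an arbitrary small SPD example:
```python
import numpy as np

m = np.array([[4.0, 1.0],
              [1.0, 3.0]])            # symmetric positive definite, det = 11
print(logdet_symm(m))                 # ~2.3979
print(np.log(np.linalg.det(m)))       # log(11), same value
```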
def merge(left, right):
""" Merge helper
Complexity: O(n)
"""
arr = []
left_cursor, right_cursor = 0, 0
while left_cursor < len(left) and right_cursor < len(right):
# Sort each one and place into the result
if left[left_cursor] <= right[right_cursor]:
arr.append(left[left_cursor])
left_cursor += 1
else:
arr.append(right[right_cursor])
right_cursor += 1
# Add the left overs if there's any left to the result
for i in range(left_cursor, len(left)):
arr.append(left[i])
for i in range(right_cursor, len(right)):
arr.append(right[i])
# Return result
return arr | c6730a0fe5bfcaf713c6c3fa8f2e777db50e4445 | 10,357 |
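A couple of illustrative calls with already-sorted inputs:
```python
print(merge([1, 3, 5], [2, 4, 6]))   # [1, 2, 3, 4, 5, 6]
print(merge([], [7, 8]))             # [7, 8]
```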
from functools import wraps
def validate_form_data(FORM_Class):
"""
Validates the passed form/json data to a request and passes the
form to the called function.
If form data is not valid, return a 406 response.
"""
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
form = FORM_Class(csrf_enabled=False)
if not form.validate():
return json_error(code=406, data=form.errors)
kwargs['form'] = form
return f(*args, **kwargs)
return decorated_function
return decorator | d0287479d3c5da32c5fbd48a8f2047b64bce5e2b | 10,358 |
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
def set_axis_tick_format(
ax, xtickformat=None, ytickformat=None, xrotation=0, yrotation=0
):
"""Sets the formats for the ticks of a single axis
:param ax: axis object
:param xtickformat: optional string for the format of the x ticks
:param ytickformat: optional string for the format of the y ticks
:param xrotation: rotation angle of the x ticks. Defaults to 0
:param yrotation: rotation angle of the y ticks. Defaults to 0
:returns: ax
"""
if xtickformat is not None:
ax.xaxis.set_major_formatter(FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(FormatStrFormatter(ytickformat))
plt.setp(ax.get_xticklabels(), ha="right", rotation=xrotation)
plt.setp(ax.get_yticklabels(), ha="right", rotation=yrotation)
return ax | 06b7f6cc5ba78fa093cf517a5df414fa1bb6f504 | 10,359 |
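# Illustrative use of set_axis_tick_format() above: format the y ticks to two
# decimals and rotate the x tick labels by 45 degrees.
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0.123, 0.456, 0.789])
set_axis_tick_format(ax, ytickformat="%.2f", xrotation=45)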
import numpy as np
def two_body(y, t):
"""
Solves the two body problem
:param y: state vector
y = [rx,ry,rz,vx,vy,vz]
:param t: time
:return: dy
"""
rx, ry, rz = y[0], y[1], y[2]
vx, vy, vz = y[3], y[4], y[5]
r = np.array([rx, ry, rz])
v = np.array([vx, vy, vz])
r_mag = np.linalg.norm(r)
c = -mu / (r_mag ** 3)
dy = np.zeros(6)
dy[0] = y[3]
dy[1] = y[4]
dy[2] = y[5]
dy[3] = c*y[0]
dy[4] = c*y[1]
dy[5] = c*y[2]
return dy | 5a04f279caa3a540f76d3af488344414f4b3547e | 10,360 |
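# Usage sketch for two_body() above. `mu` is assumed to be a module-level
# gravitational parameter; Earth's value in km^3/s^2 is used here for illustration.
import numpy as np
from scipy.integrate import odeint
mu = 398600.4418  # km^3/s^2 (assumed global read by two_body)
y0 = [7000.0, 0.0, 0.0, 0.0, 7.546, 0.0]  # ~circular low orbit: r in km, v in km/s
t = np.linspace(0.0, 5828.0, 200)          # roughly one orbital period, in seconds
trajectory = odeint(two_body, y0, t)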
import argparse
import os
def args_parse_params(params):
""" create simple arg parser with default values (input, output)
    :param dict params: dictionary of default parameter values
    :return dict: parsed command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-i',
'--path_in',
type=str,
required=True,
default=params['path_in'],
help='path to the folder with input image dataset'
)
parser.add_argument(
'-o',
'--path_out',
type=str,
required=True,
default=params['path_out'],
help='path to the output with experiment results'
)
parser.add_argument(
'-t', '--threshold', type=float, required=False, default=0.001, help='threshold for image information'
)
parser.add_argument(
'-m', '--thr_method', type=str, required=False, default='', choices=METHODS, help='used methods'
)
parser.add_argument(
'--nb_workers', type=int, required=False, default=NB_WORKERS, help='number of parallel processes'
)
args = vars(parser.parse_args())
for k in (k for k in args if k.startswith('path_')):
p = update_path(os.path.dirname(args[k]))
assert os.path.exists(p), 'missing (%s): %s' % (k, p)
args[k] = os.path.join(p, os.path.basename(args[k]))
return args | 42f9a3504c5f3005e5e7117fec80730abddb1e6c | 10,361 |
import numpy as np
def DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0):
"""
z_prime,a_hat,e_phi = DD_carrier_sync(z,M,BnTs,zeta=0.707,type=0)
Decision directed carrier phase tracking
z = complex baseband PSK signal at one sample per symbol
    M = The PSK modulation order, i.e., 2, 4, or 8.
BnTs = time bandwidth product of loop bandwidth and the symbol period,
thus the loop bandwidth as a fraction of the symbol rate.
zeta = loop damping factor
type = Phase error detector type: 0 <> ML, 1 <> heuristic
z_prime = phase rotation output (like soft symbol values)
a_hat = the hard decision symbol values landing at the constellation
values
e_phi = the phase error e(k) into the loop filter
Ns = Nominal number of samples per symbol (Ts/T) in the carrier
phase tracking loop, almost always 1
Kp = The phase detector gain in the carrier phase tracking loop;
This value depends upon the algorithm type. For the ML scheme
           described at the end of notes Chapter 9, A = 1, K = 1/sqrt(2),
so Kp = sqrt(2).
Mark Wickert July 2014
Motivated by code found in M. Rice, Digital Communications A Discrete-Time
Approach, Prentice Hall, New Jersey, 2009. (ISBN 978-0-13-030497-1).
"""
Ns = 1
Kp = np.sqrt(2.) # for type 0
z_prime = np.zeros_like(z)
a_hat = np.zeros_like(z)
e_phi = np.zeros(len(z))
theta_h = np.zeros(len(z))
theta_hat = 0
# Tracking loop constants
    K0 = 1
    K1 = 4*zeta/(zeta + 1/(4*zeta))*BnTs/Ns/Kp/K0
    K2 = 4/(zeta + 1/(4*zeta))**2*(BnTs/Ns)**2/Kp/K0
# Initial condition
vi = 0
for nn in range(len(z)):
# Multiply by the phase estimate exp(-j*theta_hat[n])
z_prime[nn] = z[nn]*np.exp(-1j*theta_hat)
if M == 2:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*0
elif M == 4:
a_hat[nn] = np.sign(z_prime[nn].real) + 1j*np.sign(z_prime[nn].imag)
elif M == 8:
a_hat[nn] = np.angle(z_prime[nn])/(2*np.pi/8.)
# round to the nearest integer and fold to nonnegative
# integers; detection into M-levels with thresholds at mid points.
a_hat[nn] = np.mod(round(a_hat[nn]),8)
a_hat[nn] = np.exp(1j*2*np.pi*a_hat[nn]/8)
else:
raise ValueError('M must be 2, 4, or 8')
if type == 0:
# Maximum likelihood (ML)
e_phi[nn] = z_prime[nn].imag * a_hat[nn].real - \
z_prime[nn].real * a_hat[nn].imag
elif type == 1:
# Heuristic
e_phi[nn] = np.angle(z_prime[nn]) - np.angle(a_hat[nn])
else:
raise ValueError('Type must be 0 or 1')
vp = K1*e_phi[nn] # proportional component of loop filter
vi = vi + K2*e_phi[nn] # integrator component of loop filter
v = vp + vi # loop filter output
theta_hat = np.mod(theta_hat + v,2*np.pi)
theta_h[nn] = theta_hat # phase track output array
#theta_hat = 0 # for open-loop testing
# Normalize outputs to have QPSK points at (+/-)1 + j(+/-)1
#if M == 4:
# z_prime = z_prime*np.sqrt(2)
return z_prime, a_hat, e_phi, theta_h | 77bcd1dd49a7bb9dfd40b8b45c9a9eb129ed674d | 10,362 |
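# Usage sketch for DD_carrier_sync() above: track a constant phase offset on a
# QPSK symbol stream (one sample per symbol). theta_h should settle near 0.3 rad.
import numpy as np
rng = np.random.default_rng(0)
symbols = np.exp(1j * (2 * np.pi * rng.integers(0, 4, 2000) / 4 + np.pi / 4))
received = symbols * np.exp(1j * 0.3)
z_prime, a_hat, e_phi, theta_h = DD_carrier_sync(received, 4, BnTs=0.02)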
from typing import Dict
from typing import Any
def rubrik_gps_vm_snapshot_create(client: PolarisClient, args: Dict[str, Any]) -> CommandResults:
"""
Trigger an on-demand vm snapshot.
:type client: ``PolarisClient``
:param client: Rubrik Polaris client to use
:type args: ``dict``
:param args: arguments obtained from demisto.args()
:return: CommandResult object
"""
object_id = validate_required_arg("object_id", args.get("object_id", ""))
sla_domain_id = args.get("sla_domain_id", "")
raw_response = client.create_vm_snapshot(object_id, sla_domain_id)
outputs = raw_response.get("data", {}).get("vsphereOnDemandSnapshot", {})
outputs = remove_empty_elements(outputs)
if not outputs or not outputs.get("id"):
return CommandResults(readable_output=MESSAGES['NO_RESPONSE'])
hr_content = {
"On-Demand Snapshot Request ID": outputs.get("id"),
"Status": outputs.get("status")
}
hr = tableToMarkdown("GPS VM Snapshot", hr_content, headers=["On-Demand Snapshot Request ID", "Status"],
removeNull=True)
return CommandResults(outputs_prefix=OUTPUT_PREFIX["GPS_SNAPSHOT_CREATE"],
outputs_key_field="id",
outputs=outputs,
raw_response=raw_response,
readable_output=hr) | b065c29e58043b2785f48cf788fb1414262b0eee | 10,363 |
import os
import cPickle
import numpy as np
def getFullCorpus(emotion, speakerID = None):
    """
    Return the MFCC data for all speakers (excluding speakerID, if given),
    stacked into a single array with np.vstack.
    :param emotion: emotion label used to select MFCC files
    :param speakerID: optional speaker to exclude from the corpus
    :return: numpy array of vertically stacked MFCC values
"""
if emotion not in emotions or (speakerID is not None and speakerID not in speakers):
raise Exception("No Such speaker: {} or emotion: {}".format(speakerID, emotion))
#error check
if speakerID is None:
#return whole corpus
speakerID = "Derpington"
# should not be in file
MFCCFiles = os.listdir(ExtractedMFCCs)
try:
MFCCFiles.remove('.DS_Store')
except ValueError:
pass
# It didn't need to be removed
MFCCVals = []
for file in MFCCFiles:
if emotion in file and speakerID not in file:
# print "Currently reading", file
with open(os.path.join(ExtractedMFCCs, file)) as f:
speakerEmotion = cPickle.load(f)
speakerEmotion = np.vstack(speakerEmotion)
MFCCVals.append(speakerEmotion)
return np.vstack(MFCCVals) | b9894e28e75263fe0a84bc947437f093fc9827d8 | 10,364 |
def CT_freezing_first_derivatives(SA, p, saturation_fraction):
"""
Calculates the first derivatives of the Conservative Temperature at
which seawater freezes, with respect to Absolute Salinity SA and
pressure P (in Pa).
Parameters
----------
SA : array-like
Absolute Salinity, g/kg
p : array-like
Sea pressure (absolute pressure minus 10.1325 dbar), dbar
saturation_fraction : array-like
Saturation fraction of dissolved air in seawater. (0..1)
Returns
-------
CTfreezing_SA : array-like, K kg/g
the derivative of the Conservative Temperature at
freezing (ITS-90) with respect to Absolute Salinity at
        fixed pressure [ K/(g/kg) ]
CTfreezing_P : array-like, K/Pa
the derivative of the Conservative Temperature at
freezing (ITS-90) with respect to pressure (in Pa) at
fixed Absolute Salinity
"""
return _gsw_ufuncs.ct_freezing_first_derivatives(SA, p, saturation_fraction) | af3aa120e2e2620f16d984b29d42d583ed9fd347 | 10,365 |
import logging
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, BatchNormalization, Flatten, MaxPooling1D
from keras.callbacks import ModelCheckpoint
def run(gParameters):
"""
Runs the model using the specified set of parameters
Args:
gParameters: a python dictionary containing the parameters (e.g. epoch)
to run the model with.
"""
#
if 'dense' in gParameters:
dval = gParameters['dense']
if type(dval) != list:
res = list(dval)
# try:
# is_str = isinstance(dval, basestring)
# except NameError:
# is_str = isinstance(dval, str)
# if is_str:
# res = str2lst(dval)
gParameters['dense'] = res
print(gParameters['dense'])
if 'conv' in gParameters:
flat = gParameters['conv']
gParameters['conv'] = [flat[i:i + 3] for i in range(0, len(flat), 3)]
print('Conv input', gParameters['conv'])
# print('Params:', gParameters)
# Construct extension to save model
ext = benchmark.extension_from_parameters(gParameters, '.keras')
logfile = gParameters['logfile'] if gParameters['logfile'] else gParameters['output_dir'] + ext + '.log'
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter("[%(asctime)s %(process)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"))
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(''))
sh.setLevel(logging.DEBUG if gParameters['verbose'] else logging.INFO)
benchmark.logger.setLevel(logging.DEBUG)
benchmark.logger.addHandler(fh)
benchmark.logger.addHandler(sh)
benchmark.logger.info('Params: {}'.format(gParameters))
# Get default parameters for initialization and optimizer functions
kerasDefaults = candle.keras_default_config()
seed = gParameters['rng_seed']
# Build dataset loader object
loader = benchmark.DataLoader(seed=seed, dtype=gParameters['data_type'],
val_split=gParameters['val_split'],
test_cell_split=gParameters['test_cell_split'],
cell_features=gParameters['cell_features'],
drug_features=gParameters['drug_features'],
feature_subsample=gParameters['feature_subsample'],
scaling=gParameters['scaling'],
scramble=gParameters['scramble'],
min_logconc=gParameters['min_logconc'],
max_logconc=gParameters['max_logconc'],
subsample=gParameters['subsample'],
category_cutoffs=gParameters['category_cutoffs'])
# Initialize weights and learning rule
initializer_weights = candle.build_initializer(gParameters['initialization'], kerasDefaults, seed)
initializer_bias = candle.build_initializer('constant', kerasDefaults, 0.)
# Define model architecture
gen_shape = None
out_dim = 1
model = Sequential()
if 'dense' in gParameters: # Build dense layers
for layer in gParameters['dense']:
if layer:
model.add(Dense(layer, input_dim=loader.input_dim,
kernel_initializer=initializer_weights,
bias_initializer=initializer_bias))
if gParameters['batch_normalization']:
model.add(BatchNormalization())
model.add(Activation(gParameters['activation']))
if gParameters['dropout']:
model.add(Dropout(gParameters['dropout']))
else: # Build convolutional layers
gen_shape = 'add_1d'
layer_list = list(range(0, len(gParameters['conv'])))
lc_flag = False
if 'locally_connected' in gParameters:
lc_flag = True
for _, i in enumerate(layer_list):
if i == 0:
add_conv_layer(model, gParameters['conv'][i], input_dim=loader.input_dim, locally_connected=lc_flag)
else:
add_conv_layer(model, gParameters['conv'][i], locally_connected=lc_flag)
if gParameters['batch_normalization']:
model.add(BatchNormalization())
model.add(Activation(gParameters['activation']))
if gParameters['pool']:
model.add(MaxPooling1D(pool_size=gParameters['pool']))
model.add(Flatten())
model.add(Dense(out_dim))
# Define optimizer
optimizer = candle.build_optimizer(gParameters['optimizer'],
gParameters['learning_rate'],
kerasDefaults)
# Compile and display model
model.compile(loss=gParameters['loss'], optimizer=optimizer)
model.summary()
benchmark.logger.debug('Model: {}'.format(model.to_json()))
train_gen = benchmark.DataGenerator(loader, batch_size=gParameters['batch_size'], shape=gen_shape, name='train_gen', cell_noise_sigma=gParameters['cell_noise_sigma']).flow()
val_gen = benchmark.DataGenerator(loader, partition='val', batch_size=gParameters['batch_size'], shape=gen_shape, name='val_gen').flow()
val_gen2 = benchmark.DataGenerator(loader, partition='val', batch_size=gParameters['batch_size'], shape=gen_shape, name='val_gen2').flow()
test_gen = benchmark.DataGenerator(loader, partition='test', batch_size=gParameters['batch_size'], shape=gen_shape, name='test_gen').flow()
train_steps = int(loader.n_train / gParameters['batch_size'])
val_steps = int(loader.n_val / gParameters['batch_size'])
test_steps = int(loader.n_test / gParameters['batch_size'])
if 'train_steps' in gParameters:
train_steps = gParameters['train_steps']
if 'val_steps' in gParameters:
val_steps = gParameters['val_steps']
if 'test_steps' in gParameters:
test_steps = gParameters['test_steps']
checkpointer = ModelCheckpoint(filepath=gParameters['output_dir'] + '.model' + ext + '.h5', save_best_only=True)
progbar = MyProgbarLogger(train_steps * gParameters['batch_size'])
loss_history = MyLossHistory(progbar=progbar, val_gen=val_gen2, test_gen=test_gen,
val_steps=val_steps, test_steps=test_steps,
metric=gParameters['loss'], category_cutoffs=gParameters['category_cutoffs'],
ext=ext, pre=gParameters['output_dir'])
# Seed random generator for training
np.random.seed(seed)
candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)
# history = model.fit(train_gen, steps_per_epoch=train_steps, # this should be the deprecation fix
history = model.fit(train_gen, steps_per_epoch=train_steps,
epochs=gParameters['epochs'],
validation_data=val_gen,
validation_steps=val_steps,
verbose=0,
callbacks=[checkpointer, loss_history, progbar, candleRemoteMonitor],
)
# callbacks=[checkpointer, loss_history, candleRemoteMonitor], # this just caused the job to hang on Biowulf
benchmark.logger.removeHandler(fh)
benchmark.logger.removeHandler(sh)
return history | 53a904dc5aeb7942623d2db6957505d43612f174 | 10,366 |
import inkex
def get_territory_center(territory: inkex.Group) -> inkex.Vector2d:
    """
    Get the center point of the territory from its child center rectangle.
    :param territory:
    :return:
        territory center point
"""
center_rectangle: inkex.Rectangle = territory.find(f"./{Svg.GROUP}/{Svg.RECTANGLE}", NSS)
return inkex.Vector2d(
center_rectangle.left + center_rectangle.rx / 2,
center_rectangle.top + center_rectangle.ry / 2
) | 90f9e8ae7eebc5f3acf3d6e4100dec36ef5839d9 | 10,367 |
import tensorflow as tf
def batch_norm_relu(inputs, is_training, data_format):
"""Performs a batch normalization followed by a ReLU."""
# We set fused=True for a significant performance boost. See
# https://www.tensorflow.org/performance/performance_guide#common_fused_ops
inputs = tf.layers.batch_normalization(
inputs=inputs, axis=1 if data_format == 'channels_first' else -1,
momentum=_BATCH_NORM_DECAY, epsilon=_BATCH_NORM_EPSILON, center=True,
scale=True, training=is_training, fused=True)
# unary = {"1":lambda x:x ,"2":lambda x: -x, "3":lambda x:tf.abs, "4":lambda x : tf.pow(x,2),"5":lambda x : tf.pow(x,3),
# "6":lambda x:tf.sqrt,"7":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x,
# "8":lambda x : x + tf.Variable(tf.truncated_normal([1], stddev=0.08)),"9":lambda x: tf.log(tf.abs(x)+10e-8),
# "10":lambda x:tf.exp,"11":lambda x:tf.sin,"12":lambda x:tf.sinh,"13":lambda x:tf.cosh,"14":lambda x:tf.tanh,"15":lambda x:tf.asinh,"16":lambda x:tf.atan,"17":lambda x: tf.sin(x)/x,
# "18":lambda x : tf.maximum(x,0),"19":lambda x : tf.minimum(x,0),"20":tf.sigmoid,"21":lambda x:tf.log(1+tf.exp(x)),
# "22":lambda x:tf.exp(-tf.pow(x,2)),"23":lambda x:tf.erf,"24":lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))}
# binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:x/(y+10e-8),
# "5":lambda x,y:tf.maximum(x,y),"6":lambda x,y: tf.sigmoid(x)*y,"7":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.pow(x-y,2)),
# "8":lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.abs(x-y)),
# "9":lambda x,y: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x + (1-tf.Variable(tf.truncated_normal([1], stddev=0.08)))*y}
unary = {"1":lambda x:x ,"2":lambda x: -x, "3": lambda x: tf.maximum(x,0), "4":lambda x : tf.pow(x,2),"5":lambda x : tf.tanh(tf.cast(x,tf.float32))}
binary = {"1":lambda x,y: tf.add(x,y),"2":lambda x,y:tf.multiply(x,y),"3":lambda x,y:tf.add(x,-y),"4":lambda x,y:tf.maximum(x,y),"5":lambda x,y: tf.sigmoid(x)*y}
input_fun = {"1":lambda x:tf.cast(x,tf.float32) , "2":lambda x:tf.zeros(tf.shape(x)), "3": lambda x:2*tf.ones(tf.shape(x)),"4": lambda x : tf.ones(tf.shape(x)), "5": lambda x: -tf.ones(tf.shape(x))}
with open("tmp","r") as f:
activation = f.readline()
activation = activation.split(" ")
#inputs = binary[activation[8]](unary[activation[5]](binary[activation[4]](unary[activation[2]](input_fun[activation[0]](inputs)),unary[activation[3]](input_fun[activation[1]](inputs)))),unary[activation[7]](input_fun[activation[6]](inputs)))
inputs = binary[activation[5]](unary[activation[3]](binary[activation[2]](unary[activation[0]](inputs),unary[activation[1]]((inputs)))),unary[activation[4]]((inputs)))
#inputs = binary[activation[4]]((unary[activation[2]](input_fun[activation[0]](inputs))),(unary[activation[3]](input_fun[activation[1]](inputs)))) #b[4](u1[2](x1[0]),u2[3](x2[1])) #core unit
#inputs = binary[activation[2]]((unary[activation[0]](inputs)),(unary[activation[1]](inputs))) #b[2](u1[0](x),u2[1](x)) #core unit
#inputs = tf.nn.relu(inputs)
functions = open("./functions.txt", "a")
functions.write(str(inputs) + "\n")
return inputs | 3d9d08000cd4dc5b90b64c6fdce7fca0039c5a03 | 10,368 |
def create_success_status(found_issue):
"""Create a success status for when an issue number was found in the title."""
issue_number = found_issue.group("issue")
url = f"https://bugs.python.org/issue{issue_number}"
return util.create_status(STATUS_CONTEXT, util.StatusState.SUCCESS,
description=f"Issue number {issue_number} found",
target_url=url) | 0aab1eeb7f3b5a27b06a7f47dac5d105dbeef5fd | 10,369 |
def score_to_rating_string(score):
"""
Convert score to rating
"""
if score < 1:
rating = "Terrible"
elif score < 2:
rating = "Bad"
elif score < 3:
rating = "OK"
elif score < 4:
rating = "Good"
else:
rating = "Excellent"
return rating | 0c6a5aba0cb220a470f2d40c73b873d11b1a0f98 | 10,370 |
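# Examples for score_to_rating_string() above:
assert score_to_rating_string(0.5) == "Terrible"
assert score_to_rating_string(3.2) == "Good"
assert score_to_rating_string(4.0) == "Excellent"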
import numpy as np
import pandas as pd
def deconv1d_df(t, observed_counts, one_sided_prf, background_count_rate, column_name='deconv', same_time=True,
                deconv_func=emcee_deconvolve, **kwargs):
"""
deconvolve and then return results in a pandas.DataFrame
"""
#print("working on chunk with length {}".format(len(observed_counts)))
with util.timewith("deconvolve chunk with {} elements".format(len(observed_counts))) as timer:
results = deconv_func(t, observed_counts, one_sided_prf,
background_count_rate, **kwargs)
sampler, A, t_ret = results[:3]
mean_est = A.mean(axis=0)
percentiles = np.percentile(A, [10, 16, 50, 84, 90], axis=0)
d = {column_name + '_mean': mean_est,
column_name + '_p10': percentiles[0],
column_name + '_p16': percentiles[1],
column_name + '_p50': percentiles[2],
column_name + '_p84': percentiles[3],
column_name + '_p90': percentiles[4]}
df = pd.DataFrame(data=d, index=t_ret)
if same_time:
        df = df.loc[t]  # .ix was removed from pandas; .loc keeps the label-based selection
return df | bc773398762ac7e82876d935b6ca0b0351960865 | 10,371 |
from argparse import ArgumentParser
from pkg_resources import get_distribution
def create_parser() -> ArgumentParser:
"""Create a parser instance able to parse args of script.
return:
Returns the parser instance
"""
parser = ArgumentParser()
version = get_distribution('hexlet-code').version
parser.add_argument('first_file', help='path to JSON or YAML file')
parser.add_argument('second_file', help='path to JSON or YAML file')
parser.add_argument(
'-f',
'--format',
choices=FORMATS.keys(),
default=DEFAULT_FORMAT,
help='set format of output',
)
parser.add_argument(
'-v',
'--version',
action='version',
version='{prog} {version}'.format(prog=parser.prog, version=version),
help='print version info',
)
return parser | 93f06dd056ab9121be1ad1312b67024312a4108f | 10,372 |
import os
import numpy as np
import pandas as pd
def parse_alignment_file(file_path):
"""Parse the buildAlignment.tsv output file from CreateHdpTrainingData
:param file_path: path to alignment file
:return: panda DataFrame with column names "kmer", "strand", "level_mean", "prob"
"""
assert os.path.exists(file_path), "File path does not exist: {}".format(file_path)
data = pd.read_csv(file_path, delimiter="\t",
usecols=(4, 12, 13, 15),
names=["strand", "prob", "level_mean", "kmer"],
dtype={"kmer": np.str, "strand": np.str, "level_mean": np.float64, "prob": np.float64},
header=None)[["kmer", "strand", "level_mean", "prob"]]
return data | 77ad066b5a85ff608eecc4e8ac8ab83c07cafd8a | 10,373 |
def remap_key(ctx, origin_key, destination_key, *, mode=None, level=None):
"""Remap *origin_key* to *destination_key*.
Returns an instance of :class:`RemappedKey`.
For valid keys refer to `List of Keys
<https://www.autohotkey.com/docs/KeyList.htm>`_.
The optional keyword-only *mode* and *level* arguments are passed to the
:func:`send` function that will send the *destination_key* when the user
presses the *origin_key*.
For more information refer to `Remapping Keys
<https://www.autohotkey.com/docs/misc/Remap.htm>`_.
"""
mouse = destination_key.lower() in {"lbutton", "rbutton", "mbutton", "xbutton1", "xbutton2"}
if mouse:
def origin_hotkey():
if not is_key_pressed(destination_key):
send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, mouse_delay=-1)
def origin_up_hotkey():
send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, mouse_delay=-1)
else:
ctrl_to_alt = (
origin_key.lower() in {"ctrl", "lctrl", "rctrl"} and
destination_key.lower() in {"alt", "lalt", "ralt"}
)
if ctrl_to_alt:
def origin_hotkey():
send(
"{Blind}{%s Up}{%s DownR}" % (origin_key, destination_key),
mode=mode,
level=level,
key_delay=-1,
)
else:
def origin_hotkey():
send("{Blind}{%s DownR}" % destination_key, mode=mode, level=level, key_delay=-1)
def origin_up_hotkey():
send("{Blind}{%s Up}" % destination_key, mode=mode, level=level, key_delay=-1)
origin_hotkey = ctx.hotkey(f"*{origin_key}", origin_hotkey)
origin_up_hotkey = ctx.hotkey(f"*{origin_key} Up", origin_up_hotkey)
return RemappedKey(origin_hotkey, origin_up_hotkey) | f4a8f7cddea2f82a13d06b5f3f3c4031e862e32b | 10,374 |
def get_anime_list(wf):
"""Get an Animelist instance.
:param Workflow3 wf: the Workflow3 object
:returns: Animelist object
:rtype: Animelist
"""
try:
animelist = Animelist(
wf.settings['UID'], wf.get_password('bangumi-auth-token')
)
except Exception as e:
raise LogoutException("Please login first")
else:
return animelist | a16a063afd2ac6a3fba4877665185a30971e8387 | 10,375 |
import rospy
def use_linear_strategy():
"""
Uses a linear function to generate target velocities.
"""
max_velocity = kmph2mps(rospy.get_param("~velocity", 40))
stop_line_buffer = 2.0
def linear_strategy(distances_to_waypoints, current_velocity):
# Target velocity function should be a line
# going from (0, current_velocity)
# to (last_waypoint - buffer, 0)
# (after x-intercept, y = 0)
d = max(distances_to_waypoints[-1] - stop_line_buffer, 0) # stopping distance
v = current_velocity # amount by which to slow down within given distance
# Protect against divide by 0 case
if d < 0.01:
return [0 for x in distances_to_waypoints]
f = lambda x: min(
max(
# [0, d]: downward line:
# y = (-v / d)x + v = (1 - (x/d)) * v
(1. - (x / d)) * v,
# (-inf, 0) && (d, +inf): flat
# y = 0
0
),
# Never faster than maximum
max_velocity
)
return map(f, distances_to_waypoints)
return linear_strategy | 7f0be5e0e11c7d29bb68ce7007e911f0fd14d6e2 | 10,376 |
import poptorch
import torch.nn as nn
def recomputation_checkpoint(module: nn.Module):
"""Annotates the output of a module to be checkpointed instead of
recomputed"""
def recompute_outputs(module, inputs, outputs):
return tuple(poptorch.recomputationCheckpoint(y) for y in outputs)
return module.register_forward_hook(recompute_outputs) | a39f106f05e84b36ab21a948044adb14fb44b6cd | 10,377 |
import requests
import json
def get_random_quote() -> str:
"""Retrieve a random quote from the Forismatic API.
Returns:
str: The retrieved quote
"""
quote = ""
while quote == "":
response = requests.get(
"http://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=json"
)
if response.status_code != 200:
print(f"Error while getting image: {response}")
continue
try:
response_json = json.loads(response.text.replace("\\'", "'"))
except json.decoder.JSONDecodeError as error:
print(f"Error while decoding JSON: {response.text}\n{error}")
continue
quote_text: str = response_json["quoteText"]
if contains_no_blacklisted_regexes(quote_text):
quote = quote_text
return quote | bd189878c76a4da1544a6d3762c2a67f69ad1846 | 10,378 |
def has_datapoint(fake_services, metric_name=None, dimensions=None, value=None, metric_type=None, count=1):
"""
Returns True if there is a datapoint seen in the fake_services backend that
has the given attributes. If a property is not specified it will not be
considered. Dimensions, if provided, will be tested as a subset of total
set of dimensions on the datapoint and not the complete set.
"""
found = 0
# Try and cull the number of datapoints that have to be searched since we
# have to check each datapoint.
    if dimensions:
        # Any datapoint that has all of the requested dims must be present in the
        # bucket for each individual dim, so one bucket is enough to cull the search.
        first_key, first_value = next(iter(dimensions.items()))
        datapoints = fake_services.datapoints_by_dim[f"{first_key}:{first_value}"]
    elif metric_name is not None:
        datapoints = fake_services.datapoints_by_metric[metric_name]
    else:
        datapoints = fake_services.datapoints
    for dp in datapoints:
if metric_name and dp.metric != metric_name:
continue
if dimensions and not has_all_dims(dp, dimensions):
continue
if metric_type and dp.metricType != metric_type:
continue
if value is not None:
if dp.value.HasField("intValue"):
if dp.value.intValue != value:
continue
elif dp.value.HasField("doubleValue"):
if dp.value.doubleValue != value:
continue
else:
# Non-numeric values aren't supported, so they always fail to
# match
continue
found += 1
if found >= count:
return True
return False | 59797809cb73644236cdc8d35cf9698491fff83a | 10,379 |
def optimizeMemoryUsage(foregroundTasks, backgroundTasks, K):
"""
:type foregroundTasks: List[int]
:type backgroundTasks: List[int]
:type K: int
:rtype: List[List[int]]
"""
res = []
curr_max = 0
if len(foregroundTasks) == 0:
for j in range(len(backgroundTasks)):
add_result(backgroundTasks[j], K, curr_max, res, j, 1)
if len(backgroundTasks) == 0:
for i in range(len(foregroundTasks)):
add_result(foregroundTasks[i], K, curr_max, res, i, 0)
for i in range(len(foregroundTasks)):
for j in range(len(backgroundTasks)):
curr_usage = foregroundTasks[i] + backgroundTasks[j]
if curr_usage > K:
add_result(foregroundTasks[i], K, curr_max, res, i, 0)
add_result(backgroundTasks[j], K, curr_max, res, j, 1)
if curr_usage > curr_max and curr_usage <= K:
res = [[i, j]]
curr_max = curr_usage
elif curr_usage == curr_max:
res.append([i, j])
return res if len(res) > 0 else [[-1, -1]] | be7de70bf39ea1872ad1d5bbb9c5209ae3978c8c | 10,380 |
import itertools
import numpy as np
def cv_indices(num_folds,num_samples):
"""
Given number of samples and num_folds automatically create a subjectwise cross validator
Assumption: per subject we have 340 samples of data
>>> cv_set = cv_indices(2,680)
>>> cv_set
>>> (([0:340],[340:680]),([340:680,0:340]))
Algo:
1.Compute all the permutations.
    2. Iterate through all the permutations and first calculate the train indices by taking the first five, then
    six, seven and so on of each combination of arrangement. The rest will be the test indices.
3. Finally zip it to form the indices.
:param num_folds: folds for cv
:param num_samples: number of samples of input of data (should be a multiple of 340)
:return: return a zipped list of tuples
of ranges of training and testing data
"""
n_epoch = 340
n_subjects = num_samples/n_epoch
rem=num_samples%n_epoch
assert (rem == 0),"samples passed in not a multiple of 340"
assert (num_folds<=n_subjects),"number of subjects is less then number of folds"
n_set = np.round(n_subjects/num_folds)
n_set = int(n_set)
n_subjects=int(n_subjects)
flag=[]
for i in range(num_folds):
if i<num_folds-1:
flag=flag+[list(range(i*n_set,(i+1)*n_set))]
else:
flag=flag+[list(range(i*n_set,n_subjects))]
train_indices=[]
test_indices=[]
#permutations=perm1(range(num_folds))
permutations=list(itertools.combinations(list(range(num_folds)),num_folds-1))
permutations=list(map(list,permutations))
sets = len(permutations)
permutations_test=list(itertools.combinations(list(range(num_folds)),1))
permutations_test=list(map(list,permutations_test))
permutations_test.reverse()
for i in range(num_folds-1):
for j in range(sets):
for k in range(len(flag[permutations[j][i]])):
if i<1:
train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
test_indices=test_indices+[list(range(flag[permutations_test[j][i]][k]*n_epoch,(flag[permutations_test[j][i]][k]+1)*n_epoch))]
else:
train_indices=train_indices+[list(range(flag[permutations[j][i]][k]*n_epoch,(flag[permutations[j][i]][k]+1)*n_epoch))]
custom_cv=list(zip(train_indices,test_indices))
return custom_cv | ce0d983458a089919f5581ebc0a650f73cf4c423 | 10,381 |
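# Illustrative use of cv_indices() above, matching the docstring example: two
# subject-wise folds over 680 samples (2 subjects x 340 epochs).
cv_set = cv_indices(2, 680)
assert len(cv_set) == 2
train_idx, test_idx = cv_set[0]
assert train_idx == list(range(0, 340)) and test_idx == list(range(340, 680))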
import pkg_resources
import json
import jsonschema
def load_schema(schema_name: str) -> dict:
"""Load a JSON schema.
This function searches within apollon's own schema repository.
If a schema is found it is additionally validated agains Draft 7.
Args:
schema_name: Name of schema. Must be file name without extension.
Returns:
Schema instance.
Raises:
IOError
"""
schema_path = 'schema/' + schema_name + SCHEMA_EXT
if pkg_resources.resource_exists('apollon', schema_path):
schema = pkg_resources.resource_string('apollon', schema_path)
schema = json.loads(schema)
jsonschema.Draft7Validator.check_schema(schema)
return schema
    raise IOError(f'Schema ``{schema_path}`` not found.')
import numpy as np
def question_12(data):
    """
    Question 12: linearly transform the data, plot it, and print the resulting covariance matrix.
    :param data: data
    :return: data after the linear transformation
    """
s_mat = np.array([[0.1, 0, 0], [0, 0.5, 0], [0, 0, 2]])
new_data = np.matmul(s_mat, data)
plot_3d(new_data, "Q12: Linear Transformed the prev data")
print("------ Covariance Matrix (QUESTION 12) ------")
print_cov_mat(new_data)
return new_data | e0904f6a36d2833b48e064ff62d3071d07ab448b | 10,383 |
def update_storage(user_choice):
"""It updates the Coffee Machine resources after a beverage is ordered."""
resources["water"] = resources["water"] - MENU[user_choice]["ingredients"]["water"]
resources["milk"] -= MENU[user_choice]["ingredients"]["milk"]
resources["coffee"] -= MENU[user_choice]["ingredients"]["coffee"]
return resources | 48c642fad80a124fd802a2ae1e1fc440ffa20203 | 10,384 |
def second_test_function(dataset_and_processing_pks):
"""
Pass a result of JSON processing to a function that saves result on a model.
:param dataset_and_processing_pks: tuple of two (Dataset PK, Processing PK)
:return: tuple of two (Dataset PK; JSON (Python's list of dicts))
"""
# unpack tuple; needed for Celery chain compatibility
dataset_pk, processing_pk = dataset_and_processing_pks
# re-fetch Dataset and Processing
dataset = Dataset.objects.get(pk=dataset_pk)
processing = Processing.objects.get(pk=processing_pk)
result = []
# calculate result; handle exceptions
try:
result = [{'result': pair['a'] + pair['b']} for pair in dataset.data]
except Exception as err:
# exception string = exception type + exception args
exception_message = "{type}: {message}". \
format(type=type(err).__name__, message=err)
# save exception to db
dataset.exception = exception_message
processing.exceptions = True
dataset.save()
processing.save()
return dataset_pk, result | b21960b1349c825b00997281dfbef4ef924846d0 | 10,385 |
import matplotlib.pyplot as plt
def matplotlib_axes_from_gridspec_array(arr, figsize=None):
"""Returned axes layed out as indicated in the array
Example:
--------
>>> # Returns 3 axes layed out as indicated by the array
>>> fig, axes = matplotlib_axes_from_gridspec_array([
>>> [1, 1, 3],
>>> [2, 2, 3],
>>> [2, 2, 3],
>>> ])
"""
fig = plt.figure(figsize=figsize)
gridspecs = matplotlib_gridspecs_from_array(arr)
axes = []
for gridspec in gridspecs:
axes.append(fig.add_subplot(gridspec))
return fig, axes | e1048ea32a6c3c8ea87a82c5c32f7e009c1b5c19 | 10,386 |
def _fetch_gene_annotation(gene, gtf):
"""
Fetch gene annotation (feature boundaries) and the corresponding sequences.
Parameters:
-----------
gene
gene name that should be found in the "gene_name" column of the GTF DataFrame.
type: str
gtf
GTF annotation DataFrame loaded by the gtfparse library.
        type: pandas.DataFrame
Returns:
--------
gene_df
subset of the input gtf DataFrame corresponding to rows that match the input gene
type: pandas.DataFrame
gene_id
        name of the gene. ideally matches the passed "gene" argument.
type: str
"""
gene_df = gtf.loc[gtf["gene_name"].str.contains(gene)]
gene_id = _check_gene_name(gene, gene_df["gene_name"])
return gene_df, gene_id | d08307fd3e079e6de3bca702a0d9f41005a6d5f7 | 10,387 |
import sys
from django.core.servers.basehttp import AdminMediaHandler
def deploy_static():
"""
Deploy static (application) versioned media
"""
if not env.STATIC_URL or 'http://' in env.STATIC_URL: return
remote_dir = '/'.join([deployment_root(),'env',env.project_fullname,'static'])
m_prefix = len(env.MEDIA_URL)
#if app media is not handled by django-staticfiles we can install admin media by default
if 'django.contrib.admin' in env.INSTALLED_APPS and not 'django.contrib.staticfiles' in env.INSTALLED_APPS:
if env.MEDIA_URL and env.MEDIA_URL == env.ADMIN_MEDIA_PREFIX[:m_prefix]:
print "ERROR: Your ADMIN_MEDIA_PREFIX (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
admin = AdminMediaHandler('DummyApp')
local_dir = admin.base_dir
remote_dir = ''.join([remote_dir,env.ADMIN_MEDIA_PREFIX])
else:
if env.MEDIA_URL and env.MEDIA_URL == env.STATIC_URL[:m_prefix]:
print "ERROR: Your STATIC_URL (Application media) must not be on the same path as your MEDIA_URL (User media)"
sys.exit(1)
elif env.STATIC_ROOT:
local_dir = env.STATIC_ROOT
static_url = env.STATIC_URL[1:]
if static_url:
remote_dir = '/'.join([remote_dir,static_url])
else: return
if env.verbosity:
print env.host,"DEPLOYING static",remote_dir
return deploy_files(local_dir,remote_dir) | 7c1c8d7ce725e285e08f5fa401f6e431a35fc77c | 10,388 |
from numpy import ones, mean, array
def randomPolicy(Ts):
""" Each action is equally likely. """
numA = len(Ts)
dim = len(Ts[0])
return ones((dim, numA)) / float(numA), mean(array(Ts), axis=0) | 6c99ecfe141cb909bceb737e9d8525c9c773ea74 | 10,389 |
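# Usage sketch for randomPolicy() above: two 3x3 transition matrices (one per
# action) yield a uniform 3x2 policy and the element-wise mean transition model.
from numpy import array
T_a = array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
T_b = array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
policy, T_mean = randomPolicy([T_a, T_b])  # policy is 3x2, all entries 0.5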
import numpy as np
from numpy import exp, log, log10
def calc_TiTiO2(P, T):
"""
Titanium-Titanium Oxide (Ti-TiO2)
================================
Define TiTiO2 buffer value at 1 bar
Parameters
----------
P: float
Pressure in GPa
T: float or numpy array
Temperature in degrees K
Returns
-------
float or numpy array
log_fO2
References
----------
Barin (1993) Thermo database
"""
if isinstance(T, float) or isinstance(T, int):
log_fO2 = log10(exp((-945822 + 219.6816*T -
5.25733*T*log(T)) /
(8.314*T)))
if isinstance(T, np.ndarray):
log_fO2_list = []
for temp in T:
log_fO2_list.append(log10(exp((-945822 + 219.6816*temp -
5.25733*temp*log(temp)) /
(8.314*temp))))
log_fO2 = np.array(log_fO2_list)
return log_fO2 | c4f920db3ff6020eba896039228e7adbcdcd4234 | 10,390 |
import distance
def nlevenshtein_scoredistance(first_data, memento_data):
"""Calculates the Normalized Levenshtein Distance given the content in
`first_data` and `memento_data`.
"""
score = compute_scores_on_distance_measure(
first_data, memento_data, distance.nlevenshtein)
return score | e85776c5ae95533c500a47d550ea848e6feceed7 | 10,391 |
import mxnet as mx
def parameter_from_numpy(model, name, array):
""" Create parameter with its value initialized according to a numpy tensor
Parameters
----------
name : str
parameter name
array : np.ndarray
initiation value
Returns
-------
mxnet.gluon.parameter
a parameter object
"""
p = model.params.get(name, shape=array.shape, init=mx.init.Constant(array))
return p | babf1a32e55d92bbe1ad2588167bd813836637e7 | 10,392 |
def execute_workflow_command():
"""Command that executes a workflow."""
return (
Command().command(_execute_workflow).require_migration().require_clean().with_database(write=True).with_commit()
) | f20b5be4d37f14179f0097986f2f75b1de699b79 | 10,393 |
import tqdm
import logging
from concurrent import futures
logger = logging.getLogger(__name__)
def fast_parse(python_class, parse_function, data_to_parse, number_of_workers=4, **kwargs):
"""
Util function to split any data set to the number of workers,
Then return results using any give parsing function
Note that when using dicts the Index of the Key will be passed to the function
Object too, so that needs to be handled
:param python_class: Instantiated class object which contains the parse function
:param parse_function: Function to parse data, can either be list or dict
:param data_to_parse: Data to be parsed
:param number_of_workers: Number of workers to split the parsing to
:param kwargs: Optional, extra params which parse function may need
:return:
"""
try:
function_object = getattr(python_class, parse_function)
except AttributeError as e:
logger.error(f"{python_class} doesn't have {parse_function}")
return
else:
results = []
data_len = len(data_to_parse)
with tqdm(total=data_len) as pbar:
with futures.ThreadPoolExecutor(max_workers=number_of_workers) as executor:
if type(data_to_parse) == list:
future_to_result = {executor.submit(function_object, data, **kwargs): data for data in data_to_parse}
                elif type(data_to_parse) == dict:
                    # Build the future -> data mapping across all dict values instead of
                    # overwriting it with a one-element set on every iteration.
                    future_to_result = {executor.submit(function_object, data, **kwargs): data
                                        for index, data in data_to_parse.items()}
else:
logger.error("Unsupported data type")
return
for future in futures.as_completed(future_to_result):
try:
data = future.result()
except Exception as exc:
logger.error(f"{future_to_result[future]} generated an exception: {exc}")
else:
results.append(data)
pbar.update(1)
return results | ffbd377a53362cb532c84860f3b26ff2ed1234c6 | 10,394 |
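# Usage sketch for fast_parse() above with a minimal (hypothetical) parser class;
# results come back unordered because workers complete at different times.
class UpperCaser:
    def parse(self, text):
        return text.upper()

parsed = fast_parse(UpperCaser(), "parse", ["ab", "cd", "ef"], number_of_workers=2)
# sorted(parsed) == ["AB", "CD", "EF"]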
from copy import copy
def create_plate(dim=DIMENSION, initial_position=-1):
"""
    Returns a newly created plate which is a matrix of dictionaries (a matrix of cells) and places the first crystal cell in it at initial_position
    The keys in a dictionary represent the properties of the cell
    :Keys of the dictionary:
- "is_in_crystal" : (bool) True if the cell belongs to the crystal, False otherwise
- "b": (float) the proportion of quasi-liquid water
- "c" : (float) the proportion of ice
- "d" : (float) the proportion of steam
:param dim: (tuple) [DEFAULT: DIMENSION] couple of positives integers (row, column), the dimension of the plate
:param initial_position: (tuple) [DEFAULT: The middle of the plate] the coordinates of the first crystal
    :return: (list of list of dictionaries) the plate
    Examples:
>>> DEFAULT_CELL["d"] = 1 # Used in order to not have any problems with doctest
>>> plate = create_plate(dim=(3,3))
>>> for line in plate:
... print("[", end="")
... for d in line:
... print("{", end="")
... for k in sorted(d.keys()):
... print(k, ":", d[k], ", ", end="")
... print("}, ", end="")
... print("]")
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 1 , d : 0 , i : 0 , is_in_crystal : True , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
[{b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, {b : 0 , c : 0 , d : 1 , is_in_crystal : False , }, ]
>>> DEFAULT_CELL["d"] = RHO # Reverts to original state
"""
plate = [[copy(DEFAULT_CELL) for j in range(dim[1])] for i in range(dim[0])]
if initial_position == -1:
initial_position = (dim[0]//2, dim[1]//2)
plate[initial_position[0]][initial_position[1]] = {"is_in_crystal":True, "b":0, "c":1, "d":0, "i":0}
return plate | 1f1b806035dc6dc24796840f7cb31c61cf7ec5a7 | 10,395 |
import unicodedata
def CanonicalizeName(raw_name: Text):
"""Strips away all non-alphanumeric characters and converts to lowercase."""
unicode_norm = unicodedata.normalize('NFKC', raw_name).lower()
# We only match Ll (lowercase letters) since alphanumeric filtering is done
# after converting to lowercase. Nl and Nd are numeric-like letters and
# numeric digits.
return ''.join(
x for x in unicode_norm if unicodedata.category(x) in ('Ll', 'Nl', 'Nd')) | bd8d2d47dae4220e51dab8d44a0c6b603986ecff | 10,396 |
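# Example: CanonicalizeName() above keeps only lowercase letters and digits.
assert CanonicalizeName("Hello, World 42!") == "helloworld42"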
import datetime
import json
def data_v1( request ):
""" Handles all /v1/ urls. """
( service_response, rq_now, rq_url ) = ( {}, datetime.datetime.now(), common.make_request_url(request) ) # initialization
dump_param_handler = views_helper.DumpParamHandler( rq_now, rq_url )
if request.GET.get( 'data', '' ) == 'dump':
return_values = dump_param_handler.grab_all_v1()
service_response = {'data': 'dump'}
elif 'callnumber' in request.GET:
call_param_handler = views_helper.CallParamHandler( request.GET['callnumber'].split(','), rq_now, rq_url )
return_values = call_param_handler.grab_callnumbers()
service_response['query'] = { 'request_type': 'call number', 'request_numbers': call_param_handler.callnumbers }
service_response['result'] = { 'items': return_values, 'service_documentation': settings_app.README_URL }
output = json.dumps( service_response, sort_keys=True, indent=2 )
return HttpResponse( output, content_type='application/json') | 18e98f015cb92f0d0e2ac6bbe9627d4c6ab33fb0 | 10,397 |
import subprocess
import sys
def ssh(server, cmd, checked=True):
""" Runs command on a remote machine over ssh."""
if checked:
return subprocess.check_call('ssh %s "%s"' % (server, cmd),
shell=True, stdout=sys.stdout)
else:
return subprocess.call('ssh %s "%s"' % (server, cmd),
shell=True, stdout=sys.stdout) | b8d1d492b7528dc7e601cf994b2c7a32b31af0d3 | 10,398 |
from math import factorial
def permutations(n, r=None):
    """Returns the number of ways of arranging r elements of a set of size n in
    a given order - the number of permutations.
    :param int n: The size of the set containing the elements.
    :param int r: The number of elements to arrange. If not given, it will be\
assumed to be equal to n.
:raises TypeError: if non-integers are given.
:raises ValueError: if r is greater than n.
:rtype: ``int``"""
if not isinstance(n, int): raise TypeError("n {} must be integer".format(n))
if r is None: return factorial(n)
if not isinstance(r, int): raise TypeError("r {} must be integer".format(r))
if r > n:
raise ValueError("r {} is larger than n {}".format(r, n))
return factorial(n) / factorial(n - r) | 7eab1c02ab0864f2abd9d224145938ab580ebd74 | 10,399 |
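# Examples for permutations() above: 5 items taken 2 at a time, and a full arrangement.
assert permutations(5, 2) == 20.0   # 5!/3!, returned as a float because of the division
assert permutations(4) == 24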