content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
def _add_experimental_function_notice_to_docstring(doc):
"""Adds an experimental notice to a docstring for experimental functions."""
return decorator_utils.add_notice_to_docstring(
doc, '',
'EXPERIMENTAL FUNCTION',
'(experimental)', ['THIS FUNCTION IS EXPERIMENTAL. It may change or '
'be removed at any time, and without warning.'])
|
449ab32b4ddae2d383776d0c90dbc56dc6041da6
| 25,993 |
def format_args(args):
"""Formats the command line arguments so that they can be logged.
Args:
args: The args returned from the `config` file.
Returns:
A formatted human readable string representation of the arguments.
"""
formatted_args = "Training Arguments: \n"
args = args.__dict__
for key in args.keys():
formatted_args += "\t > {} : {} \n".format(key, args[key])
return formatted_args
|
22d4334daba7cdfd77329f5a6de93a2411f0594d
| 25,994 |
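A minimal usage sketch for `format_args` above, assuming the parsed arguments behave like an `argparse.Namespace` (the real `config` file is not shown in this entry):
import argparse
# Hypothetical stand-in for the object returned by the config/argument parser.
args = argparse.Namespace(learning_rate=0.001, batch_size=32, epochs=10)
print(format_args(args))
# prints 'Training Arguments:' followed by one '> key : value' line per argument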
import uuid
def generate_id() -> str:
"""Generates random string with length of `ID_LENGTH`"""
return int_to_base36(uuid.uuid4().int)[:LENGTH_OF_ID]
|
8cf317741edf02ca79ef72bf51c7958877a98d98
| 25,995 |
def getHG37PositionsInRange(chromosome, startPos, endPos):
"""Return a DataFrame containing hg37 positions for all rsids in a range.
args:
chromosome (int or str): the chromosome number
startPos (int or str): the start position on the chromosome
endPos (int or str): the end position on the chromosome
returns:
df (DataFrame): all the rsids found in the genomic range
between startPos and endPos, indexed by rsid
chromosome (int or str): the chromosome number
"""
queryString = f'chr{chromosome}:{startPos}-{endPos}'
mv = myvariant.MyVariantInfo()
gen = mv.query(queryString, scopes='dbsnp.rsid',
fields='dbsnp.rsid, dbsnp.hg19.start', fetch_all=True,
assembly='hg37')
rsids = {}
for row in gen:
try:
rsid = (row['dbsnp']['rsid'])
start = (row['dbsnp']['hg19']['start'])
rsids[rsid] = start
except KeyError:
continue
df = pd.DataFrame.from_dict(rsids, orient='index')
return df, chromosome
|
64107a375588737e6fedcd336fe5d1a648a93efc
| 25,996 |
import numpy as np
def spherical_from_cart_np(xyz_vector):
"""
Convert a vector from Cartesian to spherical coordinates.
xyz_vector is [idx][x, y, z]
"""
if len(xyz_vector.shape) != 2:
xyz_vector = np.expand_dims(xyz_vector, axis=0)
expanded = True
else:
expanded = False
sph_vector = np.zeros(xyz_vector.shape)
xy = xyz_vector[:, 0]**2 + xyz_vector[:, 1]**2
sph_vector[:, 0] = np.sqrt(xy + xyz_vector[:, 2]**2)
# for elevation angle defined from Z-axis down
sph_vector[:, 1] = np.arctan2(np.sqrt(xy), xyz_vector[:, 2])
# for elevation angle defined from XY-plane up
# sph_vector[:,1] = np.arctan2(xyz_vector[:,2], np.sqrt(xy))
sph_vector[:, 2] = np.arctan2(xyz_vector[:, 1], xyz_vector[:, 0])
if expanded:
return sph_vector[0]
return sph_vector
|
86da4d3426b6327c5fd00f431e36655b9213a027
| 25,997 |
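A small usage sketch for `spherical_from_cart_np` above (only `numpy` is assumed); the returned triples are (radius, polar angle from the z-axis, azimuth):
import numpy as np
print(spherical_from_cart_np(np.array([1.0, 0.0, 0.0])))  # ~[1.0, 1.5708, 0.0]
batch = np.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
print(spherical_from_cart_np(batch))                       # ~[[1, 0, 0], [1, pi/2, pi/2]]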
def _execute_query(connection, query):
"""Executes the query and returns the result."""
with connection.cursor() as cursor:
cursor.execute(query)
return cursor.fetchall()
|
9f71eb650d323f7a5ead3451810a7b9f9d77b4b0
| 25,998 |
def mediaValues(x):
"""
return the mean (average) of a list
"""
return sum(x)/len(x)
|
ab4a436d3383e5df7d8d891c9661eabb0af81ef8
| 25,999 |
def _plot_NWOE_bins(NWOE_dict, feats):
"""
Plots the NWOE by bin for the subset of features of interest (given as a list)
Parameters
----------
- NWOE_dict = dictionary output of `NWOE` function
- feats = list of features to plot NWOE for
Returns
-------
- plots of NWOE for each feature by bin
"""
for feat in feats:
fig, ax = _plot_defaults()
feat_df = NWOE_dict[feat].reset_index()
plt.bar(range(len(feat_df)), feat_df['NWOE'], tick_label=feat_df[str(feat)+'_bin'], color='k', alpha=0.5)
plt.xticks(rotation='vertical')
ax.set_title('NWOE by bin for '+str(feat))
ax.set_xlabel('Bin Interval');
return ax
|
2c034c311ae406f1267256c3de975e021e9ba283
| 26,000 |
def net_model_fn(features, labels, mode, model,
resnet_size, weight_decay, learning_rate_fn, momentum,
data_format, resnet_version, loss_scale,
loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
conv_type=resnet_model.DEFAULT_CONV_TYPE,
optimizer_type=DEFAULT_OPTIMIZER,
run_type=DEFAULT_RUN_TYPE):
"""Shared functionality for different nets model_fns.
Uses that model to build the necessary EstimatorSpecs for
the `mode` in question. For training, this means building losses,
the optimizer, and the train op that get passed into the EstimatorSpec.
For evaluation and prediction, the EstimatorSpec is returned without
a train op, but with the necessary parameters for the given mode.
Args:
features: tensor representing input images
labels: tensor representing class labels for all input images
mode: current estimator mode; should be one of
`tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
model: a TensorFlow model that has a __call__ function.
resnet_size: A single integer for the size of the ResNet model.
weight_decay: weight decay loss rate used to regularize learned variables.
learning_rate_fn: function that returns the current learning rate given
the current global_step
momentum: momentum term used for optimization
data_format: Input format ('channels_last', 'channels_first', or None).
If set to None, the format is dependent on whether a GPU is available.
resnet_version: Integer representing which version of the ResNet network to
use. See README for details. Valid values: [1, 2]
loss_scale: The factor to scale the loss for numerical stability.
A detailed summary is present in the arg parser help text.
loss_filter_fn: function that takes a string variable name and returns
True if the var should be included in loss calculation, and False
otherwise. If None, batch_normalization variables will be excluded
from the loss.
dtype: the TensorFlow dtype to use for calculations.
Returns:
EstimatorSpec parameterized according to the input params and the
current mode.
"""
# Generate a summary node for the images
# `tensor` features which must be 4-D with shape `[batch_size, height,
# width, channels]`
tf.summary.image('images', features, max_outputs=6)
features = tf.cast(features, dtype)
logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)
# This acts as a no-op if the logits are already in fp32 (provided logits are
not a SparseTensor). If dtype is low precision, logits must be cast to
# fp32 for numerical stability.
logits = tf.cast(logits, tf.float32)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
}
if mode == tf.estimator.ModeKeys.PREDICT:
# Return the predictions and the specification for serving a SavedModel
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
export_outputs={
'predict': tf.estimator.export.PredictOutput(predictions)
})
# Calculate loss, which includes softmax cross entropy and L2 regularization.
cross_entropy = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=labels)
# Create a tensor named cross_entropy for logging purposes.
tf.identity(cross_entropy, name='cross_entropy')
tf.summary.scalar('cross_entropy', cross_entropy)
# If no loss_filter_fn is passed, assume we want the default behavior,
# which is that batch_normalization variables are excluded from loss.
def exclude_batch_norm(name):
return 'batch_normalization' not in name
loss_filter_fn = loss_filter_fn or exclude_batch_norm
# Add weight decay to the loss.
l2_loss = weight_decay * tf.add_n(
# loss is computed using fp32 for numerical stability.
[tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
if loss_filter_fn(v.name)])
tf.summary.scalar('l2_loss', l2_loss)
loss = cross_entropy + l2_loss
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = learning_rate_fn(global_step)
# Create a tensor named learning_rate for logging purposes
tf.identity(learning_rate, name='learning_rate')
tf.summary.scalar('learning_rate', learning_rate)
tf.logging.info("optimizer_type: " + optimizer_type.name)
if optimizer_type is OptimizerType.MOMENTUM:
optimizer = tf.train.MomentumOptimizer(
learning_rate=learning_rate,
momentum=momentum
)
elif optimizer_type is OptimizerType.ADAM:
optimizer = tf.train.AdamOptimizer(
learning_rate=learning_rate,
epsilon=1e-8
)
else:
raise ValueError(
"Unsupported optimizer type: " + str(optimizer_type) +
". Please choose from: " + ",".join(
[optimizer_type.name for optimizer_type in OptimizerType]))
if loss_scale != 1:
# When computing fp16 gradients, often intermediate tensor values are
# so small, they underflow to 0. To avoid this, we multiply the loss by
# loss_scale to make these tensor values loss_scale times bigger.
scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)
# Once the gradient computation is complete we can scale the gradients
# back to the correct scale before passing them to the optimizer.
unscaled_grad_vars = [(grad / loss_scale, var)
for grad, var in scaled_grad_vars]
minimize_op = optimizer.apply_gradients(unscaled_grad_vars,
global_step)
else:
minimize_op = optimizer.minimize(loss, global_step)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
train_op = tf.group(minimize_op, update_ops)
else:
train_op = None
if not tf.contrib.distribute.has_distribution_strategy():
accuracy = tf.metrics.accuracy(labels, predictions['classes'])
else:
# Metrics are currently not compatible with distribution strategies during
# training. This does not affect the overall performance of the model.
accuracy = (tf.no_op(), tf.constant(0))
metrics = {'accuracy': accuracy}
# Create a tensor named train_accuracy for logging purposes
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=metrics)
|
a9758bbbf5220091ee8faffde82bc271ea676c44
| 26,002 |
def vectorvalued(f):
""" Decorates a distribution function to disable automatic vectorization.
Parameters
----------
f: The function to decorate
Returns
-------
Decorated function
"""
f.already_vectorized = True
return f
|
cc498fe0731acdbde0c4d9b820a1accb5dc94fea
| 26,003 |
import unicodedata
def remove_diacritics(input_str: str) -> str:
"""Remove diacritics and typographical ligatures from the string.
- All diacritics (i.e. accents) will be removed.
- Typographical ligatures (e.g. ffi) are broken into separated characters.
- True linguistic ligatures (e.g. œ) will remain.
- Non-latin scripts will remain.
Args:
input_str (str): The original string with diacritics and ligatures.
Returns:
str: The string without diacritics and typographical ligatures.
"""
nfkd_form = unicodedata.normalize('NFKD', input_str)
return u"".join([c for c in nfkd_form if not unicodedata.combining(c)])
|
23c3e9ce0029704f0012a825460f10f370e3c681
| 26,004 |
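A few illustrative calls for `remove_diacritics` above; the behaviour follows from NFKD normalization plus dropping combining marks:
print(remove_diacritics("café"))    # 'cafe'   (accent removed)
print(remove_diacritics("ﬁn"))      # 'fin'    (typographic ligature broken apart)
print(remove_diacritics("œuvre"))   # 'œuvre'  (true linguistic ligature kept)
print(remove_diacritics("Привет"))  # 'Привет' (non-Latin script untouched)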
def extract_model_field_meta_data(form, attributes_to_extract):
""" Extract meta-data from the data model fields the form is handling. """
if not hasattr(form, 'base_fields'):
raise AttributeError('Form does not have base_fields. Is it a ModelForm?')
meta_data = dict()
for field_name, field_data in form.base_fields.items():
meta_data[field_name] = dict()
for attrib in attributes_to_extract:
meta_data[field_name][attrib] = getattr(field_data, attrib, '')
return meta_data
|
e41a63935379c5d3310646c79c25a43ad7f6d5fe
| 26,005 |
import json
def beam_search(image, encoder, embedder, attention, decoder,
beam_width=30, max_length=18, redundancy=0.4,
ideal_length=7, candidates=0, as_words=True):
"""
beam_search is a breadth limited sorted-search: from root <start> take next
best beam-width children out of vocab_size sorted by probability, then
calculate children for each child and take next best beam-width children
out of vocab_size * beamwidth possible children, repeating this process
until you hit beam-width leaves <end> or a maximum path-size of max_length.
Each candidate path from root to leaf (or forced end node) is rescored
based on ideal_length -- different from traditional method of normalizing
based on caption length and some small value alpha.
Parameters
----------
image : preprocessed image tensor
image to be captioned
encoder : encoder model
embedder : embedder model
attention : attention model
decoder : decoder model
beam_width : int, greater than 1
size of scope to find best candidate
max_length : int
max tolerated length of caption with <start> and <end> tokens
redundancy : float from 0 to 1
percentage of repeated words in caption, high redundancy is nonsensical
ideal_length : int from 0 to max_length
represents ideal length of a caption and is used to rescore captions
to bias those whose length are closest
candidates : int from 0 to beam_width
represents number of additional captions to predict
as_words : bool
whether output should be a words or encoded as number sequences based
on word_list.json
"""
assert beam_width > candidates
with open(dl.WORDS_PATH, 'r') as fp:
word_list = json.load(fp)
bad_endings = ['a', 'with', 'of', 'in', 'on', 'for', 'by']
bad_endings = [word_list.index(word) for word in bad_endings]
features = embedder(encoder(tf.expand_dims(image, 0)))
# to use after root <start>
features_dup = tf.repeat(features, beam_width, 0)
decoder_state = tf.zeros((1, decoder.output[1].shape[1]))
end = word_list.index('<end>')
word = tf.expand_dims([word_list.index('<start>')], 1)
# initialize
context_vector, _ = attention([decoder_state, features])
word_logits, decoder_state = decoder([word, context_vector])
scores = tf.nn.log_softmax(word_logits)
topk = tf.math.top_k(scores, beam_width)
# minheaps to store scores with tuples (score, sequence tuple, decoder_state)
# throws ValueError when value of first elem equal another value on heap
# to resolve make second value guaranteed unique, which the sequence tuple is,
# also, it will preference more common (smaller indexed) words, a good behavior
# need to invert values to use minheap as maxheap
min_heap = []
candidate_nodes = []
for i in range(beam_width):
node = tuple(
[float(-1 * topk.values[0][i].numpy()), # word score
tuple([topk.indices[0][i].numpy()]), # word
decoder_state]
)
heappush(min_heap, node)
while len(candidate_nodes) < beam_width:
nodes = [heappop(min_heap) for i in range(beam_width)]
min_heap.clear()
word = tf.reshape([[node[1][-1] for node in nodes]], [beam_width, 1])
_decoder_state = tf.squeeze(
tf.concat([[node[2] for node in nodes]], axis=0))
# get next states and words
context_vector, _ = attention([_decoder_state, features_dup])
word_logits, decoder_state = decoder([word, context_vector])
# get top beamwidth possibilities from each node in nodes
scores = tf.nn.log_softmax(word_logits)
topk = tf.math.top_k(scores, beam_width)
for n, node in enumerate(nodes):
if len(candidate_nodes) == beam_width:
break
# add best nodes to candidates
# only the nodes that come off the heap should be added
if node[1][-1] == end:
if node[1][-2] in bad_endings:
continue
candidate_nodes.append(node)
continue
elif len(node[1]) == max_length - 1:
lst = list(node)
lst[1] += tuple([end])
node = tuple(lst)
candidate_nodes.append(node)
continue
# create next nodes to add to heap
for i in range(beam_width):
new_word = topk.indices[n][i].numpy()
new_node = tuple(
[(-1 * topk.values[n][i].numpy()) + node[0],
node[1] + tuple([new_word]),
decoder_state[n]]
)
# don't follow nodes that lead to high redundancy
counts = {}
for word in new_node[1]:
if word not in counts:
counts[word] = 0
counts[word] += 1
score = sum(map(lambda x: x if x > 1 else 0,
list(counts.values()))) / len(node[1])
if score >= redundancy:
continue
heappush(min_heap, new_node)
# collect answer here
sentences = []
min_heap.clear()
# calculate length normalize with alpha and find sentence with best score
for node in candidate_nodes:
# alternate ways to rescore captions
#normalizer = 1 / ((len(node[1]) ** alpha))
#normalizer = 1 / (abs(ideal_length - len(node[1])) ** alpha + 1)
score = len(node[1])
score = (2.5 * ideal_length - score) / 2
score += node[0]
new_node = (-score, node[1][:-1])
heappush(min_heap, new_node)
if not candidates:
sentences = heappop(min_heap)[1]
if as_words:
sentences = [word_list[i] for i in sentences]
else:
sentences = [heappop(min_heap)[1] for n in range(candidates + 1)]
if as_words:
sentences = [[word_list[i] for i in n] for n in sentences]
return sentences
|
c55bcbbff739db7cb25513bdb62b4d165359ab1c
| 26,006 |
def strip_leading_and_trailing_lines(lines, comment):
"""
Removes any leading and trailing blank lines and comments.
:param lines: An array of strings containing the lines to be stripped.
:param comment: The block comment character string.
:return: An updated array of lines.
"""
comment = comment.strip()
return strip_lines(strip_lines(lines, 0, comment), -1, comment)
|
19fe932532f12c5602c4c319c761fc5229efe37b
| 26,007 |
def bold(msg: str) -> str:
"""Bold version of the message
"""
return bcolors.BOLD + msg + bcolors.ENDC
|
25cca7e505b0d0155ff46b86f2f899830cce4216
| 26,008 |
def escape(value):
"""
extends the classic escaping also to the apostrophe
@Reviewer: Do you please have a better way?
"""
value = bleach.clean(value)
value = value.replace("'", "&#39;")
return value
|
7c048a915b1d11ededd45042040215c0089e019b
| 26,009 |
def login(request):
"""
:param request:
:return:
"""
if request.user.is_authenticated:
return redirect('/')
if request.method == "POST":
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
auth_login(request, user)
return redirect("/")
else:
return render(request, "login.html")
return render(request, "login.html")
|
13482776aabe90eaeec6cbc730945df816897471
| 26,010 |
def plot_date_randomisation(
ax: plt.axes,
replicates: np.array or list,
rate: float,
log10: bool = True
) -> plt.axes:
""" Plot distribution of substitution rates for date randomisation test
:param ax: axes object to plot the date randomisation
:param replicates: list of replicate substitution rate estimates
:param rate: true rate estimate vertical line for evaluation
:param log10: plot log10 of substitution rates on horizontal axis
:returns axes object
"""
if log10:
replicates = np.log10(replicates)
with plt.style.context('seaborn-colorblind'):
ax.hist(x=replicates, color='gray')
ax.axvline(x=rate, color='r')
return ax
|
35b99e0f464149e9b948203209ea4ad0fb9c52ef
| 26,011 |
def dict(filename, cols=None, dtype=float, include=None, exclude='#',
delimiter='', removechar='#', hmode='1', header_start=1,
data_start=0, hsep='', lower=False):
"""
Creates a dictionary in which each chosen column in the file is an
element of the dictionary, where keys correspond to column names.
Uses the functions header and table to create a dictionary with the
columns of the file (see each function's help).
The file must have a header in which all column names are given.
"""
if isinstance(cols, string_types) or isinstance(cols, int):
cols = (cols,)
full_output = False
if cols is not None:
if isinstance(cols[0], string_types):
full_output = True
head = header(filename, cols=cols, removechar=removechar, hmode=hmode,
header_start=header_start, hsep=hsep, lower=lower,
full_output=full_output)
if full_output:
head, cols = head
data = table(filename, cols=cols, dtype=dtype, include=include,
exclude=exclude, data_start=data_start, delimiter=delimiter)
if isinstance(data, np.ndarray):
if len(data.shape) == 1:
dic = {head[0]: data}
else:
dic = {head[0]: data[0]}
for i in range(1, len(head)):
dic[head[i]] = data[i]
else:
dic = {head[0]: data[0]}
for i in range(1, len(head)):
dic[head[i]] = data[i]
return dic
|
ffa00f53a52e84143cb3a612730db1a57c130d87
| 26,013 |
def solution(p, flux, error, line, cont, sens, model_wave, coeffs, fjac=None):
""" Fitting function for mpfit, which will minimize the returned deviates.
This compares the model stellar spectra*sensitivity to observed spectrum
to get wavelength of each pixel."""
# Convert pixels to wavelengths
xref = coeffs[0] + coeffs[1]*(0-p[0])
pixel = list(range(len(flux)))
x = p[1]*(pixel+p[2]) + xref
model = (p[3]*line + p[4]*cont) / model_wave / model_wave * sens
# model = (p[3]*line + p[4]*cont) / model_wave * sens
model = model / np.max(model)
# Interpolate model to match data wavelength
theory = np.interp(x, model_wave, model)
status = 0
return [status, (flux-theory)/error]
|
0568751e1e564ae01ee72af28446d8cfdac50324
| 26,014 |
import torch
def filter_image(image, model, scalings):
"""
Filter an image with the first layer of a VGG16 model.
Apply filter to each scale in scalings.
Parameters
----------
image: 2d array
image to filter
model: pytorch model
first layer of VGG16
scalings: list of ints
downsampling factors
Returns
-------
all_scales: list of 2d array
list of filtered images. For each downsampling factor,
there are N images, N being the number of filters of the model.
"""
n_filters = 64
# convert to numpy if it is a dask array
image = np.asarray(image)
all_scales=[]
for s in scalings:
im_tot = image[::s,::s].astype(np.float32)
#im_tot = np.ones((3,im_tot.shape[0],im_tot.shape[1]), dtype=np.float32) * im_tot
im_torch = torch.from_numpy(im_tot[np.newaxis, np.newaxis, ::])
out = model.forward(im_torch)
out_np = out.detach().numpy()
if s > 1:
out_np = skimage.transform.resize(
out_np, (1, n_filters, image.shape[0],image.shape[1]), preserve_range=True)
all_scales.append(out_np)
return all_scales
|
031bdf98d0220437afedd489068abb195866ed13
| 26,015 |
def gen_init_params(m_states: int, data: np.ndarray) -> tuple:
"""
Generate initial parameters for HMM training.
"""
init_lambda = gen_sdm(data, m_states)
init_gamma = gen_prob_mat(m_states, m_states)
init_delta = gen_prob_mat(1, m_states)
return init_lambda, init_gamma, init_delta
|
7d57ccb5ba9144ba78be9486795638f6409a6730
| 26,016 |
import tempfile
def generate_temp_csvfile(headers: list, data: list) -> object:
"""
Generates a temporary csv file
:param
headers: list
A list of file headers where each item is a string
data: list
A list containing another list representing the rows for the CSV
:returns: IO
csvfile :
Temporary csv file
"""
with tempfile.NamedTemporaryFile(mode="w", delete=False) as csvfile:
_write_data_to_csv(csvfile, headers, data)
return csvfile
|
6d527794fbfee8c045e2bee31c0d85997beb371b
| 26,017 |
def read_raw_antcnt(input_fname, montage=None, eog=(), event_id=None,
event_id_func='strip_to_integer', preload=False,
verbose=None):
"""Read an ANT .cnt file
Parameters
----------
input_fname : str
Path to the .cnt file.
montage : str | None | instance of montage
Path or instance of montage containing electrode positions.
If None, sensor locations are (0,0,0). See the documentation of
:func:`mne.channels.read_montage` for more information.
eog : list | tuple | 'auto'
Names or indices of channels that should be designated EOG channels.
If 'auto', the channel names containing ``EOG`` or ``EYE`` are used.
Defaults to empty tuple.
event_id : dict | None
The ids of the events to consider. If None (default), an empty dict is
used and ``event_id_func`` (see below) is called on every event value.
If dict, the keys will be mapped to trigger values on the stimulus
channel and only keys not in ``event_id`` will be handled by
``event_id_func``. Keys are case-sensitive.
Example::
{'SyncStatus': 1, 'Pulse Artifact': 3}
event_id_func : None | str | callable
What to do for events not found in ``event_id``. Must take one ``str``
argument and return an ``int``. If string, must be 'strip-to-integer',
in which case it defaults to stripping event codes such as "D128" or
"S 1" of their non-integer parts and returns the integer.
If the event is not in the ``event_id`` and calling ``event_id_func``
on it results in a ``TypeError`` (e.g. if ``event_id_func`` is
``None``) or a ``ValueError``, the event is dropped.
preload : bool or str (default False)
Preload data into memory for data manipulation and faster indexing.
If True, the data will be preloaded into memory (fast, requires
large amount of memory). If preload is a string, preload is the
file name of a memory-mapped file which is used to store the data
on the hard drive (slower, requires less memory). Note that
preload=False will be effective only if the data is stored in a
separate binary file.
verbose : bool | str | int | None
If not None, override default verbose level (see mne.verbose).
Returns
-------
raw : Instance of RawANTCNT
A Raw object containing ANT .cnt data.
Notes
-----
.. versionadded:: 0.11.0
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawANTCNT(input_fname=input_fname, montage=montage, preload=preload,
eog=eog, event_id=event_id, event_id_func=event_id_func,
verbose=verbose)
|
db1ff04097b1ea8125aec90dfd308023cc5b3dba
| 26,018 |
import netaddr
def addrstr(ip: netaddr.IPNetwork) -> str:
"""
helper for mapping IP addresses to config statements
"""
address = str(ip.ip)
if netaddr.valid_ipv4(address):
return "ip address %s" % ip
elif netaddr.valid_ipv6(address):
return "ipv6 address %s" % ip
else:
raise ValueError("invalid address: %s" % ip)
|
2faf35da4f6739db5b3e5eece92366e44595c11b
| 26,019 |
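A short sketch of `addrstr` above in use (requires the `netaddr` package):
import netaddr
print(addrstr(netaddr.IPNetwork("10.0.0.1/24")))     # 'ip address 10.0.0.1/24'
print(addrstr(netaddr.IPNetwork("2001:db8::1/64")))  # 'ipv6 address 2001:db8::1/64'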
def upload_fixture_file(domain, filename, replace, task=None, skip_orm=False):
"""
should only ever be called after the same file has been validated
using validate_fixture_file_format
"""
workbook = get_workbook(filename)
if skip_orm is True:
return _run_fast_fixture_upload(domain, workbook, task=task)
return _run_fixture_upload(domain, workbook, replace=replace, task=task)
|
ac61bbd25a4605a11a9133ef7d4340444be46d8a
| 26,020 |
def lambda_plus_mu_elimination(
offspring: list, population: list, lambda_: int):
""" Performs the (λ+μ)-elimination step of the evolutionary algorithm
Args:
offspring (list): List of the offspring
population (list): List of the individuals in a population
lambda_ (int): Number of top lambda_ candidates that will be retained
Returns:
new_combined: Top lambda_ candidates that are retained
"""
# combine population and offspring
combined = population + offspring
# sort new population
combined = sorted(combined, key=lambda k: k.fitness, reverse=False)
# pick top lambda candidates
combined = combined[:lambda_]
return combined
|
d4f55fa621e3f33e2773da81a6cf0b2fc0439ba9
| 26,021 |
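A tiny sketch of the (λ+μ)-elimination above, using a hypothetical `Individual` with a `fitness` attribute (lower is better, matching the ascending sort in the function):
from collections import namedtuple
Individual = namedtuple("Individual", ["fitness"])  # hypothetical stand-in
population = [Individual(3.0), Individual(1.5)]
offspring = [Individual(0.7), Individual(2.2)]
survivors = lambda_plus_mu_elimination(offspring, population, lambda_=2)
print([ind.fitness for ind in survivors])            # [0.7, 1.5]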
def dVdtau(z):
""" cosmological time-volume element [Mpc^3 /redshift /sr]
it is weighted by an extra (1+z) factor to reflect the rate
in the rest frame vs the observer frame
"""
DH=c_light_kms/cosmo.H0
return DH*(1+z)*DA(z)**2/E(z)
|
80384549a5ab30bcccbd45076f0b25044402abc0
| 26,022 |
def receive(message):
"""
Function to read whatever is presented to the serial port and print it to the console.
Note: For future use: Currently not used in this code.
"""
messageLength = len(message)
last_message = []
try:
while arduinoData.in_waiting > 0:
for i in range(0, messageLength):
last_message.append(int.from_bytes(arduinoData.read(), "little"))
#print("GOT: ", last_message)
return last_message
except:
print("Failed to receive serial message")
pass
|
b526d1888d089f77dc0953488bbafeaa74e5ba45
| 26,023 |
from typing import List
from typing import Optional
from typing import Callable
def fit(model_types: List[str],
state_cb_arg_name: Optional[str] = None,
instance_arg_name: Optional[str] = None) -> Callable:
"""Decorator used to indicate that the wrapped function is a fitting
function. The decorated function should take at least one argument - model
instance (passed as the first positional argument by default). It may also
take any number of additional positional and keyword arguments and should
return the updated model instance.
Args:
model_types: types of models supported by the decorated function
state_cb_arg_name: if set, indicates that the caller should pass a
state callback function as a keyword argument and use the passed
value as the argument name. The function is of type Callable[Any]
instance_arg_name: if set, indicates under which
argument name to pass the concrete model instance. If not set, it
is passed in the first positional argument
Returns:
Decorated function"""
def decorator(function):
for model_type in model_types:
if model_type in _declarations['fit']:
raise ValueError(f'duplicate fitting function under model '
f'type {model_type}')
_declare('fit', model_type, common.FitPlugin(
function=function, state_cb_arg_name=state_cb_arg_name,
instance_arg_name=instance_arg_name))
return function
return decorator
|
0b8217184118269dab7095e424550736e495ba04
| 26,024 |
def infer_filetype(filepath, filetype):
"""
The function which infers the file type
Parameters
----------
filepath : str
command line argument of filepath
filetype : str
command line argument of filetype
Returns
-------
filepath : str
filepath
filetype : str
filetype
"""
if filetype == DIC_DEFAULT_VALUES['filetype']:
# Extract filename from filepath
filename = extract_filename(filepath)
filetype = extract_extension(filepath)
# Convert extension to alias
if filename in DIC_LEXER_CONST.keys():
return filepath, DIC_LEXER_CONST[filename]
elif filetype in DIC_LEXER_WC.keys():
return filepath, DIC_LEXER_WC[filetype]
else:
return filepath, filetype
else:
return filepath, filetype
|
6e47f60dd8cabe8ba14d26736c3d2508952c1334
| 26,025 |
def suite_for_devices(devices):
"""Create a TyphonSuite to display multiple devices"""
suite = TyphonSuite()
for device in devices:
suite.add_device(device)
return suite
|
08911e85b775e98cdb09b96daaa74f27076c51f6
| 26,026 |
import numpy as np
def get_3d_points(preds_3d):
"""
Centers the 3D points by subtracting each pose's mean.
Parameters
----------
preds_3d : numpy.ndarray
The raw 3D points.
Returns
-------
preds_3d : numpy.ndarray
The centered points.
"""
for i,p in enumerate(preds_3d):
preds_3d[i] = preds_3d[i] - preds_3d[i].mean(0)*np.ones((16,1));
return preds_3d;
|
e0c93af3f1d803a9276deb86fb3a3bcb2d90859f
| 26,027 |
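A quick check of `get_3d_points` above on synthetic data (only `numpy` assumed): one 16-joint pose is shifted away from the origin, and after centering its per-coordinate mean is ~0. Note the function modifies its input in place:
import numpy as np
pose = np.random.rand(16, 3) + 5.0            # a single 16-joint pose, far from the origin
centered = get_3d_points(np.array([pose]))    # expects an array of poses
print(np.allclose(centered[0].mean(0), 0.0))  # True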
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime, timezone
from functools import partial
import logging
import numpy as np
import pandas as pd
from dateutil.parser import parse  # assumed source of the parse() used for date strings below
def mms_load_data(trange=['2015-10-16', '2015-10-17'], probe='1', data_rate='srvy', level='l2',
instrument='fgm', datatype='', anc_product=None, descriptor=None,
varformat=None, prefix='', suffix='', get_support_data=False, time_clip=False,
no_update=False, center_measurement=False, notplot=False, data_root=None):
"""
This function loads MMS data into a dictionary by variable name.
Parameters:
trange : list of str
time range of interest [starttime, endtime] with the format
'YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
probe : str or list of str
list of probes, valid values for MMS probes are ['1','2','3','4'].
data_rate : str or list of str
instrument data rates for FGM include 'brst' 'fast' 'slow' 'srvy'. The
default is 'srvy'.
level : str
indicates level of data processing. the default if no level is specified is 'l2'
instrument : str or list of str
Name(s) of instrument(s) for which to load data.
datatype : str or list of str
One or more types of data to load.
Must be selected from this list: ['ancillary', 'hk', 'science']
If given as an empty string or not provided, will default to 'science' data.
anc_product : str or list of str
One or more ancillary products to load.
descriptor : str or list of str
Optional name(s) of data subset(s) to load.
varformat: str
The file variable formats to load. Wildcard character
"*" is accepted. By default, all variables are loaded in.
prefix: str
The variable names will be given this prefix. By default,
no prefix is added.
suffix: str
The variable names will be given this suffix. By default,
no suffix is added.
get_support_data: bool
If True, data with an attribute "VAR_TYPE" with a value of "support_data"
will be loaded into data tables. If False, only loads in data with a
"VAR_TYPE" attribute of "data". Defaults to False.
time_clip: bool
Data will be clipped to the exact trange specified by the trange keyword.
no_update: bool
If true, do not poll the upstream MMS data repository for new/updated data.
This will limit loading to only files already available from the local system.
center_measurement: bool
If True, the CDF epoch variables are time-shifted to the middle
of the accumulation interval by their DELTA_PLUS_VAR and
DELTA_MINUS_VAR variable attributes
notplot: bool
[Deprecated] No effect. Parameter is preserved for partial
compatibility with original pyspedas implementation.
data_root: str
Full path to the root directory where MMS directory structure begins.
If not provided, will default to '<user_home>/data/mms'
Returns:
Tuple of dictionaries with the loaded data and metadata.
ie. (data, metadata)
"""
if not (isinstance(probe, list) or probe is None): probe = [probe]
if not (isinstance(data_rate, list) or data_rate is None): data_rate = [data_rate]
if not isinstance(datatype, list): datatype = [datatype]
if not isinstance(level, list): level = [level]
if not isinstance(descriptor, list): descriptor = [descriptor]
if probe:
probe = [('mms'+(str(p))) for p in probe]
# We're going to handle everything as datetime objects for consistency and easy conversion at-need.
local_trange = [None,None]
if type(trange[0]) == datetime: # Already a datetime.
local_trange[0] = trange[0]
elif type(trange[0]) in (int,float,np.float32,np.float64): # Convert from posix timestamp if provided.
local_trange[0] = datetime.fromtimestamp(trange[0], timezone.utc)
elif type(trange[0]) == str: # Parse the string and generate a datetime.
local_trange[0] = parse(trange[0])
else:
raise TypeError("Unsupported input format for start date/time.")
if type(trange[1]) == datetime: # Already a datetime.
local_trange[1] = trange[1]
elif type(trange[1]) == int: # Convert from posix timestamp if provided.
local_trange[1] = datetime.fromtimestamp(trange[1], timezone.utc)
elif type(trange[1]) == str: # Parse the string and generate a datetime.
local_trange[1] = parse(trange[1])
else:
raise TypeError("Unsupported input format for end date/time.")
# Replicating behavior of pyspedas:
start_date = local_trange[0].isoformat()
end_date = local_trange[1].isoformat()
out_files = []
for dtype in datatype:
# Default to 'science' data, as the old SPEDAS implementation assumed that and used "datatype" for something else entirely.
if len(dtype) == 0: dtype = 'science'
for lvl in level:
for desc in descriptor:
mms_api_client = mms_sdc_api_client.MMS_SDC_API_CLIENT(
sc=probe,
instr=instrument,
mode=data_rate,
level=lvl,
data_type=dtype,
anc_product=anc_product,
data_root=data_root,
end_date=end_date,
offline=no_update,
optdesc=desc,
site='public',
start_date=start_date)
logging.info('download URI: '+mms_api_client.url())
out_files.extend(mms_api_client.Download())
out_files = sorted(out_files)
#Because we're not using pytplot, new_variables is a simple dictionary containing the data.
# eg.
# pytplot: get_data(Varname)
# current: new_variables[Varname].values()
#new_variables,new_metadata = load_cdf(out_files, varformat=varformat, get_support_data=get_support_data, prefix=prefix, suffix=suffix, center_measurement=center_measurement)
new_variables = {}
new_metadata = {}
logging.info('Beginning parallel load of '+str(len(out_files))+' data files...')
# This attempts to load all requested cdf files into memory concurrently, using as many threads as the system permits.
# The load_cdf function returns a tuple of (data, metadata), so pile_o_data will be a list of these tuples.
# pile_o_data = p_map(load_cdf, out_files, varformat, get_support_data, prefix, suffix, center_measurement)
#pile_o_data = p_map(partial(load_datafile, varformat=varformat, get_support_data=get_support_data, prefix=prefix, suffix=suffix, center_measurement=center_measurement), out_files)
##pile_o_data = p_map(partial(load_cdf, varformat=varformat, get_support_data=get_support_data, prefix=prefix, suffix=suffix, center_measurement=center_measurement), out_files)
with ThreadPoolExecutor() as p:
pile_o_data = p.map(partial(load_datafile, varformat=varformat, get_support_data=get_support_data, prefix=prefix, suffix=suffix, center_measurement=center_measurement), out_files)
# Merge matching variable names across loaded files.
logging.info('Stitching together the data...')
for data,metadata in pile_o_data:
# merge data dictionary
if isinstance(data, pd.DataFrame):
# Ancillary data loaded via Pandas magic.
dataset = metadata['Set_Name']
# Metadata
if dataset not in new_metadata.keys():
# No previous entries for this dataset. Add the whole thing as-is.
new_metadata[dataset] = metadata
else:
# Compare the new set's metadata with the existing metadata.
for meta in [key for key in metadata.keys() if key in set(new_metadata[dataset].keys())]:
# Print a notice for any unexpected differences, but don't raise exceptions.
if metadata[meta] != new_metadata[dataset][meta]:
#Ancillary data is weird. Just append any new metadata to the existing metadata field.
metadata[meta] = str(metadata[meta]) + ', ' +str(new_metadata[dataset][meta])
#logging.warning("Dataset '"+dataset+"' has non-matching metadata between input files. Old: {'"+meta+"': '"+new_metadata[dataset][meta]+"'} -- Now using: {'"+meta+"': '"+metadata[dataset][meta]+"'}")
# Update the metadata, overwriting old values if applicable.
new_metadata[dataset].update(metadata)
# Data values
if dataset not in new_variables.keys():
# No previous entries for this dataset. Add the whole thing as-is.
new_variables[dataset] = data
else:
# Panic and error out if the datasets with identical names don't have the same axes/variables being tracked.
if len(new_variables[dataset].keys()) != len(data.columns):
logging.error('Failure to merge new data with axes ('+(','.join(data.columns))+') with existing data with axes ('+(','.join((new_variables[dataset].keys())))+')'+'.')
raise TypeError('Failure to merge new data with axes ('+(','.join(data.columns))+') with existing data with axes ('+(','.join((new_variables[dataset].keys())))+')'+'.')
# Update existing dataset entry with the additional data.
new_variables[dataset] = new_variables[dataset].append(data)
else:
# Direct loaded from CDF.
for dataset in data.keys():
if dataset not in new_variables.keys():
# No previous entries for this dataset. Add the whole thing as-is.
new_variables[dataset] = data[dataset]
else:
# Panic and error out if the datasets with identical names don't have the same axes/variables being tracked.
if len(new_variables[dataset].keys()) != len(data[dataset].keys()):
logging.error('Failure to merge new data with axes ('+(','.join((data[dataset].keys())))+') with existing data with axes ('+(','.join((new_variables[dataset].keys())))+')'+'.')
raise TypeError('Failure to merge new data with axes ('+(','.join((data[dataset].keys())))+') with existing data with axes ('+(','.join((new_variables[dataset].keys())))+')'+'.')
# Update existing dataset entry with the additional data.
for axis in data[dataset].keys():
new_variables[dataset][axis] = np.concatenate((new_variables[dataset][axis],data[dataset][axis]))
# write/revise metadata
for dataset in metadata.keys():
if dataset not in new_metadata.keys():
# No previous entries for this dataset. Add the whole thing as-is.
new_metadata[dataset] = metadata[dataset]
else:
# Compare the new set's metadata with the existing metadata.
for meta in [key for key in metadata[dataset].keys() if key in set(new_metadata[dataset].keys())]:
# Print a notice for any unexpected differences, but don't raise exceptions.
if metadata[dataset][meta] != new_metadata[dataset][meta]:
logging.warning("Dataset '"+dataset+"' has non-matching metadata between input files. Old: {'"+meta+"': '"+new_metadata[dataset][meta]+"'} -- Now using: {'"+meta+"': '"+metadata[dataset][meta]+"'}")
# Update the metadata, overwriting old values if applicable.
new_metadata[dataset].update(metadata[dataset])
if len(new_variables) == 0:
logging.warning('No data loaded.')
return
if len(new_metadata) == 0:
logging.warning('No metadata loaded.')
return
# Drop any duplicate entries in the pandas dataframes.
for dsname, dataset in new_variables.items():
if isinstance(dataset, pd.DataFrame):
new_variables[dsname] = dataset.drop_duplicates()
logging.info('Loaded variables:')
for new_var in new_variables.keys():
print(new_var)
if time_clip:
logging.info('Clipping variables to time range...')
mms_data_time_clip(new_variables, local_trange[0], local_trange[1])
return new_variables, new_metadata
#else:
# return out_files
|
85d4e6b2b28bbcbe9dd7854f306f8b11131cb274
| 26,028 |
def get_random_card_id_in_value_range(min, max, offset):
"""
Randomly picks a card ranged between `min` and `max` from a given offset.
The offset determines the type of card.
"""
card_id = roll(
min + offset,
max + offset)
return card_id
|
dd888f1ace9638d68e611763bd7d844b43309594
| 26,029 |
def task_configuration(config_path):
""" NLP-Task configuration/mapping """
df = pd.read_json(config_path)
names = df.name.values.tolist()
mapping = {
df['name'].iloc[i]: (
df['text'].iloc[i],
df['labels'].iloc[i],
df['description'].iloc[i],
df['model_name'].iloc[i],
df['direction'].iloc[i],
df['mapper'].iloc[i],
) for i in range(len(names))}
return names, mapping
|
be794664800189ac42e39cc8639598f488a91e4d
| 26,030 |
def train(model, optimizer, criterion, data_loader, imshape=(-1, 1, 299, 299),
device='cpu'):
"""
Train a model given a labeled dataset
Args:
model (nn.Module): Model
optimizer (torch.optim.Optimizer): Optimizer used for the parameter updates
criterion: Criterion for loss function calculation
data_loader (DataLoader): DataLoader for the training data
imshape (tuple, optional): Shape of input image.
Defaults to (-1, 1, 299, 299)
device (str, optional): Device to run on. Defaults to 'cpu'.
Returns:
float: training loss (normalized to dataset length)
float: training accuracy (normalized to dataset length)
"""
model.train()
train_loss, train_accuracy = 0, 0
i = 0
for X, y in data_loader:
if i % 100 == 0:
print(i)
i += 1
X, y = X.to(device), y.to(device)
optimizer.zero_grad()
out = model(X.view(*imshape))
loss = criterion(out, y)
loss.backward()
optimizer.step()
train_loss += loss*X.size(0)
y_pred = F.log_softmax(out, dim=1).max(1)[1]
train_accuracy += accuracy_score(y.cpu().numpy(), y_pred.detach()
.cpu().numpy())*X.size(0)
return train_loss/len(data_loader.dataset), train_accuracy/len(data_loader.
dataset)
|
4b1d769adbef1c9b3d97d1e38bc9828a0fc48178
| 26,031 |
def get_random_rep(embedding_dim=50, scale=0.62):
"""The `scale=0.62` is derived from study of the external GloVE
vectors. We're hoping to create vectors with similar general
statistics to those.
"""
return np.random.normal(size=embedding_dim, scale=0.62)
|
eed26688657121c7651a611bf49fd7549f3f639f
| 26,032 |
def calculate_pretrain_epoches(stage_ds:DatasetStage,
train_batch_size:int,
train_steps_per_epoch:int=DEF_STEPS_PER_EPOCH)->int:
Calculate the number of epochs required to match the reference model in
the number of parameter updates.
Ref. https://arxiv.org/pdf/1810.04805.pdf, section A.2. "Pre-training
procedure"
"""
upstream_train_steps=10**6
upstream_batch_size=256
upstream_seq_length=512
upstream_total_tokens=upstream_train_steps * upstream_batch_size * upstream_seq_length
# Calculate number of training epoches for our model to match the upstream
our_batch_size=train_batch_size
our_seq_length=mklens(instantiate(stage_ds).dref).max_seq_length.val
our_train_steps=upstream_total_tokens / (our_batch_size * our_seq_length)
out_epoches=our_train_steps // train_steps_per_epoch
return out_epoches
|
72a51eb842d79fe575e97a8059705220d145e0de
| 26,033 |
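The arithmetic performed by `calculate_pretrain_epoches`, spelled out without the `mklens`/`DatasetStage` machinery; the local batch size, sequence length and steps-per-epoch below are illustrative assumptions:
upstream_total_tokens = 10**6 * 256 * 512     # steps * batch size * sequence length (BERT, appendix A.2)
our_batch_size = 64                           # assumed local train_batch_size
our_seq_length = 128                          # assumed local max_seq_length
steps_per_epoch = 10_000                      # stand-in for DEF_STEPS_PER_EPOCH
our_train_steps = upstream_total_tokens / (our_batch_size * our_seq_length)
print(our_train_steps // steps_per_epoch)     # 1600.0 epochs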
def get_container_info(node, repositories):
"""Check the node name for errors (underscores)
Returns:
toscaparser.nodetemplate.NodeTemplate: a deepcopy of a NodeTemplate
"""
if not repositories:
repositories = []
NodeInfo = namedtuple(
"NodeInfo",
[
"name",
"type",
"properties",
"inputs",
"artifacts",
"parent",
"sidecars",
"mounts",
"hosts",
"requirements",
"repositories",
],
)
return NodeInfo(
name=node.name,
type=node.type,
properties={x: y.value for x, y in node.get_properties().items()},
inputs=utils.get_lifecycle(node, Interface.KUBERNETES).get(
"create", {}
),
artifacts=node.entity_tpl.get("artifacts", {}),
parent=node.type_definition.defs,
sidecars=_get_related_nodes(node, NodeType.CONTAINER, repositories),
mounts=_get_related_mounts(node),
hosts=_get_related_hosts(node),
requirements=node.requirements,
repositories={repo.name: repo.reposit for repo in repositories},
)
|
f2fbdcee89f869f9ec4b5347ad1667e2ea23c427
| 26,034 |
def query_db_to_build_2by2_table(
db,
drug1_id,
drug2_id,
drug1_efficacy_definition_query_sql,
drug2_efficacy_definition_query_sql,
environment,
):
"""
Query the given DB to build a 2-by-2 table comparing the given
drugs.
Return a 2-by-2 table where the exposure is drug 1 (non-exposure is
drug 2) and the outcome is efficacy according to the given
definitions.
db: SQLite DB connection.
drug1_id, drug2_id: IDs of drug 1 and 2. Python data type must
match drug IDs in DB.
drug{1,2}_efficacy_definition_query_sql: SQL query to identify
subjects for whom drug 1 (2) is effective. Must return a
collection of subject IDs ("select <subject-id> from ...").
Must have a single parameter, the drug ID ("... where
... and <drug-id> is ? ...").
environment: Dictionary containing definitions describing the table
of prescriptions: `drug_table_name`, `drug_id_column_name`,
`subject_id_column_name`.
"""
# Build query for subjects with a drug
query = _select_subjects_with_drug_id_sql.format(
environment['subject_id_column_name'],
environment['drug_table_name'],
environment['drug_id_column_name'],
)
# Get subjects with drug 1
cursor = db.execute(query, (drug1_id,))
subjects1 = set(record[0] for record in cursor)
# Get subjects with drug 2
cursor = db.execute(query, (drug2_id,))
subjects2 = set(record[0] for record in cursor)
# Get subjects for whom drug 1 is effective
cursor = db.execute(
drug1_efficacy_definition_query_sql, (drug1_id,))
effective1 = set(record[0] for record in cursor)
# Get subjects for whom drug 2 is effective
cursor = db.execute(
drug2_efficacy_definition_query_sql, (drug2_id,))
effective2 = set(record[0] for record in cursor)
# Classify the subjects by efficacy. Trust the sets of subjects
# rather than the sets of effectives; this allows the efficacy
# queries to overcount.
drug1_non = subjects1 - effective1
drug1_eff = subjects1 - drug1_non
drug2_non = subjects2 - effective2
drug2_eff = subjects2 - drug2_non
# Make the 2-by-2 table. Exposure is drug1, non-exposure is drug2,
# outcome is efficacy.
table = contab.TwoByTwoTable(
len(drug1_eff), len(drug1_non), # +exp +out | +exp -out
len(drug2_eff), len(drug2_non), # -exp +out | -exp -out
)
return table
|
bb10095c692cb81182bb79140af30a9e742d3106
| 26,035 |
def rmedian(image, r_inner, r_outer, **kwargs):
"""
Median filter image with a ring footprint. This
function produces results similar to the IRAF
task of the same name (except this is much faster).
Parameters
----------
image : ndarray, MaskedImageF, or ExposureF
Original image data.
r_inner : int
The inner radius of the ring in pixels.
r_outer : int
The outer radius of the ring in pixels.
Returns
-------
filtered_data : ndarray
Ring filtered image.
"""
data = get_image_ndarray(image)
fp = _ring(r_inner, r_outer, **kwargs)
filtered_data = ndi.median_filter(data, footprint=fp)
return filtered_data
|
9614b6f964e8f6031ad08743c0f614e2bcb25753
| 26,036 |
def docstring():
"""
Decorator: Insert docstring header to a pre-existing docstring
"""
sep="\n"
def _decorator(func):
docstr = func.__doc__
title = docstr.split("Notes",1)[0]
docstr = docstr.replace(title,"")
func.__doc__ = sep.join([docstr_header(title,func.__name__),docstr])
func.__doc__ = sep.join([func.__doc__,docstr_example(func.__name__)])
return func
return _decorator
|
5157cdfe25bc346bbf10c6a2368a6a78539d5160
| 26,037 |
def load_class_names():
"""
Load the names for the classes in the CIFAR-10 data-set.
Returns a list with the names. Example: names[3] is the name
associated with class-number 3.
"""
# Load the class-names from the pickled file.
raw = _unpickle(filename="batches.meta")[b'label_names']
# Convert from binary strings.
names = [x.decode('utf-8') for x in raw]
return names
|
668f01e943b2dcd99dd7b98266a8553fb8395251
| 26,039 |
def categories():
"""Router for categories page."""
categories = get_categories_list()
return render_template('categories.html',
categories=categories)
|
2bcc78a7bb1fceeb0aeff1c8edd49d804a4f5c5c
| 26,040 |
def load_images(imgpaths, h, w, imf='color'):
"""Read in images and pre-processing 'em
Args:
imgpaths: a list contains all the paths and names of the images we want to load
h: height image is going to resized to
width: width image is going to resized to
imf: image format when loaded as color or grayscale
Returns:
image_data: 2-d array with shape [len(imgpaths, h*w*num_channels)]
"""
if imf == 'gray':
num_chnls = 1
elif imf =='color':
num_chnls = 3
image_data = np.empty([len(imgpaths), h*w*num_chnls], dtype=np.float32)
for i in range(len(imgpaths)):
# read in image according to imf
if imf == 'gray':
img_raw = cv2.imread(imgpaths[i], 0)
elif imf == 'color':
img_raw = cv2.imread(imgpaths[i], 1)
# crop image to (360, 144, num_channels)
img_crp = img_raw[:, int(img_raw.shape[1]/2)-72:int(img_raw.shape[1]/2)+72]
# resize image according to h and w
img_rsz = cv2.resize(img_crp, (h, w))
# flatten image tensor to 1-d and save into the image_data array
image_data[i] = np.resize(img_rsz, (h*w*num_chnls))
print("{} of {} images loaded...".format(i+1, len(imgpaths)))
return image_data
|
008c265354da9c545edcfeb72484786923dcf1f6
| 26,041 |
import numpy as np
def nextpow2(value):
"""
Find exponent such that 2^exponent is equal to or greater than abs(value).
Parameters
----------
value : int
Returns
-------
exponent : int
"""
exponent = 0
avalue = np.abs(value)
while avalue > np.power(2, exponent):
exponent += 1
return exponent
|
1c856a64c578e88aacd931c0e15ba4756c7d9d4f
| 26,042 |
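A couple of example calls for `nextpow2` above:
print(nextpow2(5))    # 3, since 2**3 = 8 >= 5
print(nextpow2(-17))  # 5, since 2**5 = 32 >= |-17|
print(nextpow2(1))    # 0, since 2**0 = 1 >= 1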
def clarkezones(reference, test, units,
numeric=False):
"""Provides the error zones as depicted by the
Clarke error grid analysis for each point in the reference and test datasets.
Parameters
----------
reference, test : array, or list
Glucose values obtained from the reference and predicted methods, preferably provided in a np.array.
units : str
The SI units which the glucose values are provided in. Options: 'mmol', 'mgdl' or 'mg/dl'.
numeric : bool, optional
If this is set to true, returns integers (0 to 4) instead of characters for each of the
zones.
Returns
-------
clarkezones : list
Returns a list depicting the zones for each of the reference and test values.
"""
# obtain zones from a Clarke reference object
_zones = _Clarke(reference, test, units,
None, None, None,
None, None,
'#000000', 'auto', 'auto',
True, False,
None, None)._calc_error_zone()
if numeric:
return _zones
else:
labels = ['A', 'B', 'C', 'D', 'E']
return [labels[i] for i in _zones]
|
4fe3544649e28ccf06cb1c74f282f1963da0854d
| 26,043 |
def normalize_graph(graph):
"""
Take an instance of a ``Graph`` and return the instance's identifier and ``type``.
Types are ``U`` for a :class:`~rdflib.graph.Graph`, ``F`` for
a :class:`~rdflib.graph.QuotedGraph` and ``B`` for a
:class:`~rdflib.graph.ConjunctiveGraph`
>>> from rdflib import plugin
>>> from rdflib.graph import Graph, ConjunctiveGraph, QuotedGraph
>>> from rdflib.store import Store
>>> from rdflib import URIRef, Namespace
>>> from rdflib_sqlalchemy.termutils import normalize_graph
>>> memstore = plugin.get('IOMemory', Store)()
>>> g = Graph(memstore, URIRef("http://purl.org/net/bel-epa/gjh"))
>>> normalize_graph(g)
(rdflib.term.URIRef(u'http://purl.org/net/bel-epa/gjh'), 'U')
>>> g = ConjunctiveGraph(memstore, Namespace("http://rdflib.net/ns"))
>>> normalize_graph(g) #doctest: +ELLIPSIS
(rdflib.term.URIRef(u'http://rdflib.net/ns'), 'U')
>>> g = QuotedGraph(memstore, Namespace("http://rdflib.net/ns"))
>>> normalize_graph(g)
(rdflib.term.URIRef(u'http://rdflib.net/ns'), 'F')
"""
if isinstance(graph, QuotedGraph):
return graph.identifier, "F"
else:
return graph.identifier, term_to_letter(graph.identifier)
|
478987e943e27077e1f2ecce454b33dfd347812b
| 26,044 |
def findTargetNode(root, nodeName, l):
"""
Recursive parsing of the BVH skeletal tree using depth-first
search to locate the node that has the name of the targeted body part.
Args:
root (object): root node of the BVH skeletal tree
nodeName (string): name of the targeted body part
l (list): empty list
Returns:
list: list containing the node representing the targeted body part
"""
if root.name == nodeName:
l.append(root)
else:
for child in root.children:
findTargetNode(child, nodeName, l)
return l
|
81d63c032260b496b29dd2890e32753554b93e1a
| 26,045 |
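A minimal sketch of `findTargetNode` above, using `SimpleNamespace` objects as hypothetical stand-ins for BVH joint nodes (only a `name` and a `children` list are needed):
from types import SimpleNamespace
left_arm = SimpleNamespace(name="LeftArm", children=[])
spine = SimpleNamespace(name="Spine", children=[left_arm])
root = SimpleNamespace(name="Hips", children=[spine])
result = findTargetNode(root, "LeftArm", [])
print(result[0].name)  # 'LeftArm'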
def ddtodms(decLat: float, decLon: float):
""" Converts coord point from decimal degrees to Hddd.mm.ss.sss """
try:
lat = float(decLat)
lon = float(decLon)
except ValueError as e:
raise e
# Get declination
ns = "N" if lat >= 0 else "S"
ew = "E" if lon >= 0 else "W"
lat = abs(lat)
lon = abs(lon)
# Floor to get degrees
latD = int(lat)
lonD = int(lon)
# Get minutes
latM = 60*(lat - latD)
lonM = 60*(lon - lonD)
# Get seconds
latS = 60*(latM - int(latM))
lonS = 60*(lonM - int(lonM))
# Assemble output
latOut = f"{ns}{int(latD):03}.{int(latM):02}.{latS:06.3f}"
lonOut = f"{ew}{int(lonD):03}.{int(lonM):02}.{lonS:06.3f}"
return latOut, lonOut
|
e1d05d5edf274427b42cb88496fe41ddaf58f7fd
| 26,046 |
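Example output for `ddtodms` above (pure standard-library code, nothing else assumed):
lat, lon = ddtodms(50.4501, -30.5234)
print(lat, lon)  # N050.27.00.360 W030.31.24.240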
def gen_colors(img):
""" Ask backend to generate 16 colors. """
raw_colors = fast_colorthief.get_palette(img, 16)
return [util.rgb_to_hex(color) for color in raw_colors]
|
6a780cdff2e4aebbe90667da584ad4f1e2692347
| 26,048 |
import html
import time
import http
import json
def request(match, msg):
"""Make an ESI GET request, if the path is known.
Options:
--headers nest the response and add the headers
"""
match_group = match.groupdict()
if "evepc.163.com" in (match_group["esi"] or ""):
base_url = ESI_CHINA
else:
base_url = esi_base_url(msg)
version, *req_sections = match_group["esi_path"].split("/")
if version not in ESI_SPECS[base_url]:
req_sections.insert(0, version)
version = "latest"
params = ""
if "?" in req_sections[-1]:
if req_sections[-1].startswith("?"):
params = req_sections.pop()
params = params[1:]
else:
# qsparams passed w/out trailing slash
final_path, params = req_sections.pop().split("?")
req_sections.append(final_path)
params = html.unescape(params)
path = "/{}/".format("/".join(x for x in req_sections if x))
if _valid_path(base_url, path, version):
url = "{}/{}{}{}{}".format(
base_url,
version,
path,
"?" * int(params != ""),
params,
)
start = time.time()
res = do_request(url, return_response=True)
try:
content = res.json()
except ValueError:
content = res.text
try:
status = http.HTTPStatus(res.status_code) # pylint: disable=E1120
except ValueError:
status = str(res.status_code)
else:
status = "{} {}".format(status.value, status.name) # pylint: disable=E1101
if "--headers" in msg.args:
res = {"response": content, "headers": dict(res.headers)}
else:
res = content
return SNIPPET(
content=json.dumps(res, sort_keys=True, indent=4),
filename="response.json",
filetype="json",
comment="{} ({:,.0f}ms)".format(
status,
(time.time() - start) * 1000,
),
title=url,
)
return "failed to find GET {} in the {} ESI{} spec".format(
path,
version,
" China" * int(base_url == ESI_CHINA),
)
|
963482efa0bdb02fbfcf1f6ce0b107718c69563a
| 26,049 |
def __getattr__(attr):
"""
This dynamically creates the module level variables, so if
we don't call them, they are never created, saving time - mostly in the CLI.
"""
if attr == "config":
return get_config()
elif attr == "leader_hostname":
return get_leader_hostname()
else:
raise AttributeError
|
f080f3b82afa6050dbaf870e11d1651faded6361
| 26,050 |
import numpy
def _diff_st(p,dl,salt,temp,useext=False):
"""Calculate sea-ice disequilibrium at ST.
Calculate both sides of the equations
given pressure = pressure of liquid water
chemical potential of ice = potential of liquid water
and their Jacobians with respect to pressure and liquid water
density. Solving these equations gives equilibrium values at the
given salinity and temperature.
:arg float p: Pressure in Pa.
:arg float dl: Liquid water density in kg/m3.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
pl = _eq_pressure(0,0,temp,dl)
gi = _ice_g(0,0,temp,p)
gl = _eq_chempot(0,0,temp,dl)
gl += _eq_liqpot(0,0,0,salt,temp,p,useext=useext)
lhs = numpy.array([p, gi])
rhs = numpy.array([pl, gl])
pl_d = _eq_pressure(0,1,temp,dl)
gi_p = _ice_g(0,1,temp,p)
gl_d = _eq_chempot(0,1,temp,dl)
gl_p = _eq_liqpot(0,0,1,salt,temp,p,useext=useext)
dlhs = numpy.array([[1.,0.], [gi_p,0.]])
drhs = numpy.array([[0.,pl_d], [gl_p,gl_d]])
return lhs, rhs, dlhs, drhs
|
11c3c09deeea9e36d1a9dc25bfae4609c982c082
| 26,051 |
def getNormalizedISBN10(inputISBN):
"""This function normalizes an ISBN number.
>>> getNormalizedISBN10('978-90-8558-138-3')
'90-8558-138-9'
>>> getNormalizedISBN10('978-90-8558-138-3 test')
'90-8558-138-9'
>>> getNormalizedISBN10('9789085581383')
'90-8558-138-9'
>>> getNormalizedISBN10('9031411515')
'90-314-1151-5'
>>> getNormalizedISBN10('9791032305690')
''
>>> getNormalizedISBN10('')
''
>>> getNormalizedISBN10('979-10-235-1393-613')
''
>>> getNormalizedISBN10('978-10-235-1393-613')
Traceback (most recent call last):
...
stdnum.exceptions.InvalidFormat: Not a valid ISBN13.
"""
inputISBNNorm = preprocessISBNString(inputISBN)
if inputISBNNorm:
isbn10 = None
try:
isbn10 = isbn.format(isbn.to_isbn10(inputISBNNorm))
return isbn10
except exceptions.InvalidComponent:
# Probably an ISBN number with 979 prefix for which no ISBN10 can be created
if inputISBNNorm.startswith('979'):
return ''
else:
raise
else:
return ''
|
986dcd09470cc3bf0badb59f8c0ba069382f0c7c
| 26,052 |
import json
import logging
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
def hashtag_is_valid(tag, browser, delay=5):
"""
Check if a given hashtag is banned by Instagram
:param delay: Maximum time to search for information on a page
:param browser: A Selenium Driver
:param tag: The hashtag to check
:return: True if the hashtag is valid, else False
"""
try:
url = f"https://www.instagram.com/explore/tags/{tag}/?__a=1"
browser.get(url)
WebDriverWait(browser, delay).until(expected_conditions.presence_of_element_located((By.ID, 'json')))
content = browser.find_element_by_id('json').text
parsed_json = json.loads(content)
return parsed_json['graphql']['hashtag']['allow_following']
except TimeoutException:
        logging.warning(
            f'Error while checking the tag #{tag}. Loading took too much time; please check your internet connection.')
return True
except KeyError:
return False
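# Minimal usage sketch (not from the original project); assumes a Selenium 3
# style driver, since the function calls find_element_by_id.
# from selenium import webdriver
# browser = webdriver.Chrome()
# hashtag_is_valid("python", browser)   # True unless Instagram has banned the tag
# browser.quit()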
|
15e88713670454a713d27650b61672ecc06a9d53
| 26,053 |
import json
import pandas as pd
def get_price_data(startdate, enddate):
"""
returns a dataframe containing btc price data on every day between startdate and enddate
    :param startdate: start date string in 'YYYY-MM-DD' format
    :param enddate: end date string in 'YYYY-MM-DD' format
    :return: DataFrame indexed by date with a 'price' column
"""
url = 'https://api.coindesk.com/v1/bpi/historical/close.json?start=' + startdate + '&end=' + enddate
driver.get(url)
body = driver.find_element_by_tag_name("body")
text = json.loads(body.text)
    timeindex = pd.date_range(start=startdate, end=enddate, freq='d')
data = pd.DataFrame()
for date in timeindex:
fdate = format_date(date)
data.at[date, 'price'] = text['bpi'][fdate]
return data
|
b506069c7246c64530906ff86a743b0d717ec173
| 26,055 |
import csv
def read_keywords(fname):
"""Read id file"""
with open(fname, 'r') as f:
reader = csv.reader(f)
header = next(reader)
assert header == ['keyword']
return list(row[0] for row in reader)
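# Minimal usage sketch: a keyword file is a one-column CSV whose header row is
# exactly "keyword"; the file name below is illustrative.
# with open("keywords.csv", "w", newline="") as f:
#     f.write("keyword\npython\npandas\n")
# read_keywords("keywords.csv")   # -> ['python', 'pandas']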
|
566c1924ae8d4ae7316a2c5e3947170fe23af45d
| 26,058 |
def inscribe(mask):
"""Guess the largest axis-aligned rectangle inside mask.
Rectangle must exclude zero values. Assumes zeros are at the
edges, there are no holes, etc. Shrinks the rectangle's most
egregious edge at each iteration.
"""
h, w = mask.shape
i_0, i_1 = 0, h - 1
j_0, j_1 = 0, w - 1
def edge_costs(i_0, i_1, j_0, j_1):
a = mask[i_0, j_0:j_1 + 1].sum()
b = mask[i_1, j_0:j_1 + 1].sum()
c = mask[i_0:i_1 + 1, j_0].sum()
d = mask[i_0:i_1 + 1, j_1].sum()
return a,b,c,d
def area(i_0, i_1, j_0, j_1):
return (i_1 - i_0) * (j_1 - j_0)
coords = [i_0, i_1, j_0, j_1]
while area(*coords) > 0:
costs = edge_costs(*coords)
if sum(costs) == 0:
return coords
worst = costs.index(max(costs))
coords[worst] += 1 if worst in (0, 2) else -1
return
|
06042faebedb82dc0044cf2108fae7a3570895e0
| 26,059 |
def vaf_above_or_equal(vaf):
"""
"""
return lambda columns, mapper: float(columns[mapper['Variant_allele_ratio']]) >= vaf
|
4b2134d63193699f8ca490a8d7537ba8aaf4c8cf
| 26,060 |
from typing import Dict
def user_token_headers(client_target: TestClient, sql_session: Session) -> Dict[str, str]:
"""fake user data auth"""
return auth_token(
client=client_target, username="johndoe", sql=sql_session)
|
e230762c82e6c81e493430f2ffe59f97f7e33721
| 26,061 |
def conv_1_0_string_to_packed_binary_string(s):
"""
'10101111' -> ('\xAF', False)
Basically the inverse of conv_packed_binary_string_to_1_0_string,
but also returns a flag indicating if we had to pad with leading zeros
to get to a multiple of 8.
"""
if not is_1_0_string(s):
        raise ValueError("Input must be a string containing only 0's and 1's")
# pad to multiple of 8
padded = False
rem = len(s) % 8
if rem != 0:
npad = 8 - rem
s = '0' * npad + s
padded = True
assert len(s) % 8 == 0
r = []
i = 0
while i < len(s):
t = 0
for j in range(8):
t = (t << 1) | (ord(s[i + j]) - ord('0'))
r.append(chr(t))
i += 8
return (''.join(r), padded)
|
65555abe9eae515c41ddf422bec28394b7612e37
| 26,063 |
def accuracy_win_avg(y_true, y_proba):
"""
Parameters
----------
y_true: n x n_windows
y_proba: n x n_windows x n_classes
"""
y_pred = win_avg(y_proba)
return accuracy(y_true[:,0], y_pred)
|
ca2629127dd0fca3591f8e6ae30f337cd3b92b69
| 26,064 |
import numpy as np
def ortho_projection(left=-1, right=1, bottom=-1, top=1, near=.1, far=1000):
"""Orthographic projection matrix."""
return np.array((
(2 / (right-left), 0, 0, -(right+left) / (right-left)),
(0, 2 / (top-bottom), 0, -(top+bottom) / (top-bottom)),
(0, 0, 2 / (near-far), (far+near) / (far-near)),
(0, 0, 0, 1)
), dtype='f4')
|
cdd00f1fcb706a1a19476f8fa387fe45a11deebd
| 26,065 |
def ais_InitLengthBetweenCurvilinearFaces(*args):
"""
* Finds attachment points on two curvilinear faces for length dimension. @param thePlaneDir [in] the direction on the dimension plane to compute the plane automatically. It will not be taken into account if plane is defined by user.
:param theFirstFace:
:type theFirstFace: TopoDS_Face &
:param theSecondFace:
:type theSecondFace: TopoDS_Face &
:param theFirstSurf:
:type theFirstSurf: Handle_Geom_Surface &
:param theSecondSurf:
:type theSecondSurf: Handle_Geom_Surface &
:param theFirstAttach:
:type theFirstAttach: gp_Pnt
:param theSecondAttach:
:type theSecondAttach: gp_Pnt
:param theDirOnPlane:
:type theDirOnPlane: gp_Dir
:rtype: void
"""
return _AIS.ais_InitLengthBetweenCurvilinearFaces(*args)
|
992f2c26c87f07a0460ef56cff784b00185cf3b0
| 26,066 |
def get_methylation_dataset(methylation_array, outcome_col, convolutional=False, cpg_per_row=1200, predict=False, categorical=False, categorical_encoder=False, generate=False):
"""Turn methylation array into pytorch dataset.
Parameters
----------
methylation_array : MethylationArray
Input MethylationArray.
outcome_col : str
Pheno column to train on.
convolutional : bool
Whether running CNN on methylation data
cpg_per_row : int
If convolutional, number of cpgs per image row.
predict : bool
Running prediction algorithm vs VAE.
categorical : bool
Whether training on categorical vs continuous variables.
categorical_encoder :
        Scikit learn encoder.
    generate : bool
        Whether to return a MethylationGenerationDataSet instead of the
        standard dataset.
Returns
-------
Pytorch Dataset
"""
if generate:
return MethylationGenerationDataSet(methylation_array, Transformer(convolutional, cpg_per_row, methylation_array.beta.shape), outcome_col, categorical=categorical, categorical_encoder=categorical_encoder)
elif predict:
return MethylationPredictionDataSet(methylation_array, Transformer(convolutional, cpg_per_row, methylation_array.beta.shape), outcome_col, categorical=categorical, categorical_encoder=categorical_encoder)
else:
return MethylationDataSet(methylation_array, Transformer(convolutional, cpg_per_row, methylation_array.beta.shape), outcome_col)
|
709d1eb5f5887b9f0ff0724e8a4aa18d86020fb5
| 26,068 |
def es_config_fixture() -> ElastalkConf:
"""
This fixture returns an `ElasticsearchConf` (configuration) object.
:return: the configuration object
"""
return ElastalkConf()
|
2479bf01f454ea409e36647a417b3dcee82fafd0
| 26,069 |
import numpy as np
from scipy.spatial.transform import Rotation
def axis_angle_to_quaternion(rotation: ArrayOrList3) -> np.ndarray:
"""Converts a Rodrigues axis-angle rotation to a quaternion.
Args:
rotation: axis-angle rotation in [x,y,z]
Returns:
equivalent quaternion in [x,y,z,w]
"""
r = Rotation.from_rotvec(rotation)
return r.as_quat()
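# Minimal usage sketch: a 90-degree rotation about the z-axis.
# axis_angle_to_quaternion([0.0, 0.0, np.pi / 2])
# -> array([0.        , 0.        , 0.70710678, 0.70710678])   # (x, y, z, w)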
|
002970755835395bc349c418de94455ac0ce52c3
| 26,071 |
def encode(encoding, data):
"""
Encodes the given data using the encoding that is specified
:param str encoding: encoding to use, should be one of the supported encoding
:param data: data to encode
:type data: str or bytes
:return: multibase encoded data
:rtype: bytes
:raises ValueError: if the encoding is not supported
"""
data = ensure_bytes(data, 'utf8')
try:
return ENCODINGS_LOOKUP[encoding].code + ENCODINGS_LOOKUP[encoding].converter.encode(data)
except KeyError:
raise ValueError('Encoding {} not supported.'.format(encoding))
|
08ffd6540ed6da7728b6725e035afb652bf8893e
| 26,072 |
import json
def tiny_video_net(model_string,
num_classes,
num_frames,
data_format='channels_last',
dropout_keep_prob=0.5,
get_representation=False,
max_pool_predictions=False):
"""Builds TinyVideoNet based on model string.
Args:
model_string: string defining the tiny video model (see top for example
model string)
num_classes: int number of classes to classify
num_frames: int, number of frames in clip
data_format: string, either channels_last or channels_first
dropout_keep_prob: float, dropout keep probability
get_representation: bool, True to return the representation.
max_pool_predictions: bool, if True, will max pool the predictions over the
temporal dimension. If False, will average pool. Max pooling is useful for
long videos where the action only happens over a short sub-sequence of the
whole video, such as in the Charades dataset.
Returns:
model function (inputs, is_training)
"""
if dropout_keep_prob is None:
dropout_keep_prob = 1.0
model = json.loads(model_string)
def model_fn(inputs, is_training):
"""Creation of the model graph."""
input_shape = inputs.shape
if 'input_streams' in model:
batch_size = input_shape[0]
inputs, blocks = multistream_tvn(model, inputs, is_training, num_frames)
else:
batch_size = input_shape[0] // num_frames
inputs, blocks = tvn(model, inputs, is_training)
feature_shape = inputs.shape
current_frames = feature_shape[0] // batch_size
if get_representation:
representation = inputs
if max_pool_predictions:
batch_size = int(feature_shape[0].value // current_frames)
inputs = tf.reshape(inputs,
[batch_size, current_frames, -1, inputs.shape[3]])
# Spatial average pooling.
inputs = tf.reduce_mean(inputs, axis=2)
if is_training:
inputs = tf.nn.dropout(inputs, dropout_keep_prob)
# Per-frame predictions.
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
# Per-frame max-pooling. The max pooling is done in a softmax manner for
# more stable training.
pre_logits = inputs / tf.sqrt(tf.cast(current_frames, inputs.dtype))
acts = tf.nn.softmax(pre_logits, axis=1)
inputs = tf.math.multiply(inputs, acts)
inputs = tf.reduce_sum(inputs, axis=1)
else:
# Global-average-pool.
inputs = tf.reshape(inputs, [
int(feature_shape[0].value // current_frames),
current_frames * feature_shape[1], feature_shape[2], -1
])
inputs = tf.reduce_mean(inputs, axis=[1, 2])
if is_training:
inputs = tf.nn.dropout(inputs, dropout_keep_prob)
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
if get_representation:
return {
'feats-emb': representation,
'blocks': blocks,
'frames': current_frames,
'predictions': inputs,
'logits': inputs
}
return {'logits': inputs}
def tvn(model, inputs, is_training):
"""Standard single-stream TVN."""
current_frames = num_frames
b = 0
blocks = {}
for block in model['blocks']:
with tf.variable_scope('Block-%d-0' % b):
inputs, new_frames = tiny_block(block, inputs, current_frames,
is_training, data_format)
current_frames = new_frames
# Repeat block with no stride and alternating use of temporal conv1.
use_temporal = False
for i in range(block['repeats'] - 1):
with tf.variable_scope('Block-%d-%d' % (b, i + 1)):
inputs, _ = tiny_block(
block,
inputs,
current_frames,
is_training,
data_format,
strides=1,
use_temporal=use_temporal)
use_temporal = not use_temporal
blocks['block-' + str(b)] = inputs
b += 1
return inputs, blocks
def multistream_tvn(model, inputs, is_training, input_num_frames):
"""Multi-stream (assemblenet-like) TVN."""
input_shape = inputs.shape
is_4d = False
if len(input_shape) == 4:
# Handle 4D input tensor [batch*time, height, width, channels].
batch_size = input_shape[0] // input_num_frames
is_4d = True
else:
# Handle 5D input tensor.
batch_size = input_shape[0]
b = 0
blocks = {}
# Get input streams.
    input_streams = []
    # Track how many frames each stream keeps so downstream blocks know
    # their temporal length.
    stream_frames = []
    dtype = inputs.dtype
for stream in model['input_streams']:
img_size = stream['image_size']
num_frames = stream['num_frames']
height = inputs.shape[2]
if is_4d:
# Maintain 4D tensor always
strm = tf.reshape(inputs, [batch_size, input_num_frames,
height*height, 3])
else:
strm = inputs
strm = strm[:, :num_frames]
strm = tf.reshape(strm, [batch_size * strm.shape[1], height, height, 3])
if height != img_size:
strm = tf.image.resize(strm, (img_size, img_size))
if strm.dtype != dtype:
strm = tf.cast(strm, dtype)
      input_streams.append(tf.stop_gradient(strm))
      stream_frames.append(num_frames)
for block in model['blocks']:
with tf.variable_scope('Block-%d-0' % b):
# Get block input.
        inputs = input_streams[block['inputs'][0]]
        current_frames = stream_frames[block['inputs'][0]]
        inputs, current_frames = tiny_block(block, inputs, current_frames,
                                            is_training, data_format)
# Repeat block with no stride and alternating use of temporal conv1.
use_temporal = False
for i in range(block['repeats'] - 1):
with tf.variable_scope('Block-%d-%d' % (b, i + 1)):
inputs, _ = tiny_block(
block,
inputs,
current_frames,
is_training,
data_format,
strides=1,
use_temporal=use_temporal)
use_temporal = not use_temporal
blocks['block-' + str(b)] = inputs
b += 1
      input_streams.append(inputs)
      stream_frames.append(current_frames)
return input_streams[-1], blocks
return model_fn
|
854f8bd2315c3d529c4691b66d1ee6bc00465fac
| 26,073 |
def get_tensor_from_cntk_convolutional_weight_value_shape(tensorValue, tensorShape):
"""Returns an ell.math.DoubleTensor from a trainable parameter
Note that ELL's ordering is row, column, channel.
4D parameters (e.g. those that represent convolutional weights) are stacked vertically in the row dimension.
CNTK has them in filter, channel, row, column order.
"""
if (len(tensorShape) == 4):
orderedWeights = np.moveaxis(tensorValue, 1, -1)
orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])
elif (len(tensorShape) == 3):
orderedWeights = np.moveaxis(tensorValue, 0, -1)
orderedWeights = orderedWeights.ravel().astype(np.float).reshape(
tensorShape[1], tensorShape[2], tensorShape[0])
elif (len(tensorShape) == 2):
orderedWeights = np.moveaxis(tensorValue, 0, -1)
orderedWeights = orderedWeights.ravel().astype(
np.float).reshape(tensorShape[1], tensorShape[0], 1)
else:
orderedWeights = tensorValue.ravel().astype(
np.float).reshape(1, 1, tensorValue.size)
return ell.math.DoubleTensor(orderedWeights)
|
69191e295abfd818d380e626db1276687c1e7a8b
| 26,074 |
from typing import TextIO
from typing import List
from typing import Optional
import csv
import numpy as np
from mirdata import annotations  # assumed source of F0Data
def load_f0(fhandle: TextIO) -> annotations.F0Data:
"""Load a Dagstuhl ChoirSet F0-trajectory.
Args:
fhandle (str or file-like): File-like object or path to F0 file
Returns:
F0Data Object - the F0-trajectory
"""
times = []
freqs = []
voicings = []
confs: List[Optional[float]]
conf_array: Optional[np.ndarray]
confs = []
reader = csv.reader(fhandle, delimiter=",")
for line in reader:
times.append(float(line[0]))
freq_val = float(line[1])
voicings.append(float(freq_val > 0))
freqs.append(np.abs(freq_val))
if len(line) == 3:
confs.append(float(line[2]))
else:
confs.append(None)
if all([not c for c in confs]):
conf_array = None
conf_unit = None
else:
conf_array = np.array(confs)
conf_unit = "likelihood"
return annotations.F0Data(
np.array(times),
"s",
np.array(freqs),
"hz",
np.array(voicings),
"binary",
conf_array,
conf_unit,
)
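# Minimal usage sketch: parse a tiny in-memory trajectory ("time,f0" per line);
# negative f0 values mark unvoiced frames. Attribute names assume mirdata's F0Data.
# import io
# f0 = load_f0(io.StringIO("0.00,220.0\n0.01,-220.0\n"))
# f0.frequencies   # -> array([220., 220.])
# f0.voicing       # -> array([1., 0.])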
|
84bea706b1d985f91c872764e8e388ec8fe7f576
| 26,075 |
def to_str_constant(s: str, quote="'") -> str:
"""
Convert a given string into another string that is a valid Python representation of a string constant.
:param s: the string
:param quote: the quote character, either a single or double quote
    :return: a quoted Python string-literal representation of ``s``
"""
if s is None:
raise ValueError()
if quote not in _PYTHON_QUOTE_CHARS:
raise ValueError()
return quote + s.replace('\\', '\\\\').replace(quote, "\\%s" % quote) + quote
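# Minimal usage sketch; assumes _PYTHON_QUOTE_CHARS contains both "'" and '"'.
# to_str_constant('say "hi"')            # -> the literal 'say "hi"'
# to_str_constant("who's there", '"')    # -> the literal "who's there"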
|
bfd1fd2989a96cd5fdadc01e7a0e1e0f2846db97
| 26,076 |
def signum(x):
"""
Return -1 if x < 0, 1 if 0 < x, or 0 if x == 0
"""
return (x > 0) - (x < 0)
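# Minimal usage sketch:
# [signum(v) for v in (-3, 0, 2.5)]   # -> [-1, 0, 1]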
|
59568d4fbf1f5a226528b7f12f8c5011b641bc4e
| 26,077 |
def _construct_dataloader(dataset, batch_size, shuffle, seed=0, num_workers=0, class_balance=False):
"""Construct a data loader for the provided data.
Args:
        dataset: The molecule dataset to load from; if None, no loader is built.
        batch_size (int): The batch size.
        shuffle (bool): If True the data will be loaded in a random order.
        seed (int): Random seed (currently unused here).
        num_workers (int): Number of worker processes used for loading.
        class_balance (bool): Whether to balance classes when sampling batches.
Returns:
A DataLoader object that yields batches of padded molecule features.
"""
if dataset is not None:
loader = MoleculeDataLoader(
dataset=dataset,
batch_size=batch_size,
num_workers=num_workers,
class_balance=class_balance,
shuffle=shuffle
)
else:
loader = None
return loader
|
7fd7a7b3eaa4c82473978735167067c64533adcb
| 26,078 |
import platform
def _get_cpu_type():
"""
Return the CPU type as used in the brink.sh script.
"""
base = platform.processor()
if not base:
base = platform.machine()
if base == 'aarch64': # noqa:cover
return 'arm64'
if base == 'x86_64':
return 'x64'
if base == 'i686':
return 'x86'
return base
|
b8a81367d1a4a7ac34e2965cfedcb947f6a66165
| 26,079 |
def DatesRangeFieldWidget(field, request): # pylint: disable=invalid-name
"""Dates range widget factory"""
return FieldWidget(field, DatesRangeWidget(request))
|
0e692d025b458340e2c0d588cd73b7ea206beca6
| 26,080 |
def get_aux_dset_slicing(dim_names, last_ind=None, is_spectroscopic=False):
"""
Returns a dictionary of slice objects to help in creating region references in the position or spectroscopic
indices and values datasets
Parameters
------------
dim_names : iterable
List of strings denoting the names of the position axes or spectroscopic dimensions arranged in the same order
that matches the dimensions in the indices / values dataset
last_ind : (Optional) unsigned int, default = None
        Last pixel in the position or spectroscopic matrix. Useful in experiments where the
        parameters have changed (e.g. BEPS new data format) during the experiment.
    is_spectroscopic : bool, optional. default = False
        set to True for spectroscopic datasets and False for position datasets
Returns
------------
slice_dict : dictionary
Dictionary of tuples containing slice objects corresponding to
each position axis.
"""
dim_names = validate_list_of_strings(dim_names, 'dim_names')
if len(dim_names) == 0:
raise ValueError('No valid dim_names provided')
slice_dict = dict()
for spat_ind, curr_dim_name in enumerate(dim_names):
val = (slice(last_ind), slice(spat_ind, spat_ind + 1))
if is_spectroscopic:
val = val[::-1]
slice_dict[str(curr_dim_name)] = val
return slice_dict
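# Minimal usage sketch (illustrative dimension names):
# get_aux_dset_slicing(['X', 'Y'], last_ind=128)
# -> {'X': (slice(None, 128, None), slice(0, 1, None)),
#     'Y': (slice(None, 128, None), slice(1, 2, None))}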
|
3a254dea086227354030fd6b13ef33182fe505f0
| 26,081 |
def make_etag(value, is_weak=False):
"""Creates and returns a ETag object.
Args:
        value (str): Unquoted entity tag value
is_weak (bool): The weakness indicator
Returns:
A ``str``-like Etag instance with weakness indicator.
"""
etag = ETag(value)
etag.is_weak = is_weak
return etag
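# Minimal usage sketch; assumes ETag subclasses str, as the docstring implies.
# tag = make_etag("67ab43", is_weak=True)
# (str(tag), tag.is_weak)   # -> ('67ab43', True)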
|
2d2d2f7f7d0fc59f89b20cfb79932c16ade90e35
| 26,082 |
def is_closer_to_goal_than(a, b, team_index):
""" Returns true if a is closer than b to goal owned by the given team """
return (a.y < b.y, a.y > b.y)[team_index]
|
016cb7f19b2d0046d4f349dbf52da93ca0e9a2cc
| 26,084 |
from urllib.parse import urlparse
def is_url(url):
Check if given URL is a valid URL.
:param str url: The url to validate
:returns: if the url is valid or not
:rtype: bool
"""
return urlparse(url).scheme != ""
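# Minimal usage sketch:
# is_url("https://example.com/docs")   # -> True
# is_url("example.com/docs")           # -> False (no scheme)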
|
10b7b37e4d6075877388b6564e004b1775a4ea71
| 26,086 |
def star(locid, tmass_id):
"""Return data on an individual star.
"""
apstar_id = 'apogee.apo25m.s.stars.{0:d}.{1}'.format(locid, tmass_id)
data = stars[apstar_id]
flagtxt = ", ".join(starflag.flagname(stars[apstar_id].ORstarflag))
return render_template('star.html',
title="RV - {0:d}.{1}".format(locid, tmass_id),
locids=locids, locid=locid,
stars=sorted(tmass_ids[locid]),
tmass_id=tmass_id,
data=stars[apstar_id],
flags=flagtxt)
|
9762fd912b6dbbf4088333c4cf9d0689c04b99c7
| 26,087 |
def dot_product_attention(q,
k,
v,
bias,
dropout_rate=0.0,
image_shapes=None,
name=None,
                          save_weights_to=None,
dropout_broadcast_dims=None):
"""dot-product attention.
Args:
q: a Tensor with shape [batch, heads, length_q, depth_k]
k: a Tensor with shape [batch, heads, length_kv, depth_k]
v: a Tensor with shape [batch, heads, length_kv, depth_v]
bias: bias Tensor (see attention_bias())
dropout_rate: a floating point number
image_shapes: optional tuple of integer scalars.
see comments for attention_image_summary()
name: an optional string
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
dropout_broadcast_dims: an optional list of integers less than 4
specifying in which dimensions to broadcast the dropout decisions.
saves memory.
Returns:
A Tensor.
"""
with tf.variable_scope(
name, default_name="dot_product_attention", values=[q, k, v]) as scope:
# [batch, num_heads, query_length, memory_length]
logits = tf.matmul(q, k, transpose_b=True)
if bias is not None:
bias = cast_like(bias, logits)
logits += bias
    weights = tf.nn.softmax(logits, name="attention_weights")
    if save_weights_to is not None:
      save_weights_to[scope.name] = weights
      save_weights_to[scope.name + "/logits"] = logits
# dropping out the attention links for each of the heads
weights = dropout_with_broadcast_dims(
weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims)
return tf.matmul(weights, v)
|
3efc760516b37656f1d6e2e71c30ed37c3d6e298
| 26,088 |
def extract_words(text):
"""Return the words in a tweet, not including punctuation.
>>> extract_words('anything else.....not my job')
['anything', 'else', 'not', 'my', 'job']
>>> extract_words('i love my job. #winning')
['i', 'love', 'my', 'job', 'winning']
>>> extract_words('make justin # 1 by tweeting #vma #justinbieber :)')
['make', 'justin', 'by', 'tweeting', 'vma', 'justinbieber']
>>> extract_words("paperclips! they're so awesome, cool, & useful!")
['paperclips', 'they', 're', 'so', 'awesome', 'cool', 'useful']
"""
"*** YOUR CODE HERE ***"
return text.split()
|
720095b29c8bdd5427796afd34385dcaae5fa8d8
| 26,090 |
async def async_setup_platform( # pylint: disable=too-many-locals
hass: HomeAssistant,
config: ConfigType, # pylint: disable=unused-argument
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Calibration sensor."""
if discovery_info is None:
return
calibration = discovery_info[CONF_CALIBRATION]
conf = hass.data[DATA_CALIBRATION][calibration]
unique_id = f"{DOMAIN}.{calibration}"
name = conf.get(CONF_NAME) or calibration.replace("_", " ").title()
source = conf[CONF_SOURCE]
unit_of_measurement = conf.get(CONF_UNIT_OF_MEASUREMENT)
device_class = conf.get(CONF_DEVICE_CLASS)
ent_reg = entity_registry.async_get(hass)
source_entity: RegistryEntry | None = ent_reg.async_get(source)
source_state: State | None = hass.states.get(source)
if not (attribute := conf.get(CONF_ATTRIBUTE)):
def get_value(attr: str):
if source_state and (unit := source_state.attributes.get(attr)):
return unit
if source_entity and (unit := getattr(source_entity, attr)):
return unit
return None
unit_of_measurement = unit_of_measurement or get_value(ATTR_UNIT_OF_MEASUREMENT)
device_class = device_class or get_value(ATTR_DEVICE_CLASS)
if conf.get(CONF_HIDE_SOURCE) and source_entity and not source_entity.hidden:
ent_reg.async_update_entity(source, hidden_by=RegistryEntryHider.INTEGRATION)
async_add_entities(
[
CalibrationSensor(
unique_id,
name,
source,
attribute,
conf[CONF_PRECISION],
conf[CONF_POLYNOMIAL],
unit_of_measurement,
device_class,
)
]
)
|
4af7dcfa49bebccc991b4e91550fbac570301192
| 26,091 |
from typing import Tuple
from typing import Mapping
from typing import Dict
from typing import Set
def calculate_subgraph_edge_overlap(
graph: BELGraph,
annotation: str = 'Subgraph',
) -> Tuple[
Mapping[str, EdgeSet],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, EdgeSet]],
Mapping[str, Mapping[str, float]],
]:
"""Build a Dataframe to show the overlap between different sub-graphs.
Options:
1. Total number of edges overlap (intersection)
2. Percentage overlap (tanimoto similarity)
:param graph: A BEL graph
:param annotation: The annotation to group by and compare. Defaults to 'Subgraph'
:return: {subgraph: set of edges}, {(subgraph 1, subgraph2): set of intersecting edges},
{(subgraph 1, subgraph2): set of unioned edges}, {(subgraph 1, subgraph2): tanimoto similarity},
"""
sg2edge = defaultdict(set)
for u, v, d in graph.edges(data=True):
if edge_has_annotation(d, annotation):
sg2edge[d[ANNOTATIONS][annotation]].add((u, v))
    subgraph_intersection: Dict[str, Dict[str, EdgeSet]] = defaultdict(dict)
    subgraph_union: Dict[str, Dict[str, EdgeSet]] = defaultdict(dict)
result: Dict[str, Dict[str, float]] = defaultdict(dict)
for sg1, sg2 in itt.product(sg2edge, repeat=2):
subgraph_intersection[sg1][sg2] = sg2edge[sg1] & sg2edge[sg2]
subgraph_union[sg1][sg2] = sg2edge[sg1] | sg2edge[sg2]
result[sg1][sg2] = len(subgraph_intersection[sg1][sg2]) / len(subgraph_union[sg1][sg2])
return sg2edge, subgraph_intersection, subgraph_union, result
|
9ac07522d16976f52fa38690bdcd77f9f19b61e0
| 26,092 |
def d2logistic_offset_p(x, p):
"""
Wrapper function for :func:`d2logistic_offset`: `d2logistic_offset(x, *p)`
"""
return d2logistic_offset(x, *p)
|
1aa6a291af76d92273b71bd3a0d738aac4fb366c
| 26,093 |
def extract_coeffs_knots_from_splines(attitude_splines, k):
"""
Extract spline characteristics (coeffs, knots, splines). The spline being
defined as
    .. math:: S(t) = \sum_{n=0}^{N-1} c_n B_n(t)
    where :math:`c_n` are the spline coefficients and :math:`B_n(t)` is the
    spline basis evaluated at time t. **N** is the number of coefficients. The
    knots are the time discretization used for the spline.
:param attitude_splines: list or array of splines of
scipy.interpolate.InterpolatedUnivariateSpline
:param k: [int] Spline degree
:returns:
* [array] coeff
* [array] knots
* [array] splines (Bspline interpolating with degree k )
"""
att_coeffs, att_splines = ([], [])
internal_knots = attitude_splines[0].get_knots() # chose [0] since all the same
att_knots = extend_knots(internal_knots, k) # extend the knots to have all the needed ones
for i, spline in enumerate(attitude_splines):
coeffs = spline.get_coeffs()
att_coeffs.append(coeffs)
att_splines.append(BSpline(att_knots, coeffs, k))
return np.array(att_coeffs), np.array(att_knots), np.array(att_splines)
|
306702c3abf49b2b9f93a3c261fbfbfae0a47297
| 26,094 |
import requests
from requests.exceptions import HTTPError
def get_token(username: str = AUTH[0], password: str = AUTH[1]):
"""
Gets an access token from Cisco DNA Center always-on sandbox. Returns the token
string if successful; False (None) otherwise
"""
# Declare Useful local variabales to simplify request process
api_path = "https://sandboxdnac.cisco.com/dna"
auth = (username, password)
#
headers = {"Content-type": "application/json"}
# Issue http POST request to the proper URL to request a token
# If successful, print token. Else, raise HTTPError with details
try:
auth_resp = requests.post(
f"{api_path}/system/api/v1/auth/token", auth=auth, headers=headers
)
auth_resp.raise_for_status()
logger.info(msg=f"Successful POST - {auth_resp.status_code}")
token = auth_resp.json()["Token"]
return token
except HTTPError as err:
logger.error(msg=err)
raise err
|
ab40a1de758dccc4e8f65021b4122c3b866b2671
| 26,095 |
def disk_info(hard_disk):
"""Return a dictionary with information regarding a virtul hard disk.
The dictionary with information regarding hard disk image
contains:
:VHD_UUID:
:VHD_PARENT_UUID:
:VHD_STATE:
:VHD_TYPE:
:VHD_PATH: the path for virtual hard drive
:VHD_IMAGE_TYPE: value from ALL_DISK_FORMATS
:VHD_VARIANT: value from ALL_VARIANTS
:VHD_CAPACITY: (int) virtual disk capacity
:VHD_SIZE_ON_DISK: (int) size on disk for virtual disk
:VHD_USED_BY: (list)
"""
current_vhd = {}
child_uuids = []
output = manage.VBoxManage.show_hd_info(hard_disk)
for line in output.splitlines():
if not _process_vhd_field(line, current_vhd):
child_uuids.append(line.strip())
if constants.VHD_CHILD_UUIDS in current_vhd:
child_uuids.append(current_vhd[constants.VHD_CHILD_UUIDS])
current_vhd[constants.VHD_CHILD_UUIDS] = child_uuids
return current_vhd
|
b12befa8ef3082b755b18fe17d62f3221648c050
| 26,096 |
def get_corporation(corporation_id):
"""
Get corporation details for a corporation id from ESI
:param corporation_id: ID for the required corporation
:return: Dictionary containing corporation details
"""
op = esiapp.op['get_corporations_corporation_id'](corporation_id=corporation_id)
return _get_esi(op)
|
42b6ec8b4ed0d5e278d56a359ce8a8bc668007e3
| 26,097 |
def _is_list(v):
"""
Returns True if the given value is a @list.
:param v: the value to check.
:return: True if the value is a @list, False if not.
"""
# Note: A value is a @list if all of these hold True:
# 1. It is an Object.
# 2. It has the @list property.
return _is_object(v) and '@list' in v
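# Minimal usage sketch; assumes _is_object() accepts plain dicts, as in
# typical JSON-LD helpers.
# _is_list({'@list': [1, 2, 3]})   # -> True
# _is_list([1, 2, 3])              # -> False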
|
2568e7dc5035f8a3006dd39be0a04538e29c27b1
| 26,098 |
import numpy as np
def _find_onehot_actual(x):
    """Map one-hot value to one-hot name"""
    try:
        value = list(x).index(1)
    except ValueError:
        value = np.nan
    return value
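# Minimal usage sketch:
# _find_onehot_actual([0, 0, 1, 0])   # -> 2
# _find_onehot_actual([0, 0, 0, 0])   # -> nan (no hot index found)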
|
6bafb852e89479803ab824b3f621863724b12146
| 26,099 |
def lower_text(text: str) -> str:
"""Transform all the text to lowercase.
Args:
text : Input text
Returns:
Output text
"""
return text.lower()
|
2a657464a014703464ca47eeb77ed6a630535819
| 26,101 |
# NumPy is assumed here; the original may have used star imports or SciPy.
from numpy import dot
from numpy.linalg import eig
def pca(X,keep=2):
    """Perform PCA on data X. Assumes that data points correspond to columns.
"""
# Z = (X.T - mean(X,1)).T # subtract mean
C = dot(X,X.T)
V,D = eig(C)
B = D[:,0:keep]
return dot(B.T,X)
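# Minimal usage sketch: project 100 three-dimensional points (stored as
# columns) down to 2 components.
# import numpy as np
# X = np.random.rand(3, 100)
# pca(X, keep=2).shape   # -> (2, 100)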
|
01cd4414ac881b986d74247aeaa7dc96dd456721
| 26,102 |
from array import array
def stock_span(prices: array) -> list:
"""
Time Complexity: O(n*n)
"""
span_values: list = []
for i, price in enumerate(prices):
count: int = 1
for j in range(i - 1, -1, -1):
if prices[j] > price:
break
count += 1
span_values.append(count)
return span_values
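# Minimal usage sketch with the classic stock-span example:
# stock_span(array('i', [100, 80, 60, 70, 60, 75, 85]))
# -> [1, 1, 1, 2, 1, 4, 6]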
|
5a619bb1ce31c0e65dd5fc3d52af2e8b881a87b7
| 26,103 |
import boto3
def get_replication_tasks(replication_instance_arn):
    """Returns the list of replication tasks for the given replication instance"""
existing_tasks = []
dms_client = boto3.client('dms')
replication_tasks = dms_client.describe_replication_tasks()
for task in replication_tasks['ReplicationTasks']:
if task['ReplicationInstanceArn'] == replication_instance_arn:
existing_tasks.append(task)
return existing_tasks
|
2ee3f7ca108f502fa1e512319e6f630a1b0f54ff
| 26,104 |
def getProgramFields():
"""
Get the data element names and return them as a list.
"""
_, progFields = callAPI("GET", "program-fields", quiet='True')
return progFields
|
92799c4147ad86d2d52ade676953d2a1e60dbece
| 26,105 |
import base64
def base64url_decode(msg):
"""
Decode a base64 message based on JWT spec, Appendix B.
"Notes on implementing base64url encoding without padding"
"""
rem = len(msg) % 4
if rem:
msg += b'=' * (4 - rem)
return base64.urlsafe_b64decode(msg)
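# Minimal usage sketch: decode the unpadded base64url JWT header
# eyJhbGciOiJIUzI1NiJ9 back to its JSON bytes.
# base64url_decode(b"eyJhbGciOiJIUzI1NiJ9")   # -> b'{"alg":"HS256"}'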
|
f0f46749ae21ed8166648c52c673eab25f837881
| 26,106 |