content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def triplet_margin_loss(
anchor,
positive,
negative,
margin=0.1,
p=2,
use_cosine=False,
swap=False,
eps=1e-6,
scope='',
reduction=tf.losses.Reduction.SUM
):
"""
Computes the triplet margin loss
Args:
anchor: The tensor containing the anchor embeddings
positive: The tensor containing the positive embeddings
negative: The tensor containing the negative embeddings
The shapes of anchor, positive and negative must all be equal
margin: The margin in the triplet loss
p: The norm degree for pairwise distances Options: 1, 2 Default: 2
use_cosine: Should cosine distance be used?
swap: Should we swap anchor and positive to get the harder negative?
eps: A value used to prevent numerical instability
reduction: The reduction method to use
"""
assert anchor.shape == positive.shape == negative.shape
assert p in {1, 2}
if use_cosine:
def dist_fn(labels, preds):
return tf.losses.cosine_distance(
labels, preds, axis=1,
reduction=tf.losses.Reduction.NONE
)
elif p == 2:
def dist_fn(labels, preds):
return tf.losses.mean_squared_error(
labels, preds,
reduction=tf.losses.Reduction.NONE
)
elif p == 1:
def dist_fn(labels, preds):
return tf.losses.absolute_difference(
labels, preds,
reduction=tf.losses.Reduction.NONE
)
else:
raise NotImplementedError()
with tf.variable_scope(scope):
pdist = dist_fn(anchor, positive)
ndist = dist_fn(anchor, negative)
if swap:
# ndist_2 is the distance between positive and negative
ndist_2 = dist_fn(positive, negative)
ndist = tf.maximum(ndist, ndist_2)
loss = tf.maximum(pdist - ndist + margin, 0)
if reduction == tf.losses.Reduction.NONE:
return loss
elif reduction == tf.losses.Reduction.SUM:
return tf.reduce_sum(loss)
elif reduction == tf.losses.Reduction.MEAN:
return tf.reduce_mean(loss)
elif reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
return tf.reduce_sum(loss) / tf.cast(tf.shape(anchor)[0], loss.dtype)
elif reduction == tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS:
return tf.reduce_sum(loss) / tf.reduce_sum(tf.cast(tf.greater(loss, 0), loss.dtype))
else:
msg = '{} has not been implemented for triplet_margin_loss'.format(
reduction)
raise NotImplementedError(msg) | 55e85a9ae98ab57458ae1a61a1dbd445deddd7cb | 13,900 |
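A minimal usage sketch for triplet_margin_loss above (illustrative only; it assumes the TF 1.x tf.losses / tf.Session API that the function itself targets, and random embeddings):
import numpy as np
import tensorflow as tf

anchor = tf.constant(np.random.randn(8, 128), dtype=tf.float32)
positive = tf.constant(np.random.randn(8, 128), dtype=tf.float32)
negative = tf.constant(np.random.randn(8, 128), dtype=tf.float32)
loss = triplet_margin_loss(anchor, positive, negative, margin=0.2,
                           scope='triplet', reduction=tf.losses.Reduction.MEAN)
with tf.Session() as sess:
    print(sess.run(loss))  # scalar mean hinge loss over the batch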
def f_raw(x, a, b):
"""
The raw function call; performs no checks on valid parameters.
:return:
"""
return a * x + b | 89bbe9e7a08e3bf4bf37c3efa695ed20fdca95c5 | 13,901 |
import numpy as np
def compute_barycentric_weights_1d(samples, interval_length=None,
return_sequence=False,
normalize_weights=False):
"""
Return barycentric weights for a sequence of samples. e.g. of sequence
x0,x1,x2 where order represents the order in which the samples are added
to the interpolant.
Parameters
----------
return_sequence : boolean
True - return [1],[1/(x0-x1),1/(x1-x0)],
[1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))]
False- return [1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))]
Note
----
If length of interval [a,b]=4C then weights will grow or decay
exponentially at C^{-n} where n is number of points causing overflow
or underflow.
To minimize this effect multiply each x_j-x_k by C^{-1}. This has the effect
of rescaling all weights by C^n. In rare situations where n is very large,
randomize or use Leja ordering of the samples before computing weights.
See Barycentric Lagrange Interpolation by
Jean-Paul Berrut and Lloyd N. Trefethen 2004
"""
if interval_length is None:
scaling_factor = 1.
else:
scaling_factor = interval_length/4.
C_inv = 1/scaling_factor
num_samples = samples.shape[0]
try:
from pyapprox.cython.barycentric_interpolation import \
compute_barycentric_weights_1d_pyx
weights = compute_barycentric_weights_1d_pyx(samples, C_inv)
except (ImportError, ModuleNotFoundError) as e:
msg = 'compute_barycentric_weights_1d extension failed'
trace_error_with_msg(msg, e)
weights = np.empty((num_samples, num_samples), dtype=float)
weights[0, 0] = 1.
for jj in range(1, num_samples):
weights[jj, :jj] = C_inv * \
(samples[:jj]-samples[jj])*weights[jj-1, :jj]
weights[jj, jj] = np.prod(C_inv*(samples[jj]-samples[:jj]))
weights[jj-1, :jj] = 1./weights[jj-1, :jj]
weights[num_samples-1, :num_samples] =\
1./weights[num_samples-1, :num_samples]
if not return_sequence:
result = weights[num_samples-1, :]
# make sure magnitude of weights is approximately O(1)
# useful to sample sets like leja for gaussian variables
# where interval [a,b] is not very useful
# print('max_weights',result.min(),result.max())
if normalize_weights:
raise NotImplementedError('I do not think I want to support this option')
result /= np.absolute(result).max()
# result[I]=result
else:
result = weights
assert np.all(np.isfinite(result)), (num_samples)
return result | 1711328af31b756c040455e0b03363def08e6504 | 13,902 |
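A small illustrative call of compute_barycentric_weights_1d on Chebyshev points (assumes NumPy; the Cython extension may be absent, and note the fallback branch also relies on a module-level trace_error_with_msg helper not shown in this snippet):
import numpy as np

samples = np.cos(np.pi * np.arange(5) / 4)   # 5 distinct Chebyshev points in [-1, 1]
weights = compute_barycentric_weights_1d(samples, interval_length=2.0)
print(weights.shape)                 # (5,) -- one weight per interpolation node
print(np.all(np.isfinite(weights)))  # True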
import collections
def _generate_conversions():
"""
Generate conversions for unit systems.
"""
# conversions to inches
to_inch = {'microinches': 1.0 / 1000.0,
'mils': 1.0 / 1000.0,
'inches': 1.00,
'feet': 12.0,
'yards': 36.0,
'miles': 63360,
'angstroms': 1.0 / 2.54e8,
'nanometers': 1.0 / 2.54e7,
'microns': 1.0 / 2.54e4,
'millimeters': 1.0 / 2.54e1,
'centimeters': 1.0 / 2.54e0,
'meters': 1.0 / 2.54e-2,
'kilometers': 1.0 / 2.54e-5,
'decimeters': 1.0 / 2.54e-1,
'decameters': 1.0 / 2.54e-3,
'hectometers': 1.0 / 2.54e-4,
'gigameters': 1.0 / 2.54e-11,
'AU': 5889679948818.897,
'light years': 3.72461748e17,
'parsecs': 1.21483369e18}
# if a unit is known by other symbols, include them here
synonyms = collections.defaultdict(list)
synonyms.update({'millimeters': ['mm'],
'inches': ['in', '"'],
'feet': ["'"],
'meters': ['m']})
# add non-plural versions of units to conversions
# eg, millimeters -> millimeter
for key in to_inch.keys():
if key[-2:] == 'es' and key != 'miles':
synonyms[key].append(key[:-2])
elif key[-1] == 's':
synonyms[key].append(key[:-1])
# update the dict with synonyms
for key, new_keys in synonyms.items():
value = to_inch[key]
for new_key in new_keys:
to_inch[new_key] = value
# convert back to regular dictionary and make keys all lower case
to_inch = {k.strip().lower(): v for k, v in to_inch.items()}
return to_inch | 8fa4f625e693fe352b2bba0082d0b18c46f5bec1 | 13,903 |
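An illustrative lookup on the table returned by _generate_conversions (values are inches per unit):
to_inch = _generate_conversions()
# Synonyms and singular forms map to the same factor as the canonical plural key.
assert to_inch['mm'] == to_inch['millimeter'] == to_inch['millimeters']
print(to_inch['meters'])   # ~39.37 inches in a meter
print(to_inch['"'])        # 1.0 -- the double-quote symbol is a synonym for inches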
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
"""
Rather than attempting to merge files that were modified on both
branches, it marks them as unresolved. The resolve command must be
used to resolve these conflicts."""
return 1 | 278bb52f96e1a82ce9966626be08bc6fdd0df65d | 13,904 |
from typing import Pattern
from typing import Optional
from typing import Callable
from typing import Union
import logging
def parser(
text: str,
*,
field: str,
pattern: Pattern[str],
type_converter: Optional[Callable] = None,
clean_up: Optional[Callable] = None,
limit_size: Optional[int] = None,
null_value: Optional[Union[str, int, bool, None]] = None,
) -> str:
"""
Returns text based on regex pattern and other provided conditions.
:param text: Str. Text to parse.
:param field: Str. Label for output info, eg 'charges', 'bail'.
:param pattern: Pattern. Regex, compiled pattern used to search.
:param type_converter: Callable. Optional. Set type for return value.
Defaults to string converter.
:param clean_up: Callable. Optional. Function that does any final
formatting.
:param limit_size: Int. Optional. Max number of chars in returned string.
:param null_value: Any. Optional. Value to set when parse conditions
aren't met.
Default None.
:return: Str. Desired pattern in text.
"""
# set default if no type converter func is provided
if not type_converter:
type_converter = lambda x: str(x)
# parse
logging.info("Attempting to extract charges from text with Regex...")
try:
match = pattern.search(text)
final_value = match.group(field)
logging.info(f"{field.upper()}, FIRST PASS: {final_value}")
# Options
if clean_up:
final_value = clean_up(final_value)
if limit_size:
final_value = final_value[0:limit_size]
# Trim
final_value = final_value.strip()
# Type
final_value = type_converter(final_value)
except (AttributeError, ValueError) as e:
logging.info(
"Parsing failed or couldn't find target value - setting " "to None"
)
final_value = null_value
logging.info(f"{field.upper()}, FINAL: {final_value}")
return final_value | 0b44fecf252399b3109efedffe0f561809982ea6 | 13,905 |
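A hypothetical example of driving parser with a compiled pattern; the 'bail' field, the regex and the sample text are made up for illustration, and the named group must match the field argument:
import re

bail_pattern = re.compile(r"Bail:\s*\$(?P<bail>[\d,]+)")
text = "Defendant remanded. Bail: $5,000 pending hearing."
value = parser(
    text,
    field="bail",
    pattern=bail_pattern,
    type_converter=int,
    clean_up=lambda s: s.replace(",", ""),
)
print(value)  # 5000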
import os
def checkpoint_metrics_path(checkpoint_path, eval_name, file_name=None):
"""Gets a path to the JSON of eval metrics for checkpoint in eval_name."""
checkpoint_dir = os.path.dirname(checkpoint_path)
checkpoint_name = os.path.basename(checkpoint_path)
if eval_name:
# This bit of magic is defined by the estimator framework, and isn't easy
# to change. We only get to specify the suffix.
checkpoint_dir = os.path.join(checkpoint_dir, 'eval_' + eval_name)
if not file_name:
return os.path.join(checkpoint_dir, checkpoint_name + '.metrics')
return os.path.join(checkpoint_dir, file_name) | e176b873d13ae28f6a53100adba6ca437c4ce805 | 13,906 |
def colorize(text='', opts=(), **kwargs):
"""
Return your text, enclosed in ANSI graphics codes.
Depends on the keyword arguments 'fg' and 'bg', and the contents of
the opts tuple/list.
Return the RESET code if no parameters are given.
Valid colors:
'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
Valid options:
'bold'
'underscore'
'blink'
'reverse'
'conceal'
'noreset' - string will not be auto-terminated with the RESET code
Examples:
colorize('hello', fg='red', bg='blue', opts=('blink',))
colorize()
colorize('goodbye', opts=('underscore',))
print(colorize('first line', fg='red', opts=('noreset',)))
print('this should be red too')
print(colorize('and so should this'))
print('this should not be red')
"""
code_list = []
if text == '' and len(opts) == 1 and opts[0] == 'reset':
return '\x1b[%sm' % RESET
for k, v in kwargs.items():
if k == 'fg':
code_list.append(foreground[v])
elif k == 'bg':
code_list.append(background[v])
for o in opts:
if o in opt_dict:
code_list.append(opt_dict[o])
if 'noreset' not in opts:
text = '%s\x1b[%sm' % (text or '', RESET)
return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '') | 02ad24710413770cebdaa4265a1d40c69212ecc8 | 13,907 |
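colorize depends on module-level RESET, foreground, background and opt_dict tables that are not part of this snippet; a minimal stand-in mirroring the standard ANSI codes (as in django.utils.termcolors) lets it run on its own:
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
foreground = {color_names[x]: '3%s' % x for x in range(8)}
background = {color_names[x]: '4%s' % x for x in range(8)}
RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}

print(colorize('hello', fg='red', bg='blue', opts=('bold',)))  # '\x1b[31;44;1mhello\x1b[0m'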
from PIL import Image
import torchvision.transforms as T
def get_prediction(img_path, threshold):
"""
get_prediction
parameters:
- img_path - path of the input image
- threshold - threshold value for prediction score
method:
- Image is obtained from the image path
- the image is converted to image tensor using PyTorch's Transforms
- image is passed through the model to get the predictions
- class, box coordinates are obtained, but only prediction score > threshold
are chosen.
"""
img = Image.open(img_path)
transform = T.Compose([T.ToTensor()])
img = transform(img)
pred = model([img])
pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
pred_score = list(pred[0]['scores'].detach().numpy())
pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
pred_boxes = pred_boxes[:pred_t + 1]
pred_class = pred_class[:pred_t + 1]
return pred_boxes, pred_class | d6df91fb464b072b06ef759ad53aa00fb7d624ec | 13,908 |
def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size,
num_res, num_templates=0):
"""Guess at the MSA and sequence dimensions to make fixed size."""
pad_size_map = {
NUM_RES: num_res,
NUM_MSA_SEQ: msa_cluster_size,
NUM_EXTRA_SEQ: extra_msa_size,
NUM_TEMPLATES: num_templates,
}
for k, v in protein.items():
if k == 'extra_cluster_assignment':
continue
shape = list(v.shape)
schema = shape_schema[k]
assert len(shape) == len(schema), f'Rank mismatch between ' + \
f'shape and shape schema for {k}: {shape} vs {schema}'
pad_size = [pad_size_map.get(s2, None) or s1
for (s1, s2) in zip(shape, schema)]
padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
if padding:
protein[k] = np.pad(v, padding)
protein[k] = protein[k].reshape(pad_size)
return protein | f74306815dd7cd5291305c7b5c67cae4625c4d38 | 13,909 |
def plot_skymap_tract(skyMap, tract=0, title=None, ax=None):
"""
Plot a tract from a skyMap.
Parameters
----------
skyMap: lsst.skyMap.SkyMap
The SkyMap object containing the tract and patch information.
tract: int [0]
The tract id of the desired tract to plot.
title: str [None]
Title of the tract plot. If None, the use `tract <id>`.
ax: matplotlib.axes._subplots.AxesSubplot [None]
The subplot object to contain the tract plot. If None, then make a new one.
Returns
-------
matplotlib.axes._subplots.AxesSubplot: The subplot containing the tract plot.
"""
if title is None:
title = 'tract {}'.format(tract)
tractInfo = skyMap[tract]
tractBox = afw_geom.Box2D(tractInfo.getBBox())
tractPosList = tractBox.getCorners()
wcs = tractInfo.getWcs()
xNum, yNum = tractInfo.getNumPatches()
if ax is None:
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
tract_center = wcs.pixelToSky(tractBox.getCenter()).getPosition(afw_geom.degrees)
ax.text(tract_center[0], tract_center[1], '%d' % tract, size=16,
ha="center", va="center", color='blue')
for x in range(xNum):
for y in range(yNum):
patchInfo = tractInfo.getPatchInfo([x, y])
patchBox = afw_geom.Box2D(patchInfo.getOuterBBox())
pixelPatchList = patchBox.getCorners()
path = make_patch(pixelPatchList, wcs)
patch = patches.PathPatch(path, alpha=0.1, lw=1)
ax.add_patch(patch)
center = wcs.pixelToSky(patchBox.getCenter()).getPosition(afw_geom.degrees)
ax.text(center[0], center[1], '%d,%d'%(x,y), size=6,
ha="center", va="center")
skyPosList = [wcs.pixelToSky(pos).getPosition(afw_geom.degrees)
for pos in tractPosList]
ax.set_xlim(max(coord[0] for coord in skyPosList) + 1,
min(coord[0] for coord in skyPosList) - 1)
ax.set_ylim(min(coord[1] for coord in skyPosList) - 1,
max(coord[1] for coord in skyPosList) + 1)
ax.grid(ls=':',color='gray')
ax.set_xlabel("RA (deg.)")
ax.set_ylabel("Dec (deg.)")
ax.set_title(title)
return ax | a8f1b25d8afedfbb0ed643b7954e615932031419 | 13,910 |
import json
def label(vertex):
""" Graph vertex label in dot format """
label = f"{vertex.name} {vertex.state or ''}\n{vertex.traceback or ''}"
label = json.dumps(label).replace("\\n", r"\l")
return f"[label={label}]" | a8604cfd837afbdba8b8ee7666d81df4b015ad2a | 13,911 |
import six
import hashlib
def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024):
"""Compute the linear and tree hash from a fileobj.
This function will compute the linear/tree hash of a fileobj
in a single pass through the fileobj.
:param fileobj: A file like object.
:param chunk_size: The size of the chunks to use for the tree
hash. This is also the buffer size used to read from
`fileobj`.
:rtype: tuple
:return: A tuple of (linear_hash, tree_hash). Both hashes
are returned in hex.
"""
# Python 3+, not binary
if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode:
raise ValueError('File-like object must be opened in binary mode!')
linear_hash = hashlib.sha256()
chunks = []
chunk = fileobj.read(chunk_size)
while chunk:
# It's possible to get a file-like object that has no mode (checked
# above) and returns something other than bytes (e.g. str). So here
# we try to catch that and encode to bytes.
if not isinstance(chunk, bytes):
chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8')
linear_hash.update(chunk)
chunks.append(hashlib.sha256(chunk).digest())
chunk = fileobj.read(chunk_size)
if not chunks:
chunks = [hashlib.sha256(b'').digest()]
return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks)) | 8c6aed21ae59ecb3e5449ee0856be1d032108aa6 | 13,912 |
def imshow(axim, img, amp_range=None, extent=None,\
interpolation='nearest', aspect='auto', origin='upper',\
orientation='horizontal', cmap='jet'):
"""
extent - list of four image physical limits for labeling
cmap - color map name, e.g. 'gray_r'
"""
imsh = axim.imshow(img, interpolation=interpolation, aspect=aspect, origin=origin, extent=extent, cmap=cmap)
if amp_range is not None: imsh.set_clim(amp_range[0], amp_range[1])
return imsh | 3483690b01c5d182877c3bf944fa5409d4cb9e69 | 13,913 |
def get_total():
"""
Return the rounded total as properly rounded string.
Credits:
https://github.com/dbrgn/coverage-badge
"""
cov = coverage.Coverage()
cov.load()
total = cov.report(file=Devnull())
class Precision(coverage.results.Numbers):
"""
A class for using the percentage rounding of the main coverage package,
with any percentage.
To get the string format of the percentage, use the ``pc_covered_str``
property.
"""
def __init__(self, percent):
self.percent = percent
@property
def pc_covered(self):
return self.percent
return Precision(total).pc_covered_str | 9df511f0d895721061642c2fb88268490e27cc0b | 13,914 |
def _infer_subscript_list(context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return ValueSet([iterable.Slice(context, None, None, None)])
elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation, except for Python 2's
# Ellipsis.
# e.g. array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return ValueSet([iterable.Slice(context, *result)])
elif index.type == 'subscriptlist':
return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])
# No slices
return context.infer_node(index) | bde1de5e7604d51e6c85e429ceb2102d79e91ca6 | 13,915 |
def count_by_guess(dictionary, correctly=False):
"""
Count the number of correctly/incorrectly guessed images for a dataset
:param dictionary:
:param correctly:
:return:
"""
guessed = 0
for response in dictionary:
guessed = guessed + count_by_guess_user(response, correctly)
return guessed | d1328a63d3029707131f1932be1535dabb62ab66 | 13,916 |
def get_game_by_index(statscursor, table, index):
""" Holds get_game_by_index db related data """
query = "SELECT * FROM " + table + " WHERE num=:num"
statscursor.execute(query, {'num': index})
return statscursor.fetchone() | 754a83f2281ad095ffc32eb8a03c95490bd5f815 | 13,917 |
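An illustrative round trip with an in-memory SQLite cursor; the 'games' table and its columns are hypothetical:
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE games (num INTEGER, home TEXT, away TEXT)")
cur.execute("INSERT INTO games VALUES (1, 'Lions', 'Bears')")
print(get_game_by_index(cur, "games", 1))  # (1, 'Lions', 'Bears')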
def create_queue():
"""Creates the SQS queue and returns the queue url and metadata"""
conn = boto3.client('sqs', region_name=CONFIG['region'])
queue_metadata = conn.create_queue(QueueName=QUEUE_NAME, Attributes={'VisibilityTimeout':'3600'})
if 'queue_tags' in CONFIG:
conn.tag_queue(QueueUrl=queue_metadata['QueueUrl'], Tags=CONFIG['queue_tags'])
"""Get the SQS queue object from the queue URL"""
sqs = boto3.resource('sqs', region_name=CONFIG['region'])
queue = sqs.Queue(queue_metadata['QueueUrl'])
return conn, queue | ae61c542182bc1238b76bf94991e50809bace595 | 13,918 |
def db_describe(table, **args):
"""Return the list of columns for a database table
(interface to `db.describe -c`). Example:
>>> run_command('g.copy', vector='firestations,myfirestations')
0
>>> db_describe('myfirestations') # doctest: +ELLIPSIS
{'nrows': 71, 'cols': [['cat', 'INTEGER', '20'], ... 'ncols': 22}
>>> run_command('g.remove', flags='f', type='vector', name='myfirestations')
0
:param str table: table name
:param list args:
:return: parsed module output
"""
if 'database' in args and args['database'] == '':
args.pop('database')
if 'driver' in args and args['driver'] == '':
args.pop('driver')
s = read_command('db.describe', flags='c', table=table, **args)
if not s:
fatal(_("Unable to describe table <%s>") % table)
cols = []
result = {}
for l in s.splitlines():
f = l.split(':')
key = f[0]
f[1] = f[1].lstrip(' ')
if key.startswith('Column '):
n = int(key.split(' ')[1])
cols.insert(n, f[1:])
elif key in ['ncols', 'nrows']:
result[key] = int(f[1])
else:
result[key] = f[1:]
result['cols'] = cols
return result | 6265a2f6dcc26fcd1fcebb5ead23abfb37cfa179 | 13,919 |
def objective_func(x, cs_objects, cs_data):
"""
Define the objective function
:param x: 1D array containing the voltages to be set
:param args: tuple containing all extra parameters needed
:return: average count rate for 100 shots
"""
x = np.around(x,2)
try:
flag_range = 0
for i in xrange(len(x)):
if (x[i] <= float(cs_objects[i,4])) or (x[i] >= float(cs_objects[i,5])):
flag_range = 1
raise ValueError
for i in xrange(len(x)):
if flag_range == 0:
if int(cs_objects[i,2]) != -1:
cs.call_process2(cs_objects[i,0], cs_objects[i,1], "I:1,D:1", cs.pack_ch_val([int(cs_objects[i,2])], [x[i]]))
else:
cs.call_process2(cs_objects[i,0], cs_objects[i,1], "D:1", cs.pack_val([x[i]]))
else:
return
time.sleep(1)
flag = 0
value = total_counts(flag, *cs_data)
# value = scop.rosen(x)
return value
except ValueError:
print "Value error : value went out of bound" | 677b6455b0db177a3a4f716ced3dd309c711cf74 | 13,920 |
def getHPELTraceLogAttribute(nodename, servername, attributename):
""" This function returns an attribute of the HPEL Trace Log for the specified server.
Function parameters:
nodename - the name of the node on which the server to be configured resides.
servername - the name of the server whose HPEL Trace is to be configured.
attributename - the following attribute names can be specified:
- 'dataDirectory' - Specifies the name of the directory where the HPEL logs
will be stored.
- 'bufferingEnabled' - Specifies whether or not log record buffering should
be enabled. Valid values are 'true' and 'false'.
- 'fileSwitchEnabled' - Specifies whether or not a new log file should be
started each day. Valid values are 'true' and 'false'.
- 'fileSwitchTime' - If 'fileSwitchEnabled' is set to 'true', this field
specifies the time that new log file should be started.
A value from 0 - 23 should be specified. A value of 0
means 12 AM 1 means 1 AM, 2 means 2 AM, ..., 23 means
11 PM. If a value greater than 23 is entered, this
field will be set to 0 (12 AM).
- 'memoryBufferSize' - Specifies the size (in MB) of the memory trace buffer.
- 'outOfSpaceAction' - Specifies which action to take if the hard disk runs
out of space. Valid values are 'StopLogging',
'StopServer', and 'PurgeOld'.
- 'purgeBySizeEnabled' - Specifies whether or not to purge the logs based
on size. Valid values are 'true' and 'false'.
- 'purgeByTimeEnabled' - Specifies whether or not to purge the logs based
on time. Valid values are 'true' and 'false'.
- 'purgeMaxSize' - Specifies the maximum total size of the logs (in MB).
- 'purgeMinTime' - Specifies the minimum amount of time to keep the logs
(in hours).
- 'storageType' - Specifies whether the trace log should be written to a
directory or to memory. Valid values are 'DIRECTORY'
and 'MEMORYBUFFER'.
"""
m = "getHPELTraceLogAttribute:"
sop (m, "Entering function...")
sop (m, "Calling getNodeId() with nodename = %s." % (nodename))
nodeID = getNodeId(nodename)
sop (m, "Returned from getNodeID; returned nodeID = %s" % nodeID)
if nodeID == "":
raise "Could not find node name '%s'" % (nodename)
else:
sop (m, "Calling getServerId() with nodename = %s and servername = %s." % (nodename, servername))
serverID = getServerId(nodename, servername)
sop (m, "Returned from getServerID; returned serverID = %s" % serverID)
if serverID == None:
raise "Could not find server '%s' on node '%s'" % (servername, nodename)
else:
serviceName = "HighPerformanceExtensibleLogging"
sop (m, "Calling AdminConfig.list with serviceName = %s and serverID = %s." % (serviceName, serverID))
HPELID = AdminConfig.list(serviceName, serverID)
sop (m, "Returned from AdminConfig.list; HPELID = %s" % HPELID)
sop (m, "Calling AdminConfig.list to get the config ID of the HPEL Trace object.")
HPELTraceID = AdminConfig.list("HPELTrace", HPELID)
sop (m, "Returned from AdminConfig.list; HPELTraceID = %s" % HPELTraceID)
sop(m, "Calling AdminConfig.showAttribute to get the value of attribute = %s" % ( attributename ))
attributevalue = AdminConfig.showAttribute(HPELTraceID, attributename)
sop (m, "Returned from AdminConfig.showAttribute; attributevalue = %s" % ( attributevalue ))
sop (m, "Exiting function...")
return attributevalue
#endif
#endif | 8003066ec41ee07dab311690d0687d7f79e6952a | 13,921 |
import pandas as pd
def dispersionTable(adata):
"""
Parameters
----------
adata
Returns
-------
"""
if adata.uns["ispFitInfo"]["blind"] is None:
raise ("Error: no dispersion model found. Please call estimateDispersions() before calling this function")
disp_df = pd.DataFrame({"gene_id": adata.uns["ispFitInfo"]["blind"]["disp_table"]["gene_id"],
"mean_expression": adata.uns["ispFitInfo"]["blind"]["disp_table"]["mu"],
"dispersion_fit": adata.uns["ispFitInfo"]["blind"]["disp_table"]["blind"]["mu"],
"dispersion_empirical": adata.uns["ispFitInfo"]["blind"]["disp_table"]["disp"]})
return disp_df | 7f7b4c122ffc42402248ec55155c774c77fbad51 | 13,922 |
def L10_indicator(row):
"""
Determine the Indicator of L10 as one of five indicators
"""
if row < 40:
return "Excellent"
elif row < 50:
return "Good"
elif row < 61:
return "Fair"
elif row <= 85:
return "Poor"
else:
return "Hazard" | 10656a76e72f99f542fd3a4bc2481f0ef7041fa9 | 13,923 |
def create_ip_record(
heartbeat_df: pd.DataFrame, az_net_df: pd.DataFrame = None
) -> IpAddress:
"""
Generate ip_entity record for provided IP value.
Parameters
----------
heartbeat_df : pd.DataFrame
A dataframe of heartbeat data for the host
az_net_df : pd.DataFrame
Option dataframe of Azure network data for the host
Returns
-------
IP
Details of the IP data collected
"""
ip_entity = IpAddress()
# Produce ip_entity record using available dataframes
ip_hb = heartbeat_df.iloc[0]
ip_entity.Address = ip_hb["ComputerIP"]
ip_entity.hostname = ip_hb["Computer"] # type: ignore
ip_entity.SourceComputerId = ip_hb["SourceComputerId"] # type: ignore
ip_entity.OSType = ip_hb["OSType"] # type: ignore
ip_entity.OSName = ip_hb["OSName"] # type: ignore
ip_entity.OSVMajorVersion = ip_hb["OSMajorVersion"] # type: ignore
ip_entity.OSVMinorVersion = ip_hb["OSMinorVersion"] # type: ignore
ip_entity.ComputerEnvironment = ip_hb["ComputerEnvironment"] # type: ignore
ip_entity.OmsSolutions = [ # type: ignore
sol.strip() for sol in ip_hb["Solutions"].split(",")
]
ip_entity.VMUUID = ip_hb["VMUUID"] # type: ignore
ip_entity.SubscriptionId = ip_hb["SubscriptionId"] # type: ignore
geoloc_entity = GeoLocation() # type: ignore
geoloc_entity.CountryName = ip_hb["RemoteIPCountry"] # type: ignore
geoloc_entity.Longitude = ip_hb["RemoteIPLongitude"] # type: ignore
geoloc_entity.Latitude = ip_hb["RemoteIPLatitude"] # type: ignore
ip_entity.Location = geoloc_entity # type: ignore
# If Azure network data present add this to host record
if az_net_df is not None and not az_net_df.empty:
if len(az_net_df) == 1:
priv_addr_str = az_net_df["PrivateIPAddresses"].loc[0]
ip_entity["private_ips"] = convert_to_ip_entities(priv_addr_str)
pub_addr_str = az_net_df["PublicIPAddresses"].loc[0]
ip_entity["public_ips"] = convert_to_ip_entities(pub_addr_str)
else:
if "private_ips" not in ip_entity:
ip_entity["private_ips"] = []
if "public_ips" not in ip_entity:
ip_entity["public_ips"] = []
return ip_entity | 63deb15081f933b0a445d22eed25646782af4221 | 13,924 |
import re
def extract_version(version_file_name):
"""Extracts the version from a python file.
The statement setting the __version__ variable must not be indented. Comments after that
statement are allowed.
"""
regex = re.compile(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]\s*(#.*)?$")
with open(version_file_name, "r") as version_file:
lines = version_file.read().splitlines()
for line in reversed(lines):
version_match = regex.match(line)
if version_match:
return version_match.group(1)
else:
raise RuntimeError("Unable to find version string.") | 1cc70ba4bf69656bb8d210a49c236e38eba59513 | 13,925 |
import numpy as np
def powerlaw_loglike(data, theta):
"""Return the natural logarithm of the likelihood P(data | theta) for our
model of the ice flow.
data is expected to be a tuple of numpy arrays = (x, y, sigma)
theta is expected to be an array of parameters = (intercept, slope)
"""
x, y, sigma = data
n = len(x)
model = powerlaw_model(x, theta)
lnlike = -0.5 * (n*np.log(2.*np.pi) + np.sum(2.*np.log(sigma) + (
y-model)**2 / sigma**2))
return lnlike | 98650e66d2a16762b2534be9083b6b92e0d9e9fd | 13,926 |
def get_conv(dim=3):
"""Chooses an implementation for a convolution layer."""
if dim == 3:
return nn.Conv3d
elif dim == 2:
return nn.Conv2d
else:
raise ValueError('dim has to be 2 or 3') | 4152984ecf7220dc4693013ee567822a2487e225 | 13,927 |
import os
import errno
from os.path import abspath
def resolve_path(path, parent=None):
"""Resolves the absolute path of the specified file.
Args:
path (str): Path to resolve.
parent (str): The directory containing ``path`` if ``path`` is relative.
Returns:
The absolute path.
Raises:
IOError: if the path does not exist.
"""
apath = abspath(path)
if not os.path.exists(apath) and parent is not None:
apath = abspath(os.path.join(parent, path))
if not os.path.exists(apath):
raise IOError(errno.ENOENT, "%s does not exist" % apath, apath)
return apath | c8088bc2dcee62b0ed12e8b0902a35e2e291313c | 13,928 |
import ipykernel
from notebook.notebookapp import list_running_servers
import re
import logging
import requests
from urllib.parse import urljoin
logger = logging.getLogger(__name__)
def notebook_metadata():
"""Attempts to query jupyter for the path and name of the notebook file"""
error_message = "Failed to query for notebook name, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable"
try:
kernel_id = re.search('kernel-(.*).json', ipykernel.connect.get_connection_file()).group(1)
servers = list(list_running_servers()) # TODO: sometimes there are invalid JSON files and this blows up
except Exception:
logger.error(error_message)
return {}
for s in servers:
try:
if s['password']:
raise ValueError("Can't query password protected kernel")
res = requests.get(urljoin(s['url'], 'api/sessions'), params={'token': s.get('token', '')}).json()
except (requests.RequestException, ValueError):
logger.error(error_message)
return {}
for nn in res:
# TODO: wandb/client#400 found a case where res returned an array of strings...
if isinstance(nn, dict) and nn.get("kernel") and 'notebook' in nn:
if nn['kernel']['id'] == kernel_id:
return {"root": s['notebook_dir'], "path": nn['notebook']['path'], "name": nn['notebook']['name']}
return {} | 47cd98371605240ae52ca90fda23c46b9bde52d0 | 13,929 |
import discord
async def create_mute_role(bot, ctx):
"""Create the mute role for a guild"""
perms = discord.Permissions(
send_messages=False, read_messages=True)
mute_role = await ctx.guild.create_role(
name='Muted', permissions=perms,
reason='Could not find a muted role in the process of muting or unmuting.')
await bot.config.update_one({"_id": ctx.guild.id},
{'$set': {"mute_role": mute_role.id}}, upsert=True)
for channel in ctx.guild.channels:
try:
await channel.set_permissions(mute_role, read_messages=True, send_messages=False)
except discord.Forbidden:
continue
except discord.HTTPException:
continue
return mute_role | 9128de3a7f4f841e47531699a878a1c18d8be9d5 | 13,930 |
import json
import uuid
def build_request_data(useralias,
req_node):
"""build_request_data
:param useralias: user alias for directory name
:param req_node: simulated request node
"""
if "file" not in req_node:
return None
use_uniques = req_node["unique_names"]
use_file = req_node["file"].format(
useralias)
use_data = json.loads(open(use_file, 'r').read())
if use_uniques:
if "title" in use_data:
use_data["title"] = "{}_{}".format(
use_data["title"],
str(uuid.uuid4()))
if "full_file" in use_data:
use_data["full_file"] = \
use_data["full_file"].format(
str(uuid.uuid4()))
if "clean_file" in use_data:
use_data["clean_file"] = \
use_data["clean_file"].format(
str(uuid.uuid4()))
if "csv_file" in use_data:
use_data["csv_file"] = \
use_data["csv_file"].format(
str(uuid.uuid4()))
if "meta_file" in use_data:
use_data["meta_file"] = \
use_data["meta_file"].format(
str(uuid.uuid4()))
if "meta_suffix" in use_data:
use_data["meta_suffix"] = \
use_data["meta_suffix"].format(
str(uuid.uuid4()))
return use_data | 938c79c290e1e4c086e6d48f71cbd0b965d36b36 | 13,931 |
def _get_stmt_lists(self):
"""
Returns a tuple of the statement lists contained in this `ast.stmt`
node. This method should only be called by an `ast.stmt` node.
"""
if self.is_simple():
return ()
elif self.is_body():
return (self.body,)
elif self.is_body_orelse():
return (self.body, self.orelse)
elif self.is_body_finally():
return (self.body, self.finalbody)
else:
# Every statement has to be simple or complex.
assert(False) | 0ec85481bc4261ae77ced0ae32c72081ef80c651 | 13,932 |
def get_article(name):
"""a general function to get an article, returns None if doesn't exist
"""
article = None
if name is not None:
try:
article = Article.objects.get(name=name)
except Article.DoesNotExist:
pass
return article | d69e801a1d18ccf81753cc35ce2afa645b304fba | 13,933 |
import tempfile
import os
import shutil
def _CreateNginxConfigMapDir():
"""Returns a TemporaryDirectory containing files in the Nginx ConfigMap."""
if FLAGS.nginx_conf:
nginx_conf_filename = FLAGS.nginx_conf
else:
nginx_conf_filename = (
data.ResourcePath('container/kubernetes_nginx/http.conf'))
temp_dir = tempfile.TemporaryDirectory()
config_map_filename = os.path.join(temp_dir.name, 'default')
shutil.copyfile(nginx_conf_filename, config_map_filename)
return temp_dir | 9f9cde4e270e60ae03e65af61017ead886e89d18 | 13,934 |
def abbreviateLab(lab):
"""Lab names are very long and sometimes differ by punctuation or typos. Abbreviate for easier comparison."""
labAbbrev = apostropheSRe.sub('', lab)
labAbbrev = firstLetterRe.sub(r'\1', labAbbrev, count=0)
labAbbrev = spacePunctRe.sub('', labAbbrev, count=0)
return labAbbrev | dce4a1d0f6302a2968fe701d067b209fb61b8930 | 13,935 |
import numpy as np
def backproject(depth, intrinsics, instance_mask):
""" Back-projection, use opencv camera coordinate frame.
"""
cam_fx = intrinsics[0, 0]
cam_fy = intrinsics[1, 1]
cam_cx = intrinsics[0, 2]
cam_cy = intrinsics[1, 2]
non_zero_mask = (depth > 0)
final_instance_mask = np.logical_and(instance_mask, non_zero_mask)
idxs = np.where(final_instance_mask)
z = depth[idxs[0], idxs[1]]
x = (idxs[1] - cam_cx) * z / cam_fx
y = (idxs[0] - cam_cy) * z / cam_fy
pts = np.stack((x, y, z), axis=1)
return pts, idxs | 9828197b646342ec76cc21b1083540d0fe62978f | 13,936 |
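A small synthetic example for backproject, with depth in meters and a hypothetical pinhole intrinsics matrix:
import numpy as np

depth = np.zeros((4, 4), dtype=np.float32)
depth[1:3, 1:3] = 2.0   # a 2x2 patch two meters from the camera
intrinsics = np.array([[500.0, 0.0, 2.0],
                       [0.0, 500.0, 2.0],
                       [0.0, 0.0, 1.0]])
mask = np.ones_like(depth, dtype=bool)
pts, idxs = backproject(depth, intrinsics, mask)
print(pts.shape)  # (4, 3) -- one 3D point per masked pixel with positive depth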
def if_any(
_data,
*args,
_names=None,
_context=None,
**kwargs,
):
"""Apply the same predicate function to a selection of columns and combine
the results True if any element is True.
See Also:
[`across()`](datar.dplyr.across.across)
"""
if not args:
args = (None, None)
elif len(args) == 1:
args = (args[0], None)
_cols, _fns, *args = args
_data = _context.meta.get("input_data", _data)
return IfAny(
_data,
_cols,
_fns,
_names,
args,
kwargs,
).evaluate(_context) | 41bf4a14cc8b16845f7d0dd8138871a7ccfad66f | 13,937 |
def get_inpgen_para_from_xml(inpxmlfile, inpgen_ready=True):
"""
This routine returns an python dictionary produced from the inp.xml
file, which can be used as a calc_parameters node by inpgen.
Be aware that inpgen does not take all information that is contained in an inp.xml file
:param inpxmlfile: and xml etree of a inp.xml file
:param inpgen_ready: Bool, return a dict which can be inputed into inpgen while setting atoms
:return new_parameters: A dict which will lead to the same inp.xml (unless other defaults,
which cannot be controlled by the inpgen input, were changed)
"""
# TODO: convert econfig
# TODO: parse kpoints, somehow count is bad (if symmetry changes), mesh is not known, path cannot be specified
# Disclaimer: this routine needs some xpath expressions. these are hardcoded here,
# therefore maintainance might be needed, if you want to circumvent this, you have
# to get all the paths from somewhere.
#######
# all hardcoded xpaths used and attributes names:
# input
film_xpath = '/fleurInput/atomGroups/atomGroup/filmPos/' # check for film pos
# atom, for each species\
species_xpath = '/fleurInput/atomSpecies/species'
atom_id_xpath = '' # is reconstruction possible at all now?
atom_z_xpath = '@atomicNumber'
atom_rmt_xpath = 'mtSphere/@radius'
atom_dx_xpath = 'mtSphere/@logIncrement'
atom_jri_xpath = 'mtSphere/@gridPoints'
atom_lmax_xpath = 'atomicCutoffs/@lmax'
atom_lnosph_xpath = 'atomicCutoffs/@lnonsphr'
#atom_ncst_xpath = '@coreStates'
atom_econfig_xpath = 'electronConfig' # converting todo
atom_bmu_xpath = '@magMom'
atom_lo_xpath = 'lo' # converting todo
atom_element_xpath = '@element'
atom_name_xpath = '@name'
# comp
jspins_xpath = 'calculationSetup/magnetism/@jspins'
frcor_xpath = 'calculationSetup/coreElectrons/@frcor'
ctail_xpath = 'calculationSetup/coreElectrons/@ctail'
kcrel_xpath = 'calculationSetup/coreElectrons/@kcrel'
gmax_xpath = 'calculationSetup/cutoffs/@Gmax'
gmaxxc_xpath = 'calculationSetup/cutoffs/@GmaxXC'
kmax_xpath = 'calculationSetup/cutoffs/@Kmax'
# exco
exco_xpath = 'xcFunctional/@name'
# film
# soc
l_soc_xpath = '//calculationSetup/soc/@l_soc'
theta_xpath = '//calculationSetup/soc/@theta'
phi_xpath = '//calculationSetup/soc/@phi'
# qss
# kpt
title_xpath = '/fleurInput/comment/text()' # text
########
new_parameters = {}
#print('parsing inp.xml without XMLSchema')
#tree = etree.parse(inpxmlfile)
tree = inpxmlfile
root = tree.getroot()
# Create the cards
# &input # most things are not needed for AiiDA here. or we ignor them for now.
# film is set by the plugin depended on the structure
# symor per default = False? to avoid input which fleur can't take
# &comp
# attrib = get_xml_attribute(
comp_dict = {}
comp_dict = set_dict_or_not(comp_dict, 'jspins', convert_to_int(eval_xpath(root, jspins_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'frcor', convert_from_fortran_bool(eval_xpath(root, frcor_xpath)))
comp_dict = set_dict_or_not(comp_dict, 'ctail', convert_from_fortran_bool(eval_xpath(root, ctail_xpath)))
comp_dict = set_dict_or_not(comp_dict, 'kcrel', eval_xpath(root, kcrel_xpath))
comp_dict = set_dict_or_not(comp_dict, 'gmax', convert_to_float(eval_xpath(root, gmax_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'gmaxxc', convert_to_float(eval_xpath(root, gmaxxc_xpath), suc_return=False))
comp_dict = set_dict_or_not(comp_dict, 'kmax', convert_to_float(eval_xpath(root, kmax_xpath), suc_return=False))
new_parameters['comp'] = comp_dict
# &atoms
species_list = eval_xpath2(root, species_xpath)
for i, species in enumerate(species_list):
atom_dict = {}
atoms_name = 'atom{}'.format(i)
atom_z = convert_to_int(eval_xpath(species, atom_z_xpath), suc_return=False)
atom_rmt = convert_to_float(eval_xpath(species, atom_rmt_xpath), suc_return=False)
atom_dx = convert_to_float(eval_xpath(species, atom_dx_xpath), suc_return=False)
atom_jri = convert_to_int(eval_xpath(species, atom_jri_xpath), suc_return=False)
atom_lmax = convert_to_int(eval_xpath(species, atom_lmax_xpath), suc_return=False)
atom_lnosph = convert_to_int(eval_xpath(species, atom_lnosph_xpath), suc_return=False)
#atom_ncst = convert_to_int(eval_xpath(species, atom_ncst_xpath), suc_return=False)
atom_econfig = eval_xpath(species, atom_econfig_xpath)
atom_bmu = convert_to_float(eval_xpath(species, atom_bmu_xpath), suc_return=False)
atom_lo = eval_xpath(species, atom_lo_xpath)
atom_element = eval_xpath(species, atom_element_xpath)
atom_name_2 = eval_xpath(species, atom_name_xpath)
if not inpgen_ready:
atom_dict = set_dict_or_not(atom_dict, 'z', atom_z)
#atom_dict = set_dict_or_not(atom_dict, 'name', atom_name_2)
#atom_dict = set_dict_or_not(atom_dict, 'ncst', atom_ncst) (deprecated)
atom_dict = set_dict_or_not(atom_dict, 'rmt', atom_rmt)
atom_dict = set_dict_or_not(atom_dict, 'dx', atom_dx)
atom_dict = set_dict_or_not(atom_dict, 'jri', atom_jri)
atom_dict = set_dict_or_not(atom_dict, 'lmax', atom_lmax)
atom_dict = set_dict_or_not(atom_dict, 'lnonsph', atom_lnosph)
atom_dict = set_dict_or_not(atom_dict, 'econfig', atom_econfig)
atom_dict = set_dict_or_not(atom_dict, 'bmu', atom_bmu)
if atom_lo is not None:
atom_dict = set_dict_or_not(atom_dict, 'lo', convert_fleur_lo(atom_lo))
atom_dict = set_dict_or_not(atom_dict, 'element', '{}'.format(atom_element))
new_parameters[atoms_name] = atom_dict
# &soc
attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
theta = convert_to_float(eval_xpath(root, theta_xpath), suc_return=False)
phi = convert_to_float(eval_xpath(root, phi_xpath), suc_return=False)
if attrib:
new_parameters['soc'] = {'theta': theta, 'phi': phi}
# &kpt
#attrib = convert_from_fortran_bool(eval_xpath(root, l_soc_xpath))
#theta = eval_xpath(root, theta_xpath)
#phi = eval_xpath(root, phi_xpath)
# if kpt:
# new_parameters['kpt'] = {'theta' : theta, 'phi' : phi}
# # ['nkpt', 'kpts', 'div1', 'div2', 'div3', 'tkb', 'tria'],
# title
title = eval_xpath(root, title_xpath) # text
if title:
new_parameters['title'] = title.replace('\n', '').strip()
# &exco
#TODO, easy
exco_dict = {}
exco_dict = set_dict_or_not(exco_dict, 'xctyp', eval_xpath(root, exco_xpath))
# 'exco' : ['xctyp', 'relxc'],
new_parameters['exco'] = exco_dict
# &film
# TODO
# &qss
# TODO
# lattice, not supported?
return new_parameters | e0454061da7c817b4dfe3f1eb0257493dc92437b | 13,938 |
import os
def confirm_revocation(cert):
"""Confirm revocation screen.
:param cert: certificate object
:type cert: :class:
:returns: True if user would like to revoke, False otherwise
:rtype: bool
"""
return util(interfaces.IDisplay).yesno(
"Are you sure you would like to revoke the following "
"certificate:{0}{cert}This action cannot be reversed!".format(
os.linesep, cert=cert.pretty_print())) | d64c64a6426e521fa8d9edc817b44a50fdd75894 | 13,939 |
def Gaussian(y, model, yerr):
"""Returns the loglikelihood for a Gaussian distribution.
In this calculation, it is assumed that the parameters
are true, and the loglikelihood that the data is drawn from
the distribution established by the parameters is calculated
Parameters
----------
model : array_like
theoretical model data to be compared against
y : array_like
data points
yerr : standard deviations on individual data points,
assumed to be gaussian
Returns
-------
float
loglikelihood for the data."""
inv_sigma2 = 1.0/(yerr**2.0)
return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2))) | d9eaa41b95006a9d17907582b804a4921f672141 | 13,940 |
def clean_us_demographics(us_demographics_spark, spark_session):
"""
Clean data from us_demographics
Args:
us_demographics (object): Pyspark dataframe object
spark_session (object): Pyspark session
Returns:
(object): Pyspark dataframe with cleaned data
"""
spark = spark_session
us_demographics_spark.createOrReplaceTempView('us_demographics')
dum = spark.sql("""
SELECT City, State, cast(`Median Age` as float) as Median_Age, cast(`Male Population` as int) as Male_Population,
cast(`Female Population` as int) as Female_Population, cast(`Total Population` as int) as Total_Population,
cast(`Number of Veterans` as int) as Number_of_Veterans, cast(`Foreign-born` as int) as Foregin_born,
cast(`Average Household Size` as float) as Average_Household_Size, `State Code` as State_Code,Race, cast(Count as int)
FROM us_demographics
""")
us_demographics_spark_cleaned = dum.dropDuplicates()
us_demographics_spark_cleaned = us_demographics_spark_cleaned.na.drop()
us_demographics_spark_race = us_demographics_spark_cleaned.groupBy(['City','State']).pivot("Race").agg(F.first("Count"))
us_demographics_spark_race = us_demographics_spark_race.select('City', 'State', F.col('American Indian and Alaska Native').alias('American_Indian_and_Alaska_Native'),
'Asian', F.col('Black or African-American').alias('Black_or_African_American'), F.col('Hispanic or Latino').alias('Hispanic_or_Latino'), 'White')
us_demographics_spark_cleaned = us_demographics_spark_cleaned.drop('Race', 'Count')
us_demographics_spark_cleaned = us_demographics_spark_cleaned.dropDuplicates()
us_demographics_spark_cleaned = us_demographics_spark_cleaned.join(us_demographics_spark_race, ['State', 'City'])
us_demographics_spark_cleaned = us_demographics_spark_cleaned.fillna(
{'American_Indian_and_Alaska_Native':0,
'Asian':0,
'Black_or_African_American':0,
'Hispanic_or_Latino':0,
'White':0})
us_demographics_spark_cleaned = us_demographics_spark_cleaned.orderBy(['City','State'])
return us_demographics_spark_cleaned | dcf812bf64a2f6c3b908d895488e1a57e1729301 | 13,941 |
import datetime
def parse_date(date=None):
"""
Parse a string in YYYY-MM-DD format into a datetime.date object.
Throws ValueError if input is invalid
:param date: string in YYYY-MM-DD format giving a date
:return: a datetime.date object corresponding to the date given
"""
if date is None:
raise ValueError
fields = date.split('-')
if len(fields) != 3:
raise ValueError
return datetime.date(year=int(fields[0]),
month=int(fields[1]),
day=int(fields[2])) | a4c6cef85dabd445dd308fdd5f2c20a38accd6de | 13,942 |
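Illustrative calls to parse_date showing the happy path and the rejection of a malformed string:
print(parse_date("2021-07-04"))  # 2021-07-04 (a datetime.date)
try:
    parse_date("2021/07/04")
except ValueError:
    print("rejected: not in YYYY-MM-DD format")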
def status():
""" Incoming status handler: forwarded by ForwardServerProvider """
req = jsonex_loads(request.get_data())
status = g.provider._receive_status(req['status'])
return {'status': status} | 3a50ff8d829a7bf37b84871897335345496dbc49 | 13,943 |
def get_feature_extractor_info():
"""Return tuple of pretrained feature extractor and its best-input image size for the extractor"""
return get_pretrained_feature_extractor(), K_MODEL_IMAGE_SIZE | bdec6d5a2d402f659b9a001f4082f6b5e33ca3cc | 13,944 |
import networkx
def nx_find_connected_limited(graph, start_set, end_set, max_depth=3):
"""Return the neurons in end_set reachable from start_set with limited depth."""
reverse_graph = graph.reverse()
reachable = []
for e in end_set:
preorder_nodes = list(
(
networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes(
reverse_graph, source=e, depth_limit=max_depth
)
)
)
for s in start_set:
if s in preorder_nodes:
reachable.append(e)
break
return reachable | 4322f4231be73b575d05442f09608c71c3b9f605 | 13,945 |
def hexbyte_2integer_normalizer(first_int_byte, second_int_byte):
"""Function to normalize integer bytes to a single byte
Transform two integer bytes to their hex byte values and normalize
their values to a single integer
Parameters
__________
first_int_byte, second_int_byte : int
integer values to normalize (0 to 255)
Returns
_______
integer: int
Single normalized integer
"""
first_hex = f'{hex(first_int_byte)}'.lstrip('0x')
second_hex = f'{hex(second_int_byte)}'.lstrip('0x')
first_hex = first_hex if len(f'{first_hex}') == 2 else f'0{first_hex}'
second_hex = second_hex if len(f'{second_hex}') == 2 else f'0{second_hex}'
hex_string = f'{first_hex}{second_hex}'
integer = int(hex_string, 16)
return integer | a3bbe75014b6e08607314b615440039bab245f04 | 13,946 |
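Two quick checks of hexbyte_2integer_normalizer; the two bytes are concatenated as hex digits, so 0x12 and 0x34 combine to 0x1234:
print(hexbyte_2integer_normalizer(0x12, 0x34))  # 4660 == 0x1234
print(hexbyte_2integer_normalizer(0, 255))      # 255 == 0x00FF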
def wrapAngle(angle):
""" Ensures angle is between -360 and 360
arguments:
angle - float angle that you want to be between -360 and 360
returns:
float - angle between -360 and 360
"""
printDebug("In wrapAngle, angle is " + str(angle), DEBUG_INFO)
if angle >= 0:
return angle % 360
else:
return angle % -360 | 4ec1ee51b895075053468dfa5d09f988d15413d1 | 13,947 |
import os
def _save_first_checkpoint(keras_model, custom_objects, config):
"""Save first checkpoint for the keras Estimator.
Args:
keras_model: an instance of compiled keras model.
custom_objects: Dictionary for custom objects.
config: Estimator config.
Returns:
The path where keras model checkpoint is saved.
"""
# save checkpoint into subdirectory to allow warm start
keras_model_dir = os.path.join(config.model_dir, 'keras')
# Load weights and save to checkpoint if there is no checkpoint
latest_path = checkpoint_management.latest_checkpoint(keras_model_dir)
if not latest_path:
keras_weights = None
if _any_weight_initialized(keras_model):
keras_weights = keras_model.get_weights()
if not gfile.IsDirectory(keras_model_dir):
gfile.MakeDirs(keras_model_dir)
with ops.Graph().as_default():
random_seed.set_random_seed(config.tf_random_seed)
training_util.create_global_step()
model = _clone_and_build_model(model_fn_lib.ModeKeys.TRAIN, keras_model,
custom_objects)
# save to checkpoint
with session.Session(config=config.session_config) as sess:
if keras_weights:
model.set_weights(keras_weights)
# Make update ops and initialize all variables.
if not model.train_function:
# pylint: disable=protected-access
model._make_train_function()
K._initialize_variables(sess)
# pylint: enable=protected-access
saver = saver_lib.Saver()
latest_path = os.path.join(keras_model_dir, 'keras_model.ckpt')
saver.save(sess, latest_path)
return latest_path | 790cc96785c6a2a66d19af886c82e0dc354704c9 | 13,948 |
import logging
import os
def build_reference_spectrum_list_from_config_file(config):
"""
Read reference spectrum file glob(s) from configuration file to create
and return a list of ReferenceSpectrum instances.
:param config: configparser instance
:return: list of ReferenceSpectrum instances
"""
log = logging.getLogger(name=__name__)
references = config.items("references")
log.debug(references)
reference_spectrum_list, _ = ReferenceSpectrum.read_all(
[
os.path.expanduser(reference_file_glob)
for reference_file_glob, _ in references
]
)
if len(reference_spectrum_list) == 0:
raise ConfigurationFileError(
'no reference spectrum files were found using globs "{}"'.format(references)
)
else:
return reference_spectrum_list | 31d4f54e786846122845b7eb6d73dfa1353ef7d6 | 13,949 |
def make_window(signal, sample_spacing, which=None, alpha=4):
"""Generate a window function to be used in PSD analysis.
Parameters
----------
signal : `numpy.ndarray`
signal or phase data
sample_spacing : `float`
spacing of samples in the input data
which : `str,` {'welch', 'hann', None}, optional
which window to produce. If auto, attempts to guess the appropriate
window based on the input signal
alpha : `float`, optional
alpha value for welch window
Notes
-----
For 2D welch, see:
Power Spectral Density Specification and Analysis of Large Optical Surfaces
E. Sidick, JPL
Returns
-------
`numpy.ndarray`
window array
"""
s = signal.shape
if which is None:
# attempt to guess best window
ysamples = int(round(s[0] * 0.02, 0))
xsamples = int(round(s[1] * 0.02, 0))
corner1 = signal[:ysamples, :xsamples] == 0
corner2 = signal[-ysamples:, :xsamples] == 0
corner3 = signal[:ysamples, -xsamples:] == 0
corner4 = signal[-ysamples:, -xsamples:] == 0
if corner1.all() and corner2.all() and corner3.all() and corner4.all():
# four corners all "black" -- circular data, Welch window is best
# looks wrong but 2D welch takes x, y while indices are y, x
y, x = (e.arange(N) - (N / 2) for N in s)
which = window_2d_welch(x, y)
else:
# if not circular, square data; use Hanning window
y, x = (e.hanning(N) for N in s)
which = e.outer(y, x)
else:
if type(which) is str:
# known window type
wl = which.lower()
if wl == 'welch':
y, x = (e.arange(N) - (N / 2) for N in s)
which = window_2d_welch(x, y, alpha=alpha)
elif wl in ('hann', 'hanning'):
y, x = (e.hanning(N) for N in s)
which = e.outer(y, x)
else:
raise ValueError('unknown window type')
return which | 5ef18c990225b6610ee10c848ab4ee0b2ce0fc9b | 13,950 |
from typing import Dict
from typing import Union
import pandas as pd
import pint
def set_units(
df: pd.DataFrame, units: Dict[str, Union[pint.Unit, str]]
) -> pd.DataFrame:
"""Make dataframe unit-aware. If dataframe is already unit-aware, convert to specified
units. If not, assume values are in specified unit.
Parameters
----------
df : pd.DataFrame
units : Dict[str, Union[pint.Unit, str]]
key = column name, value = unit to set to that column
Returns
-------
pd.DataFrame
Same as input dataframe, but with specified units.
"""
df = df.copy() # don't change incoming dataframe
for name, unit in units.items():
df[name] = set_unit(df[name], unit)
return df | 8a0cf821e3e0d1ba7b1b8c3dbdddb5f517ea0acb | 13,951 |
def address_repr(buf, reverse: bool = True, delimit: str = "") -> str:
"""Convert a buffer into a hexlified string."""
order = range(len(buf) - 1, -1, -1) if reverse else range(len(buf))
return delimit.join(["%02X" % buf[byte] for byte in order]) | 6b4b8921d6280cd688c3bfcfca82b2b5546001e7 | 13,952 |
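Illustrative calls to address_repr with a typical 5-byte radio address and a short buffer printed in forward order with ':' separators:
print(address_repr(bytes([0xE7, 0xE7, 0xE7, 0xE7, 0xE7])))        # E7E7E7E7E7
print(address_repr(b"\x01\x02\x03", reverse=False, delimit=":"))  # 01:02:03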
import re
def _highlight(line1, line2):
"""Returns the sections that should be bolded in the given lines.
Returns:
two tuples. Each tuple indicates the start and end of the section
of the line that should be bolded for line1 and line2 respectively.
"""
start1 = start2 = 0
match = re.search(r'\S', line1) # ignore leading whitespace
if match:
start1 = match.start()
match = re.search(r'\S', line2)
if match:
start2 = match.start()
length = min(len(line1), len(line2)) - 1
bold_start1 = start1
bold_start2 = start2
while (bold_start1 <= length and bold_start2 <= length and
line1[bold_start1] == line2[bold_start2]):
bold_start1 += 1
bold_start2 += 1
match = re.search(r'\s*$', line1) # ignore trailing whitespace
bold_end1 = match.start() - 1
match = re.search(r'\s*$', line2)
bold_end2 = match.start() - 1
while (bold_end1 >= bold_start1 and bold_end2 >= bold_start2 and
line1[bold_end1] == line2[bold_end2]):
bold_end1 -= 1
bold_end2 -= 1
if bold_start1 - start1 > 0 or len(line1) - 1 - bold_end1 > 0:
return (bold_start1 + 1, bold_end1 + 2), (bold_start2 + 1, bold_end2 + 2)
return None, None | d9bf7667e24d21e6f91b656af0697765c2b74f55 | 13,953 |
import subprocess
def get_comrec_build(pkg_dir, build_cmd=build_py):
""" Return extended build command class for recording commit
The extended command tries to run git to find the current commit, getting
the empty string if it fails. It then writes the commit hash into a file
in the `pkg_dir` path, named ``COMMIT_INFO.txt``.
In due course this information can be used by the package after it is
installed, to tell you what commit it was installed from if known.
To make use of this system, you need a package with a COMMIT_INFO.txt file -
e.g. ``myproject/COMMIT_INFO.txt`` - that might well look like this::
# This is an ini file that may contain information about the code state
[commit hash]
# The line below may contain a valid hash if it has been substituted
# during 'git archive'
archive_subst_hash=$Format:%h$
# This line may be modified by the install process
install_hash=
The COMMIT_INFO file above is also designed to be used with git substitution
- so you probably also want a ``.gitattributes`` file in the root directory
of your working tree that contains something like this::
myproject/COMMIT_INFO.txt export-subst
That will cause the ``COMMIT_INFO.txt`` file to get filled in by ``git
archive`` - useful in case someone makes such an archive - for example with
via the github 'download source' button.
Although all the above will work as is, you might consider having something
like a ``get_info()`` function in your package to display the commit
information at the terminal. See the ``pkg_info.py`` module in the nipy
package for an example.
"""
class MyBuildPy(build_cmd):
''' Subclass to write commit data into installation tree '''
def run(self):
build_cmd.run(self)
proc = subprocess.Popen('git rev-parse HEAD',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
repo_commit, _ = proc.communicate()
# Fix for python 3
repo_commit = str(repo_commit)
# We write the installation commit even if it's empty
cfg_parser = ConfigParser()
cfg_parser.read(pjoin(pkg_dir, 'COMMIT_INFO.txt'))
cfg_parser.set('commit hash', 'install_hash', repo_commit)
out_pth = pjoin(self.build_lib, pkg_dir, 'COMMIT_INFO.txt')
cfg_parser.write(open(out_pth, 'wt'))
return MyBuildPy | f704fecc1c0001c3feeb66ccc4e251c019694c1b | 13,954 |
def get_detected_objects_new(df, siglim=5, Terr_lim=3, Toffset=2000):
"""
Get a dataframe with only the detected objects.
:param df: A DataFrame such as one output by get_ccf_summary with N > 1
:param siglim: The minimum significance to count as detected
:param Terr_lim: The maximum number of standard deviations of (Measured - Actual) to allow for detected objects
:param Toffset: The absolute difference to allow between the true and measured temperature.
:return: A dataframe similar to df, but with fewer rows
"""
S = get_initial_uncertainty(df)
S['Tdiff'] = S.Tmeas - S.Tactual
mean, std = S.Tdiff.mean(), S.Tdiff.std()
detected = S.loc[(S.significance > siglim) & (S.Tdiff - mean < Terr_lim * std) & (abs(S.Tdiff) < Toffset)]
return pd.merge(detected[['Primary', 'Secondary']], df, on=['Primary', 'Secondary'], how='left') | 7662086053c093b9eb19ffe7c56f5cf7914b1ab8 | 13,955 |
def cmp(a, b):
"""
Python 3 does not have a cmp function, this will do the cmp.
:param a: first object to check
:param b: second object to check
:return:
"""
# convert to lower case for string comparison.
if a is None:
return -1
if type(a) is str and type(b) is str:
a = a.lower()
b = b.lower()
# if list has string element, convert string to lower case.
if type(a) is list and type(b) is list:
a = [x.lower() if type(x) is str else x for x in a]
b = [x.lower() if type(x) is str else x for x in b]
a.sort()
b.sort()
return (a > b) - (a < b) | c82837a0d8887f55fdd1175b5d828742529b3e37 | 13,956 |
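A few illustrative comparisons with the Python-2-style cmp shim defined above:
print(cmp("Apple", "apple"))        # 0  -- string comparison is case-insensitive
print(cmp(["b", "A"], ["a", "B"]))  # 0  -- lists are lower-cased and sorted first
print(cmp(2, 10))                   # -1
print(cmp(None, 5))                 # -1 -- None always sorts first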
def pe(cmd, shell=True):
"""
Print and execute command on system
"""
ret = []
for line in execute(cmd, shell=shell):
ret.append(line)
print(line, end="")
return ret | 0a238be68a7c383153834d45fbf3193f9b8c9a72 | 13,957 |
import os
def create_photo(user_id, text: str, greencolor: bool): # color: tuple(R,G,B)
"""
:param user_id: int or str
:param text: str
:param greencolor: bool
True = green (204, 255, 204)
False = gray (240, 238, 237)
"""
color = (204, 255, 204)
if not greencolor:
color = (240, 238, 237)
fontname = os.path.join(dirs['font'], 'OpenSans-Regular.ttf')
fontsize = 14
font = ImageFont.truetype(fontname, fontsize)
preimg = Image.new('RGB', (2000, 1000), color)
text_draw = ImageDraw.Draw(preimg)
text_width, text_height = text_draw.multiline_textsize(text, font)
text_draw.multiline_text((10, 10), text, fill="black", font=font)
img = preimg.crop((0, 0, text_width + 20, text_height + 24))
path = os.path.join(dirs['images'], f'{user_id}.png')
img.save(path, "PNG")
return path | c06c15f450d614febfc52d3f274e80b6d79d6688 | 13,958 |
from PIL import ImageOps

def crop(image):
    """
    Method to crop out the unnecessary white parts of the image.
Inputs:
image (numpy array): Numpy array of the image label.
Outputs:
image (numpy array): Numpy array of the image label, cropped.
"""
image = ImageOps.invert(image)
imageBox = image.getbbox()
image = image.crop(imageBox)
return ImageOps.invert(image) | 37a12733bcda66a9da16d72ff3fae749784481a0 | 13,959 |
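A minimal usage sketch for crop(), assuming Pillow is installed; a synthetic white label with a small black square is trimmed down to the square:

from PIL import Image, ImageDraw

label = Image.new("L", (100, 100), 255)                    # all-white greyscale image
ImageDraw.Draw(label).rectangle([40, 40, 60, 60], fill=0)  # black square in the middle

trimmed = crop(label)
print(label.size, trimmed.size)  # (100, 100) (21, 21) -> white border removed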
def all_pairs_normalized_distances(X):
"""
We can't really compute distances over incomplete data since
rows are missing different numbers of entries.
The next best thing is the mean squared difference between two vectors
(a normalized distance), which gets computed only over the columns that
two vectors have in common. If two vectors have no features in common
then their distance is infinity.
Parameters
----------
X : np.ndarray
Data matrix of shape (n_samples, n_features) with missing entries
marked using np.nan
Returns a (n_samples, n_samples) matrix of pairwise normalized distances.
"""
n_rows, n_cols = X.shape
# matrix of mean squared difference between between samples
D = np.ones((n_rows, n_rows), dtype="float32", order="C") * np.inf
# we can cheaply determine the number of columns that two rows share
# by taking the dot product between their finite masks
observed_elements = np.isfinite(X).astype(int)
n_shared_features_for_pairs_of_rows = np.dot(
observed_elements,
observed_elements.T)
no_overlapping_features_rows = n_shared_features_for_pairs_of_rows == 0
number_incomparable_rows = no_overlapping_features_rows.sum(axis=1)
row_overlaps_every_other_row = (number_incomparable_rows == 0)
row_overlaps_no_other_rows = number_incomparable_rows == n_rows
valid_rows_mask = ~row_overlaps_no_other_rows
valid_row_indices = np.where(valid_rows_mask)[0]
# preallocate all the arrays that we would otherwise create in the
# following loop and pass them as "out" parameters to NumPy ufuncs
diffs = np.zeros_like(X)
missing_differences = np.zeros_like(diffs, dtype=bool)
valid_rows = np.zeros(n_rows, dtype=bool)
ssd = np.zeros(n_rows, dtype=X.dtype)
for i in valid_row_indices:
x = X[i, :]
np.subtract(X, x.reshape((1, n_cols)), out=diffs)
np.isnan(diffs, out=missing_differences)
# zero out all NaN's
diffs[missing_differences] = 0
# square each difference
diffs **= 2
observed_counts_per_row = n_shared_features_for_pairs_of_rows[i]
if row_overlaps_every_other_row[i]:
# add up all the non-missing squared differences
diffs.sum(axis=1, out=D[i, :])
D[i, :] /= observed_counts_per_row
else:
np.logical_not(no_overlapping_features_rows[i], out=valid_rows)
# add up all the non-missing squared differences
diffs.sum(axis=1, out=ssd)
ssd[valid_rows] /= observed_counts_per_row[valid_rows]
D[i, valid_rows] = ssd[valid_rows]
return D | c744c6ac87cbd3760d6512178747ac60794d616a | 13,960 |
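A small NumPy sketch of all_pairs_normalized_distances() on a toy matrix with missing entries; rows that share no observed columns stay at infinite distance:

import numpy as np

X = np.array([
    [1.0,    2.0,    np.nan],
    [1.0,    4.0,    np.nan],
    [np.nan, np.nan, 7.0],
], dtype="float32")

D = all_pairs_normalized_distances(X)
print(D[0, 1])  # 2.0 -> mean squared difference over the two shared columns
print(D[0, 2])  # inf -> rows 0 and 2 have no observed columns in common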
import numpy as np
import torch
import torch.nn.functional as F
import librosa
def forward_pass(model, target_angle, mixed_data, conditioning_label, args):
"""
Runs the network on the mixed_data
with the candidate region given by voice
"""
target_pos = np.array([
FAR_FIELD_RADIUS * np.cos(target_angle),
FAR_FIELD_RADIUS * np.sin(target_angle)
])
data, _ = utils.shift_mixture(
torch.tensor(mixed_data).to(args.device), target_pos, args.mic_radius,
args.sr)
data = data.float().unsqueeze(0) # Batch size is 1
# Normalize input
data, means, stds = normalize_input(data)
# Run through the model
valid_length = model.valid_length(data.shape[-1])
delta = valid_length - data.shape[-1]
padded = F.pad(data, (delta // 2, delta - delta // 2))
output_signal = model(padded, conditioning_label)
output_signal = center_trim(output_signal, data)
output_signal = unnormalize_input(output_signal, means, stds)
output_voices = output_signal[:, 0] # batch x n_mics x n_samples
output_np = output_voices.detach().cpu().numpy()[0]
energy = librosa.feature.rms(output_np).mean()
return output_np, energy | e9644b01ea04b08ae92d50d3c7944e0d72213b2b | 13,961 |
from sqlalchemy import select  # SQLAlchemy's select, not the stdlib select module
from typing import Optional
from datetime import datetime
import pytz
async def get_event_by_code(code: str, db: AsyncSession) -> Event:
"""
Get an event by its code
"""
statement = select(Event).where(Event.code == code)
result = await db.execute(statement)
event: Optional[Event] = result.scalars().first()
if event is None:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail="invalid attendance code",
)
# Check that the code is still valid
with tracer.start_as_current_span("check-validity"):
now = datetime.now(tz=pytz.utc)
if not event.enabled or now < event.valid_from or now > event.valid_until:
raise HTTPException(status_code=HTTPStatus.FORBIDDEN, detail="invalid code")
return event | 592cd6b5aad7b12a98889bf82ea7e32a55b8832e | 13,962 |
def get(name):
"""Returns an OpDef for a given `name` or None if the lookup fails."""
with _sync_lock:
return _registered_ops.get(name) | 75e3ba3601f1ad8f67e77046a9b286bee8e60be6 | 13,963 |
def angle_detect_dnn(img, adjust=True):
"""
文字方向检测
"""
h, w = img.shape[:2]
ROTATE = [0, 90, 180, 270]
if adjust:
        thresh = 0.05
        xmin, ymin, xmax, ymax = int(thresh * w), int(thresh * h), w - int(thresh * w), h - int(thresh * h)
        img = img[ymin:ymax, xmin:xmax]  # crop the image edges
inputBlob = cv2.dnn.blobFromImage(img,
scalefactor=1.0,
size=(224, 224),
swapRB=True,
                                          mean=[103.939, 116.779, 123.68], crop=False)
angleNet.setInput(inputBlob)
pred = angleNet.forward()
index = np.argmax(pred, axis=1)[0]
return ROTATE[index] | a3fc8513afce26e96a315a606acfd9be9feaa376 | 13,964 |
def get_correct_line(df_decisions):
"""
The passed df has repeated lines for the same file (same chemin_source).
We take the most recent one.
:param df_decisions: Dataframe of decisions
:return: Dataframe without repeated lines (according to the chemin_source column)
"""
return df_decisions.sort_values('timestamp_modification').drop_duplicates('chemin_source', keep='last') | 989f1aba1c5e0c61f8b7ca1c883baf4dd181ebbc | 13,965 |
def fix_1(lst1, lst2):
"""
Divide all of the elements in `lst1` by each element in `lst2`
and return the values in a list.
>>> fix_1([1, 2, 3], [0, 1])
[1.0, 2.0, 3.0]
>>> fix_1([], [])
[]
>>> fix_1([10, 20, 30], [0, 10, 10, 0])
[1.0, 2.0, 3.0, 1.0, 2.0, 3.0]
"""
out = []
for div in lst2:
for num in lst1:
            try:
                out.append(num / div)
            except ZeroDivisionError:
                pass  # skip divisors of zero
return out | 7929cfc19952a829c66c18af967668d1015f8477 | 13,966 |
def user_wants_upload():
"""
Determines whether or not the user wants to upload the extension
:return: boolean
"""
choice = input("Do you want to upload your extension right now? :")
if "y" in choice or "Y" in choice:
return True
else:
return False | 67643d1ccf8d1ffe23ddc503cd8e9f4dc4e98707 | 13,967 |
def has_genus_flag(df, genus_col="mhm_Genus", bit_col="mhm_HasGenus", inplace=False):
"""
Creates a bit flag: `mhm_HasGenus` where 1 denotes a recorded Genus and 0 denotes the contrary.
Parameters
----------
df : pd.DataFrame
A mosquito habitat mapper DataFrame
genus_col : str, default="mhm_Genus"
The name of the column in the mosquito habitat mapper DataFrame that contains the genus records.
bit_col : str, default="mhm_HasGenus"
The name of the column which will store the generated HasGenus flag
inplace : bool, default=False
        Whether to modify the DataFrame in place. If True, no DataFrame copy is returned and the operation is performed in place.
Returns
-------
pd.DataFrame
A DataFrame with the HasGenus flag. If `inplace=True` it returns None.
"""
if not inplace:
df = df.copy()
df[bit_col] = (~pd.isna(df[genus_col].to_numpy())).astype(int)
if not inplace:
return df | 7e178f7570f8de436521047e012518e6f5ee6a72 | 13,968 |
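A quick pandas sketch of has_genus_flag(); the column names follow the defaults and the records are made up:

import numpy as np
import pandas as pd

mhm = pd.DataFrame({"mhm_Genus": ["Aedes", np.nan, "Culex", np.nan]})
flagged = has_genus_flag(mhm)
print(flagged["mhm_HasGenus"].tolist())  # [1, 0, 1, 0]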
from typing import Tuple
def compass(
size: Tuple[float, float] = (4.0, 2.0),
layer: Layer = gf.LAYER.WG,
port_type: str = "electrical",
) -> Component:
"""Rectangular contact pad with centered ports on rectangle edges
(north, south, east, and west)
Args:
size: rectangle size
layer: tuple (int, int)
port_type:
"""
c = gf.Component()
dx, dy = size
points = [
[-dx / 2.0, -dy / 2.0],
[-dx / 2.0, dy / 2],
[dx / 2, dy / 2],
[dx / 2, -dy / 2.0],
]
c.add_polygon(points, layer=layer)
c.add_port(
name="e1",
midpoint=[-dx / 2, 0],
width=dy,
orientation=180,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e2",
midpoint=[0, dy / 2],
width=dx,
orientation=90,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e3",
midpoint=[dx / 2, 0],
width=dy,
orientation=0,
layer=layer,
port_type=port_type,
)
c.add_port(
name="e4",
midpoint=[0, -dy / 2],
width=dx,
orientation=-90,
layer=layer,
port_type=port_type,
)
c.auto_rename_ports()
return c | fefa0842958fb91b870eb78e2170a81d7c8daaa9 | 13,969 |
def get_service(vm, port):
"""Return the service for a given port."""
for service in vm.get('suppliedServices', []):
if service['portRange'] == port:
return service | d617771c25c69ee874b0bc64adcc735aa876f929 | 13,970 |
async def async_setup_entry(hass, config_entry):
"""Set up AirVisual as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_API_KEY]
if not config_entry.options:
# If the config entry doesn't already have any options set, set defaults:
entry_updates["options"] = DEFAULT_OPTIONS
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
websession = aiohttp_client.async_get_clientsession(hass)
hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = AirVisualData(
hass, Client(websession, api_key=config_entry.data[CONF_API_KEY]), config_entry
)
try:
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
except InvalidKeyError:
_LOGGER.error("Invalid API key provided")
raise ConfigEntryNotReady
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
)
async def refresh(event_time):
"""Refresh data from AirVisual."""
await hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id].async_update()
hass.data[DOMAIN][DATA_LISTENER][config_entry.entry_id] = async_track_time_interval(
hass, refresh, DEFAULT_SCAN_INTERVAL
)
config_entry.add_update_listener(async_update_options)
return True | e09b0c8e499a055123a88503cac4d1d1492a3d53 | 13,971 |
def rotation_point_cloud(pc):
"""
Randomly rotate the point clouds to augment the dataset
rotation is per shape based along up direction
:param pc: B X N X 3 array, original batch of point clouds
:return: BxNx3 array, rotated batch of point clouds
"""
# rotated_data = np.zeros(pc.shape, dtype=np.float32)
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
# rotation_matrix = np.array([[cosval, 0, sinval],
# [0, 1, 0],
# [-sinval, 0, cosval]])
rotation_matrix = np.array([[1, 0, 0],
[0, cosval, -sinval],
[0, sinval, cosval]])
# rotation_matrix = np.array([[cosval, -sinval, 0],
# [sinval, cosval, 0],
# [0, 0, 1]])
rotated_data = np.dot(pc.reshape((-1, 3)), rotation_matrix)
return rotated_data | f1f84b9dad06bea6c377559d8b4a64be88031847 | 13,972 |
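A short NumPy check of rotation_point_cloud(): the rotation is rigid, so point norms are preserved, and the output is flattened to (N, 3) by the internal reshape:

import numpy as np

pc = np.random.rand(1, 128, 3)       # one batch of 128 random points
rotated = rotation_point_cloud(pc)   # shape (128, 3)

print(rotated.shape)
print(np.allclose(np.linalg.norm(pc.reshape(-1, 3), axis=1),
                  np.linalg.norm(rotated, axis=1)))  # True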
import time
def alliance_system_oneday(mongohandle, alliance_id, system):
"""find by corp and system - one day"""
allkills = mongohandle.allkills
system = int(system)
timeframe = 24 * 60 * 60
gmtminus = time.mktime(time.gmtime()) - timeframe
cursor = allkills.find({"alliance_id": alliance_id,
"solar_system_id": system,
"unix_kill_time": {
"$gte": gmtminus}},
{"ship": 1,
"items": 1,
"_id": 0}).hint('alliancesystemtime')
(ships, items, ammos) = parsecursor.ships_and_items(cursor)
return (ships, items, ammos) | b951f11f606352dc6614e1ff1c587c3a64ed1ea8 | 13,973 |
def slit_select(ra, dec, length, width, center_ra=0, center_dec=0, angle=0):
"""
:param ra: angular coordinate of photon/ray
:param dec: angular coordinate of photon/ray
:param length: length of slit
:param width: width of slit
:param center_ra: center of slit
:param center_dec: center of slit
:param angle: orientation angle of slit, angle=0 corresponds length in RA direction
:return: bool, True if photon/ray is within the slit, False otherwise
"""
ra_ = ra - center_ra
dec_ = dec - center_dec
x = np.cos(angle) * ra_ + np.sin(angle) * dec_
y = - np.sin(angle) * ra_ + np.cos(angle) * dec_
if abs(x) < length / 2. and abs(y) < width / 2.:
return True
else:
return False | a3047a59bbc8566d261f1d52f92b437ad2b26d52 | 13,974 |
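A tiny sketch of slit_select(), assuming NumPy is available as in the snippet's module; rotating the slit by 90 degrees swaps which axis the length runs along:

import numpy as np

print(slit_select(ra=0.4, dec=0.0, length=1.0, width=0.2))  # True: inside the horizontal slit
print(slit_select(ra=0.0, dec=0.4, length=1.0, width=0.2))  # False: outside the narrow width
print(slit_select(ra=0.0, dec=0.4, length=1.0, width=0.2,
                  angle=np.pi / 2))                         # True: slit rotated by 90 degrees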
def login():
""" Logs in user """
req = flask.request.get_json(force=True)
username = req.get('username', None)
password = req.get('password', None)
user = guard.authenticate(username, password)
ret = {'access_token': guard.encode_jwt_token(user)}
return ret, 200 | b577c7982bf65d3a24cfd3f116f5cb128079cd1f | 13,975 |
def statuses_filter(auth, **params):
"""
Collect tweets from the twitter statuses_filter api.
"""
endpoint = "https://stream.twitter.com/1.1/statuses/filter.json"
if "follow" in params and isinstance(params["follow"], (list, tuple)):
params["follow"] = list_to_csv(params["follow"])
if "track" in params and isinstance(params["track"], (list, tuple)):
params["track"] = list_to_csv(params["track"])
params.setdefault("delimited", 0)
params.setdefault("stall_warnings", 1)
return stream_call(endpoint, auth, params, "post") | e81f85d5c747a4bcca8fc9b3b82d362905404452 | 13,976 |
def adjust_hue(image, hue_factor):
"""Adjusts hue of an image.
The image hue is adjusted by converting the image to HSV and
cyclically shifting the intensities in the hue channel (H).
The image is then converted back to original image mode.
`hue_factor` is the amount of shift in H channel and must be in the
interval `[-0.5, 0.5]`.
Args:
image (PIL.Image): PIL Image to be adjusted.
hue_factor (float): How much to shift the hue channel. Should be in
[-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
HSV space in positive and negative direction respectively.
0 means no shift. Therefore, both -0.5 and 0.5 will give an image
with complementary colors while 0 gives the original image.
Returns:
PIL.Image: Hue adjusted image.
"""
if not (-0.5 <= hue_factor <= 0.5):
raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))
dtype = image.dtype
image = image.astype(np.uint8)
hsv_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV_FULL)
h, s, v = cv2.split(hsv_img)
    alpha = hue_factor  # np.random.uniform(hue_factor, hue_factor) always returns hue_factor
h = h.astype(np.uint8)
# uint8 addition take cares of rotation across boundaries
with np.errstate(over="ignore"):
h += np.uint8(alpha * 255)
hsv_img = cv2.merge([h, s, v])
return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2RGB_FULL).astype(dtype) | 52390b83a60cc8f23632f198a558b518d687f94e | 13,977 |
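A brief usage sketch for adjust_hue(), assuming cv2 and numpy are imported in the snippet's module; a solid red image is shifted a quarter turn around the hue circle:

import numpy as np

red = np.zeros((8, 8, 3), dtype=np.uint8)
red[..., 0] = 255                      # solid red RGB image

shifted = adjust_hue(red, 0.25)        # +0.25 rotates the hue toward green
print(shifted.dtype, shifted.shape)    # uint8 (8, 8, 3)
print(shifted[0, 0])                   # a yellow-green colour instead of red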
import json
import requests
import time
def lambda_handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
Event doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
#print("Received event: " + json.dumps(event, indent=2))
body = json.loads(event['body'])
print(f"Body is: {body}")
url = body['url']
print(f"Getting image from URL: {url}")
response = requests.get(url)
print("Load image into memory")
img = PILImage.create(BytesIO(response.content))
print("Doing forward pass")
start = time.time()
pred,pred_idx,probs = learn.predict(img)
end = time.time()
inference_time = np.round((end - start) * 1000, 2)
print(f'class: {pred}, probability: {probs[pred_idx]:.04f}')
print(f'Inference time is: {str(inference_time)} ms')
return {
"statusCode": 200,
"body": json.dumps(
{
"class": pred,
"probability": "%.4f" % probs[pred_idx]
}
),
} | 05b5da6e2c2aff16c43a3822978f0cd800370bed | 13,978 |
def compareDict(a, b):
"""
Compare two definitions removing the unique Ids from the entities
"""
ignore = ['Id']
_a = [hashDict(dict(x), ignore) for x in a]
_b = [hashDict(dict(y), ignore) for y in b]
_a.sort()
_b.sort()
return _a == _b | 19f0340064c95584a4e80ecb4a090c25944f6923 | 13,979 |
import traceback
import time
def create_twitter_auth(cf_t):
"""Function to create a twitter object
Args: cf_t is configuration dictionary.
Returns: Twitter object.
"""
# When using twitter stream you must authorize.
# these tokens are necessary for user authentication
# create twitter API object
auth = OAuth(cf_t['access_token'], cf_t['access_token_secret'], cf_t['consumer_key'], cf_t['consumer_secret'])
    twitter = None  # keep the name defined if authentication fails below
    try:
        # create twitter API object
        twitter = Twitter(auth=auth)
except TwitterHTTPError:
traceback.print_exc()
time.sleep(cf_t['sleep_interval'])
return twitter | 0eff78ce2dba182d739cc2bb082d5053a6a8847a | 13,980 |
def _project(doc, projection):
"""Return new doc with items filtered according to projection."""
def _include_key(key, projection):
for k, v in projection.items():
if key == k:
if v == 0:
return False
elif v == 1:
return True
else:
raise ValueError('Projection value must be 0 or 1.')
if projection and key != '_id':
return False
return True
return {k: v for k, v in doc.items() if _include_key(k, projection)} | 0f2cd190e73b39ceeec0f850054baab1dd357587 | 13,981 |
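A short sketch of the _project() helper; note it implements inclusion-style projections (keys marked 1 are kept, and '_id' is always kept):

doc = {"_id": 1, "name": "ada", "age": 36, "city": "London"}

print(_project(doc, {"name": 1}))  # {'_id': 1, 'name': 'ada'}
print(_project(doc, {}))           # empty projection keeps every key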
import random
def random_swap(words, n):
"""
Randomly swap two words in the sentence n times
    Args:
        words (list): tokenised sentence to augment
        n (int): number of swaps to perform
    Returns:
        list: a new list with n random pairs of words swapped
    """
def swap_word(new_words):
random_idx_1 = random.randint(0, len(new_words) - 1)
random_idx_2 = random_idx_1
counter = 0
while random_idx_2 == random_idx_1:
random_idx_2 = random.randint(0, len(new_words) - 1)
counter += 1
if counter > 3:
return new_words
new_words[random_idx_1], new_words[random_idx_2] = (
new_words[random_idx_2],
new_words[random_idx_1],
)
return new_words
new_words = words.copy()
for _ in range(n):
new_words = swap_word(new_words)
return new_words | d6916404c363176f13010d006cd61354dcd4e16e | 13,982 |
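A small sketch of random_swap() on a tokenised sentence; seeding the random module makes the run repeatable, and the input list is left untouched because the function works on a copy:

import random

random.seed(0)
words = "the quick brown fox jumps".split()
augmented = random_swap(words, n=2)

print(words)      # original order preserved
print(augmented)  # two random swaps applied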
def get_dist_for_angles(dict_of_arrays, clusters, roll, pitch, yaw, metric='3d', kind='max'):
"""
Calculate a single distance metric for a combination of angles
"""
if (dict_of_arrays['yaw_corr'] == 0).all():
rot_by_boresight = apply_boresight_same(dict_of_arrays, roll, pitch, yaw)
else:
rot_by_boresight = apply_boresight_yaw_correct(dict_of_arrays, roll, pitch, yaw)
rot_to_real_world = rotate_to_real_world(rot_by_boresight)
real_wrld_coords = shift_to_real_world(rot_to_real_world)
if kind == 'mean':
distance = get_mean_3D_distance(real_wrld_coords, clusters, metric)
elif kind == 'median':
distance = get_median_3D_distance(real_wrld_coords, clusters, metric)
else:
distance = get_max_3D_distance(real_wrld_coords, clusters, metric)
return distance | 4db8a68cebc845de942817eb9eb28e57d2db5cc4 | 13,983 |
import asyncio
async def stream():
"""Main streaming loop for PHD"""
while True:
if phd_client.is_connected and manager.active_connections:
response = await phd_client.get_responses()
if response is not None:
# Add to the websocket queue
# If it is the initial data, put in variable
if response.get('Event') == 'Version':
phd_client.initial_data = response
q.put_nowait(response)
await asyncio.sleep(STREAM_INTERVAL)
return None | 19e1934e8cb48fa66f8ab3f61ca013fd19b040fc | 13,984 |
def filter_camera_angle(places, angle=1.):
"""Filter pointclound by camera angle"""
bool_in = np.logical_and((places[:, 1] * angle < places[:, 0]),
(-places[:, 1] * angle < places[:, 0]))
return places[bool_in] | 9956c5b001989c5f64d935087a1e13ffbc6469b7 | 13,985 |
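A quick NumPy sketch of filter_camera_angle(): with the default angle of 1.0 it keeps points whose x coordinate exceeds |y|, i.e. a 90-degree wedge in front of the camera:

import numpy as np

places = np.array([
    [ 5.0,  1.0, 0.0],   # kept: well in front
    [ 1.0,  5.0, 0.0],   # dropped: too far to the side
    [ 2.0, -1.5, 0.0],   # kept: |y| < x
    [-3.0,  0.0, 0.0],   # dropped: behind the camera
])
print(filter_camera_angle(places))  # first and third rows only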
import numpy as np
import nibabel as nib

def load_nifti(path: str) \
        -> tuple[np.ndarray, np.ndarray, nib.nifti1.Nifti1Header]:
    """
    Load a NIfTI image using the nibabel library and return its
    data array, affine matrix and header.
    """
# Extract image
img = nib.load(path)
img_aff = img.affine
img_hdr = img.header
# Extract the actual data in a numpy array
data = img.get_fdata()
return data, img_aff, img_hdr | 9e76e3f6e6d200b3cd3be34b3780f8fe84cad53e | 13,986 |
def f5_list_policy_hostnames_command(client: Client, policy_md5: str) -> CommandResults:
"""
Get a list of all policy hostnames.
Args:
client (Client): f5 client.
policy_md5 (str): MD5 hash of the policy.
"""
result = client.list_policy_hostnames(policy_md5)
table_name = 'f5 data for listing policy hostname:'
readable_output, printable_result = build_command_result(result, table_name)
command_results = CommandResults(
outputs_prefix='f5.Hostname',
outputs_key_field='id',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results | 38263c85480ba5d7de8a21509820052444b4cdab | 13,987 |
def predict(m, count, s, A):
"""predict the chain after s
calculate the probability of a m-length chain,
then return chains.
CAUTION the number of chains maybe less then count
args:
m: the length of predict chain
count: the number of predict chain
s: the last element of the current chain
A: transition matrix
return:
some chains save in list
"""
process = []
start = {}
start[s] = [1, None]
process.append(start)
for i in range(m):
line = process[-1]
next_line = {}
for key in line.keys():
if A.get(key, None) is None:
continue
for k in A[key].keys():
p = next_line.get(k, [0, None])[0]
if p < A[key][k] * line[key][0]:
next_line[k] = [A[key][k] * line[key][0], key]
process.append(next_line)
ans = process[-1]
# sort according to probability from high to low
    ans = sorted(ans.items(), key=lambda item: item[1][0], reverse=True)  # items() for Python 3
if len(ans) == 0:
return None # Can't predict, because of no answer can be find
else:
count = min(len(ans), count) # the number of ans maybe less than count
chains = []
length = len(process)
for i in range(count):
elem = ans[i][0]
chain = get_chain(elem, length-1, process)
chains.append(chain[1:])
return chains | f45acc67c97204efdabb48f29d73277fb4b75967 | 13,988 |
import mimetypes
import gzip
import os

from Bio import SeqIO
def read_lengths_from_fastx_file(fastx_file):
"""
@param fastx_file: file path
@type fastx_file: str
@rtype: dict[str, int]
"""
file_type = mimetypes.guess_type(fastx_file)[1]
if file_type == 'gzip':
f = gzip.open(fastx_file, "rt")
elif not file_type:
f = open(fastx_file, "rt")
else:
raise RuntimeError("Unknown type of file: '{}".format(fastx_file))
length = {}
if os.path.getsize(fastx_file) == 0:
return length
file_format = None
line = f.readline()
if line.startswith('@'):
file_format = "fastq"
elif line.startswith(">"):
file_format = "fasta"
f.seek(0)
if not file_format:
raise RuntimeError("Invalid sequence file: '{}".format(fastx_file))
for seq_record in SeqIO.parse(f, file_format):
length[seq_record.id] = len(seq_record.seq)
f.close()
return length | 6aef86176269674a96a707bc5f7cbb9798237f57 | 13,989 |
def f_multidim(anchors, basis, distance_measurements, coeffs):
"""
:param anchors: anchors dim x N
:param basis: basis vectors K x M
:param distance_measurements: matrix of squared distances M x N
:param coeffs: coefficient matrix dim x K
:return: vector of differences between estimate distance and measured distance.
"""
assert basis.shape[0] == coeffs.shape[1]
assert anchors.shape[0] == coeffs.shape[0]
assert anchors.shape[1] == distance_measurements.shape[1]
assert basis.shape[1] == distance_measurements.shape[0]
X = coeffs.dot(basis) # is (dim x M)
diff = anchors[:, :, np.newaxis] - X[:, np.newaxis, :]
distance_estimates = np.linalg.norm(diff, axis=0)**2
diff = distance_measurements.T - distance_estimates
nnz_diffs = diff[distance_measurements.T > 0].flatten()
return nnz_diffs | cd9f7fa67e6cbf3cfb5fe14e53b019713c56aa26 | 13,990 |
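A compact NumPy check of f_multidim(): when the measured squared distances match the geometry exactly, every residual is zero (here dim=2, one basis vector, one point, two anchors; all values are made up):

import numpy as np

anchors = np.array([[0.0, 1.0],
                    [0.0, 0.0]])                # dim x N: anchors at (0, 0) and (1, 0)
basis = np.array([[1.0]])                       # K x M
coeffs = np.array([[1.0],
                   [1.0]])                      # dim x K, so the point X is (1, 1)
distance_measurements = np.array([[2.0, 1.0]])  # M x N squared distances to the anchors

print(f_multidim(anchors, basis, distance_measurements, coeffs))  # [0. 0.]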
def getHomography(indict, outdict, outsize=None):
"""Returns a transformation to go from input pts to output pts using a homography.
'indict' and 'outdict' should contain identical keys mapping to 2-tuples.
We create A:
x1 y1 1 0 0 0 -x1*x1' -y1*x1'
0 0 0 x1 y1 1 -x1*y1' -y1*y1'
x2 y2 1 0 0 0 -x2*x2' -y2*x2'
0 0 0 x2 y2 1 -x2*y2' -y2*y2'
...
And b:
[x1' y1' x2' y2' x3' y3' ...].T
Then solve for h in Ah = b using linear least squares, where h is:
[h11 h12 h13 h21 h22 h23 h31 h32].T
and h33 is 1.
Returns (h, Ah), where the 2nd term is the transformed locations of the inputs.
"""
# initialize both matrices
A = np.zeros((2*len(outdict), 8), dtype=np.double)
b = np.zeros((2*len(outdict), 1), dtype=np.double)
inputs, outputs = getFidsFromDicts(indict, outdict, outsize=outsize)
# copy over data
for i, ((xi, yi, _), (xo, yo, _)) in enumerate(zip(inputs, outputs)):
A[2*i,:] = [xi, yi, 1, 0, 0, 0, -xi*xo, -yi*xo]
A[2*i+1, :] = [0, 0, 0, xi, yi, 1, -xi*yo, -yi*yo]
b[2*i] = xo
b[2*i+1] = yo
#print A, A.shape, b, b.shape, inputs, inputs.shape
# Linear least squares solve
h, resids, rank, s = np.linalg.lstsq(A, b)
h = h.flatten()
ret = np.ones((3,3), dtype=np.double)
ret[:, :] = [h[:3], h[3:6], [h[6], h[7], 1.0]]
ret = ret.transpose()
# we need transposed version of h throughout
ah = np.dot(inputs, ret)
ah /= ah[:, -1:]
    if 0:  # debugging output, disabled by default
        print(h, len(h))
        print('ret\n', ret, ret.shape)
        print('normed ah\n', ah, ah.shape)
        print('outputs\n', outputs)
        print('inputs\n', inputs)
        print('diff %\n', 100.0 * (outputs - ah) / outputs)
return ret, ah | 709fad7ffba7047e8d2c15e79611c3ac897733b7 | 13,991 |
def variables_to_restore(scope=None, strip_scope=False):
"""Returns a list of variables to restore for the specified list of methods.
It is supposed that variable name starts with the method's scope (a prefix
returned by _method_scope function).
Args:
methods_names: a list of names of configurable methods.
strip_scope: if True will return variable names without method's scope.
If methods_names is None will return names unchanged.
model_scope: a scope for a whole model.
Returns:
a dictionary mapping variable names to variables for restore.
"""
if scope:
variable_map = {}
method_variables = slim.get_variables_to_restore(include=[scope])
for var in method_variables:
if strip_scope:
var_name = var.op.name[len(scope) + 1:]
else:
var_name = var.op.name
variable_map[var_name] = var
return variable_map
else:
return {v.op.name: v for v in slim.get_variables_to_restore()} | bc1f433b6a67898d8c010a56c6c51821f50df81a | 13,992 |
def from_strings(data, gaps="-", length=None, dtype=np.int8):
"""Convert a series of strings to an array of integer encoded alleles.
Parameters
----------
data : array_like, str
Sequence of strings of alleles.
gaps : str, optional
String of symbols to be interpreted as gaps in the sequence.
length : int, optional
Truncate or extend sequence to a set length by padding with gap values.
dtype : dtype, optional
Specify dtype of returned array.
Returns
-------
array : ndarray, int
Array of alleles encoded as integers.
"""
if isinstance(data, str):
return vector_from_string(data, gaps=gaps, length=length, dtype=dtype)
if isinstance(data, np.ndarray):
pass
else:
data = np.array(data, copy=False)
sequences = data.ravel()
# default to length of longest element
if length is None:
length = max(len(i) for i in sequences)
# number of sequences
n_seq = len(sequences)
# new array with gap as default
array = np.empty((n_seq, length), dtype=dtype)
for i in range(n_seq):
array[i] = vector_from_string(
sequences[i], gaps=gaps, length=length, dtype=dtype
)
shape = data.shape + (length,)
return array.reshape(shape) | 7405e208613aa75b132f686fcf5fe7451a4160cc | 13,993 |
def get_relationship_targets(item_ids, relationships, id2rec):
"""Get item ID set of item IDs in a relationship target set"""
# Requirements to use this function:
# 1) item Terms must have been loaded with 'relationships'
# 2) item IDs in 'item_ids' arguement must be present in id2rec
# 3) Arg, 'relationships' must be True or an iterable
reltgt_objs_all = set()
for goid in item_ids:
obj = id2rec[goid]
for reltype, reltgt_objs_cur in obj.relationship.items():
if relationships is True or reltype in relationships:
reltgt_objs_all.update(reltgt_objs_cur)
return reltgt_objs_all | 55542448af0eb2b46442bff0e0464361b669241a | 13,994 |
def cli(ctx, newick, analysis_id, name="", xref_db="null", xref_accession="", match_on_name=False, prefix=""):
"""Load a phylogenetic tree (Newick format) into Chado db
Output:
Number of inserted trees
"""
return ctx.gi.phylogeny.load_tree(newick, analysis_id, name=name, xref_db=xref_db, xref_accession=xref_accession, match_on_name=match_on_name, prefix=prefix) | 9b68dec5584a692f2fe04746d9bb179c9e002682 | 13,995 |
def roll_neighbors(sites, site, dims=None, radius=1):
""" N-dimensional pixel neighborhood
for periodic images on regular grids """
    index = np.unravel_index(site, dims)  # pass the shape positionally (the 'dims' keyword was removed in newer NumPy)
neighs = sites.take(nbr_range+index, axis=0, mode='wrap')
return neighs.flatten() | e653604c07f4824ef766c3a7f41a6c6c8a35bad0 | 13,996 |
import os
def extract_node_name(path, ignore_missing_nodes=False):
"""extracts the token after the 'nodes'"""
tokens = path.split(os.sep)
last_nodes_index = -1
for i, token in enumerate(tokens):
if token == "nodes":
last_nodes_index = i
if last_nodes_index == -1:
if ignore_missing_nodes:
return path
raise "path '%s' does not contain 'nodes' and " + "is not a valid diag tarball, so cannot determine the node" % path
try:
# we're interested in getting the token after nodes
return tokens[last_nodes_index + 1]
except IndexError:
raise "there is nothing after the 'nodes' entry of '%s'" % path | 0d81e46ef2812e5b087fdef5264ad20a3f3bef2d | 13,997 |
import scipy
import os
import tqdm
def run_model(model, raw_cohort, delta_encoder):
"""
Run the given model using the given cohort and experimental settings contained in args.
This function:
(1) balanced the dataset
(2) splits the cohort intro training:development:testing sets at the patient-level
(3) trains PRONTO and saves checkpoint/summaries for TensorBoard
(4) evaluates PRONTO on the development and testing set
:param model: an instantiated PRONTO model
:type model: modeling.PRONTOModel
:param raw_cohort: the cohort to use for this experimental run
:type raw_cohort: preprocess.Cohort
:param delta_encoder: encoder used to represented elapsed time deltas
:type delta_encoder: preprocess.DeltaEncoder
:return: nothing
"""
snapshot_sizes = []
for chronology in raw_cohort.chronologies():
for snapshot in chronology.snapshots:
snapshot_sizes.append(len(snapshot))
print('Statistics on snapshot sizes:', scipy.stats.describe(snapshot_sizes))
days_til_onset = []
for chronology in raw_cohort.chronologies():
seconds = 0
for delta in chronology.deltas:
seconds += delta
days_til_onset.append(seconds / 60 / 60 / 24)
print('Statistics on days until disease onset:', scipy.stats.describe(days_til_onset))
elapsed_times = []
for chronology in raw_cohort.chronologies():
for delta in chronology.deltas:
elapsed_times.append(delta / 60 / 60 / 24)
print('Statistics on elapsed time:', scipy.stats.describe(elapsed_times))
lengths = []
for chronology in raw_cohort.chronologies():
lengths.append(len(chronology))
print('Statistics on chronology lengths:', scipy.stats.describe(lengths))
# Balance the cohort to have an even number of positive/negative chronologies for each patient
cohort = raw_cohort.balance_chronologies()
# Split into training:development:testing
train, devel, test = make_train_devel_test_split(cohort.patients(), FLAGS.tdt_ratio)
# Save summaries and checkpoints into the directories passed to the script
model_file = 'ln=%d_delta=%s_d=%.2f_vd=%.2f_lr=%g_bs=%d' % (
1 if FLAGS.rnn_layer_norm else 0,
'disc' if FLAGS.use_discrete_deltas else 'tanh',
FLAGS.dropout,
FLAGS.vocab_dropout,
FLAGS.learning_rate,
FLAGS.batch_size,
)
model_summaries_dir = os.path.join(FLAGS.output_dir, FLAGS.optimizer, FLAGS.rnn_cell_type,
FLAGS.snapshot_encoder, model_file)
model_checkpoint_dir = os.path.join(FLAGS.output_dir, FLAGS.optimizer, FLAGS.rnn_cell_type,
FLAGS.snapshot_encoder, model_file, 'pronto_model')
# Clear any previous summaries/checkpoints if asked
if FLAGS.clear_prev:
nio.delete_dir_quiet(model_summaries_dir)
nio.delete_dir_quiet(model_checkpoint_dir)
print('Deleted previous model summaries/checkpoints')
# Make output directories so we don't blow up when saving
nio.make_dirs_quiet(model_checkpoint_dir)
# Instantiate PRONTO optimizer and summarizer classes
if FLAGS.optimizer == 'PRONTO':
optimizer = optimization.PRONTOOptimizer(model, learning_rate=FLAGS.learning_rate, sparse=True)
elif FLAGS.optimizer == 'BERT':
epoch_steps = len(cohort[train].make_epoch_batches(batch_size=FLAGS.batch_size,
max_snapshot_size=FLAGS.max_snapshot_size,
max_chrono_length=FLAGS.max_chrono_length,
delta_encoder=delta_encoder))
optimizer = optimization.BERTOptimizer(model,
num_train_steps=epoch_steps * FLAGS.num_epochs,
num_warmup_steps=epoch_steps * 3,
init_lr=FLAGS.learning_rate)
print('Created BERT-like optimizer with initial learning rate of %f' % FLAGS.learning_rate)
else:
raise NotImplementedError('No optimizer available for %s' % FLAGS.optimizer)
# noinspection PyUnboundLocalVariable
summarizer = summarization.PRONTOSummarizer(model, optimizer)
# Now that everything has been defined in TensorFlow's computation graph, initialize our model saver
saver = tf.train.Saver(tf.global_variables())
first_cohort = cohort
# Tell TensorFlow to wake up and get ready to rumble
with tf.Session() as sess:
# If we specified a TensorBoard debug server, connect to it
# (this is actually pretty sweet but you have to manually step through your model's flow so 99% of the time
# you shouldn't need it)
if FLAGS.debug is not None:
sess = tf_debug.TensorBoardDebugWrapperSession(sess, FLAGS.debug)
# Create our summary writer (used by TensorBoard)
summary_writer = tf.summary.FileWriter(model_summaries_dir, sess.graph)
# Restore model if it exists (and we didn't clear it), otherwise create a shiny new one
checkpoint = tf.train.get_checkpoint_state(model_checkpoint_dir)
if checkpoint and gfile.Exists(checkpoint.model_checkpoint_path + '.index'):
print("Reading model parameters from '%s'...", checkpoint.model_checkpoint_path)
saver.restore(sess, checkpoint.model_checkpoint_path)
else:
print("Creating model with fresh parameters...")
sess.run(tf.global_variables_initializer())
# Initialize local variables (these are just used for computing average metrics)
sess.run(tf.local_variables_initializer())
# Create a progress logger to monitor training (this is a wrapped version of range()
with trange(FLAGS.num_epochs, desc='Training') as train_log:
# Save the training, development, and testing metrics for our best model (as measured by devel F1)
# I'm lazy so I initialize best_devel_metrics with a zero F1 so I can compare the first iteration to it
best_train_metrics, best_devel_metrics, best_test_metrics = {}, {'F2': 0}, {}
# Iterate over training epochs
for i in train_log:
# Get global step and reset training metrics
global_step, _ = sess.run([optimizer.global_step, summarizer.train.reset_op])
# Log our progress on the current epoch using tqdm cohort.make_epoch_batches shuffles the order of
# chronologies and prepares them into mini-batches with zero-padding if needed
total_loss = 0.
batches = cohort[train].make_epoch_batches(batch_size=FLAGS.batch_size,
max_snapshot_size=FLAGS.max_snapshot_size,
max_chrono_length=FLAGS.max_chrono_length,
delta_encoder=delta_encoder)
num_batches = len(batches)
with tqdm(batches, desc='Epoch %d' % (i + 1)) as batch_log:
# Iterate over each batch
for j, batch in enumerate(batch_log):
# We train the model by evaluating the optimizer's training op. At the same time we update the
# training metrics and get metrics/summaries for the current batch and request the new global
# step number (used by TensorBoard to coordinate metrics across different runs
_, batch_summary, batch_metrics, global_step = sess.run(
[[optimizer.train_op, summarizer.train.metric_ops], # All fetches we aren't going to read
summarizer.batch_summary, summarizer.batch_metrics,
optimizer.global_step],
batch.feed(model, training=True))
# Update tqdm progress indicator with current training metrics on this batch
batch_log.set_postfix(batch_metrics)
# Save batch-level summaries
summary_writer.add_summary(batch_summary, global_step=global_step)
total_loss += batch_metrics['Loss']
# Save epoch-level training metrics and summaries
train_metrics, train_summary = sess.run([summarizer.train.metrics, summarizer.train.summary])
train_metrics['Loss'] = total_loss / num_batches
summary_writer.add_summary(train_summary, global_step=global_step)
# Re-sample chronologies in cohort
cohort = raw_cohort.balance_chronologies()
# Evaluate development performance
sess.run(summarizer.devel.reset_op)
# Update local variables used to compute development metrics as we process each batch
for devel_batch in first_cohort[devel].make_epoch_batches(batch_size=FLAGS.batch_size,
max_snapshot_size=FLAGS.max_snapshot_size,
max_chrono_length=FLAGS.max_chrono_length,
delta_encoder=delta_encoder):
sess.run([summarizer.devel.metric_ops], devel_batch.feed(model, training=False))
# Compute the development metrics
devel_metrics, devel_summary = sess.run([summarizer.devel.metrics, summarizer.devel.summary])
# Update training progress bar to indicate current performance on development set
train_log.set_postfix(devel_metrics)
# Save TensorBoard summary
summary_writer.add_summary(devel_summary, global_step=global_step)
def format_metrics(metrics: dict):
return dict((key, '%6.4f' % value) for key, value in metrics.items())
train_log.write('Epoch %d. Train: %s | Devel: %s' % (i + 1,
format_metrics(train_metrics),
format_metrics(devel_metrics)))
# Evaluate testing performance exactly as described above for development
sess.run(summarizer.test.reset_op)
for batch in first_cohort[test].make_epoch_batches(batch_size=FLAGS.batch_size,
max_snapshot_size=FLAGS.max_snapshot_size,
max_chrono_length=FLAGS.max_chrono_length,
delta_encoder=delta_encoder):
sess.run([summarizer.test.metrics, summarizer.test.metric_ops], batch.feed(model, training=False))
test_metrics, test_summary = sess.run([summarizer.test.metrics, summarizer.test.summary])
summary_writer.add_summary(test_summary, global_step=global_step)
# If this run did better on the dev set, save it as the new best model
if devel_metrics['F2'] > best_devel_metrics['F2']:
best_devel_metrics = devel_metrics
best_train_metrics = train_metrics
best_test_metrics = test_metrics
# Save the model
saver.save(sess, model_checkpoint_dir, global_step=global_step)
elif FLAGS.early_term:
tqdm.write('Early termination!')
break
print('Training complete!')
if FLAGS.print_performance:
print('Train: %s' % str(best_train_metrics))
print('Devel: %s' % str(best_devel_metrics))
print('Test: %s' % str(best_test_metrics))
if FLAGS.save_tabbed_results:
with open(os.path.join(model_summaries_dir, 'results.tsv'), 'w') as outfile:
print_table_results(best_train_metrics, best_devel_metrics, best_test_metrics, 'simple',
file=outfile)
if FLAGS.save_latex_results:
with open(os.path.join(model_summaries_dir, 'results.tex'), 'w') as outfile:
print_table_results(best_train_metrics, best_devel_metrics, best_test_metrics, 'latex_booktabs',
file=outfile) | 94aec871db5b4e57014444d57fb1ef6844f516a1 | 13,998 |
import requests
import json
def folder0_content(folder0_id, host, token):
"""
Modules
-------
    requests, json
Parameters
----------
folder0_id : Onedata folder level 0 id containing the data to publish.
host : OneData provider (e.g., ceta-ciemat-02.datahub.egi.eu).
token : OneData personal access token.
-------
Returns
-------
all_level0: "name" and "id" of the folders contained in the folder defined by "folder0_id"
"""
OneData_urlchildren = "https://" + host + '/api/v3/oneprovider/data/' + folder0_id + "/children"
request_param = {'X-Auth-Token': token}
r_level0 = requests.get(OneData_urlchildren, headers=request_param)
all_level0 = json.loads(r_level0.text)
return (all_level0) | 8ce6ae617666f936643b9599ae115e140b30bd2b | 13,999 |