content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
from typing import List
from typing import Union
def bytes_to_string(
bytes_to_convert: List[int], strip_null: bool = False
) -> Union[str, None]:
"""
    Literal bytes to string
    :param bytes_to_convert: list of bytes in integer format
    :param strip_null: if True, strip NUL ("\x00") characters from the result
    :return: resulting string, or None if the input cannot be converted
"""
try:
value = "".join(chr(i) for i in bytes_to_convert)
if strip_null:
return value.strip("\x00")
return value
# AttributeError when None object has no strip attribute
except (ValueError, TypeError, AttributeError):
return None | a04dee89fb8aed33b6069a7ff0ca8c497d0a6062 | 16,300 |
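A minimal usage sketch for the helper above (assumed examples, not part of the original snippet):

print(bytes_to_string([72, 101, 108, 108, 111]))           # 'Hello'
print(bytes_to_string([72, 105, 0, 0], strip_null=True))   # 'Hi'
print(bytes_to_string(None))                                # None (TypeError is caught)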
import numpy as np
from scipy.interpolate import interp1d

def interpolate(t, y, num_obs=50):
"""
Interpolates each trajectory such that observation times coincide for each one.
    Note: cubic interpolation initially gave great power, but this was an artifact of the interpolation
    (both trajectories end up with the same number of observations) and it inflated the Type I error.
    To avoid this we settled on linear interpolation between observations.
    Splines were also tried but gave very poor interpolations.
"""
t = np.array([np.sort(row) for row in t])
t = np.insert(t, 0, 0, axis=1)
t = np.insert(t, len(t[0]), 1, axis=1)
y = np.insert(y, 0, y[:,0], axis=1)
y = np.insert(y, len(y[0]), y[:,-1], axis=1)
new_t = np.zeros(num_obs)
new_y = np.zeros(num_obs)
for i in range(len(t)):
f = interp1d(t[i], y[i], kind='linear')
#f = splrep(t[i], y[i])
t_temp = np.random.uniform(low=0.0, high=1.0, size=num_obs)#np.linspace(0.1,0.9,num_obs)
y_temp = f(t_temp)
#y_temp = splev(t_temp, f, der=0)
new_y = np.vstack((new_y, y_temp))
new_t = np.vstack((new_t, t_temp))
return new_t[1:], new_y[1:] | 2418aaf207b214069f45571a21a2b97ecd25f244 | 16,301 |
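A hedged smoke test for the interpolation helper above, using synthetic trajectories and the numpy/scipy imports added to the snippet:

# Hypothetical example: 3 trajectories with 10 interior observation times each in (0, 1).
t_obs = np.random.uniform(0.05, 0.95, size=(3, 10))
y_obs = np.random.randn(3, 10)
new_t, new_y = interpolate(t_obs, y_obs, num_obs=50)
print(new_t.shape, new_y.shape)  # (3, 50) (3, 50)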
import re
def locktime_from_duration(duration):
"""
    Parses a duration string and returns a locktime duration in seconds
    @param duration: A string representing a duration in the format XXhXXmXXs
    @returns: number of seconds represented by the duration string
"""
if not duration:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
match = re.search(DURATION_REGX_PATTERN, duration)
if not match:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
values = match.groupdict()
result = 0
if values['hours']:
result += int(values['hours']) * 60 * 60
if values['minutes']:
result += int(values['minutes']) * 60
if values['seconds']:
result += int(values['seconds'])
return int(result) | c65339ee00e750e4425a68215b0600c71136ee68 | 16,302 |
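DURATION_TEMPLATE and DURATION_REGX_PATTERN are referenced above but not defined in this snippet; a plausible pair of module-level constants consistent with the parsing logic (assumed, not original) would be:

# Assumed definitions, not part of the original module.
DURATION_TEMPLATE = "XXhXXmXXs (each part optional, e.g. 1h30m15s or 45m)"
DURATION_REGX_PATTERN = r"^((?P<hours>\d+)h)?((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?$"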
def black_payers_swaption_value_fhess_by_strike(
init_swap_rate,
option_strike,
swap_annuity,
option_maturity,
vol):
"""black_payers_swaption_value_fhess_by_strike
Second derivative of value of payer's swaption with respect to strike
under black model.
See :py:func:`black_payers_swaption_value`.
.. math::
\\frac{\partial^{2} }{\partial K^{2}}
V_{\mathrm{payer}}(K; S, A, T, \sigma)
= - A\phi(d_{2}(K)) d_{2}^{\prime}(K)
where
:math:`S` is `init_swap_rate`,
:math:`K` is `option_strike`,
:math:`A` is `swap_annuity`,
:math:`T` is `option_maturity`,
:math:`\sigma` is `vol`,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_payers_swaption_value`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float init_swap_rate: initial swap rate.
:param float option_strike:
:param float swap_annuity:
:param float option_maturity:
    :param float vol: volatility. must be positive.
:return: value of derivative.
:rtype: float
:raises AssertionError: if volatility is not positive.
"""
assert(vol > 0.0)
value = mafipy.function.black_scholes_call_value_fhess_by_strike(
init_swap_rate, option_strike, 0.0, option_maturity, vol)
return swap_annuity * value | 0645992c65e9e13ee44ad3debfe30fb0b05bfae7 | 16,303 |
def get_resource(cls):
""" gets the resource of a timon class if existing """
if not cls.resources:
return None
resources = cls.resources
assert len(resources) == 1
return TiMonResource.get(resources[0]) | 370f0af23fcfe0bf5da3b39012a5e1e9c29b6f0e | 16,304 |
import numpy as np

# EPS is assumed to be a small module-level constant (e.g. machine epsilon).
def _log(x):
"""_log
    to prevent np.log(0), calculate np.log(x + EPS)
Args:
x (array)
Returns:
array: same shape as x, log equals np.log(x + EPS)
"""
if np.any(x < 0):
print("log < 0")
exit()
return np.log(x + EPS) | e7e7b963cf3cec02ace34256ccdf954a2d61dd4a | 16,305 |
import math
def gauss_distribution(x, mu, sigma):
"""
Calculate value of gauss (normal) distribution
Parameters
----------
x : float
Input argument
mu :
Mean of distribution
sigma :
Standard deviation
Returns
-------
    float
        Probability density of the normal distribution at x
    """
    # Normal density: 1 / (sigma * sqrt(2 * pi)) * exp(-0.5 * ((x - mu) / sigma) ** 2)
    return 1 / (math.sqrt(2 * math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2)
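A quick sanity check of the (corrected) density above; the standard normal peaks at 1/sqrt(2*pi), roughly 0.3989:

print(gauss_distribution(0.0, 0.0, 1.0))  # ~0.3989
print(gauss_distribution(1.0, 0.0, 1.0))  # ~0.2420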
def extend_vocab_OOV(source_words, word2id, vocab_size, max_unk_words):
"""
Map source words to their ids, including OOV words. Also return a list of OOVs in the article.
WARNING: if the number of oovs in the source text is more than max_unk_words, ignore and replace them as <unk>
Args:
source_words: list of words (strings)
word2id: vocab word2id
vocab_size: the maximum acceptable index of word in vocab
    Returns:
        src_ext: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
        oov_dict: A dict mapping each OOV word (string) to its temporary article OOV number.
        oov_list: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.
"""
src_ext = []
oov_dict = {}
for w in source_words:
        if w in word2id and word2id[w] < vocab_size:  # in-vocab word (an OOV is either absent from the vocab or has id >= vocab_size)
src_ext.append(word2id[w])
else:
if len(oov_dict) < max_unk_words:
# e.g. 50000 for the first article OOV, 50001 for the second...
word_id = oov_dict.get(w, len(oov_dict) + vocab_size)
oov_dict[w] = word_id
src_ext.append(word_id)
else:
# exceeds the maximum number of acceptable oov words, replace it with <unk>
word_id = word2id[UNK_WORD]
src_ext.append(word_id)
oov_list = [w for w, w_id in sorted(oov_dict.items(), key=lambda x:x[1])]
return src_ext, oov_dict, oov_list | 2d1b92d9d6b9b3885a7dda6c8d72d80d3b8ecad0 | 16,307 |
def isint(s):
"""**Returns**: True if s is the string representation of an integer
:param s: the candidate string to test
**Precondition**: s is a string
"""
    try:
        int(s)
        return True
    except ValueError:
        return False
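A few illustrative calls (assumed, not original):

print(isint("42"))     # True
print(isint("4.2"))    # False
print(isint("forty"))  # False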
def create_app(config_name):
"""function creating the flask app"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(config[config_name])
app.config.from_pyfile('config.py')
app.register_blueprint(v2)
app.register_error_handler(404, not_found)
app.register_error_handler(405, bad_request)
app.register_error_handler(500, internal_server_error)
db_conn.create_tables()
return app | 7e49a1ee9bae07a7628842855c3524794efaa9c5 | 16,309 |
def build_attention_network(features2d,
attention_groups,
attention_layers_per_group,
is_training):
"""Builds attention network.
Args:
features2d: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
attention_groups: (Integer) Number of network groups.
attention_layers_per_group: (Integer) Number of layers per group.
is_training: (Boolean) To indicate training or inference modes.
Returns:
features_embedded: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
"""
channels = features2d.shape.as_list()[-1]
with tf.variable_scope("attention_network"):
features_embedded = features2d
for i in range(attention_groups):
filters = channels // 2**(i+1)
for j in range(attention_layers_per_group):
features_embedded = tf.layers.conv2d(
features_embedded,
filters=filters,
kernel_size=3 if j == (attention_layers_per_group-1)
else 1,
strides=1,
dilation_rate=(2, 2) if j == (attention_layers_per_group-1)
else (1, 1),
activation=None,
use_bias=False,
name="features2d_embedding%d_%d" %(i, j),
padding="same")
features_embedded = tf.layers.batch_normalization(
features_embedded, training=is_training,
momentum=MOMENTUM, epsilon=EPS,
name="features2d_embedding%d_%d" %(i, j))
features_embedded = tf.nn.relu(features_embedded)
tf.logging.info("Constructing layer: %s", features_embedded)
return features_embedded | c665994b88027c24ed86e01514fa3fc176a3258a | 16,310 |
def get_catalog_config(catalog):
"""
get the config dict of *catalog*
"""
return resolve_config_alias(available_catalogs[catalog]) | 4d36bc8be8ca2992424f0b97f28d3ac8d852c027 | 16,311 |
def manhatten(type_profile, song_profile):
"""
    Calculate the Manhattan distance between the profile of a specific
    output_columns value (e.g. a specific composer) and the profile of a
song
"""
# Sort profiles by frequency
type_profile = type_profile.most_common()
song_profile = song_profile.most_common()
flat_type_profile = [ngram for (ngram, freq) in type_profile]
flat_song_profile = [ngram for (ngram, freq) in song_profile]
manhatten = 0
for i in range(len(flat_song_profile)):
ngram = flat_song_profile[i]
if ngram in flat_type_profile:
manhatten += abs(flat_type_profile.index(ngram) - i)
else:
manhatten += abs(len(flat_type_profile) - i)
return manhatten | 4703585f9f60551bf2a5e2762612d45efb47a453 | 16,312 |
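A small usage sketch with Counter-based n-gram profiles (assumed example inputs):

from collections import Counter

type_profile = Counter({"ab": 5, "bc": 3, "cd": 1})
song_profile = Counter({"bc": 4, "ab": 2, "zz": 1})
print(manhatten(type_profile, song_profile))  # 3 (sum of rank displacements)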
def raven(request):
"""lets you know whether raven is being used"""
return {
'RAVEN': RAVEN
} | 3e047db45a597cf808e5227b358a9833fc0a4fc3 | 16,313 |
from typing import Union

import numpy as np

def _non_max_suppress_mask(
    bbox: np.ndarray,
    scores: np.ndarray,
    classes: np.ndarray,
    masks: Union[np.ndarray, None],
filter_class: int,
iou: float = 0.8,
confidence: float = 0.001,
) -> tuple:
"""Perform non max suppression on the detection output if it is mask.
:param bbox: Bbox outputs.
:param scores: Score outputs.
:param classes: Class outputs.
:param masks: Mask outputs
:param filter_class: The specific class required.
:param iou: The intersection of union value to be considered.
:param confidence: The confidence threshold for scores.
:returns: tuple of suppressed bbox, suppressed scores,
suppressed classes, and suppressed masks.
"""
filter_idx = _filter_class_and_zero_scores(
scores,
classes,
filter_class,
confidence,
)
scores_filter = np.array(np.array(scores)[filter_idx])
bbox_filter = np.array(np.array(bbox)[filter_idx])
classes_filter = np.array(np.array(classes)[filter_idx])
masks_filter = np.array(np.array(masks)[filter_idx])
areas = np.empty(masks_filter.shape[0])
for index, mask in enumerate(masks_filter):
areas[index] = np.count_nonzero(mask)
sorted_scores = scores_filter.argsort()[::-1]
keep = []
while sorted_scores.size > 0:
score = sorted_scores[0]
# keep the largest sorted score (sorted_scores[0] represent the largest score)
keep.append(score)
# with:
# x = [0 0 1 1] and y = [0 1 1 0],
# the intersect is x && y element-wise -> [0 0 1 0]
intersect = np.empty_like(sorted_scores[1:])
for index, others in enumerate(masks_filter[sorted_scores[1:]]):
intersect[index] = np.count_nonzero(
np.logical_and(masks_filter[score], others)
)
overlap = intersect / (
areas[score] + areas[sorted_scores[1:]] - intersect
)
sorted_scores = sorted_scores[
np.union1d(
np.where(overlap <= 1 - iou)[0],
np.where(
classes_filter[sorted_scores[1:]] != classes_filter[score]
),
)
+ 1
]
detection_boxes = list(map(tuple, bbox_filter[keep]))
detection_scores = list(scores_filter[keep])
detection_classes = list(classes_filter[keep])
detection_masks = list(masks_filter[keep])
detection_boxes = [
(float(item[0]), float(item[1]), float(item[2]), float(item[3]))
for item in detection_boxes
]
detection_scores = [float(item) for item in detection_scores]
detection_classes = [int(item) for item in detection_classes]
return (
detection_boxes,
detection_scores,
detection_classes,
detection_masks,
) | 742261f1854f2ad6d01046926c6017b72a1917a4 | 16,314 |
import sys
import os
import csv
def main(args):
"""
In order to detect discontinuity, two lines are loaded. If the timestamp differs
by more than the threshold set by -time-threshold, then the distance between
points is calculated. If the distance is greater than the threshold set by
-distance-threshold then the points are assumed to be discontinuous.
Currently the output file is just a count of MMSI's and number of discontinuous points
"""
global UTIL_NAME
#/* ----------------------------------------------------------------------- */#
#/* Print usage
#/* ----------------------------------------------------------------------- */#
    if len(args) == 0:
return print_usage()
#/* ----------------------------------------------------------------------- */#
#/* Defaults
#/* ----------------------------------------------------------------------- */#
write_mode = 'w'
skip_lines = 0
overwrite_mode = False
assign_srs_from_cmdl = 'EPSG:4326'
time_threshold = 269200
distance_threshold = 1
quiet_mode = False
output_product = 'csv'
input_file_format = None
#/* ----------------------------------------------------------------------- */#
#/* Containers
#/* ----------------------------------------------------------------------- */#
input_file = None
output_file = None
input_schema = None
valid_output_products = ('frequency', 'csv', 'csv-no-schema', 'newline', 'flag-csv', 'flag-no-schema', 'flag-newline')
valid_input_file_formats = ('csv', 'newline', 'json')
#/* ----------------------------------------------------------------------- */#
#/* Parse arguments
#/* ----------------------------------------------------------------------- */#
i = 0
arg = None
arg_error = False
while i < len(args):
try:
arg = args[i]
# Help arguments
if arg in ('--help', '-help'):
return print_help()
elif arg in ('--usage', '-usage'):
return print_usage()
elif arg in ('--long-usage', '-long-usage', '-lu'):
return print_long_usage()
# Algorithm settings
elif arg in ('-tt', '-time-threshold'):
i += 2
time_threshold = int(args[i - 1])
elif arg in ('-dt', '-distance-threshold'):
i += 2
distance_threshold = int(args[i - 1])
# Define the output schema
elif arg in ('-s', '-schema', '-header'):
i += 2
input_schema = args[i - 1].split(',')
# Skip lines in input file
elif arg in ('-sl', '-skip-lines'):
i += 2
skip_lines = int(args[i - 1])
# Determine if reading from stdin
elif arg == '-' and not input_file and sys.stdin.isatty():
i += 1
arg_error = True
print("ERROR: Trying to read from empty stdin")
# Additional options
elif arg in ('-q', '-quiet'):
i += 1
quiet_mode = True
elif arg in ('-overwrite', '--overwrite'):
i += 1
overwrite_mode = True
elif arg in ('-a-srs', '-assign-srs'):
i += 2
assign_srs_from_cmdl = args[i - 1]
elif arg in ('-wm', '-write-mode'):
i += 2
write_mode = args[i - 1]
elif arg in ('-op', '-output-product'):
i += 2
output_product = args[i - 1].lower()
elif arg in ('-ff', '-file-format'):
i += 2
input_file_format = args[i - 1]
elif arg == '-stdin':
i += 1
input_file = '-'
elif arg == '-stdout':
i += 1
output_file = '-'
# Catch invalid arguments
elif arg[0] == '-' and arg != '-':
i += 1
arg_error = True
print("ERROR: Unrecognized argument: %s" % arg)
# Positional arguments and errors
else:
i += 1
# Catch input file
if input_file is None:
if arg == '-':
input_file = arg
else:
input_file = abspath(expanduser(arg))
# Catch output file
elif output_file is None:
if arg == '-':
output_file = arg
else:
output_file = abspath(expanduser(arg))
# Unrecognized argument
else:
arg_error = True
print("ERROR: Unrecognized argument: %s" % arg)
# This catches several conditions:
# 1. The last argument is a flag that requires parameters but the user did not supply the parameter
# 2. The arg parser did not properly consume all parameters for an argument
# 3. The arg parser did not properly iterate the 'i' variable
# 4. An argument split on '=' doesn't have anything after '=' - e.g. '--output-file='
except (IndexError, ValueError):
i += 1
arg_error = True
print("ERROR: An argument has invalid parameters: %s" % arg)
#/* ----------------------------------------------------------------------- */#
#/* Transform arguments
#/* ----------------------------------------------------------------------- */#
# Attempt to sniff file type
if not input_file_format and input_file != '-':
input_file_format = input_file.rsplit('.')[-1]
#/* ----------------------------------------------------------------------- */#
#/* Validate parameters
#/* ----------------------------------------------------------------------- */#
bail = False
# Check arguments
if arg_error:
bail = True
print("ERROR: Did not successfully parse arguments")
# Create SRS to apply to points
try:
assign_srs = osr.SpatialReference()
assign_srs.SetFromUserInput(str(assign_srs_from_cmdl))
except RuntimeError:
assign_srs = None
bail = True
print("Invalid assign SRS: '%s'" % assign_srs_from_cmdl)
# Check algorithm options
if not 0 <= time_threshold:
bail = True
print("ERROR: Invalid time threshold - must be >= 0: '%s'" % time_threshold)
if not 0 <= distance_threshold:
bail = True
print("ERROR: Invalid distance threshold - must be >= 0: '%s'" % distance_threshold)
# Check output product options
if output_product not in valid_output_products:
bail = True
print("ERROR: Invalid output product: '%s'" % output_product)
print(" Options: %s" % ', '.join(valid_output_products))
# Check input file format
if input_file_format not in valid_input_file_formats:
bail = True
print("ERROR: Invalid input file format: '%s'" % input_file_format)
print(" Options: %s" % ', '.join(valid_input_file_formats))
# Check input files
if input_file is None:
bail = True
print("ERROR: Need an input file")
elif input_file != '-' and not os.access(input_file, os.R_OK):
bail = True
print("ERROR: Can't access input file: '%s'" % input_file)
# Check output file
if output_file is None:
bail = True
print("ERROR: Need an output file")
elif output_file != '-' and not overwrite_mode and isfile(output_file):
bail = True
print("ERROR: Overwrite=%s but output file exists: '%s'" % (overwrite_mode, output_file))
elif output_file != '-' and isfile(output_file) and not os.access(output_file, os.W_OK):
bail = True
print("ERROR: Need write access for output file: '%s'" % output_file)
elif output_file != '-' and not isfile(output_file) and not os.access(dirname(output_file), os.W_OK):
bail = True
print("ERROR: Need write access for output dir: '%s'" % dirname(output_file))
# Exit if something did not pass validation
if bail:
return 1
#/* ----------------------------------------------------------------------- */#
#/* Prepare data
#/* ----------------------------------------------------------------------- */#
# Be absolutely sure quiet mode is on if the output is stdout, otherwise the output will contain user feedback
if output_file == '-':
quiet_mode = True
if not quiet_mode:
print("Input file: %s" % input_file)
print("Output file: %s" % output_file)
print("Schema: %s" % (','.join(input_schema) if isinstance(input_schema, (list, tuple)) else input_schema))
# Get line count, which is only used when writing to a file and NOT for stdout
prog_total = 0
if not quiet_mode and output_file != '-':
with sys.stdin if input_file == '-' else open(input_file) as i_f:
for row in i_f:
prog_total += 1
# Remove the number of skipped lines and CSV header
prog_total -= skip_lines
if input_schema is None:
prog_total -= 1
#/* ----------------------------------------------------------------------- */#
#/* Process data
#/* ----------------------------------------------------------------------- */#
flag_field = UTIL_NAME
flag_val = 1
# Open input file or stdin
with sys.stdin if input_file == '-' else open(input_file) as i_f:
# Open output file or stdin
with sys.stdout if output_file == '-' else open(output_file, write_mode) as o_f:
# Construct a reader
if input_file_format == 'json':
try:
reader = NewlineJSONReader(i_f)
fieldnames = reader.fieldnames
except ValueError:
print("ERROR: Input file format is '%s' but could not be decoded" % input_file_format)
return 1
elif input_file_format == 'csv':
if input_schema:
reader = csv.DictReader(i_f, fieldnames=input_schema)
else:
reader = csv.DictReader(i_f)
fieldnames = reader.fieldnames
else:
raise IOError("Could not determine input file format - valid formats are newline delimited JSON and CSV")
# Make sure the writer has the flag field if necessary
if 'flag' in output_product:
writer_fieldnames = reader.fieldnames + [flag_field]
else:
writer_fieldnames = reader.fieldnames
# Construct a writer for the output product
if output_product == 'frequency':
# The 'frequency' writer is established later once all data is collected
pass
elif 'csv' in output_product:
writer = csv.DictWriter(o_f, writer_fieldnames)
if output_product in ('csv', 'flag-csv'):
writer.writeheader()
elif 'newline' in output_product:
writer = NewlineJSONWriter(o_f, writer_fieldnames)
else:
raise IOError("Invalid output product: '%s'" % output_product)
# Loop over input file
discontinuity_counts = {}
last_row = None
for prog_i, row in enumerate(reader):
# Only process rows once the proper number of lines has been skipped
if prog_i >= skip_lines:
# Update user, but NOT if writing to stdout
if not quiet_mode and output_file != '-':
sys.stdout.write("\r\x1b[K" + " %s/%s" % (prog_i, prog_total))
sys.stdout.flush()
# Compare MMSI values - if they don't match then re-set the last row to start processing the new MMSI
try:
if last_row and row['mmsi'] != last_row['mmsi']:
last_row = None
except KeyError:
print(row)
print(last_row)
return 1
# If flagging output, make sure all rows contain the field
if 'flag' in output_product:
row[flag_field] = ''
# Normal processing
if last_row is not None and is_discontinuous(row, last_row, tt=time_threshold,
dt=distance_threshold, a_srs=assign_srs):
# Flag output
if 'flag' in output_product:
row[flag_field] = flag_val
# Collect frequency counts
if output_product == 'frequency':
if row['mmsi'] not in discontinuity_counts:
discontinuity_counts[row['mmsi']] = 1
else:
discontinuity_counts[row['mmsi']] += 1
                        # Write discontinuous row
else:
writer.writerow(row)
# Make sure all rows are written when flagging output
                    # This also catches MMSIs containing only a single point AND the first row of every MMSI
elif 'flag' in output_product:
writer.writerow(row)
# Mark the row just processed as the last row in preparation for processing the next row
last_row = row.copy()
#/* ----------------------------------------------------------------------- */#
#/* Dump results if output product is 'frequency'
#/* ----------------------------------------------------------------------- */#
if output_product == 'frequency':
writer = csv.DictWriter(o_f, ['mmsi', 'count'])
writer.writeheader()
                for mmsi, count in discontinuity_counts.items():
writer.writerow({'mmsi': mmsi, 'count': count})
#/* ----------------------------------------------------------------------- */#
#/* Cleanup and return
#/* ----------------------------------------------------------------------- */#
if not quiet_mode:
print(" - Done")
return 0 | 0371b44a1855bce71e411c24413fabd100d9ad79 | 16,315 |
def _mark_untranslated_strings(translation_dict):
"""Marks all untranslated keys as untranslated by surrounding them with
lte and gte symbols.
This function modifies the translation dictionary passed into it in-place
and then returns it.
"""
# This was a requirement when burton was written, but may be an unwanted
# side effect for other projects that adopt burton. We should replace it
# with something more flexible.
for key in translation_dict:
if key is not None and translation_dict[key] is None:
translation_dict[key] = u"\u2264" + key + u"\u2265"
return translation_dict | d15ac2d0fe8d50d5357bcc1e54b9666f7076aefd | 16,316 |
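A small usage sketch showing the in-place marking (assumed example data):

translations = {"Hello": "Bonjour", "Goodbye": None}
print(_mark_untranslated_strings(translations))
# {'Hello': 'Bonjour', 'Goodbye': '≤Goodbye≥'}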
import warnings
import codecs
def build(app, path):
"""
Build and return documents without known warnings
:param app:
:param path:
:return:
"""
with warnings.catch_warnings():
# Ignore warnings emitted by docutils internals.
warnings.filterwarnings(
"ignore",
"'U' mode is deprecated",
DeprecationWarning)
app.build()
#return (app.outdir / path).read_text()
with codecs.open((app.outdir / path), 'r', encoding='utf-8') as content_file:
return content_file.read() | 09049aad0d46d07144c3d564deb0e5aaf1b828ca | 16,317 |
import argparse
def get_args():
"""get command-line arguments"""
parser = argparse.ArgumentParser(
description='Fasta parser for GC content',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'fasta', metavar='FILE', nargs='+', help='FASTA file(s)')
parser.add_argument(
'-o',
'--outdir',
help='Output directory',
metavar='DIR',
type=str,
default='out')
parser.add_argument(
'-p',
'--pct_gc',
help='Dividing line for percent GC',
metavar='int',
type=int,
default=50)
return parser.parse_args() | 6222faed678045afb430bd993df1b53f12130ddc | 16,318 |
import numpy as np

def SMWatConstrained(CSM, ci, cj, matchFunction, hvPenalty=-0.3, backtrace=False):
"""
Implicit Smith Waterman alignment on a binary cross-similarity matrix
with constraints
:param CSM: A binary N x M cross-similarity matrix
:param ci: The index along the first sequence that must be matched to cj
:param cj: The index along the second sequence that must be matched to ci
:param matchFunction: A function that scores matching/mismatching
:param hvPenalty: The amount by which to penalize horizontal/vertical moves
    :returns: dict with 'score' (scalar), the two dynamic programming matrices 'D1' and 'D2', and, if backtrace is True, the combined 'path'
"""
res1 = SMWat(CSM[0:ci+1, 0:cj+1], matchFunction, hvPenalty, backtrace = backtrace, backidx = [ci+1, cj+1])
CSM2 = np.fliplr(np.flipud(CSM[ci::, cj::]))
res2 = SMWat(CSM2, matchFunction, hvPenalty, backtrace = backtrace, backidx = [CSM2.shape[0], CSM2.shape[1]])
res = {'score':res1['D'][-1, -1] + res2['D'][-1, -1]}
res['D1'] = res1['D']
res['D2'] = res2['D']
if backtrace:
path2 = [[ci+1+(CSM2.shape[0]+1-x), cj+1+(CSM2.shape[1]+1-y)] for [x, y] in res2['path']]
res['path'] = res1['path'] + path2
return res | a66f17bb40e201a6758c1add4a1590672724dc3e | 16,319 |
def check_images(
coords,
species,
lattice,
PBC=[1, 1, 1],
tm=Tol_matrix(prototype="atomic"),
tol=None,
d_factor=1.0,
):
"""
Given a set of (unfiltered) frac coordinates, checks if the periodic images are too close.
Args:
coords: a list of fractional coordinates
species: the atomic species of each coordinate
lattice: a 3x3 lattice matrix
PBC: the periodic boundary conditions
tm: a Tol_matrix object
tol: a single override value for the distance tolerances
d_factor: the tolerance is multiplied by this amount. Larger values
mean atoms must be farther apart
Returns:
False if distances are too close. True if distances are not too close
"""
# If no PBC, there are no images to check
if PBC == [0, 0, 0]:
return True
# Create image coords from given coords and PBC
coords = np.array(coords)
m = create_matrix(PBC=PBC, omit=True)
new_coords = []
new_species = []
for v in m:
for v2 in coords + v:
new_coords.append(v2)
new_coords = np.array(new_coords)
# Create a distance matrix
dm = distance_matrix(coords, new_coords, lattice, PBC=[0, 0, 0])
# Define tolerances
if tol is None:
tols = np.zeros((len(species), len(species)))
for i, s1 in enumerate(species):
for j, s2 in enumerate(species):
if i <= j:
tols[i][j] = tm.get_tol(s1, s2)
tols[j][i] = tm.get_tol(s1, s2)
tols2 = np.tile(tols, int(len(new_coords) / len(coords)))
if (dm < tols2).any():
return False
else:
return True
elif tol is not None:
if (dm < tol).any():
return False
else:
return True
return True | 20f3ada0aa391d989b638a835581226bd79439f7 | 16,320 |
import numpy as np

def get_hamming_distances(genomes):
"""Calculate pairwise Hamming distances between the given list of genomes
and return the nonredundant array of values for use with scipy's squareform function.
Bases other than standard nucleotides (A, T, C, G) are ignored.
Parameters
----------
genomes : list
a list of strings corresponding to genomes that should be compared
Returns
-------
list
a list of distinct Hamming distances as a vector-form distance vector
>>> genomes = ["ATGCT", "ATGCT", "ACGCT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
>>> genomes = ["AT-GCT", "AT--CT", "AC--CT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
"""
# Define an array of valid nucleotides to use in pairwise distance calculations.
# Using a numpy array of byte strings allows us to apply numpy.isin later.
nucleotides = np.array([b'A', b'T', b'C', b'G'])
# Convert genome strings into numpy arrays to enable vectorized comparisons.
genome_arrays = [
np.frombuffer(genome.encode(), dtype="S1")
for genome in genomes
]
# Precalculate positions of valid bases (A, T, C, and G) in each genome to speed up later comparisons.
valid_bases = [
np.isin(genome_array, nucleotides)
for genome_array in genome_arrays
]
# Calculate Hamming distance between all distinct pairs of genomes at valid bases.
# The resulting list is a reduced representation of a symmetric matrix that can be
# converted to a square matrix with scipy's squareform function:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
hamming_distances = []
for i in range(len(genomes)):
# Only compare the current genome, i, with all later genomes.
# This avoids repeating comparisons or comparing each genome to itself.
for j in range(i + 1, len(genomes)):
# Find all mismatches between these two genomes.
mismatches = genome_arrays[i] != genome_arrays[j]
# Count the number of mismatches where both genomes have valid bases.
hamming_distances.append((mismatches & valid_bases[i] & valid_bases[j]).sum())
return hamming_distances | dad2e9583bd7fcbbbb87dd93d180e4de39ea3083 | 16,321 |
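As the docstring notes, the vector-form result pairs with scipy's squareform; a short sketch (scipy assumed available):

from scipy.spatial.distance import squareform

genomes = ["ATGCT", "ATGCT", "ACGCT"]
print(squareform(get_hamming_distances(genomes)))
# [[0 0 1]
#  [0 0 1]
#  [1 1 0]]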
from typing import Dict
def serialize(name: str, engine: str) -> Dict:
"""Get dictionary serialization for a dataset locator.
Parameters
----------
name: string
Unique dataset name.
engine: string
Unique identifier of the database engine (API).
Returns
-------
dict
"""
return {'name': name, 'database': engine} | 9ab11318050caf3feb4664310e491ed48e7e5357 | 16,322 |
import torch
def repackage_hidden(h):
"""
Wraps hidden states in new Variables, to detach them from their history.
"""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(v.detach() for v in h) | 0ab8cffeaafaf6f39e2938ce2005dbca1d3d7496 | 16,323 |
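A hedged sketch of the typical truncated-BPTT usage: detach the hidden state between batches so gradients do not flow through the entire history.

rnn = torch.nn.LSTM(input_size=8, hidden_size=16, num_layers=1)
x = torch.randn(5, 3, 8)  # (seq_len, batch, features)
hidden = (torch.zeros(1, 3, 16), torch.zeros(1, 3, 16))
out, hidden = rnn(x, hidden)
hidden = repackage_hidden(hidden)  # tuple of detached tensors, ready for the next batch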
import shutil
import os
import stat
from unittest import mock  # `main` and `run_command` are assumed to come from the project under test
def test_exception_during_cleanup(capfd):
""" Report exceptions during cleanup to stderr """
original_rmtree = shutil.rmtree
delete_paths = []
def make_directory_unremoveable(path, *args, **kwargs):
os.chmod(path, stat.S_IRUSR) # remove x permission so we can't delete directory
delete_paths.append(path)
return original_rmtree(path, *args, **kwargs)
try:
with mock.patch.object(main.shutil, 'rmtree', make_directory_unremoveable):
run_command("- echo hello")
out, err = capfd.readouterr()
assert "Unable to remove" in err
finally:
for path in delete_paths:
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IEXEC)
shutil.rmtree(path) | 668643b56cce15c75ea4b2560aa7c58c4b8cff62 | 16,324 |
def support_acctgroup_acctproject(version):
"""
Whether this Lustre version supports acctgroup and acctproject
"""
if version.lv_name == "es2":
return False
return True | 858ec772a90e66431731ffcdd145fa7e56daad02 | 16,325 |
def decodeInventoryEntry_level1(document):
"""
Decodes a basic entry such as: '6 lobster cake' or '6' cakes
@param document : NLP Doc object
:return: Status if decoded correctly (true, false), and Inventory object
"""
count = Inventory(str(document))
for token in document:
        if token.pos_ in (u'NOUN', u'NNS', u'NN'):
item = str(token)
for child in token.children:
if child.dep_ == u'compound' or child.dep_ == u'ad':
item = str(child) + str(item)
elif child.dep_ == u'nummod':
count.amount = str(child).strip()
for numerical_child in child.children:
                        # not arithmetic: the number tokens are concatenated as strings
count.amount = str(numerical_child) + str(count.amount).strip()
else:
                print("WARNING: unknown child: " + str(child) + ':' + str(child.dep_))
count.item = item
count.unit = item
return count | a283f3630a18cdbb0cc22664e583f00866ff759b | 16,326 |
import os
def target_precheck(root_dir, configs_dir, target_name,
info_defaults, required_scripts):
"""
Checks:
1. That the target (subsys or experiment) config includes an 'active' field indicating whether to run it
2. If the target is active, check that all required
scripts are present and executable
This function returns:
1. a dict containing a 'status' field
(boolean, true if all is preconfigured correctly) and a 'message' containing an
explanation as a string if one is necessary
2. A dict containing the target config's entries for
each of the fields in info_defaults (uses the default
if it's not specified)
"""
target_conf = attempt_parse_config(configs_dir, target_name)
if target_conf is None:
return ({'success': False,
'message': 'config.json for {} is missing or fails to parse'.format(target_name)},
None)
update_fields = []
target_info = {}
for field, default in info_defaults.items():
update_fields.append(field)
target_info[field] = default
for field in update_fields:
if field in target_conf:
target_info[field] = target_conf[field]
# no need to check target subdirectory if it is not active
if not target_conf['active']:
return ({'success': True, 'message': 'Inactive'}, target_info)
target_subdir = os.path.join(root_dir, target_name)
if not os.path.exists(target_subdir):
return ({'success': False,
'message': 'Script subdirectory for {} missing'.format(target_name)}, None)
invalid_scripts = check_present_and_executable(target_subdir, required_scripts)
if invalid_scripts:
return ({
'success': False,
'message': 'Necessary files are missing from {} or not executable: {}'.format(
target_subdir,
', '.join(invalid_scripts))
},
None)
return ({'success': True, 'message': ''}, target_info) | fc5bb4f3406805dffcbd06a30e41af41fbf270bb | 16,327 |
from typing import Collection
def from_ir_objs(ir_objs: Collection[IrCell]) -> AnnData:
"""\
Convert a collection of :class:`IrCell` objects to an :class:`~anndata.AnnData`.
This is useful for converting arbitrary data formats into
the scirpy :ref:`data-structure`.
{doc_working_model}
Parameters
----------
ir_objs
Returns
-------
:class:`~anndata.AnnData` object with :term:`IR` information in `obs`.
"""
ir_df = pd.DataFrame.from_records(
(_process_ir_cell(x) for x in ir_objs), index="cell_id"
)
adata = AnnData(obs=ir_df, X=np.empty([ir_df.shape[0], 0]))
_sanitize_anndata(adata)
return adata | 55e95b2673d6aec02ae5aa7fb5cec014db17cdc7 | 16,328 |
def s3_is_mobile_client(request):
"""
Simple UA Test whether client is a mobile device
@todo: parameter description?
"""
env = request.env
if env.http_x_wap_profile or env.http_profile:
return True
if env.http_accept and \
env.http_accept.find("text/vnd.wap.wml") > 0:
return True
keys = ["iphone", "ipod", "android", "opera mini", "blackberry", "palm",
"windows ce", "iemobile", "smartphone", "medi", "sk-0", "vk-v",
"aptu", "xda-", "mtv ", "v750", "p800", "opwv", "send", "xda2",
"sage", "t618", "qwap", "veri", "t610", "tcl-", "vx60", "vx61",
"lg-k", "lg-l", "lg-m", "lg-o", "lg-a", "lg-b", "lg-c", "xdag",
"lg-f", "lg-g", "sl45", "emul", "lg-p", "lg-s", "lg-t", "lg-u",
"lg-w", "6590", "t250", "qc21", "ig01", "port", "m1-w", "770s",
"n710", "ez60", "mt50", "g1 u", "vk40", "bird", "tagt", "pose",
"jemu", "beck", "go.w", "jata", "gene", "smar", "g-mo", "o2-x",
"htc_", "hei-", "fake", "qc-7", "smal", "htcp", "htcs", "craw",
"htct", "aste", "htca", "htcg", "teli", "telm", "kgt", "mwbp",
"kwc-", "owg1", "htc ", "kgt/", "htc-", "benq", "slid", "qc60",
"dmob", "blac", "smt5", "nec-", "sec-", "sec1", "sec0", "fetc",
"spv ", "mcca", "nem-", "spv-", "o2im", "m50/", "ts70", "arch",
"qtek", "opti", "devi", "winw", "rove", "winc", "talk", "pant",
"netf", "pana", "esl8", "pand", "vite", "v400", "whit", "scoo",
"good", "nzph", "mtp1", "doco", "raks", "wonu", "cmd-", "cell",
"mode", "im1k", "modo", "lg-d", "idea", "jigs", "bumb", "sany",
"vulc", "vx70", "psio", "fly_", "mate", "pock", "cdm-", "fly-",
"i230", "lge-", "lge/", "argo", "qc32", "n701", "n700", "mc21",
"n500", "midp", "t-mo", "airn", "bw-u", "iac", "bw-n", "lg g",
"erk0", "sony", "alav", "503i", "pt-g", "au-m", "treo", "ipaq",
"dang", "seri", "mywa", "eml2", "smb3", "brvw", "sgh-", "maxo",
"pg-c", "qci-", "vx85", "vx83", "vx80", "vx81", "pg-8", "pg-6",
"phil", "pg-1", "pg-2", "pg-3", "ds12", "scp-", "dc-s", "brew",
"hipt", "kddi", "qc07", "elai", "802s", "506i", "dica", "mo01",
"mo02", "avan", "kyoc", "ikom", "siem", "kyok", "dopo", "g560",
"i-ma", "6310", "sie-", "grad", "ibro", "sy01", "nok6", "el49",
"rim9", "upsi", "inno", "wap-", "sc01", "ds-d", "aur ", "comp",
"wapp", "wapr", "waps", "wapt", "wapu", "wapv", "wapy", "newg",
"wapa", "wapi", "wapj", "wapm", "hutc", "lg/u", "yas-", "hita",
"lg/l", "lg/k", "i-go", "4thp", "bell", "502i", "zeto", "ez40",
"java", "n300", "n302", "mmef", "pn-2", "newt", "1207", "sdk/",
"gf-5", "bilb", "zte-", "maui", "qc-3", "qc-2", "blaz", "r600",
"hp i", "qc-5", "moto", "cond", "motv", "virg", "ccwa", "audi",
"shar", "i-20", "samm", "sama", "sams", "sch-", "mot ", "http",
"505i", "mot-", "n502", "topl", "n505", "mobi", "3gso", "wmlb",
"ezwa", "qc12", "abac", "tdg-", "neon", "mio8", "sp01", "rozo",
"vx98", "dait", "t600", "anyw", "tx-9", "sava", "m-cr", "tsm-",
"mioa", "tsm5", "klon", "capi", "tsm3", "hcit", "libw", "lg50",
"mc01", "amoi", "lg54", "ez70", "se47", "n203", "vk52", "vk53",
"vk50", "webc", "haie", "semc", "grun", "play", "palm", "a wa",
"anny", "prox", "o2 x", "ezze", "symb", "hs-c", "pg13", "mits",
"kpt ", "qa-a", "501i", "pdxg", "iris", "pluc", "acoo", "soft",
"hpip", "iac/", "iac-", "aus ", "s55/", "vx53", "vx52", "chtm",
"meri", "merc", "your", "huaw", "cldc", "voda", "smit", "x700",
"mozz", "lexi", "up.b", "sph-", "keji", "jbro", "wig ", "attw",
"pire", "r380", "lynx", "anex", "vm40", "hd-m", "504i", "w3c ",
"c55/", "w3c-", "upg1", "t218", "tosh", "acer", "hd-t", "eric",
"hd-p", "noki", "acs-", "dbte", "n202", "tim-", "alco", "ezos",
"dall", "leno", "alca", "asus", "m3ga", "utst", "aiko", "n102",
"n101", "n100", "oran"]
ua = (env.http_user_agent or "").lower()
if [key for key in keys if key in ua]:
return True
return False | 916e6b01c6375c18335f332a0411cb2a034c962a | 16,329 |
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end"
) | bac64c3b2d2e5d883f627dada658cdd9359b61b0 | 16,330 |
from typing import List

import pandas as pd
def get_cases_from_input_df(input_df: pd.DataFrame) -> List[Case]:
"""
    Build a Case object from each row of the input DataFrame
    :return: list of Case objects
"""
cases: List[Case] = []
for index, row in input_df.iterrows():
# Create a case object from the row values in the input df
cases.append(Case.from_dict(row.to_dict()))
return cases | 34b820880691456fde3ab260be02646590aeafd7 | 16,331 |
import torch
import torch.nn as nn
def init_STRFNet(sample_batch,
num_classes,
num_kernels=32,
residual_channels=[32, 32],
embedding_dimension=1024,
num_rnn_layers=2,
frame_rate=None, bins_per_octave=None,
time_support=None, frequency_support=None,
conv2d_sizes=(3, 3),
mlp_hiddims=[],
activate_out=nn.LogSoftmax(dim=1)
):
"""Initialize a STRFNet for multi-class classification.
This is a one-stop solution to create STRFNet and its variants.
Parameters
----------
sample_batch: [Batch,Time,Frequency] torch.FloatTensor
A batch of training examples that is used for training.
Some dimension parameter of the network is inferred cannot be changed.
num_classes: int
Number of classes for the classification task.
Keyword Parameters
------------------
num_kernels: int, 32
2*num_kernels is the number of STRF/2D kernels.
Doubling is due to the two orientations of the STRFs.
residual_channels: list(int), [32, 32]
Specify the number of conv2d channels for each residual block.
embedding_dimension: int, 1024
Dimension of the learned embedding (RNN output).
frame_rate: float, None
Sampling rate [samples/second] / hop size [samples].
No STRF kernels by default.
bins_per_octave: int, None
Frequency bins per octave in CQT sense. (TODO: extend for non-CQT rep.)
No STRF kernels by default.
time_support: float, None
Number of seconds spanned by each STRF kernel.
No STRF kernels by default.
frequency_support: int/float, None
If frame_rate or bins_per_octave is None, interpret as GaborSTRFConv.
- Number of frequency bins (int) spanned by each STRF kernel.
Otherwise, interpret as STRFConv.
- Number of octaves spanned by each STRF kernel.
No STRF kernels by default.
conv2d_sizes: (int, int), (3, 3)
nn.Conv2d kernel dimensions.
mlp_hiddims: list(int), []
Final MLP hidden layer dimensions.
Default has no hidden layers.
activate_out: callable, nn.LogSoftmax(dim=1)
Activation function at the final layer.
Default uses LogSoftmax for multi-class classification.
"""
if all(p is not None for p in (time_support, frequency_support)):
is_strfnet = True
if all(p is not None for p in (frame_rate, bins_per_octave)):
kernel_type = 'wavelet'
else:
assert all(
type(p) is int for p in (time_support, frequency_support)
)
kernel_type = 'gabor'
else:
is_strfnet = False
is_cnn = conv2d_sizes is not None
is_hybrid = is_strfnet and is_cnn
if is_hybrid:
print(f"Preparing for Hybrid STRFNet; kernel type is {kernel_type}.")
elif is_strfnet:
print(f"Preparing for STRFNet; kernel type is {kernel_type}.")
elif is_cnn:
print("Preparing for CNN.")
else:
raise ValueError("Insufficient parameters. Check example_STRFNet.")
if not is_strfnet:
strf_layer = None
elif kernel_type == 'wavelet':
strf_layer = STRFConv(
frame_rate, bins_per_octave,
time_support, frequency_support, num_kernels
)
else:
strf_layer = GaborSTRFConv(
time_support, frequency_support, num_kernels
)
if is_cnn:
d1, d2 = conv2d_sizes
if d1 % 2 == 0:
d1 += 1
print("Enforcing odd conv2d dimension.")
if d2 % 2 == 0:
d2 += 1
print("Enforcing odd conv2d dimension.")
conv2d_layer = nn.Conv2d(
1, 2*num_kernels, # Double to match the total number of STRFs
(d1, d2), padding=(d1//2, d2//2)
)
else:
conv2d_layer = None
residual_layer = ModResnet(
(4 if is_hybrid else 2)*num_kernels, residual_channels, False
)
with torch.no_grad():
flattened_dimension = STRFNet.cnn_forward(
sample_batch, strf_layer, conv2d_layer, residual_layer
).shape[-1]
linear_layer = nn.Linear(flattened_dimension, embedding_dimension)
rnn = nn.GRU(
embedding_dimension, embedding_dimension, batch_first=True,
num_layers=num_rnn_layers, bidirectional=True
)
mlp = MLP(
2*embedding_dimension, num_classes, hiddims=mlp_hiddims,
activate_hid=nn.LeakyReLU(),
activate_out=activate_out,
batchnorm=[True]*len(mlp_hiddims)
)
return STRFNet(strf_layer, conv2d_layer, residual_layer,
linear_layer, rnn, mlp) | bdc39fef1889c21d18fda8da308f81f5f03a485c | 16,332 |
from typing import AnyStr
import unicodedata
def normalize_nfc(txt: AnyStr) -> bytes:
"""
Normalize message to NFC and return bytes suitable for protobuf.
This seems to be bitcoin-qt standard of doing things.
"""
str_txt = txt.decode() if isinstance(txt, bytes) else txt
return unicodedata.normalize("NFC", str_txt).encode() | 12b6e037225878e0bbca1d52d9f58d57abb35746 | 16,333 |
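A couple of illustrative calls (assumed, not original): the composed and decomposed forms of "é" normalize to the same bytes.

print(normalize_nfc("caf\u00e9") == normalize_nfc("cafe\u0301"))  # True
print(normalize_nfc(b"caf\xc3\xa9"))                              # b'caf\xc3\xa9'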
from typing import Callable
from typing import Any
import threading
import functools
def synchronized(wrapped: Callable[..., Any]) -> Any:
"""The missing @synchronized decorator
https://git.io/vydTA"""
_lock = threading.RLock()
@functools.wraps(wrapped)
def _wrapper(*args, **kwargs):
with _lock:
return wrapped(*args, **kwargs)
return _wrapper | 39da1efeb93c8dbdba570763d2e66dc8d9d84fc5 | 16,334 |
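A minimal usage sketch (illustrative only): every call to the decorated function shares one re-entrant lock.

counter = 0

@synchronized
def increment():
    """Bump the shared counter under the decorator's lock."""
    global counter
    counter += 1

threads = [threading.Thread(target=increment) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)  # 100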
import sklearn.tree

def corrgroups60__decision_tree():
""" Decision Tree
"""
return sklearn.tree.DecisionTreeRegressor(random_state=0) | fb2405c54208705a105b225e1dd269d45892b7be | 16,335 |
from functools import wraps

def auth_required(*auth_methods):
    """
    Decorator that protects endpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms.
"""
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated()
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
h = {}
mechanisms = [(method, login_mechanisms.get(method)) for method in auth_methods]
for method, mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
elif method == 'basic':
r = _security.default_http_auth_realm
h['WWW-Authenticate'] = 'Basic realm="%s"' % r
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response(headers=h)
return decorated_view
return wrapper | c6613e594abbb979352fe3ec96018fe52109bab0 | 16,336 |
def _get_default_data_dir_name():
"""
Gets default data directory
"""
return _get_path(DATA_DIR) | b4207e108a9f08a72b47c44ab43b3971e67e8165 | 16,337 |
def point_inside_triangle(p, t, tol=None):
"""
Test to see if a point is inside a triangle. The point is first
projected to the plane of the triangle for this test.
    :param ndarray p: Point to test.
:param ndarray t: Triangle vertices.
:param float tol: Tolerance for barycentric coordinate check.
:return: *True* if point is inside triangle, *False* if not.
:rtype: bool
"""
if tol is None:
tol = Settings.ptol
v01 = t[1] - t[0]
v02 = t[2] - t[0]
vp = p - t[0]
d01 = dot(v01, v01)
d12 = dot(v01, v02)
d02 = dot(v02, v02)
dp1 = dot(vp, v01)
dp2 = dot(vp, v02)
denom = d01 * d02 - d12 * d12
if denom == 0.:
return False
u = (d02 * dp1 - d12 * dp2) / denom
v = (d01 * dp2 - d12 * dp1) / denom
if u >= -tol and v >= -tol and u + v <= 1. + tol:
return True
return False | a7a4dd52dfa65fdd9e3cb3ac151c7895acb3abb8 | 16,338 |
import datetime

import pandas as pd
def merge_dfs(x, y):
"""Merge the two dataframes and download a CSV."""
df = pd.merge(x, y, on='Collection_Number', how='outer')
indexed_df = df.set_index(['Collection_Number'])
indexed_df['Access_Notes_Regarding_Storage_Locations'].fillna('No note', inplace=True)
today = datetime.datetime.today().strftime('%Y-%m-%d')
output_file = 'storage_locations_' + str(today) + '.csv'
indexed_df.to_csv(output_file)
print('Location report exported as ' + output_file)
return indexed_df | 9856d4394ca628fd7eb0f58e6cc805494410c51e | 16,339 |
def consumer(func):
"""A decorator function that takes care of starting a coroutine automatically on call.
See http://www.dabeaz.com/generators/ for more details.
"""
def start(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start | e834a081c1f43545684bb4102a92b186c8825f30 | 16,340 |
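A classic usage sketch (assumed, not original): the decorator primes the coroutine so callers can send() immediately.

@consumer
def printer():
    """Coroutine that prints whatever is sent to it."""
    while True:
        line = (yield)
        print("received:", line)

p = printer()      # already advanced to the first yield by the decorator
p.send("hello")    # received: hello
p.send("world")    # received: world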
import numpy as np
from qulacs import GeneralQuantumOperator  # assumed import path for GeneralQuantumOperator

def convert_openfermion_op(openfermion_op, n_qubits=None):
"""convert_openfermion_op
Args:
openfermion_op (:class:`openfermion.ops.QubitOperator`)
n_qubit (:class:`int`):
if None (default), it automatically calculates the number of qubits required to represent the given operator
Returns:
:class:`qulacs.GeneralQuantumOperator`
"""
if n_qubits is None:
_n_qubits = _count_qubit_in_qubit_operator(openfermion_op)
else:
_n_qubits = n_qubits
res = GeneralQuantumOperator(_n_qubits)
for pauli_product in openfermion_op.terms:
coef = float(np.real(openfermion_op.terms[pauli_product]))
pauli_string = ''
for pauli_operator in pauli_product:
pauli_string += pauli_operator[1] + ' ' + str(pauli_operator[0])
pauli_string += ' '
res.add_operator(coef, pauli_string[:-1])
return res | 416eccc82fbd7dbdcf61ba62f5176ca3e12a01db | 16,341 |
def ssqueeze(Wx, w, ssq_freqs=None, scales=None, fs=None, t=None, transform='cwt',
squeezing='sum'):
"""Calculates the synchrosqueezed CWT or STFT of `x`. Used internally by
`synsq_cwt` and `synsq_stft_fwd`.
# Arguments:
Wx or Sx: np.ndarray
CWT or STFT of `x`.
w: np.ndarray
Phase transform of `Wx` or `Sx`. Must be >=0.
ssq_freqs: str['log', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
scales: str['log', 'linear'] / np.ndarray
CWT scales. Ignored if transform='stft'.
- 'log': exponentially distributed scales, as pow of 2:
`[2^(1/nv), 2^(2/nv), ...]`
- 'linear': linearly distributed scales.
!!! EXPERIMENTAL; default scheme for len(x)>2048 performs
poorly (and there may not be a good non-piecewise scheme).
fs: float / None
Sampling frequency of `x`. Defaults to 1, which makes ssq
frequencies range from 1/dT to 0.5, i.e. as fraction of reference
sampling rate up to Nyquist limit; dT = total duration (N/fs).
Overridden by `t`, if provided.
Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Overrides `fs` if not None.
transform: str['cwt', 'stft']
Whether `Wx` is from CWT or STFT (`Sx`).
squeezing: str['sum', 'lebesgue']
- 'sum' = standard synchrosqueezing using `Wx`.
- 'lebesgue' = as in [4], setting `Wx=ones()/len(Wx)`, which is
not invertible but has better robustness properties in some cases.
Not recommended unless purpose is understood.
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
# References:
1. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_squeeze.m
"""
def _ssqueeze(w, Wx, nv, ssq_freqs, transform, ssq_scaletype, cwt_scaletype):
# incorporate threshold by zeroing out Inf values, so they get ignored
Wx = replace_at_inf(Wx, ref=w, replacement=0)
# do squeezing by finding which frequency bin each phase transform point
# w[a, b] lands in (i.e. to which f in ssq_freqs each w[a, b] is closest)
# equivalent to argmin(abs(w[a, b] - ssq_freqs)) for every a, b
with np.errstate(divide='ignore'):
k = (find_closest(w, ssq_freqs) if ssq_scaletype != 'log' else
find_closest(np.log2(w), np.log2(ssq_freqs)))
# Tx[k[i, j], j] += Wx[i, j] * norm
if transform == 'cwt':
# Eq 14 [2]; Eq 2.3 [1]
if cwt_scaletype == 'log':
# ln(2)/nv == diff(ln(scales))[0] == ln(2**(1/nv))
Tx = indexed_sum(Wx / scales**(1/2) * np.log(2) / nv, k)
elif cwt_scaletype == 'linear':
# omit /dw since it's cancelled by *dw in inversion anyway
da = (scales[1] - scales[0])
Tx = indexed_sum(Wx / scales**(3/2) * da, k)
else: # 'stft'
# TODO validate
Tx = indexed_sum(Wx * (ssq_freqs[1] - ssq_freqs[0]), k)
return Tx
def _compute_associated_frequencies(dt, na, N, transform, ssq_scaletype):
dT = dt * N
# normalized frequencies to map discrete-domain to physical:
# f[[cycles/samples]] -> f[[cycles/second]]
# maximum measurable (Nyquist) frequency of data
fM = 1 / (2 * dt)
# minimum measurable (fundamental) frequency of data
fm = 1 / dT
# frequency divisions `w_l` to search over in Synchrosqueezing
if ssq_scaletype == 'log':
# [fm, ..., fM]
ssq_freqs = fm * np.power(fM / fm, np.arange(na) / (na - 1))
else:
if transform == 'cwt':
ssq_freqs = np.linspace(fm, fM, na)
elif transform == 'stft':
# ??? seems to be 0 to f_sampling/2, but why use N?
# what about fm and fM?
ssq_freqs = np.linspace(0, 1, N) / dt
ssq_freqs = ssq_freqs[:N // 2]
return ssq_freqs
def _process_args(w, fs, t, N, transform, squeezing, scales):
if w.min() < 0:
raise ValueError("found negatives in `w`")
if transform not in ('cwt', 'stft'):
raise ValueError("`transform` must be one of: cwt, stft "
"(got %s)" % squeezing)
if squeezing not in ('sum', 'lebesgue'):
raise ValueError("`squeezing` must be one of: sum, lebesgue "
"(got %s)" % squeezing)
if scales is None and transform == 'cwt':
raise ValueError("`scales` can't be None if `transform == 'cwt'`")
dt, *_ = _process_fs_and_t(fs, t, N)
return dt
na, N = Wx.shape
dt = _process_args(w, fs, t, N, transform, squeezing, scales)
scales, cwt_scaletype, _, nv = process_scales(scales, N, get_params=True)
if not isinstance(ssq_freqs, np.ndarray):
if isinstance(ssq_freqs, str):
ssq_scaletype = ssq_freqs
else:
# default to same scheme used by `scales`
ssq_scaletype = cwt_scaletype
ssq_freqs = _compute_associated_frequencies(dt, na, N, transform,
ssq_scaletype)
else:
ssq_scaletype = _infer_scaletype(ssq_freqs)
if squeezing == 'lebesgue': # from reference [3]
Wx = np.ones(Wx.shape) / len(Wx)
Tx = _ssqueeze(w, Wx, nv, ssq_freqs, transform, ssq_scaletype, cwt_scaletype)
return Tx, ssq_freqs | 4c862d13b1cf046c927e187a4b3c9aaed55a7277 | 16,342 |
def ConvertStringsToColumnHeaders(proposed_headers):
"""Converts a list of strings to column names which spreadsheets accepts.
When setting values in a record, the keys which represent column names must
fit certain rules. They are all lower case, contain no spaces or special
characters. If two columns have the same name after being sanitized, the
columns further to the right have _2, _3 _4, etc. appended to them.
If there are column names which consist of all special characters, or if
the column header is blank, an obfuscated value will be used for a column
name. This method does not handle blank column names or column names with
only special characters.
"""
headers = []
for input_string in proposed_headers:
# TODO: probably a more efficient way to do this. Perhaps regex.
sanitized = input_string.lower().replace('_', '').replace(
':', '').replace(' ', '')
# When the same sanitized header appears multiple times in the first row
# of a spreadsheet, _n is appended to the name to make it unique.
header_count = headers.count(sanitized)
if header_count > 0:
headers.append('%s_%i' % (sanitized, header_count+1))
else:
headers.append(sanitized)
return headers | dd3ce0f9710aeaa778975b1c14d5166d1d2a2446 | 16,343 |
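A quick illustration of the sanitization and de-duplication rules (assumed example input):

print(ConvertStringsToColumnHeaders(['First Name', 'first_name', 'Due: Date']))
# ['firstname', 'firstname_2', 'duedate']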
import os

import numpy as np
import scipy.sparse as sp

# data_path, NUM_PLAYLIST and NUM_TRACKS are assumed to be module-level constants.
def parse_interactions(filename="train.csv"):
""" Parse the train data and return the interaction matrix alone """
with open(os.path.join(data_path, filename), "r") as f:
# Discard first line
lines = f.readlines()[1:]
num_lines = len(lines)
# Create container
interactions = sp.dok_matrix((NUM_PLAYLIST, NUM_TRACKS), dtype=np.uint8)
for i, line in enumerate(lines):
playlist, track = [int(i) for i in line.split(",")]
interactions[playlist, track] = 1
print("\rParsing interactions: {:.4}%".format((i / num_lines) * 100), end="")
print("\n")
# Return matrix
return interactions | fb35ccee5cf577a2c85239175859781c84fe7eed | 16,344 |
import numpy as np
import pandas as pd

def recommend(uid, data, model, top_n=100):
"""
    Recommend the top_n unseen movies for a given user with a trained surprise model
Parameters
----------
uid : int
user id
data : surprise object with data
The entire system, ratings of users (Constructed with reader from surprise)
    model : surprise object
Trained algorithm
top_n : int
The number of movies to recommend
Returns
-------
    np.ndarray
        ids of the recommended movies
    np.ndarray
        predicted ratings for the recommended movies
    pd.DataFrame
        predicted movies and ratings in the movielens format (uid, iid, rating)
"""
all_movie_ids = data.df['iid'].unique()
uid_rated = data.df[data.df['uid'] == uid]['iid']
movies_to_recommend = np.setdiff1d(all_movie_ids, uid_rated)
if len(movies_to_recommend) == 0:
print('NO MOVIES TO RECOMMEND!')
prediction_set = [[uid, iid, 0] for iid in movies_to_recommend] #here 0 is arbitrary, ratings don't matter
predictions = model.test(prediction_set)
pred_ratings = np.array([pred.est for pred in predictions])
top = pred_ratings.argsort()[::-1][:top_n]
data_update = pd.DataFrame([[uid, movies_to_recommend[top][i], pred_ratings[top][i]] for i in range(top_n)], columns = ['uid', 'iid', 'rating'])
return movies_to_recommend[top], pred_ratings[top], data_update | b156826359e3310c8872a07428d0073795ef071b | 16,345 |
import os
import threading

import numpy as np
def threading_data(data=None, fn=None, thread_count=None, path = None):
"""Process a batch of data by given function by threading.
    Usually used for data augmentation.
Parameters
-----------
data : numpy.array or others
The data to be processed.
thread_count : int
The number of threads to use.
fn : function
The function for data processing.
    path : str
        The directory path joined to each data item before `fn` is applied.
    more args : the args for `fn`
        See Examples below.
Returns
-------
    list or numpy.ndarray
The processed results.
References
----------
- `python queue <https://pymotw.com/2/Queue/index.html#module-Queue>`__
- `run with limited queue <http://effbot.org/librarybook/queue.htm>`__
"""
def apply_fn(results, i, data, path):
path = os.path.join(path, data)
results[i] = fn(path)
if thread_count is None:
results = [None] * len(data)
threads = []
# for i in range(len(data)):
# t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs))
for i, d in enumerate(data):
t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, d, path))
t.start()
threads.append(t)
else:
divs = np.linspace(0, len(data), thread_count + 1)
divs = np.round(divs).astype(int)
results = [None] * thread_count
threads = []
for i in range(thread_count):
t = threading.Thread(
name='threading_and_return', target=apply_fn, args=(results, i, data[divs[i]:divs[i + 1]], path)
)
t.start()
threads.append(t)
for t in threads:
t.join()
if thread_count is None:
try:
return np.asarray(results, dtype=object)
except Exception:
return results
else:
return np.concatenate(results) | 5a21912de3ac6f146975b26390a9a7d6a719de52 | 16,346 |
import numpy as np
def cluster_info(arr):
    """ Return the number of clusters (nonzero runs separated by 0s) in the array,
    the size of each cluster, and the mappings between cluster ids and coordinates
    """
data = []
k2coord = []
coord2k = np.empty_like(arr).astype(np.int64)
k = -1
new_cluster = True
for i in range(0,len(arr)):
if arr[i] == 0:
new_cluster = True
coord2k[i] = -1
else:
if new_cluster == True:
k += 1
k2coord.append([i])
data.append(0)
else:
k2coord[k].append(i)
data[k] += 1
coord2k[i] = k
new_cluster = False
Ncl = len(data) # number of clusters
Nk = data # Nk[k] = size of cluster k
return Ncl, Nk, k2coord, coord2k | 23a3d58b13ba4af4977cd25a1dc45d116fd812b5 | 16,347 |
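# Illustrative usage sketch (added for clarity; not part of the original snippet).
# Two clusters are detected below: indices 1-3 and index 6.
def _demo_cluster_info():
    ncl, nk, k2coord, coord2k = cluster_info(np.array([0, 1, 1, 1, 0, 0, 2, 0]))
    assert ncl == 2
    assert k2coord == [[1, 2, 3], [6]]
    assert list(coord2k) == [-1, 0, 0, 0, -1, -1, 1, -1]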
def set_or_none(list_l):
"""Function to avoid list->set transformation to return set={None}."""
if list_l == [None]:
res = None
else:
res = set(list_l)
return res | ee5fb4539e63afc7fd8013610229d9ab784b88c5 | 16,348 |
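# Illustrative usage sketch (added; not part of the original snippet).
def _demo_set_or_none():
    assert set_or_none([None]) is None          # the [None] sentinel collapses to None
    assert set_or_none([1, 2, 2]) == {1, 2}     # otherwise duplicates are dropped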
import re
def case_mismatch(vm_type, param):
"""Return True if vm_type matches a portion of param in a case
insensitive search, but does not equal that portion;
return False otherwise.
The "portions" of param are delimited by "_".
"""
re_portion = re.compile(
"(^(%(x)s)_)|(_(%(x)s)_)|(_(%(x)s)$)" % dict(x=vm_type), re.IGNORECASE
)
found = re_portion.search(param)
if found:
param_vm_type = [x for x in found.groups()[1::2] if x][0]
return param_vm_type != vm_type
else:
return False | e7fb565ac6e10fd15dd62a64fbf7f14a8bcfde6b | 16,349 |
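# Illustrative usage sketch (added; not part of the original snippet).
def _demo_case_mismatch():
    assert case_mismatch("vm", "VM_disk_size") is True    # same portion, different case
    assert case_mismatch("vm", "vm_disk_size") is False   # case agrees
    assert case_mismatch("vm", "disk_size") is False      # vm_type is not a "_"-delimited portion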
def _async_os(cls):
""" Aliases for aiofiles.os"""
return aiofiles.os | ad37b21f22ed5203451ac8eb4b7a53f4572fec73 | 16,350 |
import torch
import torchdrift
def corruption_function(x: torch.Tensor):
    """ Applies a Gaussian blur to x """
return torchdrift.data.functional.gaussian_blur(x, severity=5) | 54b98c6bddb187689c0e70fc2dbf0f3c56e25ad1 | 16,351 |
def filter_by_filename(conn, im_ids, imported_filename):
"""Filter list of image ids by originalFile name
Sometimes we know the filename of an image that has been imported into
OMERO but not necessarily the image ID. This is frequently the case when
we want to annotate a recently imported image. This funciton will help
to filter a list of image IDs to only those associated with a particular
filename.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object
OMERO connection.
im_ids : list of int
List of OMERO image IDs.
imported_filename : str
The full filename (with extension) of the file whose OMERO image
we are looking for. NOT the path of the image.
Returns
-------
filtered_im_ids : list of int
Filtered list of images with originalFile name matching
``imported_filename``.
Notes
-----
This function should be used as a filter on an image list that has been
already narrowed down as much as possible. Note that many different images
in OMERO may share the same filename (e.g., image.tif).
Examples
--------
>>> im_ids = get_image_ids(conn, dataset=303)
>>> im_ids = filter_by_filename(conn, im_ids, "feb_2020.tif")]
"""
q = conn.getQueryService()
params = Parameters()
params.map = {"oname": rstring(imported_filename)}
results = q.projection(
"SELECT i.id FROM Image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" JOIN u.originalFile o"
" WHERE o.name=:oname",
params,
conn.SERVICE_OPTS
)
im_id_matches = [r[0].val for r in results]
return list(set(im_ids) & set(im_id_matches)) | bf9625c06929a80f21a4683b1da687535f296e59 | 16,352 |
def get_count():
"""
    :return: the current counter value
"""
counter = Counters.query.filter(Counters.id == 1).first()
return make_succ_response(0) if counter is None else make_succ_response(counter.count) | be0ab2773e661b8e5e34f685b59f16cfdee6b26d | 16,353 |
import math
def perm(x, y=None):
"""Return the number of ways to choose k items from n items without repetition and with order."""
if not isinstance(x, int) or (not isinstance(y, int) and y is not None):
raise ValueError(f"Expected integers. Received [{type(x)}] {x} and [{type(y)}] {y}")
return math.perm(x, y) | c9ad65c6ce3cc3e5ba488c5f2ddd1aabbdc7da6a | 16,354 |
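# Illustrative usage sketch (added; not part of the original snippet).
def _demo_perm():
    assert perm(5, 2) == 20   # 5 * 4 ordered selections of 2 out of 5
    assert perm(4) == 24      # y omitted: math.perm treats it as n, i.e. 4!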
import os
import logging
def check_if_need_update(source, year, states, datadir, clobber, verbose):
"""
Do we really need to download the requested data? Only case in which
we don't have to do anything is when the downloaded file already exists
and clobber is False.
"""
paths = paths_for_year(source=source, year=year, states=states,
datadir=datadir)
need_update = False
message = None
for path in paths:
if os.path.exists(path):
if clobber:
message = f'{source} data for {year} already present, CLOBBERING.'
need_update = True
else:
message = f'{source} data for {year} already present, skipping.'
else:
message = ''
need_update = True
# if verbose and message is not None:
logging.info(message)
return need_update | 407824edcaa783f22cdf03814ffd110a88fd06e4 | 16,355 |
import sys
import os
import shutil
import textwrap
def main(argv, cfg=None):
"""Main method"""
# Replace stdout and stderr with /dev/tty, so we don't mess up with scripts
# that use ssh in case we error out or similar.
if cfg is None:
cfg = {}
try:
sys.stdout = open("/dev/tty", "w")
sys.stderr = open("/dev/tty", "w")
except IOError:
pass
config = Config(cfg).load()
check_exit(argv, config)
autodetect_binary(argv, config)
# Check that BINARY_SSH is not repassh.
# This can happen if the user sets a binary name only (e.g. 'scp') and a
# symlink with the same name was set up.
# Note that this relies on argv[0] being set sensibly by the caller,
# which is not always the case. argv[0] may also just have the binary
# name if found in a path.
binary_path = os.path.realpath(
shutil.which(config.get("BINARY_SSH")))
if argv[0]:
ssh_ident_path = os.path.realpath(
shutil.which(argv[0]))
if binary_path == ssh_ident_path:
message = textwrap.dedent("""\
repassh found '{0}' as the next command to run.
Based on argv[0] ({1}), it seems like this will create a
loop.
Please use BINARY_SSH, BINARY_DIR, or change the way
repassh is invoked (eg, a different argv[0]) to make
it work correctly.""")
config.print(message.format(config.get("BINARY_SSH"), argv[0]), loglevel=LOG_ERROR)
config.exit(255)
parse_command_line(argv, config)
identity = find_identity(argv, config)
keys = find_keys(identity, config)
sshconfig = find_ssh_config(identity, config)
agent = AgentManager(identity, sshconfig, config)
if not config.get("SSH_BATCH_MODE"):
# do not load keys in BatchMode
agent.load_unloaded_keys(keys)
portknock(argv, config)
return agent.run_ssh(
argv[1:],
cfg.get("stdin", None),
cfg.get("stdout", None),
cfg.get("stderr", None)
) | 7d820da8b1303fa1b0bfe7236ab42c277ab4c0c6 | 16,356 |
from typing import Union
import numpy as np
import talib
def ui(candles: np.ndarray, period: int = 14, scalar: float = 100, source_type: str = "close", sequential: bool = False) -> Union[float, np.ndarray]:
"""
Ulcer Index (UI)
:param candles: np.ndarray
:param period: int - default: 14
:param scalar: float - default: 100
:param source_type: str - default: "close"
:param sequential: bool - default: False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
highest_close = talib.MAX(source, period)
downside = scalar * (source - highest_close)
downside /= highest_close
d2 = downside * downside
res = np.sqrt(talib.SUM(d2, period) / period)
return res if sequential else res[-1] | 1f99a6ee849094f3a695812e37035a13f36e8c49 | 16,357 |
def _padwithzeros(vector, pad_width, iaxis, kwargs):
"""Pad with zeros"""
vector[: pad_width[0]] = 0
vector[-pad_width[1] :] = 0
return vector | 1a3a9fc4fd3b0fc17a905fa9ecd283d60310655d | 16,358 |
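# Illustrative usage sketch (added; not part of the original snippet): the helper is
# meant to be passed to np.pad as a callable padding mode.
def _demo_padwithzeros():
    import numpy as np
    padded = np.pad(np.array([1, 2, 3]), 2, _padwithzeros)
    assert list(padded) == [0, 0, 1, 2, 3, 0, 0]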
def fill_from_sparse_coo(t,elems):
"""
:param elems: non-zero elements defined in COO format (tuple(indices),value)
:type elems: list[tuple(tuple(int),value)]
"""
for e in elems:
t[e[0]]=e[1]
return t | 73c6892464d7d7cf34f40fe1dde9973950cdef79 | 16,359 |
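# Illustrative usage sketch (added; not part of the original snippet).
def _demo_fill_from_sparse_coo():
    import numpy as np
    dense = fill_from_sparse_coo(np.zeros((2, 3)), [((0, 1), 5.0), ((1, 2), -1.0)])
    assert dense[0, 1] == 5.0 and dense[1, 2] == -1.0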
def download_responses(survey_id):
"""Download survey responses."""
if request.method == 'GET':
csv = survey_service.download_responses(survey_id)
return Response(
csv,
mimetype='text/csv',
headers={'Content-disposition': 'attachment; filename=surveydata.csv'}) | 8513caf582b87bf0cd5db80622c530d1ec1c3ef2 | 16,360 |
from collections import deque
from typing import Iterable
from typing import Deque
def array_shift(data: Iterable, shift: int) -> Deque:
"""
left(-) or right(+) shift of array
>>> arr = range(10)
>>> array_shift(arr, -3)
deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
>>> array_shift(arr, 3)
deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6])
"""
deq = deque(data)
deq.rotate(shift)
return deq | c14e115808592808bc9b0cf20fa8bc3d5ece7768 | 16,361 |
from collections import defaultdict, deque
def convert_to_timetable(trains):
    """
    Convert train data into timetable data.
    Args:
        trains (list of list of `Section`): train data
    Returns:
        timetable (list): timetable data
            timetable[from_station][to_station][dep_time] = (from_time, to_time)
            -> tuple of the boarding and alighting times (minutes elapsed since 0:00) when travelling
            from from_station to to_station on the next available train, given the current time dep_time
    """
max_time = 1 + max([section.to_time for train in trains for section in train])
n_stations = len(set([section.to_station for train in trains for section in train]))
timetable = [[[(max_time, max_time) for _ in range(max_time)] for _ in range(n_stations)] for _ in range(n_stations)]
    # Step 0: build a graph (adj) over (time, station) nodes, used by the searches in the following steps
adj = defaultdict(list)
target_time_flag = [0 for _ in range(max_time)]
for train in trains:
for section in train:
adj[(section.from_time, section.from_station)].append((section.to_time, section.to_station))
target_time_flag[section.from_time] = 1
target_time_flag[section.to_time] = 1
target_times = [t for t in range(max_time) if target_time_flag[t] == 1]
for station in range(n_stations):
for from_time, to_time in zip(target_times[:-1], target_times[1:]):
adj[(from_time, station)].append((to_time, station))
    # Step 1: register the entries whose departure time equals the boarding time
for train in trains:
for section in train:
            # Find the earliest arrival time at every other station with a BFS
min_to_time = [max_time for _ in range(n_stations)]
min_to_time[section.from_station] = section.from_time
que = deque([(section.from_time, section.from_station)])
visited = defaultdict(int)
visited[(section.from_time, section.from_station)] = 1
while len(que) > 0:
from_time, from_station = que.popleft()
for to_time, to_station in adj[(from_time, from_station)]:
if visited[(to_time, to_station)] == 1:
continue
min_to_time[to_station] = min(to_time, min_to_time[to_station])
que.append((to_time, to_station))
visited[(to_time, to_station)] = 1
            # Register the entries whose departure time equals the boarding time
for to_station in range(n_stations):
if to_station == section.from_station:
continue
to_time = min_to_time[to_station]
if to_time == max_time:
continue
timetable[section.from_station][to_station][section.from_time] = (section.from_time, to_time)
    # Step 2: register the entries whose departure time differs from the boarding time.
    # For example, take the first train from station 1 to station 2, departing at 5:00 (300) and arriving at 5:05 (305).
    # Step 1 registers timetable[1][2][300] = (300, 305).
    # Even if we leave station 1 before 5:00 (300), we simply wait at station 1 and board the same train,
    # so timetable[1][2][t] should also be (300, 305) for every t < 300.
    # Step 1 does not produce those entries, so they are filled in here.
for t in range(max_time - 2, - 1, - 1):
for from_station in range(n_stations):
for to_station in range(n_stations):
timetable[from_station][to_station][t] = \
min(timetable[from_station][to_station][t], timetable[from_station][to_station][t + 1])
return timetable | 042238b090af1b4b4e4a8cf469f9bbcd49edc9af | 16,362 |
import math
def parents(level, idx):
"""
Return all the (grand-)parents of the Healpix pixel idx at level (in nested format)
:param level: Resolution level
:param idx: Pixel index
:return: All the parents of the pixel
"""
assert idx < 12 * 2 ** (2 * level)
plpairs = []
for ind in range(level, 0, -1):
idx = int(math.floor(idx / 4))
plpairs.append(tuple((ind - 1, idx)))
level -= 1
return plpairs[::-1] | 355c3acffa07065de10049059ef064abefdd7ca0 | 16,363 |
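# Illustrative usage sketch (added; not part of the original snippet).
def _demo_parents():
    # pixel 17 at level 2 lies inside pixel 4 at level 1, which lies inside pixel 1 at level 0
    assert parents(2, 17) == [(0, 1), (1, 4)]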
from math import radians, sin
from statistics import mean, stdev
def precise_inst_ht(vert_list, spacing, offset):
"""
Uses a set of Vertical Angle Observations taken to a
levelling staff at regular intervals to determine the
height of the instrument above a reference mark
:param vert_list: List of Vertical (Zenith) Angle Observations (minimum of 3) in Decimal Degrees format
:param spacing: Distance in metres between each vertical angle observation
:param offset: Lowest observed height above reference mark
:return: Instrument Height above reference mark and its standard deviation
"""
if len(vert_list) < 3:
raise ValueError('ValueError: 3 or more vertical angles required')
vert_list.sort(reverse=True)
vert_pairs = [(va1, va2) for va1, va2 in zip(vert_list, vert_list[1:])]
base_ht = []
height_comp = []
for num, pair in enumerate(vert_pairs):
base_ht_pair = offset + num * spacing
base_ht.append(base_ht_pair)
dist_a = sin(radians(pair[1])) * (spacing / (sin(radians(pair[0] - pair[1]))))
delta_ht = dist_a * (sin(radians(pair[0] - 90)))
height_comp.append(delta_ht + base_ht[num])
return round(mean(height_comp), 5), round(stdev(height_comp), 5) | d88cf0dc289f2ef96d4b60dabf17c6e4bd04e549 | 16,364 |
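# Illustrative usage sketch (added; not part of the original snippet). The angles,
# spacing and offset below are made-up values, only meant to show the calling convention.
def _demo_precise_inst_ht():
    ht, sd = precise_inst_ht([92.0, 95.0, 98.0], spacing=1.0, offset=0.2)
    assert ht > 0.0 and sd >= 0.0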
def _parse_transform_set(transform_dict, imputer_string, n_images=None):
"""Parse a dictionary read from yaml into a TransformSet object
Parameters
----------
transform_dict : dictionary
The dictionary as read from the yaml config file containing config
key-value pairs
imputer_string : string
The name of the imputer (could be None)
n_images : int > 0
The number of images being read in. Required because we need to create
a new image transform for each image
Returns
-------
image_transforms : list
A list of image Transform objects
imputer : Imputer
An Imputer object
global_transforms : list
A list of global Transform objects
"""
image_transforms = []
global_transforms = []
if imputer_string in _imputers:
imputer = _imputers[imputer_string]()
else:
imputer = None
if transform_dict is not None:
for t in transform_dict:
if type(t) is str:
t = {t: {}}
key, params = list(t.items())[0]
if key in _image_transforms:
image_transforms.append([_image_transforms[key](**params)
for k in range(n_images)])
elif key in _global_transforms:
global_transforms.append(_global_transforms[key](**params))
return image_transforms, imputer, global_transforms | 47e3bf72c9e70bff22bebee7e73a14c349761116 | 16,365 |
import json
import random
def initialize_train_test_dataset(dataset):
""" Create train and test dataset by random sampling.
pct: percentage of training
"""
pct = 0.80
if dataset in ['reddit', 'gab']:
dataset_fname = './data/A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech-master/' + dataset + '.csv'
xlist, ylist, zlist = read_EMNLP2019(dataset_fname)
hate_num = 0
for y in ylist:
for i in y.strip('[]').split(', '):
hate_num += 1
X_text, Y_text = [], []
line_num = 0
for x, y, z in zip(xlist, ylist, zlist):
x = x.strip().split('\n')
for i in y.strip('[]').split(', '):
X_text.append('. '.join(x[int(i) - 1].split('. ')[1:]).strip('\t')) # Only the hate speech line.
temp = []
for j in split_response_func(z):
if j.lower() == 'n/a':
continue
temp.append(j)
Y_text.append(temp)
line_num += 1
elif dataset == 'conan':
all_text = [json.loads(line) for line in open('./data/CONAN/CONAN.json', 'r')]
EN_text = [x for x in all_text[0]['conan'] if x['cn_id'][:2] == 'EN']
X_text = [x['hateSpeech'].strip() for x in EN_text]
Y_text = [[x['counterSpeech'].strip()] for x in EN_text]
hate_num = len(X_text)
random_index = [x for x in range(hate_num)]
random.shuffle(random_index)
train_index = sorted(random_index[:int(pct*len(random_index))])
train_x_text = [X_text[i] for i in range(hate_num) if i in train_index]
train_y_text = [Y_text[i] for i in range(hate_num) if i in train_index]
test_x_text = [X_text[i] for i in range(hate_num) if i not in train_index]
test_y_text = [Y_text[i] for i in range(hate_num) if i not in train_index]
return train_x_text, train_y_text, test_x_text, test_y_text | bac5876be313a85213badcce667af550e8f3f65a | 16,366 |
def load_raw_data_xlsx(files):
"""
Load data from an xlsx file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the xlsx file
+ ('date_column') the name of the date_column in the raw_data
+ ('time_zone') specifier for the timezone the raw data is recorded in
+ ('sheet_name') name or list of names of the sheets that are to be read
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('start_column') Columns between this and ('end_column') are loaded
+ ('end_column')
"""
print('Importing XLSX Data...')
combined_files = []
individual_files = []
for xlsx_file in files:
print('importing ' + xlsx_file['file_name'])
# if isinstance(file_name, str):
# file_name = [file_name,'UTC']
date_column = xlsx_file['date_column']
raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],
parse_dates=[date_column])
# convert load data to UTC
if(xlsx_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (xlsx_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
if(xlsx_file['data_abs']):
raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()
# rename column IDs, specifically Time, this will be used later as the df index
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
raw_data.head() # now the data is positive and set to UTC
raw_data.info()
# interpolating for missing entries created by asfreq and original missing values if any
raw_data.interpolate(method='time', inplace=True)
if(xlsx_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files))
return individual_files | a2aebdb4d972ef7f46970b3e8fc14ef40ae42bb8 | 16,367 |
def filter_production_hosts(nr):
"""
    Filter the hosts inventory down to the hosts that match the
    production attribute.
:param nr: An initialised Nornir inventory, used for processing.
:return target_hosts: The targeted nornir hosts after being
processed through nornir filtering.
"""
# Execute filter based on hosts being in production
target_hosts = nr.filter(F(production__eq=True))
    # Print separator and header
print("=" * 50)
print("The hosts running in Production are:")
# Iterate over filtered results and printout information
for host, data in target_hosts.inventory.hosts.items():
print(
f"Host: {Fore.CYAN}{host} "
+ Fore.RESET
+ f"- Platform: {Fore.CYAN}{data.platform} "
+ Fore.RESET
+ f"- OS Version: {Fore.CYAN}{data['os_version']} "
+ Fore.RESET
+ f"- Production?: {Fore.CYAN}{data['production']}"
)
    # Print total and separator
print(f"Total: {len(target_hosts.inventory.hosts.items())}")
print("=" * 50)
# Return filtered hosts
return target_hosts | 006524e7b014d3f908955fb81d9f928ac7df25d8 | 16,368 |
import random
def get_lightmap(map_name="random"):
"""
Fetches the right lightmap given command line argument.
"""
assert map_name in ["default", "random"] + list(CONSTANTS.ALL_LIGHTMAPS.keys()), f"Unknown lightmap {map_name}..."
if map_name == "random":
map_name = random.choice(list(CONSTANTS.ALL_LIGHTMAPS.keys()))
elif map_name == "default":
map_name = "Subway_Lights"
lightmap = sl.LightMap(CONSTANTS.ALL_LIGHTMAPS[map_name])
return lightmap | 04ea7e901bbde8ba900469d8ed87b1b3c158809a | 16,369 |
def kill_instance(cook_url, instance, assert_response=True, expected_status_code=204):
"""Kill an instance"""
params = {'instance': [instance]}
response = session.delete(f'{cook_url}/rawscheduler', params=params)
if assert_response:
assert expected_status_code == response.status_code, response.text
return response | 3daa954579b15deedc5a66e77a2178a5682bd1a3 | 16,370 |
def _get_n_batch_from_dataloader(dataloader: DataLoader) -> int:
"""Get a batch number in dataloader.
Args:
dataloader: torch dataloader
Returns:
A batch number in dataloader
"""
n_data = _get_n_data_from_dataloader(dataloader)
n_batch = dataloader.batch_size if dataloader.batch_size else 1
return n_data // n_batch | 182e5566c6b9c83d3dabc3c99f32aedf1e3c21e7 | 16,371 |
def get_hidden() -> list:
"""
Returns places that should NOT be shown in the addressbook
"""
return __hidden_places__ | 8d201c25dd3272b2a3b2292ef3d8fa5293a97967 | 16,372 |
def wait_for_unit_state(reactor, docker_client, unit_name,
expected_activation_states):
"""
Wait until a unit is in the requested state.
:param IReactorTime reactor: The reactor implementation to use to delay.
:param docker_client: A ``DockerClient`` instance.
:param unicode unit_name: The name of the unit.
:param expected_activation_states: Activation states to wait for.
:return: ``Deferred`` that fires when required state has been reached.
"""
def is_in_states(units):
for unit in units:
if unit.name == unit_name:
if unit.activation_state in expected_activation_states:
return True
def check_if_in_states():
responded = docker_client.list()
responded.addCallback(is_in_states)
return responded
return loop_until(reactor, check_if_in_states) | 73278f8762a9b0c5d78ea4d5e098bb7a41b97072 | 16,373 |
def get_list_primitives():
"""Get list of primitive words."""
return g_primitives | 2429b646fbe2fbcc344e08ddffb64ccf2a2d853d | 16,374 |
def make_graph(edge_list, threshold=0.0, max_connections=10):
"""Return 2 way graph from edge_list based on threshold"""
graph = defaultdict(list)
edge_list.sort(reverse=True, key=lambda x: x[1])
for nodes, weight in edge_list:
a, b = nodes
if weight > threshold:
if len(graph[a]) < max_connections:
graph[a].append(gv.connection(b, weight))
if len(graph[b]) < max_connections:
graph[b].append(gv.connection(a, weight))
print(f'Total graph nodes considered : {len(graph.keys())}')
print(f'Total graph connections considered : {sum(map(len, graph.values()))}')
return graph | c9414a0b8df8b9de46ad444b376c5316f1960cd0 | 16,375 |
def ping(request):
"""Ping view."""
checked = {}
for service in services_to_check:
checked[service.name] = service().check()
if all(item[0] for item in checked.values()):
return HttpResponse(
PINGDOM_TEMPLATE.format(status='OK'),
content_type='text/xml',
)
else:
body = PINGDOM_TEMPLATE.format(status='FALSE')
for service_result in filter(lambda x: x[0] is False, checked.values()):
body += COMMENT_TEMPLATE.format(comment=service_result[1])
return HttpResponse(
body,
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
content_type='text/xml',
) | 09b3bd76c59e4d69678a6ce9c3018f638248ff88 | 16,376 |
import numbers
import numpy as np
def _num_samples(x):
"""Return number of samples in array-like x."""
message = 'Expected sequence or array-like, got %s' % type(x)
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError(message)
if hasattr(x, 'shape') and x.shape is not None:
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
            return x.shape[0]
    return len(x) | 18133457621ec7c79add6d0ff9ab8b1b0c17d524 | 16,377
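# Illustrative usage sketch (added; not part of the original snippet).
def _demo_num_samples():
    assert _num_samples(np.zeros((7, 3))) == 7   # arrays report their first dimension
    assert _num_samples([1, 2, 3]) == 3          # plain sequences fall back to len()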
def inf_compress_idb(*args):
"""
inf_compress_idb() -> bool
"""
return _ida_ida.inf_compress_idb(*args) | fd4ef3c50b9fef7213d9f37a0326f5e9f06b9822 | 16,378 |
def tokens_history(corpus_id):
""" History of changes in the corpus
:param corpus_id: ID of the corpus
"""
corpus = Corpus.query.get_or_404(corpus_id)
tokens = corpus.get_history(page=int_or(request.args.get("page"), 1), limit=int_or(request.args.get("limit"), 20))
return render_template_with_nav_info('main/tokens_history.html', corpus=corpus, tokens=tokens) | d87e4486cb2141b3c59e86a3483f4c445476ca20 | 16,379 |
def find_pattern_clumps(
text: str, substring_length: int, window_length: int, minimum_frequency: int
):
"""TODO: [summary]
Returns:
[type]: [description]
"""
patterns = set()
for index in range(len(text) - window_length + 1):
window = text[index : index + window_length]
freq_map = get_frequency_map(text=window, substring_length=substring_length)
for key, value in freq_map.items():
if value >= minimum_frequency:
patterns.add(key)
return patterns | 28b102a563008a297a7d1815f99c01211081a86a | 16,380 |
def Hidden(request):
"""
Hidden Field with a visible friend..
"""
schema = schemaish.Structure()
schema.add('Visible', schemaish.String())
schema.add('Hidden', schemaish.String())
form = formish.Form(schema, 'form')
form['Hidden'].widget = formish.Hidden()
return form | 3f5d96339c39c7cf186d4d45d837b0e95402d328 | 16,381 |
def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int):
"""
Objective function for the Optuna `Study` to maximize.
.. note::
Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
:param trial: Optuna Trial object for hyper-parameter optimization
:param study_dir: the parent directory for all trials in this study
:param seed: seed value for the random number generators, pass `None` for no seeding
:return: objective function value
"""
# Synchronize seeds between Optuna trials
pyrado.set_seed(seed)
# Load the data
data_set_name = "oscillation_50Hz_initpos-0.5"
data = pd.read_csv(osp.join(pyrado.PERMA_DIR, "time_series", f"{data_set_name}.csv"))
if data_set_name == "daily_min_temperatures":
data = to.tensor(data["Temp"].values, dtype=to.get_default_dtype()).view(-1, 1)
elif data_set_name == "monthly_sunspots":
data = to.tensor(data["Sunspots"].values, dtype=to.get_default_dtype()).view(-1, 1)
elif "oscillation" in data_set_name:
data = to.tensor(data["Positions"].values, dtype=to.get_default_dtype()).view(-1, 1)
else:
raise pyrado.ValueErr(
given=data_set_name,
eq_constraint="'daily_min_temperatures', 'monthly_sunspots', "
"'oscillation_50Hz_initpos-0.5', or 'oscillation_100Hz_initpos-0.4",
)
# Dataset
data_set_hparam = dict(
name=data_set_name,
ratio_train=0.7,
window_size=trial.suggest_int("dataset_window_size", 1, 100),
standardize_data=False,
scale_min_max_data=True,
)
dataset = TimeSeriesDataSet(data, **data_set_hparam)
# Policy
policy_hparam = dict(
dt=0.02 if "oscillation" in data_set_name else 1.0,
hidden_size=trial.suggest_int("policy_hidden_size", 2, 51),
obs_layer=None,
activation_nonlin=fcn_from_str(
trial.suggest_categorical("policy_activation_nonlin", ["to_tanh", "to_sigmoid"])
),
mirrored_conv_weights=trial.suggest_categorical("policy_mirrored_conv_weights", [True, False]),
conv_out_channels=1,
conv_kernel_size=None,
conv_padding_mode=trial.suggest_categorical("policy_conv_padding_mode", ["zeros", "circular"]),
tau_init=trial.suggest_loguniform("policy_tau_init", 1e-2, 1e3),
tau_learnable=True,
kappa_init=trial.suggest_categorical("policy_kappa_init", [0, 1e-4, 1e-2]),
kappa_learnable=True,
potential_init_learnable=trial.suggest_categorical("policy_potential_init_learnable", [True, False]),
init_param_kwargs=trial.suggest_categorical("policy_init_param_kwargs", [None, dict(bell=True)]),
use_cuda=False,
)
policy = NFPolicy(spec=EnvSpec(act_space=InfBoxSpace(shape=1), obs_space=InfBoxSpace(shape=1)), **policy_hparam)
# Algorithm
algo_hparam = dict(
windowed=trial.suggest_categorical("algo_windowed", [True, False]),
max_iter=1000,
optim_class=optim.Adam,
optim_hparam=dict(
lr=trial.suggest_uniform("optim_lr", 5e-4, 5e-2),
eps=trial.suggest_uniform("optim_eps", 1e-8, 1e-5),
weight_decay=trial.suggest_uniform("optim_weight_decay", 5e-5, 5e-3),
),
loss_fcn=nn.MSELoss(),
)
csv_logger = create_csv_step_logger(osp.join(study_dir, f"trial_{trial.number}"))
algo = TSPred(study_dir, dataset, policy, **algo_hparam, logger=csv_logger)
# Train without saving the results
algo.train(snapshot_mode="latest", seed=seed)
# Evaluate
num_init_samples = dataset.window_size
_, loss_trn = TSPred.evaluate(
policy,
dataset.data_trn_inp,
dataset.data_trn_targ,
windowed=algo.windowed,
num_init_samples=num_init_samples,
cascaded=False,
)
_, loss_tst = TSPred.evaluate(
policy,
dataset.data_tst_inp,
dataset.data_tst_targ,
windowed=algo.windowed,
num_init_samples=num_init_samples,
cascaded=False,
)
return loss_trn | 07e1cff3ab9954172ce4c09f673881109df6f08c | 16,382 |
import numpy as np
def random_active_qubits(nqubits, nmin=None, nactive=None):
"""Generates random list of target and control qubits."""
all_qubits = np.arange(nqubits)
np.random.shuffle(all_qubits)
if nactive is None:
nactive = np.random.randint(nmin + 1, nqubits)
return list(all_qubits[:nactive]) | c9bab4d02a0afc569907c6ec838d0020878a345a | 16,383 |
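# Illustrative usage sketch (added; not part of the original snippet).
def _demo_random_active_qubits():
    qubits = random_active_qubits(5, nactive=3)
    assert len(qubits) == 3 and len(set(qubits)) == 3   # three distinct qubits
    assert all(0 <= q < 5 for q in qubits)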
import re
import requests
import random
import hashlib
from bs4 import BeautifulSoup
def main(host: str, username: str, password: str):
"""メイン.
Args:
host: ホスト名又はIPアドレス
username: ユーザ名
password: パスワード
"""
url: str = f"http://{host}/"
rlogintoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Logintoken\", *\"(\d+)\"\)")
rloginchecktoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Loginchecktoken\", *\"(\d+)\"\)")
s: requests.Session = requests.Session()
res: requests.Response = s.get(url)
m: typ.Optional[re.Match] = rlogintoken.search(res.text)
if m is None:
print("error 1")
return 1
logintoken: str = m[1]
m = rloginchecktoken.search(res.text)
if m is None:
print("error 2")
return 2
loginchecktoken: str = m[1]
pwd_random: int = round(random.random() * 89999999) + 10000000
before_password = hashlib.md5(f"{password}{pwd_random}".encode("utf-8")).hexdigest()
params: typ.Dict[str, str] = {}
params["action"] = "login"
params["Username"] = username
params["Password"] = before_password
params["Frm_Logintoken"] = logintoken
params["UserRandomNum"] = str(pwd_random)
params["Frm_Loginchecktoken"] = loginchecktoken
res2: requests.Response = s.post(url, data=params, allow_redirects=False)
if res2.status_code != 302:
print("error 3")
return 3
res3: requests.Response = s.get(f"{url}getpage.gch?pid=1002&nextpage=pon_status_lan_link_info_t.gch")
if res3.status_code != 200:
print("error 4")
return 4
columns: typ.List[str] = [
"ポート名",
"受信したデータ量(byte)",
"受信したパケットの総数",
"マルチキャストパケットの受信数",
"ブロードキャストパケットの受信数",
"送信したデータ量(byte)",
"送信されたパケットの総数",
"マルチキャストパケットの送信数",
"ブロードキャストパケットの送信数",
]
indexdic: typ.Dict[str, int] = {}
for i, c in enumerate(columns):
indexdic[c] = i
print(", ".join(columns))
soup = BeautifulSoup(res3.text, "html.parser")
index: int = -1
values: typ.List = []
for td in soup.find_all("td"):
if index != -1:
values[index] = td.text.strip()
index = -1
else:
index = indexdic.get(td.text.strip(), -1)
if index == 0:
if len(values) > 0:
print(", ".join(values))
values = [""] * len(columns)
if len(values) > 0:
print(", ".join(values)) | 36efbd8dc18b891934f690091ef8709e0eddb3ce | 16,384 |
from typing import List
import requests
def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session()) -> List[dict]:
"""
Create a Label and associate it with templates.
If no templates are specified, the label is associated with the first default template of the project.
:param project_id: Project ID where to create the label
:param label_name: Name for the label
:param templates: Templates that use the label
:param session: Session to connect to the server
:return: Label ID in the Konfuzio APP.
"""
url = get_create_label_url()
if len(templates) == 0:
prj_templates = get_project_templates()
default_template = [t for t in prj_templates if t['is_default']][0]
templates_ids = [default_template['id']]
else:
templates_ids = [template.id for template in templates]
data = {"project": project_id, "text": label_name, "templates": templates_ids}
r = session.post(url=url, data=data)
assert r.status_code == requests.codes.created, f'Status of request: {r}'
label_id = r.json()['id']
return label_id | 4dda5f7ac6473be76212c03deb6beb7980b44105 | 16,385 |
def home():
""" Home page """
return render_template("index.html") | 0ac607593cc98871d97c111fc2ca89aa980af83f | 16,386 |
import argparse
def get_argparser():
"""Argument parser"""
parser = argparse.ArgumentParser(description='Bort pretraining example.')
parser.add_argument('--num_steps', type=int, default=20,
help='Number of optimization steps')
parser.add_argument('--num_eval_steps', type=int,
default=None, help='Number of eval steps')
parser.add_argument('--num_buckets', type=int, default=10,
help='Number of buckets for variable length sequence sampling')
parser.add_argument('--dtype', type=str,
default='float16', help='data dtype')
parser.add_argument('--batch_size', type=int,
default=8, help='Batch size per GPU.')
parser.add_argument('--accumulate', type=int, default=1,
help='Number of batches for gradient accumulation. '
'The effective batch size = batch_size * accumulate.')
parser.add_argument('--use_avg_len', action='store_true',
help='Use average length information for the bucket sampler. '
'The batch size is approximately the number of tokens in the batch')
parser.add_argument('--batch_size_eval', type=int, default=8,
help='Batch size per GPU for evaluation.')
parser.add_argument('--dataset_name', type=str, default='book_corpus_wiki_en_uncased',
choices=['book_corpus_wiki_en_uncased', 'book_corpus_wiki_en_cased',
'wiki_multilingual_uncased', 'wiki_multilingual_cased',
'wiki_cn_cased', 'openwebtext_ccnews_stories_books_cased'],
help='The pre-defined dataset from which the vocabulary is created. '
'Default is book_corpus_wiki_en_uncased.')
parser.add_argument('--pretrained', action='store_true',
help='Load the pretrained model released by Google.')
parser.add_argument('--model', type=str, default='bort_4_8_768_1024',
choices=[b for b in bort.predefined_borts.keys()],
help='Model to run pre-training on. ')
parser.add_argument('--teacher_model', type=str, default='roberta_24_1024_16',
help='Model to run as teacher on. '
'Options are bert_12_768_12, bert_24_1024_16, roberta_24_1024_16, roberta_12_768_12, '
'others on https://gluon-nlp.mxnet.io/model_zoo/bert/index.html')
parser.add_argument('--teacher_ckpt_dir', type=str, default=None,
help='Path to teacher checkpoint directory')
parser.add_argument('--teacher_ce_weight', type=float, default=0.0, help='weight to mix teacher_ce_loss with '
'mlm_loss: should be in range (0,1)')
parser.add_argument('--distillation_temperature', type=float, default=1.0, help='temperature for teacher/student '
'distillation')
parser.add_argument('--mlm_weight', type=float, default=1.0, help='weight to mix teacher_ce_loss with mlm_loss: '
'should be in range (0,1)')
parser.add_argument('--data', type=str, default=None,
help='Path to training data. Training is skipped if not set.')
parser.add_argument('--data_eval', type=str, required=True,
help='Path to evaluation data. Evaluation is skipped if not set.')
parser.add_argument('--ckpt_dir', type=str, default='./ckpt_dir',
help='Path to checkpoint directory')
parser.add_argument('--start_step', type=int, default=0,
help='Start optimization step from the checkpoint.')
parser.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
parser.add_argument('--warmup_ratio', type=float, default=0.01,
help='ratio of warmup steps used in NOAM\'s stepsize schedule')
parser.add_argument('--log_interval', type=int,
default=250, help='Report interval')
parser.add_argument('--ckpt_interval', type=int,
default=1000, help='Checkpoint interval')
parser.add_argument('--verbose', action='store_true',
help='verbose logging')
parser.add_argument('--profile', type=str, default=None,
help='output profiling result to the target file')
parser.add_argument('--cpu_only', action='store_true',
help='force to only use cpu')
return parser | 2f35f40fc8276b6895ac16ef91187b6d0f60551f | 16,387 |
def get_from_parameterdata_or_dict(params,key,**kwargs):
"""
Get the value corresponding to a key from an object that can be either
a ParameterData or a dictionary.
:param params: a dict or a ParameterData object
:param key: a key
:param default: a default value. If not present, and if key is not
present in params, a KeyError is raised, as in params[key]
:return: the corresponding value
"""
if isinstance(params,ParameterData):
params = params.get_dict()
if 'default' in kwargs:
return params.get(key,kwargs['default'])
else:
return params[key] | 864936e9b43c18e4a8dfd7d88c1cedda28fdb23d | 16,388 |
import torch
def test_input_type(temp_files, fsdp_config, input_cls):
"""Test FSDP with input being a list or a dict, only single GPU."""
if torch_version() < (1, 7, 0):
# This test runs multiple test cases in a single process. On 1.6.0 it
# throw an error like this:
# RuntimeError: Container is already initialized! Cannot initialize it twice!
pytest.skip("older pytorch doesn't work well with single process dist_init multiple times")
result = dist_init(rank=0, world_size=1, filename=temp_files[0], filename_rpc=temp_files[1])
assert result, "Dist init failed"
assert isinstance(fsdp_config, dict), str(fsdp_config)
class Model(Module):
def __init__(self):
super().__init__()
self.layer = Linear(4, 4)
def forward(self, input):
if isinstance(input, list):
input = input[0]
else:
assert isinstance(input, dict), input
input = input["in"]
return self.layer(input)
model = FSDP(Model(), **fsdp_config).cuda()
optim = SGD(model.parameters(), lr=0.1)
for _ in range(5):
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
if input_cls is list:
in_data = [in_data]
else:
assert input_cls is dict
in_data = {"in": in_data}
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
model.assert_state(TrainingState.IDLE)
teardown() | 6bf7d03f51088518e85d3e6ea8f59bcc86e4a0b4 | 16,389 |
def get_heroesplayed_players(matchs_data, team_longname):
"""Returns a dict linking each player to
- the heroes he/she played
- if it was a win (1) or a loss (0)
"""
picks = get_picks(matchs_data, team_longname)
players = get_players(picks)
results = get_results(matchs_data, team_longname)
heroes_played = {item: [[], []] for item in players}
for pl in players:
i = 0
for rd in picks:
if pl in rd.keys():
heroes_played[pl][0].append(rd[pl])
if results[i] == 1:
heroes_played[pl][1].append(1)
else:
heroes_played[pl][1].append(0)
i += 1
return heroes_played | 53dc68642a4cca7b80ede7b2d54098eb9274b1af | 16,390 |
def autofmt(filename, validfmts, defaultfmt=None):
"""Infer the format of a file from its filename. As a convention all the
format to be forced with prefix followed by a colon (e.g. "fmt:filename").
`validfmts` is a list of acceptable file formats
`defaultfmt` is the format to use if the extension is not on the valid list
returns `filename`,`fmt`
"""
colonix = filename.find(":")
if colonix != -1:
extension = filename[:colonix]
filename = filename[(colonix+1):]
else:
extension = None
for validfmt in validfmts:
if filename.endswith(validfmt):
extension = filename[-len(validfmt):]
return filename, (extension.lower() if extension in validfmts else defaultfmt) | 3e39325f43f8b4a87074a38f7d576d17669151fb | 16,391 |
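# Illustrative usage sketch (added; not part of the original snippet).
def _demo_autofmt():
    # a "csv:" prefix forces the format even though the file ends in .txt
    assert autofmt("csv:data.txt", ["csv", "json"]) == ("data.txt", "csv")
    # without a prefix the extension is matched against the valid formats
    assert autofmt("data.json", ["csv", "json"]) == ("data.json", "json")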
def get_or_add_dukaan():
""" Add a new business """
if request.method == "POST":
payload = request.json
# payload = change_case(payload, "lower")
business = db.dukaans.find_one({"name": payload["name"]})
if business is not None:
return (
jsonify(
{
"success": False,
"message": "Business name already exists, Please choose another name.",
}
),
400,
)
for required_key in business_schema:
if required_key not in payload.keys():
return jsonify({"message": f"Missing {required_key} parameter"}), 400
db.dukaans.insert_one(payload)
return jsonify({"success": True, "dukaan": clean_dict_helper(payload)}), 201
dukaans = list(db.dukaans.find({}).limit(5))
for dukaan in dukaans:
if len(dukaan.get("categories", [])) > 0:
dukaan["categories"] = [
db.categories.find_one({"_id": ObjectId(_id)})["name"]
for _id in dukaan["categories"]
]
ratings = list(db.ratings.find({"business": str(dukaan["_id"])}, {"rating": 1}))
if len(ratings) > 0:
ratings_sum = sum([r["rating"] for r in ratings])
dukaan["avg_rating"] = float(ratings_sum) / float(len(ratings))
else:
dukaan["avg_rating"] = 0.0
return jsonify({"success": True, "dukaans": clean_dict_helper(dukaans)}) | e522ac8394b7b70949e2854e10251f3bc51279ae | 16,392 |
import numpy as np
def nearest(a, num):
"""
Finds the array's nearest value to a given num.
Args:
a (ndarray): An array.
num (float): The value to find the nearest to.
Returns:
        float. The value in the array nearest to num.
"""
a = np.array(a, dtype=float)
return a.flat[np.abs(a - num).argmin()] | cadbad68add910ced502a6802592d1c043f1c914 | 16,393 |
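# Illustrative usage sketch (added; not part of the original snippet).
def _demo_nearest():
    assert nearest([1, 3, 7], 5.2) == 7.0   # 7 is the closest value to 5.2
    assert nearest([1, 3, 7], 2.0) == 1.0   # exact ties resolve to the first occurrence (argmin)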
def hex_string(data):
"""Return a hex dump of a string as a string.
The output produced is in the standard 16 characters per line hex +
ascii format:
00000000: 40 00 00 00 00 00 00 00 40 00 00 00 01 00 04 80 @....... @.......
00000010: 01 01 00 00 00 00 00 01 00 00 00 00 ........ ....
"""
pos = 0 # Position in data
line = 0 # Line of data
hex = "" # Hex display
ascii = "" # ASCII display
result = ""
while pos < len(data):
# Start with header
if pos % 16 == 0:
hex = "%08x: " % (line * 16)
ascii = ""
# Add character
hex = hex + "%02x " % (ord(data[pos]))
if ord(data[pos]) < 32 or ord(data[pos]) > 176:
ascii = ascii + '.'
else:
ascii = ascii + data[pos]
pos = pos + 1
# Add separator if half way
if pos % 16 == 8:
hex = hex + " "
ascii = ascii + " "
# End of line
if pos % 16 == 0:
result = result + "%s %s\n" % (hex, ascii)
line = line + 1
# Leftover bits
if pos % 16 != 0:
# Pad hex string
for i in range(0, (16 - (pos % 16))):
hex = hex + " "
# Half way separator
if (pos % 16) < 8:
hex = hex + " "
result = result + "%s %s\n" % (hex, ascii)
return result | 7f827b4f8049b43e86d35bd972f5b6aaa2190869 | 16,394 |
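# Illustrative usage sketch (added; not part of the original snippet). Exact spacing of
# the dump is not asserted, only the hex and ascii columns.
def _demo_hex_string():
    dump = hex_string("ABC")
    assert dump.startswith("00000000:")
    assert "41 42 43" in dump                 # hex bytes for "ABC"
    assert dump.rstrip().endswith("ABC")      # ascii column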
import os
import re
import numpy as np
from scipy.io import loadmat
def load_codes_mat(backup_dir, savefile=False, thread_num=1):
""" load all the code mat file in the experiment folder and summarize it into nparrays"""
if "codes_all.npz" in os.listdir(backup_dir):
# if the summary table exist, just read from it!
with np.load(os.path.join(backup_dir, "codes_all.npz")) as data:
codes_all = data["codes_all"]
generations = data["generations"]
return codes_all, generations
codes_fns = sorted([fn for fn in os.listdir(backup_dir) if "_code.mat" in fn])
codes_all = []
img_ids = []
for i, fn in enumerate(codes_fns[:]):
matdata = loadmat(os.path.join(backup_dir, fn))
codes_all.append(matdata["codes"])
img_ids.extend(list(matdata["ids"]))
codes_all = np.concatenate(tuple(codes_all), axis=0)
img_ids = np.concatenate(tuple(img_ids), axis=0)
img_ids = [img_ids[i][0] for i in range(len(img_ids))]
generations = [int(re.findall("gen(\d+)", img_id)[0]) if 'gen' in img_id else -1 for img_id in img_ids]
if savefile:
np.savez(os.path.join(backup_dir, "codes_all.npz"), codes_all=codes_all, generations=generations)
return codes_all, generations | 1d368c3b4b337de5a9b42853eda7e30a0826a8b4 | 16,395 |
import numpy as np
import numpy.typing as npt
def extract_ego_time_point(history: SimulationHistory) -> npt.NDArray[int]:
"""
Extract time point in simulation history.
:param history: Simulation history.
:return An array of time in micro seconds.
"""
time_point = np.array(
[sample.ego_state.time_point.time_us for sample in history.data]
)
return time_point | 4860b2c7032ea232ace2680c704e4a59051b6c5c | 16,396 |
def compare_data_identifiers(a, b):
"""Checks if all the identifiers match, besides those that are not in both lists"""
a = {tuple(key): value for key, value in a}
b = {tuple(key): value for key, value in b}
matching_keys = a.keys() & b.keys()
a = {k: v for k, v in a.items() if k in matching_keys}
b = {k: v for k, v in b.items() if k in matching_keys}
return a == b | f0f5f08e4cc685b62b2af19e0c724561988ed1b9 | 16,397 |
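# Illustrative usage sketch (added; not part of the original snippet).
def _demo_compare_data_identifiers():
    a = [[[1, 2], "x"], [[3, 4], "y"]]
    b = [[[1, 2], "x"], [[5, 6], "z"]]
    assert compare_data_identifiers(a, b) is True              # only the shared key (1, 2) is compared
    assert compare_data_identifiers(a, [[[1, 2], "other"]]) is False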
import re
def expand_abbr(abbr, doc_type = 'html'):
"""
    Expands the abbreviation
    @param abbr: Abbreviation
@type abbr: str
@return: str
"""
tree = parse_into_tree(abbr, doc_type)
if tree:
result = tree.to_string(True)
if result:
result = re.sub('\|', insertion_point, result, 1)
return re.sub('\|', sub_insertion_point, result)
return '' | 23d0edebd9660303d4c361b468c5cb2f6e0e0f03 | 16,398 |
def SaveSettings (event=None, SettingsNotebook=None, filename = "settings.hdf5", title="Open HDF5 file to save settings", OpenDialog=True ) :
"""
	Method for saving settings
"""
if OpenDialog :
# Ask user to select the file
openFileDialog = wx.FileDialog(SettingsNotebook, title, "", filename, "HDF5 files (*.hdf5)|*.hdf5",
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT | wx.FD_CHANGE_DIR)
# Check whether user cancelled
if openFileDialog.ShowModal() == wx.ID_CANCEL: return None
filename = openFileDialog.GetPath()
with h5py.File (filename, 'a') as file_settings :
		# Create the group if it does not exist
try : parameters_grp = file_settings["settings"]
except KeyError : parameters_grp = file_settings.create_group("settings")
		# Loop over all settings tabs
for SettingsTabName, SettingsTab in SettingsNotebook.settings_to_tabs.items() :
# Save all settings on a given tab
try : del parameters_grp[SettingsTabName]
except KeyError : pass
grp = parameters_grp.create_group(SettingsTabName)
for key, value in SettingsTab.GetSettings().items() : grp[key] = value
# return valid filename
return filename | 7e2a221c78ef78f542877a754034084ed8dd8492 | 16,399 |