content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def _column_sel_dispatch(columns_to_select, df): # noqa: F811
"""
Base function for column selection.
Applies only to slices.
The start slice value must be a string or None;
same goes for the stop slice value.
The step slice value should be an integer or None.
A slice, if passed correctly in a Multindex column,
returns a list of tuples across all levels of the
column.
A list of column names is returned.
"""
df_columns = df.columns
filtered_columns = None
start_check = None
stop_check = None
step_check = None
if not df_columns.is_unique:
raise ValueError(
"""
The column labels are not unique.
Kindly ensure the labels are unique
to ensure the correct output.
"""
)
start, stop, step = (
columns_to_select.start,
columns_to_select.stop,
columns_to_select.step,
)
start_check = any((start is None, isinstance(start, str)))
stop_check = any((stop is None, isinstance(stop, str)))
step_check = any((step is None, isinstance(step, int)))
if not start_check:
raise ValueError(
"""
The start value for the slice
must either be a string or `None`.
"""
)
if not stop_check:
raise ValueError(
"""
The stop value for the slice
must either be a string or `None`.
"""
)
if not step_check:
raise ValueError(
"""
The step value for the slice
must either be an integer or `None`.
"""
)
start_check = any((start is None, start in df_columns))
stop_check = any((stop is None, stop in df_columns))
if not start_check:
raise ValueError(
"""
The start value for the slice must either be `None`
or exist in the dataframe's columns.
"""
)
if not stop_check:
raise ValueError(
"""
The stop value for the slice must either be `None`
or exist in the dataframe's columns.
"""
)
if start is None:
start = 0
else:
start = df_columns.get_loc(start)
if stop is None:
stop = len(df_columns) + 1
else:
stop = df_columns.get_loc(stop)
if start > stop:
filtered_columns = df_columns[slice(stop, start + 1, step)][::-1]
else:
filtered_columns = df_columns[slice(start, stop + 1, step)]
df_columns = None
return [*filtered_columns] | 177fd2f84884e068b08b509037788c998c026502 | 3,657,800 |
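# Usage sketch for _column_sel_dispatch above (illustration only, not part of the original
# record): selecting the labels between two named columns with a slice.
import pandas as pd
example_df = pd.DataFrame(columns=["a", "b", "c", "d"])
_column_sel_dispatch(slice("b", "d"), example_df)  # -> ["b", "c", "d"]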
def create_or_update(*, db_session, monitor_in: MonitorCreate) -> Monitor:
"""Creates or updates a monitor."""
monitor = get_by_weblink(db_session=db_session, weblink=monitor_in.weblink)
if monitor:
monitor = update(db_session=db_session, monitor=monitor, monitor_in=monitor_in)
else:
monitor = create(db_session=db_session, monitor_in=monitor_in)
return monitor | 08057b527fc6bf9fc36b66133b78a88b63e05d34 | 3,657,801 |
import itertools
import numpy as np
def get_record_map(index_array, true_false_ratio):
"""Get record map.
:param index_array: the indexes of the images
:type index_array: numpy array
:param true_false_ratio: the number of occurrences of true cases over the number of occurrences of false cases
:type true_false_ratio: int or float
:return: record_index_pair_array refers to the indexes of the image pairs,
while record_index_pair_label_array refers to whether these two images represent the same person.
:rtype: tuple
"""
# Generate record_index_pair_array and record_index_pair_label_array
record_index_pair_list = []
record_index_pair_label_list = []
for record_index_1, record_index_2 in itertools.combinations(
range(index_array.size), 2):
record_index_pair_list.append((record_index_1, record_index_2))
record_index_pair_label_list.append(
index_array[record_index_1] == index_array[record_index_2])
record_index_pair_array = np.array(record_index_pair_list)
record_index_pair_label_array = np.array(record_index_pair_label_list)
# Do not need sampling
if true_false_ratio is None:
return (record_index_pair_array, record_index_pair_label_array)
# Perform sampling based on the true_false_ratio
pair_label_true_indexes = np.where(record_index_pair_label_array)[0]
pair_label_false_indexes = np.where(~record_index_pair_label_array)[0]
    selected_pair_label_false_indexes = np.random.choice(
        pair_label_false_indexes,
        int(pair_label_true_indexes.size / true_false_ratio),
        replace=False)
selected_pair_label_indexes = np.hstack(
(pair_label_true_indexes, selected_pair_label_false_indexes))
return (record_index_pair_array[selected_pair_label_indexes, :],
record_index_pair_label_array[selected_pair_label_indexes]) | 1a610b8842ac3259322cd0ef613bee3d305d3014 | 3,657,802 |
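# Usage sketch for get_record_map above (illustration only): four images of two people
# yield six index pairs; with true_false_ratio=1 the false pairs are downsampled to match
# the two true pairs, so four pairs are returned.
example_pairs, example_labels = get_record_map(np.array([0, 0, 1, 1]), true_false_ratio=1)
# example_pairs.shape == (4, 2); example_labels holds two True and two False entries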
def sdfGetMolBlock(mol):
"""
sdfGetMolBlock() returns the MOL block of the molecule
"""
return mol["molblock"] | 399874a696f30f492ee878ef661094119bd5f96f | 3,657,803 |
import inspect
def get_methods(klass):
"""Get all methods, include regular, static, class method.
"""
methods = list()
attributes = get_attributes(klass)
    for key, value in inspect.getmembers(klass):
if (not (key.startswith("__") and key.endswith("__"))) and \
(key not in attributes):
methods.append(key)
return methods | 1b4ada9c3fed32e09fa25434e2c547e5d8f81ca8 | 3,657,804 |
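# Usage sketch for get_methods above (illustration only; get_attributes is assumed to be a
# sibling helper that returns the non-callable data attributes of the class).
class Greeter:
    greeting = "hi"
    def hello(self):
        return self.greeting
    @staticmethod
    def version():
        return 1
    @classmethod
    def build(cls):
        return cls()
get_methods(Greeter)  # -> ['build', 'hello', 'version']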
import math
import numpy as np
import pandas as pd
def get_sample_column(table_file_name, sample_name, sex='U'):
"""
Get a VCF column as a Pandas Series for one sample.
:param table_file_name: Name of genotyped features table file output by the genotyper after applying the genotyping
model and annotating the genotype for no-call variants.
    :param sex: "M", "F", or "U" depending on whether the sample is male, female, or unknown. For males, chrX is never het.
        For females, chrY is absent. For males or unknown, chrY is never het.
:param sample_name: Name of the sample. This is saved to the name of the returned Series object.
:return: Series of a column to add to the VCF for one genotyped sample.
"""
df_gt = pd.read_csv(
table_file_name, sep='\t', header=0,
usecols=('#CHROM', 'CALLABLE', 'BP_REF_COUNT', 'BP_ALT_COUNT', 'HOM_REF', 'HET', 'HOM_ALT')
)
df_gt = df_gt.loc[:, ('#CHROM', 'CALLABLE', 'BP_REF_COUNT', 'BP_ALT_COUNT', 'HOM_REF', 'HET', 'HOM_ALT')]
# Adjust density estimates on sex
if sex == 'M':
adjust_chrx_for_males(df_gt)
adjust_chry(df_gt, False)
elif sex == 'F':
adjust_chry(df_gt, True)
elif sex == 'U':
adjust_chry(df_gt, False)
# Set genotype (GT), genotype quality (GQ), and genotype likelihood (GL)
df_gt['CLASS'] = df_gt.apply(
lambda row: str(np.argmax(row[['HOM_REF', 'HET', 'HOM_ALT']])) if row['CALLABLE'] else 'NO_CALL',
axis=1
)
df_gt['GT'] = df_gt['CLASS'].apply(lambda gt_class: GENOTYPE_TO_GT[gt_class])
df_gt['GQ'] = df_gt.apply(
lambda row: (
int(10 * -math.log10(1 - row[row['CLASS']])) if row[row['CLASS']] < 1 else 255
) if row['CALLABLE'] else '.',
axis=1
)
df_gt['GL'] = df_gt.apply(lambda row: '{HOM_REF:.4f},{HET:.4f},{HOM_ALT:.4f}'.format(**row), axis=1)
# Get a series representing the column to be added to the VCF
sample_column = df_gt.apply(lambda row: '{GT}:{GQ}:{GL}:{BP_REF_COUNT:.1f}:{BP_ALT_COUNT:.1f}'.format(**row), axis=1)
sample_column.name = sample_name
# Return
return sample_column | 21a0461c9779d71b77fc5a510dcbb3660764d05a | 3,657,805 |
import os
import traceback
def find_plugin_models():
"""
Find custom models
"""
# List of plugin objects
plugins_dir = find_plugins_dir()
# Go through files in plug-in directory
if not os.path.isdir(plugins_dir):
msg = "SasView couldn't locate Model plugin folder %r." % plugins_dir
logger.warning(msg)
return {}
plugin_log("looking for models in: %s" % plugins_dir)
# compile_file(plugins_dir) #always recompile the folder plugin
logger.info("plugin model dir: %s", plugins_dir)
plugins = {}
for filename in os.listdir(plugins_dir):
name, ext = os.path.splitext(filename)
if ext == '.py' and not name == '__init__':
path = os.path.abspath(os.path.join(plugins_dir, filename))
try:
model = load_custom_model(path)
# TODO: add [plug-in] tag to model name in sasview_model
if not model.name.startswith(PLUGIN_NAME_BASE):
model.name = PLUGIN_NAME_BASE + model.name
plugins[model.name] = model
except Exception:
msg = traceback.format_exc()
msg += "\nwhile accessing model in %r" % path
plugin_log(msg)
logger.warning("Failed to load plugin %r. See %s for details",
path, PLUGIN_LOG)
return plugins | 47bc32c5eca743ed200c0d9baa6c43e6446601a7 | 3,657,806 |
def write_to_variable(tensor, fail_if_exists=True):
"""Saves a tensor for later retrieval on CPU."""
if not isinstance(tensor, tf.Tensor):
raise ValueError('Expected tf.Tensor but got {}'.format(type(tensor)))
# Only relevant for debugging.
debug_name = 'tpu_util__' + tensor.name.split(':')[0]
reuse = False if fail_if_exists else tf.compat.v1.AUTO_REUSE
with tf.variable_scope(top_level_scope, reuse=reuse):
variable = tf.get_variable(
name=debug_name,
shape=tensor.shape,
dtype=tensor.dtype,
trainable=False,
use_resource=True)
var_store[tensor] = variable
with tf.control_dependencies([variable.assign(tensor)]):
tensor_copy = tf.identity(tensor)
var_store[tensor_copy] = variable
return tensor_copy | 8c353fecb29730401e0aad8c7fc01107f9793753 | 3,657,807 |
import numpy as np
def is_close(A, B, tol=np.sqrt(_eps)):
"""
Check if two matrices are close in the sense of trace distance.
"""
if tracedist(A, B) < tol:
return True
else:
A[np.abs(A) < tol] = 0.0
B[np.abs(B) < tol] = 0.0
A /= np.exp(1j*np.angle(A[0,0]))
B /= np.exp(1j*np.angle(B[0,0]))
return ((tracedist(A, B) < tol) or (tracedist(A, -1.0*B) < tol)) | 07c81f94d8a131bd097b1739040e761e37ebe998 | 3,657,808 |
import numpy as np
def hist3d_numba_seq_weight(tracks, weights, bins, ranges, use_memmap=False, tmp=None):
"""Numba-compiled weighted 3d histogram
From https://iscinumpy.dev/post/histogram-speeds-in-python/
Parameters
----------
tracks : (x, y, z)
List of input arrays of identical length, to be histogrammed
weights : array-like
List of weights for each point of the input arrays
bins : (int, int, int)
shape of the final histogram
ranges : [[xmin, xmax], [ymin, ymax], [zmin, zmax]]]
Minimum and maximum value of the histogram, in each dimension
Other parameters
----------------
use_memmap : bool
If ``True`` and the number of bins is above 10 million,
the histogram is created into a memory-mapped Numpy array
tmp : str
Temporary file name for the memory map (only relevant if
``use_memmap`` is ``True``)
Returns
-------
histogram: array-like
Output Histogram
Examples
--------
>>> x = np.random.uniform(0., 1., 100)
>>> y = np.random.uniform(2., 3., 100)
>>> z = np.random.uniform(4., 5., 100)
>>> weights = np.random.uniform(0, 1., 100)
>>> H, _ = np.histogramdd((x, y, z), bins=(5, 6, 7),
... range=[(0., 1.), (2., 3.), (4., 5)],
... weights=weights)
>>> Hn = hist3d_numba_seq_weight(
... (x, y, z), weights, bins=(5, 6, 7),
... ranges=[[0., 1.], [2., 3.], [4., 5.]])
>>> assert np.all(H == Hn)
"""
H = _allocate_array_or_memmap(bins, np.double, use_memmap=use_memmap, tmp=tmp)
return _hist3d_numba_seq_weight(
H,
np.asarray(tracks),
weights,
np.asarray(list(bins)),
np.asarray(ranges),
) | aaa60642772310f82eb5d4c69d5f5815220f8058 | 3,657,809 |
def convertTimeQuiet(s):
"""
Converts a time String in the form hh:mm:ss[.nnnnnnnnn] to a long nanoseconds offset from Epoch.
:param s: (java.lang.String) - The String to convert.
:return: (long) QueryConstants.NULL_LONG if the String cannot be parsed, otherwise long nanoseconds offset from Epoch.
"""
return _java_type_.convertTimeQuiet(s) | e80f847da06d035f12257e80b290beec06ad801f | 3,657,810 |
from typing import Optional
def pop_first_non_none_value(
*args,
msg: Optional[str] = "default error msg",
):
"""
Args:
args: a list of python values
Returns:
return the first non-none value
"""
for arg in args:
if arg is not None:
return arg
raise ValueError(f"{msg} can't find non-none value") | 917a6b0546d4bda09f5abe53c1fb0085f3f225ae | 3,657,811 |
from itertools import groupby
def all_equal(iterable):
"""
Returns True if all the elements are equal.
Reference:
Add "equal" builtin function
https://mail.python.org/pipermail/python-ideas/2016-October/042734.html
"""
groups = groupby(iterable)
return next(groups, True) and not next(groups, False) | 819f94bb02e70999d00c45b8c11d9eaf80f43fac | 3,657,812 |
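# Usage sketch for all_equal above (illustration only):
all_equal([2, 2, 2])  # True
all_equal([2, 2, 3])  # False
all_equal([])         # True (vacuously, as in the referenced thread)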
import re
def is_str(required=False, default=None, min_len=None, max_len=None, pattern=None):
"""
    Returns a function that when invoked with a given input asserts that the input is a valid string
    and that it meets the specified criteria. All text is automatically stripped of both leading and trailing
    whitespace.
:param required: False by default.
:param default: default value to be used when value is `None` (or missing).
:param min_len: the minimum length allowed. Setting this to 1 effectively rejects empty strings
:param max_len: the maximum length allowed. Strings longer than this will be rejected
:param pattern: a valid python regex pattern. Define your patterns carefully with regular expression
attacks in mind.
:return: A callable that when invoked with an input will check that it meets the criteria defined above or raise
             a validation exception otherwise. It returns the newly validated input on success.
"""
if pattern:
# compile pattern once and reuse for all validations
compiled_pattern = re.compile(pattern)
# noinspection PyShadowingBuiltins
def func(input):
input = input or default
if required and input is None:
raise ValidationException('required but was missing')
if not required and input is None:
return default
input = str(input).strip()
if min_len is not None and len(input) < min_len:
raise ValidationException("'{}' is shorter than minimum required length({})".format(input, min_len))
if max_len is not None and len(input) > max_len:
raise ValidationException("'{}' is longer than maximum required length({})".format(input, max_len))
if pattern and compiled_pattern.match(input) is None:
raise ValidationException("'{}' does not match expected pattern({})".format(input, pattern))
return input
return func | 22002a220b8e0f4db53ba0ffd6bb6c2c0c8a599d | 3,657,813 |
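# Usage sketch for is_str above (illustration only; ValidationException is assumed to be
# defined elsewhere in the same module): build the validator once, then apply it to input.
validate_name = is_str(required=True, min_len=1, max_len=10)
validate_name("  Ada ")  # -> "Ada" (stripped of surrounding whitespace)
# validate_name(None) would raise ValidationException('required but was missing')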
from typing import OrderedDict
import os
def load_property_file(filename):
"""
Loads a file containing x=a,b,c... properties separated by newlines, and
returns an OrderedDict where the key is x and the value is [a,b,c...]
:param filename:
:return:
"""
props = OrderedDict()
if not os.path.exists(filename):
return props
with open(filename) as f:
for line in f:
line = line.strip().split("=", 1)
if len(line) != 2:
continue
props[line[0].strip()] = line[1].split(",")
_log.debug("Read property file:\n%s", props)
return props | f3cedf1d475ffe04fbefc6ddeb6d8b41101b6f91 | 3,657,814 |
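# Usage sketch for load_property_file above (illustration only; the module-level _log
# logger is assumed to exist): a two-line properties file parses into an OrderedDict.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".properties", delete=False) as tmp:
    tmp.write("hosts=alpha,beta\nports=8080\n")
load_property_file(tmp.name)
# -> OrderedDict([('hosts', ['alpha', 'beta']), ('ports', ['8080'])])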
# Imports inferred from the aliases used below (np, opt, mt, unc).
import numpy as np
import scipy.optimize as opt
import sklearn.metrics as mt
import uncertainties as unc
def fit_curve(df, country, status, start_date, end_date=None):
    """
    Fit a sigmoid curve to the time series of a given country and status.

    Parameters:
        df (DataFrame): data indexed by (country, status) with dates as columns
        country (str): country to select
        status (str): status label to select (second level of the index)
        start_date, end_date: date range used for the fit

    Returns:
        x0, a, b: fitted sigmoid coefficients (as correlated values), plus a dict
        holding the R² score, coefficients, covariance and the data used for the fit
    """
# Select the data
slc_date = slice(start_date, end_date)
y_data = df.loc[(country, status), slc_date].groupby(
CTRY_K).sum().values[0]
# Generate a dummy x_data
x_data = np.arange(0, y_data.shape[0])
# Set initial guesses for the curve fit
x0_0 = x_data[np.where(y_data > 0)[0][0]] # Day of the first case
a_0 = y_data.max() # Current number of cases
b_0 = 0.1 # Arbitrary
p0 = [x0_0, a_0, b_0]
# Fit the curve
popt, pcov = opt.curve_fit(sig, x_data, y_data, p0=p0)
# Evaluate the curve fit to calculate the R²
y_fit = sig(x_data, *popt)
r2 = mt.r2_score(y_data, y_fit)
# Estimate the uncertainty of the obtained coefficients
x0, a, b, = unc.correlated_values(popt, pcov)
# Store the fit information
fit = {
"r2": r2,
"x0": x0,
"a": a,
"b": b,
"coef": popt,
"coef_cov": pcov,
"y_data": y_data,
"x_data": slc_date,
}
return x0, a, b, fit | d76e6e3f7c585dc93d70d09e43861f29a3b777ea | 3,657,815 |
from rdflib import Graph  # Graph().parse(format="ttl") matches the rdflib API used below
def load_data_from(file_path):
"""
Loads the ttl, given by file_path and returns (file_path, data, triple_count)
Data stores triples.
"""
graph = Graph()
graph.parse(file_path, format="ttl")
data = {}
triple_count = 0
for subject, predicate, object in graph:
triple_count += 1
# add triple
predicate_out = (predicate, "out")
if subject not in data:
data[subject] = {}
if predicate_out not in data[subject]:
data[subject][predicate_out] = []
data[subject][predicate_out].append(object)
# add backlink
predicate_in = (predicate, "in")
if object not in data:
data[object] = {}
if predicate_in not in data[object]:
data[object][predicate_in] = []
data[object][predicate_in].append(subject)
print(file_path, ":", triple_count, "triples loaded")
return (file_path, data, triple_count) | 8f856d1600e5d387fe95b2ff06a889c72f40a69c | 3,657,816 |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
def get_predictability(X, y, dtype='continuous'):
"""Returns scores for various models when given a dataframe and target set
Arguments:
X (dataframe)
y (series)
dtype (str): categorical or continuous
Note: X and y must be equal in column length
Returns:
results (dataframe)
"""
M = pd.concat([X, y], axis=1)
fortrain = M.dropna()
X_ft = fortrain.iloc[:,:-1]
y_ft = fortrain.iloc[:,-1]
X_train, X_test, y_train, y_test = train_test_split(X_ft, y_ft, test_size=0.1)
# use mean as the prediction
y_train_mean = y_train.mean()
y_pred_mean = np.zeros(len(y_test))
y_pred_mean.fill(y_train_mean)
# use median as the prediction
y_train_median = y_train.median()
y_pred_median = np.zeros(len(y_test))
y_pred_median.fill(y_train_median)
# use mode as the prediction
# zero index is required to return the first most common value
y_train_mode = y_train.mode()[0]
y_pred_mode = np.zeros(len(y_test))
y_pred_mode.fill(y_train_mode)
lm = LinearRegression()
print("Fitting linear regression model")
lm.fit(X_train, y_train)
rf = RandomForestRegressor()
print("Fitting random forest model")
rf.fit(X_train, y_train)
kN = KNeighborsRegressor()
print("Fitting kNN model")
kN.fit(X_train, y_train)
# get the r2 score for each model
mean_score = r2_score(y_test, y_pred_mean)
median_score = r2_score(y_test, y_pred_median)
mode_score = r2_score(y_test, y_pred_mode)
lm_score = lm.score(X_test, y_test)
rf_score = rf.score(X_test, y_test)
kN_score = kN.score(X_test, y_test)
# get the mse for each model
mean_mse = mean_squared_error(y_test, y_pred_mean)
median_mse = mean_squared_error(y_test, y_pred_median)
mode_mse = mean_squared_error(y_test, y_pred_mode)
lm_y_pred = lm.predict(X_test)
rf_y_pred = rf.predict(X_test)
kN_y_pred = kN.predict(X_test)
lm_mse = mean_squared_error(y_test, lm_y_pred)
rf_mse = mean_squared_error(y_test, rf_y_pred)
kN_mse = mean_squared_error(y_test, kN_y_pred)
# construct the dataframe to return to the user
names = ['mean', 'median', 'mode', 'LinearRegression', 'RandomForestRegressor', 'KNeighborsRegressor']
scores = [mean_score, median_score, mode_score, lm_score, rf_score, kN_score]
losses = [mean_mse, median_mse, mode_mse, lm_mse, rf_mse, kN_mse]
results = pd.DataFrame(data=list(zip(names, scores, losses)), columns=['names', 'r2 score', 'loss'])
results['r2 score'] = results['r2 score'].apply(lambda x: round(x, 0))
results['loss'] = results['loss'].apply(lambda x: round(x, 0))
return results | 8af7b53eef2da6c84f54081fe41641aa4aa2e66d | 3,657,817 |
import uuid
def generate_guid(shard=0, base_uuid=None):
"""
Generates an "optimized" UUID that accomodates the btree indexing
algorithms used in database index b-trees. Check the internet for
details but the tl;dr is big endian is everything.
Leveraging the following as the reference implementation:
https://www.percona.com/blog/2014/12/19/store-uuid-optimized-way/
http://stackoverflow.com/questions/412341/how-should-i-store-guid-in-mysql-tables#27845470
https://engineering.instagram.com/sharding-ids-at-instagram-1cf5a71e5a5c
It works as follows, by reorganizing the most significant bytes of the
timestamp portion of a UUID1 to ensure that UUIDs generated in close
succession all land on the same (or at least adjacent) index pages.
The implementation is provided in pure-python to ensure we aren't
delegating the calculation to the SPOF that is our database. While not
the most performant place to put this, it's by far the most flexible.
12345678-9ABC-DEFG-HIJK-LMNOPQRSTUVW
12345678 = least significant 4 bytes of the timestamp in big endian order
9ABC = middle 2 timestamp bytes in big endian
D = 1 to signify a version 1 UUID
EFG = most significant 12 bits of the timestamp in big endian
When you convert to binary, the best order for indexing would be:
EFG9ABC12345678D + the rest.
Lastly, rather than implementing this as a type, through experimentation it
was determined that the re-ordered UUID can be coerced back into the uuid
type with no problems. This lets us rely on an existing implementation
for UUIDs and instead only worry about supplying one. The alternative
would be to implement in the type a conversion back to an "unordered" UUID
when retrieving the column from the database, which would be wasted effort
The last 12 bits of the UUID generated will be replaced with a shard id. By
default we're allowing for 4096 shards, which is overkill for everyone but
Facebook. However, it's easy to work with since every character in the
UUID represents 4 bits, so all we have to do is overwrite 3 characters.
"""
base_uuid = base_uuid or str(uuid.uuid1())
if shard > MAX_SHARD:
raise exception.InvalidShardId(shard_id=shard, max_shard=MAX_SHARD)
shard_id = "{:03X}".format(shard)
return uuid.UUID(''.join([base_uuid[15:18],
base_uuid[9],
base_uuid[10:13],
base_uuid[:8],
base_uuid[14],
base_uuid[19:23],
base_uuid[24:33],
shard_id])) | 0e9b6395045c9c2f51e5f9aa19951f5ff569b914 | 3,657,818 |
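# Usage sketch for generate_guid above (illustration only; MAX_SHARD and the exception
# module are assumed to come from the surrounding package): the reordered value is still a
# valid uuid.UUID, with the last three hex characters encoding the shard.
guid = generate_guid(shard=7)
isinstance(guid, uuid.UUID)       # True
str(guid).replace("-", "")[-3:]   # '007'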
import numpy as np
import tensorflow as tf
def load(name):
"""Loads dataset as numpy array."""
x, y = get_uci_data(name)
if len(y.shape) == 1:
y = y[:, None]
train_test_split = 0.8
random_permutation = np.random.permutation(x.shape[0])
n_train = int(x.shape[0] * train_test_split)
train_ind = random_permutation[:n_train]
test_ind = random_permutation[n_train:]
x_train, y_train = x[train_ind, :], y[train_ind, :]
x_test, y_test = x[test_ind, :], y[test_ind, :]
x_mean, x_std = np.mean(x_train, axis=0), np.std(x_train, axis=0)
y_mean = np.mean(y_train, axis=0)
epsilon = tf.keras.backend.epsilon()
x_train = (x_train - x_mean) / (x_std + epsilon)
x_test = (x_test - x_mean) / (x_std + epsilon)
y_train, y_test = y_train - y_mean, y_test - y_mean
return x_train, y_train, x_test, y_test | 3e084ff82d092b2bfdc0fa947a1b9bd1548c336d | 3,657,819 |
def read_file(filename):
"""
Read :py:class:`msbuildpy.corflags.CorFlags` from a .NET assembly file path.
**None** is returned if the PE file is invalid.
:param filename: A file path to a .NET assembly/executable.
:return: :py:class:`msbuildpy.corflags.CorFlags` or **None**
"""
with open(filename, 'rb') as f:
return read(f) | f5992bd119f90418748f9c67e64eda6fe1338839 | 3,657,820 |
def cleanup_all(threshold_date=None):
"""Clean up the main soft deletable resources.
This function contains an order of calls to
clean up the soft-deletable resources.
:param threshold_date: soft deletions older than this date will be removed
:returns: total number of entries removed from the database
"""
LOG.debug("Cleaning up soft deletions where deletion date"
" is older than %s", str(threshold_date))
total = 0
total += cleanup_softdeletes(models.VmExpire,
threshold_date=threshold_date)
LOG.info("Cleaned up %s soft deleted entries", total)
return total | cabfb77af2c650bc05df4f86c2a719dc1a90adb7 | 3,657,821 |
import tempfile
def parse_config(config_strings):
"""Parse config from strings.
Args:
config_strings (string): strings of model config.
Returns:
Config: model config
"""
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# Update backbone config
if 'pool_mod' in config.model.backbone.backbones:
config.model.backbone.backbones.pop('pool_mod')
if 'sa_cfg' not in config.model.backbone:
config.model.backbone['sa_cfg'] = dict(
type='PointSAModule',
pool_mod='max',
use_xyz=True,
normalize_xyz=True)
if 'type' not in config.model.rpn_head.vote_aggregation_cfg:
config.model.rpn_head.vote_aggregation_cfg['type'] = 'PointSAModule'
# Update rpn_head config
if 'pred_layer_cfg' not in config.model.rpn_head:
config.model.rpn_head['pred_layer_cfg'] = dict(
in_channels=128, shared_conv_channels=(128, 128), bias=True)
if 'feat_channels' in config.model.rpn_head:
config.model.rpn_head.pop('feat_channels')
if 'vote_moudule_cfg' in config.model.rpn_head:
config.model.rpn_head['vote_module_cfg'] = config.model.rpn_head.pop(
'vote_moudule_cfg')
if config.model.rpn_head.vote_aggregation_cfg.use_xyz:
config.model.rpn_head.vote_aggregation_cfg.mlp_channels[0] -= 3
for cfg in config.model.roi_head.primitive_list:
cfg['vote_module_cfg'] = cfg.pop('vote_moudule_cfg')
cfg.vote_aggregation_cfg.mlp_channels[0] -= 3
if 'type' not in cfg.vote_aggregation_cfg:
cfg.vote_aggregation_cfg['type'] = 'PointSAModule'
if 'type' not in config.model.roi_head.bbox_head.suface_matching_cfg:
config.model.roi_head.bbox_head.suface_matching_cfg[
'type'] = 'PointSAModule'
if config.model.roi_head.bbox_head.suface_matching_cfg.use_xyz:
config.model.roi_head.bbox_head.suface_matching_cfg.mlp_channels[
0] -= 3
if 'type' not in config.model.roi_head.bbox_head.line_matching_cfg:
config.model.roi_head.bbox_head.line_matching_cfg[
'type'] = 'PointSAModule'
if config.model.roi_head.bbox_head.line_matching_cfg.use_xyz:
config.model.roi_head.bbox_head.line_matching_cfg.mlp_channels[0] -= 3
if 'proposal_module_cfg' in config.model.roi_head.bbox_head:
config.model.roi_head.bbox_head.pop('proposal_module_cfg')
temp_file.close()
return config | 88e37c8e13517534486d5914e28f5a941e439c38 | 3,657,822 |
def validate_profile_info(df, fix=False):
"""
Validates the form of an information profile dataframe. An information profile dataframe must look something like this:
pos info info_err
0 0.01 0.005
1 0.03 0.006
2 0.006 0.008
    A 'pos' column reports the position within a sequence to which the information profile applies. The 'info' column describes the information in bits. The 'info_err' column quantifies uncertainty in this mutual information value.
Specifications:
0. The dataframe must have at least one row and one column.
    1. A 'pos' column is mandatory and must occur first. Values must be nonnegative integers in sequential order.
    2. An 'info' column is mandatory and must come second. Values must be finite floating-point values.
    3. An 'info_err' column is optional and must come last. Values must be finite floating-point values.
Arguments:
df (pd.DataFrame): Dataset in dataframe format
fix (bool): A flag saying whether to fix the dataframe into shape if possible.
Returns:
if fix=True:
df_valid: a valid dataframe that has been fixed by the function
if fix=False:
Nothing
Function:
        Raises a SortSeqError if the dataframe violates the specifications (if fix=False) or if these violations cannot be fixed (fix=True).
"""
# Verify dataframe has at least one row
if not df.shape[0] >= 1:
raise SortSeqError(\
'Dataframe must contain at least one row')
# Validate column names
for col in df.columns:
if not is_col_type(col,['pos','infos']):
raise SortSeqError('Invalid column in dataframe: %s.'%col)
for col in ['pos','info']:
if not col in df.columns:
raise SortSeqError('%s column missing'%col)
# Validate contents of columns
df = _validate_cols(df,fix=fix)
# Make sure that all info values are nonnegative
info_cols = get_cols_from_df(df,'infos')
if not 'info' in info_cols:
raise SortSeqError('info column is missing.')
# Validate column order
new_cols = ['pos'] + info_cols
if not all(df.columns == new_cols):
if fix:
df = df[new_cols]
else:
raise SortSeqError(\
'Dataframe columns are in the wrong order; set fix=True to fix.')
return df | fa4143da7cadaeb322315da3a0aaa49fc869a1eb | 3,657,823 |
import math
def degrees(cell):
"""Convert from radians to degress"""
return math.degrees(GetNum(cell)) | 3e5b08ffb2d5ec82c0792a266eb13b6fb8110971 | 3,657,824 |
def create_address(request):
"""
List all code snippets, or create a new snippet.
"""
if request.method == 'POST':
data = JSONParser().parse(request)
serializer = AddressSerializer(data=data)
if serializer.is_valid():
serializer.save()
return JsonResponse({'Message': 'Address created successfully'}, status=201)
return JsonResponse(serializer.errors, status=400) | d9dd0ff221ddecb826cc961e5d4cb1acf867c510 | 3,657,825 |
def createPhysicalAddressDataframe(userDf):
"""
    This method creates a physical address dataframe for CDM
:param userDf: person dataframe
:type userDf: object
"""
addressColumns = [
"id as personId","city","country","officeLocation","postalCode","state","streetAddress"
]
return userDf.selectExpr(addressColumns).where(userDf.country.isNotNull()) | 4d185175ff6719476ed843680c17d0f267fa15ff | 3,657,826 |
import numpy as np
import pytest
from scipy import linalg as la
def test_reproject_continuous(n=100, m=20, r=10):
"""Test pre._reprojection.reproject_continuous()."""
# Construct dummy operators.
k = 1 + r + r*(r+1)//2
D = np.diag(1 - np.logspace(-1, -2, n))
W = la.qr(np.random.normal(size=(n,n)))[0]
A = W.T @ D @ W
Ht = np.random.random((n,n,n))
H = (Ht + Ht.T) / 20
H = H.reshape((n, n**2))
B = np.random.random((n,m))
U = np.random.random((m,k))
B1d = np.random.random(n)
U1d = np.random.random(k)
basis = np.eye(n)[:,:r]
X = np.random.random((n,k))
# Try with bad initial condition shape.
with pytest.raises(ValueError) as exc:
opinf.pre.reproject_continuous(lambda x:x, basis, X[:-1,:])
assert exc.value.args[0] == \
f"states and basis not aligned, first dimension {n-1} != {n}"
# Linear case, no inputs.
def f(x):
return A @ x
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("A").fit(basis, X_, Xdot_)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
# Linear case, 1D inputs.
def f(x, u):
return A @ x + B1d * u
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X, U1d)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AB").fit(basis, X_, Xdot_, U1d)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
assert np.allclose(rom.B_.entries.flatten(), basis.T @ B1d)
# Linear case, 2D inputs.
def f(x, u):
return A @ x + B @ u
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X, U)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AB").fit(basis, X_, Xdot_, U)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
assert np.allclose(rom.B_.entries, basis.T @ B)
# Quadratic case, no inputs.
def f(x):
return A @ x + H @ np.kron(x,x)
X_, Xdot_ = opinf.pre.reproject_continuous(f, basis, X)
assert X_.shape == (r,k)
assert Xdot_.shape == (r,k)
rom = opinf.ContinuousOpInfROM("AH").fit(basis, X_, Xdot_)
assert np.allclose(rom.A_.entries, basis.T @ A @ basis)
H_ = basis.T @ H @ np.kron(basis, basis)
for _ in range(10):
x_ = np.random.random(r)
x2_ = np.kron(x_, x_)
assert np.allclose(rom.H_(x_), H_ @ x2_) | cc418dc7b5fce06c20a907cd020641823b42355b | 3,657,827 |
def index():
""" Custom View """
module_name = deployment_settings.modules[module].name_nice
return dict(module_name=module_name) | 6cde065d07d0aeec371da20c47e46b82c8e3035d | 3,657,828 |
def _lm_map_func(hparams, sos_id, eos_id, prot_size):
"""Return a closure for the BDLM with the SOS/EOS ids"""
def lm_map_func(id, seq_len, seq, phyche):
prot_eye = tf.eye(prot_size)
# split characters
seq = tf.string_split([seq], delimiter="").values
# map to integers
seq = tf.cast(hparams.prot_lookup_table.lookup(seq), tf.int32)
# prepend/append SOS/EOS tokens
seq_in = tf.concat(([sos_id], seq, [eos_id]), 0)
if "filter_size" in vars(hparams):
k = hparams.filter_size
else:
k = 1
# pad zeros to phyche
phyche_pad = tf.zeros(shape=(k, hparams.num_phyche_features))
phyche = tf.concat([phyche_pad, phyche, phyche_pad], 0)
# map to one-hots
seq_in = tf.nn.embedding_lookup(prot_eye, seq_in)
seq_out = tf.nn.embedding_lookup(prot_eye, seq)
# pad zeros to match filters
if k-1 > 0:
pad = tf.zeros(shape=(k-1, prot_size))
seq_in = tf.concat([pad, seq_in, pad], 0)
return id, seq_len, seq_in, phyche, seq_out
return lm_map_func | f9fde8af2970f309476c845281538375564841cc | 3,657,829 |
def isSpecificInterfaceType(t, name):
""" True if `t` is an interface type with the given name, or a forward
declaration or typedef aliasing it.
`name` must not be the name of a typedef but the actual name of the
interface.
"""
t = unaliasType(t)
return t.kind in ('interface', 'forward') and t.name == name | 5358d6ac946567323d00a56149cb3309d7ef4cb8 | 3,657,830 |
from typing import Optional
from pathlib import Path
import os
def is_stem(path: Optional[str]) -> bool:
"""Check if the given path is a stem."""
if path is None:
return False
path = path.lower()
parent = str(Path(path).parent)
if parent == ".":
root, ext = os.path.splitext(path)
if ext == "":
return True
return False | c14053cb93997eaea118f9187d96869b82c97539 | 3,657,831 |
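# Usage sketch for is_stem above (illustration only):
is_stem("track")        # True  (bare stem: no parent directory, no extension)
is_stem("track.flac")   # False (has an extension)
is_stem("takes/track")  # False (has a parent directory)
is_stem(None)           # False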
from datetime import datetime
def now():
"""Returns the current time in ISO8501 format.
"""
return datetime.datetime.now().isoformat() | 6fcd34faf9d2ca8d1d64a1e8b453a2a7eff44752 | 3,657,832 |
import math
def get_CF_matrix_from_parent_vector(parent, D, alpha, beta):
"""Documentation to be added."""
cell_ids = list(D.keys())
mut_ids = list(D[cell_ids[0]].keys())
children = {}
children[ROOT] = []
for mut_id in mut_ids:
children[mut_id] = []
for child_id, parent_id in parent.items():
if child_id != ROOT:
children[parent_id].append(child_id)
E = {}
for cell_id in cell_ids:
E[cell_id] = {}
for mut_id in mut_ids:
E[cell_id][mut_id] = None
for cell_id in cell_ids:
score = {}
score[ROOT] = 0
for mut_id in mut_ids:
observed = int(D[cell_id][mut_id])
if observed == 0:
score[ROOT] += math.log(1 - alpha)
if observed == 1:
score[ROOT] += math.log(alpha)
best_score = score[ROOT]
best_mut = ROOT
muts_to_visit = children[ROOT]
while len(muts_to_visit) > 0:
mut_id = muts_to_visit.pop(0)
parent_id = parent[mut_id]
score[mut_id] = score[
parent_id
] # this is only temporary. see changes below
observed = int(D[cell_id][mut_id])
if observed == 0:
score[mut_id] -= math.log(1 - alpha)
score[mut_id] += math.log(beta)
if observed == 1:
score[mut_id] -= math.log(alpha)
score[mut_id] += math.log(1 - beta)
if score[mut_id] > best_score:
best_score = score[mut_id]
best_mut = mut_id
for child_id in children[mut_id]:
muts_to_visit.append(child_id)
muts_present_in_true_genotype = []
current_mut = best_mut
while current_mut != ROOT:
muts_present_in_true_genotype.append(current_mut)
current_mut = parent[current_mut]
for mut_id in mut_ids:
if mut_id in muts_present_in_true_genotype:
E[cell_id][mut_id] = 1
else:
E[cell_id][mut_id] = 0
zero_one_flips = 0
one_zero_flips = 0
for cell_id in cell_ids:
for mut_id in mut_ids:
observed = int(D[cell_id][mut_id])
true = int(E[cell_id][mut_id])
if observed == 1 and true == 0:
one_zero_flips += 1
if observed == 0 and true == 1:
zero_one_flips += 1
# print("0_1_flips: " + str(zero_one_flips))
# print("1_0_flips: " + str(one_zero_flips))
return E | 17a937f130dea7849d0f6aaf0a4b62d7e97bd746 | 3,657,833 |
import os
def get_resource_path(filename: str = "") -> str:
"""
get the resource path in the resource in the test dir.
/path/to/resource/filename
"""
current = os.path.abspath(__file__)
current_path = os.path.dirname(current)
resource_dir = os.path.join(current_path, 'resource')
return os.path.join(resource_dir, filename) | 4e140c7619336a508f3eb833b95513bc7e84bd4e | 3,657,834 |
from os import getcwd
from typing import Generator
def esr_1_2(out_filename, source_folder, dest_folder=getcwd(),
one_hot=True, normalized=True, out_type="float", balanced_classes=False,
n_batch=None, batch_size=None, validation_size=30, validation_as_copy=False, test_size=160, save_stats=False):
"""Create a esr dataset for DENN."""
dataset_params = {
'esr_source_folder': source_folder,
'normalized': normalized,
'onehot': one_hot
}
val_action = "extract_to" if not validation_as_copy else 'random_copy_to'
actions = [
('modifier', 'simple_shuffle', (), {'target': "train"}),
('modifier', 'extract_to', ('train', 'test', test_size), {}),
('modifier', val_action, ('train', 'validation', validation_size), {}),
('modifier', 'split', ('train',), {
'batch_size': batch_size, 'n_batch': n_batch}),
('modifier', 'convert_type', (out_type,), {})
]
if balanced_classes:
for idx, (type_, action, args, kwargs) in enumerate(actions):
if action == "extract_to":
actions[idx] = (type_, 'extract_to_with_class_ratio', args, kwargs)
generator = Generator('ESR_1_2_Dataset', dataset_params, actions, out_type=out_type)
generator.execute_actions()
generator.save(out_filename, dest_folder)
if save_stats:
generator.save_stats(out_filename, dest_folder)
return generator | 8e7857baf4d0ac5c07614db4089d0bd4cf5a2897 | 3,657,835 |
import numpy as np
def chec_to_2d_array(input_img, img_map=chec_transformation_map()):
    """
    Convert images coming from "CHEC" cameras in order to get regular 2D
    "rectangular" images directly usable with most image processing tools.
Parameters
----------
input_img : numpy.array
The image to convert
Returns
-------
A numpy.array containing the cropped image.
"""
# Check the image
if len(input_img) != 2048:
raise ValueError("The input image is not a valide CHEC camera image.")
# Copy the input flat ctapipe image and add one element with the NaN value in the end
input_img_ext = np.zeros(input_img.shape[0] + 1)
input_img_ext[:-1] = input_img[:]
input_img_ext[-1] = np.nan
# Make the output image
img_2d = input_img_ext[img_map]
return img_2d | c235e3f9751a05ffd02d82eb07f79da84a9a742a | 3,657,836 |
def noop_chew_func(_data, _arg):
"""
No-op chew function.
"""
return 0 | 82ef82b350c2a01e5ba22f288c003032bf6e63e0 | 3,657,837 |
def find_middle_snake_less_memory(old_sequence, N, new_sequence, M):
"""
A variant of the 'find middle snake' function that uses O(min(len(a), len(b)))
memory instead of O(len(a) + len(b)) memory. This does not improve the
worst-case memory requirement, but it takes the best case memory requirement
down to near zero.
"""
MAX = N + M
Delta = N - M
V_SIZE=2*min(M,N) + 2
Vf = [None] * V_SIZE
Vb = [None] * V_SIZE
Vf[1] = 0
Vb[1] = 0
for D in range(0, (MAX//2+(MAX%2!=0)) + 1):
for k in range(-(D - 2*max(0, D-M)), D - 2*max(0, D-N) + 1, 2):
if k == -D or k != D and Vf[(k - 1) % V_SIZE] < Vf[(k + 1) % V_SIZE]:
x = Vf[(k + 1) % V_SIZE]
else:
x = Vf[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[x] == new_sequence[y]:
x = x + 1
y = y + 1
Vf[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 1) and inverse_k >= -(D -1) and inverse_k <= (D -1):
if Vf[k % V_SIZE] + Vb[inverse_k % V_SIZE] >= N:
return 2 * D -1, x_i, y_i, x, y
for k in range(-(D - 2*max(0, D-M)), (D - 2*max(0, D-N)) + 1, 2):
if k == -D or k != D and Vb[(k - 1) % V_SIZE] < Vb[(k + 1) % V_SIZE]:
x = Vb[(k + 1) % V_SIZE]
else:
x = Vb[(k - 1) % V_SIZE] + 1
y = x - k
x_i = x
y_i = y
while x < N and y < M and old_sequence[N - x -1] == new_sequence[M - y - 1]:
x = x + 1
y = y + 1
Vb[k % V_SIZE] = x
inverse_k = (-(k - Delta))
if (Delta % 2 == 0) and inverse_k >= -D and inverse_k <= D:
if Vb[k % V_SIZE] + Vf[inverse_k % V_SIZE] >= N:
return 2 * D, N - x, M - y, N - x_i, M - y_i | d320090f975525a620a7fafc479e9eec8b9a4ffa | 3,657,838 |
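# Usage sketch for find_middle_snake_less_memory above (illustration only), using the
# classic Myers example sequences: the function returns the shortest-edit-script length D
# and the endpoints (x, y) -> (u, v) of the middle snake.
old, new = "ABCABBA", "CBABAC"
D, x, y, u, v = find_middle_snake_less_memory(old, len(old), new, len(new))
# D == 5 for this classic pair; (x, y) and (u, v) bound a run of matching characters.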
def music_pos(style_name, st, at):
"""
Returns the track position to Ren'Py.
"""
global time_position
if music.get_pos(channel="music_room") is not None:
time_position = music.get_pos(channel="music_room")
readableTime = convert_time(time_position)
d = Text(readableTime, style=style_name)
return d, 0.20 | bb7287d812927b3bea15f8a0a077588c9f13ddb8 | 3,657,839 |
from datetime import datetime, timezone
def get_time(data=None):
"""Receive a dictionary or a string and return a datatime instance.
data = {"year": 2006,
"month": 11,
"day": 21,
"hour": 16,
"minute": 30 ,
"second": 00}
    or
    data = "2018-04-17T17:13:50"
Args:
data (str, dict): python dict or string to be converted to datetime
Returns:
datetime: datetime instance.
"""
if isinstance(data, str):
date = datetime.strptime(data, "%Y-%m-%dT%H:%M:%S")
elif isinstance(data, dict):
date = datetime(**data)
else:
return None
return date.replace(tzinfo=timezone.utc) | b8bf33db4225d44961991b58a6cdef7876cfef95 | 3,657,840 |
from datetime import datetime
def set_clock(child, timestamp=None):
"""Set the device's clock.
:param pexpect.spawn child: The connection in a child application object.
:param datetime timestamp: A datetime tuple (year, month, day, hour, minute, second).
:returns: The updated connection in a child application object.
:rtype: pexpect.spawn
"""
if not timestamp:
timestamp = datetime.utcnow()
child.sendline("clock set {0}\r".format(timestamp.strftime("%H:%M:%S %d %b %Y")))
child.expect_exact("{0}, configured from console by console".format(timestamp.strftime("%H:%M:%S UTC %a %b %d %Y")))
return child | b6299ab780ffc9e9d27b0715decf095b3d6a6272 | 3,657,841 |
def feature_structure(string, case, intr=False):
"""Convert person-number string to a single feature structure.
Examples:
>>> feature_structure('1s', 'Nom', True)
['Nom', '+1', '-2', '-3', '+sg', '-pl', '+intr']
>>> feature_structure('2p', 'Abs', True)
['Abs', '-1', '+2', '-3', '-sg', '+pl', '+intr']
>>> feature_structure('3d', 'Erg')
['Erg', '-1', '-2', '+3', '-sg', '-pl']
>>> feature_structure('1pi', 'Nom')
['Nom', '+1', '+2', '-3', '-sg', '+pl']
"""
first = '{}1'.format(value('1' in string))
second = '{}2'.format(value('2' in string or 'i' in string))
third = '{}3'.format(value('3' in string))
sg = '{}sg'.format(value('s' in string))
pl = '{}pl'.format(value('p' in string))
struct = [case, first, second, third, sg, pl]
if intr:
struct.append('+intr')
return struct | 32967a45156f8a476cabc91d63d9a92dcb72dc6a | 3,657,842 |
def repo_description(gurl, owner, repo):
"""
Returns: (status_code, status_text, data)
data = {"created_at": date, "description": str,
"stargazers_count": int, "subscribers_count": int}
"""
res = "/repos/{}/{}".format(owner, repo)
response = gurl.request(funcs.get_hub_url(res))
code = response.code
json = response.json
data = {}
if code == 304:
json = response.cached_response.json
if (code in (200, 304)) and json:
data["description"] = json["description"]
date = json["created_at"]
data["created_at"] = _badass_iso_8601_date_parser(date)
data["stargazers_count"] = json["stargazers_count"]
data["subscribers_count"] = json["subscribers_count"]
return *response.status, data | ab9bf82b474cddd55ad1da426fdca4d9874320c6 | 3,657,843 |
def check_omf_version(file_version):
"""Validate file version compatibility against the current OMF version
This logic may become more complex with future releases.
"""
if file_version is None:
return True
return file_version == OMF_VERSION | e834b8bafb1d9d94b12f7846d29fcbdf4c080c08 | 3,657,844 |
from typing import List
from typing import Tuple
def create_ks_scheduled_constant_graph_ops(
graph: tf_compat.Graph,
global_step: tf_compat.Variable,
var_names: List[str],
begin_step: int,
end_step: int,
ks_group: str,
) -> Tuple[tf_compat.Tensor, List[PruningOpVars]]:
"""
Creates constant model pruning ops. Does not modify the graph.
:param graph: the tf graph to pull the operator out of for applying the pruning to
:param global_step: the global optimizer step for the training graph
:param var_names: a list of names or regex patterns to create constant ops
for within the graph
:param begin_step: the global step to begin pruning at
:param end_step: the global step to end pruning at
:param ks_group: the group identifier the scope should be created under
:return: a tuple containing the update operation to run in a session,
a list of the pruning ops and vars for each desired op in the graph
"""
pruning_op_vars = []
is_start_step = tf_compat.equal(global_step, begin_step)
is_end_step = tf_compat.equal(global_step, end_step)
for op, op_input in get_ops_and_inputs_by_name_or_regex(var_names, graph):
op_vars = create_constant_op_pruning(
op, op_input, is_start_step, is_end_step, ks_group
)
pruning_op_vars.append(op_vars)
update_op = get_scheduled_update_op(pruning_op_vars, ks_group)
return update_op, pruning_op_vars | ba4ae29793d3ff5de6ad4b84966d04dff0235fae | 3,657,845 |
import os
import ntpath
def findImages(dataPath):
"""
Finds all the images needed for training on the path `dataPath`.
Returns `([centerPaths], [leftPath], [rightPath], [measurement])`
"""
directories = [x[0] for x in os.walk(dataPath)]
dataDirectories = list(filter(lambda directory: os.path.isfile(directory + '/driving_log.csv'), directories))
centerTotal = []
leftTotal = []
rightTotal = []
measurementTotal = []
for directory in dataDirectories:
lines = getLinesFromDrivingLogs(directory)
center = []
left = []
right = []
measurements = []
for line in lines:
measurements.append(float(line[3]))
center.append(directory + '/IMG/' + ntpath.basename(line[0].strip()))
left.append(directory + '/IMG/' + ntpath.basename(line[1].strip()))
right.append(directory + '/IMG/' + ntpath.basename(line[2].strip()))
centerTotal.extend(center)
leftTotal.extend(left)
rightTotal.extend(right)
measurementTotal.extend(measurements)
return (centerTotal, leftTotal, rightTotal, measurementTotal) | 3910b1571266e5d0a8f79551abdd3631fd256d19 | 3,657,846 |
def insert(user_job_id: ObjectId, classifier: str, fastq_path: str, read_type: str or None = None) -> ObjectId:
"""
Insert a new ClassificationJob into the collection.
:param user_job_id: Which UserJob is associated with this ClassificationJob
:param classifier: The classifier to use
:param fastq_path: The input fastq file to read from
:return: The ObjectId of the ClassificationJob added
"""
queue_position = -1
if read_type is None:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert)
else:
to_insert = dict(user_job_id=user_job_id, classifier=classifier, fastq_path=fastq_path, read_type=read_type,
queue_position=queue_position, status=JobStatus.QUEUED)
return controllers.insert_one(collection=SchemaLoader.CLASSIFICATION_JOB, data=to_insert) | 6d95d6b2eb4d55f8ec4ae1c63e3c457f66f6b2a1 | 3,657,847 |
from typing import List
from typing import Union
def as_columns(
things: List[Union[SheetColumn, list, tuple, set, str]]
) -> List[SheetColumn]:
"""A list of each thing as a SheetColumn"""
result = []
for thing in things:
if isinstance(thing, SheetColumn):
sheet_column = thing
elif isinstance(thing, (list, tuple, set)):
sheet_column = SheetColumn(*thing)
else:
sheet_column = SheetColumn(thing)
result.append(sheet_column)
return result | 43f9a0a3bf655dd1f4b818e15a666d96d9279f1c | 3,657,848 |
import numpy as np
def generator_path(x_base, y_base):
    """Generate a dense path along a 2D spline through the given base points.

    Returns a list of [x, y, yaw, curvature] samples taken every 0.1 units of arc
    length along the spline.
    """
res = []
for i in np.arange(0, sp2d.s[-1], 0.1):
x, y = sp2d.calc_position(i)
yaw = sp2d.calc_yaw(i)
curvature = sp2d.calc_curvature(i)
res.append([x, y, yaw, curvature])
return res | eda2e72ecef8b0f12069c4e7154503e859c6c28c | 3,657,849 |
def index(request):
"""Render site index page."""
return {} | 38c0a1e47cdbe2eed374b6231761698efa1bc166 | 3,657,850 |
import numpy as np
def bellman_ford_with_term_status(graph, is_multiplicative=False):
    """
    An implementation of the Bellman-Ford algorithm, with optional multiplication-based
    relaxation.
    :param: graph - The graph on which to operate. Should be a square matrix, where edges that don't
                    exist have value None
:param: is_multiplicative - If this is True, performs multiplication-based Bellman Ford, where
the distances between two nodes are based on the smallest PRODUCT of
the edge weights between them. If it is False, then performs
addition-based Bellman Ford, where the distances between two nodes
are based on the SUM of the edge weights between them.
:return: a tuple, where the zero-th item is the distance array output from the Bellman-Ford
Algorithm, as well as the predecessor array to find paths
"""
# print '[{0}] Entered Bellman Ford'.format(multiprocessing.current_process().pid)
operator = (lambda x, y: x * y) if is_multiplicative else (lambda x, y: x + y)
# Create a distance array with value infinity
distance = np.zeros(len(graph)).astype(np.float128)
distance.fill(float('inf'))
distance[0] = 1.0 if is_multiplicative else 0.0
prev_distance = list(distance)
# Create a predecessor array with value None
predecessor = np.zeros(len(graph))
predecessor.fill(-1)
# print '[{0}] Initialized Bellman Ford'.format(multiprocessing.current_process().pid)
for _ in range(len(graph) - 1):
# Iterate through all the vertices
for i, node_a_weights in enumerate(graph):
for j, weight in enumerate(node_a_weights):
if weight is None:
continue
new_dist = operator(distance[i], weight)
if new_dist - distance[j] < -1.0e-8: # Accounts for floating-pt error.
distance[j] = new_dist
predecessor[j] = i
# Check for early termination
if np.all(distance == prev_distance):
return distance, predecessor, True
prev_distance = list(distance)
return distance, predecessor, False | c73839578ea662b404dcdbca7443978674f151a3 | 3,657,851 |
import tensorflow as tf
def decoder(z, rnn, batch_size, state=None, n_dec=64, reuse=None):
"""Summary
Parameters
----------
z : TYPE
Description
rnn : TYPE
Description
batch_size : TYPE
Description
state : None, optional
Description
n_dec : int, optional
Description
reuse : None, optional
Description
Returns
-------
name : TYPE
Description
"""
with tf.variable_scope('decoder', reuse=reuse):
if state is None:
h_dec, state = rnn(z, rnn.zero_state(batch_size, tf.float32))
else:
h_dec, state = rnn(z, state)
return h_dec, state | f0464424e6e770031003c59d0f46c85d8aca23ec | 3,657,852 |
def parse_file(filename):
"""Parses the file containing the db schema
Key Arguments:
filename - the file to parse"""
f = open(filename, 'r')
lines = f.readlines()
f.close()
db = {}
for line in lines:
s_line = line.split('\t')
if s_line[0] == 'TABLE_CATALOG':
continue
if s_line[1] in db:
db[s_line[1]].append(s_line[2])
else:
db[s_line[1]] = [s_line[2]]
return db | 0b02829505a1b07c8a1ed9cc8a34c651cf4be41c | 3,657,853 |
import numpy as np
from sklearn.neighbors import KernelDensity
def est_L(sample_list, est_method, bandwidth=0.5):
"""Estimate L from a list of samples.
Parameter
------------------------------------------
sample_list: list
a list of samples for arm i at time t
est_method: str
can be one of the choice of 'kde', 'naive'
'kde': kernel density estimation
'naive': count the samples insides [0,dt]
"""
if est_method == 'kde':
kde = KernelDensity(kernel='tophat', bandwidth=bandwidth).fit(np.asarray(sample_list)[:, np.newaxis])
log_den_0 = kde.score_samples(np.asarray([0])[:, np.newaxis])
estL = np.exp(log_den_0)[0]
elif est_method == 'naive':
sorted_data = np.asarray(sorted(sample_list))
estL = len(sorted_data[sorted_data <= bandwidth])/len(sorted_data)
#if len(sample_list) ==1 or estL == 0:
# TODO: init value
# L = 0.01
else:
        print('Unknown estimation method.')
return estL | f01d29c1d8863d27d7c7af8c001d108a240d48bf | 3,657,854 |
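# Usage sketch for est_L above (illustration only): with the 'naive' method the estimate
# is simply the fraction of samples that fall inside [0, bandwidth].
est_L([0.1, 0.3, 0.7, 1.2], est_method='naive', bandwidth=0.5)  # -> 0.5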
from osgeo import ogr  # ogr.Geometry / ogr.wkbMultiPolygon come from the GDAL/OGR bindings
import geopandas as gpd
def create_test_area(test_tiles):
"""Create geometry from test images
Parameters
----------
    test_tiles : list
        list of test tile image names
Returns
-------
GeoPandas DataFrame
all test images merged into a GeoDataFrame
"""
multipolygon = ogr.Geometry(ogr.wkbMultiPolygon)
for name in test_tiles:
TileX, TileY, zoom = parse_tile_name(name)
polygon = geometry_from_tile_coords(TileX, TileY, zoom)
multipolygon.AddGeometry(polygon)
multipolygon.FlattenTo2D()
test_area = gpd.read_file(multipolygon.ExportToJson())
test_area.to_file("predictions/area_extent.geojson")
test_area = test_area.explode()
return test_area | 8f8cd7834ce5c9923d1ba2f5677155badd1176aa | 3,657,855 |
import os
import requests
def check_token(token) -> bool:
"""Check ReCaptcha token
Args:
token
Returns:
bool
"""
if os.getenv("CI"):
return True
url = "https://www.google.com/recaptcha/api/siteverify"
secret_key = os.getenv("RECAPTCHA_SECRET_KEY")
payload = {
"secret": secret_key,
"response": token,
}
response = requests.post(url, data=payload)
return response.json()["success"] and response.json()["score"] >= 0.5 | 5c78e55333e4e5484ae30acaea58a988247152dd | 3,657,856 |
import casadi as ca
def covSEard(x,
z,
ell,
sf2
):
"""GP squared exponential kernel.
This function is based on the 2018 GP-MPC library by Helge-André Langåker
Args:
x (np.array or casadi.MX/SX): First vector.
z (np.array or casadi.MX/SX): Second vector.
ell (np.array or casadi.MX/SX): Length scales.
sf2 (float or casadi.MX/SX): output scale parameter.
Returns:
SE kernel (casadi.MX/SX): SE kernel.
"""
dist = ca.sum1((x - z)**2 / ell**2)
return sf2 * ca.SX.exp(-.5 * dist) | a911df160227b5cac356101befe0df29eb8c47aa | 3,657,857 |
import binascii
from Crypto.Hash import SHA256  # assumed source of the SHA256.new() call used below
def make_devid(identity):
"""
Generate device ID from device identity data trying to follow the same
logic as devauth does. Returns a string containing device ID.
"""
d = SHA256.new()
# convert to binary as needed
bid = identity if type(identity) is bytes else identity.encode()
d.update(bid)
return binascii.b2a_hex(d.digest()).decode() | 979c9d379c001e79ebdec2bcf7a7f256c195bbee | 3,657,858 |
from typing import List, Text
def get_recomm_products(user_id: str) -> List[Text]:
"""
Gets the top 10 products the user is most likely to purchase.
:returns: List of product ids.
"""
instances_packet = {
"instances": [user_id]
}
prediction = aiplatform_recomm_endpoint.predict(instances=instances_packet)
return prediction[0][0]["output_2"] | 76086a2fea1d6b5a55c4a431510a333c2919e893 | 3,657,859 |
import numpy as np
def relu(Z):
"""
:param Z: -- the linear output in this layer
:return:
A -- the activation output in this layer
activation_cache -- a dictionary contains Z and A
"""
[m, n] = Z.shape
A = np.zeros((m,n))
for i in range(m):
for j in range(n):
if Z[i][j] < 0:
A[i][j] = 0
else:
A[i][j] = Z[i][j]
activation_cache = dict()
activation_cache["Z"] = Z
activation_cache["A"] = A
return A, activation_cache | b97073c806273edd4f9ccdf94743903ee4709674 | 3,657,860 |
import pandas as pd
def month(x: pd.Series) -> pd.Series:
"""
Month of each value in series
:param x: time series
:return: month of observations
**Usage**
Returns the month as a numeric value for each observation in the series:
:math:`Y_t = month(t)`
Month of the time or date is the integer month number, e.g. 1-12
**Examples**
Day for observations in series:
>>> series = generate_series(100)
>>> days = month(series)
**See also**
:func:`day` :func:`year`
"""
return pd.to_datetime(x.index.to_series()).dt.month | 5049681c77c9416bf2dd28c1a71265e44274c115 | 3,657,861 |
def _library_from_nglims(gi, sample_info, config):
"""Retrieve upload library from nglims specified user libraries.
"""
names = [config.get(x, "").strip() for x in ["lab_association", "researcher"]
if config.get(x)]
check_names = set([x.lower() for x in names])
for libname, role in config["private_libs"]:
        # Try to find a library for the lab or researcher
if libname.lower() in check_names:
return _get_library_from_name(gi, libname, role, sample_info)
# default to first private library if available
if len(config.get("private_libs", [])) > 0:
libname, role = config["private_libs"][0]
return _get_library_from_name(gi, libname, role, sample_info)
# otherwise use the lab association or researcher name
elif len(names) > 0:
return _get_library_from_name(gi, names[0], None, sample_info, create=True)
else:
raise ValueError("Could not find Galaxy library for sample %s" % sample_info["description"]) | 3c22807d23723267b39d026990b787febaf7cb1d | 3,657,862 |
import argparse
def get_commandline_parser():
"""it parses commandline arguments."""
parser = argparse.ArgumentParser(description='Toolpath generator.')
    parser.add_argument('--stl-filepath', help='filepath of stl file.')
parser.add_argument('--diameter', help='Diameter of toolbit.')
parser.add_argument('--step-size', help='Step size of the CNC machine.')
parser.add_argument('--feed-rate', help='Feed rate of CNC machine.')
parser.add_argument('--calculate-time', help='Flag to print time.',
type=bool)
return parser | d8727b7a9f40f63e0c322074ae88585e7dd5f0eb | 3,657,863 |
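# Usage sketch for get_commandline_parser above (illustration only):
parser = get_commandline_parser()
args = parser.parse_args(['--stl-filepath', 'part.stl', '--diameter', '3.0'])
args.stl_filepath    # 'part.stl'
args.calculate_time  # None unless the flag is supplied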
import numpy as np
from scipy import sparse
def con_isogonal(cos0, assign=False, **kwargs):
"""
keep tangent crossing angle
X += [lt1,lt2, ut1,ut2, cos]
(ue1-ue3) = lt1 * ut1, ut1**2 = 1
(ue2-ue4) = lt2 * ut2, ut2**2 = 1
ut1 * ut2 = cos
if assign:
cos == cos0
"""
w = kwargs.get('isogonal')
mesh = kwargs.get('mesh')
X = kwargs.get('X')
N = kwargs.get('N')
N5 = kwargs.get('N5')
N6 = kwargs.get('N6')
num = mesh.num_regular
arr = np.arange(num)
c_l1 = N6-8*num-1 + arr
c_l2 = c_l1+num
c_ut1 = columnnew(arr,N6-6*num-1,num)
c_ut2 = columnnew(arr,N6-3*num-1,num)
c_ue1 = columnnew(arr,N5-12*num,num)
c_ue2 = columnnew(arr,N5-9*num,num)
c_ue3 = columnnew(arr,N5-6*num,num)
c_ue4 = columnnew(arr,N5-3*num,num)
H1,r1 = con_edge(X,c_ue1,c_ue3,c_l1,c_ut1,num,N)
H2,r2 = con_edge(X,c_ue2,c_ue4,c_l2,c_ut2,num,N)
Hu1,ru1 = con_unit(X,c_ut1,num,N)
Hu2,ru2 = con_unit(X,c_ut2,num,N)
Ha,ra = con_constangle2(X,c_ut1,c_ut2,N6-1,num,N)
H = sparse.vstack((H1,H2,Hu1,Hu2,Ha))
r = np.r_[r1,r2,ru1,ru2,ra]
if assign:
H0,r0 = con_constl(np.array([N6-1],dtype=int),cos0,1,N)
H = sparse.vstack((H, H0))
r = np.r_[r,r0]
#self.add_iterative_constraint(H*w, r*w, 'isogonal')
#print('err:isogonal:',np.sum(np.square(H*X-r)))
return H*w,r*w | d2626e48a128b9b1dcecaa1ad3d297b5515540f9 | 3,657,864 |
import os
def download_is_complete(date):
"""
Has the process of downloading prescribing data for this date finished
successfully?
"""
return os.path.exists(local_storage_prefix_for_date(date) + SENTINEL_SUFFIX) | 2914ed7c35b2b46f71e0cb995fbbe3f4689211a5 | 3,657,865 |
import tensorflow as tf
def tf_apply_with_probability(p, fn, x):
"""Apply function `fn` to input `x` randomly `p` percent of the time."""
return tf.cond(
tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32), p),
lambda: fn(x),
lambda: x) | 2d2a5a5c0c67c41007ed839423bc474e9dbf283d | 3,657,866 |
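# Hedged usage sketch for tf_apply_with_probability, assuming TensorFlow 1.x
# (tf.random_uniform is the 1.x API used above). Flips an image horizontally
# roughly 50% of the time.
import tensorflow as tf

image = tf.ones([4, 4, 3])
maybe_flipped = tf_apply_with_probability(0.5, tf.image.flip_left_right, image)
with tf.Session() as sess:
    print(sess.run(maybe_flipped).shape)  # (4, 4, 3)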
import functools
def adjoin(x,seq,test=lambda x,y: x is y):
"""Tests whether item is the same as an existing element of list. If the
item is not an existing element, adjoin adds it to list (as if by cons) and
returns the resulting list; otherwise, nothing is added and the original
list is returned. """
return seq if any(map(functools.partial(test,x),seq)) else cons(x,seq) | d32b459d688f5ccca36b56e8772aca2bf66603c5 | 3,657,867 |
from typing import Optional
from datetime import datetime
from typing import Any
from fastapi import Depends, Query
def get_service_logs(
project_id: str = PROJECT_ID_PARAM,
service_id: str = SERVICE_ID_PARAM,
lines: Optional[int] = Query(None, description="Only show the last n lines."),
since: Optional[datetime] = Query(
None, description="Only show the logs generated after a given date."
),
component_manager: ComponentManager = Depends(get_component_manager),
token: str = Depends(get_api_token),
) -> Any:
"""Returns the stdout/stderr logs of the service."""
component_manager.verify_access(
token, f"projects/{project_id}/services/{service_id}/logs", AccessLevel.WRITE
)
service_id, extension_id = parse_composite_id(service_id)
return component_manager.get_service_manager(extension_id).get_service_logs(
project_id, service_id, lines, since
) | 1b20bc7ec55ba8f772490ea54848d1f331caa17e | 3,657,868 |
def uniquify_contacts(contacts):
"""
Return a sequence of contacts with all duplicates removed.
If any duplicate names are found without matching numbers, an exception is raised.
"""
ctd = {}
for ct in contacts:
stored_ct = ctd.setdefault(ct.name, ct)
if stored_ct.dmrid != ct.dmrid:
raise RuntimeError(
"Two contacts named {} have different IDs: {} {}".format(
ct.name, ct.dmrid, stored_ct.dmrid
)
)
return list(ctd.values()) | f4bf001abcccad1307633e6de6ed6228516ba0b2 | 3,657,869 |
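# Hedged usage sketch for uniquify_contacts; Contact is a stand-in namedtuple,
# the real contact type is only assumed to expose .name and .dmrid.
from collections import namedtuple

Contact = namedtuple("Contact", ["name", "dmrid"])
contacts = [Contact("Alice", 101), Contact("Alice", 101), Contact("Bob", 202)]
print(uniquify_contacts(contacts))
# [Contact(name='Alice', dmrid=101), Contact(name='Bob', dmrid=202)]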
def optimize_bank_transaction_list(bank_transactions):
"""Append related objects using select_related and prefetch_related"""
return bank_transactions.select_related('block') | c7c7242336f9cddf399efc9d813b7650b0f6ce5e | 3,657,870 |
from datetime import datetime
import PyRSS2Gen
def rss():
""" RSS2 Support.
support xml for RSSItem with 12 diaries.
Args:
none
Return:
diaries_object: list
site_settings: title, link, description
"""
articles = Diary.objects.order_by('-publish_time')[:12]
items = []
for article in articles:
content = article.html
url = Config.SITE_URL + '/diary/' + str(article.pk) + '/' + \
article.title
items.append(PyRSS2Gen.RSSItem(
title=article.title,
link=url,
description=content,
guid=PyRSS2Gen.Guid(url),
pubDate=article.publish_time,
))
rss = PyRSS2Gen.RSS2(
title=Config.MAIN_TITLE,
link=Config.SITE_URL,
description=Config.DESCRIPTION,
        lastBuildDate=datetime.now(),
items=items
).to_xml('utf-8')
return rss | f17e063bb0c15da4ab776e421408eb6a58fc7a50 | 3,657,871 |
def _interpolate_solution_at(target_time, solver_state, validate_args=False):
"""Computes the solution at `target_time` using 4th order interpolation.
Args:
target_time: Floating `Tensor` specifying the time at which to obtain the
solution. Must be within the interval of the last time step of the
`solver_state`: `solver_state.last_step_start` <= `target_time` <=
`solver_state.current_time`.
solver_state: `_DopriSolverInternalState` - solver state.
validate_args: Python `bool` indicating whether to validate inputs.
Default value: False.
Returns:
solution: Solution at `target_time` obtained by interpolation.
coefficients: Interpolating coefficients used to construct the solution.
"""
coefficients = solver_state.interpolating_coefficients
t0 = solver_state.last_step_start
t1 = solver_state.current_time
solution = rk_util.evaluate_interpolation(
coefficients, t0, t1, target_time, validate_args)
return solution, coefficients | 813b54730cb21443b9e051bfef553f226cfaf5ab | 3,657,872 |
def test_drawcities():
"""Draw Cities"""
mp = MapPlot(
title="Fill and Draw Cities",
subtitle="This is my subtitle",
continentalcolor="blue",
sector="iowa",
nocaption=True,
)
mp.drawcities()
return mp.fig | c83efdfb856010c93811d92f3ac1ffe39056bab6 | 3,657,873 |
import numpy as np
def from_hi(psi_0, mpa_type, system_index, hi, tau=0.01, state_compression_kwargs=None,
op_compression_kwargs=None, second_order_trotter=False, t0=0, psi_0_compression_kwargs=None,
track_trace=False):
"""
Factory function for imaginary time TMP-objects (ITMPS, ITMPO, ITPMPS)
:param psi_0: Initial state as MPArray. Need not be normalized, as it is normalized before propagation
:param mpa_type: Type of MPArray to propagate, supported are mps, mpo, and pmps
:param system_index: Index of the system site in the chain (place of the system site operator in the hi_list)
:param hi: List/tuple for all terms in the Hamiltonian H = sum_i hi
Ordered like this:
- Sites left of the system site (denoted by system index) couple (from left to right)
the current site to the system site (and contain the site local operators)
- The term for the system site must be present and contains the local Hamiltonian only!
May be None, in which case the local Hamiltonian for the site is assumed to be 0
- Sites right of the system site (denoted by system index) couple (from left to right)
the system site to the current site (and contain the site local operators)
:param tau: Timestep for each invocation of evolve. Real timestep should be passed here. Default is .01
:param state_compression_kwargs: Arguments for mpa compression after each dot product (see real time
evolution factory function for details)
:param op_compression_kwargs: Arguments for trotter step operator pre-compression (see real time evolution
factory function for details)
:param second_order_trotter: Switch to use second order instead of fourth order trotter if desired
By default fourth order Trotter is used
:param t0: Initial time of the propagation
:param psi_0_compression_kwargs: Optional compresion kwargs for the initial state (see real time evolution
factory function for details)
:param track_trace: If the trace of the (effective) density matrix should be tracked during the
imaginary time evolution
:return: TMP object. If mpa_type is mps: ITMPS obj., if mpa_type is mpo: ITMPO obj., if mpa_type is pmps: ITPMPS obj.
"""
if not check_shape(psi_0, mpa_type):
raise AssertionError('MPA shape of the initial state is not compatible with the chosen mpa_type')
assert np.imag(tau) == 0 and np.real(tau) != 0
tau = 1j * tau
if mpa_type == 'mps':
return StarITMPS.from_hi(psi_0, False, False, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
elif mpa_type == 'pmps':
return StarITPMPS.from_hi(psi_0, True, False, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
elif mpa_type == 'mpo':
return StarITMPO.from_hi(psi_0, False, True, system_index, hi, tau=tau,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter, t0=t0,
psi_0_compression_kwargs=psi_0_compression_kwargs,
track_trace=track_trace)
else:
raise AssertionError('Unsupported mpa_type') | cdee96cae5a9798fd3757000e5e49538f9f1c191 | 3,657,874 |
from typing import Type
def config_backed(config_path: str):
"""Second order decorator that sets up a backing config for a
GuildState type.
"""
def deco(gs_type: Type[GuildStateTV]) -> Type[GuildStateTV]:
gs_type._cfg_path = config_path
return gs_type
return deco | 6d2e64fed12918fd27ff10a2e97fc2b69d5751c2 | 3,657,875 |
def determine_if_is_hmmdb(infp):
"""Return True if the given file is an HMM database (generated using
hmmpress from the HMMer3 software package), and return False otherwise.
"""
#if open(infp, 'r').read().startswith('HMMER3/f'):
if open(infp, 'r').readline().startswith('HMMER3/f'):
return True
else:
return False | 33b962e24c76e9e25f2cc76d4e7f78565adf8a3e | 3,657,876 |
from typing import Sequence
def assignment_path(base_var: str, path: Sequence[daglish.PathElement]) -> str:
"""Generates the LHS of an assignment, given a traversal path.
Example: ["foo", 3, "bar"] -> "foo[3].bar".
Args:
base_var: Base variable name.
path: Attribute path on `base_var` to assign to.
Returns:
Python code string for the LHS of an assignment.
Raises:
TypeError: If the first path element is not a string, or if any path element
is not a string or an int.
"""
return base_var + "".join(x.code for x in path) | 92af61cf96008bc45a62df8ce369d7d7e9b1879f | 3,657,877 |
def template_footer(in_template):
"""Extracts footer from the notebook template.
Args:
in_template (str): Input notebook template file path.
Returns:
list: List of lines.
"""
footer = []
template_lines = []
footer_start_index = 0
with open(in_template) as f:
template_lines = f.readlines()
for index, line in enumerate(template_lines):
if '## Display Earth Engine data layers' in line:
footer_start_index = index - 3
footer = ['\n'] + template_lines[footer_start_index:]
return footer | cb872076b82b2012b2e27fcb1be9b8704cd60d27 | 3,657,878 |
import numpy as np
from sklearn import metrics
def conf_auc(test_predictions, ground_truth, bootstrap=1000, seed=None, confint=0.95):
"""Takes as input test predictions, ground truth, number of bootstraps, seed, and confidence interval"""
#inspired by https://stackoverflow.com/questions/19124239/scikit-learn-roc-curve-with-confidence-intervals by ogrisel
bootstrapped_scores = []
rng = np.random.RandomState(seed)
if confint>1:
confint=confint/100
for i in range(bootstrap):
# bootstrap by sampling with replacement on the prediction indices
        indices = rng.randint(0, len(test_predictions), len(test_predictions))
if len(np.unique(ground_truth[indices])) < 2:
continue
score = metrics.roc_auc_score(ground_truth[indices], test_predictions[indices])
bootstrapped_scores.append(score)
sorted_scores = np.array(bootstrapped_scores)
sorted_scores.sort()
lower_bound=(1-confint)/2
upper_bound=1-lower_bound
confidence_lower = sorted_scores[int(lower_bound * len(sorted_scores))]
confidence_upper = sorted_scores[int(upper_bound * len(sorted_scores))]
auc = metrics.roc_auc_score(ground_truth, test_predictions)
print("{:0.0f}% confidence interval for the score: [{:0.3f} - {:0.3}] and your AUC is: {:0.3f}".format(confint*100, confidence_lower, confidence_upper, auc))
confidence_interval = (confidence_lower, auc, confidence_upper)
return confidence_interval | a1825187e896f479f4fb9b4aeb7c494fdd4b55e5 | 3,657,879 |
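# Hedged usage sketch for conf_auc with synthetic labels and scores; assumes
# numpy and scikit-learn are installed (see the imports added above).
import numpy as np

rng = np.random.RandomState(0)
ground_truth = rng.randint(0, 2, size=500)
test_predictions = ground_truth + rng.normal(0, 0.8, size=500)
lower, auc, upper = conf_auc(test_predictions, ground_truth, bootstrap=200, seed=1)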
from binheap import BinHeap
def empty_heap():
"""Instantiate a heap for testing."""
min_heap = BinHeap()
return min_heap | 5941d09590be084465458d2ff3bd5db51ee41b4a | 3,657,880 |
def get_requires_python(dist):
# type: (pkg_resources.Distribution) -> Optional[str]
"""
Return the "Requires-Python" metadata for a distribution, or None
if not present.
"""
pkg_info_dict = get_metadata(dist)
requires_python = pkg_info_dict.get('Requires-Python')
if requires_python is not None:
# Convert to a str to satisfy the type checker, since requires_python
# can be a Header object.
requires_python = str(requires_python)
return requires_python | 9d8b475703bd3f12ca5845c33d324a3ba346c5fb | 3,657,881 |
def standard_primary_main_prefixes(primary = None):
"""Return list of standard prefixes that may go with particular primary
name.
**Note**
You may wish to use `StandardPrimaryMainPrefixes()` instead.
**Description**
The function returns, a list of main prefixes that may go together
with the given ``primary`` name. So, if, for example, the ``primary`` is
``"PROGRAMS"``, the supported main prefixes will be ``["bin", "sbin",
"libexec", "pkglibexec", "noinst", "check"]``. If the ``primary`` is
``None``, then entire dictionary describing allowed combinations is
returned. The dictionary has form::
{ 'PROGRAMS' : ["bin","sbin","libexec","pkglibexec","noinst","check"],
'LIBRARIES' : ["lib","pkglib","noinst","check"], ... },
is returned.
The lists were developed according to automake's documentation, especially:
- ``PROGRAMS`` : `Defining program sources`_ section,
- ``LIBRARIES`` : `Building a library`_ section,
- ``LTLIBRARIES`` : `Building Libtool Libraries`_ section,
- ``LISP`` : `Emacs Lisp`_ section,
- ``PYTHON`` : `Python`_ section,
- ``JAVA`` : `Java bytecode compilation`_ section,
- ``SCRIPTS`` : `Executable scripts`_ section,
- ``DATA`` : `Architecture-independent data files`_ section,
- ``HEADERS`` : `Header files`_ secition,
- ``MANS`` : `Man pages`_ section,
- ``TEXINFOS`` : `Texinfo`_ section
.. _Defining program sources: http://www.gnu.org/software/automake/manual/automake.html#Program-Sources
.. _Building a library: http://www.gnu.org/software/automake/manual/automake.html#A-Library
.. _Building Libtool Libraries: http://www.gnu.org/software/automake/manual/automake.html#Libtool-Libraries
.. _Emacs Lisp: http://www.gnu.org/software/automake/manual/automake.html#Emacs-Lisp
.. _Python: http://www.gnu.org/software/automake/manual/automake.html#Python
.. _Java bytecode compilation: http://www.gnu.org/software/automake/manual/automake.html#Java
.. _Executable scripts: http://www.gnu.org/software/automake/manual/automake.html#Scripts
.. _Architecture-independent data files: http://www.gnu.org/software/automake/manual/automake.html#Data
.. _Header files: http://www.gnu.org/software/automake/manual/automake.html#Headers
.. _Man pages: http://www.gnu.org/software/automake/manual/automake.html#Man-Pages
.. _Texinfo: http://www.gnu.org/software/automake/manual/automake.html#Texinfo
"""
if primary is None:
return __std_primary_main_prefixes
elif primary in __std_primary_main_prefixes:
return __std_primary_main_prefixes[primary]
else:
return [] | 294ad0fbf34a82c7d45a44ef3ba739c220298958 | 3,657,882 |
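# Hedged usage sketch; assumes the module-level __std_primary_main_prefixes
# mapping referenced above is defined alongside this function.
print(standard_primary_main_prefixes("PROGRAMS"))
# ['bin', 'sbin', 'libexec', 'pkglibexec', 'noinst', 'check'] per the docstring
print(standard_primary_main_prefixes("NOSUCH"))  # [] for unknown primaries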
def post_step1(records):
"""Apply whatever extensions we have for GISTEMP step 1, that run
after the main step 1. None at present."""
return records | 98287f6930db6aa025715356084b3bef8c851774 | 3,657,883 |
def gen_report_complex_no_files() -> dp.Report:
"""Generate a complex layout report with simple elements"""
select = dp.Select(blocks=[md_block, md_block], type=dp.SelectType.TABS)
group = dp.Group(md_block, md_block, columns=2)
toggle = dp.Toggle(md_block, md_block)
return dp.Report(
dp.Page(
blocks=[
dp.Group(md_block, md_block, columns=2),
dp.Select(blocks=[md_block, group, toggle], type=dp.SelectType.DROPDOWN),
],
title="Page Uno",
),
dp.Page(
blocks=[
dp.Group(select, select, toggle, columns=2),
dp.Select(blocks=[md_block, md_block, md_block], type=dp.SelectType.TABS),
],
title="Page Duo",
),
dp.Page(
blocks=[
dp.Group(group, group, columns=2),
dp.Select(blocks=[select, select], type=dp.SelectType.TABS),
],
title="Page Tres",
),
) | 874aa4c601e4ba4a01dfcdd0067f2638f04bd597 | 3,657,884 |
import requests
from bs4 import BeautifulSoup
import re
import wikipediaapi
def suggested_associations(wiki_title, language='de'):
"""Given a Wikipedia page title, return a list of suggested associations for this entry."""
# The main heuristic to determine relevant associations for a given Wikipedia entry is to first gather all
# articles that this entry's summary links to.
wiki = wikipediaapi.Wikipedia(language)
article = wiki.page(wiki_title)
links = article.links
# We encounter two problems:
# 1. links is a dictionary, so we lose information about the order in which links appear in the article
# 2. We are only interested in links appearing in the article's summary.
# We can overcome this by scraping the article's page ourselves and parsing it.
url = article.fullurl
html = requests.get(url)
bs = BeautifulSoup(html.text, "html.parser")
# The summary comprises all p-elements located before (but in the same hierarchy level as) the table of contents.
toc = bs.find(id='toc')
summary_ps = toc.find_all_previous('p')
# They are currently in reverse order.
summary_ps = list(reversed(summary_ps))
# Collect all links.
summary_as = []
for p in summary_ps:
summary_as += [a for a in p.find_all('a')]
    # The link text itself may be an inflection of the article name, which can be accessed through the 'title'
# attribute.
summary_references = []
for a in summary_as:
# Not all links point to a Wikipedia article, but those that do have a 'title' attribute.
if a.has_attr('title'):
title = a['title']
if title in links:
summary_references.append(links[title])
    # 'summary_references' now contains the list of Wikipedia articles referenced in the summary, in the order of
    # their appearance.
associations = [article.title for article in summary_references]
# We can further improve the quality of the titles by filtering out irrelevant articles.
irrelevant = [
"^Liste",
"^Hilfe:",
"^Datei:",
".*Kalender$",
".*\d{4}.*",
"^\d{1,2}\. \w+$"
]
keep_associations = []
for assoc in associations:
keep = True
for pattern in irrelevant:
regex = re.compile(pattern)
if regex.match(assoc):
keep = False
break
if keep:
keep_associations.append(assoc)
associations = keep_associations
# remove any words in parenthesis
for (i, assoc) in enumerate(associations):
if '(' in assoc:
associations[i] = re.sub(" \(.*\)", '', assoc)
return associations | 4b20a309b863feea64004e1b872c97a4ff1d37af | 3,657,885 |
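# Hedged usage sketch; performs live HTTP requests against Wikipedia, so the
# output depends on the current article content.
for assoc in suggested_associations("Relativitätstheorie", language="de"):
    print(assoc)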
def manage_categories():
"""
Manage expense categories
"""
alert_message = ""
user = User.query.filter_by(id=session["user_id"]).scalar()
if request.method == "GET":
with app.app_context():
categories = (
Category.query.options(joinedload("category_type"))
.options(joinedload("account"))
.filter(Category.user_id == session["user_id"])
.all()
)
return render_template(
"categories.html",
username=user.username,
alert_message=alert_message,
categories=categories,
)
if request.method == "POST":
category_id = request.form.get("edit")
with app.app_context():
category = (
Category.query.options(joinedload("category_type"))
.options(joinedload("account"))
.filter(Category.user_id == session["user_id"])
.filter(Category.id == category_id)
.scalar()
)
category_types = CategoryType.query.all()
accounts = Account.query.filter_by(user_id=session["user_id"]).all()
return render_template(
"edit_category.html",
username=user.username,
category=category,
category_types=category_types,
accounts=accounts,
) | 4180e52585e1c1cdb55c9b33a7220bcb7cebc087 | 3,657,886 |
import jax.numpy as jnp
def shear_3d(sxy=0., sxz=0., syx=0., syz=0., szx=0., szy=0.):
"""
Returns transformation matrix for 3d shearing.
Args:
sxy: xy shearing factor
sxz: xz shearing factor
syx: yx shearing factor
syz: yz shearing factor
szx: zx shearing factor
szy: zy shearing factor
Returns:
A 4x4 float32 transformation matrix.
"""
matrix = jnp.array([[ 1, sxy, sxz, 0],
[syx, 1, syz, 0],
[szx, szy, 1, 0],
[ 0, 0, 0, 1]], dtype='float32')
return matrix | f160ec1d2e51164f56136f7e7f1613b9cd74f430 | 3,657,887 |
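# Hedged usage sketch: apply the shear matrix to a homogeneous 3D point.
import jax.numpy as jnp

point = jnp.array([1., 2., 3., 1.])   # homogeneous coordinates
sheared = shear_3d(sxy=0.5) @ point
print(sheared)                        # [2. 2. 3. 1.] -- x gains 0.5 * y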
def Position(context):
"""Function: <number> position()"""
return context.position | e5ddf5aa8d5321ce9e7dc14b635cb942fbbbcbf1 | 3,657,888 |
import math
def spatial_shift_crop_list(size, images, spatial_shift_pos, boxes=None):
"""
Perform left, center, or right crop of the given list of images.
Args:
size (int): size to crop.
        images (list): list of images to crop. Dimension is
            `height` x `width` x `channel` or `channel` x `height` x `width`.
spatial_shift_pos (int): option includes 0 (left), 1 (middle), and
2 (right) crop.
boxes (list): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
Returns:
        cropped (list): the cropped list of images with dimension of
`height` x `width` x `channel`.
boxes (list): optional. Corresponding boxes to images. Dimension is
`num boxes` x 4.
"""
assert spatial_shift_pos in [0, 1, 2]
height = images[0].shape[0]
width = images[0].shape[1]
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_shift_pos == 0:
y_offset = 0
elif spatial_shift_pos == 2:
y_offset = height - size
else:
if spatial_shift_pos == 0:
x_offset = 0
elif spatial_shift_pos == 2:
x_offset = width - size
cropped = [
image[y_offset : y_offset + size, x_offset : x_offset + size, :]
for image in images
]
assert cropped[0].shape[0] == size, "Image height not cropped properly"
assert cropped[0].shape[1] == size, "Image width not cropped properly"
if boxes is not None:
for i in range(len(boxes)):
boxes[i][:, [0, 2]] -= x_offset
boxes[i][:, [1, 3]] -= y_offset
return cropped, boxes | c80d8ab83f072c94887d48c3d1cfe5bb18285dbb | 3,657,889 |
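# Hedged usage sketch: a left crop (position 0) of size 4 from two 6x8 HWC frames.
import numpy as np

frames = [np.random.rand(6, 8, 3) for _ in range(2)]
cropped, _ = spatial_shift_crop_list(4, frames, spatial_shift_pos=0)
print(cropped[0].shape)  # (4, 4, 3)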
import argparse
def get_input_args():
"""
Used to parse the command line arguments in order to predict the flower name and the class probability.
Options:
    Return top K most likely classes: python predict.py input checkpoint --top_k 3
Use a mapping of categories to real names: python predict.py input checkpoint --category_names cat_to_name.json
Use GPU for inference: python predict.py input checkpoint --gpu
"""
# Create Parse using ArgumentParser
parser = argparse.ArgumentParser(
description='Process Image Folder, CNN Model Architecture, Set hyper parameters')
parser.add_argument('single_image', metavar='single_image', type=str, nargs=1,
help='a single image for which the flower name and the class probability is to be predicted')
parser.add_argument('checkpoint', metavar='checkpoint', type=str, nargs=1,
help='The checkpoint from which the model is re-built for the prediction')
parser.add_argument('--top_k', type=int, default='3',
help='The number of most likely classes with default value \'3\'')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='A file mapping of categories to real names with default value \'cat_to_name.json\'')
parser.add_argument('--gpu', action='store_true',
help='If available then the GPU will be used, else not')
return parser.parse_args() | 145e6d2601b37b10f4c8dbac649ab8abec2951a5 | 3,657,890 |
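# Hedged usage sketch: parse prediction arguments from the command line, e.g.
#   python predict.py flower.jpg checkpoint.pth --top_k 5 --gpu
args = get_input_args()
print(args.single_image[0], args.checkpoint[0], args.top_k, args.category_names, args.gpu)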
def testsuite_results(log_filename, msg_testsuite_section_start,
msg_testsuite_end_message):
"""Read the NEST Travis CI build log file, find the 'make-installcheck'
section which runs the NEST test suite. Extract the total number of tests
and the number of tests failed. Return True if all tests passed
successfully and False in case one or more tests failed. Additionally the
total number of tests performed and the number of tests failed are
returned.
Parameters
----------
log_filename: NEST Travis CI build log file name.
msg_testsuite_section_start: Message number string, e.g. "MSGBLD1234".
msg_testsuite_end_message: Message number string, e.g. "MSGBLD1234".
Returns
-------
True or False.
Total number of tests.
Number of tests failed.
"""
in_installcheck_section = False
in_results_section = False
total_number_of_tests = None
number_of_tests_failed = None
status_tests = None
with open(log_filename) as fh:
for line in fh:
if is_message(line, msg_testsuite_section_start):
in_installcheck_section = True
if in_installcheck_section:
if line.strip() == "NEST Testsuite Summary":
in_results_section = True
if in_results_section:
if "Total number of tests:" in line:
total_number_of_tests = int(line.split(' ')[-1])
if "Failed" in line:
number_of_tests_failed = \
[int(s) for s in line.split() if s.isdigit()][0]
if is_message(line, msg_testsuite_end_message):
if number_of_tests_failed == 0:
status_tests = True
else:
status_tests = False
# The log file contains only one 'make-installcheck'
# section. Stop reading the log file.
break
return status_tests, total_number_of_tests, number_of_tests_failed | 682d5bcc7676388cb23707079581036efcc2f3da | 3,657,891 |
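# Hedged usage sketch; the message ids below are placeholders and the helper
# is_message() is assumed to be defined in the surrounding module.
ok, total, failed = testsuite_results(
    "travis_build.log", "MSGBLD1234", "MSGBLD1235")
print(ok, total, failed)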
def line_state_to_out(line: StaticStates, out_data: bool):
"""
    Calculate the data and enable values given an initial state
    Args:
        line: StaticStates member that represents the line
        out_data: If the line value is 2, it will be returned as the next value of data
Returns:
Data and Enable values for the next iteration
"""
data = False
enable = False
if line.value == 0:
data = False
enable = True
elif line.value == 1:
data = True
enable = True
elif line.value == 2:
data = out_data
enable = False
return data, enable | e79b48bef3fba31bef6aa758686d2f79580954a4 | 3,657,892 |
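# Hedged usage sketch; StaticStates is a stand-in Enum, the real type is only
# assumed to expose .value codes 0 (low), 1 (high) and 2 (high impedance).
from enum import Enum

class StaticStates(Enum):
    LOW = 0
    HIGH = 1
    HIGH_Z = 2

print(line_state_to_out(StaticStates.HIGH, out_data=False))   # (True, True)
print(line_state_to_out(StaticStates.HIGH_Z, out_data=True))  # (True, False)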
import googleads.common
def GetProxyConfig(http_proxy_uri=None, https_proxy_uri=None, cafile=None,
disable_certificate_validation=None):
"""Returns an initialized ProxyConfig for use in testing.
Args:
http_proxy_uri: A str containing the full URI for the http proxy host. If
this is not specified, the ProxyConfig will be initialized without an
HTTP proxy configured.
https_proxy_uri: A str containing the full URI for the https proxy host. If
this is not specified, the ProxyConfig will be initialized without an
HTTPS proxy configured.
cafile: A str containing the path to a custom ca file.
disable_certificate_validation: A boolean indicating whether or not to
disable certificate validation.
Returns:
An initialized ProxyConfig using the given configurations.
"""
return googleads.common.ProxyConfig(
http_proxy_uri, https_proxy_uri, cafile=cafile,
disable_certificate_validation=disable_certificate_validation) | f984a17dd452d91d7f5dde24784d44626561ca4f | 3,657,893 |
from typing import List
import os
def get_possible_paths(path: str) -> List[str]:
"""
Finds possible paths to resources, considering PACKAGE and USER directories first, then system-wide directories
:param path:
:return:
"""
# <sphinx_resources-get_possible_paths>
dist_name = env.distribution_name() # RKD_DIST_NAME env variable
paths = [
# eg. ~/.local/share/rkd/banner.txt
os.path.expanduser(('~/.local/share/%s/' + path) % dist_name),
# eg. /home/andrew/.local/lib/python3.8/site-packages/rkd/misc/banner.txt
(get_user_site_packages() + '/%s/misc/' + path) % dist_name,
# eg. /usr/lib/python3.8/site-packages/rkd/misc/banner.txt
(_get_global_site_packages() + '/%s/misc/' + path) % dist_name,
# eg. /usr/share/rkd/banner.txt
('/usr/share/%s/' + path) % dist_name
]
# </sphinx_resources-get_possible_paths>
# eg. ./rkd/misc/banner.txt
global_module_path = _get_current_script_path() + '/misc/' + path
# installed module directory should be less important to allow customizations
if "site-packages" in global_module_path:
paths.append(global_module_path)
else: # local development directory
paths = [global_module_path] + paths
return paths | 974071844bae1746aa9dbe4a59bca213903c1ba0 | 3,657,894 |
import pandas as pd
from chembl_webresource_client.new_client import new_client
def inchi_key_to_chembl(inchi_keys):
"""Return list of chembl ids that positionally map to inchi keys."""
molecule = new_client.molecule
chembl_mols = []
ndone = 0 # counter for printing progress to console
for inchi_key in inchi_keys:
if pd.isnull(inchi_key):
chembl_mols.append('')
continue
try:
mol = molecule.get(inchi_key)
if mol and mol['molecule_chembl_id']:
chembl_mols.append(mol['molecule_chembl_id'])
else:
chembl_mols.append('')
except:
chembl_mols.append('')
print('in error: ' + inchi_key)
# increment progress tracker and print after 100th id conversion
ndone += 1
if ndone % 100 == 0:
            print('... completed {} / {}'.format(ndone, len(inchi_keys)))
return chembl_mols | d60dc5f9391c9ae085b41f2bea37ef1851b3a943 | 3,657,895 |
def StandaloneStyle(cfg):
"""
Construct a OWS style object that stands alone, independent of a complete OWS configuration environment.
:param cfg: A valid OWS Style definition configuration dictionary.
Refer to the documentation for the valid syntax:
https://datacube-ows.readthedocs.io/en/latest/cfg_styling.html
:return: A OWS Style Definition object, prepared to work in standalone mode.
"""
style = StyleDefBase(StandaloneProductProxy(), cfg, stand_alone=True)
style.make_ready(None)
return style | 4b517acd8b48616175d8d78745b1beb51f9ba00d | 3,657,896 |
def insert_or_test_version_number():
"""Should the format name and version number be inserted in text
representations (not in tests!)"""
return INSERT_AND_CHECK_VERSION_NUMBER | 2add9bf3041d8bab36ee4e5cf8c5d708c7e0ff79 | 3,657,897 |
import sys
import os
def IsInteractive(output=False, error=False, heuristic=False):
"""Determines if the current terminal session is interactive.
sys.stdin must be a terminal input stream.
Args:
output: If True then sys.stdout must also be a terminal output stream.
error: If True then sys.stderr must also be a terminal output stream.
heuristic: If True then we also do some additional heuristics to check if
we are in an interactive context. Checking home path for example.
Returns:
True if the current terminal session is interactive.
"""
if not sys.stdin.isatty():
return False
if output and not sys.stdout.isatty():
return False
if error and not sys.stderr.isatty():
return False
if heuristic:
# Check the home path. Most startup scripts for example are executed by
# users that don't have a home path set. Home is OS dependent though, so
# check everything.
# *NIX OS usually sets the HOME env variable. It is usually '/home/user',
# but can also be '/root'. If it's just '/' we are most likely in an init
# script.
# Windows usually sets HOMEDRIVE and HOMEPATH. If they don't exist we are
# probably being run from a task scheduler context. HOMEPATH can be '\'
# when a user has a network mapped home directory.
# Cygwin has it all! Both Windows and Linux. Checking both is perfect.
home = os.getenv('HOME')
homepath = os.getenv('HOMEPATH')
if not homepath and (not home or home == '/'):
return False
return True | 8037e2d38dca9fc745b7b0b79cfb83226e59d42d | 3,657,898 |
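# Hedged usage sketch: only prompt the user when attached to a real terminal.
if IsInteractive(output=True, heuristic=True):
    answer = input("Continue? [y/N] ")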
def get_ipaserver_host():
"""Return the fqdn of the node hosting the IPA_SERVER.
"""
for node in svars['nodes']:
if 'ipaserver' in node['groups']:
return fqdn(node['name']) | 46d74ec3b1ebadaa7699073423de9e27cec8c137 | 3,657,899 |