content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---
def warn(string: str) -> str:
"""Add warn colour codes to string
Args:
string (str): Input string
Returns:
str: Warn string
"""
return "\033[93m" + string + "\033[0m" | 0bdbe5e7052e1994d978e45273baef75a1b72d89 | 6,200 |
import tensorflow as tf
def normalized_mean_square_error(logits, labels, axis = [0,1,2,3]):
"""
logits : [batch_size, w, h, num_classes]
labels : [batch_size, w, h, 1]
"""
with tf.name_scope("normalized_mean_square_error"):
nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(logits, labels), axis=[1,2,3]))
nmse_b = tf.sqrt(tf.reduce_sum(tf.square(labels), axis=[1,2,3]))
nmse = tf.reduce_mean(nmse_a / nmse_b)
return nmse | 0aee175ed0be3132d02018961265461e4880221b | 6,201 |
from os import path
import numpy as np
def load_glove_data():
"""
Loads Stanford's dictionary of word embeddings created by using corpus of
Twitter posts. Word embeddings are vectors of 200 components.
OUTPUT:
dictionary containing tweet word embeddings
"""
glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')
    model = {}
    with open(glove_path, 'r', encoding='utf-8') as f:
        for line in f:
            splitLine = line.split()
            word = splitLine[0]
            embedding = np.array([float(val) for val in splitLine[1:]])
            model[word] = embedding
return model | e78010f80ee7dd54c11a0b2bd293025ff2f90d70 | 6,202 |
def get_partition_to_num_rows(
namespace, tablename, partition_column, partition_column_values
):
"""
Helper function to get total num_rows in hive for given
partition_column_values.
"""
partitions = {
"{0}={1}".format(partition_column, partition_column_value)
for partition_column_value in partition_column_values
}
# Setting higher number of retries, as during testing, sometimes default
# "retries" values didn't seem enough in some cases.
ms = metastore.metastore(
namespace=namespace,
meta_only=True,
retries=10,
# timeout in milliseconds.
timeout=1800000,
)
partition_to_num_rows = {}
all_partitions = ms.get_partitions(tablename)
for hive_partition in all_partitions:
assert "numRows" in hive_partition.parameters, (
"numRows not in hive_partition.parameters,"
"Do not use Presto tables, only Hive tables!')"
)
if hive_partition.partitionName in partitions:
            partition_column_value = hive_partition.partitionName.split("=")[1]
            partition_to_num_rows[partition_column_value] = int(
hive_partition.parameters["numRows"]
)
return partition_to_num_rows | 305d40fd326bc45e906925b94077182584ffe3be | 6,203 |
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = initialize_game()
card_title = "Welcome"
speech_output = "Hello! I am Cookoo. Let's play a game. " \
"Are you ready to play?"
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output)) | 9c28194575013e98d1d6130a956714f65ebe3764 | 6,204 |
import tensorflow as tf
def kl_divergence_with_logits(logits_a, logits_b):
"""
Compute the per-element KL-divergence of a batch.
Args:
logits_a: tensor, model outputs of input a
logits_b: tensor, model outputs of input b
Returns:
Tensor of per-element KL-divergence of model outputs a and b
"""
a = tf.nn.softmax(logits_a, axis=1)
a_loga = tf.reduce_sum(a * log_softmax(logits_a), 1)
a_logb = tf.reduce_sum(a * log_softmax(logits_b), 1)
return a_loga - a_logb | 7df5976287edf5de37291db653a4334ed046a2f3 | 6,205 |
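A minimal usage sketch for the KL helper above (not part of the original snippet). It assumes TensorFlow 2.x and that the undefined log_softmax helper is equivalent to tf.nn.log_softmax:
import tensorflow as tf
log_softmax = tf.nn.log_softmax  # assumption: stands in for the helper the snippet leaves undefined
logits_a = tf.constant([[2.0, 0.5, 0.1]])
logits_b = tf.constant([[1.0, 1.0, 1.0]])
kl = kl_divergence_with_logits(logits_a, logits_b)
print(kl)  # shape (1,) tensor holding a non-negative per-element KL value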
import csv
def load_labels(abs_path):
"""
loads relative path file as dictionary
Args:
abs_path: absolute path
Returns dictionary of mappings
"""
label_tsv = open(abs_path, encoding="utf-8")
labels = list(csv.reader(label_tsv, delimiter="\t"))
return labels | 8ded58965dcc98b7a0aaa6614cbe4b66722dc76b | 6,206 |
import numpy as np
from scipy.cluster.hierarchy import cut_tree
def cut_tree_balanced(linkage_matrix_Z, max_cluster_size, verbose=False):
"""This function performs a balanced cut tree of a SciPy linkage matrix built using any linkage method
(e.g. 'ward'). It builds upon the SciPy and Numpy libraries.
The function looks recursively along the hierarchical tree, from the root (single cluster gathering
all the samples) to the leaves (i.e. the clusters with only one sample), retrieving the biggest
possible clusters containing a number of samples lower than a given maximum. In this way, if a
cluster at a specific tree level contains a number of samples higher than the given maximum, it is
ignored and its offspring (smaller) sub-clusters are taken into consideration. If the cluster contains
a number of samples lower than the given maximum, it is taken as result and its offspring sub-clusters
not further processed.
Input parameters:
linkage_matrix_Z: linkage matrix resulting from calling the method scipy.cluster.hierarchy.ward()
I.e. it contains the hierarchical clustering encoded as a linkage matrix.
max_cluster_size: maximum number of data samples contained within the resulting clusters. Thus, all
resulting clusters will contain a number of data samples <= max_cluster_size.
Note that max_cluster_size must be >= 1.
verbose: activates (True) / deactivates (False) some output print commands, which can be useful to
test and understand the proposed tree cut method.
Returns:
vec_cluster_id: one-dimensional numpy array of integers containing for each input sample its corresponding
cluster id. The cluster id is an integer which is higher for deeper tree levels.
vec_last_cluster_level: one-dimensional numpy array of arrays containing for each input sample its
corresponding cluster tree level, i.e. a sequence of 0s and 1s. Note that the cluster level is longer for
deeper tree levels, being [0] the root cluster, [0, 0] and [0, 1] its offspring, and so on. Also note that
    in each cluster splitting, the label 0 denotes the bigger cluster, while the label 1 denotes the smaller one.
"""
try:
# Assert that the input max_cluster_size is >= 1
assert max_cluster_size >= 1
# Perform a full cut tree of the linkage matrix, i.e. containing all tree levels
full_cut = cut_tree(linkage_matrix_Z)
if verbose:
print("Interim full cut tree (square matrix)")
print("Shape = " + str(full_cut.shape))
print(full_cut)
print('')
        # Initialize the variable containing the current cluster id (it will be higher for each newly
        # found valid cluster, i.e. for each found cluster with <= max_cluster_size data samples)
last_cluster_id = 1
# Initialize the resulting cluster id vector (containing for each row in input_data_x_sample
# its corresponding cluster id)
vec_cluster_id = np.zeros(full_cut.shape[1], dtype=int)
        # Initialize the resulting cluster level vector (containing for each data sample its
        # corresponding cluster tree level, i.e. a numpy array of 0s and 1s)
vec_last_cluster_level = np.empty((full_cut.shape[1],), dtype=object)
for i in range(full_cut.shape[1]): vec_last_cluster_level[i] = np.array([0],int)
# Scan the full cut matrix from the last column (root tree level) to the first column (leaves tree level)
if verbose:
print("Note about columns: within the full cut tree, the column " + str(full_cut.shape[1]-1) +
" represents the root, while 0 represent the leaves.")
print("We now scan the full cut tree from the root (column " + str(full_cut.shape[1]-1) + ") "
"to the leaves (column 0).")
print('')
for curr_column in range(full_cut.shape[1]-1,-1,-1):
# Get a list of unique group ids and their count within the current tree level
values, counts = np.unique(full_cut[:,curr_column], return_counts=True)
# Stop if all samples have been already selected (i.e. if all data samples have been already clustered)
if (values.size==1) and (values[0]==-1):
break
# For each group id within the current tree level
for curr_elem_pos in range(values.size):
# If it is a valid group id (i.e. not yet marked as processed with -1) ...
                    # Note: data samples which were already included in a valid cluster id (i.e. at a higher tree level)
                    # are marked with the group id -1 (see below)
if (values[curr_elem_pos] >= 0):
# Select the current group id
selected_curr_value = values[curr_elem_pos]
# Look for the vector positions (related to rows in input_data_x_sample) belonging to
# the current group id
selected_curr_elems = np.where(full_cut[:,curr_column]==selected_curr_value)
# Major step #1: Populate the resulting vector of cluster levels for each data sample
# If we are not at the root level (i.e. single cluster gathering all the samples) ...
if curr_column < (full_cut.shape[1]-1):
# Get the ancestor values and element positions
selected_ancestor_value = full_cut[selected_curr_elems[0][0],curr_column+1]
selected_ancestor_elems = np.where(full_cut[:,curr_column+1]==selected_ancestor_value)
# Compute the values and counts of the offspring (i.e. curr_elem + brothers) and sort them
# by their count (so that the biggest cluster gets the offspring_elem_label = 0, see below)
offspring_values, offspring_counts = np.unique(full_cut[selected_ancestor_elems,curr_column],
return_counts=True)
count_sort_ind = np.argsort(-offspring_counts)
offspring_values = offspring_values[count_sort_ind]
offspring_counts = offspring_counts[count_sort_ind]
# If the number of descendants is > 1 (i.e. if the curr_elem has at least one brother)
if (offspring_values.shape[0] > 1):
# Select the position of the current value (i.e. 0 or 1) and append it to the cluster level
offspring_elem_label = np.where(offspring_values==selected_curr_value)[0][0]
for i in selected_curr_elems[0]:
vec_last_cluster_level[i] = np.hstack((vec_last_cluster_level[i], offspring_elem_label))
# Major step #2: Populate the resulting vector of cluster ids for each data sample,
# and mark them as already clustered (-1)
# If the number of elements is below max_cluster_size ...
if (counts[curr_elem_pos] <= max_cluster_size):
if verbose:
print("Current column in full cut tree = " + str(curr_column))
print("list_group_ids: " + str(values))
print("list_count_samples: " + str(counts))
print("selected_curr_value: " + str(selected_curr_value) + ", count_samples = " +
str(counts[curr_elem_pos]) + ", marked as result")
print('')
# Relate these vector positions to the current cluster id
vec_cluster_id[selected_curr_elems] = last_cluster_id
# Delete these vector positions at the lower tree levels for further processing
# (i.e. mark these elements as already clustered)
full_cut[selected_curr_elems,0:curr_column] = -1
# Update the cluster id
last_cluster_id = last_cluster_id + 1
# Return the resulting clustering array (containing for each row in input_data_x_sample its
# corresponding cluster id) and the clustering level
return vec_cluster_id, vec_last_cluster_level
except AssertionError:
print("Please use a max_cluster_size >= 1") | 53290f432b9ad7404760e124ffe6d03e95e5d529 | 6,207 |
from typing import Callable
def len_smaller(length: int) -> Callable:
"""Measures if the length of a sequence is smaller than a given length.
>>> len_smaller(2)([0, 1, 2])
False
"""
    def len_smaller(seq):
        return len(seq) < length
return len_smaller | a43f1344a46a57d443d267de99ba7db08b9bf911 | 6,208 |
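Two illustrative calls to the predicate factory above (a sketch, assuming the len-based comparison):
is_short = len_smaller(3)
print(is_short([1, 2]))      # True: length 2 < 3
print(is_short([1, 2, 3]))   # False: length 3 is not < 3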
import numpy as np
def e_2e_fun(theta, e_init=e_1f):
"""
Electron energy after Compton scattering, (using energy e_1f)
:param theta: angle for scattered photon
:param e_init: initial photon energy
    :return: scattered electron energy
"""
return e_init / (((m_e * c ** 2) / e_init) * (1 / (1 - np.cos(theta))) + 1) | 8785f6dfbb4226df88e6ab2b883a989ff799d240 | 6,209 |
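For clarity (added here, not part of the original snippet), the return expression is the standard Compton result: the scattered photon keeps
E_\gamma' = \frac{E_\gamma}{1 + \frac{E_\gamma}{m_e c^2}\,(1 - \cos\theta)},
so the electron carries away
E_e = E_\gamma - E_\gamma' = \frac{E_\gamma}{\frac{m_e c^2}{E_\gamma}\,\frac{1}{1 - \cos\theta} + 1},
which is exactly the expression returned above with E_\gamma = e_init.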
from typing import AbstractSet
from pathlib import Path
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Mapping
from typing import Sequence
from typing import Dict
from typing import List
import csv
import shutil
def tag_images_for_google_drive(
input_files: AbstractSet[Path],
database: Optional[Path],
extra_tags: Optional[Set[str]],
tag_file: Optional[Path] = None,
from_files: bool = False,
from_db: bool = False,
force: bool = False,
dry: bool = False,
verbose: int = 0) -> Tuple[Mapping[Path, Tuple[str, Sequence[str]]], Mapping[Path, Tuple[str, Sequence[str]]]]:
"""
Analyse csv and files to extract tag and inject hash tag in description.
:param database: The CSV file or None
:param input_files: A set of filename
:param tag_file: A filename to save all tags or None
    :param from_files: A boolean value to use only the file names
    :param from_db: A boolean value to use only the CSV file
    :param dry: True to simulate the modification in files.
    :return: A tuple with the new database and the description of all modified files.
"""
assert bool(from_files) + bool(from_db) < 2
merge = not from_db and not from_files
assert not ((from_db or merge) and not database)
if not extra_tags:
extra_tags = set()
updated_files: Dict[Path, Tuple[str, List[str]]] = {} # Files to update
update_descriptions = False
ref_descriptions: Dict[Path, Tuple[str, List[str]]] = {}
description_date = 0.0
if database and database.is_file():
description_date = database.stat().st_mtime
with open(str(database), 'rt', encoding='utf-8') as csv_file:
rows = csv.reader(csv_file, delimiter=',')
ref_descriptions = {Path(row[0]): _extract_tags(row[1], '#') for row in rows if len(row) == 2}
else:
update_descriptions = True
if not shutil.which("exiftool"):
LOGGER.error("Install exiftool in PATH before to use tag_images_for_google_drive")
raise OSError(-1, "exiftool not found")
with ExifTool() as exif_tool:
# 1. Update images files
update_descriptions = _manage_files(exif_tool,
input_files,
from_db,
from_files,
ref_descriptions,
extra_tags,
update_descriptions,
updated_files,
force,
verbose)
# 2. Apply the descriptions file
update_descriptions = _manage_db(exif_tool,
description_date,
from_db,
from_files,
merge,
ref_descriptions,
extra_tags,
update_descriptions,
updated_files,
force,
verbose)
# 3. Apply update files
_manage_updated_files(exif_tool, dry, updated_files)
# 4. Update description
_manage_updated_db(database, dry, ref_descriptions, update_descriptions)
# 5. Count tags
all_tags: AbstractSet[str] = set()
nb_files = len(ref_descriptions)
nb_total_tags = 0
for _, (_, keywords) in ref_descriptions.items():
nb_total_tags += len(keywords)
all_tags = set(all_tags).union(keywords)
LOGGER.info(f"Use {nb_total_tags} tags in {nb_files} files, with a dictionary of {len(all_tags)} "
f"({int(nb_files / nb_total_tags * 100) if nb_total_tags else 0} t/f).")
_manage_tags_file(all_tags, dry, tag_file)
LOGGER.debug("Done")
return ref_descriptions, updated_files | 93750786413455b7baa74bce9764fdcfddc665f2 | 6,210 |
import os
from lxml import etree
def xml_reader(filename):
"""
A method using iterparse as above would be preferable, since we just want to
collect the first few tags. Unfortunately, so far iterparse does not work
with html (aka broken xml).
"""
name = os.path.basename(filename)
with open(filename, "rb") as file_h:
if etree.LXML_VERSION < (3, 3):
parser = etree.HTMLParser(encoding="latin1")
tree = etree.parse(file_h, parser)
row_it = tree.iter(tag="row")
element = next(row_it)
            attrs = [str(child.tag) for child in element.iterchildren()]
else:
row_it = etree.iterparse(file_h, tag="row", html=True)
(event, element) = next(row_it)
            attrs = [str(child.tag) for child in element.iterchildren()]
return (name, attrs) | 70cc9420155d5a0cf15a68ada1a428f66ac6ba9f | 6,211 |
from typing import List
def interval_list_intersection(A: List[List], B: List[List], visualization: bool = True) -> List[List]:
"""
    LeetCode 986: Interval List Intersections
Given two lists of closed intervals, each list of intervals is pairwise disjoint and in sorted order.
Return the intersection of these two interval lists.
Examples:
1. A: [[0, 2], [5, 10], [13, 23], [24, 25]], B: [[1, 5], [8, 12], [15, 24], [25, 26]]
return: [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]
"""
res = []
i = j = 0
while i < len(A) and j < len(B):
s = max(A[i][0], B[j][0])
e = min(A[i][1], B[j][1])
if s <= e:
res.append([s, e])
if A[i][1] < B[j][1]:
i += 1
else:
j += 1
if visualization:
interval_list_intersection_visualization(A, B, res)
return res | 722902e4c4c076a1dc25d07cc3253b2ec9f3d110 | 6,212 |
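A quick check reproducing the docstring example (visualization disabled so the plotting helper is not required):
A = [[0, 2], [5, 10], [13, 23], [24, 25]]
B = [[1, 5], [8, 12], [15, 24], [25, 26]]
print(interval_list_intersection(A, B, visualization=False))
# [[1, 2], [5, 5], [8, 10], [15, 23], [24, 24], [25, 25]]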
import os
def get(environ: OsEnvironLike = None) -> str:
"""Get the application ID from the environment.
Args:
environ: Environment dictionary. Uses os.environ if `None`.
Returns:
Default application ID as a string.
We read from the environment APPLICATION_ID (deprecated) or else
GAE_APPLICATION.
"""
if environ is None:
environ = os.environ
return environ.get('APPLICATION_ID', environ.get('GAE_APPLICATION', '')) | 112d816eb0cf4582694222d63561e909444d3e5d | 6,213 |
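An illustrative lookup using explicit dictionaries instead of os.environ (it assumes the OsEnvironLike alias used in the signature is defined elsewhere in the module):
print(get({'APPLICATION_ID': 'legacy-app'}))   # 'legacy-app' (deprecated key takes precedence)
print(get({'GAE_APPLICATION': 's~my-app'}))    # 's~my-app'
print(get({}))                                 # '' (neither key present)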
import nltk
def tokenize_query(query):
""" Tokenize a query """
tokenized_query = tokenizer.tokenize(query)
stop_words = set(nltk.corpus.stopwords.words("english"))
tokenized_query = [
word for word in tokenized_query if word not in stop_words]
tokenized_query = [stemmer.stem(word) for word in tokenized_query]
tokenized_query = [word.lower() for word in tokenized_query]
return tokenized_query | 422d59dc95661496dcfac83f142190a94127ae68 | 6,214 |
def rewrite_return(func):
"""Rewrite ret ops to assign to a variable instead, which is returned"""
ret_normalization.run(func)
[ret] = findallops(func, 'ret')
[value] = ret.args
ret.delete()
return value | d141ae9d2f36f4f3e41da626ed43a3902e43c267 | 6,215 |
def get_loss_fn(loss_factor=1.0):
"""Gets a loss function for squad task."""
def _loss_fn(labels, model_outputs):
start_positions = labels['start_positions']
end_positions = labels['end_positions']
start_logits, end_logits = model_outputs
return squad_loss_fn(
start_positions,
end_positions,
start_logits,
end_logits,
loss_factor=loss_factor)
return _loss_fn | ad07afbd39aa1338a0aeb3c1398aefacebceffa3 | 6,216 |
import asyncio
async def run_command(*args):
"""
https://asyncio.readthedocs.io/en/latest/subprocess.html
"""
# Create subprocess
process = await asyncio.create_subprocess_exec(
*args,
        # stdout must be a pipe to be accessible as process.stdout
stdout=asyncio.subprocess.PIPE)
# Wait for the subprocess to finish
stdout, stderr = await process.communicate()
# Return stdout
return stdout.decode().strip() | a0071a1bb8ba169179c67d22f5c8caca717697b3 | 6,217 |
def get_variants_in_region(db, chrom, start, stop):
"""
Variants that overlap a region
Unclear if this will include CNVs
"""
xstart = get_xpos(chrom, start)
xstop = get_xpos(chrom, stop)
variants = list(db.variants.find({ 'xpos': {'$lte': xstop, '$gte': xstart}
}, projection={'_id': False}, limit=SEARCH_LIMIT))
#add_consequence_to_variants(variants)
return list(variants) | 5665f4ff65832449c2dd7edb182fc3bd0707d189 | 6,218 |
def get_business(bearer_token, business_id):
"""Query the Business API by a business ID.
    Args:
        bearer_token (str): OAuth bearer token used to authenticate the request.
        business_id (str): The ID of the business to query.
dict: The JSON response from the request.
"""
business_path = BUSINESS_PATH + business_id
return request(API_HOST, business_path, bearer_token) | 982eb518b7d9f94b7208fb68ddbc9f6607d9be9a | 6,219 |
from keras.layers import Conv2D, Input, MaxPooling2D, ZeroPadding2D
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import concatenate
from keras.models import Model
from keras.regularizers import l2
def AlexNet_modified(input_shape=None, regularize_weight=0.0001):
"""
Alexnet convolution layers with added batch-normalization and regularization
:param input_shape:
:param regularize_weight:
:return:
"""
img_input = Input(shape=input_shape)
#Branch A (mimic the original alexnet)
x = Conv2D(48, (11, 11), strides=(4,4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
x = MaxPooling2D((3,3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((2, 2))(x)
x = Conv2D(128, (5, 5), strides=(1,1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = BatchNormalization(axis=-1)(x)
x = ZeroPadding2D((1, 1))(x)
x = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = ZeroPadding2D((1, 1))(x)
# Branch B (mimic the original alexnet)
y = Conv2D(48, (11, 11), strides=(4, 4), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(img_input)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((2, 2))(y)
y = Conv2D(128, (5, 5), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(192, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = BatchNormalization(axis=-1)(y)
y = ZeroPadding2D((1, 1))(y)
y = Conv2D(128, (3, 3), strides=(1, 1), activation='relu', padding='same', kernel_regularizer=l2(regularize_weight))(y)
y = MaxPooling2D((3, 3), strides=(2, 2))(y)
y = ZeroPadding2D((1, 1))(y)
out = concatenate([x,y], axis=-1)
inputs = img_input
model = Model(inputs, out, name='alexnet')
return model | b4bf37200a2bf429fe09eb9893b673e381ce0b36 | 6,220 |
import re
import numpy as np
def readblock(fileObj):
"""
parse the block of data like below
ORDINATE ERROR ABSCISSA
2.930E-06 1.8D-07 5.00E+02 X.
8.066E-06 4.8D-07 6.80E+02 .X.
1.468E-05 8.3D-07 9.24E+02 ..X.
2.204E-05 1.2D-06 1.26E+03 ...X...
"""
data = []
p = re.compile('ORDINATE')
q = re.compile('0LINEAR COEFFICIENTS')
for line in fileObj:
if q.search(line) is not None:
break
if p.search(line) is None:
dataContent = line[0:31]
dataContent = dataContent.replace('D', 'E')
datarow = list(map(float, dataContent.split()))
data.append(datarow)
return np.array(data) | 838adc5e4efc4f97c255917e8d51b5da398718bd | 6,221 |
import numpy as np
def as_scalar(scalar):
"""Check and return the input if it is a scalar.
If it is not scalar, raise a ValueError.
Parameters
----------
scalar : Any
the object to check
Returns
-------
float
        the scalar value if the input is a scalar
"""
if isinstance(scalar, np.ndarray):
assert scalar.size == 1
return scalar[0]
elif np.isscalar(scalar):
return scalar
else:
raise ValueError('expected scalar, got %s' % scalar) | ca5dd15eb2672ec61785dd2a36495d61ad4a3f9f | 6,222 |
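A couple of illustrative calls for the scalar check above:
print(as_scalar(np.array([3.5])))   # 3.5 (a size-1 array is unwrapped)
print(as_scalar(7))                 # 7 (plain scalars pass through)
# as_scalar([1, 2]) raises ValueError: expected scalar, got [1, 2]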
import itertools
import numpy as np
def evaluate_dnf( # pylint: disable=too-many-arguments,too-many-locals
num_objects: int,
num_vars: int,
nullary: np.ndarray,
unary: np.ndarray,
binary: np.ndarray,
and_kernel: np.ndarray,
or_kernel: np.ndarray,
target_arity: int,
) -> np.ndarray:
"""Evaluate given batch of interpretations."""
# nullary (B, numNullary)
# unary (B, O, numUnary)
# binary (B, O, O-1, numBinary)
# and_kernel (H, IN)
# or_kernel (H,)
# ---------------------------
# We need a binding / permutation matrix that binds every object to every
# variable, so we can evaluate the rule. The following list of tuples,
# tells us which constant each variable is for each permutation
perm_idxs = np.array(
list(itertools.permutations(range(num_objects), num_vars))
) # (K, V)
# ---
# Binary comparison indices for variables, XY XZ YX YZ ...
var_bidxs = np.stack(np.nonzero(1 - np.eye(num_vars))).T # (V*(V-1), 2)
perm_bidxs = perm_idxs[:, var_bidxs] # (K, V*(V-1), 2)
obj_idxs = np.stack(np.nonzero(1 - np.eye(num_objects))).T # (O*(O-1), 2)
    # The following matrix tells which variable binding pair is actually the
    # object pair we're looking for
var_obj_pairs = (perm_bidxs[..., None, :] == obj_idxs).all(-1)
# (K, V*(V-1), O*(O-1))
# We are guaranteed to have 1 matching pair due to unique bindings, so the
# non-zero elements in the last dimension encode the index we want
var_obj_pairs = np.reshape(np.nonzero(var_obj_pairs)[-1], var_obj_pairs.shape[:2])
# (K, V*(V-1))
# ---------------------------
batch_size = nullary.shape[0] # B
# Take the permutations
perm_unary = unary[:, perm_idxs] # (B, K, V, numUnary)
perm_binary = binary.reshape(
(batch_size, -1, binary.shape[-1])
) # (B, O*(O-1), numBinary)
perm_binary = perm_binary[:, var_obj_pairs] # (B, K, V*(V-1), numBinary)
perm_binary = perm_binary.reshape(
(
batch_size,
var_obj_pairs.shape[0],
num_vars,
num_vars - 1,
perm_binary.shape[-1],
)
)
# (B, K, V, V-1, numBinary)
# ---------------------------
# Merge different arities
flat_nullary = np.repeat(
nullary[:, None], perm_unary.shape[1], axis=1
) # (B, K, numNullary)
interpretation = flatten_interpretation(flat_nullary, perm_unary, perm_binary)
# (B, K, IN)
# ---------------------------
# Evaluate
and_eval = np.min(
interpretation[:, :, None] * and_kernel + (and_kernel == 0), -1
) # (B, K, H)
# ---
# Reduction of existential variables if any, K actually expands to O, O-1 etc numVars many times
# If the arity of the target predicate is 0, then we can reduce over K. If
# it is 1, then expand once then reduce over remaining variables, i.e. O, K//O, H -> (O, H)
shape_range = num_objects - np.arange(num_objects) # [numObjs, numObjs-1, ...]
new_shape = np.concatenate(
[[batch_size], shape_range[:target_arity], [-1, and_eval.shape[-1]]]
    )  # [B, O, ..., K//O, H]
    and_eval = np.reshape(and_eval, new_shape)
    # (B, O, K//O, H)
perm_eval = np.max(and_eval, -2) # (B, H,) if arity 0, (B, O, H) if 1 etc.
# ---
or_eval = np.max(
or_kernel * perm_eval - (or_kernel == 0), -1
) # (B,) if arity 0, (B, O) if 1 etc.
# ---------------------------
return or_eval | 2a73f917594361ba4837e7e1d5f45398b3b0eb8d | 6,223 |
def black_color_func(word, font_size, position, orientation,
random_state=None, **kwargs):
"""Make word cloud black and white."""
return("hsl(0,100%, 1%)") | d5e874a4f62d30abcba29476d0ba7fc3a31b0ca6 | 6,224 |
import re
def setup(hass, config):
""" Setup history hooks. """
hass.http.register_path(
'GET',
re.compile(
r'/api/history/entity/(?P<entity_id>[a-zA-Z\._0-9]+)/'
r'recent_states'),
_api_last_5_states)
hass.http.register_path('GET', URL_HISTORY_PERIOD, _api_history_period)
return True | c87ddf7d7473d49b142a866043c0adee216aed39 | 6,225 |
import os
def shuffle_file(filename):
"""Shuffle lines in file.
"""
sp = filename.split('/')
shuffled_filename = '/'.join(sp[:-1] + ['shuffled_{}'.format(sp[-1])])
logger.info(shuffled_filename)
os.system('shuf {} > {}'.format(filename, shuffled_filename))
return shuffled_filename | a2105af5b049df9da0f96f91d4a5e4515d0d978c | 6,226 |
import itertools
def fitallseq(digitslist, list):
"""if there is repeating digits, itertools.permutations() is still usable
if fail, still print some print, if i >= threshold, served as start point for new searching """
for p in itertools.permutations(digitslist):
#print "".join(pw)
i=0
pw="".join(p)
for seq in list:
if seqfit(seq,pw):
i=i+1
continue
else:
break
if i==nlines:
print("password sequence is found as:", pw)
return True
print("password is not found in all %d digits permutations", len(digitslist))
return False | 069c9a2038593e7146558a53ac86c8fe877b44d3 | 6,227 |
import sys
def adduser(args):
"""Add or update a user to the database: <username> <password> [[role] [role] ...]"""
    try:
        username, password = args[0:2]
    except (IndexError, ValueError) as exc:
        print("you must include at least a username and password: %s" % exc, file=sys.stderr)
        usage()
try:
roles = args[2:]
except IndexError:
roles = []
try:
store = _store()
user = User(username)
user.set_password(password)
for role in roles:
user.add_role(role)
store.put(user)
    except Exception as exc:
        print('unable to create or update user: %s' % exc, file=sys.stderr)
raise
return True | 7522753dff0647ac0764078902bf87c888f5a817 | 6,228 |
import numpy as np
def check_linear_dependence(matrix: np.ndarray) -> bool:
    """
    Checks via the Cauchy-Schwarz inequality whether the columns of a matrix are linearly dependent.
    :param matrix: 2x2 matrix to be processed.
:return: Boolean.
"""
for i in range(matrix.shape[0]):
for j in range(matrix.shape[0]):
if i != j:
inner_product = np.inner(matrix[:, i], matrix[:, j])
norm_i = np.linalg.norm(matrix[:, i])
norm_j = np.linalg.norm(matrix[:, j])
print("I: ", matrix[:, i])
print("J: ", matrix[:, j])
print("Prod: ", inner_product)
print("Norm i: ", norm_i)
print("Norm j: ", norm_j)
if np.abs(inner_product - norm_j * norm_i) < 1e-5:
print("Dependent")
return True
else:
print("Independent")
return False | 1b962afc16c135c49409a1cfb1f4c2b6a5695c75 | 6,229 |
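An illustrative call (the function also prints its intermediate diagnostics): the two columns below are proportional, so the Cauchy-Schwarz equality holds and dependence is reported.
M = np.array([[1.0, 2.0],
              [2.0, 4.0]])
print(check_linear_dependence(M))   # True: column 1 is exactly twice column 0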
import json
def cors_400(details: str = None) -> cors_response:
"""
Return 400 - Bad Request
"""
errors = Model400BadRequestErrors()
errors.details = details
error_object = Model400BadRequest([errors])
return cors_response(
req=request,
status_code=400,
body=json.dumps(delete_none(error_object.to_dict()), indent=_INDENT, sort_keys=True)
if _INDENT != 0 else json.dumps(delete_none(error_object.to_dict()), sort_keys=True),
x_error=details
) | 1f775db943ed0989da49d1b7a6952d7614ace982 | 6,230 |
def detect_label_column(column_names):
""" Detect the label column - which we display as the label for a joined column.
If a table has two columns, one of which is ID, then label_column is the other one.
"""
if (column_names and len(column_names) == 2 and "id" in column_names):
return [c for c in column_names if c != "id"][0]
return None | 40524e7ed0878316564ad8fd66a2c09fc892e979 | 6,231 |
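For example, a two-column table containing an id column yields the other column as its label:
print(detect_label_column(["id", "name"]))          # 'name'
print(detect_label_column(["id", "name", "age"]))   # None (more than two columns)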
import subprocess
import re
def get_date():
"""
get the date
"""
date = subprocess.check_output(["date"])
date = date.decode("utf-8")
date = re.search(r"\w{3} \d{1,2} \w{3} \d{4}", date)
date = date.group(0)
return date | d8d69805a42f18e7cb793f6695146ea3dfb8251c | 6,232 |
import glob
def sorted_files(pattern):
"""Return files matching glob pattern, *effectively* sorted by date
"""
return sort_files(glob.glob(pattern)) | 4fb2ad9f6396cb844320e4e3aeb2941567d8af4a | 6,233 |
import torch
def random_float_tensor(seed, size, a=22695477, c=1, m=2 ** 32, requires_grad=False):
""" Generates random tensors given a seed and size
https://en.wikipedia.org/wiki/Linear_congruential_generator
X_{n + 1} = (a * X_n + c) % m
Using Borland C/C++ values
The tensor will have values between [0,1)
Inputs:
seed (int): an int
size (Tuple[int]): the size of the output tensor
a (int): the multiplier constant to the generator
c (int): the additive constant to the generator
m (int): the modulus constant to the generator
"""
num_elements = 1
for s in size:
num_elements *= s
arr = [(a * seed + c) % m]
for i in range(num_elements - 1):
arr.append((a * arr[i] + c) % m)
return torch.tensor(arr, requires_grad=requires_grad).float().view(size) / m | c6c8ce42b2774204c3156bdd7b545b08315d1606 | 6,234 |
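A short usage sketch of the linear congruential generator above (requires PyTorch):
t = random_float_tensor(seed=123, size=(2, 3))
print(t.shape)                                    # torch.Size([2, 3])
print(t.min().item() >= 0, t.max().item() < 1)    # True True: values lie in [0, 1)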
def derivable_rng(spec, *, legacy=False):
"""
Get a derivable RNG, for use cases where the code needs to be able to reproducibly derive
sub-RNGs for different keys, such as user IDs.
Args:
spec:
Any value supported by the `seed` parameter of :func:`seedbank.numpy_rng`, in addition
to the following values:
* the string ``'user'``
* a tuple of the form (``seed``, ``'user'``)
Either of these forms will cause the returned function to re-derive new RNGs.
Returns:
function:
A function taking one (or more) key values, like :func:`derive_seed`, and
returning a random number generator (the type of which is determined by
the ``legacy`` parameter).
"""
if spec == 'user':
return DerivingRNG(derive_seed(), legacy)
elif isinstance(spec, tuple):
seed, key = spec
if key != 'user':
            raise ValueError('unrecognized key %s' % key)
return DerivingRNG(seed, legacy)
else:
return FixedRNG(rng(spec, legacy=legacy)) | 0772c9d27ba166f0981b3eb1da359a3ebb973322 | 6,235 |
def table(custom_headings, col_headings_formatted, rows, spec):
"""
Create a LaTeX table
Parameters
----------
custom_headings : None, dict
optional dictionary of custom table headings
col_headings_formatted : list
formatted column headings
rows : list of lists of cell-strings
Data in the table, pre-formatted
spec : dict
options for the formatter
Returns
-------
dict : contains key 'latex', which corresponds to a latex string representing the table
"""
longtables = spec['longtables']
table = "longtable" if longtables else "tabular"
if custom_headings is not None \
and "latex" in custom_headings:
latex = custom_headings['latex']
else:
latex = "\\begin{%s}[l]{%s}\n\hline\n" % \
(table, "|c" * len(col_headings_formatted) + "|")
latex += ("%s \\\\ \hline\n"
% (" & ".join(col_headings_formatted)))
for formatted_rowData in rows:
if len(formatted_rowData) > 0:
formatted_rowData_latex = [
(formatted_cell['latex'] if isinstance(formatted_cell, dict)
else formatted_cell) for formatted_cell in formatted_rowData]
latex += " & ".join(formatted_rowData_latex)
#MULTI-ROW support for *data* (non-col-header) rows of table. Currently
# unused (unneeded) - see multirow formatter that is commented out in formatters.py
#multirows = [ ("multirow" in el) for el in formatted_rowData_latex ]
#if any(multirows):
# latex += " \\\\ "
# last = True; lineStart = None; col = 1
# for multi,data in zip(multirows,formatted_rowData_latex):
# if last == True and multi == False:
# lineStart = col #line start
# elif last == False and multi == True:
# latex += "\cline{%d-%d} " % (lineStart,col) #line end
# last=multi
# res = _re.search("multicolumn{([0-9])}",data)
# if res: col += int(res.group(1))
# else: col += 1
# if last == False: #need to end last line
# latex += "\cline{%d-%d} "%(lineStart,col-1)
# latex += "\n"
#else:
latex += " \\\\ \hline\n"
latex += "\end{%s}\n" % table
return {'latex': latex} | 0ca28fce26fc7476aa5b88a621c5476ae8d381ce | 6,236 |
from optparse import OptionParser
import textDisplay
import graphicsDisplay
import layout
import pickle
import random
import sys
def readCommand( argv ):
"""
Processes the command used to run pacman from the command line.
"""
usageStr = """
USAGE: python pacman.py <options>
EXAMPLES: (1) python pacman.py
- starts an interactive game
(2) python pacman.py --layout smallClassic --zoom 2
OR python pacman.py -l smallClassic -z 2
- starts an interactive game on a smaller board, zoomed in
"""
parser = OptionParser(usageStr)
parser.add_option('-n', '--numGames', dest='numGames', type='int',
help=default('the number of GAMES to play'), metavar='GAMES', default=1)
parser.add_option('-l', '--layout', dest='layout',
help=default('the LAYOUT_FILE from which to load the map layout'),
metavar='LAYOUT_FILE', default='mediumClassic')
parser.add_option('-p', '--pacman', dest='pacman',
help=default('the agent TYPE in the pacmanAgents module to use'),
metavar='TYPE', default='KeyboardAgent')
parser.add_option('-t', '--textGraphics', action='store_true', dest='textGraphics',
help='Display output as text only', default=False)
parser.add_option('-q', '--quietTextGraphics', action='store_true', dest='quietGraphics',
help='Generate minimal output and no graphics', default=False)
parser.add_option('-g', '--ghosts', dest='ghost',
help=default('the ghost agent TYPE in the ghostAgents module to use'),
metavar = 'TYPE', default='RandomGhost')
parser.add_option('-k', '--numghosts', type='int', dest='numGhosts',
help=default('The maximum number of ghosts to use'), default=4)
parser.add_option('-z', '--zoom', type='float', dest='zoom',
help=default('Zoom the size of the graphics window'), default=1.0)
parser.add_option('-f', '--fixRandomSeed', action='store_true', dest='fixRandomSeed',
help='Fixes the random seed to always play the same game', default=False)
parser.add_option('-r', '--recordActions', action='store_true', dest='record',
help='Writes game histories to a file (named by the time they were played)', default=False)
parser.add_option('--replay', dest='gameToReplay',
help='A recorded game file (pickle) to replay', default=None)
parser.add_option('-a','--agentArgs',dest='agentArgs',
help='Comma separated values sent to agent. e.g. "opt1=val1,opt2,opt3=val3"')
parser.add_option('-x', '--numTraining', dest='numTraining', type='int',
help=default('How many episodes are training (suppresses output)'), default=0)
parser.add_option('--frameTime', dest='frameTime', type='float',
help=default('Time to delay between frames; <0 means keyboard'), default=0.1)
parser.add_option('-c', '--catchExceptions', action='store_true', dest='catchExceptions',
help='Turns on exception handling and timeouts during games', default=False)
parser.add_option('--timeout', dest='timeout', type='int',
help=default('Maximum length of time an agent can spend computing in a single game'), default=30)
options, otherjunk = parser.parse_args(argv)
if len(otherjunk) != 0:
raise Exception('Command line input not understood: ' + str(otherjunk))
args = dict()
# Fix the random seed
if options.fixRandomSeed: random.seed('cs188')
# Choose a layout
args['layout'] = layout.getLayout( options.layout )
if args['layout'] == None: raise Exception("The layout " + options.layout + " cannot be found")
# Choose a Pacman agent
noKeyboard = options.gameToReplay == None and (options.textGraphics or options.quietGraphics)
pacmanType = loadAgent(options.pacman, noKeyboard)
agentOpts = parseAgentArgs(options.agentArgs)
if options.numTraining > 0:
args['numTraining'] = options.numTraining
if 'numTraining' not in agentOpts: agentOpts['numTraining'] = options.numTraining
pacman = pacmanType(**agentOpts) # Instantiate Pacman with agentArgs
args['pacman'] = pacman
# Don't display training games
if 'numTrain' in agentOpts:
options.numQuiet = int(agentOpts['numTrain'])
options.numIgnore = int(agentOpts['numTrain'])
# Choose a ghost agent
ghostType = loadAgent(options.ghost, noKeyboard)
args['ghosts'] = [ghostType( i+1 ) for i in range( options.numGhosts )]
# Choose a display format
if options.quietGraphics:
args['display'] = textDisplay.NullGraphics()
elif options.textGraphics:
textDisplay.SLEEP_TIME = options.frameTime
args['display'] = textDisplay.PacmanGraphics()
else:
args['display'] = graphicsDisplay.PacmanGraphics(options.zoom, frameTime = options.frameTime)
args['numGames'] = options.numGames
args['record'] = options.record
args['catchExceptions'] = options.catchExceptions
args['timeout'] = options.timeout
# Special case: recorded games don't use the runGames method or args structure
if options.gameToReplay != None:
print('Replaying recorded game %s.' % options.gameToReplay)
f = open(options.gameToReplay, 'rb')
try: recorded = pickle.load(f)
finally: f.close()
recorded['display'] = args['display']
replayGame(**recorded)
sys.exit(0)
return args | 07cfc3b70e867a7656b642ad8d4bc0740a58c126 | 6,237 |
from functools import wraps
import pytest
def skipIfNoDB(test):
"""Decorate a test to skip if DB ``session`` is ``None``."""
@wraps(test)
def wrapper(self, db, *args, **kwargs):
if db.session is None:
pytest.skip('Skip because no DB.')
else:
return test(self, db, *args, **kwargs)
return wrapper | a75cc067679aaab3fec78c2310cbc2e34a19cee7 | 6,238 |
import numpy as np
def rboxes2quads_numpy(rboxes):
"""
:param rboxes: ndarray, shape = (*, h, w, 5=(4=(t,r,b,l) + 1=angle))
Note that angle is between [-pi/4, pi/4)
:return: quads: ndarray, shape = (*, h, w, 8=(x1, y1,... clockwise order from top-left))
"""
# dists, shape = (*, h, w, 4=(t,r,b,l))
# angles, shape = (*, h, w)
h, w, _ = rboxes.shape[-3:]
dists, angles = rboxes[..., :4], rboxes[..., 4]
# shape = (*, h, w, 5=(t,r,b,l,offset), 2=(x,y))
pts = np.zeros(list(dists.shape[:-1]) + [5, 2], dtype=np.float32)
# assign pts for angle >= 0
dists_pos = dists[angles >= 0]
if dists_pos.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_pos, axis=-1)
shape = tops.shape
pts[angles >= 0] = np.moveaxis(np.array([[np.zeros(shape), -(tops+bottoms)],
[lefts+rights, -(tops+bottoms)],
[lefts+rights, np.zeros(shape)],
[np.zeros(shape), np.zeros(shape)],
[lefts, -bottoms]]), [0, 1], [-2, -1])
# assign pts for angle < 0
dists_neg = dists[angles < 0]
if dists_neg.size > 0:
# shape = (*, h, w)
tops, rights, bottoms, lefts = np.rollaxis(dists_neg, axis=-1)
shape = tops.shape
pts[angles < 0] = np.moveaxis(np.array([[-(lefts+rights), -(tops+bottoms)],
[np.zeros(shape), -(tops+bottoms)],
[np.zeros(shape), np.zeros(shape)],
[-(lefts+rights), np.zeros(shape)],
[-rights, -bottoms]]), [0, 1], [-2, -1])
# note that rotate clockwise is positive, otherwise, negative
angles *= -1
# rotate
# shape = (*, h, w, 2, 2)
R = np.moveaxis(np.array([[np.cos(angles), -np.sin(angles)],
[np.sin(angles), np.cos(angles)]]), [0, 1], [-2, -1])
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
pts = np.swapaxes(pts, -1, -2)
# shape = (*, h, w, 2=(x, y), 5=(t,r,b,l,offset))
rotated_pts = R @ pts
# quads, shape = (*, h, w, 2=(x, y), 4=(t,r,b,l))
# offsets, shape = (*, h, w, 2=(x, y), 1=(offset))
quads, offsets = rotated_pts[..., :4], rotated_pts[..., 4:5]
# align
widths, heights = np.meshgrid(np.arange(w), np.arange(h))
# shape = (h, w, 2)
origins = np.concatenate((np.expand_dims(widths, -1), np.expand_dims(heights, -1)), axis=-1)
# shape = (*, h, w, 2=(x,y), 1)
origins = np.expand_dims(origins, axis=tuple(i for i in range(-1, rboxes.ndim - 3)))
quads += origins - offsets
quads[..., 0, :] = np.clip(quads[..., 0, :], 0, w)
quads[..., 1, :] = np.clip(quads[..., 1, :], 0, h)
# reshape
quads = np.swapaxes(quads, -1, -2).reshape(list(rboxes.shape[:-1]) + [8])
return quads | a5c48d48444f3c063fe912e2c6e76de373f7a1fc | 6,239 |
from typing import Callable
from typing import Optional
from typing import Mapping
from typing import Any
import reprlib
from typing import List
import inspect
from typing import cast
from typing import MutableMapping
def repr_values(condition: Callable[..., bool], lambda_inspection: Optional[ConditionLambdaInspection],
resolved_kwargs: Mapping[str, Any], a_repr: reprlib.Repr) -> List[str]:
"""
Represent function arguments and frame values in the error message on contract breach.
:param condition: condition function of the contract
:param lambda_inspection:
inspected lambda AST node corresponding to the condition function (None if the condition was not given as a
lambda function)
:param resolved_kwargs: arguments put in the function call
:param a_repr: representation instance that defines how the values are represented.
:return: list of value representations
"""
# Hide _ARGS and _KWARGS if they are not part of the condition for better readability
if '_ARGS' in resolved_kwargs or '_KWARGS' in resolved_kwargs:
parameters = inspect.signature(condition).parameters
malleable_kwargs = cast(
MutableMapping[str, Any],
resolved_kwargs.copy() # type: ignore
)
if '_ARGS' not in parameters:
malleable_kwargs.pop('_ARGS', None)
if '_KWARGS' not in parameters:
malleable_kwargs.pop('_KWARGS', None)
selected_kwargs = cast(Mapping[str, Any], malleable_kwargs)
else:
selected_kwargs = resolved_kwargs
# Don't use ``resolved_kwargs`` from this point on.
# ``selected_kwargs`` is meant to be used instead for better readability of error messages.
if is_lambda(a_function=condition):
assert lambda_inspection is not None, "Expected a lambda inspection when given a condition as a lambda function"
else:
assert lambda_inspection is None, "Expected no lambda inspection in a condition given as a non-lambda function"
reprs = None # type: Optional[MutableMapping[str, Any]]
if lambda_inspection is not None:
variable_lookup = collect_variable_lookup(condition=condition, resolved_kwargs=selected_kwargs)
recompute_visitor = icontract._recompute.Visitor(variable_lookup=variable_lookup)
recompute_visitor.visit(node=lambda_inspection.node.body)
recomputed_values = recompute_visitor.recomputed_values
repr_visitor = Visitor(
recomputed_values=recomputed_values, variable_lookup=variable_lookup, atok=lambda_inspection.atok)
repr_visitor.visit(node=lambda_inspection.node.body)
reprs = repr_visitor.reprs
# Add original arguments from the call unless they shadow a variable in the re-computation.
#
# The condition arguments are often not sufficient to figure out the error. The user usually needs
# more context which is captured in the remainder of the call arguments.
if reprs is None:
reprs = dict()
for key in sorted(selected_kwargs.keys()):
val = selected_kwargs[key]
if key not in reprs and _representable(value=val):
reprs[key] = val
parts = [] # type: List[str]
# We need to sort in order to present the same violation error on repeated violations.
# Otherwise, the order of the reported arguments may be arbitrary.
for key in sorted(reprs.keys()):
value = reprs[key]
if isinstance(value, icontract._recompute.FirstExceptionInAll):
writing = ['{} was False, e.g., with'.format(key)]
for input_name, input_value in value.inputs:
writing.append('\n')
writing.append(' {} = {}'.format(input_name, a_repr.repr(input_value)))
parts.append(''.join(writing))
else:
parts.append('{} was {}'.format(key, a_repr.repr(value)))
return parts | d7218029fd387bae108eedf49c9eef14d98e3c70 | 6,240 |
def human_permissions(permissions, short=False):
"""Get permissions in readable form.
"""
try:
permissions = int(permissions)
except ValueError:
return None
if permissions > sum(PERMISSIONS.values()) or permissions < min(
PERMISSIONS.values()
):
return ""
rez = []
for k, v in PERMISSIONS.items():
if permissions & v == v:
rez.append(k)
if short:
return "".join(((x.split("_")[1][:1]).lower() for x in rez))
else:
return " | ".join(rez) | 0d9c15659c93833042f44a0a96746e2f1dd9d307 | 6,241 |
import pandas as pd
from flask import request, jsonify
def predict():
"""
Prediction end point
Post a JSON holding the features and expect a prediction
Returns
-------
JSON
The field `predictions` will hold a list of 0 and 1's corresponding
to the predictions.
"""
logger.info('Starting prediction')
json_ = request.get_json()
query_df = pd.DataFrame(json_)
query = tm.prepare_data(query_df, train=False)
prediction = clf.predict(query)
prediction = [int(x) for x in prediction]
logger.info("Prediction is ready")
return jsonify({'prediction': prediction}) | 6899725edff8d2536c4a97018a5c6c7a4e0d416e | 6,242 |
from subprocess import call
from os import getcwd
def run_program(program, cmdargs, stdin_f, stdout_f, stderr_f,
run=True, cmd_prepend="", run_from_cmd=True,
**kwargs):
"""Runs `program` with `cmdargs` using `subprocess.call`.
:param str stdin_f: File from which to take standard input
:param str stdout_f: File in which to put standard output
:param str stderr_f: File in which to put standard error
:param bool run: Whether to actually run `program`
If `True` the program return code is returned.
If false a string pointing to the script which will run
the program is returned
:param str cmd_prepend: Put in the beginning of the bash script
:param bool run_from_cmd: Run `program` using the generated bash
script instead of running it directly
"""
time_file_name = '.'.join(stdout_f.split('.')[:-1])+'.time'
cmd_file_name = '.'.join(stdout_f.split('.')[:-1])+'.sh'
with open(cmd_file_name, 'w') as cmd_file:
cmd = ' '.join([program]+cmdargs)
time_cmd = "/usr/bin/time -o {time_file}".format(time_file=time_file_name)
cmd = "{time_cmd} {cmd} 1> {stdout} 2> {stderr} \n".format(time_cmd=time_cmd,
cmd=cmd,
stdout=stdout_f,
stderr=stderr_f)
cmd = cmd_prepend + cmd
cmd_file.write(cmd)
if run:
with OpenWithNone(stdin_f, 'r') as input_file, open(stdout_f, 'w') as stdout_file, open(stderr_f, 'w') as stderr_file:
if run_from_cmd:
retcode = call(["bash", cmd_file_name], **kwargs)
else:
try:
with open(time_file_name, 'w') as time_file:
with print_time(time_file):
retcode = call([program]+cmdargs, stdin=input_file,
stdout=stdout_file, stderr=stderr_file, **kwargs)
except Exception as e:
print(e)
print('program ', program)
print('cmdargs', cmdargs)
print('stdin ', stdin_f)
print('stdout ', stdout_f)
print('stderr ', stderr_f)
# print 'kwargs ', kwargs
print(getcwd())
raise
replace_string_in_file(stdout_f, '\r', '\n')
return retcode
else:
return cmd_file_name | aea74ec8ac296567b16e6f76eed1360e8bc76f69 | 6,243 |
import numpy as np
from numba import prange
def second_step_red(x: np.array, y: np.array, z: np.array,
px: np.array, py: np.array, pz: np.array,
Fx: np.array, Fy: np.array, Fz: np.array,
z_start: float, z_stop: float) -> (np.array, np.array, np.array,
np.array, np.array, np.array):
""" Second step for Relativictic Difference Scheme
"""
n = int(len(x))
for i in prange(n):
if z[i] >= z_start and z[i] <= z_stop:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vx = px[i]/gamma
vy = py[i]/gamma
vz = pz[i]/gamma
b2 = 1 + Fx[i]**2 + Fy[i]**2 + Fz[i]**2
b1 = 2 - b2
b3 = 2 * (vx*Fx[i] + vy*Fy[i] + vz*Fz[i])
fx = 2 * (vy*Fz[i] - vz*Fy[i])
fy = 2 * (vz*Fx[i] - vx*Fz[i])
fz = 2 * (vx*Fy[i] - vy*Fx[i])
vx = (vx*b1 + fx + Fx[i]*b3)/b2
vy = (vy*b1 + fy + Fy[i]*b3)/b2
vz = (vz*b1 + fz + Fz[i]*b3)/b2
x[i] += vx
y[i] += vy
z[i] += vz
px[i] = vx*gamma
py[i] = vy*gamma
pz[i] = vz*gamma
else:
gamma = (1 + px[i]**2 + py[i]**2 + pz[i]**2)**(1/2)
vz = pz[i]/gamma
z[i] += vz
return x, y, z, px, py, pz | 909f16a51074ca0c52641d3539509e513ca4ac80 | 6,244 |
import numpy as np
def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
"""Drop a point from the tabu search list."""
if len(tabulist) < tabulistsize:
return tabulist
if tabustrategy == 'oldest':
tabulist.pop(0)
else:
        distance = np.sqrt(np.sum((np.asarray(tabulist) - xf)**2, axis=1))
index = np.argmax(distance)
tabulist.pop(index)
return tabulist | 4cd8887bdd77bb001635f0fba57f5908f3451642 | 6,245 |
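A brief sketch of how the tabu-list pruning above might be exercised, assuming the list stores NumPy points (any strategy other than 'oldest' drops the point farthest from xf):
tabu = [np.array([0.0, 0.0]), np.array([5.0, 5.0]), np.array([1.0, 1.0])]
x = np.array([0.5, 0.5])
tabu = drop_tabu_points(x, tabu, tabulistsize=3, tabustrategy='farthest')
# the farthest point [5.0, 5.0] is removed once the list has reached tabulistsize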
def get_atom_feature_dims(list_acquired_feature_names):
""" tbd
"""
return list(map(len, [CompoundKit.atom_vocab_dict[name] for name in list_acquired_feature_names])) | 575de38dc0fdd198f6a6eb5cbb972063260bc4d4 | 6,246 |
def parse_selector(selector):
"""Parses a block of selectors like div .name #tag to class=.name, selector=div and id=#tag.
    Returns (selector, id, class[], attr{}) """
m_class, m_id, m_selector, m_attr = [], None, None, {}
if selector is not None and type(selector) == str:
selector_labels = selector.split()
for label in selector_labels:
if label.startswith("."):
m_class.append(label)
elif label.startswith("#"):
if m_id is not None:
raise ValueError("Multiple id's are declared in block "+str(selector))
m_id = label
elif label.startswith("@@"):
attribute_block = str(label).split('=')
if len(attribute_block) < 2:
                    raise ValueError('Attribute does not match the format '
                                     '@@<attribute_name>=<attribute_value> (no spaces allowed)')
                attr = attribute_block[0]
                value = attribute_block[1]
                m_attr[attr] = value
else:
if m_selector is not None:
raise ValueError("Multiple selectors are declared in block "+str(selector))
m_selector = label
    if m_attr and not m_selector:
        raise AssertionError('If selection is done with attribute @@<attr_name>=<attr_value>, '
                             'then a selector is required.\n Eg: <selector> @@<attr_name>=<attr_value>')
    return m_selector, m_id, m_class, m_attr | eadaa4cd79ed933325b0058e752a7187d5a09085 | 6,247 |
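Two example calls for the selector parser above (after the m_attr naming fix):
print(parse_selector("div .name #tag"))
# ('div', '#tag', ['.name'], {})
print(parse_selector("a @@href=/home"))
# ('a', None, [], {'@@href': '/home'})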
def is_batch_enabled(release_id):
"""
Check whether batching is enabled for a release.
"""
details = get_release_details_by_id(release_id)
return details['data']['attributes']['enable_batching'] | e22965166b35584e172e775b16a9d84affe5868f | 6,248 |
import contextlib
from concurrent import futures
import numpy as np
import rasterio
from rasterio.merge import merge
from rasterio.io import MemoryFile
def create(tiles):
"""Handler."""
with futures.ThreadPoolExecutor(max_workers=8) as executor:
responses = executor.map(worker, tiles)
with contextlib.ExitStack() as stack:
sources = [
stack.enter_context(rasterio.open(tile)) for tile in responses if tile
]
dest, output_transform = merge(sources, nodata=-32767)
meta = {
"driver": "GTiff",
"count": 1,
"dtype": np.int16,
"nodata": -32767,
"height": dest.shape[1],
"width": dest.shape[2],
"compress": "DEFLATE",
"crs": "epsg:4326",
"transform": output_transform,
}
memfile = MemoryFile()
with memfile.open(**meta) as dataset:
dataset.write(dest)
return memfile | cd080b0df34b12f8045420ac076f8e9ee6bc7c15 | 6,249 |
import os
import math
import sys
def write_rxn_rates(path, lang, specs, reacs, fwd_rxn_mapping):
"""Write reaction rate subroutine.
Includes conditionals for reversible reactions.
Parameters
----------
path : str
Path to build directory for file.
lang : {'c', 'cuda', 'fortran', 'matlab'}
Programming language.
specs : list of SpecInfo
List of species in the mechanism.
reacs : list of ReacInfo
List of reactions in the mechanism.
fwd_rxn_mapping : List of integers
The index of the reaction in the original mechanism
    Returns
    -------
None
"""
num_s = len(specs)
num_r = len(reacs)
rev_reacs = [i for i, rxn in enumerate(reacs) if rxn.rev]
num_rev = len(rev_reacs)
pdep_reacs = [i for i, rxn in enumerate(reacs) if rxn.thd_body or rxn.pdep]
pre = '__device__ ' if lang == 'cuda' else ''
filename = 'rates' + utils.header_ext[lang]
with open(os.path.join(path, filename), 'w') as file:
file.write(
'#ifndef RATES_HEAD\n'
'#define RATES_HEAD\n'
'\n'
'#include "header{}"\n'.format(utils.header_ext[lang]) +
'\n'
'{0}void eval_rxn_rates (const double,'
' const double, const double*, double*, double*);\n'
'{0}void eval_spec_rates (const double*,'
' const double*, const double*, double*, double*);\n'.format(pre)
)
if pdep_reacs:
file.write('{}void get_rxn_pres_mod (const double, const '
'double, const double*, double*);\n'.format(pre)
)
file.write('\n'
'#endif\n'
)
filename = 'rxn_rates' + utils.file_ext[lang]
with open(os.path.join(path, filename), 'w') as file:
line = ''
if lang == 'cuda': line = '__device__ '
if lang in ['c', 'cuda']:
file.write('#include "rates' + utils.header_ext[lang] + '"\n')
line += ('void eval_rxn_rates (const double T, const double pres,'
' const double * C, double * fwd_rxn_rates, '
'double * rev_rxn_rates) {\n'
)
elif lang == 'fortran':
line += ('subroutine eval_rxn_rates(T, pres, C, fwd_rxn_rates,'
' rev_rxn_rates)\n\n'
)
# fortran needs type declarations
line += (' implicit none\n'
' double precision, intent(in) :: '
'T, pres, C({})\n'.format(num_s)
)
line += (' double precision, intent(out) :: '
'fwd_rxn_rates({}), '.format(num_r) +
'rev_rxn_rates({})\n'.format(num_rev)
)
line += (' \n'
' double precision :: logT\n'
)
kf_flag = True
if rev_reacs and any([not r.rev_par for r in reacs]):
line += ' double precision :: kf, Kc\n'
kf_flag = False
if any([rxn.cheb for rxn in reacs]):
if kf_flag:
line += ' double precision :: kf, Tred, Pred\n'
kf_flag = False
else:
line += ' double precision :: Tred, Pred\n'
if any([rxn.plog for rxn in reacs]):
if kf_flag:
line += ' double precision :: kf, kf2\n'
kf_flag = False
else:
line += ' double precision :: kf2\n'
line += '\n'
elif lang == 'matlab':
line += ('function [fwd_rxn_rates, rev_rxn_rates] = '
'eval_rxn_rates (T, pres, C)\n\n'
' fwd_rxn_rates = zeros({},1);\n'.format(num_r) +
' rev_rxn_rates = fwd_rxn_rates;\n'
)
file.write(line)
get_array = utils.get_array
pre = ' '
if lang == 'c':
pre += 'double '
elif lang == 'cuda':
pre += 'register double '
line = (pre + 'logT = log(T)' +
utils.line_end[lang]
)
file.write(line)
file.write('\n')
kf_flag = True
if rev_reacs and any([not r.rev_par for r in reacs]):
kf_flag = False
if lang == 'c':
file.write(' double kf;\n'
' double Kc;\n'
)
elif lang == 'cuda':
file.write(' register double kf;\n'
' register double Kc;\n'
)
if any([rxn.cheb for rxn in reacs]):
# Other variables needed for Chebyshev
if lang == 'c':
if kf_flag:
file.write(' double kf;\n')
kf_flag = False
file.write(' double Tred;\n'
' double Pred;\n')
file.write(utils.line_start +
'double cheb_temp_0, cheb_temp_1' +
utils.line_end[lang]
)
dim = max(rxn.cheb_n_temp for rxn in reacs if rxn.cheb)
file.write(utils.line_start +
'double dot_prod[{}]'.format(dim) +
utils.line_end[lang]
)
elif lang == 'cuda':
if kf_flag:
file.write(' register double kf;\n')
kf_flag = False
file.write(' register double Tred;\n'
' register double Pred;\n')
file.write(utils.line_start +
'double cheb_temp_0, cheb_temp_1' +
utils.line_end[lang]
)
dim = max(rxn.cheb_n_temp for rxn in reacs if rxn.cheb)
file.write(utils.line_start +
'double dot_prod[{}]'.format(dim) +
utils.line_end[lang]
)
if any([rxn.plog for rxn in reacs]):
# Variables needed for Plog
if lang == 'c':
if kf_flag:
file.write(' double kf;\n')
file.write(' double kf2;\n')
if lang == 'cuda':
if kf_flag:
file.write(' register double kf;\n')
file.write(' register double kf2;\n')
file.write('\n')
def __get_arrays(sp, factor=1.0):
# put together all our coeffs
lo_array = [nu * factor] + [
sp.lo[6], sp.lo[0], sp.lo[0] - 1.0, sp.lo[1] / 2.0,
sp.lo[2] / 6.0, sp.lo[3] / 12.0, sp.lo[4] / 20.0,
sp.lo[5]
]
lo_array = [x * lo_array[0] for x in
[lo_array[1] - lo_array[2]] + lo_array[3:]
]
hi_array = [nu * factor] + [
sp.hi[6], sp.hi[0], sp.hi[0] - 1.0, sp.hi[1] / 2.0,
sp.hi[2] / 6.0, sp.hi[3] / 12.0, sp.hi[4] / 20.0,
sp.hi[5]
]
hi_array = [x * hi_array[0] for x in
[hi_array[1] - hi_array[2]] + hi_array[3:]
]
return lo_array, hi_array
for i_rxn in range(len(reacs)):
file.write(utils.line_start + utils.comment[lang] +
'rxn {}'.format(fwd_rxn_mapping[i_rxn]) + '\n')
rxn = reacs[i_rxn]
# if reversible, save forward rate constant for use
if rxn.rev and not rxn.rev_par and not (rxn.cheb or rxn.plog):
line = (' kf = ' + rxn_rate_const(rxn.A, rxn.b, rxn.E) +
utils.line_end[lang]
)
file.write(line)
elif rxn.cheb:
file.write(get_cheb_rate(lang, rxn))
elif rxn.plog:
# Special forward rate evaluation for Plog reacions
vals = rxn.plog_par[0]
file.write(' if (pres <= {:.4e}) {{\n'.format(vals[0]) +
' kf = ' +
rxn_rate_const(vals[1], vals[2], vals[3]) +
utils.line_end[lang]
)
for idx, vals in enumerate(rxn.plog_par[:-1]):
vals2 = rxn.plog_par[idx + 1]
line = (' }} else if ((pres > {:.4e}) '.format(vals[0]) +
'&& (pres <= {:.4e})) {{\n'.format(vals2[0]))
file.write(line)
line = (' kf = log(' +
rxn_rate_const(vals[1], vals[2], vals[3]) + ')'
)
file.write(line + utils.line_end[lang])
line = (' kf2 = log(' +
rxn_rate_const(vals2[1], vals2[2], vals2[3]) + ')'
)
file.write(line + utils.line_end[lang])
pres_log_diff = math.log(vals2[0]) - math.log(vals[0])
line = (' kf = exp(kf + (kf2 - kf) * (log(pres) - ' +
'{:.16e}) / '.format(math.log(vals[0])) +
'{:.16e})'.format(pres_log_diff)
)
file.write(line + utils.line_end[lang])
vals = rxn.plog_par[-1]
file.write(
' }} else if (pres > {:.4e}) {{\n'.format(vals[0]) +
' kf = ' +
rxn_rate_const(vals[1], vals[2], vals[3]) +
utils.line_end[lang] +
' }\n'
)
line = ' ' + get_array(lang, 'fwd_rxn_rates', i_rxn) + ' = '
# reactants
for i, isp in enumerate(rxn.reac):
nu = rxn.reac_nu[i]
# check if stoichiometric coefficient is double or integer
if utils.is_integer(nu):
# integer, so just use multiplication
for i in range(int(nu)):
line += '' + get_array(lang, 'C', isp) + ' * '
else:
                line += ('pow(' + get_array(lang, 'C', isp) +
                         ', {}) * '.format(nu)
                         )
# Rate constant: print if not reversible, or reversible but
# with explicit reverse parameters.
if (rxn.rev and not rxn.rev_par) or rxn.plog or rxn.cheb:
line += 'kf'
else:
line += rxn_rate_const(rxn.A, rxn.b, rxn.E)
line += utils.line_end[lang]
file.write(line)
if rxn.rev:
if not rxn.rev_par:
# line = ' Kc = 0.0' + utils.line_end[lang]
# file.write(line)
# sum of stoichiometric coefficients
sum_nu = 0
coeffs = {}
# go through product species
for isp, prod_sp in enumerate(rxn.prod):
# check if species also in reactants
if prod_sp in rxn.reac:
isp2 = rxn.reac.index(prod_sp)
nu = rxn.prod_nu[isp] - rxn.reac_nu[isp2]
else:
nu = rxn.prod_nu[isp]
# Skip species with zero overall
# stoichiometric coefficient.
if (nu == 0):
continue
sum_nu += nu
# get species object
sp = specs[prod_sp]
if not sp:
print('Error: species ' + prod_sp + ' in reaction '
'{} not found.\n'.format(i_rxn)
)
sys.exit()
lo_array, hi_array = __get_arrays(sp)
if not sp.Trange[1] in coeffs:
coeffs[sp.Trange[1]] = lo_array, hi_array
else:
coeffs[sp.Trange[1]] = [
lo_array[i] + coeffs[sp.Trange[1]][0][i]
for i in range(len(lo_array))
], [
hi_array[i] + coeffs[sp.Trange[1]][1][i]
for i in range(len(hi_array))
]
# now loop through reactants
for isp, reac_sp in enumerate(rxn.reac):
                    # Check if species also in products;
                    # (if so, already considered).
if reac_sp in rxn.prod: continue
nu = rxn.reac_nu[isp]
sum_nu -= nu
# get species object
sp = specs[reac_sp]
if not sp:
print('Error: species ' + reac_sp + ' in reaction '
'{} not found.\n'.format(i_rxn)
)
sys.exit()
lo_array, hi_array = __get_arrays(sp, factor=-1.0)
if not sp.Trange[1] in coeffs:
coeffs[sp.Trange[1]] = lo_array, hi_array
else:
coeffs[sp.Trange[1]] = [
lo_array[i] +
coeffs[sp.Trange[1]][0][i]
for i in range(len(lo_array))
], [hi_array[i] +
coeffs[sp.Trange[1]][1][i]
for i in range(len(hi_array))
]
isFirst = True
for T_mid in coeffs:
# need temperature conditional for equilibrium constants
line = ' if (T <= {:})'.format(T_mid)
if lang in ['c', 'cuda']:
line += ' {\n'
elif lang == 'fortran':
line += ' then\n'
elif lang == 'matlab':
line += '\n'
file.write(line)
lo_array, hi_array = coeffs[T_mid]
if isFirst:
line = ' Kc = '
else:
if lang in ['cuda', 'c']:
line = ' Kc += '
else:
line = ' Kc = Kc + '
line += ('({:.16e} + '.format(lo_array[0]) +
'{:.16e} * '.format(lo_array[1]) +
'logT + T * ('
'{:.16e} + T * ('.format(lo_array[2]) +
'{:.16e} + T * ('.format(lo_array[3]) +
'{:.16e} + '.format(lo_array[4]) +
'{:.16e} * T))) - '.format(lo_array[5]) +
'{:.16e} / T)'.format(lo_array[6]) +
utils.line_end[lang]
)
file.write(line)
if lang in ['c', 'cuda']:
file.write(' } else {\n')
elif lang in ['fortran', 'matlab']:
file.write(' else\n')
if isFirst:
line = ' Kc = '
else:
if lang in ['cuda', 'c']:
line = ' Kc += '
else:
line = ' Kc = Kc + '
line += ('({:.16e} + '.format(hi_array[0]) +
'{:.16e} * '.format(hi_array[1]) +
'logT + T * ('
'{:.16e} + T * ('.format(hi_array[2]) +
'{:.16e} + T * ('.format(hi_array[3]) +
'{:.16e} + '.format(hi_array[4]) +
'{:.16e} * T))) - '.format(hi_array[5]) +
'{:.16e} / T)'.format(hi_array[6]) +
utils.line_end[lang]
)
file.write(line)
if lang in ['c', 'cuda']:
file.write(' }\n\n')
elif lang == 'fortran':
file.write(' end if\n\n')
elif lang == 'matlab':
file.write(' end\n\n')
isFirst = False
line = (' Kc = '
'{:.16e}'.format((chem.PA / chem.RU) ** sum_nu) +
' * exp(Kc)' +
utils.line_end[lang]
)
file.write(line)
line = ' ' + get_array(lang, 'rev_rxn_rates',
rev_reacs.index(i_rxn)
) + ' = '
# reactants (products from forward reaction)
for isp in rxn.prod:
nu = rxn.prod_nu[rxn.prod.index(isp)]
# check if stoichiometric coefficient is double or integer
if utils.is_integer(nu):
# integer, so just use multiplication
for i in range(int(nu)):
line += '' + get_array(lang, 'C', isp) + ' * '
else:
line += ('pow(' + get_array(lang, 'C', isp) +
', {}) * '.format(nu)
)
# rate constant
if rxn.rev_par:
# explicit reverse Arrhenius parameters
line += rxn_rate_const(rxn.rev_par[0],
rxn.rev_par[1],
rxn.rev_par[2]
)
else:
# use equilibrium constant
line += 'kf / Kc'
line += utils.line_end[lang]
file.write(line)
file.write('\n')
if lang in ['c', 'cuda']:
file.write('} // end eval_rxn_rates\n\n')
elif lang == 'fortran':
file.write('end subroutine eval_rxn_rates\n\n')
elif lang == 'matlab':
file.write('end\n\n')
return | 110074159d07bbca9837dafc560cadbbc2780689 | 6,250 |
import cupy as cp
def _rfftn_empty_aligned(shape, axes, dtype, order='C', n=None):
"""Patched version of :func:`sporco.fft.rfftn_empty_aligned`.
"""
ashp = list(shape)
raxis = axes[-1]
ashp[raxis] = ashp[raxis] // 2 + 1
cdtype = _complex_dtype(dtype)
return cp.empty(ashp, cdtype, order) | a85ab3a938694a82d186b968a2d7d4c710f1ecde | 6,251 |
def get_test_config():
"""
Returns a basic FedexConfig to test with.
"""
# Test server (Enter your credentials here)
return FedexConfig(key='xxxxxxxxxxxxxxxxx',
password='xxxxxxxxxxxxxxxxxxxxxxxxx',
account_number='xxxxxxxxx',
meter_number='xxxxxxxxxx',
use_test_server=True) | 81b29fbb135b30f24aa1fe7cb32844970617f0ee | 6,252 |
from datetime import datetime
import itertools
import json
import logging
import csv
def write_data(movies, user, data_format='json'):
"""
"""
assert movies, 'no data to write'
date = datetime.now().strftime('%Y%m%d')
movies_clean = itertools.chain.from_iterable((json.loads(el) for el in movies))
movies_clean = tuple(movies_clean)
if data_format == 'all':
file_formats = ('csv', 'json')
else:
file_formats = (data_format, )
if 'json' in file_formats:
file_name = f'{user}_filmweb_{date}.json'
with open(file_name, 'w', encoding='utf-8') as out_file:
out_file.write(json.dumps(movies_clean))
logging.info(f'{file_name} written!')
if 'csv' in file_formats:
file_name = f'{user}_filmweb_{date}.csv'
with open(file_name, 'w', encoding='utf-8') as out_file:
writer = csv.DictWriter(out_file, fieldnames=CSV_ROWS, dialect='unix')
writer.writeheader()
for movie in movies_clean:
writer.writerow(movie)
logging.info(f'{file_name} written!')
return file_name | 704ebf1aa1b45855b8fade61cdf6a9bb12e44c83 | 6,253 |
import json
import tempfile
def get_genotypes(
single_end: list,
paired_end: list,
metadata: str,
bam_dir: str,
intermediate_dir: str,
reference_genome_path: str,
mapping_quality: int,
blacklist_path: str,
snps_path: str,
processes: int,
memory: int,
skip_preprocessing: bool = False,
write_bam: bool = False,
algorithm_switch_bp: int = 70,
algorithm=None,
temp_dir=None
):
"""Obtain genotypes from sequencing data using QuASAR
Parameters
----------
single_end : list
List of single-end input files
paired_end : list
List of paired-end input files
metadata : dict
Dict of input file metadata
bam_dir : str
Directory to write BAM files
intermediate_dir : str
Directory to write intermediate pileup / bed files
reference_genome_path : str
Path to reference genome
mapping_quality : int
Minimum quality score for filtering alignment
blacklist_path : str
Path to ENCODE mappability blacklist
snps_path : str
Path to file containing SNPs to genotype
processes : int
Number of processes
memory : int
Memory limit
skip_preprocessing : bool
Indicator to skip preprocessing steps
write_bam : bool
Indicator to write a BAM file to disk
algorithm_switch_bp : int
Read length threshold for switching to `bwa mem`
algorithm : str or None
Force use of either `aln` or `mem` algorithm, if supplied
temp_dir
directory to use for temporary files
"""
n_single_end = len(single_end)
n_paired_end = len(paired_end)
if not metadata:
metadata_dict = {}
else:
with open(metadata, 'r') as f:
metadata_dict = json.load(f)
n_metadata = sum(len(x['libraries']) for x in metadata_dict.values())
def prepare_quasar_input_params(temp_dir_name, n, pe=False):
return {
'bam_dir': bam_dir if bam_dir else temp_dir_name,
'intermediate_dir': (
intermediate_dir if intermediate_dir
else temp_dir_name
),
'reference_genome_path': reference_genome_path,
'mapping_quality': mapping_quality,
'blacklist_path': blacklist_path,
'snps_path': snps_path,
'processes': max(1, int(processes / n)),
'memory': memory / min(processes, n),
'paired_end': pe,
'skip_preprocessing': skip_preprocessing,
'write_bam': write_bam,
'algorithm_switch_bp': algorithm_switch_bp,
'algorithm': algorithm,
'temp_dir': temp_dir
}
with tempfile.TemporaryDirectory(dir=temp_dir) as temp_dir_name:
with Pool(processes=min(processes, max(n_single_end, n_paired_end, n_metadata))) as pool:
if n_single_end > 0:
single_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_single_end, pe=False)
),
single_end
)
else:
single_end_quasar_input_paths = []
if n_paired_end > 0:
paired_end_quasar_input_paths = pool.map(
partial(
prepare_quasar_input,
**prepare_quasar_input_params(temp_dir_name, n_paired_end, pe=True)
),
paired_end
)
else:
paired_end_quasar_input_paths = []
if n_metadata > 0:
meta_se, meta_pe = collate_metadata(metadata_dict)
if len(meta_se) > 0:
metadata_quasar_input_paths_se = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_se), pe=False)
),
meta_se
)
else:
metadata_quasar_input_paths_se = []
if len(meta_pe) > 0:
metadata_quasar_input_paths_pe = pool.starmap(
partial(
prepare_quasar_input_from_metadata,
**prepare_quasar_input_params(temp_dir_name, len(meta_pe), pe=True)
),
meta_pe
)
else:
metadata_quasar_input_paths_pe = []
else:
metadata_quasar_input_paths_se, metadata_quasar_input_paths_pe = [], []
return pyQuASAR.genotype(
*filter(
None,
single_end_quasar_input_paths
+ paired_end_quasar_input_paths
+ metadata_quasar_input_paths_se
+ metadata_quasar_input_paths_pe
)
) | 7ee61a9b8dfbbedf7d595034a40ae9084e1fa69f | 6,254 |
def async_handle_google_actions(hass, cloud, payload):
"""Handle an incoming IoT message for Google Actions."""
result = yield from ga.async_handle_message(
hass, cloud.gactions_config, payload)
return result | 1c9ec2e37a1c752abb59301f546db4e14fdf57d8 | 6,255 |
import cv2
def get_picture_landmarks(filepath, predictor, logs=True):
"""
    Read the image at ``filepath``, extract its facial landmark points with
    ``predictor`` and return the resulting FLandmarks object.
"""
if logs:
print("Processing file: {}".format(filepath))
frame = cv2.imread(filepath)
lm = FLandmarks()
lm.extract_points(frame, predictor)
    if logs:
        print('\n')
    return lm | 02b92c663c9efe3fad18b35b3808e0b004b1a8c0 | 6,256
def conflict(next_x: int, s: tuple) -> bool:
"""Return a boolean that defines the conflict condition of the next queen's position"""
next_i = len(s)
for i in range(next_i):
if abs(s[i] - next_x) in (0, next_i - i):
return True
else:
return False | cc29b142e1cc799c0a305523b713c5085af25fd0 | 6,257 |
async def main_page():
"""Main page. Just for example."""
return APIResponse(message="ok") | f1a2022df08725388c02dabe77bc4ee29eb5f968 | 6,258 |
from typing import List
def split_to_sublists(initial_list:list, n:int, strict:bool=True) -> List[list]:
"""Takes a list and splits it into sublists of size n
Parameters
----------
initial_list : list
The initial list to split into sublists
n : int
The size of each sublist
strict: bool
Whether to force an error if the length of the initial list is not divisible by n (split into even groups), default True
Returns
-------
List[list]
A list of lists of size n (unless strict is False, then the last list may be > n)
Examples
--------
### Split gallery images into sublists of 3
#### JINJA USAGE
```jinja2
{% if gallery|length % 3 == 0 %}
{% for sublist in gallery|split_to_sublists(3) %}
<div class="row">
<div class="col-md-4">
<img src="{{ sublist.0[0]['file_path'] }}" alt="{{ sublist.0[0]['file_path'].split()[-1] }}">
</div>
<div class="col-md-4">
<img src="{{ sublist.1[0]['file_path'] }}" alt="{{ sublist.1[0]['file_path'].split()[-1]}}">
</div>
<div class="col-md-4">
<img src="{{ sublist.2[0]['file_path'] }}" alt="{{ sublist.2[0]['file_path'].split()[-1] }}">
</div>
</div>
{% endfor %}
    {% endif %}
```
The above jinja is roughly equivalent to something like this in pure python:
```python
gallery = ["image 1" , "image 2", "image 3", "image 4" , "image 5", "image 6"]
    if len(gallery) % 3 == 0:
for sublist in split_to_sublists(gallery, 3): # Returns [["image 1" , "image 2", "image 3"], ["image 4" , "image 5", "image 6"]]
... # Do stuff with each sublist
```
"""
if strict:
if not len(initial_list) % n == 0:
raise ValueError(f"Provided list was not of correct size: \n\tList: {initial_list}\n\tSegment size {n}")
result = []
for i in range(0, len(initial_list), n): # Create sublists up to size n
result.append( initial_list[i:i + n])
return result | fcca74f9814020c99aaf8b31f092ca3ca9533216 | 6,259 |
import os
import re
def get_matched_files(dirPath=".", regex=None):
"""Get the abspath of the files whose name matches a regex
Only files will be returned, and directories are excluded.
Args:
dirPath (str): the directory to search
regex (regex): the regular expression to match the filename
Returns:
tuple of strings
"""
    # check the existence of the path
fns = []
_absDir = os.path.abspath(dirPath)
if os.path.isdir(_absDir):
for i in os.listdir(_absDir):
if regex != None:
if not re.match(regex, i):
continue
_fpath = os.path.join(_absDir, i)
if os.path.isfile(_fpath):
fns.append(_fpath)
return tuple(fns) | 118cf628b54f50b2c41c1885bcf000a741966086 | 6,260 |
from pathlib import Path
def get_sha1(req_path: Path) -> str:
""" For larger files sha1 algorithm is significantly faster than sha256 """
return get_hash(req_path, sha1) | 768f101fe4ad57eaea9ccd68d247e6a85b1cebaa | 6,261 |
import mimetypes
import os
import requests
def upload_attachment(page_id, file, comment, confluence_api_url, username, password, raw = None):
"""
    Upload an attachment
:param page_id: confluence page id
:param file: attachment file
:param comment: attachment comment
:return: boolean
"""
content_type = mimetypes.guess_type(file)[0]
filename = os.path.basename(file)
if raw is None:
r = requests.get(file, stream=True)
r.raw.decode_content = True
else:
r = raw
file_to_upload = {
'comment': comment,
'file': (urlEncodeNonAscii(filename), r.raw, content_type, {'Expires': '0'})
}
attachment = get_attachment(page_id, filename, confluence_api_url, username, password)
if attachment:
url = '%s/rest/api/content/%s/child/attachment/%s/data' % (confluence_api_url, page_id, attachment.id)
else:
url = '%s/rest/api/content/%s/child/attachment/' % (confluence_api_url, page_id)
session = requests.Session()
session.auth = (username, password)
session.headers.update({'X-Atlassian-Token': 'no-check'})
res = session.post(url, files=file_to_upload)
return True | 563ea4b033a733c479d1e58b2c7fc3e661052021 | 6,262 |
def _make_note(nl_transcript: str, tl_audio_file: str) -> Note:
"""
Creates an Anki note from a native langauge transcript and a target language audio file.
"""
return Note(model=_MODEL, fields=[f"[sound:{tl_audio_file}]", nl_transcript]) | 4765e39b2c3a7794fb973de2b9424bad361cbe4c | 6,263 |
from datetime import datetime
def bed2beddb_status(connection, **kwargs):
"""Searches for small bed files uploaded by user in certain types
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'bed2beddb_status')
my_auth = connection.ff_keys
check.action = "bed2beddb_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
check.summary = ''
# These are the accepted file types for this check
accepted_types = ['LADs', 'boundaries', 'domain calls', 'peaks']
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find bg files without bw files)
query = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title!=beddb"
"&status!=uploading&status!=to be uploaded by workflow"
"&status!=archived&status!=archived to project")
query += "".join(["&file_type=" + i for i in accepted_types])
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# build a second query for checking failed ones
query_f = ("/search/?type=FileProcessed&file_format.file_format=bed"
"&extra_files.file_format.display_title=beddb"
"&extra_files.status=uploading"
"&extra_files.status=to be uploaded by workflow"
"&status!=uploading&status!=to be uploaded by workflow")
# add date
s_date = kwargs.get('start_date')
if s_date:
query_f += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query_f += '&lab.display_title=' + lab
# The search
res_one = ff_utils.search_metadata(query, key=my_auth)
res_two = ff_utils.search_metadata(query_f, key=my_auth)
res_all = res_one + res_two
missing = []
for a_file in res_all:
if not a_file.get('genome_assembly'):
missing.append(a_file['accession'])
res_all = [i for i in res_all if i.get('genome_assembly')]
if not res_all:
check.summary = 'All Good!'
return check
check = wfr_utils.check_runs_without_output(res_all, check, 'bedtobeddb', my_auth, start)
if missing:
check.full_output['missing_assembly'] = missing
msg = str(len(missing)) + ' files missing genome assembly'
check.brief_output.insert(0, msg)
return check | 2fb1f67cc256bc1ff04c4a5e8c1fa61f43f69d30 | 6,264 |
def parse_urdf_file(package_name, work_name):
""" Convert urdf file (xml) to python dict.
Using the urdfpy package for now.
Using the xml package from the standard library could be
easier to understand. We can change this in the future
if it becomes a mess.
"""
rospack = rospkg.RosPack()
filepath = rospack.get_path(package_name)
filepath += REL_WORK_PATH
urdf = urdfpy.URDF.load(filepath + work_name + ".urdf")
d = {"links": {}, "joints": {}}
for link in urdf.links:
if link.name == "world" or link.name == "work":
continue
else:
d["links"][link.name] = parse_link(link, filepath)
for joint in urdf.joints:
p = PoseStamped()
p.header.frame_id = joint.parent
p.pose = numpy_to_pose(joint.origin)
d["joints"][joint.name] = {
"pose": p,
"parent": joint.parent,
"child": joint.child
}
return d | 7b209216d9f65303441e5e9f761119bfa9fc5810 | 6,265 |
def _get_mgmtif_mo_dn(handle):
"""
Internal method to get the mgmt_if dn based on the type of platform
"""
if handle.platform == IMC_PLATFORM.TYPE_CLASSIC:
return("sys/rack-unit-1/mgmt/if-1")
elif handle.platform == IMC_PLATFORM.TYPE_MODULAR:
return("sys/chassis-1/if-1")
else:
raise ImcValidationException("Invalid platform detected:%s" %
handle.platform) | 455c5baf0f659b98c78bfcc386bd03e0850df267 | 6,266 |
import numpy as np
import scipy.optimize
def measure_cv_performance(gene_by_latent_train, data_test):
"""
Measure NMF model performance on held out data. Performance is evaluated based on the model's ability
to reconstuct held out samples.
\hat{u} := arg min_{u} || x - uV^T || s.t. u >= 0
\hat{x} := \hat{u} V^T
error = || x - \hat{x} ||
normalized_error = error / ||x||
Parameters
----------
gene_by_latent_train : np.array
V in the above equations
data_test : np.array
X in the above equations
Returns
-------
error : np.array
normalized error for each sample
"""
m_samples, n_genes = data_test.shape
error = np.zeros(m_samples,)
# TODO multi-sample version of nnls?
for m in range(m_samples):
u_hat, err = scipy.optimize.nnls(gene_by_latent_train, data_test[m,:])
error[m] = err / np.linalg.norm(data_test[m,:])
return error | a16b66d9604b3921a7288dcb06b43a51c987d5cd | 6,267 |
def sectionize(parts, first_is_heading=False):
"""Join parts of the text after splitting into sections with headings.
This function assumes that a text was splitted at section headings,
so every two list elements after the first one is a heading-section pair.
This assumption is used to join sections with their corresponding headings.
Parameters
----------
parts : list of str
List of text parts.
first_is_heading : bool
Should first element be treated as heading in lists of length greater than 1.
"""
parts = parts.copy()
if len(parts) <= 1:
return parts
first = []
if not first_is_heading:
first.append(parts[0])
del parts[0]
sections = first + [ "\n".join(parts[i:i+2]) for i in range(0, len(parts), 2) ]
return sections | 402832d55268dc808888f94b95e3a1c991394041 | 6,268 |
def byte_compare(stream_a, stream_b):
"""Byte compare two files (early out on first difference).
Returns:
(bool, int): offset of first mismatch or 0 if equal
"""
bufsize = 16 * 1024
equal = True
ofs = 0
while True:
b1 = stream_a.read(bufsize)
b2 = stream_b.read(bufsize)
if b1 != b2:
equal = False
if b1 and b2:
# we have two different buffers: find first mismatch
for a, b in zip(b1, b2):
if a != b:
break
ofs += 1
break
ofs += len(b1)
if not b1: # both buffers empty
break
return (equal, ofs) | 59adfe50fefdb79edd082a35437018d4b954ec75 | 6,269 |
import cv2
import albumentations as A
def get_resize_augmentation(image_size, keep_ratio=False, box_transforms=False):
"""
Resize an image, support multi-scaling
:param image_size: shape of image to resize
:param keep_ratio: whether to keep image ratio
:param box_transforms: whether to augment boxes
:return: albumentation Compose
"""
bbox_params = A.BboxParams(
format='pascal_voc',
min_area=0,
min_visibility=0,
label_fields=['class_labels']) if box_transforms else None
if not keep_ratio:
return A.Compose([
A.Resize(
height=image_size[1],
width=image_size[0]
)],
bbox_params=bbox_params)
else:
return A.Compose([
A.LongestMaxSize(max_size=max(image_size)),
A.PadIfNeeded(
min_height=image_size[1], min_width=image_size[0], p=1.0, border_mode=cv2.BORDER_CONSTANT),
],
bbox_params=bbox_params) | 62affae338e16cb0e7fc609d0ee995c728d6ec47 | 6,270 |
def extract_question(metric):
"""Extracts the name and question from the given metric"""
with open(metric) as f:
data = f.readlines()
data = [x.strip() for x in data]
# filter out empty strings
data = list(filter(None, data))
# data[0] = '# Technical Fork'
metric_name = data[0].split(maxsplit=1)[1]
# data[1] = 'Question: question part of the metric'
metric_question = spilt_by_colon(data[1])
# Replace '&' to 'and' to prevent breaking of tables in pandoc
metric_name = metric_name.replace('&', 'and')
metric_question = metric_question.replace('&', 'and')
return metric_name, metric_question | 27ddc25c489d19e1ca17ae80774e20c14208b653 | 6,271 |
def get_side(node, vowels, matcher, r):
"""Get side to which char should be added. r means round (or repeat).
Return 0 or plus int to add char to right,
minus int to left,
None if char node should be avoided.
"""
# check if node has both char neighbours
if node.next is None:
if node.prev is None:
raise Exception()
elif node.prev.syllable:
return -1
else:
return None
elif node.prev is None:
if node.next.syllable:
return 1
else:
return None
# node has both left and right char neighbours
# check if node has at least one syllable neighbour
if node.prev.syllable is None and node.next.syllable is None:
return None
# char matching
right_db = get_db_right_side(node, matcher)
if right_db == 2:
return right_db
elif right_db == 1 and r < 3:
return None
# suffix
suff = get_suffix_side(node, matcher)
if suff != 0:
syllable = node.prev.syllable if suff < 0 else node.next.syllable
return suff if syllable is not None else None
# prefix
pre = get_prefix_side(node, matcher)
if pre != 0:
syllable = node.prev.syllable if pre < 0 else node.next.syllable
return pre if syllable is not None else None
# syllable matching
if node.prev.syllable and node.next.syllable:
sdb = get_db_syllable_side(node, matcher) / 2 + right_db
if abs(sdb) >= 1:
return sdb
# no match in db nor suffixes nor prefixes
if r < 3:
return None
if node.prev in vowels and node.prev.neighbours_consonants(2, syllabic=False):
return -1
# this condition is for c in jablcko
if node.prev.syllabic_consonant_in_the_middle() and node.neighbours_consonants(1):
return -1
elif node.next.syllable:
return 1
elif node.prev.syllable:
return -1
return 0 | b7a34982bed475cacef08faf8f4d6155fc4147fb | 6,272 |
def gap_perform_pruning(model_path, pruned_save_path=None, mode='gap', slim_ratio=0.5,
mask_len=False, full_save=False, full_save_path=None, var_scope='',
ver=1):
""" Interface for GAP pruning step (step2).
Args:
model_path: path to the saved checkpoint,
including 3 files: `.meta', `.data' and `.index'.
pruned_save_path: path to save the pruned data (file in pickle format)
slim_ratio: ratio for model pruning.
Return:
data_dict: the pruned data dict
"""
graph = saver.import_meta_graph(model_path+'.meta', clear_devices=True)
with open('graph_def.pbtxt', 'w') as f:
f.write(str(ops.get_default_graph().as_graph_def(add_shapes=True)))
key_graph = KeyGraph(ops.get_default_graph())
data_dict = key_graph.gap(model_path, pruned_save_path, mode, slim_ratio, mask_len,
full_save, full_save_path, var_scope, ver)
return data_dict | b49b7f5d61113990746ef37d03267805424f10be | 6,273 |
def html_wrap(ptext, owrapper, attribute=''):
"""
Wrap text with html tags.
Input:
ptext -- text to be wrapped
owrapper -- tag to wrap ptext with
attribute -- if set, attribute to add to ptext
If owrapper ends with a newline, then the newline will appear after the
bracket character in the last tag.
Returns the wrapped string value.
"""
wrapper = owrapper.strip()
hdr = '<%s>' % wrapper
if attribute:
hdr = add_attrib(attribute, hdr)
trlr = '</%s>' % wrapper
if owrapper.endswith('\n'):
trlr += '\n'
return hdr + ptext + trlr | 3a9d6fcf165ce6ad46ecc2ab7437b794d03449d9 | 6,274 |
import socket
import subprocess
def start_vsfm(port=None, vsfm_binary_path=default_path):
"""
Starts VSFM, binds it to a socket, opens the socket interface, sets up a logger and waits.
:param port: Port number to open, defaults to a random one
:param vsfm_binary_path: the path to VSFM.exe, defaults from the vsfm_data file
:return: port that was opened
"""
# 'start program'
if port is None:
tmp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tmp_sock.bind(('',0))
port = int(tmp_sock.getsockname()[1])
tmp_sock.close()
logger.info("Binding to port " + str(port))
cmd = '"{}" listen+log {}'.format(vsfm_binary_path, port)
# Opens up VSFM and begins the socket.
logger.debug("Sending cmd: " + cmd)
vsfm_subprocess = subprocess.Popen(cmd, shell=True) # this needs changed from shell=True
return port | 2d92fe432053ee846757a4de759ab0d158f3d5dd | 6,275 |
def names(namespace):
"""Return extension names without loading the extensions."""
if _PLUGINS:
return _PLUGINS[namespace].keys()
else:
return _pkg_resources_names(namespace) | de772f9c671b92f9707e333006354d89ba166ae2 | 6,276 |
def cfq_lstm_attention_multi():
"""LSTM+attention hyperparameters tuned for CFQ."""
hparams = common_hparams.basic_params1()
hparams.daisy_chain_variables = False
hparams.batch_size = 1024
hparams.hidden_size = 128
hparams.num_hidden_layers = 2
hparams.initializer = 'uniform_unit_scaling'
hparams.initializer_gain = 1.0
hparams.weight_decay = 0.0
hparams.add_hparam('attention_layer_size', hparams.hidden_size)
hparams.add_hparam('output_attention', True)
hparams.add_hparam('num_heads', 1)
hparams.add_hparam('attention_mechanism', 'bahdanau')
hparams.num_heads = 4
# The remaining hyperparameters were determined as described in the paper:
hparams.batch_size = 2048
hparams.dropout = 0.4
hparams.hidden_size = 512
hparams.learning_rate = 0.03
hparams.num_hidden_layers = 2
return hparams | 7f982aff67a58200c7a297a5cfbfee6cc3c33173 | 6,277 |
def create_modeling_tables(spi_historical, spi_fixtures, fd_historical, fd_fixtures, names_mapping):
"""Create tables for machine learning modeling."""
# Rename teams
for col in ['team1', 'team2']:
spi_historical = pd.merge(spi_historical, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col})
spi_fixtures = pd.merge(spi_fixtures, names_mapping, left_on=col, right_on='left_team', how='left').drop(columns=[col, 'left_team']).rename(columns={'right_team': col})
# Combine data
historical = pd.merge(spi_historical, fd_historical, left_on=SPI_KEYS, right_on=FD_KEYS).dropna(subset=ODDS_COLS_MAPPING.keys(), how='any').reset_index(drop=True)
fixtures = pd.merge(spi_fixtures, fd_fixtures, left_on=SPI_KEYS, right_on=FD_KEYS)
# Extract training, odds and fixtures
X = historical.loc[:, ['season'] + SPI_KEYS + INPUT_COLS]
y = historical.loc[:, OUTPUT_COLS]
odds = historical.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING)
X_test = fixtures.loc[:, SPI_KEYS + INPUT_COLS]
odds_test = fixtures.loc[:, SPI_KEYS + list(ODDS_COLS_MAPPING.keys())].rename(columns=ODDS_COLS_MAPPING)
# Add average scores columns
for ind in (1, 2):
avg_score = y[['adj_score%s' % ind, 'xg%s' % ind, 'nsxg%s' % ind]].mean(axis=1)
avg_score[avg_score.isna()] = y['score%s' % ind]
y['avg_score%s' % ind] = avg_score
# Add combined odds columns
for target in TARGETS:
if '+' in target:
targets = target.split('+')
odds[target] = combine_odds(odds[targets])
odds_test[target] = combine_odds(odds_test[targets])
# Feature extraction
with np.errstate(divide='ignore', invalid='ignore'):
for df in (X, X_test):
df['quality'] = hmean(df[['spi1', 'spi2']], axis=1)
df['importance'] = df[['importance1', 'importance2']].mean(axis=1)
df['rating'] = df[['quality', 'importance']].mean(axis=1)
df['sum_proj_score'] = df['proj_score1'] + df['proj_score2']
return X, y, odds, X_test, odds_test | bfaab71b64979859b7ec474dbf1805e117d9730d | 6,278 |
import math
import random
def daily_selection():
"""
Select a random piece of material from what is available. A piece is defined
by a newline; every line is a new piece of content.
"""
logger.log("Selecting today's material")
with open(settings.CONTENT, "r") as file:
content = file.readlines()
lines = len(content)
prev = get_previous(int(math.log10(lines)))
selection_index = random.choice(list(range(prev)) + list(range(prev + 1, lines)))
selection = content[selection_index]
selection += ("\n" if selection[-1] != "\n" else "")
logger.log("Selected: " + selection, newline=False)
set_previous(selection_index)
return selection | 2b16be5e02273e539e7f0417ef72d28de91624cb | 6,279 |
import shlex
import subprocess
import re
def get_browser_version():
"""
    Obtain the Firefox browser version. This is necessary because Zeus can only handle certain versions.
"""
logger.info(set_color(
"attempting to get firefox browser version"
))
try:
firefox_version_command = shlex.split("firefox --version")
output = subprocess.check_output(firefox_version_command)
except (OSError, Exception):
logger.error(set_color(
"failed to run firefox", level=50
))
return "failed to start"
try:
major, minor = map(int, re.search(r"(\d+).(\d+)", output).groups())
except (ValueError, Exception):
logger.error(set_color(
"failed to parse '{}' for version number".format(output), level=50
))
return "failed to gather"
return major, minor | bc170316136b89281076495a534208f36749847d | 6,280 |
def is_checkpointing() -> bool:
"""Whether the current forward propagation is under checkpointing.
Returns:
bool: :data:`True` if it's under checkpointing.
"""
return thread_local.is_checkpointing | 2779c059622bfe15586f69e4c0cfeb3bbf16a754 | 6,281 |
import json
def create_tile_assets_profile(iam, profile_name, locations):
"""
Creates a profile (and corresponding role) with read and write access to
the tile assets bucket.
"""
profile = iam.create_instance_profile(
InstanceProfileName=profile_name,
Path='/',
)
iam.create_role(
RoleName=profile_name,
AssumeRolePolicyDocument=json.dumps(
assume_role_policy_document('ec2.amazonaws.com')),
)
iam.add_role_to_instance_profile(
InstanceProfileName=profile_name,
RoleName=profile_name,
)
assets_path = locations.assets.name + '/' + locations.assets.prefix + '/*'
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "VisualEditor0",
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:DeleteObject"
],
"Resource": 'arn:aws:s3:::' + assets_path,
},
{
"Sid": "VisualEditor1",
"Effect": "Allow",
"Action": "s3:ListBucket",
"Resource": 'arn:aws:s3:::' + locations.assets.name,
}
]
}
iam.put_role_policy(
RoleName=profile_name,
PolicyName='AllowReadWriteAccessToTilesAssetsBucket',
PolicyDocument=json.dumps(policy),
)
return profile['InstanceProfile'] | e1e9bfb9405b4558fbf9972dfab67bd22a9f0189 | 6,282 |
import numpy as np
import scipy.ndimage
from numpy.lib.stride_tricks import as_strided
def noise_filter(rgb_array, coef=8, read_noise=2, shot_noise=246):
""" Apply bilateral noise filter to RGB image"""
h, w, _ = rgb_array.shape
luma_img = rgb_array[:, :, 0] + rgb_array[:, :, 1] + rgb_array[:, :, 2]
average = scipy.ndimage.filters.uniform_filter(luma_img, 5, mode='mirror')
sigma_map = average * shot_noise + read_noise
del average
sigma_map[sigma_map < 1] = 1
sy, sx = sigma_map.strides
sigma_tile = as_strided(sigma_map, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5))
sigma_tile = sigma_tile[2:h-2, 2:w-2, :, :]
del sigma_map
sy, sx = luma_img.strides
luma_tile = as_strided(luma_img, strides=(sy, sx, 0, 0), shape=(h, w, 5, 5))
luma_tile = luma_tile[2:h-2, 2:w-2, :, :]
luma_box = as_strided(luma_img, strides=(sy, sx, sy, sx), shape=(h-4, w-4, 5, 5))
del luma_img
diff = luma_box - luma_tile
del luma_tile, luma_box
diff = diff * diff
weight = np.exp(-coef * diff / sigma_tile)
del diff, sigma_tile
weight_sum = weight.sum(axis=(2, 3))
sy, sx, sz, sw = weight.strides
weight_extend = as_strided(weight, strides=(sy, sx, 0, sz, sw), shape=(h-4, w-4, 3, 5, 5))
del weight
sy, sx = weight_sum.strides
weight_sum_extend = as_strided(weight_sum, strides=(sy, sx, 0), shape=(h-4, w-4, 3))
del weight_sum
sy, sx, sz = rgb_array.strides
img_boxes = as_strided(rgb_array, strides=(sy, sx, sz, sy, sx), shape=(h-4, w-4, 3, 5, 5))
img_flt = (weight_extend * img_boxes).sum(axis=(3, 4)) / weight_sum_extend
return img_flt | 6178429e237a56081696696c4d35f9fea5459065 | 6,283 |
import json
def drop_entity(p_json: json):
"""
    Deletes an entity.
    :param p_json: JSON specifying the id of the entity to delete
"""
try:
l_model=Model(p_json=p_json)
l_model.drop_entity()
return _JsonOutput(p_json_object=None, p_message="Entity has dropped successfully").body
except Exception as e:
return _JsonOutput(p_json_object=None, p_error=e.args[0]).body | 1bec1f8f42d6aea39e25078383b018c2a651e5e5 | 6,284 |
from typing import Optional
from typing import Sequence
def get_ssl_vpn_client_certs(ids: Optional[Sequence[str]] = None,
name_regex: Optional[str] = None,
output_file: Optional[str] = None,
ssl_vpn_server_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSslVpnClientCertsResult:
"""
    The SSL-VPN client certificates data source lists the SSL-VPN client certificate resources owned by an Alicloud account.
## Example Usage
```python
import pulumi
import pulumi_alicloud as alicloud
foo = alicloud.vpc.get_ssl_vpn_client_certs(ids=["fake-cert-id"],
name_regex="^foo",
output_file="/tmp/clientcert",
ssl_vpn_server_id="fake-server-id")
```
:param Sequence[str] ids: IDs of the SSL-VPN client certificates.
:param str name_regex: A regex string of SSL-VPN client certificate name.
:param str output_file: Save the result to the file.
:param str ssl_vpn_server_id: Use the SSL-VPN server ID as the search key.
"""
__args__ = dict()
__args__['ids'] = ids
__args__['nameRegex'] = name_regex
__args__['outputFile'] = output_file
__args__['sslVpnServerId'] = ssl_vpn_server_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('alicloud:vpc/getSslVpnClientCerts:getSslVpnClientCerts', __args__, opts=opts, typ=GetSslVpnClientCertsResult).value
return AwaitableGetSslVpnClientCertsResult(
certs=__ret__.certs,
id=__ret__.id,
ids=__ret__.ids,
name_regex=__ret__.name_regex,
names=__ret__.names,
output_file=__ret__.output_file,
ssl_vpn_server_id=__ret__.ssl_vpn_server_id) | 3d3bb6664aff7468c684a6e2e16a887fbbc3f425 | 6,285 |
from typing import Optional
def quantile(h: Distogram, value: float) -> Optional[float]:
""" Returns a quantile of the distribution
Args:
h: A Distogram object.
value: The quantile to compute. Must be between 0 and 1
Returns:
An estimation of the quantile. Returns None if the Distogram
object contains no element or value is outside of (0:1).
"""
if len(h.bins) == 0:
return None
if not (0 <= value <= 1):
return None
total_count = count(h)
q_count = int(total_count * value)
v0, f0 = h.bins[0]
vl, fl = h.bins[-1]
if q_count <= (f0 / 2): # left values
fraction = q_count / (f0 / 2)
result = h.min + (fraction * (v0 - h.min))
elif q_count >= (total_count - (fl / 2)): # right values
base = q_count - (total_count - (fl / 2))
fraction = base / (fl / 2)
result = vl + (fraction * (h.max - vl))
else:
mb = q_count - f0 / 2
mids = [(fi + fj) / 2 for (_, fi), (_, fj) in zip(h.bins[:-1], h.bins[1:])]
i, _ = next(filter(lambda i_f: mb < i_f[1], enumerate(accumulate(mids))))
(vi, _), (vj, _) = h.bins[i], h.bins[i + 1]
fraction = (mb - sum(mids[:i])) / mids[i]
result = vi + (fraction * (vj - vi))
return result | 76f2a9b33d2e3e6a4a419a9f32cff591e191a145 | 6,286 |
def arachni_del_vuln(request):
"""
The function Delete the Arachni Vulnerability.
:param request:
:return:
"""
if request.method == 'POST':
vuln_id = request.POST.get("del_vuln", )
un_scanid = request.POST.get("scan_id", )
scan_item = str(vuln_id)
value = scan_item.replace(" ", "")
value_split = value.split(',')
split_length = value_split.__len__()
        print "split_length", split_length
for i in range(0, split_length):
vuln_id = value_split.__getitem__(i)
delete_vuln = arachni_scan_result_db.objects.filter(vuln_id=vuln_id)
delete_vuln.delete()
arachni_all_vul = arachni_scan_result_db.objects.filter(scan_id=un_scanid).values(
'name',
'severity',
'vuln_color'
).distinct()
total_vul = len(arachni_all_vul)
total_high = len(arachni_all_vul.filter(severity="high"))
total_medium = len(arachni_all_vul.filter(severity="medium"))
total_low = len(arachni_all_vul.filter(severity="low"))
arachni_scan_db.objects.filter(scan_id=un_scanid).update(
total_vul=total_vul,
high_vul=total_high,
medium_vul=total_medium,
low_vul=total_low
)
messages.success(request, "Deleted vulnerability")
return HttpResponseRedirect("/webscanners/arachni_list_vuln?scan_id=%s" % un_scanid) | 3be794525025fec019a5f76e0d885519077bc72a | 6,287 |
import subprocess
def pipe(*args, encoding="utf-8", print_output=False, raise_exception=False):
"""Every arg should be a subprocess command string which will be run and piped to
any subsequent args in a linear process chain. Each arg will be split into command
words based on whitespace so whitespace embedded within words is not possible.
Returns stdout from the chain.
"""
pipes = []
for cmd in args:
words = cmd.split()
if pipes:
p = subprocess.Popen(words, stdin=pipes[-1].stdout, stdout=subprocess.PIPE)
pipes[-1].stdout.close()
else:
p = subprocess.Popen(words, stdout=subprocess.PIPE)
pipes.append(p)
output = p.communicate()[0]
ret_code = p.wait()
if ret_code and raise_exception:
raise RuntimeError(f"Subprocess failed with with status: {ret_code}")
output = output.decode(encoding) if encoding else output
if print_output:
print(output, end="")
return output | 6ee3f95a86bd0c8d30d912e691738b5fbd846919 | 6,288 |
def construct_imports(variables, imports):
"""Construct the list of imports by expanding all command line arguments."""
result = {}
for i in imports:
kv = i.split('=', 1)
if len(kv) != 2:
print 'Invalid value for --imports: %s. See --help.' % i
sys.exit(1)
result[kv[0]] = expand_template(kv[1], variables, result)
return result | 2e26b3496dff96fa713e2388af415cadf831d032 | 6,289 |
import re
def is_regex(param):
"""
判断参数是否是合法正则表达式字符串
:param param: {String} 参数
:return: {Boolean} 是否是合法正则表达式
"""
try:
re.compile(param)
return True
except re.error:
return False | 6a3ee33e68e33d3557db546beadc005235360080 | 6,290 |
def NetCDF_SHP_lat_lon(name_of_nc, box_values, name_of_lat_var, name_of_lon_var, correct_360):
"""
@ author: Shervan Gharari
@ Github: https://github.com/ShervanGharari/candex
@ author's email id: [email protected]
@license: Apache2
    This function gets, for a NetCDF file, the associated shapefile given the coordinates of a given box.
    If correct_360 is True then the code converts lon values greater than 180 to negative lon values.
Arguments
---------
name_of_nc: string, the name of the nc file
box_values: the box to limit to a specific domain
name_of_lat_var: string, the name of the variable lat
name_of_lon_var: string, the name of the variable lon
    correct_360: logical, True or False
Returns
-------
result: a shapefile for the NetCDF file
"""
# open the nc file to read
dataset = xr.open_dataset(name_of_nc, decode_times=False)
# reading the lat and lon and converting them to np.array
lat = dataset[name_of_lat_var].data
lon = dataset[name_of_lon_var].data
lat = np.array(lat)
lon = np.array(lon)
# check if lat and lon are 1 D, if yes then they should be converted to 2D lat and lon WARNING only for case 1 and 2
if len(lat.shape) == 1 and len(lon.shape) == 1:
lat, lon = lat_lon_2D(lat, lon)
# creating the shapefile
result = lat_lon_SHP(lat, lon, box_values, correct_360)
return result | dc214f4449193f0daef0327df596c3109837a16e | 6,291 |
from datetime import datetime
import logging
def format_issues(
input_issues: list,
developer_ids: list,
start_date: datetime.datetime,
end_date: datetime.datetime,
end_date_buffer: int = 0,
) -> list:
"""extract and formats key fields into an output list
Args:
input_issues: issues (tuples) from GitHub
developer_ids: GitHub id strings to filter
start_date: start date of report
end_date: similar, passed in for testing
end_date_buffer: number of days to add to 'end time'
Returns:
list issues_summary: list of tuples with select, reformatted fields
"""
logging.info("beginning format issues")
issues_summary = []
len(input_issues)
for issue in input_issues:
logging.info(f"formatting issue #: {issue.number}")
# determine branch based on common PR naming pattern with [X.Y] branch prefix
if "[main]" in issue.title or "[3." not in issue.title:
branch_name = "[main]"
else:
branch_name = str(issue.title).split(" ", 2)[0]
match issue.state:
case "open":
# issues we authored
if (
issue.user.login in developer_ids
and check_if_issue_date_interesting(
issue.updated_at, start_date, end_date, end_date_buffer
)
):
issues_summary.append(
tuple(
(
f"{issue.updated_at}",
"Issue",
"opened",
f"{branch_name.rjust(6)}",
f"{issue.url}",
f"{issue.title}",
)
)
)
# issues we closed
case "closed":
if issue.closed_by.login in developer_ids:
issues_summary.append(
tuple(
(
f"{issue.closed_at}",
"Issue",
"closed",
f"{branch_name.rjust(6)}",
f"{issue.url}",
f"{issue.title}",
)
)
)
# END match
# END for issue in input_issues
return issues_summary | 98af172b329c8887666d2ba430ad6e3bda00fe3d | 6,292 |
import torch
def train_transforms(image_size, train_img_scale=(0.35, 1),
normalize: bool = True,
mean=torch.tensor([0.485, 0.456, 0.406]),
std=torch.tensor([0.229, 0.224, 0.225])):
"""Transforms for train augmentation with Kornia."""
transforms = [
AccimageImageToTensorNN(),
RandomResizedCrop((image_size, image_size), train_img_scale, keepdim=True),
RandomHorizontalFlip(keepdim=True)]
if normalize:
        transforms.append(Normalize(mean=mean, std=std, keepdim=True))
return torch.nn.Sequential(*transforms) | 957aaf01edf64589d5bd846cac9895077ba43fd0 | 6,293 |
def get_spans_bio(tags,id2label=None):
"""Gets entities from sequence.
Args:
tags (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> tags = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_spans_bio(tags)
# output [['PER', 0,1], ['LOC', 3, 3]]
"""
chunks = []
chunk = [-1, -1, -1]
for indx, tag in enumerate(tags):
if not isinstance(tag, str):
tag = id2label[tag]
if tag.startswith("B-"):
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
chunk[1] = indx
chunk[0] = tag.split('-')[1]
elif tag.startswith('I-') and chunk[1] != -1:
_type = tag.split('-')[1]
if _type == chunk[0]:
chunk[2] = indx
if indx == len(tags) - 1:
chunks.append(chunk)
else:
if chunk[2] != -1:
chunks.append(chunk)
chunk = [-1, -1, -1]
return chunks | 9a9e45eedaf7c8700b72af9649cf80b13e276fc8 | 6,294 |
def min_count1(lst):
"""
Get minimal value of list, version 1
:param lst: Numbers list
:return: Minimal value and its count on the list
"""
if len(lst) == 0:
return []
count = 0
min_value = lst[0]
for num in lst:
if num == min_value:
count += 1
elif num < min_value:
count = 1
min_value = num
return [min_value, count] | b441d0a37534909e9a990b91a953d4022698c04b | 6,295 |
import os
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image="quay.io/ocsci/fedora",
source_image_label="latest",
):
"""
Allows to create a build config using a Dockerfile specified as an
argument, eg.::
$ oc new-build -D $'FROM centos:7\\nRUN yum install -y httpd'
creates a build with ``httpd`` installed.
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
defaults to 'latest'
install_package (str): package to install over the base image
Returns:
ocs_ci.ocs.ocp.OCP (obj): The OCP object for the image
Fails on UnavailableBuildException exception if build creation
fails
"""
base_image = source_image + ":" + source_image_label
if config.DEPLOYMENT.get("disconnected"):
base_image = mirror_image(image=base_image)
cmd = f"yum install -y {install_package}"
http_proxy, https_proxy, no_proxy = get_cluster_proxies()
if http_proxy:
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} "
f"no_proxy='{no_proxy}' {cmd}"
)
docker_file = f"FROM {base_image}\n " f" RUN {cmd}\n" f"CMD tail -f /dev/null"
command = f"new-build -D $'{docker_file}' --name={image_name}"
kubeconfig = os.getenv("KUBECONFIG")
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f"Running command {oc_cmd}")
result = run(oc_cmd, stdout=PIPE, stderr=PIPE, timeout=15, shell=True)
if result.stderr.decode():
raise UnavailableBuildException(
f"Build creation failed with error: {result.stderr.decode()}"
)
out = result.stdout.decode()
logger.info(out)
if "Success" in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind="Pod", resource_name=image_name)
if pod_obj.wait_for_resource(
condition="Completed",
resource_name=f"{image_name}" + "-1-build",
timeout=300,
sleep=30,
):
logger.info(f"build {image_name} ready")
set_image_lookup(image_name)
logger.info(f"image {image_name} can now be consumed")
image_stream_obj = OCP(kind="ImageStream", resource_name=image_name)
return image_stream_obj
else:
raise UnavailableBuildException("Build creation failed") | ac615fae1643c37a2ce86a7579605e48b585d338 | 6,296 |
def exactly_one_topping(ketchup, mustard, onion):
"""Return whether the customer wants exactly one of the three available toppings
on their hot dog.
"""
    return int(ketchup) + int(mustard) + int(onion) == 1 | 214c95d35c116993dc78740d5d16b874122960ed | 6,297
def strip_line_endings(data: list) -> list:
"""Removes line endings(\n). Removes item if only contains \n."""
return [i.rstrip("\n") for i in data if i != "\n"] | 5383b1bc3884395459ca63b6f15c0a1091eaaaf0 | 6,298 |
def calculate_afqt_scores(df):
"""This function calculates the AFQT scores. See information at
https://www.nlsinfo.org/content/cohorts/nlsy79/topical-guide/education/aptitude-achievement-intelligence-scores
for more details. In addition, we adjust the Numerical Operations score along the lines
described in NLS Attachment 106.
"""
df["NUMERICAL_ADJ"] = df["ASVAB_NUMERICAL_OPERATIONS"]
adjust_no = {
0: 0,
1: 0,
2: 1,
3: 2,
7: 8,
8: 9,
9: 10,
10: 11,
11: 12,
12: 14,
13: 15,
14: 16,
15: 17,
16: 18,
17: 19,
18: 21,
19: 22,
20: 23,
21: 24,
22: 25,
23: 26,
24: 27,
25: 28,
26: 29,
27: 30,
28: 31,
29: 33,
30: 34,
31: 35,
32: 36,
33: 37,
34: 38,
35: 39,
36: 39,
37: 40,
38: 41,
39: 42,
40: 43,
41: 44,
42: 45,
43: 46,
44: 47,
45: 48,
46: 49,
47: 49,
48: 50,
49: 50,
50: 50,
}
df["NUMERICAL_ADJ"].replace(adjust_no, inplace=True)
df["AFQT_RAW"] = 0.00
df["AFQT_RAW"] += df["ASVAB_ARITHMETIC_REASONING"]
df["AFQT_RAW"] += df["ASVAB_WORD_KNOWLEDGE"]
df["AFQT_RAW"] += df["ASVAB_PARAGRAPH_COMPREHENSION"]
df["AFQT_RAW"] += 0.5 * df["NUMERICAL_ADJ"]
del df["NUMERICAL_ADJ"]
# There are a couple of variables for which we can compute AFQT_RAW while there is no AFQT_1
# available. The variable AFQT_1 is set to NAN by the NLSY team if the test procedure was
# altered, i.e. variable R06148 (ASVAB_ALTERED_TESTING) takes value 67. However, we noticed
# that there are other indicators of problems as well.
#
# PROFILES, ASVAB VOCATIONAL TEST - NORMAL/ALTERED TESTING
#
# 11625 51 COMPLETED
# 41 52 COMP-CONVERTED REFUSAL
# 127 53 COMP-PROBLEM REPORTED
# 85 54 COMP-SPANISH INSTR. CARDS
# 36 67 COMP-PRODECURES ALTERED
#
# We followed up with the NLSY staff to get some guidance on how to deal with 51, 52, 53,
# 54. The correspondence is available in ``correspondence-altered-testing.pdf'' in the sources
# subdirectory. In a nutshell, not detailed information is available anymore on the meaning
# of the different realizations. We decided to follow the original decision of the NLSY staff
# to only set 67 to NAN.
cond = df["ASVAB_ALTERED_TESTING"].isin([67])
df.loc[cond, "AFQT_RAW"] = np.nan
# We have a little unit test, where we reconstruct the AFQT_1 variable from the inputs.
assert_equal(_test_afqt(df), True)
return df | ba6573e40115d766b2c0aebb78a3beb2881fbb4c | 6,299 |