content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
import numpy as np
import pandas as pd
from functools import reduce
def convert_array_to_df(emission_map):
    """
    This function converts the emission map dict to a DataFrame where
    - 'emission_map' is a dictionary containing at least 'z_var_ave', 'count_var', 'std_var', 'q25_var' and 'q75_var'
    """
def reform_df(df, nr):
"""This subfunction will reform the format of the dataframe is such a way that it can be saved in the .map.txt
file later on. The expected input is:
df: pd.DataFrame
nr: int
The output is a dataframe that can contains all data of one map and is ready to be written to a .map.txt file"""
df_temp_fin = pd.DataFrame()
for key, value in df.items():
df_temp = pd.DataFrame()
df_temp['Y'] = value.index # CO2_mF [g/s]
df_temp['X'] = key # velocity_filtered [km/h]
df_temp['Z{}'.format(nr)] = df[key].values # avgNOx [mg/s]
df_temp = df_temp[['X', 'Y', 'Z{}'.format(nr)]]
            df_temp_fin = pd.concat([df_temp_fin, df_temp])  # DataFrame.append was removed in pandas 2.x
return df_temp_fin
numbering = {'z_var_ave': 1, 'std_var': 2, 'q25_var': 3,
'q75_var': 4, 'count_var': 5}
map_df = []
for var in numbering.keys():
if type(emission_map[var]) == np.ndarray:
map = emission_map[var]
x_axis = np.arange(emission_map['binsizes'][0],
emission_map['binsizes'][0] * map.shape[1] + 1,
emission_map['binsizes'][0])
y_axis = np.arange(emission_map['binsizes'][1],
(emission_map['binsizes'][1] * map.shape[0]) + emission_map['binsizes'][1],
emission_map['binsizes'][1])
# check if shape of axis and indices are the same
if map.shape[1] != len(x_axis):
x_axis = x_axis[:map.shape[1]]
elif map.shape[0] != len(y_axis):
y_axis = y_axis[:map.shape[0]]
## Make Table for .map.txt outputfile
df = pd.DataFrame(data=map, index=y_axis, columns=x_axis)
reformed_df = reform_df(df, numbering[var])
map_df.append(reformed_df)
final_df = reduce(lambda left, right: pd.merge(left, right, on=['X', 'Y']), map_df)
return final_df | 171e387ca51f543b522946a213e51040463aec74 | 6,500 |
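A minimal usage sketch (illustrative values only; the key layout and the 'binsizes' ordering are inferred from the code above, and the units in the comment are assumptions):
import numpy as np
# two 2x3 maps: x bins of 10 (e.g. km/h) and y bins of 0.5 (e.g. g/s)
emission_map = {
    "binsizes": [10, 0.5],
    "z_var_ave": np.random.rand(2, 3),
    "std_var": np.random.rand(2, 3),
    "q25_var": np.random.rand(2, 3),
    "q75_var": np.random.rand(2, 3),
    "count_var": np.random.randint(0, 50, size=(2, 3)),
}
final_df = convert_array_to_df(emission_map)
print(final_df.columns.tolist())  # ['X', 'Y', 'Z1', 'Z2', 'Z3', 'Z4', 'Z5']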
def add_missing_flows(data):
"""There are some flows not given in ReCiPe that seem like they should be there, given the relatively coarse precision of these CFs."""
new_cfs = {
"managed forest": {
"amount": 0.3,
"flows": [
"occupation, forest, unspecified",
"occupation, field margin/hedgerow",
],
},
"annual crops": {
"amount": 1.0,
"flows": [
"occupation, annual crop, flooded crop",
"occupation, annual crop, irrigated, extensive",
],
},
"pasture": {
"amount": 0.55,
"flows": [
"occupation, arable land, unspecified use",
"occupation, grassland, natural, for livestock grazing",
"occupation, heterogeneous, agricultural",
],
},
"artificial area": {"amount": 0.73, "flows": [],},
"permanent crops": {
"amount": 0.7,
"flows": [
"occupation, permanent crop, irrigated",
"occupation, permanent crop, irrigated, extensive",
"occupation, permanent crop, non-irrigated",
"occupation, permanent crop, non-irrigated, extensive",
],
},
}
""" The following were included in an earlier version of ReCiPe, but are skipped here, as we don't have enough info to use them consistently:
* 'occupation, bare area (non-use)',
* 'occupation, cropland fallow (non-use)',
* 'occupation, forest, primary (non-use)',
* 'occupation, forest, secondary (non-use)',
* 'occupation, inland waterbody, unspecified',
* 'occupation, lake, natural (non-use)',
* 'occupation, river, natural (non-use)',
* 'occupation, seabed, natural (non-use)',
* 'occupation, seabed, unspecified',
* 'occupation, snow and ice (non-use)',
* 'occupation, unspecified',
* 'occupation, unspecified, natural (non-use)',
* 'occupation, wetland, coastal (non-use)',
* 'occupation, wetland, inland (non-use)'
"""
for ds in data:
ds["exchanges"].extend(
[
{"name": flow, "amount": obj["amount"]}
for obj in new_cfs.values()
for flow in obj["flows"]
]
)
return data | e23184bb7363db4777d9f693a3fdc4ace9f8ff14 | 6,501 |
from collections import namedtuple
from typing import List
import tensorflow as tf
def cglb_conjugate_gradient(
K: TensorType,
b: TensorType,
initial: TensorType,
preconditioner: NystromPreconditioner,
cg_tolerance: float,
max_steps: int,
restart_cg_step: int,
) -> tf.Tensor:
"""
Conjugate gradient algorithm used in CGLB model. The method of
conjugate gradient (Hestenes and Stiefel, 1952) produces a
sequence of vectors :math:`v_0, v_1, v_2, ..., v_N` such that
:math:`v_0` = initial, and (in exact arithmetic)
:math:`Kv_n = b`. In practice, the v_i often converge quickly to
approximate :math:`K^{-1}b`, and the algorithm can be stopped
without running N iterations.
We assume the preconditioner, :math:`Q`, satisfies :math:`Q ≺ K`,
and stop the algorithm when :math:`r_i = b - Kv_i` satisfies
    :math:`||rᵢ||^2_{Q⁻¹} = rᵢᵀQ⁻¹rᵢ <= ϵ`.
:param K: Matrix we want to backsolve from. Must be PSD. Shape [N, N].
:param b: Vector we want to backsolve. Shape [B, N].
:param initial: Initial vector solution. Shape [N].
:param preconditioner: Preconditioner function.
:param cg_tolerance: Expected maximum error. This value is used
as a decision boundary against stopping criteria.
:param max_steps: Maximum number of CG iterations.
:param restart_cg_step: Restart step at which the CG resets the
        internal state to the initial position using the current
solution vector :math:`v`. Can help avoid build up of
numerical errors.
:return: `v` where `v` approximately satisfies :math:`Kv = b`.
"""
CGState = namedtuple("CGState", ["i", "v", "r", "p", "rz"])
def stopping_criterion(state: CGState) -> bool:
return (0.5 * state.rz > cg_tolerance) and (state.i < max_steps)
def cg_step(state: CGState) -> List[CGState]:
Ap = state.p @ K
denom = tf.reduce_sum(state.p * Ap, axis=-1)
gamma = state.rz / denom
v = state.v + gamma * state.p
i = state.i + 1
r = tf.cond(
state.i % restart_cg_step == restart_cg_step - 1,
lambda: b - v @ K,
lambda: state.r - gamma * Ap,
)
z, new_rz = preconditioner(r)
p = tf.cond(
state.i % restart_cg_step == restart_cg_step - 1,
lambda: z,
lambda: z + state.p * new_rz / state.rz,
)
return [CGState(i, v, r, p, new_rz)]
Kv = initial @ K
r = b - Kv
z, rz = preconditioner(r)
p = z
i = tf.constant(0, dtype=default_int())
initial_state = CGState(i, initial, r, p, rz)
final_state = tf.while_loop(stopping_criterion, cg_step, [initial_state])
final_state = tf.nest.map_structure(tf.stop_gradient, final_state)
return final_state[0].v | bc00f2423c4ffdaf0494ab6e6114222cbc694915 | 6,502 |
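For readers unfamiliar with the method, here is a minimal plain-NumPy sketch of conjugate gradient (no Nyström preconditioner, no restarts, no TensorFlow) illustrating the iteration described in the docstring; it is an illustration, not the GPflow implementation above.
import numpy as np

def simple_cg(K, b, v0, tol=1e-10, max_steps=100):
    # Solve K v = b for symmetric positive-definite K.
    v = v0.copy()
    r = b - K @ v          # residual
    p = r.copy()           # search direction
    rs = r @ r
    for _ in range(max_steps):
        Kp = K @ p
        gamma = rs / (p @ Kp)
        v = v + gamma * p
        r = r - gamma * Kp
        rs_new = r @ r
        if rs_new < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return v

rng = np.random.default_rng(0)
A = rng.normal(size=(5, 5))
K = A @ A.T + 5.0 * np.eye(5)   # symmetric positive definite
b = rng.normal(size=5)
print(np.allclose(K @ simple_cg(K, b, np.zeros(5)), b))  # True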
import os
def build_docker_build(latest=True):
"""Create command used to (re)build the container.
We store the Dockerfile (as that name)
in dir .next or .latest so that we can
have various templates and assets and so on
in the 'context' directory.
"""
tmpl = "{} build -t {{tagname}}:{{tagtag}} {{pathtodockerfile}}".format(OCI_CMD)
_latest = LATEST if latest else NEXT
pathtodockerfile = os.path.join(CONFD["devstation_config_root"], "." + _latest)
return tmpl.format(
tagname=CONFD["tagname"], tagtag=_latest, pathtodockerfile=pathtodockerfile
) | 317b5d63cc941d6db57d704e1be76c7aa8b32799 | 6,503 |
def scrape_new_thread(thread_name, url):
"""Scrape data for a thread that isn't already in our database."""
logger.debug(f"Start of scrape_new_thread for {thread_name}, {url}")
# URL Validation
# TODO: write this function, then hand it off to scrape_existing_thread()
logger.debug("Now that the thread exists in our db, hand it off to scrape_existing_thread()")
scrape_existing_thread(thread_name)
logger.debug("End of scrape_new_thread")
return render_template('scrape_new_thread.html', title='Browse TalkBeer BIFs') | cbb583e262c938378562951af495d122b2db13bd | 6,504 |
def num_fixed_points(permutation):
"""
Compute the number of fixed points (elements mapping to themselves) of a permutation.
:param permutation: Permutation in one-line notation (length n tuple of the numbers 0, 1, ..., n-1).
:return: Number of fixed points in the permutation.
.. rubric:: Examples
>>> num_fixed_points((0, 2, 1))
1
"""
n = 0
for i in range(len(permutation)):
if permutation[i] == i:
n += 1
return n | 124713cd4c90988c43630a74881e7107ff748682 | 6,505 |
import os
def load_messages(path, unique, verbose):
""" Loads messages from the corpus and returns them as Message objects """
messages = []
signatures = set()
for root, _, files in os.walk(path):
if verbose:
print("Processing {}".format(root))
for message_file in files:
message = read_message(os.path.join(root, message_file))
if unique:
sig = (message.sender, message.recipients, message.timestamp, message.subject, message.body)
if sig in signatures:
continue
signatures.add(sig)
messages.append(message)
return messages | 61e3abafb0bd6c20adf9dfbc45c03e510704880c | 6,506 |
import cv2
import numpy
def mutate(grid):
"""
Alters the cycle by breaking it into two separate circuits, and then fusing
them back together to recreate a (slightly different) cycle.
This operation is called "sliding" in 'An Algorithm for Finding Hamiltonian
Cycles in Grid Graphs Without Holes', and it's specifically mentioned
    because it is insufficient if you want to be able to reach all possible cycles
for a given starting graph. That condition isn't really relevant to this
project, so I use sliding since it's much easier to implement.
"""
working_grid = grid.copy().astype(numpy.uint8)
above = shift_down(grid)
below = shift_up(grid)
left = shift_right(grid)
right = shift_left(grid)
# this mask highlights every grid location that could be turned off
candidates = numpy.logical_and(
numpy.logical_and(grid, above != left),
numpy.logical_and(above == below, left == right)
)
# the connected region is split into two
coord = pick_candidate(candidates)
flood_y, flood_x = coord
working_grid[coord] = 0
# find the right spot to label one of the regions '2'
if left[coord] == 1:
flood_x -= 1
elif right[coord] == 1:
flood_x += 1
elif above[coord] == 1:
flood_y -= 1
elif below[coord] == 1:
flood_y += 1
cv2.floodFill(
working_grid,
numpy.zeros([v + 2 for v in grid.shape], dtype=numpy.uint8),
(flood_x, flood_y),
2
)
above = shift_down(working_grid)
below = shift_up(working_grid)
left = shift_right(working_grid)
right = shift_left(working_grid)
x_neighbors = left + right
y_neighbors = above + below
# this mask highlights every grid location that can fuse the two regions
# back together while preserving a cycle
fuse_candidates = numpy.logical_and(
working_grid == 0,
numpy.logical_or(
numpy.logical_and(x_neighbors == 3, y_neighbors == 0),
numpy.logical_and(x_neighbors == 0, y_neighbors == 3)
)
)
fuse_location = pick_candidate(fuse_candidates)
grid[coord] = 0
grid[fuse_location] = 1
return grid | 35cc32385d090fa8091f872858fbeb0c32ecf43d | 6,507 |
import numpy as np
def reverse_permute(output_shape: np.array, order: np.array):
"""
Calculates Transpose op input shape based on output shape and permute order.
:param output_shape: Transpose output shape
:param order: permute order
:return: Transpose input shape corresponding to the specified output shape
"""
return int64_array(output_shape[PermuteAttrs.get_inverse_permutation(order)]) | ada631cc086a1dc0d2dce05f6d97a74a1f3861f4 | 6,508 |
def recursive_bisection(block, block_queue, epsilon_cut, depth_max, theta, lamb, delta, verbose=False):
"""Random cut and random converge
Args:
block_queue (multiprocessing.Queue): Shared queue to store blocks to be executed
Returns:
[{"range": {int: (int,int)}, "mondrian_budget": float, "depth": int}]
"""
# Random cut
if verbose:
print('Before cut', block.domain_dict)
if block.depth > depth_max:
axis, index = cut_random(block)
else:
axis, index = cut_exp_mech(block, epsilon_cut)
if verbose:
print(axis, index)
left_block, right_block = block.split(axis, index)
# Random converge
converged_block_results = []
if left_block.size() == 1:
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
elif random_converge(left_block, left_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(left_block.domain_dict, left_block.depth))
else:
block_queue.put(left_block)
if right_block.size() == 1:
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
elif random_converge(right_block, right_block.depth, theta, lamb, delta):
converged_block_results.append(BlockResult(right_block.domain_dict, right_block.depth))
else:
block_queue.put(right_block)
return converged_block_results | d65070c2cf64356277c4af044b97c5eaa8efdd3d | 6,509 |
def _get_global_step_read(graph=None):
"""Gets global step read tensor in graph.
Args:
graph: The graph in which to create the global step read tensor. If missing,
use default graph.
Returns:
Global step read tensor.
Raises:
RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.
"""
graph = graph or ops.get_default_graph()
global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
if len(global_step_read_tensors) > 1:
raise RuntimeError('There are multiple items in collection {}. '
'There should be only one.'.format(GLOBAL_STEP_READ_KEY))
if len(global_step_read_tensors) == 1:
return global_step_read_tensors[0]
return None | 46bf3b55b36216e4247d6d73226d22b20383321f | 6,510 |
from unittest.mock import Mock
def light_control() -> LightControl:
"""Returns the light_control mock object."""
mock_request = Mock()
mock_request.return_value = ""
return LightControl(mock_request) | cb135ed24d2e992eab64b298e5c9238576a37c5d | 6,511 |
import numpy as np
from scipy.ndimage import label
from scipy.stats import norm
def map_threshold(stat_img=None, mask_img=None, alpha=.001, threshold=3.,
height_control='fpr', cluster_threshold=0):
""" Compute the required threshold level and return the thresholded map
Parameters
----------
stat_img : Niimg-like object or None, optional
statistical image (presumably in z scale)
whenever height_control is 'fpr' or None,
stat_img=None is acceptable.
If it is 'fdr' or 'bonferroni', an error is raised if stat_img is None.
mask_img : Niimg-like object, optional,
mask image
alpha: float, optional
        number controlling the thresholding (either a p-value or q-value).
Its actual meaning depends on the height_control parameter.
This function translates alpha to a z-scale threshold.
threshold: float, optional
desired threshold in z-scale.
This is used only if height_control is None
    height_control : string or None, optional
        false positive control meaning of cluster forming
        threshold: 'fpr'|'fdr'|'bonferroni'|None
cluster_threshold : float, optional
cluster size threshold. In the returned thresholded map,
sets of connected voxels (`clusters`) with size smaller
than this number will be removed.
Returns
-------
thresholded_map : Nifti1Image,
the stat_map thresholded at the prescribed voxel- and cluster-level
threshold: float,
the voxel-level threshold used actually
Note
----
If the input image is not z-scaled (i.e. some z-transformed statistic)
the computed threshold is not rigorous and likely meaningless
"""
# Check that height_control is correctly specified
if height_control not in ['fpr', 'fdr', 'bonferroni', None]:
raise ValueError(
"height control should be one of ['fpr', 'fdr', 'bonferroni', None]")
# if height_control is 'fpr' or None, we don't need to look at the data
    # to compute the threshold
if height_control == 'fpr':
threshold = norm.isf(alpha)
    # In this case, if stat_img is None, we can return without looking at the data
if stat_img is None:
if height_control in ['fpr', None]:
return None, threshold
else:
raise ValueError(
                'Map_threshold requires stat_img not to be None '
                'when the height_control procedure is bonferroni or fdr')
# Masking
if mask_img is None:
masker = NiftiMasker(mask_strategy='background').fit(stat_img)
else:
masker = NiftiMasker(mask_img=mask_img).fit()
stats = np.ravel(masker.transform(stat_img))
n_voxels = np.size(stats)
# Thresholding
if height_control == 'fdr':
threshold = fdr_threshold(stats, alpha)
elif height_control == 'bonferroni':
threshold = norm.isf(alpha / n_voxels)
stats *= (stats > threshold)
# embed it back to 3D grid
stat_map = get_data(masker.inverse_transform(stats))
# Extract connected components above threshold
label_map, n_labels = label(stat_map > threshold)
labels = label_map[get_data(masker.mask_img_) > 0]
for label_ in range(1, n_labels + 1):
if np.sum(labels == label_) < cluster_threshold:
stats[labels == label_] = 0
return masker.inverse_transform(stats), threshold | ea7c1ca48641ed76eef2f2b0396b93fd522fdbaf | 6,512 |
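A quick sanity check of the 'fpr' branch (only scipy is needed): the returned voxel-level threshold depends on alpha alone, not on the data.
from scipy.stats import norm
print(round(norm.isf(0.001), 2))  # 3.09 -- the z-scale threshold returned for alpha=.001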
import time
import random
import pandas as pd
def grab_features(dataframe: pd.DataFrame) -> pd.DataFrame:
"""
Attempts to assign song features using the get_features function to all songs in given dataframe.
    This function creates a column that encompasses all features returned from Spotify in a json format for each track ID.
    It then explodes this column into a separate dataframe and concatenates it with the original.
    Parameters:
    dataframe (pandas dataframe): Dataframe to assign track IDs to. Must have a "trackId" column
Returns:
dataframe (pandas dataframe): original pandas dataframe with song features included
"""
start = time.time()
print("Getting song features..")
dataframe["features_json"] = dataframe["trackId"].progress_apply(
get_features
) # progress apply allows for tqdm progress bar
dataframe.dropna(
axis=0, subset=["trackId"], inplace=True
) # cannot search for tracks that have no ID
temp_list = [pd.json_normalize(x) for x in dataframe["features_json"]]
features_df = pd.concat(x for x in temp_list).reset_index().drop(["index"], axis=1)
dataframe = dataframe.reset_index().drop(["index"], axis=1)
dataframe = pd.concat([dataframe, features_df], axis=1)
dataframe.drop(["features_json"], axis=1, inplace=True)
    index_check = random.randint(
        0, len(dataframe) - 1
    )  # performing check that temporary song feature df matches original df
assert (
dataframe["trackId"].iloc[index_check] == dataframe["id"].iloc[index_check]
), "track IDs do not match"
del temp_list, features_df
end = time.time()
print(
f".apply took {round((end - start),3)} seconds for {len(dataframe)} songs, around {round((end-start) / (len(dataframe)), 3)} seconds per song"
)
return dataframe | 7a2810b68815241a62f2ce753169bd982a17a211 | 6,513 |
def _build_geo_shape_query(field, geom, relation):
"""Crea una condición de búsqueda por relación con una geometría en formato
GeoJSON.
Args:
field (str): Campo de la condición.
geom (dict): Geometría GeoJSON.
relation (str): Tipo de búsqueda por geometrías a realizar. Ver la
documentación de Elasticsearch GeoShape Query para más detalles.
Returns:
Query: Condición para Elasticsearch.
"""
options = {
'shape': geom,
'relation': relation
}
return GeoShape(**{field: options}) | f42fe6e21da30e3d6c8466be92143b215925686c | 6,514 |
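A hypothetical call (the field name 'geometria' and the point coordinates are made up for illustration):
geom = {"type": "Point", "coordinates": [-58.3816, -34.6037]}
query = _build_geo_shape_query("geometria", geom, "intersects")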
import ConfigParser
import os
import version  # assumed project-local module providing VERSION (sys.version is a plain string)
def ProcessConfigurationFile(options):
"""Process configuration file, merge configuration with OptionParser.
Args:
options: optparse.OptionParser() object
Returns:
options: optparse.OptionParser() object
global_ns: A list of global nameserver tuples.
regional_ns: A list of regional nameservers tuples.
Raises:
ValueError: If we are unable to find a usable configuration file.
"""
config = ConfigParser.ConfigParser()
full_path = util.FindDataFile(options.config)
config.read(full_path)
if not config or not config.has_section('general'):
raise ValueError('Could not find usable configuration in %s (%s)' % (full_path, options.config))
general = dict(config.items('general'))
if options.only or options.system_only:
global_ns = []
regional_ns = []
else:
global_ns = config.items('global')
regional_ns = config.items('regional') + config.items('private')
# -U implies -u
if options.site_url:
options.upload_results = True
for option in general:
if not getattr(options, option, None):
if 'timeout' in option:
value = float(general[option])
elif 'count' in option or 'num' in option or 'hide' in option:
value = int(general[option])
else:
value = general[option]
setattr(options, option, value)
for key in ('input_file', 'output_file', 'csv_file', 'input_source'):
value = getattr(options, key, None)
if value:
setattr(options, key, os.path.expanduser(value))
options.version = version.VERSION
return (options, global_ns, regional_ns) | e1d78fa2b904b2c46e27d67f3679d7192456db29 | 6,515 |
from typing import Optional
def map_symptom(symptom_name: str) -> Optional[str]:
"""
Maps a *symptom_name* to current symptom values in ID3C warehouse.
There is no official standard for symptoms, we are using the values
created by Audere from year 1 (2018-2019).
"""
symptom_map = {
'feeling feverish': 'feelingFeverish',
'fever': 'feelingFeverish',
'headache': 'headaches',
'headaches': 'headaches',
'cough': 'cough',
'chills': 'chillsOrShivering',
'chills or shivering': 'chillsOrShivering',
'sweats': 'sweats',
'throat': 'soreThroat',
'sore throat or itchy/scratchy throat': 'soreThroat',
'nausea': 'nauseaOrVomiting',
'nausea or vomiting': 'nauseaOrVomiting',
'nose': 'runnyOrStuffyNose',
'runny or stuffy nose': 'runnyOrStuffyNose',
'runny / stuffy nose': 'runnyOrStuffyNose',
'tired': 'fatigue',
'feeling more tired than usual': 'fatigue',
'ache': 'muscleOrBodyAches',
'muscle or body aches': 'muscleOrBodyAches',
'diarrhea': 'diarrhea',
'ear': 'earPainOrDischarge',
'ear pain or ear discharge': 'earPainOrDischarge',
'rash': 'rash',
'breathe': 'increasedTroubleBreathing',
'increased trouble with breathing': 'increasedTroubleBreathing',
'eye': 'eyePain',
'smell_taste': 'lossOfSmellOrTaste',
'other': 'other',
'none': 'none',
'none of the above': 'none',
}
if symptom_name.lower() not in symptom_map:
raise UnknownSymptomNameError(f"Unknown symptom name «{symptom_name}»")
return symptom_map[symptom_name.lower()] | c86f0694715b434b1e3b2dc3f66ddfc3afadeaf0 | 6,516 |
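Example lookups (the results follow directly from the mapping above):
print(map_symptom("Fever"))                                 # 'feelingFeverish'
print(map_symptom("sore throat or itchy/scratchy throat"))  # 'soreThroat'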
def check_sbatch(cmd, call=True, num_cpus=1, mem="2G", time=None,
partitions=None, dependencies=None, no_output=False, no_error=False,
use_slurm=False, mail_type=['FAIL', 'TIME_LIMIT'], mail_user=None,
stdout_file=None, stderr_file=None,
args=None):
""" This function wraps calls to sbatch. It adds the relevant command line
options based on the parameters (either specified or extracted from
args, if args is not None).
    The 'ntasks' option is always 1 with this function.
Args:
cmd (str): The command to execute
call (bool): If this flag is false, then the commands will not be
executed (but will be logged).
num_cpus (int): The number of CPUs to use. This will be translated into
an sbatch request like: "--ntasks 1 --cpus-per-task <num-cpus>".
default: 1
mem (str): This will be translated into an sbatch request like:
"--mem=<mem>". default: 10G
time (str): The amount of time to request. This will be translated
into an sbatch request like: "--time <time>". default: 0-05:59
partitions (str): The partitions to request. This will be translated
into an sbatch request like: "-p <partitions>". default: general
(N.B. This value should be a comma-separated list with no spaces,
for example: partitions="general,long")
dependencies (list of int-likes): A list of all of the job ids to
use as dependencies for this call. This will be translated into
an sbatch request like: "--dependency=afterok:<dependencies>".
default: None (i.e., no dependencies)
N.B. This IS NOT overwritten by args.
no_output (bool): If this flag is True, stdout will be redirected
to /dev/null. This will be translated into an sbatch request
like: "--output=/dev/null". default: If the flag is not present,
then stdout will be directed to a log file with the job number.
This corresponds to "--output=slurm-%J.out" in the sbatch call.
stdout_file (str): If this value is given and no_output is False,
then this filename will be used for stdout rather than
slurm-%J.out. This corresponds to "--output=<stdout_file>" in
the sbatch call.
no_error (bool): If this flag is True, stderr will be redirected
to /dev/null. This will be translated into an sbatch request
like: "--error=/dev/null". default: If the flag is not present,
then stderr will be directed to a log file with the job number.
This corresponds to "--error=slurm-%J.err" in the sbatch call.
stderr_file (str): If this value is given and no_output is False,
then this filename will be used for stderr rather than
            slurm-%J.err. This corresponds to "--error=<stderr_file>" in
the sbatch call.
use_slurm (bool): If this flag is True, then the commands will be
submitted to SLURM via sbatch. default: By default, each command
is executed sequentially within the current terminal.
mail_type (list of strings): A list of the types of mail to send.
This will be translated into an sbatch request like:
"--mail-type type_1,type_2,...". default: ['FAIL', 'TIME_LIMIT']
mail_user (string): The email address (or user name if that is
configured) of the recipient of the mails. This is translated
into an sbatch request like: "--mail-user <user>"
args (namespace): A namespace which contains values for all of the
options (i.e., created from an argparse parser after calling
add_sbatch_options on the parser)
Returns:
If use_slurm is False, None
If use_slurm is True, the slurm job id
"""
# use args if they are present
if args is not None:
call = not args.do_not_call
num_cpus = args.num_cpus
mem = args.mem
time = args.time
partitions = args.partitions
no_output = args.no_output
no_error = args.no_error
use_slurm = args.use_slurm
mail_type = args.mail_type
mail_user = args.mail_user
stdout_file = args.stdout_file
stderr_file = args.stderr_file
output_str = "--output=slurm-%J.out"
if stdout_file is not None:
output_str = "--output={}".format(stdout_file)
if no_output:
output_str = "--output=/dev/null"
error_str = "--error=slurm-%J.err"
if stderr_file is not None:
error_str = "--error={}".format(stderr_file)
if no_error:
error_str = "--error=/dev/null"
dependencies_str = ""
if dependencies is not None:
dependencies_str = ':'.join(str(d) for d in dependencies)
dependencies_str = "--dependency=afterok:{}".format(dependencies_str)
# check if we actually want to use SLURM
msg = "slurm.check_sbatch.use_slurm: {}, call: {}".format(use_slurm, call)
logger.debug(msg)
# anyway, make sure to remove the --use-slurm option
cmd = cmd.replace("--use-slurm", "")
if use_slurm:
time_str = ""
if time is not None:
time_str = "--time {}".format(time)
mem_str = ""
if mem is not None:
mem_str = "--mem={}".format(mem)
partitions_str = ""
if partitions is not None:
partitions_str = "-p {}".format(partitions)
num_cpus_str = ""
if num_cpus is not None:
num_cpus_str = "--cpus-per-task {}".format(num_cpus)
mail_type_str = ""
if mail_type is not None:
mail_type_str = "--mail-type {}".format(','.join(mail_type))
mail_user_str = ""
if mail_user is not None:
mail_user_str = "--mail-user {}".format(mail_user)
else:
# if we did not give a mail user, then do not specify the mail types
mail_type_str = ""
cmd = ("sbatch {} {} --ntasks 1 {} {} "
"{} {} {} {} {} {}".format(time_str, mem_str, partitions_str, num_cpus_str, dependencies_str,
output_str, error_str, mail_type_str, mail_user_str, cmd))
output = shell_utils.check_output(cmd, call=call)
# and parse out the job id
if call:
job_id = output.strip().split()[-1]
else:
job_id = None
return job_id
else:
shell_utils.check_call(cmd, call=call)
return None | 292757f8a9901a722d10d3d9f76f7e584d802b3b | 6,517 |
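An illustrative call with a hypothetical command (the script name and resource values are made up):
job_id = check_sbatch(
    "python run_analysis.py --input data.csv",  # hypothetical command
    num_cpus=4,
    mem="8G",
    time="0-05:59",
    partitions="general",
    use_slurm=True,
)
print(job_id)  # the SLURM job id as a string, or None when use_slurm=False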
def get_project_details(p):
"""Extract from the pickle object detailed information about
a given project and parse it in a comprehensive dict structure."""
res = {}
project = p['projects'][0]
fields = {'Owner(s)': 'project_owners',
'Member(s)': 'project_members',
'Collaborator(s)': 'project_collabs',
'User(s)': 'project_users',
'last_accessed': 'project_last_access'}
for k, v in fields.items():
res[k] = project[v].strip().split(' <br/> ')
if res[k][0] == '':
res[k] = ['None']
for e in ['insert_user', 'insert_date', 'project_access', 'name',
'project_last_workflow']:
res[e] = project[e]
return res | f8ba3debdd8be7cc7a906851a6a6fb1e3c5f039a | 6,518 |
def get_category(name: str) -> Category:
"""Returns a category with a given name"""
return Category.objects.get(name=name) | 4dc99ed672bbb3d7843692da797d0cd901c2c44c | 6,519 |
def log_report():
""" The log report shows the log file. The user can filter and search the log. """
    with open(main_log, 'r') as log_file:
        log_main = log_file.readlines()
data_main = []
for line in log_main:
split_line = line.split(' ')
data_main.append([' '.join(split_line[:2]), ' '.join(split_line[2:])])
return render_template(
'log.html',
title="Logs",
data_main=data_main) | 53905c90bed2666c7e668bf76ff03a6ba93eca5b | 6,520 |
def int_or_none(x) -> int:
"""Either convert x to an int or return None."""
try:
return int(x)
except TypeError:
return None
except ValueError:
return None | e7fbd422a6c61293c9f4f71df211a85570d4400e | 6,521 |
import argparse
import logging
def create_parser_for_docs() -> argparse.ArgumentParser:
"""Create a parser showing all options for the default CLI
documentation.
Returns:
The primary parser, specifically for generating documentation.
"""
daiquiri.setup(level=logging.FATAL)
# load default plugins
plugin.initialize_default_plugins()
ext_commands = plug.manager.hook.create_extension_command()
return create_parser(
show_all_opts=True,
ext_commands=ext_commands,
config_file=_repobee.constants.DEFAULT_CONFIG_FILE,
) | 97fca333d2e5893f21070efc0016bcf3634d7977 | 6,522 |
def needed_to_build_multi(deriv_outputs, existing=None, on_server=None):
"""
:param deriv_outputs: A mapping from derivations to sets of outputs.
:type deriv_outputs: ``dict`` of ``Derivation`` to ``set`` of ``str``
"""
if existing is None:
existing = {}
if on_server is None:
on_server = {}
needed, need_fetch = {}, {}
for deriv, outputs in deriv_outputs.items():
needed_to_build(deriv, outputs, needed=needed, need_fetch=need_fetch,
existing=existing, on_server=on_server)
return needed, need_fetch | d05083ea9c71c982d312e8b420b21bba92b80ee4 | 6,523 |
def iscode(c):
"""
Tests if argument type could be lines of code,
i.e. list of strings
"""
if type(c) == type([]):
if c:
return type(c[0]) == type('')
else:
return True
    else:
        return False
def get_comment_list(request, thread_id, endorsed, page, page_size, requested_fields=None):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
requested_fields: Indicates which additional fields to return for
each comment. (i.e. ['profile_image'])
Returns:
A paginated result containing a list of comments; see
discussion.rest_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"with_responses": True,
"recursive": False,
"user_id": request.user.id,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None: # lint-amnesty, pylint: disable=no-else-raise
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a PageNotFoundError in that case
if not responses and page != 1:
raise PageNotFoundError("Page not found (No results on this page).")
num_pages = (resp_total + page_size - 1) // page_size if resp_total else 1
results = _serialize_discussion_entities(request, context, responses, requested_fields, DiscussionEntity.comment)
paginator = DiscussionAPIPagination(request, page, num_pages, resp_total)
return paginator.get_paginated_response(results) | 980e52645e96853339df0525359ddba4698bf7e7 | 6,525 |
from typing import List
def files(name: str, dependencies=False, excludes=None) -> List[PackagePath]:
"""
List all files belonging to a distribution.
Arguments:
name:
The name of the distribution.
dependencies:
Recursively collect files of dependencies too.
excludes:
Distributions to ignore if **dependencies** is true.
Returns:
All filenames belonging to the given distribution.
With ``dependencies=False``, this is just a shortcut for::
conda_support.distribution(name).files
"""
return [file
for dist in _iter_distributions(name, dependencies, excludes)
for file in dist.files] | cfda01bb7e6858e378aadeea47e6e4a0d76dda2f | 6,526 |
def ready_to_delete_data_node(name, has_executed, graph):
"""
Determines if a DataPlaceholderNode is ready to be deleted from the
cache.
Args:
name:
The name of the data node to check
has_executed: set
A set containing all operations that have been executed so far
graph:
The networkx graph containing the operations and data nodes
Returns:
A boolean indicating whether the data node can be deleted or not.
"""
data_node = get_data_node(name, graph)
return set(gr.successors(graph, data_node)).issubset(has_executed) | 7da3c6053146a1772223e29e1eca15107e0347b6 | 6,527 |
import hashlib
def extract_hash_parts(repo):
"""Extract hash parts from repo"""
full_hash = hashlib.sha1(repo.encode("utf-8")).hexdigest()
return full_hash[:2], full_hash[2:] | aa1aebaf9b8330539eb0266c4ff97fd3459753c8 | 6,528 |
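For example, splitting off the first two hex characters of the SHA-1 digest is a common way to shard repositories into subdirectories:
prefix, rest = extract_hash_parts("octocat/hello-world")
print(len(prefix), len(rest))  # 2 38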
import numpy as np
from skimage import morphology
def create_cloud_mask(im_QA, satname, cloud_mask_issue):
"""
Creates a cloud mask using the information contained in the QA band.
KV WRL 2018
Arguments:
-----------
im_QA: np.array
Image containing the QA band
satname: string
short name for the satellite: ```'L5', 'L7', 'L8' or 'S2'```
cloud_mask_issue: boolean
True if there is an issue with the cloud mask and sand pixels are being
erroneously masked on the images
Returns:
-----------
cloud_mask : np.array
boolean array with True if a pixel is cloudy and False otherwise
"""
# convert QA bits (the bits allocated to cloud cover vary depending on the satellite mission)
if satname == 'L8':
cloud_values = [2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908]
elif satname == 'L7' or satname == 'L5' or satname == 'L4':
cloud_values = [752, 756, 760, 764]
elif satname == 'S2':
cloud_values = [1024, 2048] # 1024 = dense cloud, 2048 = cirrus clouds
# find which pixels have bits corresponding to cloud values
cloud_mask = np.isin(im_QA, cloud_values)
# remove cloud pixels that form very thin features. These are beach or swash pixels that are
# erroneously identified as clouds by the CFMASK algorithm applied to the images by the USGS.
if sum(sum(cloud_mask)) > 0 and sum(sum(~cloud_mask)) > 0:
morphology.remove_small_objects(cloud_mask, min_size=10, connectivity=1, in_place=True)
if cloud_mask_issue:
elem = morphology.square(3) # use a square of width 3 pixels
cloud_mask = morphology.binary_opening(cloud_mask,elem) # perform image opening
# remove objects with less than 25 connected pixels
morphology.remove_small_objects(cloud_mask, min_size=25, connectivity=1, in_place=True)
return cloud_mask | 5143c1c61425a131bdb3b0f91018643c9a9d4123 | 6,529 |
import re
def split_bucket(s3_key):
"""
Returns the bucket name and the key from an s3 location string.
"""
match = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE)
if not match:
return None, s3_key
return match.group(1), match.group(2) | 6b854bdc9d105643a9fa528e6fefd19672451e63 | 6,530 |
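Both forms below yield ('my-bucket', 'path/to/key.csv'); a string without a bucket part comes back unchanged as (None, s3_key):
print(split_bucket("s3://my-bucket/path/to/key.csv"))
print(split_bucket("my-bucket/path/to/key.csv"))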
def contains_chinese(ustr):
"""
将字符串中的中文去除
Args:
ustr: 字符串
Returns: 去除中文的字符串
"""
return any('\u4e00' <= char <= '\u9fff' for char in ustr) | 8d53a214e1754e1c129f1583a298f5a19e1f76d3 | 6,531 |
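For example:
print(contains_chinese("hello 世界"))   # True
print(contains_chinese("hello world"))  # False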
def payment_activity():
"""Request for extra-curricular activity"""
try:
req_json = request.get_json(force=True)
except TypeError:
return jsonify(message='Invalid json input'), 400
activity_info = req_json['activity']
student = req_json['student']
envelope_args = {
'signer_client_id': 1000,
'ds_return_url': req_json['callback-url'],
'gateway_account_id': session.get('payment_gateway_account_id'),
'gateway_name': session.get('payment_gateway'),
'gateway_display_name': session.get('payment_display_name')
}
try:
# Create envelope with payment
envelope = DsDocument.create_with_payment(
'payment-activity.html', student, activity_info, envelope_args
)
# Submit envelope to Docusign
envelope_id = Envelope.send(envelope, session)
except ApiException as exc:
return process_error(exc)
user_documents = session.get('ds_documents')
if not user_documents:
session['ds_documents'] = [envelope_id]
else:
session['ds_documents'].append(envelope_id)
try:
# Get the recipient view
result = Envelope.get_view(envelope_id, envelope_args, student, session)
except ApiException as exc:
return process_error(exc)
return jsonify({'envelope_id': envelope_id, 'redirect_url': result.url}) | a313b6e5ed00ffc9b3685ce28a9c640e96276347 | 6,532 |
def gdc_to_dos_list_response(gdcr):
"""
Takes a GDC list response and converts it to GA4GH.
    :param gdcr: GDC list response
:return:
"""
mres = {}
mres['data_objects'] = []
for id_ in gdcr.get('ids', []):
mres['data_objects'].append({'id': id_})
if len(gdcr.get('ids', [])) > 0:
mres['next_page_token'] = gdcr['ids'][-1:][0]
return mres | a237a64f55c15fb10070d76b6f3cc4f283460a96 | 6,533 |
from collections import defaultdict
def get_labels_by_db_and_omic_from_graph(graph):
"""Return labels by db and omic given a graph."""
db_subsets = defaultdict(set)
db_entites = defaultdict(dict)
entites_db = defaultdict(dict)
# entity_type_map = {'Gene':'genes', 'mirna_nodes':'mirna', 'Abundance':'metabolites', 'BiologicalProcess':'bps'}
for u, v, k in graph.edges(keys=True):
if ANNOTATIONS not in graph[u][v][k]:
continue
if 'database' not in graph[u][v][k][ANNOTATIONS]:
continue
for database in graph[u][v][k][ANNOTATIONS]['database']:
db_subsets[database].add(u)
db_subsets[database].add(v)
for database, nodes in db_subsets.items():
db_entites[database] = calculate_database_sets_as_dict(nodes, database)
database_sets = calculate_database_sets_as_dict(nodes, database)
db_entites[database] = database_sets
for entity_type, entities in database_sets.items():
entites_db[entity_type][database] = entities
return db_entites, entites_db | 14374977afb09fded25f78e14fced607bb8f9ea1 | 6,534 |
import os
def python_modules():
"""Determine if there are python modules in the cwd.
Returns:
list of python modules as strings
"""
ignored = ["setup.py", "conftest.py"]
py_modules = []
for file_ in os.listdir(os.path.abspath(os.curdir)):
if file_ in ignored or not os.path.isfile(file_):
continue
file_name, file_ext = os.path.splitext(file_)
if file_ext == ".py":
py_modules.append(file_name)
return sorted(py_modules) | eba262b38bddb0f76f614c74a9a0b1c090e48e6b | 6,535 |
import logging
from uk_covid19 import Cov19API
def covid_API_england ():
"""Function retrieves date, hospital admissions, total deaths
and daily cases using government API"""
england_only = [
'areaType=nation',
'areaName=England'
]
dates_and_cases = {
"date": "date",
"newCasesByPublishDate": "newCasesByPublishDate",
"newAdmissions": "newAdmissions",
"cumDailyNsoDeathsByDeathDate":"cumDailyNsoDeathsByDeathDate"
}
api = Cov19API(filters=england_only, structure=dates_and_cases)
logging.info('API has received query for national data')
global DATA2
DATA2 = api.get_json()
return DATA2 | a13090a052a35dd675c1fb31b861cbcc4b9e7c4a | 6,536 |
from meerschaum.actions.shell import default_action_completer
from typing import Optional
from typing import List
from typing import Any
def _complete_uninstall(
action : Optional[List[str]] = None,
**kw : Any
) -> List[str]:
"""
Override the default Meerschaum `complete_` function.
"""
if action is None:
action = []
options = {
'plugin': _complete_uninstall_plugins,
'plugins': _complete_uninstall_plugins,
}
if len(action) > 0 and action[0] in options:
sub = action[0]
del action[0]
return options[sub](action=action, **kw)
return default_action_completer(action=(['uninstall'] + action), **kw) | 1cfdc5694a069c924316f57e4804ae04d63bb4af | 6,537 |
def test_bucket():
"""Universal bucket name for use throughout testing"""
return 'test_bucket' | 2f78b1b1bf7ccfff07ca29213d975f3b20f0e9a5 | 6,538 |
import os
import sys
def validate_args(args):
""" Validate all of the arguments parsed.
Args:
args (argparser.ArgumentParser) : Args parsed by the argument parser.
Returns:
args (CoreclrArguments) : Args parsed
Notes:
If the arguments are valid then return them all in a tuple. If not,
raise an exception stating x argument is incorrect.
"""
coreclr_setup_args = CoreclrArguments(args,
require_built_test_dir=False,
require_built_core_root=True,
require_built_product_dir=False)
coreclr_setup_args.verify(args,
"base_root",
lambda directory: os.path.isdir(directory) if directory is not None else True,
"Base root is not a valid directory")
coreclr_setup_args.verify(args,
"diff_root",
lambda directory: os.path.isdir(directory) if directory is not None else True,
"Diff root is not a valid directory",
modify_arg=lambda directory: nth_dirname(os.path.abspath(sys.argv[0]), 3) if directory is None else os.path.abspath(directory))
coreclr_setup_args.verify(args,
"scratch_root",
lambda unused: True,
"Error setting scratch_root",
modify_arg=lambda directory: os.path.join(coreclr_setup_args.diff_root, '_', 'pmi') if directory is None else os.path.abspath(directory))
coreclr_setup_args.verify(args,
"skip_baseline_build",
lambda unused: True,
"Error setting baseline build")
coreclr_setup_args.verify(args,
"skip_diffs",
lambda unused: True,
"Error setting skip_diffs")
coreclr_setup_args.verify(args,
"target_branch",
lambda unused: True,
"Error setting target_branch")
coreclr_setup_args.verify(args,
"commit_hash",
lambda unused: True,
"Error setting commit_hash")
coreclr_setup_args.verify(args,
"ci_arch",
lambda ci_arch: ci_arch in coreclr_setup_args.valid_arches + ['x86_arm_altjit', 'x64_arm64_altjit'],
"Error setting ci_arch")
args = (
coreclr_setup_args.arch,
coreclr_setup_args.ci_arch,
coreclr_setup_args.build_type,
coreclr_setup_args.base_root,
coreclr_setup_args.diff_root,
coreclr_setup_args.scratch_root,
coreclr_setup_args.skip_baseline_build,
coreclr_setup_args.skip_diffs,
coreclr_setup_args.target_branch,
coreclr_setup_args.commit_hash
)
log('Configuration:')
log(' arch: %s' % coreclr_setup_args.arch)
log(' ci_arch: %s' % coreclr_setup_args.ci_arch)
log(' build_type: %s' % coreclr_setup_args.build_type)
log(' base_root: %s' % coreclr_setup_args.base_root)
log(' diff_root: %s' % coreclr_setup_args.diff_root)
log(' scratch_root: %s' % coreclr_setup_args.scratch_root)
log(' skip_baseline_build: %s' % coreclr_setup_args.skip_baseline_build)
log(' skip_diffs: %s' % coreclr_setup_args.skip_diffs)
log(' target_branch: %s' % coreclr_setup_args.target_branch)
log(' commit_hash: %s' % coreclr_setup_args.commit_hash)
return args | 35f9446ba1d52c0ec824e0184b5e17ddd16bbb76 | 6,539 |
import os
def _path_restrict(path, repo):
"""Generate custom package restriction from a given path.
This drops the repo restriction (initial entry in path restrictions)
since runs can only be made against single repo targets so the extra
restriction is redundant and breaks several custom sources involving
raw pkgs (lacking a repo attr) or faked repos.
"""
restrictions = []
path = os.path.realpath(path)
restrictions = repo.path_restrict(path)[1:]
restrict = packages.AndRestriction(*restrictions) if restrictions else packages.AlwaysTrue
# allow location specific scopes to override the path restrict scope
for scope in (x for x in base.scopes.values() if x.level == 0):
scope_path = os.path.realpath(pjoin(repo.location, scope.desc))
if path.startswith(scope_path):
break
else:
scope = _restrict_to_scope(restrict)
return scope, restrict | dbcff06e3cc32b0ff606459ce13cfc275bfae173 | 6,540 |
def us_send_code():
"""
Send code view.
This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES)
and a method request to send a code.
"""
form_class = _security.us_signin_form
if request.is_json:
if request.content_length:
form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())
else:
form = form_class(formdata=None, meta=suppress_form_csrf())
else:
form = form_class(meta=suppress_form_csrf())
form.submit_send_code.data = True
if form.validate_on_submit():
# send code
user = form.user
if not user.us_totp_secret:
after_this_request(_commit)
user.us_totp_secret = _security._totp_factory.generate_totp_secret()
_datastore.put(user)
send_security_token(
user,
form.chosen_method.data,
user.us_totp_secret,
user.us_phone_number,
send_magic_link=True,
)
if _security._want_json(request):
# Not authenticated yet - so don't send any user info.
return base_render_json(form, include_user=False)
return _security.render_template(
config_value("US_SIGNIN_TEMPLATE"),
us_signin_form=form,
methods=config_value("US_ENABLED_METHODS"),
chosen_method=form.chosen_method.data,
code_sent=True,
skip_loginmenu=True,
**_security._run_ctx_processor("us_signin")
)
# Here on GET or failed validation
if _security._want_json(request):
payload = {"methods": config_value("US_ENABLED_METHODS")}
return base_render_json(form, include_user=False, additional=payload)
return _security.render_template(
config_value("US_SIGNIN_TEMPLATE"),
us_signin_form=form,
methods=config_value("US_ENABLED_METHODS"),
skip_loginmenu=True,
**_security._run_ctx_processor("us_signin")
) | 7ca09dc6d6fdc7840d893e01b4166d65a1b9cc02 | 6,541 |
def merge_texts(files, file_index, data_type):
""" merge the dataframes in your list """
dfs, filenames = get_dataframe_list(files, file_index, data_type)
# enumerate over the list, merge, and rename columns
try:
df = dfs[0]
# print(*[df_.columns for df_ in dfs],sep='\n')
for i, frame in enumerate(dfs[1:]):
if data_type == 'gene':
try:
# rename first columns to metadata value
df = df.rename(columns={'raw_counts': get_metadata_tag(filenames[0])})
df = df.merge(frame, on='gene').rename(columns={'raw_counts':'raw_counts_' + get_metadata_tag(filenames[i-1])})
except:
continue
elif data_type == 'transcript':
try:
df = df.merge(frame, on='transcript')
# df = df.merge(frame, on=frame.index)
except:
continue
return df
except:
print("Could not merge dataframe") | 4e336a240afd100797b707efc9ccc96feb8d2919 | 6,542 |
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths):
"""Create dictionary of hurricanes with hurricane name as the key and a dictionary of hurricane data as the value."""
hurricanes = dict()
num_hurricanes = len(names)
for i in range(num_hurricanes):
hurricanes[names[i]] = {"Name": names[i],
"Month": months[i],
"Year": years[i],
"Max Sustained Wind": max_sustained_winds[i],
"Areas Affected": areas_affected[i],
"Damage": updated_damages[i],
"Deaths": deaths[i]}
return hurricanes | 5a27d5349113f29d2af55df27a2ee2c2cc524549 | 6,543 |
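A small illustrative call (the values below are made up):
hurricanes = create_dictionary(
    names=["Alpha", "Beta"],
    months=["October", "September"],
    years=[1999, 2003],
    max_sustained_winds=[165, 160],
    areas_affected=[["Central America"], ["The Caribbean", "Florida"]],
    updated_damages=[100000000.0, "Damages not recorded"],
    deaths=[90, 40],
)
print(hurricanes["Alpha"]["Year"])  # 1999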
def create_variable_type(parent, nodeid, bname, datatype):
"""
Create a new variable type
args are nodeid, browsename and datatype
or idx, name and data type
"""
nodeid, qname = _parse_nodeid_qname(nodeid, bname)
if datatype and isinstance(datatype, int):
datatype = ua.NodeId(datatype, 0)
if datatype and not isinstance(datatype, ua.NodeId):
raise RuntimeError("Data type argument must be a nodeid or an int refering to a nodeid, received: {}".format(datatype))
return node.Node(parent.server, _create_variable_type(parent.server, parent.nodeid, nodeid, qname, datatype)) | b2202b929bc51e2a2badfef6ec31df45e9736268 | 6,544 |
import numpy as np
def load_NWP(input_nc_path_decomp, input_path_velocities, start_time, n_timesteps):
"""Loads the decomposed NWP and velocity data from the netCDF files
Parameters
----------
input_nc_path_decomp: str
Path to the saved netCDF file containing the decomposed NWP data.
input_path_velocities: str
Path to the saved numpy binary file containing the estimated velocity
fields from the NWP data.
start_time: numpy.datetime64
The start time of the nowcasting. Assumed to be a numpy.datetime64 type
n_timesteps: int
Number of time steps to forecast
Returns
-------
R_d: list
A list of dictionaries with each element in the list corresponding to
a different time step. Each dictionary has the same structure as the
output of the decomposition function
uv: array-like
Array of shape (timestep,2,m,n) containing the x- and y-components
of the advection field for the (NWP) model field per forecast lead time.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required to load the decomposed NWP data, "
"but it is not installed"
)
# Open the file
ncf_decomp = netCDF4.Dataset(input_nc_path_decomp, "r", format="NETCDF4")
velocities = np.load(input_path_velocities)
# Initialise the decomposition dictionary
decomp_dict = dict()
decomp_dict["domain"] = ncf_decomp.domain
decomp_dict["normalized"] = bool(ncf_decomp.normalized)
decomp_dict["compact_output"] = bool(ncf_decomp.compact_output)
# Convert the start time and the timestep to datetime64 and timedelta64 type
zero_time = np.datetime64("1970-01-01T00:00:00", "ns")
analysis_time = np.timedelta64(int(ncf_decomp.analysis_time), "ns") + zero_time
timestep = ncf_decomp.timestep
timestep = np.timedelta64(timestep, "m")
valid_times = ncf_decomp.variables["valid_times"][:]
valid_times = np.array(
[np.timedelta64(int(valid_times[i]), "ns") for i in range(len(valid_times))]
)
valid_times = valid_times + zero_time
# Find the indices corresponding with the required start and end time
start_i = (start_time - analysis_time) // timestep
assert analysis_time + start_i * timestep == start_time
end_i = start_i + n_timesteps + 1
# Add the valid times to the output
decomp_dict["valid_times"] = valid_times[start_i:end_i]
# Slice the velocity fields with the start and end indices
uv = velocities[start_i:end_i, :, :, :]
# Initialise the list of dictionaries which will serve as the output (cf: the STEPS function)
R_d = list()
for i in range(start_i, end_i):
decomp_dict_ = decomp_dict.copy()
cascade_levels = ncf_decomp.variables["pr_decomposed"][i, :, :, :]
# In the netcdf file this is saved as a masked array, so we're checking if there is no mask
assert not cascade_levels.mask
means = ncf_decomp.variables["means"][i, :]
assert not means.mask
stds = ncf_decomp.variables["stds"][i, :]
assert not stds.mask
# Save the values in the dictionary as normal arrays with the filled method
decomp_dict_["cascade_levels"] = np.ma.filled(cascade_levels)
decomp_dict_["means"] = np.ma.filled(means)
decomp_dict_["stds"] = np.ma.filled(stds)
# Append the output list
R_d.append(decomp_dict_)
return R_d, uv | d96dacc14404f59b15a428e62608765486623460 | 6,545 |
from fractions import Fraction
def get_ts(fn, tc, scale=0):
"""Returns timestamps from a frame number and timecodes file or cfr fps
fn = frame number
tc = (timecodes list or Fraction(fps),tc_type)
scale default: 0 (ns)
examples: 3 (µs); 6 (ms); 9 (s)
"""
scale = 9 - scale
tc, tc_type = tc
if tc_type == 'cfr':
ts = round(10 ** scale * fn * Fraction(tc.denominator, tc.numerator))
return ts
elif tc_type == 'vfr':
ts = round(float(tc[fn]) * 10 ** (scale - 3))
return ts | 845b2600268a2942ca0fe2b09336ab724b00e299 | 6,546 |
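A worked example at the millisecond scale for a constant 24000/1001 fps stream (~23.976 fps):
from fractions import Fraction
# round(10**3 * 100 * 1001/24000) == 4171
print(get_ts(100, (Fraction(24000, 1001), 'cfr'), scale=6))  # 4171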
import numpy
from io import BytesIO
def adapt_array(array):
"""
Using the numpy.save function to save a binary version of the array,
and BytesIO to catch the stream of data and convert it into a BLOB.
:param numpy.array array: NumPy array to turn into a BLOB
:return: NumPy array as BLOB
:rtype: BLOB
"""
out = BytesIO()
numpy.save(out, array)
out.seek(0)
return out.read() | 36a62c745de0e933b520821ea7cce70f5013c5d2 | 6,547 |
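A matching converter (an assumption, not part of the original snippet) that turns such a BLOB back into a NumPy array, e.g. for use with sqlite3.register_converter:
import numpy
from io import BytesIO

def convert_array(blob):
    """Inverse of adapt_array: read a np.save BLOB back into an array."""
    out = BytesIO(blob)
    out.seek(0)
    return numpy.load(out)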
import tensorflow as tf
def make_queue(paths_to_image, labels, num_epochs=None, shuffle=True):
"""returns an Ops Tensor with queued image and label pair"""
images = tf.convert_to_tensor(paths_to_image, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.uint8)
input_queue = tf.train.slice_input_producer(
tensor_list=[images, labels],
num_epochs=num_epochs,
shuffle=shuffle)
return input_queue | 7a2ad9338642a5d6c7af59fe972ee9fd07f128b8 | 6,548 |
def display_import(request, import_id):
"""Display the details of an import."""
import_object = get_object_or_404(RegisteredImport, pk=import_id)
context_data = {'import': import_object}
return render(request, 'eats/edit/display_import.html', context_data) | b5676dd5da1791fb6eda3d6989b9c7c0c8b02b8c | 6,549 |
def TransformContainerAnalysisData(image_name, occurrence_filter=None,
deployments=False):
"""Transforms the occurrence data from Container Analysis API."""
analysis_obj = container_analysis_data_util.ContainerAndAnalysisData(
image_name)
occs = FetchOccurrencesForResource(image_name, occurrence_filter)
for occ in occs:
analysis_obj.add_record(occ)
if deployments:
depl_occs = FetchDeploymentsForImage(image_name, occurrence_filter)
for depl_occ in depl_occs:
analysis_obj.add_record(depl_occ)
analysis_obj.resolveSummaries()
return analysis_obj | d7021dde08a77ac6922274f3e69d841983728f4e | 6,550 |
def setup_milp(model, target, remove_blocked=False, exclude_reaction_ids=set()):
"""
This function constructs the MILP.
exclude_reaction_ids takes a list of reaction ids that shouldn't be considered for heterologous addition
(i.e. spontaneous reactions and exchange reactions). These reactions are thus always allowed to have flux within their model bounds.
"""
original_model = model
model = model.copy()
model.objective=target
for i in heterologous_reactions.keys(): # turns off each heterologous reaction in order to get the only the native metabolic network.
model.reactions.get_by_id(i).lower_bound=0
model.reactions.get_by_id(i).upper_bound=0
for r in model.reactions:
if r.id.find('MetaCyc')>-1:
r.type='heterologous'
else:
r.type='native'
# Set the solver to Gurobi for the fastest result. Set to CPLEX if Gurobi is not available.
if "gurobi" in cobra.util.solver.solvers.keys():
logger.info("Changing solver to Gurobi and tweaking some parameters.")
if "gurobi_interface" not in model.solver.interface.__name__:
model.solver = "gurobi"
# The tolerances are set to the minimum value. This gives maximum precision.
problem = model.solver.problem
problem.params.NodeMethod = 1 # primal simplex node relaxation
problem.params.FeasibilityTol = 1e-9 #If a flux limited to 0 by a constraint, which range around it is still considered the same as 0 > set smallest possible
problem.params.OptimalityTol = 1e-3 #how sure the solver has to be about this optimum being really the best it has.
problem.params.IntFeasTol = 1e-9 #If a value is set to an integer, how much may it still vary? > set smallest possible
problem.params.MIPgapAbs = 1e-9
problem.params.MIPgap = 1e-9
problem.params.Threads=1 #In order to reduce memory usage (increased calculation time)
problem.params.TimeLimit = 200 # Use max 200 seconds when called, return best solution after that
problem.params.PoolSearchMode = 1 #0 for only finding the optimum, 1 for finding more solutions (but no quality guaranteed), 2 for finding the n best possible solutions
problem.params.PoolSolutions = 10 # Number of solutions kept when finding the optimal solution
problem.params.PoolGap = 0.9 # only store solutions within 90% of the optimal objective value
elif "cplex" in cobra.util.solver.solvers.keys():
logger.warning("Changing solver to CPLEX, as Gurobi is not available. This may cause a big slowdown and limit options afterwards.")
if "cplex_interface" not in model.solver.interface.__name__:
model.solver = "cplex"
# The tolerances are set to the minimum value. This gives maximum precision.
problem = model.solver.problem
problem.parameters.mip.strategy.startalgorithm.set(1) # primal simplex node relaxation
problem.parameters.simplex.tolerances.feasibility.set(1e-9) #If a flux limited to 0 by a constraint, which range around it is still considered the same as 0 > set smallest possible
problem.parameters.simplex.tolerances.optimality.set(1e-3) #possibly fine with 1e-3, try if allowed. Is how sure the solver has to be about this optimum being really the best it has.
problem.parameters.mip.tolerances.integrality.set(1e-9) #If a value is set to an integer, how much may it still vary? > set smallest possible
problem.parameters.mip.tolerances.absmipgap.set(1e-9)
problem.parameters.mip.tolerances.mipgap.set(1e-9)
problem.parameters.mip.pool.relgap.set(0.9) # For populate: find all solutions within 10% of the optimum for relgap = 0.1
problem.parameters.timelimit.set(200) # Use max 200 seconds for solving
problem.parameters.mip.limits.populate.set(20) # Find max 20 solutions (=default)
else:
logger.warning("You are trying to run 'Hamlet Hot Rig' with %s. This might not end well." %
model.solver.interface.__name__.split(".")[-1])
pass
# Remove reactions that are blocked: no flux through these reactions possible. This will reduce the search space for the solver, if not done already.
if remove_blocked:
blocked_reactions = cameo.flux_analysis.analysis.find_blocked_reactions(model)
model.remove_reactions(blocked_reactions)
# Make dual
model_with = model.copy() # This variable looks unnecessary, but is kept out of fear of messing stuff up
model_with.optimize()
dual_problem = convert_linear_problem_to_dual(model_with.solver)
logger.debug("Dual problem successfully created")
# Combine primal and dual
primal_problem = model.solver
for var in dual_problem.variables: # All variables in the dual are copied to the primal
var = primal_problem.interface.Variable.clone(var)
primal_problem.add(var)
for const in dual_problem.constraints: # All constraints in the dual are copied to the primal
const = primal_problem.interface.Constraint.clone(const, model=primal_problem)
primal_problem.add(const)
logger.debug("Dual and primal combined")
dual_problem.optimize()
# Dictionaries to hold the binary control variables:
heterologous_y_vars = {} # 1 for 'knockin', 0 for inactive
medium_y_vars = {} # 1 for medium addition (up to -10), 0 for no addition
# Now the fun stuff
constrained_dual_vars = set()
# For the knockins and medium additions:
for reaction in [r for r in model.reactions if r.type == "heterologous"]:
# Add constraint variables
interface = model.solver.interface
y_var = interface.Variable("y_" + reaction.id, type="binary")
        # Constrain the primal: flux through the reaction is limited to (-1000, 1000), or to any tighter bounds defined earlier
model.solver.add(interface.Constraint(reaction.flux_expression - 1000 * y_var, ub=0, name="primal_y_const_"+reaction.id+"_ub"))
model.solver.add(interface.Constraint(reaction.flux_expression + 1000 * y_var, lb=0, name="primal_y_const_"+reaction.id+"_lb"))
# Constrain the dual
constrained_vars = []
if reaction.upper_bound != 0:
dual_forward_ub = model.solver.variables["dual_" + reaction.forward_variable.name + "_ub"]
model.solver.add(interface.Constraint(dual_forward_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_forward_ub)
if reaction.lower_bound != 0:
dual_reverse_ub = model.solver.variables["dual_" + reaction.reverse_variable.name + "_ub"]
model.solver.add(interface.Constraint(dual_reverse_ub - 1000 * (1 - y_var), ub=0))
constrained_vars.append(dual_reverse_ub)
constrained_dual_vars.update(constrained_vars)
# Add y variable to the corresponding modifications dictionary
heterologous_y_vars[y_var] = reaction
logger.debug("Control variables created")
    # Add a constraint on the number of heterologous switches
heterologous_turn_on = model.solver.interface.Constraint(
optlang.symbolics.Add(*heterologous_y_vars), lb=0, ub=0, name="heterologous_reaction_constraint"
)
model.solver.add(heterologous_turn_on)
# Set the objective
primal_objective = model.solver.objective
dual_objective = interface.Objective.clone(
dual_problem.objective, model=model.solver
)
switch_objective=interface.Objective(heterologous_turn_on.expression, direction='min')
full_objective = interface.Objective(primal_objective.expression-dual_objective.expression, direction="max")
model.objective = full_objective
return model,primal_objective,dual_objective,full_objective,switch_objective | ddefa4b44ac0dce087367762ead2cb0ce9bbb14b | 6,551 |
def bilinear_initializer(shape, dtype, partition_info):
"""
Bilinear initializer for deconvolution filters
"""
kernel = get_bilinear_kernel(shape[0], shape[1], shape[2])
broadcasted_kernel = np.repeat(kernel.reshape(shape[0], shape[1], shape[2], -1), repeats=shape[3], axis=3)
return broadcasted_kernel | 48a7cc2808e72df816c9b6ff7a8975eb52e4185e | 6,552 |
def pdf():
"""
    Demo version of the PDF report; it opens directly in the browser,
    which is more convenient than downloading it every time
"""
render_pdf(sample_payload_obj, './output.pdf')
upload_file('./output.pdf')
return send_file('./output.pdf', attachment_filename='output.pdf') | a2a60c26df9844e605606538d40d1402cd5a4985 | 6,553 |
def run_clear_db_es(app, arg_env, arg_skip_es=False):
"""
This function actually clears DB/ES. Takes a Pyramid app as well as two flags. _Use with care!_
For safety, this function will return without side-effect on any production system.
Also does additional checks based on arguments supplied:
If an `arg_env` (default None) is given as a non-empty string value,
this function will return without side-effect if the current app environment does not match the given value.
If `arg_skip_es` (default False) is True, this function will return after DB clear
and before running create_mapping.
Args:
app: Pyramid application
arg_env (str): if provided, only run if environment matches this value
arg_skip_es (bool): if True, do not run create_mapping after DB clear
Returns:
bool: True if DB was cleared (regardless of ES)
"""
env = app.registry.settings.get('env.name', '')
# for now, do NOT allow clearing of production systems
if is_stg_or_prd_env(env):
log.error('clear_db_es_contents: will NOT run on env %s. Exiting...' % env)
return False
if arg_env and arg_env != env:
log.error('clear_db_es_contents: environment mismatch! Given --env %s '
'does not match current env %s. Exiting....' % (arg_env, env))
return False
log.info('clear_db_es_contents: clearing DB tables...')
db_success = clear_db_tables(app)
if not db_success:
log.error('clear_db_es_contents: clearing DB failed! Try to run again.'
' This command can fail if there are external DB connections')
return False
log.info('clear_db_es_contents: successfully cleared DB')
# create mapping after clear DB to remove ES contents
if not arg_skip_es:
log.info('clear_db_es_contents: clearing ES with create_mapping...')
run_create_mapping(app, purge_queue=True)
log.info('clear_db_es_contents: done!')
return True | e7d865dec8691c4d0db7bef71b68bab9bc5174a2 | 6,554 |
def init_total_population():
"""
Real Name: b'init total population'
Original Eqn: b'init Infected asymptomatic+init Susceptible'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return init_infected_asymptomatic() + init_susceptible() | cf742a00d0140c48dbdb4692dabbb8bbd6c5c6b2 | 6,555 |
def one_hot(dim: int, idx: int):
""" Get one-hot vector """
v = np.zeros(dim)
v[idx] = 1
return v | 84b87b357dc7b7bf54af4718885aa1d6fbcb35e4 | 6,556 |
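# Usage sketch (illustrative, assuming the one_hot function above is in scope):
# a 5-dimensional one-hot vector with the 1 placed at index 2.
one_hot(5, 2)  # -> array([0., 0., 1., 0., 0.])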
import re
def process_priors(prior_flat, initial_fit):
"""Process prior input array into fit object."""
if any(
[float(val) <= 0 for key, val in prior_flat.items() if key.endswith("sdev")]
):
raise ValueError("Standard deviations must be larger than zero.")
prior = {}
for key, val in initial_fit.prior.items():
if hasattr(val, "__len__"):
nmax = len(
[k for k in prior_flat if re.match(f"{key}__array_[0-9]+-mean", k)]
)
prior[key] = gv.gvar(
[prior_flat[f"{key}__array_{n}-mean"] for n in range(nmax)],
[prior_flat[f"{key}__array_{n}-sdev"] for n in range(nmax)],
)
else:
prior[key] = gv.gvar(prior_flat[f"{key}-mean"], prior_flat[f"{key}-sdev"])
fit = nonlinear_fit(initial_fit.data, initial_fit.fcn, prior)
for attr in ["models", "meta"]:
if hasattr(initial_fit, attr):
setattr(fit, attr, getattr(initial_fit, attr))
return fit | 32358fb494a221e5e7d5d4d73776993f1c363f0f | 6,557 |
def _sample_data(ice_lines, frac_to_plot):
"""
Get sample ice lines to plot
:param ice_lines: all ice lines
:param frac_to_plot: fraction to plot
:return: the sampled ice lines
"""
if frac_to_plot < 1.:
ice_plot_data = ice_lines.sample(int(ice_lines.shape[0] * frac_to_plot))
elif frac_to_plot > 1:
ice_plot_data = ice_lines.sample(frac_to_plot)
else:
ice_plot_data = ice_lines.copy()
ice_plot_data = ice_plot_data.reset_index(drop=True)
return ice_plot_data | e5da9b1ecaf615863504e81cdd246336de97b319 | 6,558 |
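# Usage sketch (illustrative, assuming pandas/numpy and the _sample_data function above):
# frac_to_plot < 1 samples that fraction of the rows, frac_to_plot > 1 samples that many rows.
import numpy as np
import pandas as pd
ice_lines = pd.DataFrame(np.random.rand(100, 10))
assert len(_sample_data(ice_lines, frac_to_plot=0.5)) == 50
assert len(_sample_data(ice_lines, frac_to_plot=20)) == 20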
def fast_dot(M1, M2):
"""
Specialized interface to the numpy.dot function
This assumes that A and B are both 2D arrays (in practice)
When A or B are represented by 1D arrays, they are assumed to reprsent
diagonal arrays
This function then exploits that to provide faster multiplication
"""
if len(M1.shape) in [1, 2] and len(M2.shape) == 1:
return M1*M2
elif len(M1.shape) == 1 and len(M2.shape) == 2:
return M1[:,None]*M2
elif len(M1.shape) == 2 and len(M2.shape) == 2:
return M1.dot(M2)
else:
raise Exception('fast_dot requires shapes to be 1 or 2') | b34e44787f48dfb25af4975e74262f3d8eaa5096 | 6,559 |
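# Usage sketch (illustrative): a 1-D array stands in for a diagonal matrix, so
# fast_dot(d, M) matches np.diag(d) @ M and fast_dot(M, d) matches M @ np.diag(d).
import numpy as np
d = np.array([1.0, 2.0, 3.0])
M = np.ones((3, 3))
assert np.allclose(fast_dot(d, M), np.diag(d) @ M)
assert np.allclose(fast_dot(M, d), M @ np.diag(d))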
async def autoredeem(
bot: commands.Bot,
guild_id: int
) -> bool:
"""Iterates over the list of users who have
enabled autoredeem for this server, and if
one of them does redeem some of their credits
and alert the user."""
await bot.wait_until_ready()
conn = bot.db.conn
guild = bot.get_guild(guild_id)
if guild is None:
return False
async with bot.db.lock:
async with conn.transaction():
ar_members = await conn.fetch(
"""SELECT * FROM members
WHERE guild_id=$1
AND autoredeem=True""",
guild_id
)
redeemed = False
for m in ar_members:
ms = await get_members([int(m['user_id'])], guild)
if len(ms) == 0:
continue
current_credits = await get_credits(
bot, int(m['user_id'])
)
if current_credits < bot_config.PREMIUM_COST:
continue
try:
await alert_user(
bot, int(m['user_id']),
f"You have autoredeem enabled in {guild.name}, "
f"so {bot_config.PREMIUM_COST} credits were taken "
"from your account since they ran out of premium."
)
except Exception:
continue
try:
await redeem(
bot, int(m['user_id']),
guild_id, 1
)
redeemed = True
except errors.NotEnoughCredits:
pass
return redeemed | ee0a34e4aa9d85e9402dcbec8a1ecce5a2ca58e1 | 6,560 |
def get_ISO_369_3_from_string(term: str,
default: str = None,
strict: bool = False,
hdp_lkg: dict = None) -> str:
"""Convert an individual item to a ISO 369-3 language code, UPPERCASE
Args:
term (str): The input term to search
default (str, optional): Default no match found. Defaults to None.
strict (bool, optional): If require exact match on hdp_lkg.
hdp_lkg (dict, optional): HDP localization knowledge graph dictionary.
Default to use internal HDP localization knowledge graph.
Returns:
str: An ISO 369-3 language code, UPPERCASE
Examples:
>>> import hxlm.core.localization as l10n
>>> l10n.get_ISO_369_3_from_string(term='pt')
'POR'
>>> l10n.get_ISO_369_3_from_string(term='en')
'ENG'
>>> l10n.get_ISO_369_3_from_string(term='ZZZ', strict=False)
'ZZZ'
>>> l10n.get_ISO_369_3_from_string(term='pt_BR')
>>> # inputs like 'pt_BR' still not implemented... yet
>>> # But when using system languages, like 'pt_BR:pt:en',
>>> # often the next term would be PT anyway
"""
if _IS_DEBUG:
print('get_ISO_369_3_from_string')
print(' term', term)
print(' term.upper', term.upper())
print(' default', default)
print(' strict', strict)
# print(' hdp_lkg', hdp_lkg)
result = default
if hdp_lkg is None:
hdp_lkg = get_localization_knowledge_graph()
# Since the HDP localization knowledge may not contain the full ISO 639-3
# language codes, without strict = True, if the input already is 3 letter
# uppercase ASCII letters, we will fallback to this
if not strict and (len(term) == 3 and term.isalpha() and term.isupper()):
result = term
if hdp_lkg is None or 'linguam23' not in hdp_lkg:
return result
if term.upper() in hdp_lkg['linguam23']:
return hdp_lkg['linguam23'][term.upper()]
if len(term) >= 5 and len(term) >= 12:
if _IS_DEBUG:
print(' TODO: implement some type of search by language name')
return result | 434c39cad948bf7e4e33e9f5a6bd8da7c8a708ab | 6,561 |
from typing import Optional
from typing import Sequence
def plot_heatmap(
data: DataFrame,
columns: Optional[Sequence[str]] = None,
droppable: bool = True,
sort: bool = True,
cmap: Optional[Sequence[str]] = None,
names: Optional[Sequence[str]] = None,
yaxis: bool = False,
xaxis: bool = True,
legend_kws: dict = None,
sb_kws: dict = None) -> SubplotBase:
"""NA heatmap. Plots NA values as red lines and normal values
as black lines.
Parameters
----------
data : DataFrame
Input data.
columns : Optional[Sequence[str]], optional
Columns names.
droppable : bool, optional
Show values to be dropped by :py:meth:`pandas.DataFrame.dropna()`
method.
sort : bool, optional
Sort DataFrame by selected columns.
cmap : Optional[Sequence[str]], optional
Heatmap and legend colormap: non-missing values, droppable values,
NA values, correspondingly. Passed to :py:meth:`seaborn.heatmap()`
method.
names : Optional[Sequence[str]], optional
Legend labels: non-missing values, droppable values,
NA values, correspondingly.
yaxis : bool, optional
Show Y axis.
xaxis : bool, optional
Show X axis.
legend_kws : dict, optional
Keyword arguments passed to
:py:meth:`matplotlib.axes._subplots.AxesSubplot()` method.
sb_kws : dict, optional
Keyword arguments passed to
:py:meth:`seaborn.heatmap` method.
Returns
-------
matplotlib.axes._subplots.AxesSubplot
AxesSubplot object.
"""
if not cmap:
cmap = ['green', 'orange', 'red']
if not names:
names = ['Filled', 'Droppable', 'NA']
if not sb_kws:
sb_kws = {'cbar': False}
cols = _select_cols(data, columns).tolist()
data_na = data.loc[:, cols].isna().copy()
if sort:
data_na.sort_values(by=cols, inplace=True)
if droppable:
non_na_mask = ~data_na.values
na_rows_mask = data_na.any(axis=1).values[:, None]
droppable_mask = non_na_mask & na_rows_mask
data_na = data_na.astype(float)
data_na.values[droppable_mask] = 0.5
labels = names
else:
labels = [names[0], names[-1]]
if not legend_kws:
legend_kws = {'bbox_to_anchor': (0.5, 1.15), 'loc': 'upper center', 'ncol': len(labels)}
ax_heatmap = heatmap(data_na, cmap=cmap, **sb_kws)
ax_heatmap.yaxis.set_visible(yaxis)
ax_heatmap.xaxis.set_visible(xaxis)
legend_elements = [Patch(facecolor=cmap[0]), Patch(facecolor=cmap[-1])]
if droppable:
legend_elements.insert(1, Patch(facecolor=cmap[1]))
ax_heatmap.legend(legend_elements, labels, **legend_kws)
return ax_heatmap | 84233ee9293131ce98072f880a3c1a57fc71b321 | 6,562 |
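# Usage sketch (illustrative; assumes pandas/numpy/matplotlib and the seaborn-based
# helpers used above are available): plot the NA pattern of a small DataFrame.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.DataFrame({'a': [1, np.nan, 3], 'b': [np.nan, 2, 3], 'c': [1, 2, 3]})
ax = plot_heatmap(df, droppable=True)
plt.show()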
def iadd_tftensor(left, right, scale=1):
"""This function performs an in-place addition. However, TensorFlow returns
a new object after a mathematical operation. This means that in-place here
only serves to avoid the creation of a TfTensor instance. We do not have
any control over the memory where the Tensor is stored."""
_check_shape(left, right)
# If scale=1 we obtain a x2 speed-up if we do not multiply by the scale.
if scale == 1:
left._tf = left._tf + right._tf
else:
left._tf = left._tf + scale*right._tf
return left | 3f14de3df3544b74f0a900fca33eb6cdf6e11c00 | 6,563 |
import sys
def bookmark_desc_cmd(query):
"""describe: desc [num.. OR url/tag substr..]."""
split_query = query[4:].strip().split()
if not split_query:
sys.stderr.write(BOOKMARK_HELP)
return False
bk_indices = find_bookmark_indices(split_query)
if bk_indices:
return describe_bookmark(bk_indices)
return False | 2572ffbb801f01bdff4706a2823d2ca385943290 | 6,564 |
def encode(string_):
"""Change String to Integers"""
return (lambda f, s: f(list( ord(c) for c in str(string_) ) , \
s))(lambda f, s: sum(f[i] * 256 ** i for i in \
range(len(f))), str(string_)) | da3a729c2024d80792e08424745dc267ca67dff7 | 6,565 |
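# Usage sketch (illustrative): characters are packed little-endian in base 256.
encode("Hi")  # ord('H') + ord('i') * 256 = 72 + 26880 = 26952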
def generate_file_prefix(bin_params):
""" Use the bin params to generate a file prefix."""
prefix = "bin_"
for j in range(0, len(bin_params)):
if (j + 1) % 2 != 0:
prefix += str(bin_params[j]) + "-"
else:
prefix += str(bin_params[j]) + "_"
return prefix | cc058a64fcab77f6a4794a8bf7edb1e0e86c040c | 6,566 |
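# Usage sketch (illustrative): values at even indices are followed by '-',
# values at odd indices by '_'.
generate_file_prefix([1, 2, 3, 4])  # -> 'bin_1-2_3-4_'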
def extract_features_from_html(html, depth, height):
"""Given an html text, extract the node based features
including the descendant and ancestor ones if depth and
height are respectively nonzero."""
root = etree.HTML(html.encode('utf-8')) # get the nodes, serve bytes, unicode fails if html has meta
features = extract_features_from_nodes(list(root.iter()), depth, height)
# add the paths to the elements for identification
features.loc[:, 'path'] = pd.Series((node.getroottree().getpath(node) for node in root.iter()))
return features | ee7b627bf7c859fc886eab10f6a8b6b793653262 | 6,567 |
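# Usage sketch (illustrative; assumes lxml, pandas and the module's
# extract_features_from_nodes helper are available):
html = "<html><body><div><p>hello</p><p>world</p></div></body></html>"
features = extract_features_from_html(html, depth=0, height=0)
features['path']  # XPaths such as '/html', '/html/body', '/html/body/div', ...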
def __clean_field(amazon_dataset, option):
"""Cleanes the Text field from the datset """
clean = []
if option == 1:
for i in amazon_dataset['Text']:
clean.append(__one(i))
elif option == 2:
for i in amazon_dataset['Summary']:
clean.append(__one(i))
else:
pass
return clean | 1e8ef28c810413b87804a42514059c347d715972 | 6,568 |
import os
def write_bruker_search_path(ftype, destfile, sourcefile=None, sourcetext=None):
"""Will copy a file from sourcefile (out of the add_files directory) or
text to destfile in first directory of Bruker search path for
ftype = cpd, f1, gp, ... with checks for overwrite, identity, etc.
"""
if pp.run_flag not in pp.run_flags:
raise Exception('unknown run_flag: ' + pp.run_flag)
destfile = destfile + pp.name_tag
ut.putcomment('write_bruker_search_path: start', 2)
ut.putcomment('ftype: %s, destfile: %s' % (ftype, destfile), 2, ornament=False)
if sourcetext and sourcefile:
raise Exception('both sourcefile and sourcetext defined')
if sourcetext:
source = sourcetext
sourcestring = 'sourcetext'
ut.putcomment('input is from sourcetext', 2, ornament=False)
ut.putcomment(source, 2, ornament=False)
else:
ut.putcomment('input is from sourcefile: ' + pp.addfiles_path + '/' + sourcefile,
2, ornament=False)
sourcestring = 'sourcefile <%s>' % sourcefile
f = open(os.path.join(pp.addfiles_path, sourcefile))
source = f.read()
f.close()
ut.putcomment(source, 3, ornament=False)
(destfilefullpath, destdir) = ut.find_file_dir(destfile, ftype)
if destfilefullpath:
ut.putcomment('destination file exists: ' + destfilefullpath, 2, ornament=False)
if not ut.cmp_text_file(source, destfilefullpath):
outstring = ('PP_FILE NO_ACTION: %s equals destfile <%s>'
% (sourcestring, destfilefullpath))
ut.putcomment(outstring, 1, ornament=False)
pp.pp_log_fd.write('%s\n' % outstring)
else:
outstring = ('PP_FILE CONFLICT: %s is not equal to destfile <%s>'
% (sourcestring, destfilefullpath))
ut.putcomment(outstring, 0, ornament=False)
pp.pp_log_fd.write('%s\n' % outstring)
if pp.run_flag == 'DRY':
outstring = ('PP_FILE OVERWRITE: %s will overwrite destfile <%s>'
% (sourcestring, destfilefullpath))
pp.pp_log_fd.write('%s\n' % outstring)
elif pp.run_flag == 'NORMAL':
raise Exception('%s\nPP_FILE NO_OVERWRITE: run_flag is %s\n'
% (outstring, pp.run_flag))
elif pp.run_flag == 'FORCE':
outstring = ('PP_FILE OVERWRITE: %s overwrites destfile <%s>'
% (sourcestring, destfilefullpath))
ut.putcomment(outstring, 0, ornament=False)
pp.pp_log_fd.write('%s\n' % outstring)
ut.write_text_file(source, destfilefullpath)
elif pp.run_flag == 'INTERACTIVE':
raise Exception('%s\nPP_FILE NO_OVERWRITE: run_flag is %s\n'
% (outstring, pp.run_flag))
else:
df1 = os.path.join(destdir, destfile)
outstring = ('PP_FILE CREATE: destfile <%s> from %s'
% (df1, sourcestring))
ut.putcomment(outstring, 1, ornament=False)
pp.pp_log_fd.write('%s\n' % outstring)
if pp.run_flag == 'DRY':
pass
elif pp.run_flag in ['FORCE', 'NORMAL', 'INTERACTIVE']:
ut.write_text_file(source, df1)
else:
raise Exception('unknown run_flag: ' + pp.run_flag)
ut.putcomment('write_bruker_search_path: end', 2)
return destfile | eaeaac026502308cbeb50932decfcc79d83d127c | 6,569 |
import warnings
def _read_atom_line(line):
"""
COLUMNS DATATYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 RecordName "ATOM "
7 - 11 Integer serial Atom serial number.
13 - 16 Atom name Atom name.
17 Character altLoc Alternate location indicator.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Code for insertion of residues.
31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) occupancy Occupancy.
61 - 66 Real(6.2) tempFactor Temperature factor.
77 - 78 LString(2) element Element symbol, right-justified.
79 - 80 LString(2) charge Charge on the atom.
"""
lineInfo = {}
lineInfo['RecordName'] = line[0:6]
lineInfo['serial'] = int(line[7:12].strip())
lineInfo['name'] = line[12:16].strip()
lineInfo['altLoc'] = line[16].strip()
lineInfo['resName'] = line[17:21].strip()
lineInfo['chainID'] = line[21].strip()
lineInfo['resSeq'] = int(line[22:26].strip())
lineInfo['iCode'] = line[26].strip()
try:
lineInfo['position'] = np.array(
[float(line[30:38]), float(line[38:46]), float(line[46:54])],
)
except ValueError:
raise ValueError("Invalid or missing coordinate(s)")
try:
lineInfo['occupancy'] = float(line[54:60])
except ValueError:
lineInfo['occupancy'] = None # Rather than arbitrary zero or one
if lineInfo['occupancy'] is not None and lineInfo['occupancy'] < 0:
warnings.warn("Negative occupancy in one or more atoms")
try:
lineInfo['bfactor'] = float(line[60:66])
except ValueError:
# The PDB use a default of zero if the data is missing
lineInfo['bfactor'] = 0.0
lineInfo['segid'] = line[72:76].strip()
lineInfo['element'] = line[76:78].strip().upper()
lineInfo['charge'] = line[79:81].strip()
return lineInfo | e511352dcc0bfcdec98035673adf759256c13e4c | 6,570 |
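# Usage sketch (illustrative): parse one fixed-width PDB ATOM record; the example
# line is assembled field by field to keep the 80-column layout aligned.
atom_line = (
    "ATOM  " + "    1" + " " + " N  " + " " + "MET" + " " + "A" + "   1" + " "
    + "   " + "  38.198" + "  19.582" + "  28.998" + "  1.00" + " 49.32"
    + " " * 10 + " N" + "  "
)
info = _read_atom_line(atom_line)
info['name'], info['resName'], info['position']  # ('N', 'MET', array([38.198, 19.582, 28.998]))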
from typing import List
def semantic_parse_entity_sentence(sent: str) -> List[str]:
"""
@param sent: sentence to grab entities from
@return: noun chunks that we consider "entities" to work with
"""
doc = tnlp(sent)
ents_ke = textacy.ke.textrank(doc, normalize="lemma")
entities = [ent for ent, _ in ents_ke]
return entities | c65fa1d8da74b86b3e970cbf7f351e03d5a3fcec | 6,571 |
import json
import os
def extract_to_files(pkr_path, verbose=False):
"""
Extract data and image to .json and .png (if any) next to the .pkr
"""
title, buttons, png_data = parse_animschool_picker(pkr_path, verbose)
# Save to json
with open(pkr_path + '.json', 'w') as f:
json.dump([title, buttons], f, indent=4)
# Write PNG to file:
png_path = pkr_path + '.png'
if png_data and not os.path.exists(png_path):
save_png(png_data, png_path)
return title, buttons, png_data | 097a4ceb87f456b4a333ed18f7fa961aba2a2cad | 6,572 |
def readLog(jobpath):
"""
Reads log to determine disk/mem usage, runtime
For processing time, it will only grab the last execution/evict/terminated times.
    And runTime supersedes evictTime (e.g. an exec->evict combination will not be written if
a later exec-termination combination exists in the log)
To be appended to processing database, so that structure is:
ocr_processing["tag"]["jobs"] = [ {startTime: xxx, execTime: yyy, ... }, {reports from other jobs...} ]
    :jobpath: path to the job directory (within the submit/output directories) containing process.log
:returns: If successful, returns dict of the form
jobReport = { subTime: (time of submission),
execTime: (start time of latest execution),
evictTime: (time of job eviction, if any),
termTime: (time of job termination, if any),
runTime: (time between execution start and termination/eviction time),
usage: { usage dictionary from above},
}
"""
try:
with open(jobpath + "/process.log") as file:
chunk = ""
subTime = None
execTime = None
evictTime = None
termTime = None
runTime = None
jobReport = {}
jobReport["path"] = jobpath
for line in file:
if line.startswith("..."):
if chunk.startswith("000"): # submitted
jobReport["subTime"] = parseTime(chunk.split('\n')[0])
elif chunk.startswith("001"): # executing
jobReport["execTime"] = parseTime(chunk.split('\n')[0])
elif chunk.startswith("004"): # evicted, has partitionable table
jobReport["evictTime"] = parseTime(chunk.split('\n')[0])
runTime = (jobReport["evictTime"] - jobReport["execTime"])
jobReport["runTime"] = runTime.days * 86400 + runTime.seconds
jobReport["usage"] = parseResources(chunk)
elif chunk.startswith("005"): # termination, has partitionable table
jobReport["termTime"] = parseTime(chunk.split('\n')[0])
runTime = (jobReport["termTime"] - jobReport["execTime"])
jobReport["runTime"] = runTime.days * 86400 + runTime.seconds
jobReport["usage"] = parseResources(chunk)
elif chunk.startswith("006"):
pass
elif chunk.startswith("009"):
pass
else:
if DEBUG:
print "UNKNOWN CODE"
print chunk
chunk=""
else:
chunk+=line
return jobReport
except IOError:
print "Couldn't find file at %s/process.log" % jobpath
return None | 4bc3f6b79d3eb18544ea54dd5b6ac173da43f83b | 6,573 |
from numpy import array
def match_cam_time(events, frame_times):
"""
Helper function for mapping ephys events to camera times. For each event in events, we return the nearest
camera frame before the event.
Parameters
----------
events : 1D numpy array
Events of interest. Sampled at a higher rate than frame_times.
frame_times : 1D numpy array
Timepoints of camera frames to be assigned to events. Sampled at a lower rate than events.
"""
output = []
for a in events:
lags = array(a - frame_times)
before = len(lags[lags > 0]) - 1
if before >= 0:
output.append(before)
return array(output) | 3f086a0f65a34183a429cf3c50e90fdc742672d3 | 6,574 |
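# Usage sketch (illustrative, reusing the numpy array import above): each event is
# assigned the index of the last camera frame that occurred before it.
frame_times = array([0.0, 1.0, 2.0, 3.0])
events = array([0.5, 2.4, 3.7])
match_cam_time(events, frame_times)  # -> array([0, 2, 3])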
import ctypes
def _glibc_version_string_ctypes() -> Optional[str]:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
    try:
        import ctypes
    except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str | 86ae885182585eeb1c5e53ee8109036dd93d06d3 | 6,575 |
from typing import Sequence
from typing import List
from typing import Tuple
def encode_instructions(
stream: Sequence[Instruction],
func_pool: List[bytes],
string_pool: List[bytes],
) -> Tuple[bytearray, List[bytes], List[bytes]]:
"""
Encode the bytecode stream as a single `bytes` object that can be
written to file or kept in memory.
Parameters
----------
stream: Sequence[Instruction]
The bytecode instruction objects to be encoded.
func_pool: List[bytes]
Where the generated bytecode for function objects is stored
before being put in the final bytecode stream.
string_pool: List[bytes]
Where string objects are stored before being put in the final
bytecode stream.
Returns
-------
bytes
The encoded stream of bytecode instructions. It is guaranteed
to have a length proportional to the length of `stream`.
"""
result_stream = bytearray(len(stream) * 8)
for index, instruction in enumerate(stream):
start = index * 8
end = start + 8
opcode_space = instruction.opcode.value.to_bytes(1, BYTE_ORDER)
operand_space = encode_operands(
instruction.opcode, instruction.operands, func_pool, string_pool
)
operand_space = operand_space.ljust(7, b"\x00")
result_stream[start:end] = opcode_space + operand_space
return result_stream, func_pool, string_pool | 0a371731f627b96ca3a07c5ac992fd46724a7817 | 6,576 |
def get_random(selector):
"""Return one random game"""
controller = GameController
return controller.get_random(MySQLFactory.get(), selector) | 89f458a434cd20e10810d03e7addb1c5d6f1475a | 6,577 |
def get_ssh_dispatcher(connection, context):
"""
:param Message context: The eliot message context to log.
:param connection: The SSH connection run commands on.
"""
@deferred_performer
def perform_run(dispatcher, intent):
context.bind(
message_type="flocker.provision.ssh:run",
command=intent.log_command_filter(intent.command),
).write()
endpoint = SSHCommandClientEndpointWithTTY.existingConnection(
connection, intent.command)
d = Deferred()
connectProtocol(endpoint, CommandProtocol(
deferred=d, context=context))
return d
return TypeDispatcher({
Run: perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: perform_comment,
}) | 1cb965c4e175276672173d5696e3196da5725fce | 6,578 |
def read_ac(path, cut_off, rnalen):
"""Read the RNA accessibility file and output its positions and values
The file should be a simple table with two columns:
The first column is the position and the second one is the value
'#' will be skipped
"""
access = []
with open(path) as f:
i = 0
while i < rnalen:
for line in f:
line = line.split()
if not line:
continue
elif line[0][0] == "#":
continue
elif len(line) < 2:
continue
else:
v = line[1]
if v == "NA":
access.append(0)
else:
try:
v = 2 ** (-float(v))
except:
continue
if v >= cut_off:
access.append(1)
else:
access.append(0)
i += 1
return access | 0a8b6c2ff6528cf3f21d3b5efce14d59ff8ad2b6 | 6,579 |
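# Usage sketch (illustrative): a two-column file where the second column holds
# -log2 accessibility values; 2**(-value) >= cut_off maps to 1, anything else
# (including "NA") maps to 0.
with open("acc.txt", "w") as f:
    f.write("# pos\tvalue\n1\t0.1\n2\t5.0\n3\tNA\n")
read_ac("acc.txt", cut_off=0.5, rnalen=3)  # -> [1, 0, 0]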
def subtableD0(cxt: DecoderContext, fmt: Format):
""" ORI """
fmt = FormatVI(fmt)
return MNEM.ORI, [Imm(fmt.imm16, width=16, signed=False), Reg(fmt.reg1), Reg(fmt.reg2)], 2 | 2bb307bd74568745b7f453365f7667c383cae9ff | 6,580 |
from datetime import datetime, timezone
def format_date(unix_timestamp):
""" Return a standardized date format for use in the two1 library.
This function produces a localized datetime string that includes the UTC timezone offset. This offset is
    computed as the difference between the local version of the timestamp (python's datetime.fromtimestamp)
and the utc representation of the input timestamp.
Args:
unix_timestamp (float): a floating point unix timestamp
Returns:
string: A string formatted with "%Y-%m-%d %H:%M:%S %Z"
"""
local_datetime = datetime.fromtimestamp(unix_timestamp)
utz_offset = local_datetime - datetime.utcfromtimestamp(unix_timestamp)
local_date = local_datetime.replace(
tzinfo=timezone(utz_offset)
).strftime("%Y-%m-%d %H:%M:%S %Z")
return local_date | cc1a6ee0c604e14f787741ff2cb0e118134c9b92 | 6,581 |
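# Usage sketch (illustrative): format the Unix epoch; the exact string depends on
# the machine's local timezone.
format_date(0)  # e.g. '1970-01-01 00:00:00 UTC' on a UTC machine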
import copy
from operator import and_
def or_(kb, goals, substitutions=dict(), depth=0, mask=None,
k_max=None, max_depth=1):
"""Base function of prover, called recursively.
Calls and_, which in turn calls or_, in order to recursively calculate scores for every possible proof in proof
tree.
Args:
kb: dict of facts / rules
goals: goal to be proved
substitutions: dict which contains current variable substitutions and scores of current proof path
depth: current proof depth
mask: mask to apply so that goal facts (which are drawn from kb) cannot be proved by unifying with themselves
k_max: number of fact unifications to retain from unifications with all facts in kb
max_depth: maximum allowed proof depth before termination
Returns:
List of proof paths of goal with corresponding scores
"""
proofs = []
# initialize history and substitutions as empty
if substitutions == {}:
substitutions['VARSUBS'] = {}
substitutions['HISTORY'] = []
for struct in kb:
# avoid fake added struct
if struct == 'goal':
continue
# Check if struct order matches
if len(struct[0]) != len(goals):
continue
rule = rule_struct_form(kb[struct], struct)
head = substitute(rule[0], substitutions, kb)
body = rule[1:]
mask_id = None
if mask is not None:
mask_key, mask_id = mask
mask_id = mask_id if mask_key == struct else None
is_fact = len(struct) == 1 and all([not is_variable(x)
for x in struct[0]])
if not is_fact and depth == max_depth:
# maximum depth reached
continue
# rule has been applied before
elif applied_before(rule, substitutions, kb):
continue
substitutions_copy = copy.deepcopy(substitutions)
substitutions_copy['HISTORY'].append([struct, depth])
substitutions_ = unify(head, goals, substitutions_copy, kb, depth, mask_id,
transpose=is_fact)
if is_fact and k_max is not None:
new_success, success_indices = tf.nn.top_k(substitutions_["SUCCESS"], k_max)
substitutions_["SUCCESS"] = new_success
for value in substitutions_['VARSUBS'].values():
if value['struct'] != 'goal' and not 'subset' in value:
value['subset'] = success_indices
if substitutions_ != 'FAILURE':
proof = and_(kb, body, substitutions_, depth, mask, k_max=k_max, max_depth=max_depth)
if not isinstance(proof, list):
proof = [proof]
else:
proof = flatten_proofs(proof)
for proof_substitutions in proof:
if proof_substitutions != 'FAILURE':
proofs.append(proof_substitutions)
return flatten_proofs(proofs) | d19382167143ffc3b5267fda126cc4f8d45fc86c | 6,582 |
from copy import copy
def print_term(thy, t):
"""More sophisticated printing function for terms. Handles printing
of operators.
Note we do not yet handle name collisions in lambda terms.
"""
def get_info_for_operator(t):
return thy.get_data("operator").get_info_for_fun(t.head)
def get_priority(t):
if nat.is_binary(t) or hol_list.is_literal_list(t):
return 100 # Nat atom case
elif t.is_comb():
op_data = get_info_for_operator(t)
if op_data is not None:
return op_data.priority
elif t.is_all() or logic.is_exists(t) or logic.is_if(t):
return 10
else:
return 95 # Function application
elif t.is_abs():
return 10
else:
return 100 # Atom case
def helper(t, bd_vars):
LEFT, RIGHT = OperatorData.LEFT_ASSOC, OperatorData.RIGHT_ASSOC
# Some special cases:
# Natural numbers:
if nat.is_binary(t):
return N(str(nat.from_binary(t)))
if hol_list.is_literal_list(t):
items = hol_list.dest_literal_list(t)
res = N('[') + commas_join(helper(item, bd_vars) for item in items) + N(']')
if hasattr(t, "print_type"):
return N("(") + res + N("::") + print_type(thy, t.T) + N(")")
else:
return res
if set.is_literal_set(t):
empty_set = "∅" if settings.unicode() else "{}"
if hasattr(t, "print_type"):
return N("(") + N(empty_set) + N("::") + print_type(thy, t.T) + N(")")
else:
return N(empty_set)
if logic.is_if(t):
P, x, y = t.args
return N("if ") + helper(P, bd_vars) + N(" then ") + helper(x, bd_vars) + \
N(" else ") + helper(y, bd_vars)
if t.is_var():
return V(t.name)
elif t.is_const():
if hasattr(t, "print_type") and t.print_type:
return N("(" + t.name + "::") + print_type(thy, t.T) + N(")")
else:
return N(t.name)
elif t.is_comb():
op_data = get_info_for_operator(t)
# First, we take care of the case of operators
if op_data and op_data.arity == OperatorData.BINARY and t.is_binop():
arg1, arg2 = t.args
# Obtain output for first argument, enclose in parenthesis
# if necessary.
if (op_data.assoc == LEFT and get_priority(arg1) < op_data.priority or
op_data.assoc == RIGHT and get_priority(arg1) <= op_data.priority):
str_arg1 = N("(") + helper(arg1, bd_vars) + N(")")
else:
str_arg1 = helper(arg1, bd_vars)
if settings.unicode() and op_data.unicode_op:
str_op = N(' ' + op_data.unicode_op + ' ')
else:
str_op = N(' ' + op_data.ascii_op + ' ')
# Obtain output for second argument, enclose in parenthesis
# if necessary.
if (op_data.assoc == LEFT and get_priority(arg2) <= op_data.priority or
op_data.assoc == RIGHT and get_priority(arg2) < op_data.priority):
str_arg2 = N("(") + helper(arg2, bd_vars) + N(")")
else:
str_arg2 = helper(arg2, bd_vars)
return str_arg1 + str_op + str_arg2
# Unary case
elif op_data and op_data.arity == OperatorData.UNARY:
if settings.unicode() and op_data.unicode_op:
str_op = N(op_data.unicode_op)
else:
str_op = N(op_data.ascii_op)
if get_priority(t.arg) < op_data.priority:
str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
else:
str_arg = helper(t.arg, bd_vars)
return str_op + str_arg
# Next, the case of binders
elif t.is_all():
all_str = "!" if not settings.unicode() else "∀"
if hasattr(t.arg, "print_type"):
var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
else:
var_str = B(t.arg.var_name)
body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
return N(all_str) + var_str + N(". ") + body_repr
elif logic.is_exists(t):
exists_str = "?" if not settings.unicode() else "∃"
if hasattr(t.arg, "print_type"):
var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
else:
var_str = B(t.arg.var_name)
body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
return N(exists_str) + var_str + N(". ") + body_repr
# Function update
elif function.is_fun_upd(t):
f, upds = function.strip_fun_upd(t)
upd_strs = [helper(a, bd_vars) + N(" := ") + helper(b, bd_vars) for a, b in upds]
return N("(") + helper(f, bd_vars) + N(")(") + commas_join(upd_strs) + N(")")
# Finally, usual function application
else:
if get_priority(t.fun) < 95:
str_fun = N("(") + helper(t.fun, bd_vars) + N(")")
else:
str_fun = helper(t.fun, bd_vars)
if get_priority(t.arg) <= 95:
str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
else:
str_arg = helper(t.arg, bd_vars)
return str_fun + N(" ") + str_arg
elif t.is_abs():
lambda_str = "%" if not settings.unicode() else "λ"
if hasattr(t, "print_type"):
var_str = B(t.var_name) + N("::") + print_type(thy, t.var_T)
else:
var_str = B(t.var_name)
body_repr = helper(t.body, [t.var_name] + bd_vars)
return N(lambda_str) + var_str + N(". ") + body_repr
elif t.is_bound():
if t.n >= len(bd_vars):
raise OpenTermException
else:
return B(bd_vars[t.n])
else:
raise TypeError()
t = copy(t) # make copy here, because infer_printed_type may change t.
infertype.infer_printed_type(thy, t)
res = helper(t, [])
if settings.highlight():
res = optimize_highlight(res)
return res | 745b378dac77411ba678911c478b6f5c8915c762 | 6,583 |
def build_model():
"""Builds the model."""
return get_model()() | f843bce4edf099efd138a198f12c392aa2e723cd | 6,584 |
def truncate_field_data(model, data):
"""Truncate all data fields for model by its ``max_length`` field
attributes.
:param model: Kind of data (A Django Model instance).
:param data: The data to truncate.
"""
fields = dict((field.name, field) for field in model._meta.fields)
return dict((name, truncate_by_field(fields[name], value))
for name, value in data.items()) | 3f0c77d279e712258d3a064ca9fed06cd64d9eaf | 6,585 |
import io
def get_all_students(zip):
"""Returns student tuple for all zipped submissions found in the zip file."""
students = []
# creating all the student objects that we can zip files of
for filename in zip.namelist():
if not filename.endswith(".zip"):
continue
firstname, surname = split_zipname(filename)
student_zip_data = io.BytesIO(zip.open(filename).read())
student_zipfile = zf.ZipFile(student_zip_data)
students.append(Student(firstname, surname, student_zipfile))
return students | d5088ecf43275664e8420f30f508e70fad7cef77 | 6,586 |
def is_shipping_method_applicable_for_postal_code(
customer_shipping_address, method
) -> bool:
"""Return if shipping method is applicable with the postal code rules."""
results = check_shipping_method_for_postal_code(customer_shipping_address, method)
if not results:
return True
if all(
map(
lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.INCLUDE,
results.keys(),
)
):
return any(results.values())
if all(
map(
lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.EXCLUDE,
results.keys(),
)
):
return not any(results.values())
# Shipping methods with complex rules are not supported for now
return False | cca519a35ab01dddac71ac18bdf8a40e1b032b83 | 6,587 |
def populate_api_servers():
""" Find running API servers. """
def api_server_info(entry):
prefix, port = entry.rsplit('-', 1)
project_id = prefix[len(API_SERVER_PREFIX):]
return project_id, int(port)
global api_servers
monit_entries = yield monit_operator.get_entries()
server_entries = [api_server_info(entry) for entry in monit_entries
if entry.startswith(API_SERVER_PREFIX)]
for project_id, port in server_entries:
api_servers[project_id] = port | 0543e350917c3fe419022aebdd9002098021923a | 6,588 |
import torch
def gen_classification_func(model, *, state_file=None, transform=None, pred_func=None,
device=None):
""" 工厂函数,生成一个分类器函数
用这个函数做过渡的一个重要目的,也是避免重复加载模型
:param model: 模型结构
:param state_file: 存储参数的文件
:param transform: 每一个输入样本的预处理函数
:param pred_func: model 结果的参数的后处理
:return: 返回的函数结构见下述 cls_func
"""
if state_file: model.load_state_dict(torch.load(str(state_file), map_location=get_device()))
model.train(False)
device = device or get_device()
model.to(device)
def cls_func(raw_in):
"""
        :param raw_in: the input can be a path, np.ndarray, PIL image, etc.; it is converted to a batched tensor
            im: a single image path, np.ndarray or PIL image
            [im1, im2, ...]: a list of several images
        :return: if the input is a single image, a single result is returned;
            otherwise a list with one result per image is returned
"""
dataset = InputDataset(raw_in, transform)
        # TODO: adapt batch_size to the available device memory
xs = torch.utils.data.DataLoader(dataset, batch_size=8)
res = None
for x in xs:
            # each batch can be large, so batches are moved to the device (CUDA) one at a time instead of all at once
x = x.to(device)
y = model(x)
if pred_func: y = pred_func(y)
res = y if res is None else (res + y)
return res
return cls_func | 4d905dfc9bd330cd68a2cc78b121debe04669720 | 6,589 |
def create_recipe_json(image_paths: list) -> dict:
"""
Orchestrate the various services to respond to a create recipe request.
"""
logger.info('Creating recipe json from image paths: {}'.format(image_paths))
full_text = load_images_return_text(image_paths)
recipe_json = assign_text_to_recipe(full_text)
return recipe_json | 25ef26d15bf20384df81f46da519c07ea883d5a7 | 6,590 |
def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname | 281ff6dcfae1894dd4685acf433bde89538fe87e | 6,591 |
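# Usage sketch (illustrative): trailing lane/extension fragments are stripped one at a time.
rstrip_extra("Test1_fastq.")  # -> 'Test1'
rstrip_extra("sample-_R")     # -> 'sample'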
def run(preprocessors, data, preprocessing=defaultdict(lambda: None), fit=True):
"""Applies preprocessing to data. It currently suppoerts StandardScaler and
OneHotEncoding
Parameters
----------
preprocessors : list
preprocessors to be applied
data : pd.DataFrame
data to be preprocessed
preprocessing : dict, optional
encoders of each preprocessor, by default defaultdict(lambda: None)
fit : bool, optional
if False, it applies to current encoder, by default True
Returns
-------
pd.DataFrame dict
preprocessed data and preprocessors used
"""
scaler_to_data_type = {
'StandardScaler': 'numeric',
'OneHotEncoder': 'object'}
if len(preprocessors) == 0:
return data, preprocessing
preprocessor = preprocessors[0]
data_type = scaler_to_data_type[preprocessor]
splited_data = split_by_type(data)
splited_data[data_type], preprocessing[preprocessor] = \
apply_preprocessor(splited_data[data_type],
preprocessor,
fit=fit,
encoder=preprocessing[preprocessor])
processed_data = pd.concat(splited_data.values(), axis=1)
return run(preprocessors[1:], processed_data, preprocessing, fit) | 94b10007896062760a278cdeaf60388152c96f73 | 6,592 |
def vouchers_tab(request, voucher_group, deleted=False, template_name="manage/voucher/vouchers.html"):
"""Displays the vouchers tab
"""
vouchers = voucher_group.vouchers.all()
paginator = Paginator(vouchers, 20)
page = paginator.page((request.POST if request.method == 'POST' else request.GET).get("page", 1))
taxes = Tax.objects.all()
if (request.method == "POST") and (deleted is False):
voucher_form = VoucherForm(data=request.POST)
else:
voucher_form = VoucherForm()
return render_to_string(template_name, request=request, context={
"voucher_group": voucher_group,
"taxes": taxes,
"form": voucher_form,
"vouchers_inline": vouchers_inline(request, voucher_group, vouchers, paginator, page),
}) | f488c21a83b6b22e3c0e4d5fa2e35156435bada7 | 6,593 |
import torch
def normalise_intensity(x,
mode="minmax",
min_in=0.0,
max_in=255.0,
min_out=0.0,
max_out=1.0,
clip=False,
clip_range_percentile=(0.05, 99.95),
):
"""
Intensity normalisation (& optional percentile clipping)
for both Numpy Array and Pytorch Tensor of arbitrary dimensions.
The "mode" of normalisation indicates different ways to normalise the intensities, including:
1) "meanstd": normalise to 0 mean 1 std;
2) "minmax": normalise to specified (min, max) range;
3) "fixed": normalise with a fixed ratio
Args:
x: (ndarray / Tensor, shape (N, *size))
mode: (str) indicate normalisation mode
min_in: (float) minimum value of the input (assumed value for fixed mode)
max_in: (float) maximum value of the input (assumed value for fixed mode)
min_out: (float) minimum value of the output
max_out: (float) maximum value of the output
clip: (boolean) value clipping if True
clip_range_percentile: (tuple of floats) percentiles (min, max) to determine the thresholds for clipping
Returns:
x: (same as input) in-place op on input x
"""
# determine data dimension
dim = x.ndim - 1
image_axes = tuple(range(1, 1 + dim)) # (1,2) for 2D; (1,2,3) for 3D
# for numpy.ndarray
if type(x) is np.ndarray:
# Clipping
if clip:
# intensity clipping
clip_min, clip_max = np.percentile(x, clip_range_percentile, axis=image_axes, keepdims=True)
x = np.clip(x, clip_min, clip_max)
# Normalise meanstd
if mode == "meanstd":
mean = np.mean(x, axis=image_axes, keepdims=True) # (N, *range(dim))
std = np.std(x, axis=image_axes, keepdims=True) # (N, *range(dim))
x = (x - mean) / std # axis should match & broadcast
# Normalise minmax
elif mode == "minmax":
min_in = np.amin(x, axis=image_axes, keepdims=True) # (N, *range(dim))
max_in = np.amax(x, axis=image_axes, keepdims=True) # (N, *range(dim)))
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12) # (!) multiple broadcasting)
# Fixed ratio
elif mode == "fixed":
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)
else:
raise ValueError("Intensity normalisation mode not understood."
"Expect either one of: 'meanstd', 'minmax', 'fixed'")
# cast to float 32
x = x.astype(np.float32)
# for torch.Tensor
elif type(x) is torch.Tensor:
# todo: clipping not supported at the moment (requires Pytorch version of the np.percentile()
# Normalise meanstd
if mode == "meanstd":
mean = torch.mean(x, dim=image_axes, keepdim=True) # (N, *range(dim))
std = torch.std(x, dim=image_axes, keepdim=True) # (N, *range(dim))
x = (x - mean) / std # axis should match & broadcast
# Normalise minmax
elif mode == "minmax":
# get min/max across dims by flattening first
min_in = x.flatten(start_dim=1, end_dim=-1).min(dim=1)[0].view(-1, *(1,)*dim) # (N, (1,)*dim)
max_in = x.flatten(start_dim=1, end_dim=-1).max(dim=1)[0].view(-1, *(1,)*dim) # (N, (1,)*dim)
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12) # (!) multiple broadcasting)
# Fixed ratio
elif mode == "fixed":
x = (x - min_in) * (max_out - min_out) / (max_in - min_in + 1e-12)
else:
raise ValueError("Intensity normalisation mode not recognised."
"Expect: 'meanstd', 'minmax', 'fixed'")
# cast to float32
x = x.float()
else:
raise TypeError("Input data type not recognised, support numpy.ndarray or torch.Tensor")
return x | 4dace59c8c12dda2c01c6c8a4bcd631b1e562c48 | 6,594 |
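# Usage sketch (illustrative): min-max normalise a batch of two 2-D images to [0, 1]
# with NumPy; each image in the batch is scaled independently.
import numpy as np
x = np.random.uniform(0, 255, size=(2, 64, 64))
x_norm = normalise_intensity(x, mode="minmax", min_out=0.0, max_out=1.0)
x_norm.min(), x_norm.max()  # approximately (0.0, 1.0)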
def spots_rmsd(spots):
""" Calculate the rmsd for a series of small_cell_spot objects
@param list of small_cell_spot objects
@param RMSD (pixels) of each spot
"""
rmsd = 0
count = 0
    print('Spots with no preds', [spot.pred is None for spot in spots].count(True), 'of', len(spots))
for spot in spots:
if spot.pred is None:
continue
rmsd += measure_distance(col((spot.spot_dict['xyzobs.px.value'][0],spot.spot_dict['xyzobs.px.value'][1])),col(spot.pred))**2
count += 1
if count == 0: return 0
return math.sqrt(rmsd/count) | 13809a7a0353dc18b037cd2d78944ed5f9cdc596 | 6,595 |
def sanitize_df(data_df, schema, setup_index=True, missing_column_procedure='fill_zero'):
""" Sanitize dataframe according to provided schema
Returns
-------
data_df : pandas DataFrame
Will have fields provided by schema
Will have field types (categorical, datetime, etc) provided by schema.
"""
data_df = data_df.reset_index()
for ff, field_name in enumerate(schema.field_names):
type_ff = schema.fields[ff].descriptor['type']
if field_name not in data_df.columns:
if missing_column_procedure == 'fill_zero':
if type_ff == 'integer':
data_df[field_name] = 0
elif type_ff == 'number':
data_df[field_name] = 0.0
# Reorder columns to match schema
data_df = data_df[schema.field_names]
# Cast fields to required type (categorical / datetime)
for ff, name in enumerate(schema.field_names):
ff_spec = schema.descriptor['fields'][ff]
if 'pandas_dtype' in ff_spec and ff_spec['pandas_dtype'] == 'category':
data_df[name] = data_df[name].astype('category')
elif 'type' in ff_spec and ff_spec['type'] == 'datetime':
data_df[name] = pd.to_datetime(data_df[name])
if hasattr(schema, 'primary_key'):
data_df = data_df.sort_values(schema.primary_key)
if setup_index:
data_df = data_df.set_index(schema.primary_key)
return data_df | 8664f9dd8feea60044397072d85d21c8b5dfd6d4 | 6,596 |
def get_id_argument(id_card):
"""
    Extract the component fields of a (Chinese resident) ID card number
:param id_card:
:return:
"""
id_card = id_card.upper()
id_length = len(id_card)
if id_length == 18:
code = {
'body': id_card[0:17],
'address_code': id_card[0:6],
'birthday_code': id_card[6:14],
'order_code': id_card[14:17],
'check_bit': id_card[17:18],
'type': 18
}
else:
code = {
'body': id_card,
'address_code': id_card[0:6],
'birthday_code': '19' + id_card[6:12],
'order_code': id_card[12:15],
'check_bit': '',
'type': 15
}
return code | ae4cad97e787fe1b0697b6a0f842f0da09795d6a | 6,597 |
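# Usage sketch (illustrative) with a synthetic, non-real 18-digit ID number:
get_id_argument("11010520000101123x")
# -> {'body': '11010520000101123', 'address_code': '110105', 'birthday_code': '20000101',
#     'order_code': '123', 'check_bit': 'X', 'type': 18}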
def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size) | e8ea95bb742891037de264462be168fab9d68923 | 6,598 |
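# Usage sketch (illustrative): draw 5 variates, including the degenerate edge cases
# handled explicitly above (n == 0 gives all zeros, n == N gives all m).
rhypergeometric(n=5, m=10, N=50, size=5)
rhypergeometric(n=0, m=10, N=50, size=5)   # -> array of zeros
rhypergeometric(n=50, m=10, N=50, size=5)  # -> array filled with 10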
from typing import Tuple
def _validate_query(
hgnc_manager: HgncManager,
query_result,
original_identifier: str,
original_namespace: str,
) -> Tuple[str, str, str]:
"""Process and validate HGNC query.
:param hgnc_manager: hgnc manager
:param query_result:
:param original_identifier:
:param original_namespace:
"""
# If invalid entry from HGNC, try to find updated symbol
if not query_result and original_namespace == HGNC:
return _get_update_alias_symbol(hgnc_manager, original_identifier, HGNC)
# Invalid entry, proceed with invalid identifier
if not query_result:
        logger.debug('No HGNC symbol found for id %s in (%s)', original_identifier, original_namespace)
return original_namespace, original_identifier, original_identifier
# Multiple entries are returned, for UniProt identifiers
if isinstance(query_result, list):
if len(query_result) > 1:
logger.debug('UniProt identifier with multiple HGNC:s %s', query_result)
query_result = query_result[0]
# Correct entry, use HGNC identifier
return HGNC, query_result.symbol, query_result.identifier | 0dd516d3f1a1a178015835ee1dc72b657757d789 | 6,599 |