content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import hashlib
def check_media(url):
"""Check if something is available or has a new hash
Checks if url is available, uf yes, download and hash it, then see if it has changed
Args:
url: A complete url to something
Returns:
0 if available and no change.
1 if not available.
2 if it has changed
"""
media = http.download_something(url)
# If failed to download
if not media:
return 1
# Hash media
hashed_media = hashlib.sha512(media).hexdigest() | f7237207a7ff6e555533cebe4dc83fa77538886c | 17,000 |
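The record above is truncated after the hashing step. A minimal sketch of how the comparison against a previously stored hash might look, assuming a hypothetical `previous_hashes` mapping and the same (assumed) `http.download_something` helper:

```python
import hashlib

def check_media_sketch(url, previous_hashes):
    """Hypothetical completion: 0 = unchanged, 1 = unavailable, 2 = changed."""
    media = http.download_something(url)  # helper assumed from the snippet above
    if not media:
        return 1
    hashed_media = hashlib.sha512(media).hexdigest()
    if previous_hashes.get(url) == hashed_media:
        return 0
    previous_hashes[url] = hashed_media
    return 2
```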
import random
def breed(tree1, tree2):
"""My breeding function.
Basically makes a copy of tree1, and swaps sub-trees with tree2 at
a random depth. Pretty much relies on my simplistic tree structure.
I have no fucking clue if this will work. I can't even debug it since
I have no way of printing my tree.
Right now it can only swap sub-trees, which kinda sucks but the
alternative is a far more complex algorithm than I have time for.
"""
cpy = tree1.copy()
start_depth = random.randint(0, MAX_DEPTH-2)
node1_parent = cpy.get_left_node_at_depth(start_depth)
node2 = tree2.get_left_node_at_depth(start_depth+1)
node1_parent.left = node2
return cpy | 56888f068375c6e35caa80cc40752a0fae71047d | 17,001 |
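For context, a minimal sketch of the simplistic tree structure that `breed` appears to assume (a hypothetical `Node` class; the real project's `copy` and `get_left_node_at_depth` may differ):

```python
import copy

class Node:
    """Hypothetical binary-tree node with the two methods `breed` relies on."""
    def __init__(self, value, left=None, right=None):
        self.value, self.left, self.right = value, left, right

    def copy(self):
        return copy.deepcopy(self)

    def get_left_node_at_depth(self, depth):
        # Walk down the left spine; `breed` replaces the `.left` child of the
        # node returned here with a sub-tree taken from the other parent.
        node = self
        for _ in range(depth):
            node = node.left
        return node
```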
from typing import Iterable
from typing import Any
from typing import Tuple
def tuple_from_iterable(val: Iterable[Any]) -> Tuple[Any, ...]:
"""Builds a tuple from an iterable.
Workaround for https://github.com/python-attrs/attrs/issues/519
"""
return tuple(val) | 7880b1395f14aa690f967b9548456105b544d337 | 17,002 |
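A small usage sketch of the workaround with `attrs` (the class and field names are illustrative):

```python
import attr

@attr.s(auto_attribs=True)
class Config:
    # Using the converter sidesteps attrs issue #519 with Tuple[...] annotations.
    values: tuple = attr.ib(converter=tuple_from_iterable, default=())

print(Config(values=[1, 2, 3]).values)  # (1, 2, 3)
```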
from collections import Counter
from torchtext.vocab import Vocab
def merge_vocabs(vocabs, vocab_size=None):
"""
Merge individual vocabularies (assumed to be generated from disjoint
documents) into a larger vocabulary.
Args:
vocabs: `torchtext.vocab.Vocab` vocabularies to be merged
vocab_size: `int` the final vocabulary size. `None` for no limit.
Return:
`torchtext.vocab.Vocab`
"""
merged = sum([vocab.freqs for vocab in vocabs], Counter())
return Vocab(merged, specials=['<pad>','<unk>','<sep>','<sos>','<eos>'], vectors = 'fasttext.en.300d') | db83e858c1a8910b382bcd485923ef6ba9a1466e | 17,003 |
def read_g_char(in_name, pop="ESP", debug=False):
"""
Read charges and energy from a Gaussian log file.
Parameters
----------
in_name : str
Name of the file to read
pop : str, optional
Kind of charge to read, mulliken or esp
debug : bool, optional
Return extra energy information. Turn on with care
Returns
-------
charges : list of floats
Each partial charge value in the file
energy : float
Gaussian calculated energy in Hartree
char_ener : float
Self energy of the point charges
n_char : float
Nuclei-charge interaction energy
"""
with open(in_name) as gauss_file:
content = gauss_file.readlines()
# find last occurrence of Mulliken charges
if pop.lower() == "mulliken":
last_mull = len(content) - 1 - \
content[::-1].index(" Mulliken charges:\n")
elif pop.lower() == "esp" or pop.lower() == "resp":
last_mull = len(content) - 1 - \
content[::-1].index(" ESP charges:\n")
charges = []
for line in content[last_mull + 2:]:
if line.split()[0].isdigit():
charges.append(float(line.split()[2]))
else:
break
# find each occurrence of Energy
for line in content:
if "SCF Done" in line:
energy = float(line.split()[4])
if "Total Energy" in line:
energy = float(line.split()[4])
if "Self energy of the charges" in line:
char_ener = float(line.split()[6])
if "Nuclei-charges interaction" in line:
n_char = float(line.split()[3])
if debug:
return charges, energy, char_ener, n_char
else:
return charges, energy | 97e0e3433a8b6966335adc8e4cc66154a8470138 | 17,004 |
import numpy as np
def count_nonzero(X, axis=None, sample_weight=None):
"""A variant of X.getnnz() with extension to weighting on axis 0
Useful in efficiently calculating multilabel metrics.
Parameters
----------
X : sparse matrix of shape (n_samples, n_labels)
Input data. It should be of CSR format.
axis : {0, 1}, default=None
The axis on which the data is aggregated.
sample_weight : array-like of shape (n_samples,), default=None
Weight for each row of X.
"""
if axis == -1:
axis = 1
elif axis == -2:
axis = 0
elif X.format != 'csr':
raise TypeError('Expected CSR sparse format, got {0}'.format(X.format))
# We rely here on the fact that np.diff(Y.indptr) for a CSR
# will return the number of nonzero entries in each row.
# A bincount over Y.indices will return the number of nonzeros
# in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.
if axis is None:
if sample_weight is None:
return X.nnz
else:
return np.dot(np.diff(X.indptr), sample_weight)
elif axis == 1:
out = np.diff(X.indptr)
if sample_weight is None:
# astype here is for consistency with axis=0 dtype
return out.astype('intp')
return out * sample_weight
elif axis == 0:
if sample_weight is None:
return np.bincount(X.indices, minlength=X.shape[1])
else:
weights = np.repeat(sample_weight, np.diff(X.indptr))
return np.bincount(X.indices, minlength=X.shape[1],
weights=weights)
else:
raise ValueError('Unsupported axis: {0}'.format(axis)) | e6754cca480d626dd7ba2c96426e5eebf17a1fcb | 17,005 |
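A quick illustration with a small CSR matrix (the printed values follow directly from the logic above):

```python
import numpy as np
from scipy.sparse import csr_matrix

Y = csr_matrix(np.array([[1, 0, 1],
                         [0, 0, 1]]))
print(count_nonzero(Y))                          # 3 (total non-zeros)
print(count_nonzero(Y, axis=0))                  # [1 0 2] (non-zeros per column)
print(count_nonzero(Y, axis=1,
                    sample_weight=np.array([0.5, 2.0])))  # [1. 2.] (weighted rows)
```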
def isolate_shape_axis(base, target, axis_list = ['X','Y','Z']):
"""
Given a base mesh, only take axis movement on the target that is specified in axis_list.
Args:
base (str): The base mesh that has no targets applied.
target (str): The target mesh vertices moved to a different position than the base.
axis_list (list): The axes of movement allowed. If axis_list = ['X'], only vertex movement on x will be present in the result.
Returns:
str: A new mesh with verts moving only on the isolated axis.
"""
verts = cmds.ls('%s.vtx[*]' % target, flatten = True)
if not verts:
return
vert_count = len(verts)
axis_name = '_'.join(axis_list)
new_target = cmds.duplicate(target, n = '%s_%s' % (target, axis_name))[0]
for inc in range(0, vert_count):
base_pos = cmds.xform('%s.vtx[%s]' % (base, inc), q = True, t = True, ws = True)
target_pos = cmds.xform('%s.vtx[%s]' % (target, inc), q = True, t = True, ws = True)
if (base_pos == target_pos):
continue
small_x = False
small_y = False
small_z = False
if abs(base_pos[0]-target_pos[0]) < 0.0001:
small_x = True
if abs(base_pos[1]-target_pos[1]) < 0.0001:
small_y = True
if abs(base_pos[2]-target_pos[2]) < 0.0001:
small_z = True
if small_x and small_y and small_z:
continue
if not 'X' in axis_list:
target_pos[0] = base_pos[0]
if not 'Y' in axis_list:
target_pos[1] = base_pos[1]
if not 'Z' in axis_list:
target_pos[2] = base_pos[2]
cmds.xform('%s.vtx[%s]' % (new_target, inc), ws = True, t = target_pos)
return new_target | a097442c2c379338890e5571d0e3516553fe70f3 | 17,006 |
from typing import List
def _ge(t1: 'Tensor', t2: 'Tensor', isnew: bool) -> 'Tensor':
"""
Also see
--------
:param t1:
:param t2:
:param isnew:
:return:
"""
data = t1.data >= t2.data
requires_grad = t1.requires_grad or t2.requires_grad
depends_on: List[Dependency] = []
if t1.requires_grad:
def grad_fn1(grad: 'np.ndarray') -> 'np.ndarray':
# Manually set the gradient of this discontinuous function to zero.
return np.zeros_like(t1.data)
depends_on.append(Dependency(t1, grad_fn1))
if t2.requires_grad:
def grad_fn2(grad: 'np.ndarray') -> 'np.ndarray':
return np.zeros_like(t2.data)
depends_on.append(Dependency(t2, grad_fn2))
if isnew:
requires_grad = False
depends_on: List[Dependency] = []
return Tensor(data,
requires_grad,
depends_on) | 02b0407c3b2bc3ed6bf65555ab62257e3f041d0e | 17,007 |
from typing import Dict
from typing import Any
import logging
import numpy
import tensorflow_gnn as tfgnn
def convert_homogeneous_graph(graph: Dict[str, Any],
num_graphs: int,
output_dir: str):
"""Process a homogeneous graph."""
# NOTE(blais): We could in theory stash the data in the same format as their
# heterogeneous graphs in Python and just use convert_heterogeneous_graph().
# Gather node features.
logging.info("Processing node features")
num_nodes = graph.pop("num_nodes")
graph["node_#id"] = numpy.arange(num_nodes).astype(bytes)
node_features = extract_features(graph, "node", num_nodes)
filename = write_table(output_dir, "nodes", node_features, num_nodes)
node_features_dict = {}
node_features_dict["nodes"] = (filename, node_features)
# Gather edge features.
logging.info("Processing edge features")
indices = graph.pop("edge_index")
assert len(indices.shape) == 2
num_edges = indices.shape[1]
graph["edge_{}".format(tfgnn.SOURCE_NAME)] = indices[0].astype(bytes)
graph["edge_{}".format(tfgnn.TARGET_NAME)] = indices[1].astype(bytes)
# NOTE(blais): If external edge features are needed and each edge is
# unique, you can use this:
# graph["edge_#id"] = ["{}_{}".format(edge_index[0, i], edge_index[1, i])
# for i in range(num_edges)]
edge_features = extract_features(graph, "edge", num_edges)
filename = write_table(output_dir, "edges", edge_features, num_edges)
edge_features_dict = {}
edge_features_dict["edges"] = (filename, "nodes", "nodes", edge_features)
# Gather context features.
logging.info("Processing graph context features")
if num_graphs > 1:
graph_features = extract_features(graph, "graph", num_graphs)
filename = write_table(output_dir, "graph", graph_features, num_graphs)
context_features = (filename, graph_features)
else:
context_features = None
# Make sure we processed everything.
graph = remove_empty_dicts(graph)
if graph:
logging.error("Graph is not empty: %s", graph)
# Produce a corresponding graph schema.
logging.info("Producing graph schema")
return create_schema(context_features, node_features_dict, edge_features_dict) | 4aa0751437861af58159228c018a3f6d94b8613a | 17,008 |
def second_deriv_log_pdf(phi, alpha, beta, eps=1e-4):
"""Second derivative of `log_pdf` with respect to latitude."""
return (
log_pdf(phi + eps, alpha, beta)
- 2 * log_pdf(phi, alpha, beta)
+ log_pdf(phi - eps, alpha, beta)
) / eps ** 2 | 5df140d62466481997a472e260241961e872cbe3 | 17,009 |
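As a sanity check, with a hypothetical stand-in `log_pdf` that is quadratic in latitude, the central-difference formula above recovers the constant second derivative:

```python
def log_pdf(phi, alpha, beta):
    # Stand-in for illustration only: a standard-normal log-density in phi,
    # whose exact second derivative with respect to phi is -1 everywhere.
    return -0.5 * phi ** 2

print(second_deriv_log_pdf(0.3, alpha=None, beta=None))  # approximately -1.0
```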
from typing import Optional
from typing import Callable
from typing import Iterator
import uuid
import shutil
def create_map(
tag: Optional[str],
func: Callable,
args_and_kwargs: Iterator[ARGS_AND_KWARGS],
map_options: Optional[options.MapOptions] = None,
) -> maps.Map:
"""
All map calls lead here.
This function performs various checks on the ``tag``,
constructs a submit object that represents the map for HTCondor,
saves all of the map's definitional data to the map directory,
and submits the map job,
returning the map's :class:`Map`.
Parameters
----------
tag
The ``tag`` to assign to this map.
func
The function to map the arguments over.
args_and_kwargs
The arguments and keyword arguments to map over - the output of :func:`zip_args_and_kwargs`.
map_options
An instance of :class:`htmap.MapOptions`.
Returns
-------
map :
A :class:`htmap.Map` representing the map.
"""
if tag is None:
tag = tags.random_tag()
transient = True
else:
transient = False
tags.raise_if_tag_is_invalid(tag)
tags.raise_if_tag_already_exists(tag)
logger.debug(f"Creating map {tag} ...")
if map_options is None:
map_options = options.MapOptions()
uid = uuid.uuid4()
map_dir = map_dir_path(uid)
try:
make_map_dir_and_subdirs(map_dir)
transformed_args_and_kwargs, extra_input_files = transform_args_and_kwargs(args_and_kwargs)
num_components = len(transformed_args_and_kwargs)
if num_components == 0:
raise exceptions.EmptyMap("Cannot create a map with zero components")
if map_options.input_files is None and len(extra_input_files) > 0:
map_options.input_files = [[] for _ in range(len(extra_input_files))]
for tif, extra in zip(map_options.input_files, extra_input_files):
tif.extend(extra)
submit_obj, itemdata = options.create_submit_object_and_itemdata(
tag, map_dir, num_components, map_options,
)
logger.debug(f"Submit description for map {tag} is\n{submit_obj}")
logger.debug(f"First itemdatum for map {tag} is \n{pformat(itemdata[0])}")
logger.debug(f"Creating map directory for map {tag} ...")
with utils.Timer() as timer:
htio.save_func(map_dir, func)
htio.save_inputs(map_dir, transformed_args_and_kwargs)
htio.save_num_components(map_dir, num_components)
htio.save_submit(map_dir, submit_obj)
htio.save_itemdata(map_dir, itemdata)
logger.debug(f"Created map directory for map {tag} (took {timer.elapsed:.6f} seconds)")
logger.debug(f"Submitting map {tag}...")
tags.tag_file_path(tag).write_text(str(uid))
m = maps.Map(tag=tag, map_dir=map_dir,)
if transient:
m._make_transient()
m._submit()
if utils.is_interactive_session():
print(f"Created map {m.tag} with {len(m)} components")
return m
except BaseException as e:
# something went wrong during submission, and the job is malformed
# so delete the entire map directory
# the condor bindings should prevent any jobs from being submitted
logger.exception(f"Map submission for map {tag} aborted due to: {e}")
try:
tags.tag_file_path(tag).unlink()
except FileNotFoundError:
pass
shutil.rmtree(str(map_dir.absolute()))
logger.debug(f"Removed malformed map directory {map_dir}")
raise e | 6135d7dd0b6366d941ccbc8463644fb71a97f4b0 | 17,010 |
from collections import OrderedDict
def normalize_data(data, zp=25., zpsys='ab'):
"""Return a copy of the data with all flux and fluxerr values normalized
to the given zeropoint. Assumes data has already been standardized.
Parameters
----------
data : `~numpy.ndarray`
Structured array.
zp : float
zpsys : str
Returns
-------
normalized_data : `~numpy.ndarray`
"""
warn_once('standardize_data', '1.5', '2.0',
'This function not intended for public use; open an issue at '
'https://github.com/sncosmo/sncosmo/issues if you need this '
'functionality.')
normmagsys = get_magsystem(zpsys)
factor = np.empty(len(data), dtype=float)
for b in set(data['band'].tolist()):
idx = data['band'] == b
b = get_bandpass(b)
bandfactor = 10.**(0.4 * (zp - data['zp'][idx]))
bandzpsys = data['zpsys'][idx]
for ms in set(bandzpsys):
idx2 = bandzpsys == ms
ms = get_magsystem(ms)
bandfactor[idx2] *= (ms.zpbandflux(b) / normmagsys.zpbandflux(b))
factor[idx] = bandfactor
normalized_data = OrderedDict([('time', data['time']),
('band', data['band']),
('flux', data['flux'] * factor),
('fluxerr', data['fluxerr'] * factor),
('zp', zp),
('zpsys', zpsys)])
return dict_to_array(normalized_data) | 9aa3c4faf6f9a9f98afd9e11d2bafaf1b026519c | 17,011 |
import base64
from email.mime.text import MIMEText
def CreateMessage(sender, to, subject, message_text):
"""
Creates an object containing a base64url encoded email object.
"""
message = MIMEText(message_text)
message['to'] = to
message['from'] = sender
message['subject'] = subject
raw_message = base64.urlsafe_b64encode(message.as_bytes())
raw_message = raw_message.decode()
return {'raw': raw_message } | 8d55b64ebf4801781126f244441f619201d51190 | 17,012 |
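Usage sketch (the addresses are placeholders); the returned dict is the body shape expected by the Gmail API's `users().messages().send` call:

```python
msg = CreateMessage(
    sender="alice@example.com",
    to="bob@example.com",
    subject="Hello",
    message_text="Hi Bob!",
)
# With an authenticated Gmail API client, this would typically be sent as:
#   service.users().messages().send(userId="me", body=msg).execute()
```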
def bg_lookup(bg_name: str) -> str:
"""Look up ANSI escape codes based on background color name.
:param bg_name: background color name to look up ANSI escape code(s) for
:return: ANSI escape code(s) associated with this color
:raises ValueError: if the color cannot be found
"""
try:
ansi_escape = BG_COLORS[bg_name.lower()]
except KeyError:
raise ValueError('Background color {!r} does not exist.'.format(bg_name))
return ansi_escape | 8c520f599bc41ce847e5c602ddf8500fe366f24d | 17,013 |
def readData(f):
"""
Parse taxon count table (from count-taxon.py)
Parameters:
-----------
f : str
file name of taxon count table
Returns:
--------
tuple
a list of taxons and a list of their counts
"""
taxa_lis = []
num_lis = []
for n, line in enumerate(open(f)):
if line.startswith('#'):
continue
line = line.rstrip()
if line == '':
continue
taxa, num = line.split('\t')
skip = False
for word in EXCLUDE:
if word in taxa:
skip = True
break
if skip:
continue
taxa = taxa.rstrip(';')
lis = taxa.split(';')
lis2 = []
for item in lis:
item = item.strip()
if item.endswith(')'):
item = item.split('(')[0].strip()
# remove taxon level prefix, e.g. 'p__Firmicutes'
if '__' in item:
item = item.split('__', 1)[1]
#item = item.strip('"')
item = item.lower()
if 'unclassified' in item:
item = 'Unclassifed'
elif 'unknown' in item:
item = 'Unclassifed'
elif 'other' in item:
item = 'Unclassifed'
elif 'unassigned' in item:
item = 'Unclassifed'
item = item.capitalize()
lis2.append(item)
taxa_lis.append(lis2)
num_lis.append(float(num))
return taxa_lis, num_lis | fcd10e1d7dc1db0b871c7a4802f012eec43c08a9 | 17,014 |
import click
import sys
import requests
from configparser import ConfigParser, ExtendedInterpolation
def workspace_check(func):
"""
Decorator for confirming <workspace> is defined in the CONFIG_PATH (i.e. kaos workspace set has been run).
"""
def wrapper(*args, **kwargs):
config = ConfigParser(interpolation=ExtendedInterpolation())
config.read(CONFIG_PATH)
if 'pachyderm' not in config:
click.echo("{} - {} not defined - first run {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style("workspace", bold=True, fg='red'),
click.style("kaos workspace set", bold=True, fg='green')))
sys.exit(1)
# get base_url
base_url = config.get('backend', 'url')
current_workspace = config.get('pachyderm', 'workspace')
# GET all workspaces: /workspace
r = requests.get(f"{base_url}/workspace")
data = r.json()
workspaces_list = [v for v in data['names']]
if current_workspace not in workspaces_list:
click.echo("{} - Workspace {} has been {}. \n\n"
"Please ensure the kaos train/serve commands are run on an active workspace. \n\n"
"Check available workspaces with - {}".format(
click.style("Warning", bold=True, fg='yellow'),
click.style(current_workspace, bold=True, fg='green'),
click.style("deleted/killed", bold=True, fg='red'),
click.style("kaos workspace list", bold=True, fg='green')))
sys.exit(1)
func(*args, **kwargs)
return wrapper | 089236e5f2f19973dab8ca419efbb588ec37e3a7 | 17,015 |
import numpy as np
def detect_onsets_offsets(data, threshold, min_distance):
"""
Detects when a signal rises above the threshold and when it falls back below it.
"""
on = (data > threshold) # True where the data exceeds the threshold
left_on = np.concatenate(([0], on), axis=0)[0:-1]
onset = np.squeeze(np.where(on & (left_on != True)))
offset = np.squeeze(np.where((on != True) & (left_on == True)))
if data[-1] > threshold:
offset = np.append(offset, len(data)) # make sure there is an offset at some point...
if len(np.shape(onset)) < 1:
offset = [offset]
onset = [onset]
new_offset = []
new_onset = []
if len(onset) > 0:
new_onset.append(onset[0])
if len(onset) > 1:
for i in range(len(onset)-1):
if (onset[i+1] - offset[i]) > min_distance:
new_onset.append(onset[i+1])
new_offset.append(offset[i])
new_offset.append(offset[-1])
return new_onset, new_offset | faa81445828b72bc7d7433a4c2c8740bb36050bb | 17,016 |
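A small worked example (the printed values follow from the logic above):

```python
import numpy as np

signal = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0], dtype=float)
onsets, offsets = detect_onsets_offsets(signal, threshold=0.5, min_distance=1)
print(onsets, offsets)  # [2, 7] [4, 10]
```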
def STEPConstruct_PointHasher_IsEqual(*args):
"""
* Returns True when the two keys are the same. Two same keys must have the same hashcode, the contrary is not necessary.
:param Point1:
:type Point1: gp_Pnt
:param Point2:
:type Point2: gp_Pnt
:rtype: bool
"""
return _STEPConstruct.STEPConstruct_PointHasher_IsEqual(*args) | b3aa095d723203b05ea29ec4f5b34a70bc4c5276 | 17,017 |
import numpy as np
def step_smooth(x):
""" Smooth polynomial rising step from 0 (x=0) to 1 (x=1)
"""
return np.select([x>1, x>0], [1, 3*np.square(x)-2*np.power(x,3)], default=0) | ccf53e2561e256d2114510598ebb7a2ec1ce7cbd | 17,018 |
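For example, the step is clamped outside [0, 1] and symmetric around x = 0.5:

```python
import numpy as np

x = np.array([-0.5, 0.0, 0.25, 0.5, 0.75, 1.0, 1.5])
print(step_smooth(x))  # [0. 0. 0.15625 0.5 0.84375 1. 1.]
```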
def getBitSizeOfVarInt64(value):
"""
Gets bit size of variable 64-bit signed integer value.
:param value: Value to use for bit size calculation.
:returns: Bit size of the value.
"""
return _getBitSizeOfVarIntImpl(value, VARINT64_MAX_VALUES, signed=True) | 20c52df5ec9a00680e771f206319a02e3ba3de66 | 17,019 |
import functools
import numpy as np
def nan_if_exception(func):
"""Wrap func such that np.nan is returned if func raises an exception.
KeyboardInterrupt and SystemExit are still raised.
Examples:
>>> @nan_if_exception
... def f(x, y):
... assert x + y >= 5
>>> f(1, 2)
nan
>>> def f(x, y):
... assert x + y >= 5
>>> g = nan_if_exception(f)
>>> g(1, 2)
nan
"""
@functools.wraps(func)
def wrapper_nan_if_exception(params, *args, **kwargs):
try:
out = func(params, *args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
out = np.nan
return out
return wrapper_nan_if_exception | f03c314741c47805d767fc62fbce49cda9be35fe | 17,020 |
def get_client_public_key(patient_id, study_id):
"""Grabs a user's public key file from s3."""
key_pair_paths = construct_s3_key_paths(study_id, patient_id)
key = s3_retrieve(key_pair_paths['public'], study_id, raw_path=True)
return encryption.import_RSA_key( key ) | d6e6560c49f925a8f87a84829d632f67957f3c79 | 17,021 |
from typing import List
from typing import Tuple
def plot_offset_direction(
dsaimage: Image, coords: SkyCoord, ra_offsets: List[float], dec_offsets: List[float]
) -> Tuple["matplotlib.fig", "matplotlib.axes.Axes"]:
"""Plot measured offsets on an image."""
fig, ax = dsaimage.show()
dsaimage.add_arrows(coords, ra_offsets, dec_offsets)
return fig, ax | 24416863b795538ee8c86fa1949f6d701b66f28d | 17,022 |
import os
def get_file_path(mdir=None) -> str:
"""
makes user select a file using a TUI. `mdir` is the main starting directory which defaults to current
working directory.
.. note::
This clears screen a lot of times and might make your app ugly but
provides user with a easy way to choose files.
"""
if mdir is None:
mdir = os.getcwd()
mpath = os.path.abspath(mdir)
while True:
_print_tree(mpath)
f = input(">")
m = os.path.join(mpath, f)
if os.path.isfile(m):
_clear()
return m
elif os.path.isdir(m):
mpath = os.path.abspath(m)
_clear() | 09c6d49e116b732972a47359eee399e492cd9c8f | 17,023 |
def sensitivity_metric(event_id_1, event_id_2):
"""Determine similarity between two epochs, given their event ids."""
if event_id_1 == 1 and event_id_2 == 1:
return 0 # Completely similar
if event_id_1 == 2 and event_id_2 == 2:
return 0.5 # Somewhat similar
elif event_id_1 == 1 and event_id_2 == 2:
return 0.5 # Somewhat similar
elif event_id_1 == 2 and event_id_2 == 1:
return 0.5 # Somewhat similar
else:
return 1 | b04c5fa27ef655dd3f371c3ce6ef0410c55dd05b | 17,024 |
def duracion_promedio_peliculas(p1: dict, p2: dict, p3: dict, p4: dict, p5: dict) -> str:
"""Calcula la duracion promedio de las peliculas que entran por parametro.
Esto es, la duración total de todas las peliculas dividida sobre el numero de peliculas.
Retorna la duracion promedio en una cadena de formato 'HH:MM' ignorando los posibles decimales.
Parametros:
p1 (dict): Diccionario que contiene la informacion de la pelicula 1.
p2 (dict): Diccionario que contiene la informacion de la pelicula 2.
p3 (dict): Diccionario que contiene la informacion de la pelicula 3.
p4 (dict): Diccionario que contiene la informacion de la pelicula 4.
p5 (dict): Diccionario que contiene la informacion de la pelicula 5.
Retorna:
str: la duracion promedio de las peliculas en formato 'HH:MM'.
"""
# Extract the durations of the movies.
duracion1 = p1["duracion"]
duracion2 = p2["duracion"]
duracion3 = p3["duracion"]
duracion4 = p4["duracion"]
duracion5 = p5["duracion"]
# Average of the movie durations.
promedio = (duracion1 + duracion2 + duracion3 + duracion4 + duracion5) / 5
# Conversion to 'HH:MM' format.
horas = promedio // 60
minutos = promedio % 60
if horas < 10:
horas = '0' + str(int(horas))
else:
horas = str(int(horas))
if minutos < 10:
minutos = '0' + str(int(minutos))
else:
minutos = str(int(minutos))
return horas + ":" + minutos | a8cfcc96a43480ee6830cc212343a33148036c5d | 17,025 |
def _to_test_data(text):
"""
Lines should be of this format: <word> <normal_form> <tag>.
Lines that starts with "#" and blank lines are skipped.
"""
return [l.split(None, 2) for l in text.splitlines()
if l.strip() and not l.startswith("#")] | 8f0bae9f81d2d14b5654622f1493b23abd88424d | 17,026 |
import copy
def append(motion1, motion2):
"""
Combines two motion sequences into one. motion2 is appended to motion1.
The operation is not done in place.
Note that the operation places the sequences next to each other without
attempting to blend between the poses. To interpolate between the end of
motion1 and start of motion2, use the `append_and_blend` operation.
Args:
motion1, motion2: Motion sequences to be combined.
"""
assert isinstance(motion1, motion_class.Motion)
assert isinstance(motion2, motion_class.Motion)
assert motion1.skel.num_joints() == motion2.skel.num_joints()
combined_motion = copy.deepcopy(motion1)
combined_motion.name = f"{motion1.name}+{motion2.name}"
combined_motion.poses.extend(motion2.poses)
return combined_motion | dc51812f450a072ad283173a15fb2c07ae978e5b | 17,027 |
from datetime import datetime
from typing import List
def service(
fmt: SupportedFormats,
begints: datetime = Query(
..., description="Inclusive UTC timestamp window start for issuance."
),
endts: datetime = Query(
..., description="Exclusive UTC timestamp window end for issuance."
),
wfo: List[str] = Query(
None, description="WFO 3-letter codes for filter.", max_length=3
),
only_new: bool = Query(True, description="Only include issuance events."),
ph: List[str] = Query(
None, description="VTEC Phenomena 2-letter codes.", max_length=2
),
):
"""Replaced above."""
df = handler(begints, endts, wfo, only_new, ph)
return deliver_df(df, fmt) | eeb0a8b1187ff2386401440b6ddd812b81cd0fdd | 17,028 |
def cols_shuffled(expr_df, dist_df=None, algo="agno", seed=0):
""" Return a copy of the expr_df DataFrame with columns shuffled randomly.
:param pandas.DataFrame expr_df: the DataFrame to copy and shuffle
:param pandas.DataFrame dist_df: the distance DataFrame to inform us about distances between columns
:param str algo: Agnostic to distance ('agno') or distance aware ('dist')?
:param int seed: set numpy's random seed if desired
:returns: A copy of the expr_df DataFrame with columns shuffled.
"""
shuffled_df = expr_df.copy(deep=True)
np.random.seed(seed)
if algo == "agno":
shuffled_df.columns = np.random.permutation(expr_df.columns)
elif algo == "dist":
# Make a distance-similarity matrix, allowing us to characterize one well_id's distance-similarity to another.
diss = pd.DataFrame(data=np.corrcoef(dist_df.values), columns=dist_df.columns, index=dist_df.index)
# Old and new well_id indices
available_ids = list(expr_df.columns)
shuffled_well_ids = []
# For each well_id in the original list, replace it with another one as distance-similar as possible.
for well_id in list(expr_df.columns):
# Do we want to avoid same tissue-class?
# This algo allows for keeping the same well_id and doesn't even look at tissue-class.
# sort the distance-similarity by THIS well_id's column, but use corresponding index of well_ids
candidates = diss.sort_values(by=well_id, ascending=False).index
candidates = [x for x in candidates if x in available_ids]
if len(candidates) == 1:
candidate = candidates[0]
elif len(candidates) < 20:
candidate = np.random.permutation(candidates)[0]
else:
n_candidates = min(20, int(len(candidates) / 5.0))
candidate = np.random.permutation(candidates[:n_candidates])[0]
# We have our winner, save it to our new list and remove it from what's available.
shuffled_well_ids.append(candidate)
available_ids.remove(candidate)
shuffled_df.columns = shuffled_well_ids
else:
shuffled_df = pd.DataFrame()
# Column labels have been shuffled; return a dataframe with identically ordered labels and moved data.
return shuffled_df.loc[:, expr_df.columns], dict(zip(expr_df.columns, shuffled_df.columns)) | 37773c5219ecc92925c155e9d911c42ddbebc8ea | 17,029 |
import os
from PIL import Image
def verify(origin_dir, real_width, real_height, image_suffix):
"""
Verifique o tamanho da imagem
:return:
"""
if not os.path.exists(origin_dir):
print("[Aviso] O diretório {} não pode ser encontrado, ele será criado em breve".format(origin_dir))
os.makedirs(origin_dir)
print("Comece a verificar a coleção de fotos original")
# Imagem em tamanho real
real_size = (real_width, real_height)
# Lista e quantidade de nomes de fotos
img_list = os.listdir(origin_dir)
total_count = len(img_list)
print("O conjunto original de imagens compartilhadas: {}张".format(total_count))
# Lista de imagens inválida
bad_img = []
# Percorra todas as fotos para verificar
for index, img_name in enumerate(img_list):
file_path = os.path.join(origin_dir, img_name)
# Filtrar imagens com sufixos incorretos
if not img_name.endswith(image_suffix):
bad_img.append((index, img_name, "Sufixo de arquivo incorreto"))
continue
# Filtrar tags de imagem fora do padrão
prefix, posfix = img_name.split("_")
if prefix == "" or posfix == "":
bad_img.append((index, img_name, "O rótulo da imagem é anormal"))
continue
# A imagem não pode ser aberta normalmente
try:
img = Image.open(file_path)
except OSError:
bad_img.append((index, img_name, "A imagem não pode ser aberta normalmente"))
continue
# O tamanho da imagem está anormal
if real_size == img.size:
print("{} pass".format(index), end='\r')
else:
bad_img.append((index, img_name, "O tamanho da imagem está anormal:{}".format(img.size)))
print("====As seguintes {} imagens são anormais====".format(len(bad_img)))
if bad_img:
for b in bad_img:
print("[Foto {}] [{}] [{}]".format(b[0], b[1], b[2]))
else:
print("Nenhuma anormalidade encontrada(共 {} Fotos)".format(len(img_list)))
print("========end")
return bad_img | c51707aa9be9bf187c6ed19a8d534bb1085fe8e6 | 17,030 |
from typing import Dict
from typing import Any
import tensorflow as tf
from tensorflow.keras import layers as L
def build_model(task_description: Dict[str, Any]) -> Dict[str, Any]:
"""Build the predinet model."""
# ---------------------------
# Setup and process inputs
processors = {"image": process_image, "task_id": process_task_id}
mlp_inputs = utils.factory.create_input_layers(task_description, processors)
# ---------------------------
# Concatenate processed inputs
concat_in = next(iter(mlp_inputs["processed"].values()))
if len(mlp_inputs["processed"]) > 1:
concat_in = L.Concatenate()(list(mlp_inputs["processed"].values()))
# ---------------------------
for size, activation in zip(C["mlp_hidden_sizes"], C["mlp_hidden_activations"]):
concat_in = L.Dense(size, activation=activation)(concat_in)
predictions = L.Dense(task_description["output"]["num_categories"])(concat_in)
# ---------------------------
# Create model instance
model = tf.keras.Model(
inputs=mlp_inputs["input_layers"],
outputs=predictions,
name="mlp_image_classifier",
)
# ---------------------------
# Compile model for training
dataset_type = task_description["output"]["type"]
assert (
dataset_type == "binary"
), f"MLP image classifier requires a binary classification dataset, got {dataset_type}"
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.keras.metrics.BinaryAccuracy(name="acc")
# ---------------------------
return {"model": model, "loss": loss, "metrics": metrics} | 03c7951d3fb0fddbfb1e4bad3b4e5ce54253f994 | 17,031 |
def create_unet_model(N_classes, input_shape=(None, None, 1), dropout_rate=0.24, learning_rate=1e-5):
"""
Implementation of Unet mode for multiclass semantic segmentation
:param N_classes: Number of classes of segmentation map
:param input_shape: input image shape
:param dropout_rate: dropout rate
:return: a tuple of two models, first element is model to train and second is model to save
"""
# make sure the sizes are divisible by 16
if(input_shape[0] is not None): assert 16 * (input_shape[0] // 16) == input_shape[0], 'invalid dimension 0'
if( input_shape[1] is not None): assert 16 * (input_shape[1] // 16) == input_shape[1], 'invalid dimension 1'
in_image = Input(shape=input_shape)
conv0 = Conv2D(32, (3, 3), activation='relu', name='conv1_0', padding='same')(in_image)
conv1, x = conv_block_down(32, dropout_rate=dropout_rate ) (conv0)
conv2, x = conv_block_down(64, dropout_rate=dropout_rate ) (x)
conv3, x = conv_block_down(128, dropout_rate=dropout_rate )(x)
conv4, x = conv_block_down(256, dropout_rate=dropout_rate )(x)
x = conv_block(512, dropout_rate=dropout_rate ) (x)
x = deconv_block(512, skip_layer=conv4, dropout_rate=dropout_rate ) (x)
x = deconv_block(256, skip_layer=conv3, dropout_rate=dropout_rate ) (x)
x = deconv_block(128, skip_layer=conv2, dropout_rate=dropout_rate ) (x)
x = deconv_block(64, skip_layer=conv1, dropout_rate=dropout_rate ) (x)
outp_logit = Conv2D(N_classes, (1, 1), activation='linear', padding='same', name='logit')(x)
outp_softmax = Softmax4D(axis=3, name='segmap')(outp_logit)
model_train = Model(inputs=in_image, outputs=[outp_logit,outp_softmax])
model_save = Model(inputs=in_image, outputs=[outp_softmax])
#if last channel is background
if(N_classes <=5):
class_indices = list(range(N_classes))[:-1] #except last one which is background
metrics_classwise=[]
for c in class_indices:
fc = multiclass_dice_coef_metric(from_logits=True, class_index=c)
fc.__name__='dmc'+str(c)
metrics_classwise.append(fc)
metrics = {'logit': metrics_classwise}
else:
metrics = {'logit': [multiclass_dice_coef_metric(from_logits=True)]} #all classes
model_train.compile(optimizer=Adam(lr=learning_rate),
loss={'logit': multiclass_balanced_cross_entropy(from_logits=True, P=5)},
metrics=metrics)
return Models(model_train, model_save) | 93c15376eed0c5cf1abe689ef1daca6c8877e61a | 17,032 |
import os
def extension(name: str, compile_args=(), link_args=(), include_dirs=(),
libraries=(), language='c++', **kwargs):
"""Build standard Cython extension."""
path = os.path.sep.join(['src', *name.split('.')]) + '.pyx'
include_dirs = ['include', *include_dirs]
return Extension(name,
[path],
extra_compile_args=compile_args,
extra_link_args=link_args,
include_dirs=include_dirs,
libraries=libraries,
language=language,
**kwargs) | 09548bb188b1b858a52bb69a7a8b8b8e66d6df15 | 17,033 |
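A setup.py sketch using this helper (package and module names are placeholders; `cythonize` compiles the `.pyx` source resolved under `src/`):

```python
from setuptools import setup
from Cython.Build import cythonize

setup(
    name="mypkg",  # hypothetical package
    ext_modules=cythonize([
        extension("mypkg.fastcore", compile_args=["-O3"]),
    ]),
)
```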
from uutils.torch_uu.models.learner_from_opt_as_few_shot_paper import get_default_learner
from typing import Optional
def get_5cnn_model(image_size: int = 84,
bn_eps: float = 1e-3,
bn_momentum: float = 0.95,
n_classes: int = 5,
filter_size: int = 32,
levels: Optional = None,
spp: bool = False) -> nn.Module:
"""
Gets a 5CNN that does not change the spatial dimension [H,W] as it processes the image.
:return:
"""
mdl: nn.Module = get_default_learner(image_size, bn_eps, bn_momentum, n_classes, filter_size, levels, spp)
return mdl | 6b3e21e33433102b16b70c88dd5d033e1f069b86 | 17,034 |
def valid_extract_input_specification(instance_of_property, depth, language_code, named_entity_label):
""" Checks if the input for the extraction is valid. Both to help
the user get correct input and to sanitize it to avoid
attacks as the values are used to generate filenames.
"""
pattern_match = valid_instance_of_property_pattern.match(instance_of_property)
if instance_of_property != "manual_entry" and instance_of_property != "stopwords" and( pattern_match is None or pattern_match.span()[1] != len(instance_of_property) ):
flash(f"The value of the instance of property must start with Q and then be followed by one or more digits (e.g. Q123). Currently, it is '{instance_of_property}'.", "danger")
return False
if len(language_code) != 2 or language_code.lower() != language_code:
flash(f"The language code must consist of two lowercase letters (e.g. en). Currently, it is '{language_code}'.", "danger")
return False
pattern_match = valid_named_entity_label_pattern.match(named_entity_label)
if pattern_match is None or pattern_match.span()[1] != len(named_entity_label):
flash(f"The label must only consist of the characters a-z (upper or lowercased) or the special characters - or _ (e.g. LOC or feature_film). Currently it is '{named_entity_label}'.", "danger")
return False
try:
depth_as_int = int(depth)
if depth_as_int < 0:
flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
return False
except:
flash(f"The depth must be an integer >= 0. Currently it is '{depth}'.", "danger")
return False
return True | c71f744fef82e54ca2fad0ea64c8637692256299 | 17,035 |
def get_company_data(mid):
"""Looks up stock ticker information for a company via its Freebase ID."""
query = MID_TO_TICKER_QUERY % mid
bindings = make_wikidata_request(query)
if not bindings:
if mid:
print("%s No company data found for MID: %s" % (WARNING, mid))
return None
# Collect the data from the response.
companies = []
for binding in bindings:
try:
name = binding["companyLabel"]["value"]
except KeyError:
name = None
try:
root = binding["rootLabel"]["value"]
except KeyError:
root = None
try:
symbol = binding["tickerLabel"]["value"]
except KeyError:
symbol = None
try:
exchange = binding["exchangeNameLabel"]["value"]
except KeyError:
exchange = None
company = {"name": name,
"symbol": symbol,
"exchange": exchange}
# Add the root if there is one.
if root and root != name:
company["root"] = root
# Add to the list unless we already have the same entry.
if company not in companies:
print("%s Adding company data: %s" % (OK, company))
companies.append(company)
else:
print("%s Skipping duplicate company data: %s" % (WARNING, company))
return companies | 464b9ef795938b2d83fd6a629b9af09ff165a922 | 17,036 |
import numpy as np
def embed_data_into_square_lattice(data):
"""Insert MR image into square 2D array."""
dims = np.array(data.shape)
offset_x = int((dims.max() - dims[0]) / 2.)
offset_y = int((dims.max() - dims[1]) / 2.)
temp = np.zeros((dims.max(), dims.max()))
temp[offset_x:offset_x+dims[0], offset_y:offset_y+dims[1]] = data
return temp | e701e871b4df9f4085b2548ad1e10f93ce33bf38 | 17,037 |
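For example, a 2x4 slice is padded into a 4x4 array with the data block centered along the shorter axis:

```python
import numpy as np

data = np.ones((2, 4))
square = embed_data_into_square_lattice(data)
print(square.shape)  # (4, 4)
print(square)        # rows 1-2 hold the original data, rows 0 and 3 are zero
```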
def is_partial_link_text_selector(selector):
"""
A basic method to determine if a selector is a partial link text selector.
"""
if (
selector.startswith("partial_link=")
or selector.startswith("partial_link_text=")
or selector.startswith("partial_text=")
or selector.startswith("p_link=")
or selector.startswith("p_link_text=")
or selector.startswith("p_text=")
):
return True
return False | 4f21143173e46ed273ca719ea1aac8489afa2395 | 17,038 |
def scell(obj, dims, method=1, **kwds):
"""Build supercell based on `dims`.
Uses coords_frac and cell.
Parameters
----------
obj : Structure or Trajectory
dims : tuple (nx, ny, nz) for a N = nx * ny * nz supercell
method : int, optional
Switch between numpy-ish (1) or loop (2) implementation. (2) should
always produce correct results but is sublty slower. Only for
Structure.
**kwds : see :func:`scell_mask`
Notes
-----
The mask for the supercell is created by :func:`scell_mask` and applied to
each atom in `obj` one after another, i.e. each atom is repeated nx*ny*nz
times according to the mask pattern, independently of how the pattern looks
like (e.g. the `direc` parameter in :func:`scell_mask`). So, just as rows
in np.repeat(), we have:
| original: symbols=[A,B,C,D]
| 2 x 1 x 1: symbols=[A,A,B,B,C,C,D,D]
| nx x ny x nz: symbols=[(nx*ny*nz) x A, (nx*ny*nz) x B, ...]
Returns
-------
scell : Structure
"""
# Place each atom N = nx*ny*nz times in the supercell, i.e. copy unit cell
# N times. Actually, N-1, since ix=iy=iz=0 is the unit cell itself.
#
# Let k = {x,y,z}.
#
# mask[j,:] = [ix, iy, iz], ik = integers (floats actually, but
# mod(ik, floor(ik)) == 0.0)
#
# original cell:
# coords_frac[i,:] = position vect of atom i in the unit cell in *crystal*
# coords!!
#
# super cell:
# sc_coords_frac[i,:] = coords_frac[i,:] + [ix, iy, iz]
# for all permutations (see scell_mask()) of ix, iy, iz.
# ik = 0, ..., nk - 1
#
# sc_coords_frac : crystal coords w.r.t the *old* cell, i.e. the entries are in
# [0,(max(dims))], not [0,1], is scaled below
#
if 'direc' not in kwds:
kwds['direc'] = 1
mask = scell_mask(*tuple(dims), **kwds)
nmask = mask.shape[0]
if obj.is_struct:
sc_cell = obj.cell * np.asarray(dims)[:,None]
container = Structure
elif obj.is_traj:
# (nstep,3,3) * (1,3,1) -> (nstep, 3,3)
sc_cell = obj.cell * np.asarray(dims)[None,:,None]
container = Trajectory
else:
raise Exception("unknown input type")
if method == 1:
sc_symbols = np.array(obj.symbols).repeat(nmask).tolist() if (obj.symbols
is not None) else None
if obj.is_struct:
# (natoms, 1, 3) + (1, nmask, 3) -> (natoms, nmask, 3)
sc_coords_frac = (obj.coords_frac[:,None,:]
+ mask[None,...]).reshape(obj.natoms*nmask,3)
elif obj.is_traj:
# cool, eh?
# (nstep, natoms, 1, 3) + (1, 1, nmask, 3) -> (nstep, natoms, nmask, 3)
sc_coords_frac = (obj.coords_frac[...,None,:]
+ mask[None,None,...]).reshape(obj.nstep,obj.natoms*nmask,3)
else:
raise Exception("huh!?")
# explicit loop version for testing, this is the reference implementation,
# only for Structure
elif method == 2:
if obj.is_struct:
sc_symbols = []
sc_coords_frac = np.empty((nmask*obj.natoms, 3), dtype=float)
k = 0
for iatom in range(obj.natoms):
for j in range(nmask):
if obj.symbols is not None:
sc_symbols.append(obj.symbols[iatom])
sc_coords_frac[k,:] = obj.coords_frac[iatom,:] + mask[j,:]
k += 1
else:
raise Exception("method=2 only implemented for Structure")
else:
raise Exception("unknown method: %s" %repr(method))
sc_coords_frac[...,0] /= dims[0]
sc_coords_frac[...,1] /= dims[1]
sc_coords_frac[...,2] /= dims[2]
return container(coords_frac=sc_coords_frac,
cell=sc_cell,
symbols=sc_symbols) | e0cf7e03323c5994d0c56ba171d168aed105cfda | 17,039 |
def create_config(device: str = 'CPU', *,
per_process_gpu_memory_fraction: float = 0.0,
log_device_placement: bool = False) -> tf.ConfigProto:
"""Creates tf.ConfigProto for specifi device"""
config = tf.ConfigProto(log_device_placement=log_device_placement)
if is_gpu(device):
if per_process_gpu_memory_fraction > 0.0:
config.gpu_options.per_process_gpu_memory_fraction = per_process_gpu_memory_fraction
else:
config.gpu_options.allow_growth = True
else:
config.device_count['GPU'] = 0
return config | 0b6f351bcad2d816d6c03896ed60223cf2bb90c9 | 17,040 |
from typing import Union
from typing import Dict
def format_childproc(cp: Union[Event, Dict]):
"""Format childproc event into single line."""
return f" @{as_configured_timezone(cp.get('event_timestamp'))}: {cp.get('childproc_cmdline')} - {cp.get('childproc_process_guid')}" | f233a0fca52fdbef8d7ed0177772c9a8d196ec0d | 17,041 |
import os
import numpy as np
from PIL import Image
def create_not_mnist_doubleset() -> (list, list):
"""
A function which iterates through notMNIST images and sorts into two lists of images and arrays.
:return x: images as ndarrays
:return y: labels of images
"""
try:
with np.load("./notMNIST_all/all_data.npz") as f:
x, y = f['x'], f['y']
except FileNotFoundError:
# Have to parse the image files, if the .npz numpy file does not exist
x = []
y = []
for image_name in os.listdir("notMNIST_all"):
if image_name == ".DS_Store":
continue
try:
image_as_array = np.asarray(Image.open("notMNIST_all/" + image_name))
scale = float(np.max(image_as_array))
# Scale data so the input is in range [0, 1]
# and the class is in the range [0, 1, .., no_classes - 1]
x.append(image_as_array / scale)
y.append(ord(image_name[0]) - ord("A"))
except (FileNotFoundError, OSError) as e:
print(f"Skipping the file {image_name}, as it gave error {e}")
x, y = np.array(x), np.array(y)
np.savez(file="./notMNIST_all/all_data.npz", x=x, y=y) # Save data so we do not have to parse next time.
return x, y | bf1766f41f7ee659151a7185bed84b45faf6b7b0 | 17,042 |
def format_maven_jar_dep_name(group_id, artifact_id, repository = DEFAULT_REPOSITORY_NAME):
"""
group_id: str
artifact_id: str
repository: str = "maven"
"""
return "@%s//:%s" % (repository, format_maven_jar_name(group_id, artifact_id)) | a331ce788a510c09c32a1d2c7d1f8d4fbeaba975 | 17,043 |
import ctypes
import ctypes.wintypes
def PCO_GetCameraName(handle):
"""
This function retrieves the name of the camera.
"""
f = pixelfly_dll.PCO_GetCameraName
f.argtypes = (ctypes.wintypes.HANDLE, ctypes.c_char_p, ctypes.wintypes.WORD)
f.restype = ctypes.c_int
cameraName = ctypes.create_string_buffer(41)
ret_code = f(handle, cameraName, 41)
PCO_manage_error(ret_code)
return cameraName.raw.decode("ascii") | f704f2a875f29f0876553c631de032d25b5166f4 | 17,044 |
import sys
def import_object(absolute_name):
"""
Import an object by its name.
:param absolute_name: a string in the ``module:name`` format
:return: the corresponding object
"""
try:
module_name, obj_name = absolute_name.split(':')
module = sys.modules.get(module_name, None)
if not module:
module = import_module(module_name)
obj = getattr(module, obj_name)
return obj
except ValueError:
raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
f'The function name `{absolute_name}` is incorrect; it should be of the form `module:name`.')
except ModuleNotFoundError:
raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
f'Could not find the object corresponding to your function name `{absolute_name}`.')
except AttributeError:
raise MLPMJobException(MLPMJobErrorEnum.BAD_FUNC_NAME,
f'Could not find the object corresponding to the function name `{absolute_name}`.') | 02de503c436fdd64ce2cd1530b36c663e7ccab9b | 17,045 |
def issingleton(var):
""" If isunitset(var) is True, this function returns True,
otherwise isscalar(var) is returned.
"""
# Here we define singleton as a unit set or scalar
if isunitset(var):
return True
return isscalar(var) | cd1808ad99647486e81e0f903047db9327b77fb8 | 17,046 |
def satisfiesF(L):
"""
Assumes L is a list of strings
Assume function f is already defined for you and it maps a string to a Boolean
Mutates L such that it contains all of the strings, s, originally in L such
that f(s) returns True, and no other elements. Remaining elements in L
should be in the same order.
Returns the length of L after mutation
"""
idx = 0
while idx < len(L):
if f(L[idx]): # do nothing if f true
idx += 1
else: # remove the element if false
L.pop(idx)
return len(L) | 429c385f51ba254fff7170f4e69725cc98c8b337 | 17,047 |
def calc_circle_radius(area: float) -> float:
"""
Calculate radius from area.
>>> calc_circle_radius(10.0)
1.7841241161527712
"""
assert not area < 0
radius = numpy_to_python_type(np.sqrt(area / np.pi))
assert isinstance(radius, float)
return radius | 06086e1b130bef960fad200b350cba01b647466e | 17,048 |
import numpy as np
from tqdm import tqdm
def load_imgs(paths, target_size):
"""Load images from `paths`."""
pairs = np.empty((len(paths), 2, *target_size), dtype=np.float32)
for i, row in tqdm(paths.iterrows(), total=len(pairs)):
img1 = img_to_array(load_img(row.p1, target_size=target_size)) / 255
img2 = img_to_array(load_img(row.p2, target_size=target_size)) / 255
pair = np.stack([img1, img2], axis=0)
pairs[i, :] = pair
y = paths.target.values.astype(np.uint8)
return pairs, y | b91b86bcae29a6bf2d1227a25a2b8297c6be1734 | 17,049 |
import h5py
def load_dict_from_hdf5(h5_filepath):
"""
Load h5 file as a dict
"""
def recursively_load_dict_contents_from_group(h5_obj, path):
"""
Recursively load a dict from h5 file
"""
ans = {}
for key, item in h5_obj[path].items():
if isinstance(item, h5py._hl.dataset.Dataset):
ans[key] = item.value
elif isinstance(item, h5py._hl.group.Group):
ans[key] = recursively_load_dict_contents_from_group(h5_obj, path + key + '/')
return ans
with h5py.File(h5_filepath, 'r') as h5_obj:
return recursively_load_dict_contents_from_group(h5_obj, '/') | 2339cc6edb83ed59fb43ec49503d86758d37d83e | 17,050 |
def interactive_grid_shape(grid, max_n=200, plotfxn=None, **kwargs):
""" Interactive ipywidgets for select the shape of a grid
Parameters
----------
grid : pygridgen.Gridgen
The base grid from which the grids of new shapes (resolutions) will be
generated.
max_n : int (default = 200)
The maximum number of possible cells in each dimension.
plotfxn : callable, optional
Function that plots the grid to provide user feedback. The call
signature of this function must accept to positional parameters for the
x- and y-arrays of node locations, and then accept any remaining keyword
arguments. If not provided, *pygridtools.viz.plot_cells* is used.
Additional Parameters
---------------------
All remaining keyword arguments are passed to *plotfxn*
Returns
-------
newgrid : pygridgen.Gridgen
The reshaped grid
widget : ipywidgets.interactive
Collection of IntSliders for changing the number cells along each axis
in the grid.
Examples
--------
>>> from pygridgen import grid
>>> from pygridtools import viz, iotools
>>> def make_fake_bathy(shape):
... j_cells, i_cells = shape
... y, x = numpy.mgrid[:j_cells, :i_cells]
... z = (y - (j_cells // 2))** 2 - x
... return z
>>> def plot_grid(x, y, ax=None):
... shape = x[1:, 1:].shape
... bathy = make_fake_bathy(shape)
... if not ax:
... fig, ax = pyplot.subplots(figsize=(8, 8))
... ax.set_aspect('equal')
... return viz.plot_cells(x, y, ax=ax, cmap='Blues', colors=bathy, lw=0.5, ec='0.3')
>>> d = numpy.array([
... (13, 16, 1.00), (18, 13, 1.00), (12, 7, 0.50),
... (10, 10, -0.25), ( 5, 10, -0.25), ( 5, 0, 1.00),
... ( 0, 0, 1.00), ( 0, 15, 0.50), ( 8, 15, -0.25),
... (11, 13, -0.25)])
>>> g = grid.Gridgen(d[:, 0], d[:, 1], d[:, 2], (75, 75), ul_idx=1, focus=None)
>>> new_grid, widget = iotools.interactive_grid_shape(g, plotfxn=plot_grid)
"""
if not plotfxn:
plotfxn = viz.plot_cells
common_opts = dict(min=2, max=max_n, continuous_update=False)
return grid, ipywidgets.interactive(
_change_shape,
g=ipywidgets.fixed(grid),
irows=ipywidgets.IntSlider(value=grid.ny, **common_opts),
jcols=ipywidgets.IntSlider(value=grid.nx, **common_opts),
plotfxn=ipywidgets.fixed(plotfxn),
plotopts=ipywidgets.fixed(kwargs)
) | ef126f39f8433a65deb22e09ea825f342a38bea1 | 17,051 |
from lgsvl.utils import transform_to_forward
from typing import Optional
def generate_initial_state(initial_pos: Transform, initial_speed: Optional[float] = None) -> AgentState:
"""
:param initial_speed: Initial speed in km/h
"""
movement = AgentState()
movement.transform = initial_pos
if initial_speed is not None:
movement.velocity = (initial_speed / 3.6) * transform_to_forward(movement.transform)
return movement | 30906410d3fe92b84f3d2c93a49db24f90a8ec8b | 17,052 |
def resolve_ami(ami=None, arch="x86_64", tags=frozenset(), tag_keys=frozenset()):
"""
Find an AMI by ID, name, or tags.
- If an ID is given, it is returned with no validation; otherwise, selects the most recent AMI from:
- All available AMIs in this account with the Owner tag equal to this user's IAM username (filtered by tags given);
- If no AMIs found, all available AMIs in this account with the AegeaVersion tag present (filtered by tags given);
- If no AMIs found, all available AMIs in this account (filtered by tags given).
Return the AMI with the most recent creation date.
"""
assert arch in {"x86_64", "arm64"}
if ami is None or not ami.startswith("ami-"):
if ami is None:
filters = dict(Owners=["self"],
Filters=[dict(Name="state", Values=["available"]), dict(Name="architecture", Values=[arch])])
else:
filters = dict(Owners=["self"], Filters=[dict(Name="name", Values=[ami])])
all_amis = resources.ec2.images.filter(**filters)
if tags:
all_amis = filter_by_tags(all_amis, **tags)
if tag_keys:
all_amis = filter_by_tag_keys(all_amis, *tag_keys)
current_user_amis = all_amis.filter(Filters=[dict(Name="tag:Owner", Values=[ARN.get_iam_username()])])
amis = sorted(current_user_amis, key=lambda x: x.creation_date)
if len(amis) == 0:
aegea_amis = all_amis.filter(Filters=[dict(Name="tag-key", Values=["AegeaVersion"])])
amis = sorted(aegea_amis, key=lambda x: x.creation_date)
if len(amis) == 0:
amis = sorted(all_amis, key=lambda x: x.creation_date)
if not amis:
raise AegeaException("Could not resolve AMI {}".format(dict(tags, ami=ami)))
ami = amis[-1].id
return ami | 32495fb78a611f57b0e025b0ff68b51a190c7297 | 17,053 |
def _filter_colors(hcl, ihue, nhues, minsat):
"""
Filter colors into categories.
Parameters
----------
hcl : tuple
The data.
ihue : int
The hue column.
nhues : int
The total number of hues.
minsat : float
The minimum saturation used for the "grays" column.
"""
breakpoints = np.linspace(0, 360, nhues)
gray = hcl[1] <= minsat
if ihue == 0:
return gray
color = breakpoints[ihue - 1] <= hcl[0] < breakpoints[ihue]
if ihue == nhues - 1:
color = color or hcl[0] == breakpoints[ihue]  # endpoint inclusive
return not gray and color | f7ca00bdd17766c859b5262c1b9ae12187c23222 | 17,054 |
def SWO(
directed = False, preprocess = "auto", load_nodes = True, load_node_types = True,
load_edge_weights = True, auto_enable_tradeoffs = True,
sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None,
cache_sys_var = "GRAPH_CACHE_DIR", version = "swo.owl", **kwargs
) -> Graph:
"""Return SWO graph
Parameters
----------
directed = False
preprocess = "auto"
Preprocess for optimal load time & memory peak.
Will preprocess in Linux/macOS but not Windows.
load_nodes = True
Load node names or use numeric range
auto_enable_tradeoffs = True
Enable when graph has < 50M edges
cache_path = None
Path to store graphs
Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs`
cache_sys_var = "GRAPH_CACHE_DIR"
version = "swo.owl"
Version to retrieve
The available versions are:
- swo.owl
"""
return AutomaticallyRetrievedGraph(
"SWO", version, "kgobo", directed, preprocess, load_nodes,
load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache,
cache_path, cache_sys_var, kwargs
)() | 39936ffcd272e0d6c6c7e5510699bac68a465eb9 | 17,055 |
def upload_needed_files (handle, bucket, prefix, dir_path, kind, iter):
"""
upload the needed local files of a particular kind
"""
extension = f".{kind}"
count = 0
for uuid in iter:
file_name = uuid + extension
local_path = dir_path / file_name
grid_path = prefix + "/pub/" + kind + "/"
#print("uploading {} to {}".format(local_path, grid_path))
upload_file(handle, local_path.as_posix(), grid_path + file_name)
count += 1
return count | 9357e991eb14eaf9de54beda3ec86defc3e1ecaf | 17,056 |
def detect_tag(filename):
"""Return type and position of ID3v2 tag in filename.
Returns (tag_class, offset, length), where tag_class
is either Tag22, Tag23, or Tag24, and (offset, length)
is the position of the tag in the file.
"""
with fileutil.opened(filename, "rb") as file:
file.seek(0)
header = file.read(10)
file.seek(0)
if len(header) < 10:
raise NoTagError("File too short")
if header[0:3] != b"ID3":
raise NoTagError("ID3v2 tag not found")
if header[3] not in _tag_versions or header[4] != 0:
raise TagError("Unknown ID3 version: 2.{0}.{1}"
.format(*header[3:5]))
cls = _tag_versions[header[3]]
offset = 0
length = Syncsafe.decode(header[6:10]) + 10
if header[3] == 4 and header[5] & _TAG24_FOOTER:
length += 10
return (cls, offset, length) | 5b32c122d804aa5def21c59e73e4369c64b7cbbe | 17,057 |
def merge_peaks(peaks, start_merge_at, end_merge_at,
max_buffer=int(1e5)):
"""Merge specified peaks with their neighbors, return merged peaks
:param peaks: Record array of strax peak dtype.
:param start_merge_at: Indices to start merge at
:param end_merge_at: EXCLUSIVE indices to end merge at
:param max_buffer: Maximum number of samples in the sum_waveforms of
the resulting peaks (after merging).
Peaks must be constructed based on the properties of constituent peaks,
it being too time-consuming to revert to records/hits.
"""
assert len(start_merge_at) == len(end_merge_at)
new_peaks = np.zeros(len(start_merge_at), dtype=peaks.dtype)
# Do the merging. Could numbafy this to optimize, probably...
buffer = np.zeros(max_buffer, dtype=np.float32)
for new_i, new_p in enumerate(new_peaks):
old_peaks = peaks[start_merge_at[new_i]:end_merge_at[new_i]]
common_dt = np.gcd.reduce(old_peaks['dt'])
first_peak, last_peak = old_peaks[0], old_peaks[-1]
new_p['channel'] = first_peak['channel']
# The new endtime must be at or before the last peak endtime
# to avoid possibly overlapping peaks
new_p['time'] = first_peak['time']
new_p['dt'] = common_dt
new_p['length'] = \
(strax.endtime(last_peak) - new_p['time']) // common_dt
# re-zero relevant part of buffer (overkill? not sure if
# this saves much time)
buffer[:min(
int(
(
last_peak['time']
+ (last_peak['length'] * old_peaks['dt'].max())
- first_peak['time']) / common_dt
),
len(buffer)
)] = 0
for p in old_peaks:
# Upsample the sum waveform into the buffer
upsample = p['dt'] // common_dt
n_after = p['length'] * upsample
i0 = (p['time'] - new_p['time']) // common_dt
buffer[i0: i0 + n_after] = \
np.repeat(p['data'][:p['length']], upsample) / upsample
# Handle the other peak attributes
new_p['area'] += p['area']
new_p['area_per_channel'] += p['area_per_channel']
new_p['n_hits'] += p['n_hits']
new_p['saturated_channel'][p['saturated_channel'] == 1] = 1
# Downsample the buffer into new_p['data']
strax.store_downsampled_waveform(new_p, buffer)
new_p['n_saturated_channels'] = new_p['saturated_channel'].sum()
# Use the tight coincidence of the peak with the highest amplitude
i_max_subpeak = old_peaks['data'].max(axis=1).argmax()
new_p['tight_coincidence'] = old_peaks['tight_coincidence'][i_max_subpeak]
# If the endtime was in the peaks we have to recompute it here
# because otherwise it will stay set to zero due to the buffer
if 'endtime' in new_p.dtype.names:
new_p['endtime'] = strax.endtime(last_peak)
return new_peaks | 75f86b0c27cb2cac145234cfd9254105048be9a8 | 17,058 |
def batchedpatternsgenerator(generatorfunction):
"""Decorator that assumes patterns (X,y) and stacks them in batches
This can be thought of a specialized version of the batchedgenerator
that assumes the base generator returns instances of data patterns,
as tuples of numpy arrays (X,y). When grouping them in batches the
numpy arrays are stacked so that each returned batch has a pattern
per row.
A "batchsize" parameter is added to the generator, that if specified
groups the data in batches of such size.
"""
def modgenerator(*args, **kwargs):
for batch in batchedgenerator(generatorfunction)(*args, **kwargs):
Xb, yb = zip(*batch)
yield np.stack(Xb), np.stack(yb)
return modgenerator | 19a8e8d5c2872c38d469c41e163a947f208fc806 | 17,059 |
def reduce_min(raw_tensor, axis, keepdims=False):
"""
calculate reduce_min of raw_tensor, only support float16
Args:
raw_tensor (tvm.tensor.Tensor): input tensor
axis (Union[int, list]): reduce axis (range : [-len(raw_tensor.shape), len(raw_tensor.shape) - 1])
keepdims (bool): if true, retains reduced dimensions with length 1, default value is None
Returns:
tvm.tensor.Tensor, res
"""
return single_reduce_op(raw_tensor, axis, "reduce_min", keepdims) | b4473ca577a939f3c149758fd73a59c79b1f0db0 | 17,060 |
def align2local(seq):
"""
Returns list such that
    'ATG---CTG-CG' ==> [0,1,2,2,2,2,3,4,5,5,6,7]
Used to go from align -> local space
"""
i = -1
lookup = []
for c in seq:
if c != "-":
i += 1
lookup.append(i)
return lookup | aa914a60d5db7801a3cf1f40e713e95c98cd647e | 17,061 |
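A quick check of the align-to-local mapping on the docstring example, assuming `align2local` above is in scope:

lookup = align2local("ATG---CTG-CG")
print(lookup)      # [0, 1, 2, 2, 2, 2, 3, 4, 5, 5, 6, 7]
print(lookup[6])   # alignment column 6 ('C') maps to local position 3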
def load_nodegraph(filename):
"""Load a nodegraph object from the given filename and return it.
Keyword argument:
filename -- the name of the nodegraph file
"""
nodegraph = _Nodegraph(1, [1])
nodegraph.load(filename)
return nodegraph | cd552fda874f1e8667bd09e95bdf43e6c5bd75c1 | 17,062 |
def get_bprop_sqrt(self):
"""Grad definition for `Sqrt` operation."""
mul_func = P.Mul()
fill_func = P.Fill()
div_op = P.RealDiv()
sqrt = P.Sqrt()
dtype = P.DType()
def bprop(x, out, dout):
temp = div_op(fill_func(dtype(x), shape_op(x), 0.5), sqrt(x))
dx = mul_func(dout, temp)
return (dx,)
return bprop | b297695effd9d063384b3343337d1647050b5f1a | 17,063 |
def classify_top1_batch(image):
"""Define method `classify_top1` for servable `resnet50`.
    The input is `image` and the output is `label`."""
x = register.add_stage(preprocess_batch, image, outputs_count=1, batch_size=1024)
x = register.add_stage(resnet_model, x, outputs_count=1)
x = register.add_stage(postprocess_top1, x, outputs_count=1)
return x | ff4ae67619f29e0e22e275845709ab73daabe2f0 | 17,064 |
def ngram_word(max_features=2_000):
"""Word count vectorizer.
Args:
max_features: number of features to consider.
"""
return CountVectorizer(
ngram_range=(1, 3),
analyzer='word',
max_features=max_features,
) | 2b8935b72a836ff6ab3cdb0b17939806d9f7ce02 | 17,065 |
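A short usage sketch; note that `ngram_word` itself relies on `CountVectorizer` from scikit-learn being imported at module level, as done here.

from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat on the mat", "the dog sat on the log"]
vectorizer = ngram_word(max_features=50)
counts = vectorizer.fit_transform(docs)
print(counts.shape)  # (2, n_features) document-term count matrix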
def func_dispatcher(intent):
"""
Simple effect dispatcher that takes callables taking a box,
and calls them with the given box.
"""
def performer(dispatcher, intent, box):
intent(box)
return performer | 48dc23a8124569d5537c38b8f704fdea282853e8 | 17,066 |
import os
def get_system_path():
    """ Get the system PATH as a list of directory names
    Returns:
        List of directory names on the system PATH
"""
path = os.getenv('PATH')
if path:
return path.split(os.pathsep)
return [] | 766931b403444c584edf71d053f5b3f5de6bf265 | 17,067 |
import multiprocessing
from functools import partial

import numpy as np
from sklearn.neighbors import NearestNeighbors
from tqdm import tqdm
def encode(x, bps_arrangement='random', n_bps_points=512, radius=1.5, bps_cell_type='dists',
verbose=1, random_seed=13, x_features=None, custom_basis=None, n_jobs=-1):
"""Converts point clouds to basis point set (BPS) representation, multi-processing version
Parameters
----------
x: numpy array [n_clouds, n_points, n_dims]
batch of point clouds to be converted
bps_arrangement: str
supported BPS arrangements: "random", "grid", "custom"
n_bps_points: int
number of basis points
radius: float
radius for BPS sampling area
bps_cell_type: str
type of information stored in every BPS cell. Supported:
'dists': Euclidean distance to the nearest point in cloud
'deltas': delta vector from basis point to the nearest point
'closest': closest point itself
'features': return features of the closest point supplied by x_features.
e.g. RGB values of points, surface normals, etc.
verbose: boolean
whether to show conversion progress
x_features: numpy array [n_clouds, n_points, n_features]
point features that will be stored in BPS cells if return_values=='features'
custom_basis: numpy array [n_basis_points, n_dims]
custom basis to use
n_jobs: int
number of parallel jobs used for encoding. If -1, use all available CPUs
Returns
-------
x_bps: [n_clouds, n_points, n_bps_features]
point clouds converted to BPS representation.
"""
if n_jobs == -1:
n_jobs = multiprocessing.cpu_count()
if n_jobs == 1:
n_clouds, n_points, n_dims = x.shape
if bps_arrangement == 'random':
basis_set = generate_random_basis(n_bps_points, n_dims=n_dims, radius=radius, random_seed=random_seed)
elif bps_arrangement == 'grid':
# in case of a grid basis, we need to find the nearest possible grid size
grid_size = int(np.round(np.power(n_bps_points, 1 / n_dims)))
basis_set = generate_grid_basis(grid_size=grid_size, minv=-radius, maxv=radius)
elif bps_arrangement == 'custom':
            # use the custom basis supplied by the caller
if custom_basis is not None:
basis_set = custom_basis
else:
raise ValueError("Custom BPS arrangement selected, but no custom_basis provided.")
else:
raise ValueError("Invalid basis type. Supported types: \'random\', \'grid\', \'custom\'")
n_bps_points = basis_set.shape[0]
if bps_cell_type == 'dists':
x_bps = np.zeros([n_clouds, n_bps_points])
elif bps_cell_type == 'deltas':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'closest':
x_bps = np.zeros([n_clouds, n_bps_points, n_dims])
elif bps_cell_type == 'features':
n_features = x_features.shape[2]
x_bps = np.zeros([n_clouds, n_bps_points, n_features])
else:
raise ValueError("Invalid cell type. Supported types: \'dists\', \'deltas\', \'closest\', \'features\'")
fid_lst = range(0, n_clouds)
if verbose:
fid_lst = tqdm(fid_lst)
for fid in fid_lst:
nbrs = NearestNeighbors(n_neighbors=1, leaf_size=1, algorithm="ball_tree").fit(x[fid])
fid_dist, npts_ix = nbrs.kneighbors(basis_set)
if bps_cell_type == 'dists':
x_bps[fid] = fid_dist.squeeze()
elif bps_cell_type == 'deltas':
x_bps[fid] = x[fid][npts_ix].squeeze() - basis_set
elif bps_cell_type == 'closest':
x_bps[fid] = x[fid][npts_ix].squeeze()
elif bps_cell_type == 'features':
x_bps[fid] = x_features[fid][npts_ix].squeeze()
return x_bps
else:
if verbose:
print("using %d available CPUs for BPS encoding.." % n_jobs)
bps_encode_func = partial(encode, bps_arrangement=bps_arrangement, n_bps_points=n_bps_points, radius=radius,
bps_cell_type=bps_cell_type, verbose=verbose, random_seed=random_seed,
x_features=x_features, custom_basis=custom_basis, n_jobs=1)
pool = multiprocessing.Pool(n_jobs)
x_chunks = np.array_split(x, n_jobs)
x_bps = np.concatenate(pool.map(bps_encode_func, x_chunks), 0)
pool.close()
return x_bps | 66edc2dd5d42fe53e55f2e5b95e2069123510006 | 17,068 |
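A single-process usage sketch, assuming the module's helpers (e.g. `generate_random_basis`) and the scikit-learn dependency are available; the cloud sizes are illustrative only.

import numpy as np

x = np.random.normal(size=(4, 100, 3))   # 4 point clouds, 100 points each, 3D
x = x / np.abs(x).max()                  # roughly normalize into the unit ball
x_bps = encode(x, bps_arrangement='random', n_bps_points=512,
               bps_cell_type='dists', n_jobs=1, verbose=0)
print(x_bps.shape)  # (4, 512)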
def parsec_params_list_to_dict(var):
"""
convert parsec parameter array to dictionary
:param var:
:return:
"""
parsec_params = dict()
parsec_params["rle"] = var[0]
parsec_params["x_pre"] = var[1]
parsec_params["y_pre"] = var[2]
parsec_params["d2ydx2_pre"] = var[3]
parsec_params["th_pre"] = var[4]
parsec_params["x_suc"] = var[5]
parsec_params["y_suc"] = var[6]
parsec_params["d2ydx2_suc"] = var[7]
parsec_params["th_suc"] = var[8]
return parsec_params | 4ea4b4d2c0cbcb8fb49619e103b09f354c80de6a | 17,069 |
def parse_msiinfo_suminfo_output(output_string):
"""
Return a dictionary containing information from the output of `msiinfo suminfo`
"""
# Split lines by newline and place lines into a list
output_list = output_string.splitlines()
results = {}
# Partition lines by the leftmost ":", use the string to the left of ":" as
# the key and use the string to the right of ":" as the value
for output in output_list:
key, _, value = output.partition(':')
if key:
results[key] = value.strip()
return results | 6883e8fba9a37b9f877bdf879ebd14d1120eb88a | 17,070 |
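An illustrative input/output pair, based only on the partitioning logic above (the field names are made up, not a guaranteed `msiinfo` output format):

sample = "Title: Installation Database\nAuthor: Example Corp\nPage count: 200"
print(parse_msiinfo_suminfo_output(sample))
# {'Title': 'Installation Database', 'Author': 'Example Corp', 'Page count': '200'}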
from typing import Dict
from typing import Any
import json
from datetime import datetime
def create_indicators_fields(tag_details: Dict[str, Any]) -> Dict[str, Any]:
"""
Returns the indicator fields
Args:
tag_details: a dictionary containing the tag details.
Returns:
A dictionary represents the indicator fields.
"""
fields: Dict[str, Any] = {}
tag = tag_details.get('tag', {})
refs = json.loads(tag.get('refs', '[]'))
fields['publications'] = create_publications(refs)
fields['aliases'] = tag_details.get('aliases', [])
fields['description'] = tag.get('description', '')
last_hit = tag.get('lasthit', '')
fields['lastseenbysource'] = datetime.strptime(last_hit, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if last_hit else None
updated_at = tag.get('updated_at', '')
fields['updateddate'] = datetime.strptime(updated_at, AF_TAGS_DATE_FORMAT).strftime(
DATE_FORMAT) if updated_at else None
fields['reportedby'] = tag.get('source', '')
remove_nulls_from_dictionary(fields)
return fields | 349ab542d2c25cb24fe40aeb98c16a9bfccc871f | 17,071 |
def spatial_difference(gdf1: GeoDataFrame, gdf2: GeoDataFrame) -> GeoDataFrame:
"""Removes polygons from the first GeoDataFrame that intersect with polygons from the second GeoDataFrame
:param gdf1: First input data frame
:param gdf2: Second input data frame
:return: Resulting data frame
"""
gdf2 = gdf2[["geometry"]]
intersections = gpd.sjoin(gdf1, gdf2, how="left")
result_gdf = intersections[intersections["index_right"].isna()]
result_gdf = result_gdf.drop(columns=["index_right"])
return result_gdf | 2713376f45ed574399f9f406a06a60a47f002579 | 17,072 |
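A toy example with two GeoDataFrames; only the polygon in `gdf1` that does not intersect the `gdf2` polygon survives. The geometries are arbitrary.

import geopandas as gpd
from shapely.geometry import box

gdf1 = gpd.GeoDataFrame({"name": ["a", "b"]},
                        geometry=[box(0, 0, 1, 1), box(5, 5, 6, 6)],
                        crs="EPSG:4326")
gdf2 = gpd.GeoDataFrame(geometry=[box(0.5, 0.5, 2, 2)], crs="EPSG:4326")
print(spatial_difference(gdf1, gdf2))  # keeps only row "b"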
def frustumShellIxx(rb, rt, t, h, diamFlag=False):
"""This function returns a frustum's mass-moment of inertia (divided by density) about the
transverse x/y-axis passing through the center of mass with radii or diameter inputs.
NOTE: This is for a frustum SHELL, not a solid
INPUTS:
Parameters
----------
rb : float (scalar/vector), base radius
rt : float (scalar/vector), top radius
t : float (scalar/vector), thickness
h : float (scalar/vector), height
diamFlag : boolean, True if rb and rt are entered as diameters
OUTPUTs:
-------
Ixx=Iyy : float (scalar/vector), Moment of inertia about x/y-axis through center of mass (principle axes)
"""
if diamFlag:
# Convert diameters to radii
rb *= 0.5
rt *= 0.5
# Integrate 2*pi*r*dr*dz from r=ri(z) to ro(z), z=0 to h
rb_o = rb
rb_i = rb-t
rt_o = rt
rt_i = rt-t
return (frustumIxx(rb_o, rt_o, h) - frustumIxx(rb_i, rt_i, h)) | 3d23805d4f7ed952b50752ac4ab8158c2826137f | 17,073 |
def default_shaders():
"""
    Returns a list with all the default shaders of the current DCC
:return: str
"""
return shader_utils.get_default_shaders() | 795ca337c9ba163bb70ce4ff226c04d1034ec542 | 17,074 |
def get_domains_and_slugs():
"""
returns all the domain names and slugs as dictionary
{domain_name: slug}
"""
return_data = {}
domain_slugs = Domain.objects.filter(active=1).order_by('name')
if domain_slugs:
for domain in domain_slugs:
return_data[domain.name] = domain.slug
return return_data | d19af879fe96895808f1c1815d3cc563499d358d | 17,075 |
def has_video_ads() -> bool:
"""has_video_ads() -> bool
(internal)
"""
return bool() | 6b4822bb18171df5bfc5b4f3797e574557cd65dd | 17,076 |
def calculate_purchasing_plan(total_days, sellers, starting_bread=10, best_before_date=30, debug = False):
"""
total_days : positive int
sellers : list of tuple (day, price)
starting_bread : int, optional
best_before_date : positive int, (how long the bread lasts)
debug : boolean, (prints cost matrix)
"""
# create cost_matrix of (sellers+1) x total_days
cost_matrix = [[0] * starting_bread + [float('inf')] * (total_days - min(starting_bread, best_before_date))]
for merchant in sellers:
cost_matrix.append(
[float('inf')] * (merchant[0]) + # Add inf before
[merchant[1]] * min(best_before_date, (total_days - merchant[0])) + # Add merchant price
[float('inf')] * (total_days - merchant[0] - min(best_before_date, (total_days - merchant[0])))) # Add inf after
if debug:
print_matrix(cost_matrix)
current_merchant = len(sellers)
current_day = total_days - 1
best_merchant = current_merchant
merchant_of_the_day = [0] * total_days
new_merchant = True # If the merchant changes, we want to go as far up as possible
while current_day >= starting_bread:
best_price = cost_matrix[best_merchant][current_day]
# go up as far as you can
for best_merchant_index in range(current_merchant, -1, -1):
tmp = cost_matrix[best_merchant_index][current_day]
# go up only if price is lower
if tmp < best_price or (tmp <= best_price and new_merchant): # Up only if lower price or new merchant
# print("Better merchant found %3s with price %3s <= %3s" % (best_merchant_index, tmp, best_price))
best_merchant = best_merchant_index
best_price = tmp
new_merchant = True
merchant_of_the_day[current_day] = best_merchant # Save from which merchant we buy bread on selected day
current_day -= 1 # go left one step
if best_price == float('inf'):
if debug:
print("Plan not feasible on day %5s" % current_day)
return None
new_merchant = False # No new merchant for the previous day yet
# At this point we have fewest # merchants and lowest price. We need to make another walk from left to right to buy
# bread as soon as possible.
buying_plan = [0] * (len(sellers) + 1) # +1 is because initial bread is accounted for in the matrix
current_merchant = 0
current_day = 0
while current_day < total_days:
# If cost of current merchant is the same as cost of the merchant of the day, buy from current, since we buy
# bread from him earlier (because merchants are sorted by their arrival day)
if cost_matrix[current_merchant][current_day] > cost_matrix[merchant_of_the_day[current_day]][current_day]:
current_merchant = merchant_of_the_day[current_day]
buying_plan[current_merchant] += 1
current_day += 1
return buying_plan[1:] | 474354d1316839691fe3e0bffbb1352167e8095c | 17,077 |
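A worked call, traced by hand against the logic above: with 10 loaves of starting bread, a day-8 seller at price 3 and a day-20 seller at price 2, the cheapest feasible plan over 40 days buys 10 loaves from the first seller and 20 from the second.

plan = calculate_purchasing_plan(total_days=40,
                                 sellers=[(8, 3), (20, 2)],
                                 starting_bread=10)
print(plan)  # [10, 20]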
from itertools import chain, starmap
from typing import Iterable

import numpy as np
def compile_sites(inp: NetInput,
y_true: Iterable[np.ndarray],
y_pred: Iterable[np.ndarray],
masks: Iterable[np.ndarray]):
"""
Prepares sites to be dumped in tsv file
:param inp: NetInput
:param y_true: True known classes mapped on templates
:param y_pred: True predicted classes mapped on templates
:param masks: boolean numpy arrays with
True placed at positions of any class that
could be positive
:return: Iterable over Sites
"""
positions = (np.where(y > 0)[0] + 1 for y in masks)
def comp_site(id_, pos, cls_pred, cls_true):
site = [id_, pos, 0, 0]
if cls_pred:
site[2] = 1
if cls_true:
site[3] = 1
return Site(*site)
sites = chain.from_iterable(
((id_, pos, p, t) for pos, p, t in zip(pp, yp, yt))
for id_, pp, yp, yt in zip(inp.ids, positions, y_pred, y_true))
return starmap(comp_site, sites) | 14f655e18b5651c22373d4c23b51f55704cd63c8 | 17,078 |
def femda_estimator(X, labels, eps = 1e-5, max_iter = 20):
    """ Estimates the matrix of means and the tensor of scatter matrices of the dataset using the MLE estimator.
To tackle singular matrix issues, we use regularization.
Parameters
----------
X : 2-d array of size n*m
matrix of all the samples generated
labels : 1-d array of size n
vector of the label of each sample
eps : float > 0
criterion of termination when solving the fixed-point equation
max_iter : integer > 1
number of maximum iterations to solve the fixed-point equation
Returns
-------
means : 2-d array of size K*m
matrix of the robust estimation of the mean of the K clusters
shapes : 3-d array of size K*m*m
tensor of the robust estimation of shape matrix of the K clusters
"""
n, m = X.shape
K = int(max(set(labels)) + 1)
n_clusters = np.zeros(K) + 1e-5
for i in range(n):
n_clusters[int(labels[i])] = n_clusters[int(labels[i])] + 1
means, shapes = classic_estimator(X, labels)
for k in range(K):
convergence = False
ite = 1
while (not convergence) and ite<max_iter:
ite = ite + 1
mean = np.zeros(m)
shape = np.zeros([m,m])
sum_mean_weights = 1e-5
for i in range(n):
if labels[i] == k:
mean_weight = min([[0.5]], 1 / np.dot(np.array([X[i]-means[k]]), np.dot(np.linalg.inv(regularize(shapes[k])), np.array([X[i]-means[k]]).T)))[0][0]
#print(mean_weight)
mean = mean + mean_weight * X[i]
sum_mean_weights = sum_mean_weights + mean_weight
shape = shape + np.dot(np.array([X[i]-means[k]]).T, np.array([X[i]-means[k]])) * mean_weight
delta_mean = mean / sum_mean_weights - means[k]
delta_shape = shape * m / n_clusters[k] - shapes[k]
means[k] = means[k] + delta_mean
shapes[k] = shapes[k] + delta_shape
print("trace at", ite, np.trace(shapes[k]))
convergence = sum(abs(delta_mean)) + sum(sum(abs(delta_shape))) < eps
shapes[k] = regularize(shapes[k])
return means, shapes | 639532f9307e023561d6193730473533b240fb28 | 17,079 |
def get_collections():
"""read .db file, return raw collection"""
col = {}
f = open(collection_db, "rb")
version = nextint(f)
ncol = nextint(f)
for i in range(ncol):
colname = nextstr(f)
col[colname] = []
for j in range(nextint(f)):
f.read(2)
col[colname].append(f.read(32).decode("utf-8"))
f.close()
return (col, version) | b134e7e970fa7f5486226d2c2cab3c63ab9f67c3 | 17,080 |
def ot_has_small_bandgap(cp2k_input, cp2k_output, bandgap_thr_ev):
    """ Returns True if the calculation used OT and had a smaller bandgap than the guess needed for the OT.
    (NOTE: negative bandgaps have also been observed with OT in CP2K!)
cp2k_input: dict
cp2k_output: dict
bandgap_thr_ev: float [eV]
"""
list_true = [True, 'T', 't', '.TRUE.', 'True', 'true'] #add more?
try:
ot_settings = cp2k_input['FORCE_EVAL']['DFT']['SCF']['OT']
if '_' not in ot_settings.keys() or ot_settings['_'] in list_true: #pylint: disable=simplifiable-if-statement
using_ot = True
else:
using_ot = False
except KeyError:
using_ot = False
min_bandgap_ev = min(cp2k_output["bandgap_spin1_au"], cp2k_output["bandgap_spin2_au"]) * HARTREE2EV
is_bandgap_small = (min_bandgap_ev < bandgap_thr_ev)
return using_ot and is_bandgap_small | fbc63c373d052111932ea0fd2cd458d59b486d10 | 17,081 |
def profile():
"""Checking if user is already logged_in"""
if 'logged_in' in session:
'''getting all the account info for the user for displaying it on the profile page'''
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM accounts WHERE username = %s', (session['employee_uname'],))
account = cursor.fetchone()
'''Showing profile page with account info to the employee'''
return render_template('profile.html', acc=account)
'''if User is not logged_in redirect to login page'''
return redirect(url_for('login')) | 4796058d3bbc911cc0610b7a5458be80fa330d67 | 17,082 |
import logging
def gather_metrics(config, worker_output, endpoint_output, container_names):
"""Process the raw output to lists of dicts
Args:
config (dict): Parsed configuration
worker_output (list(list(str))): Output of each container ran on the edge
endpoint_output (list(list(str))): Output of each endpoint container
container_names (list(str)): Names of docker containers launched
Returns:
2x list(dict): Metrics of worker nodes and endpoints
"""
logging.debug('Print raw output from subscribers and publishers')
if config['mode'] == 'cloud' or config['mode'] == 'edge':
logging.debug('------------------------------------')
logging.debug('%s OUTPUT' % (config['mode'].upper()))
logging.debug('------------------------------------')
for out in worker_output:
for line in out:
logging.debug(line)
logging.debug('------------------------------------')
logging.debug('------------------------------------')
logging.debug('ENDPOINT OUTPUT')
logging.debug('------------------------------------')
for out in endpoint_output:
for line in out:
logging.debug(line)
logging.debug('------------------------------------')
worker_metrics = gather_worker_metrics(worker_output)
endpoint_metrics = gather_endpoint_metrics(config, endpoint_output, container_names)
return worker_metrics, endpoint_metrics | 17263ba1e1c717f52b3d4cb05f373a54630f8e06 | 17,083 |
import time
import json
async def ping(ws):
"""Send a ping request on an established websocket connection.
:param ws: an established websocket connection
:return: the ping response
"""
ping_request = {
'emit': "ping",
'payload': {
'timestamp': int(time.time())
}
}
await ws.send(json.dumps(ping_request))
return json.loads(await ws.recv()) | 587d2a72cbc5f50f0ffb0bda63668a0ddaf4c9c3 | 17,084 |
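A usage sketch with the `websockets` client library; the endpoint URL is a placeholder assumption, not something defined by the original code.

import asyncio
import websockets

async def main():
    async with websockets.connect("wss://example.com/ws") as ws:
        pong = await ping(ws)
        print(pong)

asyncio.run(main())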
def split_files_each_proc(file_arr,nprocs):
""" Returns array that distributes samples across all processors. """
ntot = len(file_arr)
post_proc_file_arr = []
for i in range(0,nprocs):
each_proc_arr = []
ib,ie = split_array_old(ntot,nprocs,i)
if i == 0:
max_no = (ie-ib)+1
for j in range(ib,ie+1):
each_proc_arr.append(j)
if len(each_proc_arr) > max_no:
max_no = len(each_proc_arr)
elif len(each_proc_arr) < max_no :
for k in range(0,max_no-(len(each_proc_arr))):
each_proc_arr.append("no file")
max_no = len(each_proc_arr)
post_proc_file_arr.append(each_proc_arr)
return post_proc_file_arr | 0c5c481d1b9a9e0d5c6efdfb7abf6669f5a05ecf | 17,085 |
def get_data_generators_for_output(output):
""" Get the data generators involved in an output
Args:
output (:obj:`Output`): report or plot
Returns:
:obj:`set` of :obj:`DataGenerator`: data generators involved in the output
"""
data_generators = set()
if isinstance(output, Report):
for data_set in output.data_sets:
data_generators.add(data_set.data_generator)
elif isinstance(output, Plot2D):
for curve in output.curves:
data_generators.add(curve.x_data_generator)
data_generators.add(curve.y_data_generator)
elif isinstance(output, Plot3D):
for surface in output.surfaces:
data_generators.add(surface.x_data_generator)
data_generators.add(surface.y_data_generator)
data_generators.add(surface.z_data_generator)
else:
raise NotImplementedError('Output of type {} is not supported.'.format(output.__class__.__name__))
if None in data_generators:
data_generators.remove(None)
return data_generators | d05fde5b5ce25504b53d8ca4491235d3ab3b8680 | 17,086 |
def MarkovChainFunction(data, bins):
""" Data should be numpy array; bins is an integer """
#Normalize data
datMin = min(data)
datMax = max(data)
datNorm = (data - datMin)/(datMax - datMin)
# Create Markov Transition Table:
mesh = np.linspace(0, 1, bins)
meshReal = (mesh*(datMax - datMin) + datMin)
dmesh = mesh[1] - mesh[0]
dmeshReal = meshReal[1] - meshReal[0]
markovArray = np.zeros((len(mesh), len(mesh)))
cumMarkovArray = np.zeros((len(mesh), len(mesh)))
# Populate Markov Transition Table:
for i in range(1,(len(data)-1)):
datNow = datNorm[i]
datBefore = datNorm[i-1]
dn = np.floor(datNow / dmesh) # Get index....TODO: DO WE NOT WANT TO ROUND DOWN**? Ask Aaron
db = np.floor(datBefore / dmesh) # Get index
markovArray[int(db), int(dn)] = markovArray[int(db), int(dn)] + 1; #Update MTT
# Transform data in transition table to probability:
markovArray = markovArray/np.sum(markovArray, axis=1, keepdims = True) #? from https://stackoverflow.com/questions/16202348/numpy-divide-row-by-row-sum
cumMarkovArray = np.cumsum(markovArray, axis=1)
# Eliminate potential NaNs from potential /0:
ind = np.isnan(markovArray)
markovArray[ind] = 0
cumMarkovArray[ind] = 0
return markovArray, cumMarkovArray, datMin, datMax, dmeshReal | 09a73103e3dc6c3412803b58c8823ac1459921aa | 17,087 |
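A usage sketch on a synthetic random-walk series; the data itself is arbitrary, the point is the shape of the returned transition tables.

import numpy as np

rng = np.random.default_rng(0)
data = np.cumsum(rng.normal(size=1000))
markov, cum_markov, dat_min, dat_max, dmesh_real = MarkovChainFunction(data, bins=10)
print(markov.shape)      # (10, 10) row-normalized transition probabilities
print(cum_markov.shape)  # (10, 10) cumulative version, handy for sampling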
import pandas as pd
from sklearn import metrics as M
def clustering_report(y_true, y_pred) -> pd.DataFrame:
"""
Generate cluster evaluation metrics.
Args:
y_true: Array of actual labels
y_pred: Array of predicted clusters
Returns:
Pandas DataFrame with metrics.
"""
return pd.DataFrame(
{
"Homogeneity": M.homogeneity_score(y_true, y_pred),
"Completeness": M.completeness_score(y_true, y_pred),
"V-Measure": M.v_measure_score(y_true, y_pred),
"Adjusted Rand Index": M.adjusted_rand_score(y_true, y_pred),
"Adjusted Mutual Information": M.adjusted_mutual_info_score(y_true, y_pred),
},
index=["value"],
).T | dc124dc4f248a2acedfd6201a205f285adc6ec1c | 17,088 |
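A usage sketch scoring k-means labels against the true iris species; all five metrics are invariant to how the cluster labels are permuted.

from sklearn.cluster import KMeans
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
pred = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(X)
print(clustering_report(y, pred))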
from typing import List
from typing import Sequence
from typing import Set
from typing import Tuple
def _create_sampler_data(
datastores: List[Datastore], variables: Sequence[Variable],
preconditions: Set[LiftedAtom], add_effects: Set[LiftedAtom],
delete_effects: Set[LiftedAtom], param_option: ParameterizedOption,
datastore_idx: int
) -> Tuple[List[SamplerDatapoint], List[SamplerDatapoint]]:
"""Generate positive and negative data for training a sampler."""
# Populate all positive data.
positive_data: List[SamplerDatapoint] = []
for (segment, var_to_obj) in datastores[datastore_idx]:
option = segment.get_option()
state = segment.states[0]
if CFG.sampler_learning_use_goals:
# Right now, we're making the assumption that all data is
# demonstration data when we're learning samplers with goals.
# In the future, we may weaken this assumption.
goal = segment.get_goal()
else:
goal = None
assert all(
pre.predicate.holds(state, [var_to_obj[v] for v in pre.variables])
for pre in preconditions)
positive_data.append((state, var_to_obj, option, goal))
# Populate all negative data.
negative_data: List[SamplerDatapoint] = []
if CFG.sampler_disable_classifier:
# If we disable the classifier, then we never provide
# negative examples, so that it always outputs 1.
return positive_data, negative_data
for idx, datastore in enumerate(datastores):
for (segment, var_to_obj) in datastore:
option = segment.get_option()
state = segment.states[0]
if CFG.sampler_learning_use_goals:
# Right now, we're making the assumption that all data is
# demonstration data when we're learning samplers with goals.
# In the future, we may weaken this assumption.
goal = segment.get_goal()
else:
goal = None
trans_add_effects = segment.add_effects
trans_delete_effects = segment.delete_effects
if option.parent != param_option:
continue
var_types = [var.type for var in variables]
objects = list(state)
for grounding in utils.get_object_combinations(objects, var_types):
if len(negative_data
) >= CFG.sampler_learning_max_negative_data:
# If we already have more negative examples
# than the maximum specified in the config,
# we don't add any more negative examples.
return positive_data, negative_data
# If we are currently at the datastore that we're learning a
# sampler for, and this datapoint matches the positive
# grounding, this was already added to the positive data, so
# we can continue.
if idx == datastore_idx:
positive_grounding = [var_to_obj[var] for var in variables]
if grounding == positive_grounding:
continue
sub = dict(zip(variables, grounding))
# When building data for a datastore with effects X, if we
# encounter a transition with effects Y, and if Y is a superset
# of X, then we do not want to include the transition as a
# negative example, because if Y was achieved, then X was also
# achieved. So for now, we just filter out such examples.
ground_add_effects = {e.ground(sub) for e in add_effects}
ground_delete_effects = {e.ground(sub) for e in delete_effects}
if ground_add_effects.issubset(trans_add_effects) and \
ground_delete_effects.issubset(trans_delete_effects):
continue
# Add this datapoint to the negative data.
negative_data.append((state, sub, option, goal))
return positive_data, negative_data | ecf7ed06183264722df5d6e2d645bb899cb8358b | 17,089 |
def call_assign_job(job_id, mex_id):
""" Function to send an update to the MEx Sentinel to assign a Job to an MEx. """
try:
rospy.wait_for_service('/mex_sentinel/assign_job_to_mex', rospy.Duration(1))
try:
assign_job = rospy.ServiceProxy('mex_sentinel/assign_job_to_mex', AssignJobToMex)
req = AssignJobToMexRequest()
req.job_id = job_id
req.mex_id = mex_id
result = assign_job(req)
return result
except rospy.ServiceException as e:
print(NAME + "Service call failed: %s"%e)
except rospy.ROSException:
pass | 9c5b2aa27e8d04949fbb4c5a2c9eb2ac86ccd9a7 | 17,090 |
def full(
coords, nodata=np.nan, dtype=np.float32, name=None, attrs={}, crs=None, lazy=False
):
"""Return a full DataArray based on a geospatial coords dictionary.
Arguments
---------
coords: sequence or dict of array_like, optional
Coordinates (tick labels) to use for indexing along each dimension (max 3).
The coordinate sequence should be (dim0, y, x) of which the first is optional.
nodata: float, int, optional
        Fill value for the new DataArray, defaults to np.nan
dtype: numpy.dtype, optional
Data type
name: str, optional
DataArray name
attrs : dict, optional
additional attributes
crs: int, dict, or str, optional
Coordinate Reference System. Accepts EPSG codes (int or str); proj (str or dict)
lazy: bool, optional
If True return DataArray with a dask rather than numpy array.
Returns
-------
da: DataArray
Filled DataArray
"""
    f = dask.array.full if lazy else np.full
dims = tuple([d for d in coords])
shape = tuple([coords[dim].size for dim in dims])
data = f(shape, nodata, dtype=dtype)
da = xr.DataArray(data, coords, dims, name, attrs)
da.raster.set_nodata(nodata)
da.raster.set_crs(crs)
return da | 41bb4fce22a8dd280dee0d4891ff81bd88d263b5 | 17,091 |
import os
def calculate_alignment(
sequences, mode, matrix, gapopen, gapextend, hash=uuid4().hex):
"""
1 - remove modifications
2 - convert sequence
3 - muscle - msa
4 - revert sequences
5 - add original modifications
"""
new_file_lines = []
for i, element in enumerate(sequences):
name, sequence, structure = element
unmodified_sequence = remove_modifications(sequence)
converted_sequence = convert_sequence(
unmodified_sequence, structure, mode)
new_file_lines.append('>{}'.format(str(i)))
new_file_lines.append('{}'.format(converted_sequence))
new_file_content = "\n".join(new_file_lines)
temp_name_in = os.path.join(
os.getcwd(), 'temp_1_{}'.format(hash))
temp_name_out = os.path.join(
os.getcwd(), 'temp_2_{}'.format(hash))
with open(temp_name_in, 'w') as f:
f.write(new_file_content)
command = 'muscle -in {} -out {} -matrix {} -gapopen {} ' \
'-gapextend {} -center 0.0'.format(
temp_name_in, temp_name_out, matrix, gapopen, gapextend)
os.system(command)
new_sequences = []
with open(temp_name_out, 'r') as f:
counter = 0
name = None
sequence = ''
for line in f.readlines():
if line.startswith('>'):
if counter != 0 and len(line.strip()) > 0:
my_id = int(name.replace('>', ''))
original_sequence = sequences[my_id][1]
original_name = sequences[my_id][0]
new_sequence, new_structure = revert_sequence(
sequence, original_sequence, mode)
new_sequence = add_original_modifications(
new_sequence, original_sequence)
new_sequences.append(
(original_name, new_sequence, new_structure))
sequence = ''
name = line.strip()
else:
sequence += line.strip()
counter += 1
my_id = int(name.replace('>', ''))
original_sequence = sequences[my_id][1]
original_name = sequences[my_id][0]
new_sequence, new_structure = revert_sequence(
sequence, original_sequence, mode)
new_sequence = add_original_modifications(
new_sequence, original_sequence)
new_sequences.append((original_name, new_sequence, new_structure))
os.remove(temp_name_in)
os.remove(temp_name_out)
return new_sequences | 93851e4e2afd9fd88665fc418d0ef680dc41847f | 17,092 |
from typing import Union
from typing import List
from typing import Tuple
from typing import Dict
def add_weight_decay(
model: nn.Module, weight_decay: float = 1e-5, skip_list: Union[List, Tuple] = ()
) -> List[Dict]:
"""Helper function to not decay weights in BatchNorm layers
Source: https://discuss.pytorch.org/t/weight-decay-in-the-optimizers-is-a-bad-idea-especially-with-batchnorm/16994/3
"""
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
if len(param.shape) == 1 or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{"params": no_decay, "weight_decay": 0.0},
{"params": decay, "weight_decay": weight_decay},
] | 27efae02eaaf0bdc94f3763c1069165c47e08acb | 17,093 |
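A usage sketch feeding the two parameter groups to a PyTorch optimizer, so 1-D parameters (BatchNorm weights and biases) are excluded from weight decay; the tiny model is illustrative only.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
param_groups = add_weight_decay(model, weight_decay=1e-4)
optimizer = torch.optim.SGD(param_groups, lr=0.1, momentum=0.9)
print([len(g["params"]) for g in param_groups])  # [3, 1]: conv bias + BN params vs. conv weight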
def find_bands_hdu(hdu_list, hdu):
"""Discover the extension name of the BANDS HDU.
Parameters
----------
hdu_list : `~astropy.io.fits.HDUList`
hdu : `~astropy.io.fits.BinTableHDU` or `~astropy.io.fits.ImageHDU`
Returns
-------
hduname : str
Extension name of the BANDS HDU. None if no BANDS HDU was found.
"""
if "BANDSHDU" in hdu.header:
return hdu.header["BANDSHDU"]
has_cube_data = False
if (
isinstance(hdu, (fits.ImageHDU, fits.PrimaryHDU))
and hdu.header.get("NAXIS", None) == 3
):
has_cube_data = True
elif isinstance(hdu, fits.BinTableHDU):
if (
hdu.header.get("INDXSCHM", "") in ["EXPLICIT", "IMPLICIT", ""]
and len(hdu.columns) > 1
):
has_cube_data = True
if has_cube_data:
if "EBOUNDS" in hdu_list:
return "EBOUNDS"
elif "ENERGIES" in hdu_list:
return "ENERGIES"
return None | 3b170109d199482c651861764b0ec21a44aa7933 | 17,094 |
def read_raw_binary_file(file_path):
"""can actually be any file"""
with open(file_path, 'rb') as f:
return f.read() | b03bc1d4c00f9463ded0ea022023e66fd298a7ad | 17,095 |
def encode_cl_value(entity: CLValue) -> dict:
"""Encodes a CL value.
"""
def _encode_parsed(type_info: CLType) -> str:
if type_info.typeof in TYPES_NUMERIC:
return str(int(entity.parsed))
elif type_info.typeof == CLTypeKey.BYTE_ARRAY:
return entity.parsed.hex()
elif type_info.typeof == CLTypeKey.PUBLIC_KEY:
return entity.parsed.account_key.hex()
elif type_info.typeof == CLTypeKey.UREF:
return entity.parsed.as_string()
elif type_info.typeof == CLTypeKey.OPTION:
return _encode_parsed(type_info.inner_type)
else:
return str(entity.parsed)
return {
"bytes": serialisation.to_bytes(entity).hex(),
"cl_type": encode_cl_type(entity.cl_type),
"parsed": _encode_parsed(entity.cl_type),
} | 09d75f9552347e4fd121dcd1a57f26ac46756870 | 17,096 |
def escape(string):
""" Escape a passed string so that we can send it to the
regular expressions engine.
"""
ret = None
def replfunc(m):
if ( m[0] == "\\" ):
return("\\\\\\\\")
else:
return("\\\\" + m[0])
# @note - I had an issue getting replfunc to be called in
# javascript correctly when I didn't use this pragma
# not sure if I was just doing it wrong or what
__pragma__(
'js', '{}',
'''
var r = /[^A-Za-z:;\d]/g;
ret = string.replace(r, replfunc);
''')
if ( ret is not None ):
return(ret)
else:
raise Exception("Failed to escape the passed string") | c2682757fec2ddaefb32bb792fee44dd63c539fd | 17,097 |
def batch_apply(fn, inputs):
"""Folds time into the batch dimension, runs fn() and unfolds the result.
Args:
fn: Function that takes as input the n tensors of the tf.nest structure,
with shape [time*batch, <remaining shape>], and returns a tf.nest
structure of batched tensors.
inputs: tf.nest structure of n [time, batch, <remaining shape>] tensors.
Returns:
tf.nest structure of [time, batch, <fn output shape>]. Structure is
determined by the output of fn.
"""
time_to_batch_fn = lambda t: tf.reshape(t, [-1] + t.shape[2:].as_list())
batched = tf.nest.map_structure(time_to_batch_fn, inputs)
output = fn(*batched)
prefix = [int(tf.nest.flatten(inputs)[0].shape[0]), -1]
batch_to_time_fn = lambda t: tf.reshape(t, prefix + t.shape[1:].as_list())
return tf.nest.map_structure(batch_to_time_fn, output) | 4cc220a7891f236dc6741e9c203862c5ee33e978 | 17,098 |
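A usage sketch folding a [time, batch, features] tensor through a Dense layer; the inputs are passed as a one-element nest so that `fn(*batched)` receives a single [time*batch, features] tensor. Shapes are illustrative.

import tensorflow as tf

inputs = (tf.random.normal([5, 2, 3]),)        # time=5, batch=2, features=3
dense = tf.keras.layers.Dense(4)
out = batch_apply(lambda x: dense(x), inputs)  # fn sees a [10, 3] tensor
print(out.shape)                               # (5, 2, 4)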
def grab_haul_list(creep: Creep, roomName, totalStructures, add_storage=False):
"""
    Build the list of targets that the hauler above will fill with energy.
    :param creep:
    :param roomName: name of the room.
    :param totalStructures: same as all_structures in the main body
    :param add_storage: include the storage? No need to include it unless priority == 0.
    :return: list of targets for the hauler to fill with energy
"""
# defining structures to fill the energy on. originally above of this spot but replaced for cpu eff.
# towers only fills 80% since it's gonna repair here and there all the time.
structures = totalStructures.filter(lambda s: ((s.structureType == STRUCTURE_SPAWN
or s.structureType == STRUCTURE_EXTENSION)
and s.energy < s.energyCapacity)
or (s.structureType == STRUCTURE_TOWER
and s.energy < s.energyCapacity * 0.8)
or (s.structureType == STRUCTURE_TERMINAL
and s.store[RESOURCE_ENERGY] < 10000))
    # Add the storage only when there is still room to put energy into it.
    # Original criterion: the storage's free capacity is at least max_energy
    # Changed: the storage has any free capacity at all
if add_storage:
structures.extend(totalStructures.filter
(lambda s: s.structureType == STRUCTURE_STORAGE
# and s.storeCapacity - _.sum(s.store) >= Game.rooms[roomName].memory.structure_type[max_energy]))
and s.storeCapacity - _.sum(s.store) > 0))
    # Should energy be put into the nuker?
if Memory.rooms[roomName].options and Memory.rooms[roomName].options.fill_nuke:
nuke_structure_add = totalStructures.filter(lambda s: s.structureType == STRUCTURE_NUKER
and s.energy < s.energyCapacity)
structures.extend(nuke_structure_add)
    # Should energy be put into the labs?
if Memory.rooms[roomName].options and Memory.rooms[roomName].options.fill_labs:
structure_add = totalStructures \
.filter(lambda s: s.structureType == STRUCTURE_LAB and s.energy < s.energyCapacity)
structures.extend(structure_add)
container = []
    # for_upgrade: a container kept in case the storage sits far away from the controller.
    # Only looked for below controller level 8 and when the creep carries energy
if Game.rooms[roomName].controller.level < 8 and creep.store.getCapacity(RESOURCE_ENERGY):
for rcont in Game.rooms[roomName].memory[STRUCTURE_CONTAINER]:
cont_obj = Game.getObjectById(rcont.id)
if not cont_obj:
continue
            # Upgrade container and not a harvest-storage one? Then the hauler fills it, but only while it is at most 2/3 full.
if rcont.for_upgrade and not rcont.for_harvest \
and cont_obj.store.getUsedCapacity() < cont_obj.store.getCapacity() * 2 / 3:
                # However, don't fill it if the room could build a storage (level 4+) but has none yet.
                # Also skip while the room's energy is not full
if 4 <= creep.room.controller.level \
and not Game.getObjectById(creep.memory.upgrade_target).room.storage \
or creep.room.energyAvailable < creep.room.energyCapacityAvailable * .95:
continue
container.append(Game.getObjectById(rcont.id))
structures.extend(container)
return structures | d1d944c221089363a7e546bdc03dd51cd178fc35 | 17,099 |