content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import requests
import random
def handle(req):
"""handle a request to the function
Args:
req (str): request body
"""
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"]) - 1)
name = result["people"][index]["name"]
return "{} is in space".format(name) | 7d951443bc5b6f3db86602d635a8c9ce84b703fb | 14,400 |
import hashlib
def get_file_hashsum(file_name: str):
"""Generate a SHA-256 hashsum of the given file."""
hash_sha256 = hashlib.sha256()
with open(file_name, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_sha256.update(chunk)
return hash_sha256.hexdigest() | d515b6b7b743396240ada8d888b8bfbc4c316373 | 14,401 |
def generate_primes(n):
"""Generates a list of prime numbers up to `n`
"""
global PRIMES
k = PRIMES[-1] + 2
while k <= n:
primes_so_far = PRIMES[:]
divisible = False
for p in primes_so_far:
if k % p == 0:
divisible = True
break
if not divisible:
PRIMES.append(k)
k += 2
return PRIMES | 4ea29f8dc8dad11bf6c71fcb1dcc6b75b91bece5 | 14,402 |
def Convex(loss, L2_reg):
"""
loss: src_number loss
[loss_1, loss_2, ... loss_src_number]
"""
src_number = len(loss)
lam = cp.Variable(src_number)
prob = cp.Problem(
cp.Minimize(lam @ loss + L2_reg * cp.norm(lam, 2)), [cp.sum(lam) == 1, lam >= 0]
)
# prob.solve()
prob.solve(solver="SCS")
lam_optimal = lam.value
return lam_optimal | f2a6ecc464e2f87684d1537775816d22dc30d837 | 14,403 |
from limitlessled.pipeline import Pipeline
def state(new_state):
"""State decorator.
Specify True (turn on) or False (turn off).
"""
def decorator(function):
"""Decorator function."""
# pylint: disable=no-member,protected-access
def wrapper(self, **kwargs):
"""Wrap a group state change."""
pipeline = Pipeline()
transition_time = DEFAULT_TRANSITION
# Stop any repeating pipeline.
if self.repeating:
self.repeating = False
self.group.stop()
# Not on and should be? Turn on.
if not self.is_on and new_state is True:
pipeline.on()
# Set transition time.
if ATTR_TRANSITION in kwargs:
transition_time = kwargs[ATTR_TRANSITION]
# Do group type-specific work.
function(self, transition_time, pipeline, **kwargs)
# Update state.
self._is_on = new_state
self.group.enqueue(pipeline)
self.schedule_update_ha_state()
return wrapper
return decorator | 156b7bbad0a943af6bb4280e4fdb1dde2b6e320a | 14,404 |
def rotTransMatrixNOAD(axis, s, c, t):
"""
build a rotate * translate matrix - MUCH faster for derivatives
since we know there are a ton of zeros and can act accordingly
:param axis: x y or z as a character
:param s: sin of theta
:param c: cos of theta
:param t: translation (a 3 tuple)
:return:
"""
if axis == "Z" or axis == "z":
return N.array([[c, -s, 0, c * t[0] - s * t[1]],
[s, c, 0, s * t[0] + c * t[1]],
[0, 0, 1, t[2]],
[0, 0, 0, 1]])
elif axis == "Y" or axis == "y":
return N.array([[c, 0, s, c * t[0] + s * t[2]],
[0, 1, 0, t[1]],
[-s, 0, c, c * t[2] - s * -t[0]],
[0, 0, 0, 1]])
elif axis == "X" or axis == "x":
return N.array([[1, 0, 0, t[0]],
[0, c, -s, c * t[1] - s * t[2]],
[0, s, c, s * t[1] + c * t[2]],
[0, 0, 0, 1]])
else:
print "Unsupported Axis:", axis
raise NotImplementedError | f6e8d6474ba90e3a253229124e0571d67025c818 | 14,405 |
def angular_diameter_distance(z, cosmo=None):
""" Angular diameter distance in Mpc at a given redshift.
This gives the proper (sometimes called 'physical') transverse
distance corresponding to an angle of 1 radian for an object at
redshift `z`.
Parameters
----------
z : array_like
Input redshifts.
Returns
-------
angdist : astropy.units.Quantity
Angular diameter distance at each input redshift.
"""
if cosmo is None:
cosmo = _get_current()
return cosmo.angular_diameter_distance(z) | 694f9fcaa6ce2585315f63e69351ac47589e248c | 14,406 |
def main(stdin):
"""
Take sorted standard in from Hadoop and return lines.
Value is just a place holder.
"""
for line_num in stdin:
# Remove trailing newlines.
line_num = line_num.rstrip()
# Omit empty lines.
try:
(line, num) = line_num.rsplit('\t', 1)
print(("{line}\t{num}").format(line=line, num=num))
except ValueError:
pass
return None | 811e184d9425c1c76681c823b463b99ebde2c25c | 14,407 |
def _ignore_module_import_frames(file_name, name, line_number, line):
"""
Ignores import frames of extension loading.
Parameters
----------
file_name : `str`
The frame's respective file's name.
name : `str`
The frame's respective function's name.
line_number : `int`
The line's index where the exception occurred.
line : `str`
The frame's respective stripped line.
Returns
-------
should_show_frame : `bool`
Whether the frame should be shown.
"""
should_show_frame = True
if file_name.startswith('<') and file_name.endswith('>'):
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_FILE_PATH:
if name == '_load':
if line == 'loaded = self._load_module()':
should_show_frame = False
elif name == '_load_module':
if line == 'spec.loader.exec_module(module)':
should_show_frame = False
elif file_name == EXTENSION_LOADER_EXTENSION_LOADER_FILE_PATH:
if name == '_extension_loader_task':
if line in (
'module = await KOKORO.run_in_executor(extension._load)',
'await entry_point(module)',
'entry_point(module)',
):
should_show_frame = False
elif name == '_extension_unloader_task':
if line in (
'await exit_point(module)',
'exit_point(module)',
):
should_show_frame = False
return should_show_frame | 8f3e506e99b32d2945ea665367d5447ef1b05732 | 14,408 |
def get_tf_tensor_data(tensor):
"""Get data from tensor."""
assert isinstance(tensor, tensor_pb2.TensorProto)
is_raw = False
if tensor.tensor_content:
data = tensor.tensor_content
is_raw = True
elif tensor.float_val:
data = tensor.float_val
elif tensor.dcomplex_val:
data = tensor.dcomplex_val
elif tensor.int_val:
data = tensor.int_val
elif tensor.int64_val:
data = tensor.int64_val
elif tensor.bool_val:
data = tensor.bool_val
elif tensor.dtype == tf.int32:
data = [0]
elif tensor.dtype == tf.int64:
data = [0]
elif tensor.dtype == tf.float32:
data = [0.]
elif tensor.dtype == tf.float16:
data = [0]
elif tensor.string_val:
data = tensor.string_val
else:
raise ValueError('tensor data not supported')
return [is_raw, data] | f2d62a7ccba252d5c94cd9979eb01a0b44282d4a | 14,409 |
def _createPhraseMatchList(tree1, tree2, matchList, doEquivNodes=False):
"""
Create the list of linked phrases between tree1 and tree2
"""
phraseListTxt1 = tree1.getPhrases()
phraseListHi1 = tree2.getPhrases()
if PRINT_DEBUG or PRINT_DEBUG_SPLIT:
print "\nPhrase 1 nodes:"
printAllPhraseInfo(phraseListTxt1)
print "\nPhrase 2 nodes:"
printAllPhraseInfo(phraseListHi1)
# Match phrases based on word content
# match exact phrases first
matchList.extend( _phraseMatchListExactText(phraseListTxt1, phraseListHi1) )
if PRINT_MATCH_LIST:
        print(matchList)
        print("Exact phrase matching:")
printAllMatchListInfo(matchList)
# match based on headwords
matchList.extend(_phraseMatchListHeadwords(phraseListTxt1, phraseListHi1,stoplist=True))
matchList = refineML_TopLevelMatch(matchList)
matchList = refineML_RemoveDuplicates(matchList)
bestMatchList = matchList[:] # copy of matches we believe
if PRINT_MATCH_LIST:
print "*** raw match list, after identical phrases matched"
printAllMatchListInfo(matchList)
print "----------"
# relatively safe matches are completed
# now build up tree
continueMatching = True
watchdogLoopCounter = 0
    while continueMatching:
        watchdogLoopCounter += 1
        if watchdogLoopCounter > 10: raise ValueError("watchdog for match list creation")
_oldMLLength = len(matchList) # only for debugging, can compare to newML length
newML = []
# Link parent nodes together as well
# including stop words
newML.extend(linkParentNodes(matchList, matchList))
if PRINT_MATCH_LIST:
print "*** match list, after parent phrases matched"
printAllMatchListInfo(newML)
# Link equivalent higher nodes
# generally this is no longer needed, now that we can contract trees
# It is still needed if we are describing links to more than one target
if doEquivNodes:
mequiv = linkHigherEquivalentNodes(matchList)
if PRINT_MATCH_LIST:
print "*** equivalent nodes"
printAllMatchListInfo(mequiv)
# raise SystemExit
newML.extend(mequiv)
if PRINT_MATCH_LIST: printAllMatchListInfo(newML)
newML.extend(linkParentNodesOfSingleChildren(matchList, phraseListTxt1, phraseListHi1 ))
newML.extend(linkParentNodesOfSingleTargetChild(matchList, phraseListTxt1, phraseListHi1 ))
newML.extend(linkParentNodesOfSingleLinkedChildren(matchList, phraseListTxt1, phraseListHi1 ))
# Link child nodes that may not be independent phrases
# but which do have identical word content
# Working with highlights rather than sentences
# as it's more important to match all the phrases of the highlight
# nodesAlreadyMatched = [ n2 for (n1,n2) in matchList ]
for (ph1, ph2) in matchList:
if ph1.isLeaf() or ph2.isLeaf(): continue
newML.extend(linkIdenticalNodes(ph1,ph2,matchList))
newML.extend(linkIdenticalWords(ph1,ph2,matchList))
if PRINT_MATCH_LIST:
print "*** After further linking nodes"
printAllMatchListInfo(newML)
# Remove any rules that involve a change to top level phrase type
# We think that the only rules worth learning keep the
# top level phrase element the same
newML = refineML_TopLevelMatch(newML)
newML = refineML_RemoveDuplicates(newML)
newML = refineML_RemoveKnownMatches(newML, matchList)
# newML = refineML_RemoveMissedProperNouns(newML, matchList)
matchList.extend(newML)
matchList = topDownConsistency(tree1, tree2, matchList, bestMatchList)
# check to see what this iteration has done
newMLAcceptedLinks = [ m for m in newML if m in matchList ]
if len(newMLAcceptedLinks)==0: continueMatching=False
if PRINT_MATCH_LIST:
            print()
            print("After refining matchList so top levels match")
            print("New matches:")
            printAllMatchListInfo(newML)
            print("New matches that were accepted:")
            printAllMatchListInfo(newMLAcceptedLinks)
            print("Full set of matches:")
            printAllMatchListInfo(matchList)
# TODO: make a consistent tree
# raise SystemExit,"End of while loop"
matchListRefined = topDownConsistency(tree1, tree2, matchList, bestMatchList)
matchList = matchListRefined
if PRINT_MATCH_LIST:
        print()
        print("After refining matchList after making consistent top down")
printAllMatchListInfo(matchList)
# raise SystemExit,"topDownConsistency"
return matchList | 99d7e6fb13120ea84b671331940fc232cf061786 | 14,410 |
def coth(x):
"""
Return the hyperbolic cotangent of x.
"""
return 1.0/tanh(x) | 92d490563c8595b8c11334cd38afbf9ef389dfe8 | 14,411 |
def param_is_numeric(p):
"""
Test whether any parameter is numeric; functionally, determines if any
parameter is convertible to a float.
:param p: An input parameter
:return:
"""
try:
float(p)
return True
except ValueError:
return False | b92579ba019389cf21002b63ca6e2ebdfad7d86f | 14,412 |
def convert_graph_to_angular_abstract_graph(graph: Graph, simple_graph=True, return_tripel_edges=False) -> Graph:
"""Converts a graph into an abstract angular graph
Can be used to calculate a path tsp
Arguments:
graph {Graph} -- Graph to be converted
simple_graph {bool} -- Indicates if graph is simple
return_tripel_edges {bool} -- Also return translation for original edges to abstract
Returns:
Graph -- Converted abstract graph
"""
# create a vertex for every edge in the original graph
# For geometric instances, only one direction of edges is needed
vertices = np.array([[u, v] for u, v in graph.edges if u < v])
edges = {}
tripel_edges = {}
for i, vertex in enumerate(vertices):
ran = range(i+1, len(vertices)) if simple_graph else range(len(vertices))
for j in ran:
if j == i:
continue
other = vertices[j]
if np.intersect1d(vertex, other).size > 0:
shared_vertex = np.intersect1d(vertex, other)
non_shared = np.setdiff1d(np.hstack([vertex, other]), shared_vertex)
edges[(i, j)] = get_angle(
graph.vertices[shared_vertex],
graph.vertices[non_shared[0]],
graph.vertices[non_shared[1]]
)
if return_tripel_edges:
from_vertex = np.intersect1d(vertex, non_shared)
to_vertex = np.intersect1d(other, non_shared)
edge = (*from_vertex, *to_vertex)
tripel_edges[(*shared_vertex, *edge)] = (i, j)
graph = Graph(vertices, edges.keys(), c=edges)
if return_tripel_edges:
return (tripel_edges, graph)
return graph | 2f81743824549d8e19f70f1843d6449eb12e7e5d | 14,413 |
def login_to_site(url, username, password, user_tag, pass_tag):
"""
:param url:
:param username:
:param password:
:param user_tag:
:param pass_tag:
:return: :raise:
"""
browser = mechanize.Browser(factory=mechanize.RobustFactory())
browser.set_handle_robots(False)
browser.set_handle_referer(True)
browser.set_handle_refresh(True)
browser.set_handle_robots(False)
browser.open(url)
# noinspection PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable,PyCallingNonCallable
browser.select_form(nr=0)
browser["USER"] = username
browser["password"] = password
# noinspection PyCallingNonCallable
browser.submit()
# noinspection PyCallingNonCallable
if "Case Search Login Error" in browser.response().get_data():
raise ValueError("Could not login to PACER Case Search. Check your "
"username and password")
print ("You are logged on to the Public Access to Court Electronic "
"Records (PACER) Case Search website as " + username + ". All costs "
"will be billed to this account.")
return browser | 6c906e037a619031eb45bb26f01da71833fa3e41 | 14,414 |
async def test_db(
service: Service = Depends(Service)
) -> HTTPSuccess:
"""Test the API to determine if the database is connected."""
    if service.test() is not None:
return { "message": "Database connected." }
else:
return {"message": "Database not connected." } | 67e4530af6959fef03f55879ee3781ebf993f11c | 14,415 |
from tqdm import tqdm
def model_fit_predict():
"""
Training example was implemented according to machine-learning-mastery forum
The function takes data from the dictionary returned from splitWindows.create_windows function
https://machinelearningmastery.com/stateful-stateless-lstm-time-series-forecasting-python/
:return: np.array of predictions
"""
X, y, test_input = windows_dict['X'], windows_dict['y'], windows_dict['X_test']
# Predictions are stored in a list
predictions = []
with tqdm(total=X.shape[0], desc="Training the model, saving predictions") as progress_bar:
# Save model History in order to check error data
history = History()
# build model framework
current_model = model_builder(X)
# Make predictions for each window
for i in range(X.shape[0]):
# TRAIN (FIT) model for each epoch
# history = current_model.fit(
# input_X[i], target_X[i],
# epochs=_epochs, batch_size=batch,
# verbose=0, shuffle=False, validation_split=0.1,
# callbacks=[history]
# )
# print(X[i].shape, X[i].dtype, y[i].shape, y[i].dtype)
for e in range(epochs):
current_model.fit(
X[i], y[i],
epochs=1, batch_size=batch,
verbose=0, shuffle=False,
callbacks=[history]
)
current_model.reset_states()
# PREDICT and save results
predictions.append(
current_model.predict(test_input[i], batch_size=batch_test, verbose=0)
)
progress_bar.update(1)
return np.asarray(predictions) | 753ca4e90034864e709809cc1bd2f30640554f28 | 14,416 |
from functools import partial
import multiprocessing as mp
import gc
def mp_variant_annotations(df_mp, df_split_cols='', df_sampleid='all',
drop_hom_ref=True, n_cores=1):
"""
Multiprocessing variant annotations
see variantAnnotations.process_variant_annotations for description of annotations
This function coordinates the annotation of variants using the
multiprocessing library.
Parameters
---------------
df_mp: pandas df, required
VCF DataFrame
df_split_cols: dict, optional
key:FORMAT id value:#fields expected
e.g. {'AD':2} indicates Allelic Depth should be
split into 2 columns.
df_sampleid: list, required
list of sample_ids, can be 'all'
drop_hom_ref: bool, optional
specifies whether to drop all homozygous reference
variants from dataframe.
FALSE REQUIRES LARGE MEMORY FOOTPRINT
n_cores: int, optional
Number of multiprocessing jobs to start.
Be careful as memory is copied to each process, RAM intensive
"""
print('starting multiprocessing')
pool = mp.Pool(int(n_cores))
# tasks = np.array_split(df_mp.copy(), int(n_cores)) #breaks with older
# pandas/numpy
dfs = df_split(df_mp.copy(), int(n_cores))
mp_process = partial(process_variant_annotations, sample_id=df_sampleid,
split_columns=df_split_cols, drop_hom_ref=drop_hom_ref)
results = []
del df_mp
gc.collect()
r = pool.map_async(mp_process, \
dfs, callback=results.append)
r.wait()
pool.close()
pool.join()
pool.terminate()
print('multiprocessing complete')
res_df = pd.concat([df for df in results[0] if len(df) > 0])
cat_cols = ['vartype1', 'vartype2', 'a1', 'a2', \
'GT1', 'GT2', 'GT','sample_ids', 'zygosity']
res_df.loc[:, cat_cols] = res_df[cat_cols].astype('category')
return res_df | 8569a4eb82ff04db1bee46b00bbd9eedf8b4d094 | 14,417 |
def find_attachments(pattern, cursor):
"""Return a list of attachments that match the specified pattern.
Args:
pattern: The path to the attachment, as a SQLite pattern (to be
passed to a LIKE clause).
cursor: The Cursor object through which the SQLite queries are
sent to the Zotero database.
Returns:
A list of (parentItemID, path) pairs that match the specified
pattern. The returned list is empty if no matches are found.
"""
query = 'SELECT parentItemID, path FROM itemAttachments WHERE path LIKE ?'
cursor.execute(query, (pattern,))
return list(cursor) | 614649f6fd5972b026b191bb1a272e270dedffe5 | 14,418 |
def generate_symmetric_matrix(n_unique_action: int, random_state: int) -> np.ndarray:
"""Generate symmetric matrix
Parameters
-----------
n_unique_action: int (>= len_list)
Number of actions.
random_state: int
Controls the random seed in sampling elements of matrix.
Returns
---------
symmetric_matrix: array-like, shape (n_unique_action, n_unique_action)
"""
random_ = check_random_state(random_state)
base_matrix = random_.normal(scale=5, size=(n_unique_action, n_unique_action))
symmetric_matrix = (
np.tril(base_matrix) + np.tril(base_matrix).T - np.diag(base_matrix.diagonal())
)
return symmetric_matrix | acb5d537762f2f306be8f300845dc6560c1dd121 | 14,419 |
def model_fields_map(model, fields=None, exclude=None, prefix='', prefixm='', attname=True, rename=None):
"""
    Based on the given model, returns a list of tuples containing the ORM path to each field
    and the name under which that field should appear in the result.
    Only regular fields are handled; m2m and generic fields are not included.
    ARGUMENTS:
        :param model: model or model instance from which the field list is built
        :param None | collections.Container fields: list of fields to take from the model
        :param None | collections.Container exclude: list of fields to skip
        :param str prefix: ORM path at which the model sits in the query
        :param str prefixm: prefix added to the field name in the result
        :param bool attname: use name (model) or attname (model_id); these differ for foreign keys
        :param dict rename: mapping used to rename fields
:rtype: list[tuple[str]]
"""
data = []
rename = rename or {}
attribute = 'attname' if attname else 'name'
for f in model._meta.concrete_fields:
if fields and f.attname not in fields and f.name not in fields:
continue
if exclude and f.attname in exclude and f.name not in exclude:
continue
param_name = getattr(f, attribute)
new_param_name = rename[param_name] if param_name in rename else param_name
data.append(('{}{}'.format(prefix, param_name), '{}{}'.format(prefixm, new_param_name)))
return data | 812247543e5f714e0d2ef57cf018b0741679f83e | 14,420 |
from typing import Union
from typing import List
from typing import Dict
from typing import Set
from tqdm import tqdm
def scaffold_to_smiles(mols: Union[List[str], List[Chem.Mol]],
use_indices: bool = False) -> Dict[str, Union[Set[str], Set[int]]]:
""" Computes the scaffold for each SMILES and returns a mapping from scaffolds to sets of smiles (or indices).
Parameters
----------
mols: A list of SMILES strings or RDKit molecules.
use_indices:
Whether to map to the SMILES's index in :code:`mols` rather than mapping to the smiles string itself.
This is necessary if there are duplicate smiles.
Returns
-------
A dictionary mapping each unique scaffold to all SMILES (or indices) which have that scaffold.
"""
scaffolds = defaultdict(set)
for i, mol in tqdm(enumerate(mols), total=len(mols)):
scaffold = generate_scaffold(mol)
if use_indices:
scaffolds[scaffold].add(i)
else:
scaffolds[scaffold].add(mol)
return scaffolds | 2a45731a5574bb37e81042fa19ac7c2f015c21ef | 14,421 |
from crypt import crypt
def encontrar_passwords():
"""
    Try all 6-letter combinations, hashing each one to see whether it matches
    the hashes stored in the /etc/shadow files.
    For the teams one, I basically kept copying and pasting text content from
    different Wikipedia pages into the equipos.txt file until it worked with
    the NBA.
"""
hashes = [
('ox', 'ox45K6RsEUfmQ', generar_palabras()), # fido
('$1$42dJ1xYh', '$1$42dJ1xYh$MfrRke8/Ej3h5.vMtNEhC.', leer_palabras('./colores.txt')), # white
('$6$SZGpKoPi', '$6$SZGpKoPi$GGGqHYKy6PO/H5nvV0AmaGB/5krnxVuz2k2uX81O.CF5nYctE5RlR/rzJQCL3ZsF8yratCRbSR2ZuwKzvve.D0', leer_palabras('./equipos.txt')), # knicks
]
encontradas = []
for algo_y_salt, hash_resultado, origen_passwords in hashes:
for password in origen_passwords:
if crypt(password, algo_y_salt) == hash_resultado:
encontradas.append(password)
break
return encontradas | 5762fed1f5e493c2399d40dbbc1e19ad24c6718e | 14,422 |
def queue_merlin_study(study, adapter):
"""
Launch a chain of tasks based off of a MerlinStudy.
"""
samples = study.samples
sample_labels = study.sample_labels
egraph = study.dag
LOG.info("Calculating task groupings from DAG.")
groups_of_chains = egraph.group_tasks("_source")
# magic to turn graph into celery tasks
LOG.info("Converting graph to tasks.")
celery_dag = chain(
chord(
group(
[
expand_tasks_with_samples.s(
egraph,
gchain,
samples,
sample_labels,
merlin_step,
adapter,
study.level_max_dirs,
).set(queue=egraph.step(chain_group[0][0]).get_task_queue())
for gchain in chain_group
]
),
chordfinisher.s().set(
queue=egraph.step(chain_group[0][0]).get_task_queue()
),
)
for chain_group in groups_of_chains[1:]
)
LOG.info("Launching tasks.")
return celery_dag.delay(None) | 448365e799001d09281707cab69c71c3be05408e | 14,423 |
import math
def sphere_mass(density,radius):
"""Usage: Find the mass of a sphere using density and radius"""
return density*((4/3)*(math.pi)*radius**3) | 8c1a2dc949980ca96a4f56f3918bacd19568965e | 14,424 |
def generate_stats_table(buildings_clust_df):
"""
Generate statistical analysis table of building types in the area
Args:
buildings_clust_df: building footprints dataframe after performed building blocks assignment (HDBSCAN)
Return:
stat_table: statistical analysis results which contains means and standard deviations values for every building type in the area
"""
# Count
count_table = buildings_clust_df.groupby('building_types')[['building_types']].size().to_frame('count').reset_index()
# Mean
mean_table = buildings_clust_df.groupby('building_types')[['building_types','surface_area','rectangularity']].mean().reset_index()
mean_table.columns = ['building_types','mean_surface_area','mean_rectangularity']
# Standard deviation
sd_table=buildings_clust_df.groupby('building_types')[['surface_area','rectangularity']].agg(np.std, ddof=0).reset_index()
# Rename columns
sd_table.columns = ['building_types','sd_surface_area','sd_rectangularity']
stat_table = count_table.merge(mean_table).merge(sd_table)
# Reorder columns
stat_table = stat_table[stat_table.columns[[0,1,3,2,4,5]]]
return stat_table | 732f035e591dc9f0b03673584f3a6e21dad03cad | 14,425 |
def make_multisat(nucsat_tuples):
"""Creates a rst.sty Latex string representation of a multi-satellite RST subtree
(i.e. merge a set of nucleus-satellite relations that share the same nucleus
into one subtree).
"""
nucsat_tuples = [tup for tup in nucsat_tuples] # unpack the iterable, so we can check its length
assert len(nucsat_tuples) > 1, \
"A multisat relation bundle must contain more than one relation"
result = "\dirrel\n\t"
first_relation, remaining_relations = nucsat_tuples[0], nucsat_tuples[1:]
relname, nuc_types, elements = first_relation
first_nucleus_pos = current_nucleus_pos = nuc_types.index('N')
result_segments = []
# add elements (nucleus and satellite) from first relation to resulting (sub)tree
for i, nuc_type in enumerate(nuc_types):
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
if nuc_type == 'N':
result_segments.append(NUC_TEMPLATE.substitute(nucleus=element))
else:
result_segments.append(SAT_TEMPLATE.substitute(satellite=element, relation=relname))
# reorder elements of the remaining relation and add them to the resulting (sub)tree
for (relname, nuc_types, elements) in remaining_relations:
for i, nuc_type in enumerate(nuc_types):
if nuc_type == 'N': # all relations share the same nucleus, so we don't need to reprocess it.
continue
else:
element = elements[i]
if is_edu_segment(element):
element = wrap_edu_segment(element)
result_segment = SAT_TEMPLATE.substitute(satellite=element, relation=relname)
if i < first_nucleus_pos: # satellite comes before the nucleus
result_segments.insert(current_nucleus_pos, result_segment)
current_nucleus_pos += 1
else:
result_segments.append(result_segment)
return result + '\n\t'.join(result_segments) | 16c1808267087beea6cea21811cd3c1f7d70932e | 14,426 |
import io
def plot_to_image(figure):
"""Converts the matplotlib figure to a PNG image."""
# The function is adapted from
# github.com/tensorflow/tensorboard/blob/master/docs/image_summaries.ipynb
# Save the plot to a PNG in memory.
buf = io.BytesIO()
plt.savefig(buf, format="png")
# Closing the figure prevents it from being displayed directly.
plt.close(figure)
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# tf.summary.image requires 4-D inputs. [num_samples, height, weight, color].
image = tf.expand_dims(image, 0)
return image | 24a63f7b27d47421baf2c7913bf989903e4d9545 | 14,427 |
async def getDiscordTwitchAlerts(cls:"PhaazebotDiscord", guild_id:str, alert_id:int=None, limit:int=0, offset:int=0) -> list:
"""
Get server discord alerts, if alert_id = None, get all
else only get one associated with the alert_id
Returns a list of DiscordTwitchAlert().
"""
sql:str = """
SELECT
`discord_twitch_alert`.*,
`twitch_user_name`.`user_name` AS `twitch_channel_name`
FROM `discord_twitch_alert`
LEFT JOIN `twitch_user_name`
ON `discord_twitch_alert`.`twitch_channel_id` = `twitch_user_name`.`user_id`
WHERE `discord_twitch_alert`.`discord_guild_id` = %s"""
values:tuple = ( str(guild_id), )
if alert_id:
sql += " AND `discord_twitch_alert`.`id` = %s"
values += (alert_id,)
if limit:
sql += f" LIMIT {limit}"
if offset:
sql += f" OFFSET {offset}"
res:list = cls.BASE.PhaazeDB.selectQuery(sql, values)
if res:
return [DiscordTwitchAlert(x) for x in res]
else:
return [] | 00c1bad85f4f7891d36e5fab5c651f10c79abf02 | 14,428 |
def is_visible_dir(file_info):
"""Checks to see if the file is a visible directory.
@param file_info: The file to check
@type file_info: a gnomevfs.FileInfo
"""
return is_dir(file_info) and not is_hidden(file_info) | 776361d4cbe16a5b3c45dc3073a37192e31e87a9 | 14,429 |
def add_model_components(m, d, scenario_directory, subproblem, stage):
"""
:param m:
:param d:
:return:
"""
# Import needed availability type modules
required_availability_modules = get_required_subtype_modules_from_projects_file(
scenario_directory=scenario_directory,
subproblem=subproblem,
stage=stage,
which_type="availability_type",
)
imported_availability_modules = load_availability_type_modules(
required_availability_modules
)
# First, add any components specific to the availability type modules
for op_m in required_availability_modules:
imp_op_m = imported_availability_modules[op_m]
if hasattr(imp_op_m, "add_model_components"):
imp_op_m.add_model_components(m, d, scenario_directory, subproblem, stage)
def availability_derate_rule(mod, g, tmp):
"""
:param mod:
:param g:
:param tmp:
:return:
"""
# TODO: make the no_availability type module, which will be the
# default for the availability type param (it will just return 1 as
# the derate)
availability_type = mod.availability_type[g]
return imported_availability_modules[
availability_type
].availability_derate_rule(mod, g, tmp)
m.Availability_Derate = Expression(m.PRJ_OPR_TMPS, rule=availability_derate_rule) | 2ecca392e718bf72023702c13621d9d54e764b3d | 14,430 |
def read_file(item):
"""Read file in key path into key image
"""
item['image'] = tf.read_file(item['path'])
return item | 06b87851717bd486b13f964ad5b45cbdc7a97142 | 14,431 |
def make_joint(withdraw, old_password, new_password):
"""Return a password-protected withdraw function that has joint access to
the balance of withdraw.
>>> w = make_withdraw(100, 'hax0r')
>>> w(25, 'hax0r')
75
>>> make_joint(w, 'my', 'secret')
'Incorrect password'
>>> j = make_joint(w, 'hax0r', 'secret')
>>> w(25, 'secret')
'Incorrect password'
>>> j(25, 'secret')
50
>>> j(25, 'hax0r')
25
>>> j(100, 'secret')
'Insufficient funds'
>>> j2 = make_joint(j, 'secret', 'code')
>>> j2(5, 'code')
20
>>> j2(5, 'secret')
15
>>> j2(5, 'hax0r')
10
>>> j2(25, 'password')
'Incorrect password'
>>> j2(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> j(5, 'secret')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> w(5, 'hax0r')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
>>> make_joint(w, 'hax0r', 'hello')
"Your account is locked. Attempts: ['my', 'secret', 'password']"
"""
"*** YOUR CODE HERE ***"
x = withdraw(0, old_password)
if type(x) == str:
return x
else:
def withdraw_r(amount, code):
if code == new_password:
# print('password is new')
return withdraw(amount, old_password)
elif code != new_password:
return withdraw(amount, code)
return withdraw_r | f40073429aea946486263a7d6e0fc8b24cd60a84 | 14,432 |
def should_parse(config, file):
"""Check if file extension is in list of supported file types (can be configured from cli)"""
return file.suffix and file.suffix.lower() in config.filetypes | 1c2258d405ef715574b557d99cdf87e461627ffd | 14,433 |
def _get_pipeline_per_subband(subband_name: str):
"""
Constructs a pipeline to extract the specified subband related features.
Output:
sklearn.pipeline.Pipeline object containing all steps to calculate time-domain feature on the specified subband.
"""
freq_range = FREQ_BANDS_RANGE[subband_name]
order = FREQ_BANDS_ORDERS[subband_name]
assert len(
freq_range) == 2, "Frequency range must only have 2 elements: [lower bound frequency, upper bound frequency]"
bounds = [freq / NYQUIST_FREQ for freq in freq_range]
b, a = butter(order, bounds, btype='bandpass')
def filter_epochs_in_specified_subband(epochs):
return epochs.copy().filter(
l_freq=bounds[0],
h_freq=bounds[1],
method='iir',
n_jobs=1,
iir_params={
'a': a,
'b': b
}, verbose=False)
return Pipeline([
('filter', FunctionTransformer(filter_epochs_in_specified_subband, validate=False)),
('get-values', FunctionTransformer(get_data_from_epochs, validate=False)),
('mean-energy', FunctionTransformer(
get_transformer(_get_signal_mean_energy), validate=True
)),
]) | 2b1d8bd2543ae07b861df6f979297a82e3f5e827 | 14,434 |
def get_credentials_interactively() -> Credentials:
""" Gets credentials for the bl interactively
"""
return ("placeholder-user", "placeholder-pass") | b5e4d55015155589632b958252a3c078b5920e59 | 14,435 |
def reynolds(find="Re", printEqs=True, **kwargs):
"""
Reynolds Number = Inertia / Viscosity
"""
eq = list()
eq.append("Eq(Re, rho * U * L / mu)")
return solveEqs(eq, find=find, printEq=printEqs, **kwargs) | df5ad0c0279894e8f671942ceddb64e08e35fa0d | 14,436 |
def data_app():
""" Data Processer and Visualizer """
st.title("Data Cake")
st.subheader("A to Z Data Analysis")
file = ['./dataset/Ac1',[0,1]]
def file_selector():
filename = st.file_uploader("Upload Excel File", type=['xls','xlsx'])
if filename is not None:
sheetnames = pd.ExcelFile(filename).sheet_names
sheet = st.selectbox("Sheet Sheet", sheetnames)
return [filename, sheet]
file = file_selector()
# Read Data
try :
df = pd.read_excel(file[0], sheet_name = file[1])
except Exception as e:
st.info("Please upload Excel file")
# Show Datas
try:
if st.checkbox("Show Dataset"):
number = st.number_input("Number of Rows to View",5,10)
st.dataframe(df.head(number))
except Exception as e:
st.info("Please upload Excel file")
# Show Columns
try:
if st.button("Column Names"):
st.write(df.columns)
except Exception as e:
st.info("Please upload Excel file")
# Show Shape
try:
if st.checkbox("Shape of Dataset"):
st.write(df.shape)
except Exception as e:
st.info("Please upload Excel file")
# Select Columns
try:
if st.checkbox("Select Columns To Show"):
all_columns = df.columns.tolist()
selected_columns = st.multiselect("Select",all_columns)
new_df = df[selected_columns]
st.dataframe(new_df)
except Exception as e:
st.info("Please upload Excel file")
# Show Datatypes
try:
if st.button("Data Types"):
st.write(df.dtypes)
except Exception as e:
st.info("Please upload Excel file")
# Show Summary
try:
if st.checkbox("Summary"):
st.write(df.describe().T)
except Exception as e:
st.info("Please upload Excel file")
## Plot and Visualization
st.subheader("Data Visualization")
# Correlation
# Seaborn Plot
if st.checkbox("Correlation Plot[Seaborn]"):
st.write(sns.heatmap(df.corr(),annot=True))
st.pyplot()
# Pie Chart
if st.checkbox("Pie Plot"):
all_columns_names = df.columns.tolist()
if st.button("Generate Pie Plot"):
st.success("Generating A Pie Plot")
st.write(df.iloc[:,-1].value_counts().plot.pie(autopct="%1.1f%%"))
st.pyplot()
# Count Plot
if st.checkbox("Plot of Value Counts"):
st.text("Value Counts By Target")
all_columns_names = df.columns.tolist()
primary_col = st.selectbox("Primary Columm to GroupBy",all_columns_names)
selected_columns_names = st.multiselect("Select Columns",all_columns_names)
if st.button("Plot"):
st.text("Generate Plot")
if selected_columns_names:
vc_plot = df.groupby(primary_col)[selected_columns_names].count()
else:
vc_plot = df.iloc[:,-1].value_counts()
st.write(vc_plot.plot(kind="bar"))
st.pyplot()
#Contour Plot
if st.checkbox("Contour Plot "):
st.text("3D Contour Plot")
all_columns_names = df.columns.tolist()
X = st.selectbox("Select X axis",all_columns_names)
Y = st.selectbox("Select Y axis",all_columns_names,index = 1)
VS = st.selectbox("Select Z axis",all_columns_names,index =2)
Z_F = df.pivot_table(index=X, columns=Y, values=VS).T.values
X_unique = np.sort(df[X].unique())
Y_unique = np.sort(df[Y].unique())
X_F, Y_F = np.meshgrid(X_unique, Y_unique)
pd.DataFrame(Z_F).round(3)
pd.DataFrame(X_F).round(3)
pd.DataFrame(Y_F).round(3)
fig,ax=plt.subplots(1,1)
cp = ax.contourf(X_F, Y_F, Z_F)
fig.colorbar(cp) # Add a colorbar to a plot
st.pyplot(fig=fig)
# Customizable Plot
try:
st.subheader("Customizable Plot")
all_columns_names = df.columns.tolist()
type_of_plot = st.selectbox("Select Type of Plot",["area","bar","line","hist","box","kde"])
selected_columns_names = st.multiselect("Select Columns To Plot",all_columns_names)
if st.button("Generate Plot"):
st.success("Generating Customizable Plot of {} for {}".format(type_of_plot,selected_columns_names))
# Plot By Streamlit
if type_of_plot == 'area':
cust_data = df[selected_columns_names]
st.area_chart(cust_data)
elif type_of_plot == 'bar':
cust_data = df[selected_columns_names]
st.bar_chart(cust_data)
elif type_of_plot == 'line':
cust_data = df[selected_columns_names]
st.line_chart(cust_data)
# Custom Plot
elif type_of_plot:
cust_plot= df[selected_columns_names].plot(kind=type_of_plot)
st.write(cust_plot)
st.pyplot()
if st.button("Ready to ML !"):
st.balloons()
except:
st.info("Please upload Excel file")
st.sidebar.header("Data Cake")
st.sidebar.info("Built by Veera Ragavan") | b6286064757d276a5319ef0b3cffe1515c4a7fb1 | 14,437 |
def derivative(x, y, order=1):
"""Returns the derivative of y-coordinates as a function of x-coodinates.
Args:
x (list or array): 1D array x-coordinates.
y (list or array): 1D array y-coordinates.
order (number, optional): derivative order.
Returns:
x and y arrays.
"""
if order<0:
raise ValueError('order must be a positive integer.')
x = np.array(x)
y = np.array(y)
x_diff = np.diff(x)
y_diff = np.diff(y)/x_diff
for i in range(order-1):
y_diff = np.diff(y_diff)/x_diff[:len(x_diff)-(i+1)]
for i in range(order):
x = moving_average(x, n=2)
return x, y_diff | 23a1213721e553e5b72a0bd92877675b499f9848 | 14,438 |
from aiida.orm import Dict  # Dict is instantiated as Dict(dict=...), so this is the AiiDA data node, not typing.Dict
def get_ff_parameters(wc_params, molecule=None, components=None):
"""Get the parameters for ff_builder."""
ff_params = {
'ff_framework': wc_params['ff_framework'],
'ff_molecules': {},
'shifted': wc_params['ff_shifted'],
'tail_corrections': wc_params['ff_tail_corrections'],
'mixing_rule': wc_params['ff_mixing_rule'],
'separate_interactions': wc_params['ff_separate_interactions']
}
if molecule is not None:
ff_params['ff_molecules'] = {molecule['name']: molecule['forcefield']}
if components is not None:
for value in components.get_dict().values():
ff = value['forcefield'] #pylint: disable=invalid-name
ff_params['ff_molecules'][value['name']] = ff
return Dict(dict=ff_params) | d87008dba0b9d835f71366eb64486b1d18fe2381 | 14,439 |
def healpix_header_odict(nside,nest=False,ordering='RING',coord=None, partial=True):
"""Mimic the healpy header keywords."""
hdr = odict([])
hdr['PIXTYPE']=odict([('name','PIXTYPE'),
('value','HEALPIX'),
('comment','HEALPIX pixelisation')])
ordering = 'NEST' if nest else 'RING'
hdr['ORDERING']=odict([('name','ORDERING'),
('value',ordering),
('comment','Pixel ordering scheme, either RING or NESTED')])
hdr['NSIDE']=odict([('name','NSIDE'),
('value',nside),
('comment','Resolution parameter of HEALPIX')])
if coord:
hdr['COORDSYS']=odict([('name','COORDSYS'),
('value',coord),
('comment','Ecliptic, Galactic or Celestial (equatorial)')])
if not partial:
hdr['FIRSTPIX']=odict([('name','FIRSTPIX'),
('value',0),
('comment','First pixel # (0 based)')])
hdr['LASTPIX']=odict([('name','LASTPIX'),
('value',hp.nside2npix(nside)-1),
('comment','Last pixel # (0 based)')])
hdr['INDXSCHM']=odict([('name','INDXSCHM'),
('value','EXPLICIT' if partial else 'IMPLICIT'),
('comment','Indexing: IMPLICIT or EXPLICIT')])
hdr['OBJECT']=odict([('name','OBJECT'),
('value','PARTIAL' if partial else 'FULLSKY'),
('comment','Sky coverage, either FULLSKY or PARTIAL')])
return hdr | 1202d7564b94a3c2288a513f42e4b781a583e41c | 14,440 |
def hello():
"""Test endpoint"""
return {'hello': 'world'} | 91ad620815a6371a4723e21bc79aad8c1d49e73e | 14,441 |
import logging
import json
def get_config_data(func_name: str, config_file_name: str = "config.json")\
-> tuple[str, str, str, str] | str | tuple[str, str]:
"""Extracts the data pertaining to the covid_news_handling module from the
provided config file.
A try except is used to get the encoding style to be used, and to check if
a valid/present config file has been provided. If one hasn't been provided
the event is logged and the dashboard is shutdown. Otherwise, the encoding
style is extracted (data loaded as a json and the value of the 'encoding'
key is found). The config file is opened again with the required encoding
style, loaded as a json, with the data relating to the
'covid_news_handling' key being found and the required values being
extracted. A while loop is used to ensure all values are present in
the config file, if they aren't, the event is logged and the dashboard is
shutdown, and each value is returned to the respective functions.
Args:
func_name (str): The name of the function data is being returned to,
given as a string. This allows for certain values to be returned
to certain functions (no wasted variables).
config_file_name (str): The name of the config file data is being taken
from, given as a string. This allows for data in the config file
to be used throughout the module and to customise the program.
Returns:
tuple[str, str, str, str]: (queries, language, sort_by, news_api_key).
The parameters to be used in the news API call, returned as a
tuple of strings. This allows the user to change the parameters
used within the news API call (further customise the dashboard).
str: displayed_content. The data from the article to be displayed in
the content section of the news article widgets on the dashboard.
This again lets the user customise the news section of the
dashboard.
tuple[str, str]: (num_displayed_articles, no_articles_message). The
number of news articles to display on each page and the message
that is displayed when there are no unremoved articles remaining,
returned as a tuple of strings, allowing the user to change the
number of displayed articles and the no articles message via the
config file.
"""
logging.debug("Entering the get_config_data function.")
# Get the encoding style to be used throughout the module.
try:
get_encoding = open(config_file_name, 'r')
except FileNotFoundError:
logging.critical("Config file missing or cannot be located.")
# Loads the json data and gets the value of the 'encoding' key.
data = json.load(get_encoding)
program_encoding = data['encoding']
get_encoding.close()
# Opens the file with the given encoding to get the rest of the data.
with open(config_file_name, 'r', encoding=program_encoding) as\
configuration_file:
data = json.load(configuration_file)
json_news_data = data['covid_news_handling']
queries = json_news_data['queries']
language = json_news_data['language']
sort_by = json_news_data['sort_by']
displayed_content = json_news_data['displayed_content']
num_displayed_articles = json_news_data['num_displayed_articles']
no_articles_message = json_news_data['no_articles_message']
news_api_key = json_news_data['news_api_key']
# Ensures a complete config file is provided before progressing.
while (queries and language and sort_by and displayed_content
and num_displayed_articles and no_articles_message
and news_api_key) is not None:
# Returns different values depending on the function calling it.
if func_name == 'news_API_request':
logging.info("Exiting get_config_data function as intended")
return (queries, language, sort_by, news_api_key)
if func_name == 'news_processor':
logging.info("Exiting get_config_data function as intended")
return displayed_content
if func_name == 'remove_and_limit_news_articles':
logging.info("Exiting get_config_data function as intended")
return (num_displayed_articles, no_articles_message)
logging.error("Incomplete config file provided, dashboard stopped.") | 9f665277d0e1a0c2d02ba6e487d20ade139ab5d6 | 14,442 |
def permute_channels(n_channels, keep_nbr_order=True):
"""Permute the order of neighbor channels
Args:
n_channels: the total number of channels
keep_nbr_order: whether to keep the relative order of neighbors
if true, only do random rotation and flip
if false, random permutation
"""
ch_idx = np.arange(1, n_channels)
if keep_nbr_order:
# rotate and flip
ch_idx = np.roll(ch_idx, np.random.randint(n_channels-1))
if np.random.randint(2) == 1:
ch_idx = ch_idx[::-1]
else:
# random permute
np.random.shuffle(ch_idx)
ch_idx = np.concatenate([[0], ch_idx])
return ch_idx | 5491f181d32a5ff77ef1d9f6ac3327e6b0a746e0 | 14,443 |
def from_file(file,typcls):
"""Parse an instance of the given typeclass from the given file."""
s = Stream(file)
return s.read_value(typcls._ep_typedesc) | 90507278f33fe30a73c31f94ab046c07962250cc | 14,444 |
def read_test_ids():
"""
Read sample submission file, list and return all test image ids.
"""
df_test = pd.read_csv(SAMPLE_SUBMISSION_PATH)
ids_test = df_test['img'].map(lambda s: s.split('.')[0])
return ids_test | cc4d53d28631fe0e22cabe30704a1844ff3e3a5b | 14,445 |
def chuseok(year=None):
"""
:parm year: int
:return: Thanksgiving Day of Korea
"""
year = year if year else _year
return LunarDate(year, 8, 15).toSolarDate() | 28a3170153862bda2ae52176d4931ee10050c3e1 | 14,446 |
def DELETE(request):
"""Delete a user's authorization level over a simulation."""
# Make sure required parameters are there
try:
request.check_required_parameters(
path={
'simulationId': 'int',
'userId': 'int'
}
)
except exceptions.ParameterError as e:
return Response(400, e.message)
# Instantiate an Authorization
authorization = Authorization.from_primary_key((
request.params_path['userId'],
request.params_path['simulationId']
))
# Make sure this Authorization exists in the database
if not authorization.exists():
return Response(404, '{} not found.'.format(authorization))
# Make sure this User is allowed to delete this Authorization
if not authorization.google_id_has_at_least(request.google_id, 'OWN'):
return Response(403, 'Forbidden from deleting {}.'.format(authorization))
# Delete this Authorization
authorization.delete()
return Response(
200,
'Successfully deleted {}.'.format(authorization),
authorization.to_JSON()
) | 7ac7c6277126a827790e786cbfdf9f84ccaace7b | 14,447 |
def process_query(data):
"""
Concat query, question, and narrative then 'preprocess'
:data: a dataframe with queries in rows; query, question, and narrative in columns
:return: 2d list of tokens (rows: queries, columns: tokens)
"""
lst_index = []
lst_words = []
for index, row in data.iterrows():
tmp = preprocess(row["query"] +" "+ row["question"]+ " "+row["narrative"])
lst_words.append(tmp)
lst_index.append(row["number"])
return lst_words | 8a8067f1766abdc08aa7b8995508d6cdc9057bd2 | 14,448 |
import argparse
def parameter_parser():
"""
A method to parse up command line parameters.
The default hyperparameters give a high performance model without grid search.
"""
parser = argparse.ArgumentParser(description="Run SimGNN.")
parser.add_argument("--dataset",
nargs="?",
default="AIDS700nef", # AIDS700nef LINUX IMDBMulti
help="Dataset name. Default is AIDS700nef")
parser.add_argument("--gnn-operator",
nargs="?",
default="gin", # gcn gin gat
help="Type of GNN-Operator. Default is gcn")
parser.add_argument("--epochs",
type=int,
default=350,
help="Number of training epochs. Default is 350.")
parser.add_argument("--filters-1",
type=int,
default=64,
help="Filters (neurons) in 1st convolution. Default is 64.")
parser.add_argument("--filters-2",
type=int,
default=32,
help="Filters (neurons) in 2nd convolution. Default is 32.")
parser.add_argument("--filters-3",
type=int,
default=32, ##
help="Filters (neurons) in 3rd convolution. Default is 32.")
parser.add_argument("--tensor-neurons",
type=int,
default=16,
help="Neurons in tensor network layer. Default is 16.")
parser.add_argument("--bottle-neck-neurons",
type=int,
default=16,
help="Bottle neck layer neurons. Default is 16.")
parser.add_argument("--batch-size",
type=int,
default=128,
help="Number of graph pairs per batch. Default is 128.")
parser.add_argument("--bins",
type=int,
default=16,
help="Histogram Similarity score bins. Default is 16.")
parser.add_argument("--dropout",
type=float,
default=0,
help="Dropout probability. Default is 0.")
parser.add_argument("--learning-rate",
type=float,
default=0.001,
help="Learning rate. Default is 0.001.")
parser.add_argument("--weight-decay",
type=float,
default=5 * 10 ** -4,
help="Adam weight decay. Default is 5*10^-4.")
parser.add_argument("--histogram",
dest="histogram",
action="store_true")
parser.add_argument("--diffpool",
dest="diffpool",
action="store_true",
help="Enable differentiable pooling.")
parser.add_argument("--plot-loss",
dest="plot_loss",
action="store_true")
parser.add_argument("--notify",
dest="notify",
action="store_true",
help="Send notification message when the code is finished (only Linux & Mac OS support).")
# TODO device selection
#parser.add_argument("--device",
# nargs="?",
# default='cpu', # torch.device('cuda' if torch.cuda.is_available() else 'cpu'),
# help="Select to run with gpu or cpu. Default depends on existing CUDA installation.")
parser.add_argument("--use-lsh",
dest="use_lsh",
action="store_true",
help="Specify if LSH will be utilized. Default choice is to train WITH LSH.")
parser.set_defaults(histogram=False) # True False
parser.set_defaults(use_lsh=False) # True False
parser.set_defaults(diffpool=False) # True False
parser.set_defaults(plot_loss=False) # True False
parser.set_defaults(notify=False)
# TODO add lsh variables as arguments conditional on --use-lsh
return parser.parse_args() | fa79c8ad15250cc7709d267551b570584fc8d86a | 14,449 |
def nb_view_patches(Yr, A, C, S, b, f, d1, d2, YrA=None, image_neurons=None, thr=0.99, denoised_color=None, cmap='jet'):
"""
Interactive plotting utility for ipython notebook
Args:
Yr: np.ndarray
movie
A,C,b,f: np.ndarrays
outputs of matrix factorization algorithm
d1,d2: floats
dimensions of movie (x and y)
YrA: np.ndarray
ROI filtered residual as it is given from update_temporal_components
If not given, then it is computed (K x T)
image_neurons: np.ndarray
image to be overlaid to neurons (for instance the average)
thr: double
threshold regulating the extent of the displayed patches
denoised_color: string or None
color name (e.g. 'red') or hex color code (e.g. '#F0027F')
cmap: string
name of colormap (e.g. 'viridis') used to plot image_neurons
"""
# PREPROCESSING
nr, T = C.shape
nA2 = np.ravel(np.power(A, 2).sum(0)) if type(
A) == np.ndarray else np.ravel(A.power(2).sum(0))
b = np.squeeze(b)
f = np.squeeze(f)
if YrA is None:
Y_r = np.array(spdiags(old_div(1, nA2), 0, nr, nr) *
(A.T * np.matrix(Yr) -
(A.T * np.matrix(b[:, np.newaxis])) * np.matrix(f[np.newaxis]) -
A.T.dot(A) * np.matrix(C)) + C)
else:
Y_r = C + YrA
x = np.arange(T)
if image_neurons is None:
image_neurons = A.mean(1).reshape((d1, d2), order='F')
coors = get_contours(A, (d1, d2), thr)
cc1 = [cor['coordinates'][:, 0] for cor in coors]
cc2 = [cor['coordinates'][:, 1] for cor in coors]
c1 = cc1[0]
c2 = cc2[0]
# PLOTTING
fig, axes = plt.subplots(2)
axes[0].imshow(image_neurons, cmap = 'gray')
axes[0].set_title('Neural map')
axes[1].plot(C[0], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[0], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[0], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
# WIDGETS
neuron_nr_slider = IntSlider(description = 'Neuron Number', value = 0, min = 0, max = len(C) - 1)
def neuron_nr_handler(*args):
i = neuron_nr_slider.value
axes[1].clear()
axes[1].plot(C[i], label = 'C: raw traces', c = 'blue')
axes[1].plot(Y_r[i], label = 'Y_r: residuals', c = 'red')
axes[1].plot(S[i], label = 'S: deconvolved activity', c = 'green')
plt.legend()
axes[1].set_xlabel('t [frames]')
neuron_nr_slider.observe(neuron_nr_handler, 'value')
widgets = [neuron_nr_slider]
return fig, widgets | 84db2e40734b21ebb6be5eef8eeb89bbe2838542 | 14,450 |
def menu():
"""Manda el Menú \n
Opciones:
1: Añadir a un donante
2: Añadir a un donatario
3: Revisar la lista de donantes
4: Revisar la lista de donatarios
5: Realizar una transfusion
6: Estadisticas
7: Salir
Returns:
opc(num):Opcion del menu """
print("\nBienvenido a el sistema de Donacion de Sangre. Elige la accion que deseas realizar.\n1.Añadir Donante de Sangre\n2.Añadir Donatario de Sangre\n3.Revisar lista de Donantes\n4.Revisar Lista de Donatarios\n5.Realizar una transfusion\n6.Estadisticas\n7.Salir")
opc=int(input("Seleccionar: "))
return opc | 805d1ef48fbe03f8185e3c7be71ce3d9aa6104df | 14,451 |
import shutil
def get_engine(hass, config):
"""Set up Pico speech component."""
if shutil.which("pico2wave") is None:
_LOGGER.error("'pico2wave' was not found")
return False
return PicoProvider(config[CONF_LANG]) | d03a19dcff4bc8556a84b434d14468a45ffc7e6c | 14,452 |
from typing import Tuple
from typing import List
def load_cp() -> Tuple[List[str], List[List[float]]]:
"""
Loads cloud point data; target values given in Celsius
Returns:
Tuple[List[str], List[List[float]]]: (smiles strings, target values);
target values have shape (n_samples, 1)
"""
return _load_set('cp') | 84b11da1b3cd1a9ecaf5e1419d69b877c160e2aa | 14,453 |
def look_up(f, *args, **kwargs):
"""
:param f:
:type f:
:param args:
:type args:
:param kwargs:
:type kwargs:
:return:
:rtype:"""
ag_hash = hash(args) + make_hash(kwargs)
if f in global_table:
if ag_hash in global_table[f]:
return global_table[f][ag_hash]
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res
global_table[f] = {}
res = global_table[f][ag_hash] = f(*args, **kwargs)
return res | ebc9015066959f66cd98db226178cdf087fc897b | 14,454 |
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default.
"""
return environ.get(key, default) | 0e355c73dbbdc971e4442123dc5945dc04fac8fc | 14,455 |
def read_tag(request, tid, *args, **kwargs):
"""read_tag(tid) returns ..."""
s = api.read_tag(request, tid, *args, **kwargs)
return render_to_response('read/tag.html', s) | 7582e752563f35eb8d025625eaac87d8a9f45f32 | 14,456 |
def ber_img(original_img_bin, decoded_img_bin):
"""Compute Bit-Error-Rate (BER) by comparing 2 binary images."""
if not original_img_bin.shape == decoded_img_bin.shape:
raise ValueError('Original and decoded images\' shapes don\'t match !')
height, width, k = original_img_bin.shape
errors_bits = abs(original_img_bin - decoded_img_bin).sum()
errors_bits = errors_bits.flatten()
total_bits = np.prod(original_img_bin.shape)
ber = errors_bits / total_bits
return(ber) | f5768aa5435a76bcd82b331d76dadf554749e82d | 14,457 |
def get_fractal_patterns_WtoE_NtoS(fractal_portrait, width, height):
""" get all fractal patterns from fractal portrait, from West to East, from North to South """
fractal_patterns = []
for x in range(width):
# single fractal pattern
f_p = get_fractal_patterns_zero_amounts()
for y in range(height):
if fractal_portrait[x][y] != EMPTY_PLACE:
f_p[fractal_portrait[x][y]] += 1
if any(v > 0 for v in f_p.values()):
fractal_patterns.append(f_p)
return fractal_patterns | ad5e2025515231ae8efb256b5dc466d66fedb467 | 14,458 |
import time
import datetime
import requests
import io
def rating(date=None):
"""P2peye comprehensive rating and display results.
from https://www.p2peye.com
Args:
date: if None, download latest data, if like '201812', that download month data.
Returns:
DataFrame
"""
start = time.time()
if date is None:
date = str(pd.to_datetime(datetime.datetime.now())-pd.DateOffset(months=1))[:7].replace('-', '')
assert (isinstance(date, str) and len(date)==6), "`date` shoule format '201812' or None"
url_txt = 'https://raw.githubusercontent.com/Hourout/datasets/master/report/p2peye/rating/p2peye_rating'+date+'.txt'
s = requests.get(url_txt).content
data = pd.read_csv(io.StringIO(s.decode('utf-8')))
print('p2peye rating dataset download completed, run time %d min %.2f sec' %divmod((time.time()-start), 60))
return data | d76205c933c07764938b38fbfcdcbbe84b584471 | 14,459 |
def crop_image(src, box, expand=0):
"""Read sensor data and crop a bounding box
Args:
src: a rasterio opened path
box: geopandas geometry polygon object
expand: add padding in percent to the edge of the crop
Returns:
masked_image: a crop of sensor data at specified bounds
"""
#Read data and mask
try:
left, bottom, right, top = box.bounds
expand_width = (right - left) * expand /2
expand_height = (top - bottom) * expand / 2
#If expand is greater than increase both size
if expand >= 0:
expanded_left = left - expand_width
expanded_bottom = bottom - expand_height
expanded_right = right + expand_width
expanded_top = top+expand_height
else:
#Make sure of no negative boxes
expanded_left = left+expand_width
expanded_bottom = bottom+expand
expanded_right = right-expand_width
expanded_top = top-expand_height
window = rasterio.windows.from_bounds(expanded_left, expanded_bottom, expanded_right, expanded_top, transform=src.transform)
masked_image = src.read(window=window)
except Exception as e:
raise ValueError("sensor path: {} failed at reading window {} with error {}".format(src, box.bounds,e))
#Roll depth to channel last
masked_image = np.rollaxis(masked_image, 0, 3)
#Skip empty frames
if masked_image.size ==0:
raise ValueError("Empty frame crop for box {} in sensor path {}".format(box, src))
return masked_image | 84948cf3c81fe650d15acd834f89c532e9658466 | 14,460 |
def add_msgpack_support(cls, ext, add_cls_methods=True):
"""Adds serialization support,
Enables packing and unpacking with msgpack with 'pack.packb' and
'pack.unpackb' methods.
If add_method then enables equality, reading and writing for the classs.
Specificly, adds methods:
bytes <- obj.to_binary()
obj <- cls.from_binary(bytes)
boolean <- obj1 == obj2
Args:
cls: class
ext: an unique code for the msgpack's Ext hook
"""
def enc(obj):
return packb(obj.__dict__)
def dec(data):
obj = cls.__new__(cls)
obj.__dict__.update(unpackb(data))
return obj
def eq(a, b):
if type(a) != type(b):
return NotImplemented
return a.__dict__ == b.__dict__
if add_cls_methods:
if cls.__eq__ is object.__eq__:
cls.__eq__ = eq
cls.to_bytes = enc
cls.from_bytes = staticmethod(dec)
_pack_reg[cls] = (ext, enc)
petlib.pack.register_coders(cls, ext, enc, dec) | fa8a6fcce7103466f923b84eeb6f5bdae8383b79 | 14,461 |
import os
import configparser
def parse_conf():
"""(Dictionary) Function that parses the config file and/or environment variables and returns dictionary."""
# Following tuple holds the configfile/env var versions of each config
ALL_CONFIGS = (
        # (Name of the variable in code, config file section, config file key, name of environment variable, default value)
        ("data_store", "default", "path_to_result", "WEATHER_PATH_TO_RESULT", "data"),
        ("geocoder", "default", "geocoder", "WEATHER_GEOCODER", "YES"),
        ("lat", "default", "lat", "WEATHER_LAT", "0"),
        ("lon", "default", "lon", "WEATHER_LON", "0"),
        # No environment variable for the API key and an empty default (both assumed);
        # the original entry had only four fields, which breaks the t[3]/t[4] lookups below.
        ("api_key", "credentials", "openweather_api", "", "")
)
# Initialize return dictionary
ret = {}
# Attempt to read config file
path_to_config = os.getenv("WEATHER_CONFIG", "weather.conf")
config = configparser.ConfigParser()
config.read(path_to_config)
debug("Config sections loaded: " + str(config.sections()))
for t in ALL_CONFIGS:
tmp_env = os.getenv(t[3])
if tmp_env != None:
ret[t[0]] = tmp_env
debug("Environment variable loaded for " + t[0] + " is " + str(tmp_env))
elif t[1] in config and t[2] in config[t[1]]:
debug("Config file value loaded for " + t[0] + " is " + config[t[1]][t[2]])
ret[t[0]] = config[t[1]][t[2]]
else:
debug("Couldn't not find a config file value nor Environment variable for " + t[0])
debug("Default value for " + t[0] + " is " + t[4])
ret[t[0]] = t[4]
return ret | 30d1776e2cf2f316a8a4d03312273fbf4d82e7fa | 14,462 |
def UnN(X, Z, N, sampling_type):
"""Computes block-wise complete U-statistic."""
return UN(X, Z, N, Un, sampling_type=sampling_type) | 0e2fb8c62cbcca99017bc7d0ff2c13dbecad1ab3 | 14,463 |
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import get_content_type_for_model
from django.shortcuts import get_object_or_404, render
def view_log_view(request, model_name, object_id):
"""view log view
Arguments:
request {object} -- wsgi request object
content_type {str} -- content type
object_id {int} -- admin_log id
Returns:
retun -- html view
"""
if model_name not in register_form:
return render(request, 'admin/error.html', {'error_msg': 'illegal request!'})
model = register_form[model_name]['model']
res = get_object_or_404(model, pk=object_id)
log_entries = LogEntry.objects.filter(
content_type_id=get_content_type_for_model(model).pk,
object_id=res.id
)
return render(request, 'admin/view_log.html', {
'log_data': log_entries
}) | 55ebd2d5226e06b1f5833595b0efad3de81140d7 | 14,464 |
from typing import Tuple
from typing import Dict
def parse_markdown(source: str) -> Tuple[str, Dict]:
"""Parse a Markdown document using our custom parser.
Args:
source (str): the Markdown source text
Returns:
tuple(str, dict):
1. the converted output as a string
2. any extracted metadata as a dict
"""
# Reset or we'll have leftover garbage from the previous file
_md_parser.reset()
html: str = _md_parser.convert(source)
meta: Dict = set_metadata(_md_parser.metadata)
return html, meta | 2bf5a8d43f3763d6b1356dc0496ab4ed1896fe99 | 14,465 |
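# Usage sketch for parse_markdown(): assumes the module-level _md_parser is configured
# with a metadata extension; the exact shape of `meta` depends on that configuration.
source = "Title: Example\n\n# Hello\n\nSome *Markdown* text."
html, meta = parse_markdown(source)
print(html)  # rendered HTML
print(meta)  # extracted metadata dict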
def flatten_list(nested_list):
# Essentially we want to loop through each element in the list
# and check to see if it is of type integer or list
"""
    Flatten an arbitrarily nested list
    Args:
        nested_list: a nested list whose items are either integers or lists
example:
[2,[[3,[4]], 5]]
Returns:
a flattened list with only integers
example:
[2,3,4,5]
"""
result = []
for element in nested_list:
if isinstance(element, int):
result.append(element)
elif hasattr(element, '__iter__'):
#check to see if it is of type list
list_result = flatten_list(element) #recursive call
for single_integer in list_result:
result.append(single_integer)
return result | d79b350167cd1fdf35582e9b149bfb364741d566 | 14,466 |
import inspect
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
global _test_runner_guess
if _test_runner_guess is False:
stack = inspect.stack()
for record in reversed(stack):
frame = record[0]
module = frame.f_globals.get("__name__").partition(".")[0]
if module in _test_runner_aliases:
module = _test_runner_aliases[module]
if module in _test_runners:
_test_runner_guess = module
break
if record[1].endswith("python2.6/unittest.py"):
_test_runner_guess = "unittest"
break
else:
_test_runner_guess = None
return _test_runner_guess | 881758d42e5047fe58106a99377dcc7191c0010c | 14,467 |
import tensorflow as tf
def retinanet(
mode,
offsets_mean=None,
offsets_std=None,
architecture='resnet50',
train_bn=False,
channels_fmap=256,
num_anchors_per_pixel=9,
num_object_classes=1,
pi=0.01,
alpha=0.25,
gamma=2.0,
confidence_threshold=0.05,
num_top_scoring=1000,
batch_size=2,
max_objects_per_class_per_img=100,
iou_threshold=0.5,
output_top_scoring=False
):
"""
Builds a RetinaNet.
Parameters
----------
mode : string
The mode of building a retinanet either in 'training' or 'inference'.
offsets_mean, offsets_std : float
The mean and std of anchor offsets for a given dataset. If offsets are
normalized, they will be used to de-normalize offsets.
architecture : string, optional
ResNet architecture in {'resnet50', 'resnet101'}. The default is
'resnet50'.
train_bn : boolean, optional
Whether one should normalize the layer input by the mean and variance
over the current batch. The default is False, i.e., use the moving
average of mean and variance to normalize the layer input.
channels_fmap : integer, optional
The number of filters in all FPN conv layers. The default is 256.
num_anchors_per_pixel : integer, optional
The number of anchors to generate at different scales for every pixel;
see anchors.anchors_from_fpn(). The default is 9.
num_object_classes : integer, optional
The number of classes containing only objects, i.e., object classes
denoted by positive integers while background denoted by 0. The default
is 1.
pi : float, optional
The bias initialization at the final conv layer of the classification
subnet, prevents the large number of anchors from generating a large
loss value in the first iteration of training. The default is 0.01.
alpha : float, optional
A weighting factor in [0,1] for the object class, addressing class
imbalance. The default is 0.25.
gamma : float, optional
A focusing parameter >= 0 for removing easy examples. The default is
2.0.
confidence_threshold : float, optional
The minimum selection's probabilites. The default is 0.05.
num_top_scoring : integer, optional
The number of top-scoring selections. The default is 1000.
batch_size : integer, optional
The batch size of input images. The default is 2.
max_objects_per_class_per_img : integer, optional
The maximum number of objects over all images for a particular class.
The default is 100.
iou_threshold : float, optional
An iou threshold for NMS. The default is 0.5.
output_top_scoring : boolean, optional
Whether to include the output of detections.select_top_scoring() in the
inference mode. The default is False.
Returns
-------
model : tf keras
The retinanet.
- Training mode
* inputs are a batch of images, anchor indicators, ground-truth
class ids and offsets generated by data_gen.data_generator();
* outputs are predicted anchor probabilities, offsets,
classification and regression losses.
- Inference mode
* inputs are a batch of raw images, a list of anchors at all
levels generated by anchors.anchors_from_fpn() and a window with
shape of [1, 4] used in clipping anchors in
detections.SelectTopScoring() where 4 is (y1, x1, y2, x2) corner
coordinates for all images in the batch.
* outputs is a list of detections, each has corresponding target
boxes, class ids and scores.
"""
assert mode in ['training', 'inference']
# input images
images = tf.keras.Input(shape=(None, None, 3), name='images')
if mode == 'training':
# inputs generated by anchors.anchors_targets()
gt_anchor_indicators = tf.keras.Input(
shape=(None,),
name='gt_anchor_indicators',
dtype=tf.int32)
gt_anchor_class_ids = tf.keras.Input(
shape=(None, num_object_classes),
name='gt_anchor_class_ids',
dtype=tf.int32)
gt_anchor_offsets = tf.keras.Input(
shape=(None, 4),
name='gt_anchor_offsets',
dtype=tf.float32)
# backbone, ResNet + FPN
fmaps = resnet_fpn.resnet_fpn(
images, architecture, train_bn, channels_fmap)
if mode == 'inference':
# input generated by anchors.anchors_from_fpn(), and then each
# element is broadcasted to batch_size, resulting in shape of
# [batch_size, num_anchors_per_fmap, 4]
anchors_fpn_batches = []
for i in range(len(fmaps)):
anchors_i = tf.keras.Input(
shape=(None, 4),
name='anchors_p'+str(i+3),
dtype=tf.float32)
anchors_fpn_batches.append(anchors_i)
# input used when clipping anchors in detections.SelectTopScoring()
window = tf.keras.Input(
shape=(4),
batch_size=1,
name='window',
dtype=tf.int32)
# classification and regression subnets
cls_subnet = subnets.cls_subnet(
num_anchors_per_pixel,
num_object_classes,
channels_fmap,
pi)
reg_subnet = subnets.reg_subnet(
num_anchors_per_pixel,
channels_fmap)
# outputs, list, each element is for one FPN level
if mode == 'training':
pred_anchor_probs, pred_anchor_offsets = [], []
else:
list_anchor_idxes = []
list_anchors, list_class_ids, list_scores = [], [], []
# loop for each FPN level
for i in range(len(fmaps)):
# fmap, [batch_size, h_i, w_i, channels_fmap] where h_i and w_i denote
# the current fmap size
p = fmaps[i]
# cls, [batch_size, h_i, w_i, num_anchors_per_pixel*num_object_classes]
pred_anchor_probs_i = cls_subnet([p])
# reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, num_object_classes]
pred_anchor_probs_i = tf.keras.layers.Reshape(
(-1, num_object_classes),
name='cls_probs_p'+str(i+3)
)(pred_anchor_probs_i)
# reg, [batch_size, h_i, w_i, num_anchors_per_pixel*4]
pred_anchor_offsets_i = reg_subnet([p])
# reshape, [batch_size, h_i*w_i*num_anchors_per_pixel, 4]
pred_anchor_offsets_i = tf.keras.layers.Reshape(
(-1, 4),
name='reg_offsets_p'+str(i+3)
)(pred_anchor_offsets_i)
if mode == 'training':
pred_anchor_probs.append(pred_anchor_probs_i)
pred_anchor_offsets.append(pred_anchor_offsets_i)
else:
# filter low confidence, select top-scoring and refine anchors
anchors_i = anchors_fpn_batches[i]
select_top_scoring_inputs_i = [
anchors_i,
pred_anchor_probs_i,
pred_anchor_offsets_i,
window]
select_top_scoring_outputs_i = detections.SelectTopScoring(
confidence_threshold,
num_top_scoring,
batch_size,
offsets_mean,
offsets_std,
name='select_top_detection_p'+str(i+3)
)(select_top_scoring_inputs_i)
list_anchor_idxes.append(select_top_scoring_outputs_i[0])
list_anchors.append(select_top_scoring_outputs_i[1])
list_class_ids.append(select_top_scoring_outputs_i[2])
list_scores.append(select_top_scoring_outputs_i[3])
if mode == 'training':
# probs, [batch_size, num_anchors, num_object_classes]
pred_anchor_probs = tf.keras.layers.Concatenate(
axis=1, name='pred_anchor_probs')(pred_anchor_probs)
# offsets, [batch_size, num_anchors, 4]
pred_anchor_offsets = tf.keras.layers.Concatenate(
axis=1, name='pred_anchor_offsets')(pred_anchor_offsets)
# cls loss
cls_inputs = [
gt_anchor_indicators, gt_anchor_class_ids, pred_anchor_probs]
cls_loss = losses.ClsLoss(alpha, gamma)(cls_inputs)
# reg loss
reg_inputs = [
gt_anchor_indicators, gt_anchor_offsets, pred_anchor_offsets]
reg_loss = losses.RegLoss()(reg_inputs)
# training model's inputs and outputs
inputs = [
images,
gt_anchor_indicators,
gt_anchor_class_ids,
gt_anchor_offsets,]
outputs = [
pred_anchor_probs,
pred_anchor_offsets,
cls_loss,
reg_loss]
else:
# NMS
nms_fpn_inputs = [
list_anchor_idxes, list_anchors, list_class_ids, list_scores]
nms_fpn_outputs = detections.NMS_FPN(
max_objects_per_class_per_img,
iou_threshold,
batch_size,
name='nms'
)(nms_fpn_inputs)
# anchors_batch, class_ids_batch, scores_batch = nms_fpn_outputs
# inference model's inputs and outputs
inputs = [images, anchors_fpn_batches, window]
if output_top_scoring:
outputs = [nms_fpn_inputs, nms_fpn_outputs]
else:
outputs = nms_fpn_outputs
with tf.device('/cpu:0'):
model = tf.keras.Model(inputs, outputs, name='RetinaNet')
return model | a3cdb088345740583c7ea08049e5f03f8d496cad | 14,468 |
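# Construction sketch for the training-mode model: assumes the project's resnet_fpn,
# subnets, detections and losses modules are importable; the normalization statistics
# below are illustrative placeholders.
model = retinanet(
    mode='training',
    offsets_mean=0.0,
    offsets_std=0.2,
    architecture='resnet50',
    num_object_classes=1,
    batch_size=2)
model.summary()
# model.inputs:  images, gt_anchor_indicators, gt_anchor_class_ids, gt_anchor_offsets
# model.outputs: pred_anchor_probs, pred_anchor_offsets, cls_loss, reg_loss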
from typing import Union
from typing import Tuple
from typing import List
from typing import Literal
def default_chap_exec(gallery_or_id: Union[Gallery, int], chap: Chapter, only_values=False) \
-> Union[Tuple[str, dict], Tuple[int, Union[str, List[str]], int, bytes, int, Literal[0, 1]]]:
"""Pass a Gallery object or gallery id and a Chapter object"""
gid: int
if isinstance(gallery_or_id, Gallery):
gallery: Gallery = gallery_or_id
gid = gallery.id
in_archive = gallery.is_archive
else:
gid = gallery_or_id
in_archive = chap.in_archive
if only_values:
result_exec = (gid, chap.title, chap.number, str.encode(chap.path), chap.pages, in_archive)
else:
result_exec = (
"""
INSERT INTO chapters(series_id, chapter_title, chapter_number, chapter_path, pages, in_archive)
VALUES(:series_id, :chapter_title, :chapter_number, :chapter_path, :pages, :in_archive)""",
{
'series_id': gid,
'chapter_title': chap.title,
'chapter_number': chap.number,
'chapter_path': str.encode(chap.path),
'pages': chap.pages,
'in_archive': in_archive
}
)
return result_exec | 8bd8cbfc47ce3463f2ea6da313cc871d8b6dcdf5 | 14,469 |
import openml
import pandas as pd
def _list_all(listing_call, output_format='dict', *args, **filters):
"""Helper to handle paged listing requests.
Example usage:
``evaluations = list_all(list_evaluations, "predictive_accuracy", task=mytask)``
Parameters
----------
listing_call : callable
Call listing, e.g. list_evaluations.
output_format : str, optional (default='dict')
The parameter decides the format of the output.
- If 'dict' the output is a dict of dict
- If 'dataframe' the output is a pandas DataFrame
*args : Variable length argument list
Any required arguments for the listing call.
**filters : Arbitrary keyword arguments
Any filters that can be applied to the listing function.
additionally, the batch_size can be specified. This is
useful for testing purposes.
Returns
-------
dict or dataframe
"""
# eliminate filters that have a None value
active_filters = {key: value for key, value in filters.items()
if value is not None}
page = 0
result = {}
if output_format == 'dataframe':
result = pd.DataFrame()
# Default batch size per paging.
# This one can be set in filters (batch_size), but should not be
# changed afterwards. The derived batch_size can be changed.
BATCH_SIZE_ORIG = 10000
if 'batch_size' in active_filters:
BATCH_SIZE_ORIG = active_filters['batch_size']
del active_filters['batch_size']
# max number of results to be shown
LIMIT = None
offset = 0
if 'size' in active_filters:
LIMIT = active_filters['size']
del active_filters['size']
if LIMIT is not None and BATCH_SIZE_ORIG > LIMIT:
BATCH_SIZE_ORIG = LIMIT
if 'offset' in active_filters:
offset = active_filters['offset']
del active_filters['offset']
batch_size = BATCH_SIZE_ORIG
while True:
try:
current_offset = offset + BATCH_SIZE_ORIG * page
new_batch = listing_call(
*args,
limit=batch_size,
offset=current_offset,
output_format=output_format,
**active_filters
)
except openml.exceptions.OpenMLServerNoResult:
# we want to return an empty dict in this case
break
if output_format == 'dataframe':
if len(result) == 0:
result = new_batch
else:
result = result.append(new_batch, ignore_index=True)
else:
# For output_format = 'dict' or 'object'
result.update(new_batch)
if len(new_batch) < batch_size:
break
page += 1
if LIMIT is not None:
# check if the number of required results has been achieved
# always do a 'bigger than' check,
# in case of bugs to prevent infinite loops
if len(result) >= LIMIT:
break
# check if there are enough results to fulfill a batch
if BATCH_SIZE_ORIG > LIMIT - len(result):
batch_size = LIMIT - len(result)
return result | 0b57d047b9ba2e3fdbe391a338a2392780d969fc | 14,470 |
from typing import List
from typing import Dict
from typing import Literal
from typing import Any
import numpy as np
def get_matching_based_variables(match_definitions:List[Dict[Literal['name', 'matching'],Any]],
global_dict=None,
local_dict=None,
var_lenght=0):
"""
Function to construct an array with values depending on the condition provided by user
The idea is to define things like, for example, 'region' for a table,
indicating which analysis region is used.
Example:
Assume we want to have region="SRB" when "MET>100 && mt2<450".
For ``MET=[50 ,150,250]`` and ``mt2=[300,400,500]``,
when provided with argument
``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}]``
will give output of ``[None,SRB, None]``.
Args:
match_definitions: list of dictionaries defining matching conditions and
the value associated with the match.
Each dictionary has to have field 'name' (value of variable when condition is met)
and 'matching' -- list of cuts and indices for which the condition is met.
        Conditions are concatenated to each other.
In the example above ``matching_definitions=[{name:"SRB","matching":["np.logical_and(MET>100,mt2<450)"]}``
is equivalent to ``matching_definitions=[{name:"SRB","matching":[1]}`` (index specifying position that matches)
        global_dict: collection of variables and other known objects to be used in the transformation
        local_dict: yet another collection of variables known to be used in the transformation
        var_lenght: length of the corresponding variable/table (in case an index is chosen for the matching specification)
"""
result=None
for specification in match_definitions:
var=specification.get('name',None)
if(var is None):
raise ValueError(f"matching_definitions have to have name for each specification.")
cuts=specification.get('matching',[])
for cut in cuts:
if(type(cut)==str):
cutOutput=np.where(eval(cut,global_dict,local_dict),var,None)
ToAppend=cutOutput.reshape(len(cutOutput),1)
                if(result is None):
result=ToAppend
else:
result=np.concatenate((result,ToAppend),axis=1)
elif(type(cut)==int):
                if(cut>=var_lenght):
                    raise RuntimeError("length of the variable smaller than the required index.")
                else:
                    ToAppend=np.array([[None]]*var_lenght)
                    ToAppend[cut]=var
                    if(result is None):
result=ToAppend
else:
result=np.concatenate((result,ToAppend),axis=1)
else:
raise TypeError("Variable cutDefinitions has improper content.")
return result | 6722bb4f258ef69c4aab74970f8f924ca938bbf5 | 14,471 |
from heapq import heapify, heappop, heappush
def _AStar_graph(problem: BridgeProblem) -> (list, list):
"""Used for graphing, returns solution as well as all nodes in a list"""
all_nodes = [problem.initial_node]
pq = [(problem.initial_node.path_cost + problem.h(problem.initial_node.state), problem.initial_node)]
closed = set()
while True:
assert pq
priority, node = heappop(pq)
if problem.goal_test(node):
return problem.get_ancestors(node), all_nodes
closed.add(node)
children = problem.expand(node)
for node in children:
priority = node.path_cost + problem.h(node.state)
bn = (priority, node)
inpq = None
for i, (_, pq_node) in enumerate(pq):
if node == pq_node: inpq = i
if node not in closed and inpq is None:
heappush(pq, bn)
elif inpq is not None and bn < pq[inpq]:
pq.pop(inpq)
pq.append(bn)
heapify(pq)
all_nodes.extend(children) | a1616f7d499f12a229843007d7bc4939cbd02a7a | 14,472 |
import matplotlib.backends.backend_pdf
import matplotlib.pyplot
def plot_setup(name, figsize=None, fontsize=9, font='paper', dpi=None):
""" Setup a PDF page for plot.
name: PDF file name. If not ending with .pdf, will automatically append.
figsize: dimension of the plot in inches, should be an array of length two.
fontsize: fontsize for legends and labels.
font: font for legends and labels, 'paper' uses Times New Roman, 'default'
uses default, a tuple of (family, font, ...) customizes font.
dpi: resolution of the figure.
"""
paper_plot(fontsize=fontsize, font=font)
if not name.endswith('.pdf'):
name += '.pdf'
pdfpage = matplotlib.backends.backend_pdf.PdfPages(name)
fig = matplotlib.pyplot.figure(figsize=figsize, dpi=dpi)
return pdfpage, fig | 4f9595757df57ee451dddc82815a91b727feb1f1 | 14,473 |
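# Usage sketch for plot_setup(): the paper_plot() helper it calls is assumed to be
# defined in the same module; file name and data are illustrative.
pdfpage, fig = plot_setup('example_plot', figsize=(4, 3), fontsize=9)
ax = fig.add_subplot(1, 1, 1)
ax.plot([0, 1, 2, 3], [0, 1, 4, 9], label='y = x^2')
ax.legend()
pdfpage.savefig(fig)   # write the figure to the PDF page
pdfpage.close()        # finalize example_plot.pdf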
def author_endyear(pub2author_df = None, colgroupby = 'AuthorId', datecol = 'Year', show_progress=False):
"""
Calculate the year of last publication for each author.
Parameters
----------
pub2author_df : DataFrame, default None, Optional
A DataFrame with the author2publication information.
colgroupby : str, default 'AuthorId', Optional
The DataFrame column with Author Ids. If None then the database 'AuthorId' is used.
datecol : str, default 'Year', Optional
The DataFrame column with Date information. If None then the database 'Year' is used.
Returns
-------
DataFrame
        DataFrame with 2 columns: 'AuthorId', 'EndYear'
"""
newname_dict = zip2dict([str(datecol), '0'], ['EndYear']*2)
return pub2author_df.groupby(colgroupby)[datecol].max().to_frame().reset_index().rename(columns=newname_dict) | 8a3ebf5e1870a8aa79ed2cba18fbb18fa634e604 | 14,474 |
import imageio
from tensorflow_docs.vis import embed  # assumed source of embed.embed_file (as used in TensorFlow tutorials)
def to_gif(images, fps):
"""Converts image sequence (4D numpy array) to gif."""
imageio.mimsave('./animation.gif', images, fps=fps)
return embed.embed_file('./animation.gif') | b329da4710a5ad2da57ed2cf6b774ac4b6b8c7dd | 14,475 |
import logging
import numpy as np
from scipy.interpolate import interp1d
def get_half_max_down(signal, peak):
"""See `get_half_max_up` for explanation.
This is a minor modification of the above function.
"""
if peak['peak'] == 0:
return np.nan
fflag = False
half_max = signal[peak['peak']] / 2
falling_signal = signal[peak['peak']:(peak['right']+1)]
closest_idx = (np.abs(falling_signal - half_max)).argmin() + peak['peak']
if closest_idx <= 1 or closest_idx >= 98:
logging.warning('HM_DOWN: half-max too close to end of signal')
return np.nan
# If the signal at the index is nearly equal to half max, take that index
if np.allclose(half_max, signal[closest_idx]):
half_max_point = closest_idx
# ...otherwise interpolate
else:
ix = -1
triplet = signal[(closest_idx - 1):(closest_idx + 2)]
if triplet[0] > half_max > triplet[1]:
ix = 0
elif triplet[1] > half_max > triplet[2]:
ix = 1
else:
logging.warning('HM_DOWN: simple method for interpolating'
' half-max decay time failed')
fflag = True
if ix != -1:
y = [ix,ix+1]
x = [triplet[ix], triplet[ix+1]]
f = interp1d(x,y)
trip_coord = f(half_max)
half_max_point = closest_idx + (trip_coord - 1)
if fflag == True:
half_max_down = np.nan
else:
half_max_down = float(half_max_point - peak['peak'])
return half_max_down | 0b9b20b66a82d8a60aa650bc1bacd24f67f217f1 | 14,476 |
import os
import io
import json
import sys
def connect2server(env=None, key=None, keyfile=None, logger=None):
"""Sets up credentials for accessing the server. Generates a key using info
from the named keyname in the keyfile and checks that the server can be
reached with that key.
Also handles keyfiles stored in s3 using the env param"""
if key and keyfile:
keys = None
if os.path.isfile(keyfile):
with io.open(keyfile, 'r') as kf:
keys_json_string = kf.read()
keys = json.loads(keys_json_string)
if keys:
key = keys.get(key)
try:
auth = get_authentication_with_server(key, env)
except Exception:
logger.error("Authentication failed")
sys.exit(1)
return auth | f80b6482fcba70b291735b0cc938433f38b19f99 | 14,477 |
from copy import copy
def ternary(c):
"""
Encodes the circuit with ternary values
Parameters
----------
c : Circuit
Circuit to encode.
Returns
-------
Circuit
Encoded circuit.
"""
if c.blackboxes:
raise ValueError(f"{c.name} contains a blackbox")
t = copy(c)
# add dual nodes
for n in c:
if c.type(n) in ["and", "nand"]:
t.add(f"{n}_x", "and")
t.add(
f"{n}_x_in_fi",
"or",
fanout=f"{n}_x",
fanin=[f"{p}_x" for p in c.fanin(n)],
)
t.add(f"{n}_0_not_in_fi", "nor", fanout=f"{n}_x")
for p in c.fanin(n):
t.add(
f"{p}_is_0", "nor", fanout=f"{n}_0_not_in_fi", fanin=[p, f"{p}_x"]
)
elif c.type(n) in ["or", "nor"]:
t.add(f"{n}_x", "and")
t.add(
f"{n}_x_in_fi",
"or",
fanout=f"{n}_x",
fanin=[f"{p}_x" for p in c.fanin(n)],
)
t.add(f"{n}_1_not_in_fi", "nor", fanout=f"{n}_x")
for p in c.fanin(n):
t.add(f"{p}_is_1", "and", fanout=f"{n}_1_not_in_fi", fanin=p)
t.add(f"{p}_not_x", "not", fanout=f"{p}_is_1", fanin=f"{p}_x")
elif c.type(n) in ["buf", "not"]:
p = c.fanin(n).pop()
t.add(f"{n}_x", "buf", fanin=f"{p}_x")
elif c.type(n) in ["output"]:
p = c.fanin(n).pop()
t.add(f"{n}_x", "output", fanin=f"{p}_x")
elif c.type(n) in ["xor", "xnor"]:
t.add(f"{n}_x", "or", fanin=(f"{p}_x" for p in c.fanin(n)))
elif c.type(n) in ["0", "1"]:
t.add(f"{n}_x", "0")
elif c.type(n) in ["input"]:
t.add(f"{n}_x", "input")
else:
raise ValueError(f"Node {n} has unrecognized type: {c.type(n)}")
return t | 6fd813b957da408c23cc8a37038b8f3b660fdc73 | 14,478 |
from typing import List
from typing import Dict
import numpy as np
from PIL import Image
def eval_lane_per_frame(
gt_file: str, pred_file: str, bound_ths: List[float]
) -> Dict[str, np.ndarray]:
"""Compute mean,recall and decay from per-frame evaluation."""
task2arr: Dict[str, np.ndarray] = dict() # str -> 2d array
gt_byte = np.asarray(Image.open(gt_file))
pred_byte = np.asarray(Image.open(pred_file))
gt_foreground = get_foreground(gt_byte)
pd_foreground = get_foreground(pred_byte)
for task_name, class_func in sub_task_funcs.items():
task_scores: List[List[float]] = []
for value in range(len(sub_task_cats[task_name])):
gt_mask = class_func(gt_byte, value) & gt_foreground
pd_mask = class_func(pred_byte, value) & pd_foreground
cat_scores = [
eval_lane_per_threshold(gt_mask, pd_mask, bound_th)
for bound_th in bound_ths
]
task_scores.append(cat_scores)
task2arr[task_name] = np.array(task_scores)
return task2arr | 571cd737151576869170e33d181f89d22bc0657b | 14,479 |
import torch
def membrane(field, voxel_size=1, bound='dct2', dim=None, weights=None):
"""Precision matrix for the Membrane energy
Note
----
.. This is exactly equivalent to SPM's membrane energy
Parameters
----------
field : (..., *spatial) tensor
voxel_size : float or sequence[float], default=1
bound : str, default='dct2'
dim : int, default=field.dim()
weights : (..., *spatial) tensor, optional
Returns
-------
field : (..., *spatial) tensor
"""
if weights is None:
return _membrane_l2(field, voxel_size, bound, dim)
def mul_(x, y):
"""Smart in-place multiplication"""
if ((torch.is_tensor(x) and x.requires_grad) or
(torch.is_tensor(y) and y.requires_grad)):
return x * y
else:
return x.mul_(y)
backend = dict(dtype=field.dtype, device=field.device)
dim = dim or field.dim()
if torch.is_tensor(voxel_size):
voxel_size = make_vector(voxel_size, dim, **backend)
dims = list(range(field.dim()-dim, field.dim()))
fieldf = diff(field, dim=dims, voxel_size=voxel_size, side='f', bound=bound)
weights = torch.as_tensor(weights, **backend)
fieldf = mul_(fieldf, weights[..., None])
fieldb = diff(field, dim=dims, voxel_size=voxel_size, side='b', bound=bound)
fieldb = mul_(fieldb, weights[..., None])
dims = list(range(fieldb.dim() - 1 - dim, fieldb.dim() - 1))
fieldb = div(fieldb, dim=dims, voxel_size=voxel_size, side='b', bound=bound)
dims = list(range(fieldf.dim()-1-dim, fieldf.dim()-1))
field = div(fieldf, dim=dims, voxel_size=voxel_size, side='f', bound=bound)
del fieldf
field += fieldb
field *= 0.5
return field | 5e238ca2253fc7105b1bbfba58947ac442c05699 | 14,480 |
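# Usage sketch for membrane(): the diff/div/_membrane_l2 finite-difference helpers it
# relies on are assumed to come from the same package; shapes are illustrative.
import torch

field = torch.randn(4, 32, 32, 32)          # batch of 3D scalar fields
precond = membrane(field, voxel_size=1.5, bound='dct2', dim=3)
print(precond.shape)                         # same shape as the input field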
from typing import Tuple
import struct
def get_uint64(dgram: bytes, start_index: int) -> Tuple[int, int]:
"""Get a 64-bit big-endian unsigned integer from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the integer starts in the datagram.
Returns:
A tuple containing the integer and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _UINT64_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>Q',
dgram[start_index:start_index + _UINT64_DGRAM_LEN])[0],
start_index + _UINT64_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e) | e5ed2470656e3c0d1a8efe02bd638ac05245f187 | 14,481 |
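# Usage sketch for get_uint64(): assumes _UINT64_DGRAM_LEN is 8, as for a standard
# big-endian 64-bit unsigned integer.
import struct

dgram = struct.pack('>Q', 1234567890)
value, next_index = get_uint64(dgram, 0)
# value == 1234567890, next_index == 8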
import torch
from typing import Optional
from typing import List
def fftshift(x: torch.Tensor, dim: Optional[List[int]] = None) -> torch.Tensor:
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
Args:
x: A PyTorch tensor.
dim: Which dimension to fftshift.
Returns:
fftshifted version of x.
"""
if dim is None:
        # this weird code is necessary for torch.jit.script typing
dim = [0] * (x.dim())
for i in range(1, x.dim()):
dim[i] = i
# also necessary for torch.jit.script
shift = [0] * len(dim)
for i, dim_num in enumerate(dim):
shift[i] = x.shape[dim_num] // 2
return roll(x, shift, dim) | a1ff7a81df83df63dcbcf56cf89d9b0e54c16ba0 | 14,482 |
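# Usage sketch for fftshift(): the roll() helper it calls is assumed to behave like
# np.roll over the given dimensions.
import torch

x = torch.arange(8.)
shifted = fftshift(x, dim=[0])
# shifted == tensor([4., 5., 6., 7., 0., 1., 2., 3.]), matching np.fft.fftshift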
def flatten(x):
"""Flattens nested list"""
if isinstance(x, list):
return [a for i in x for a in flatten(i)]
else:
return [x] | 7d348f8287dfccfbb77a52a84a5642c265381eb1 | 14,483 |
import copy
def get_capture_points_gazebo(bag, odom_topic='/gazebo/model_states', sync_topic='/mavros/imu/data_raw', camera_freq=20, sync_topic_freq=100, method='every'):
"""
method(string): method for sampling capturing points.
'every': Sample IMU for every n msgs, and then capture odometry msg which has the closest timestamp. This requires the existence of odom_msg for every imu_msg.
"""
odom_msg_list = []
odom_time_list = []
odom_stamp_list = []
capture_time_list = []
sync_topic_num = 0
for topic, msg, t in bag:
if topic==odom_topic:
odom_msg_list.append(msg)
odom_time_list.append(t.to_time())
odom_stamp_list.append(copy.deepcopy(t))
for topic, msg, t in bag:
if topic==sync_topic:
if odom_time_list[0] > t.to_time():
continue
if sync_topic_num % (int(sync_topic_freq/camera_freq)) == 0:
capture_time_list.append(t.to_time())
sync_topic_num += 1
assert len(odom_msg_list)==len(odom_time_list) and len(odom_msg_list)==len(odom_stamp_list), 'length of odom_(msg/time/stamp)_list is not equal.'
# start sampling odometry
capture_points = []
curr_odom_idx = 0
for idx, capture_time in enumerate(capture_time_list):
# take an odometry msg which has the timestamp closest to capture_time
if capture_time < min(odom_time_list):
continue
while abs(capture_time - odom_time_list[curr_odom_idx]) >= 5*10**(-5):
curr_odom_idx += 1
if curr_odom_idx >= len(odom_time_list):
break
if curr_odom_idx >= len(odom_time_list):
break
        if odom_topic=='/gazebo/model_states':
capture_point = get_capture_point_from_gazebo_model_states(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx])
elif odom_topic=='/odometry':
capture_point = get_capture_point_from_navmsgs_odom(idx, odom_msg_list[curr_odom_idx], odom_stamp_list[curr_odom_idx])
capture_points.append(capture_point)
return capture_points | 75d351c0ecd6ad8dad6e7cfcd2ecd04d0826405b | 14,484 |
import fileinput
def parse_input():
"""Parse input and return array of calendar
A user can either pass the calendar via the stdin or via one or several
icalendar files. This method will parse the input and return an array
of valid icalendar
"""
input_data = ''
calendars = []
for line in fileinput.input():
if 'BEGIN:VCALENDAR' in line:
calendars.append(input_data)
input_data = line
else:
input_data += line
calendars.append(input_data)
return calendars[1:] | a60a760968f139da0b7753ae5717d78b640cb232 | 14,485 |
def identity(obj):
"""Returns the ``obj`` parameter itself
:param obj: The parameter to be returned
:return: ``obj`` itself
>>> identity(5)
5
>>> foo = 2
>>> identity(foo) is foo
True
"""
return obj | a3271a831d2e91fe6eebed7e80c18e7c81996da6 | 14,486 |
import numpy as np
def percent_clipper(x, percentiles):
"""
Takes data as np.ndarray and percentiles as array-like
Returns clipped ndarray
"""
    LOWERBOUND, UPPERBOUND = np.percentile(x, [percentiles[0], percentiles[1]])
return np.clip(x, LOWERBOUND, UPPERBOUND) | 3d114a956bfd0b6b8349c39f5c42f4487a812ee7 | 14,487 |
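# Usage sketch for percent_clipper():
import numpy as np

x = np.arange(100.0)
clipped = percent_clipper(x, (5, 95))
# values below the 5th or above the 95th percentile are clipped to those bounds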
import numpy as np
def check_prob_vector(p):
"""
Check if a vector is a probability vector.
Args:
p, array/list.
"""
assert np.all(p >= 0), p
assert np.isclose(np.sum(p), 1), p
return True | f9a6ea74fe9e5ff8a7244e7cc8aee2cbf5ae512e | 14,488 |
import numpy as np
import tensorflow as tf
def relabel_subgraph():
""" This function adapts an existing sampler by relabelling the vertices in the edge list
to have dense index.
Returns
-------
sample: a function, that when invoked, produces a sample for the input function.
"""
def relabel(edge_list, positive_vertices):
shape = edge_list.shape
vertex_index, edge_list = np.unique(edge_list, return_inverse=True)
edge_list = edge_list.astype(np.int32).reshape(shape)
# relabel the positive vertices
positive_verts = np.searchsorted(vertex_index, positive_vertices)
is_positive = np.zeros_like(vertex_index)
is_positive[positive_verts] = 1
return edge_list, vertex_index, is_positive
def sample(data):
edge_list = data['edge_list']
positive_vertices = data.get('positive_vertices', tf.unique(tf.reshape(edge_list, [-1]))[0])
vertex_index = data.get('vertex_index', None)
if isinstance(edge_list, tf.Tensor):
new_edge_list, new_vertex_index, is_positive = tf.py_func(relabel, [edge_list, positive_vertices],
[tf.int32, tf.int32, tf.int32], stateful=False)
new_edge_list.set_shape(edge_list.shape)
new_vertex_index.set_shape([None])
is_positive.set_shape([None])
else:
new_edge_list, new_vertex_index, is_positive = relabel(edge_list, positive_vertices)
if vertex_index is not None:
if isinstance(vertex_index, tf.Tensor):
vertex_index = tf.gather(vertex_index, new_vertex_index, name='resample_vertex_index')
else:
vertex_index = vertex_index[new_vertex_index]
else:
vertex_index = new_vertex_index
return {**data, 'edge_list': new_edge_list, 'vertex_index': vertex_index, 'is_positive': is_positive}
return sample | e9b8269640663b830c894c4aa4f8a8cce2b49af7 | 14,489 |
import os
import re
def get_activation_bytes(input_file=None, checksum=None):
"""
Get the activation bytes from the .aax checksum using rainbow tables.
None is returned if the activation bytes can't be computed.
"""
if (not input_file and not checksum) or (input_file and checksum):
raise ValueError('Please specify only one of [input_file, checksum]')
if input_file:
checksum = get_checksum(input_file)
_, stdout, _ = run_cmd(
['./rcrack', '.', '-h', checksum],
cwd=os.path.join(_SCRIPT_PATH, 'tables')
)
activation_bytes = re.findall('hex:(.*)', stdout)[0]
return activation_bytes | 97c0619579ffa05ef7b878300d64c09ff638c9d2 | 14,490 |
def init_binary(mocker):
"""Initialize a dummy BinaryDigitalAssetFile for testing."""
mocker.patch.multiple(
houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile,
__init__=lambda x, y, z: None,
)
def _create():
return houdini_package_runner.items.digital_asset.BinaryDigitalAssetFile(
None, None
)
return _create | 34a3ee5fb09f413bf07b36f1b73189472c188f3d | 14,491 |
def with_setup_(setup=None, teardown=None):
"""Decorator like `with_setup` of nosetest but which can be applied to any
function"""
def decorated(function):
def app(*args, **kwargs):
if setup:
setup()
try:
function(*args, **kwargs)
finally:
if teardown:
teardown()
return app
return decorated | f9e8eddfd01ee99e458857de403c49b91dafa92c | 14,492 |
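# Usage sketch for with_setup_(): the setup/teardown functions below are hypothetical.
def make_tempdir():
    print("setup")

def remove_tempdir():
    print("teardown")

@with_setup_(setup=make_tempdir, teardown=remove_tempdir)
def test_example():
    assert 1 + 1 == 2

test_example()   # prints "setup", runs the assertion, then prints "teardown"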
import click
def post_options():
"""Standard arguments and options for posting timeseries readings.
"""
options = [
click.argument('port'),
click.argument('value', type=JSONParamType()),
click.option('--timestamp', metavar='DATE',
help='the time of the reading'),
]
def wrapper(func):
func.__doc__ += _post_options_docs
for option in reversed(options):
func = option(func)
return func
return wrapper | 7b7c386bfcbf36f1365392a6ba2562fa0ed520ce | 14,493 |
from functools import wraps
from flask import make_response, redirect, request  # Flask assumed from the request/response API used below
def authenticated(f):
"""Decorator for authenticating with the Hub"""
@wraps(f)
def decorated(*args, **kwargs):
token = request.cookies.get(auth.cookie_name)
if token:
user = auth.user_for_token(token)
else:
user = None
if user:
return f(user, *args, **kwargs)
else:
# redirect to login url on failed auth
state = auth.generate_state(next_url=request.path)
response = make_response(
redirect(auth.login_url + '&state=%s' % state)
)
response.set_cookie(auth.state_cookie_name, state)
return response
return decorated | 1149f14ad540521b71efa3a3240c13719ccf8a17 | 14,494 |
def json_complex_hook(dct):
"""
    Convert an encoded complex number to its Python representation.
:param dct: (dict) json encoded complex number (__complex__)
:return: python complex number
"""
if isinstance(dct, dict):
if '__complex__' in dct:
parts = dct['__complex__']
assert len(parts) == 2
return parts[0] + parts[1] * 1j
return dct | a3c8cb13485279ab3b222eb63efdfdf6421c17a6 | 14,495 |
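# Usage sketch pairing json_complex_hook with json.loads as an object_hook:
import json

encoded = '{"__complex__": [1.0, 2.0]}'
value = json.loads(encoded, object_hook=json_complex_hook)
assert value == 1 + 2j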
import numpy as np
def reg_logLiklihood(x, weights, y, C):
    """Regularized log-likelihood function (cost function to be minimized in logistic
regression classification with L2 regularization)
Parameters
-----------
x : {array-like}, shape = [n_samples, n_features + 1]
feature vectors. Note, first column of x must be
a vector of ones.
weights : 1d-array, shape = [1, 1 + n_features]
        Coefficients that weight each sample's feature vector
y : list, shape = [n_samples,], values = 1|0
target values
C : float
Regularization parameter. C is equal to 1/lambda
Returns
-----------
    Value of the regularized log-likelihood function with the given feature values,
weights, target values, and regularization parameter
"""
z = np.dot(x, weights)
reg_term = (1 / (2 * C)) * np.dot(weights.T, weights)
return -1 * np.sum((y * np.log(logistic_func(z))) + ((1 - y) * np.log(1 - logistic_func(z)))) + reg_term | 4a13bac09a6989463014784c72c72729ec40e718 | 14,496 |
import numpy as np
def smooth_internal(xyzlist, atom_names, width, allpairs=False, w_morse=0.0, rep=False, anchor=-1, window='hanning', **kwargs):
"""Smooth a trajectory by transforming to redundant, internal coordinates,
running a 1d timeseries smoothing algorithm on each DOF, and then
reconstructing a set of consistent cartesian coordinates.
TODO: write this function as a iterator that yields s_xyz, so that
they can be saved to disk (async) immediately when they're produced.
Parameters
----------
xyzlist : np.ndarray
Cartesian coordinates
atom_names : array_like of strings
The names of the atoms. Required for determing connectivity.
width : float
Width for the smoothing kernels
allpairs : bool
Use all interatomic distances (not just the bonds)
w_morse: float
Weight of the Morse potential in the smoothing
window: string, default='hanning'
Type of window to perform the averaging
Other Parameters
----------------
bond_width : float
Override width just for the bond terms
angle_width : float
Override width just for the angle terms
dihedral_width : float
Override width just for the dihedral terms
xyzlist_guess :
Cartesian coordinates to use as a guess during the
reconstruction from internal
xyzlist_match :
Cartesian coordinates to use as a guess during the
reconstruction from internal
Returns
-------
smoothed_xyzlist : np.ndarray
"""
bond_width = kwargs.pop('bond_width', width)
angle_width = kwargs.pop('angle_width', width)
dihedral_width = kwargs.pop('dihedral_width', width)
xyzlist_guess = kwargs.pop('xyzlist_guess', xyzlist)
xyzlist_match = kwargs.pop('xyzlist_match', None)
for key in list(kwargs.keys()):
raise KeyError('Unrecognized key, %s' % key)
ibonds, iangles, idihedrals = None, None, None
s_bonds, s_angles, s_dihedrals = None, None, None
ibonds, iangles, idihedrals = union_connectivity(xyzlist, atom_names, allpairs=allpairs)
# get the internal coordinates in each frame
bonds = core.bonds(xyzlist, ibonds)
angles = core.angles(xyzlist, iangles)
dihedrals = core.dihedrals(xyzlist, idihedrals)
# run the smoothing
s_bonds = np.zeros_like(bonds)
s_angles = np.zeros_like(angles)
s_dihedrals = np.zeros_like(dihedrals)
for i in range(bonds.shape[1]):
#s_bonds[:, i] = buttersworth_smooth(bonds[:, i], width=bond_width)
s_bonds[:, i] = window_smooth(bonds[:, i], window_len=bond_width, window=window)
for i in range(angles.shape[1]):
#s_angles[:, i] = buttersworth_smooth(angles[:, i], width=angle_width)
s_angles[:, i] = window_smooth(angles[:, i], window_len=angle_width, window=window)
# filter the dihedrals with the angular smoother, that filters
# the sin and cos components separately
for i in range(dihedrals.shape[1]):
#s_dihedrals[:, i] = angular_smooth(dihedrals[:, i],
# smoothing_func=buttersworth_smooth, width=dihedral_width)
s_dihedrals[:, i] = angular_smooth(dihedrals[:, i],
smoothing_func=window_smooth,
window_len=dihedral_width, window=window)
# compute the inversion for each frame
s_xyzlist = np.zeros_like(xyzlist_guess)
errors = np.zeros(len(xyzlist_guess))
# Thresholds for error and jump
w_xrefs = 0.0
for i, xyz_guess in enumerate(xyzlist_guess):
w_xref = 0.0
passed = False
corrected = False
while not passed:
passed = False
if i > 0:
xref = s_xyzlist[i-1]
else:
xref = None
passed = True
ramp = 0.1
if i >= (1.0-ramp)*len(xyzlist_guess):
w_morse_ = w_morse * float(len(xyzlist_guess)-i-1)/(ramp*len(xyzlist_guess))
elif i <= ramp*len(xyzlist_guess):
w_morse_ = w_morse * float(i)/(ramp*len(xyzlist_guess))
else:
w_morse_ = w_morse
r = least_squares_cartesian(s_bonds[i], ibonds, s_angles[i], iangles,
s_dihedrals[i], idihedrals, xyz_guess, xref=xref, w_xref=w_xref,
elem=atom_names, w_morse=w_morse_, rep=rep)
s_xyzlist[i], errors[i] = r
if i > 0:
aligned0 = align_trajectory(np.array([xyzlist[i],xyzlist[i-1]]), 0)
aligned1 = align_trajectory(np.array([s_xyzlist[i],s_xyzlist[i-1]]), 0)
maxd0 = np.max(np.abs(aligned0[1] - aligned0[0]))
maxd1 = np.max(np.abs(aligned1[1] - aligned1[0]))
if maxd0 > 1e-5:
jmp = maxd1 / maxd0
else:
jmp = 0.0
else:
maxd1 = 0.0
jmp = 0.0
if (not passed) and (anchor < 0 or jmp < anchor):
passed = True
if w_xref >= 1.99:
w_xrefs = w_xref - 1.0
elif w_xref < 0.1:
w_xrefs = 0.0
else:
w_xrefs = w_xref / 1.5
elif not passed:
if w_xref == 0.0:
if w_xrefs > 0.0:
w_xref = w_xrefs
else:
w_xref = 2.0**10 / 3.0**10
else:
if w_xref >= 0.99:
w_xref += 1.0
else:
w_xref *= 1.5
if w_xref > 30:
print("\nanchor %f, giving up" % (w_xref))
# Set it back to a reasonable (but still high) number
w_xrefs = 20.0
passed = True
else:
print("jump %f max(dx) %f, trying anchor = %f\r" % (jmp, maxd1, w_xref), end=' ')
corrected = True
        if xyzlist_match is not None:
aligned_ij = align_trajectory(np.array([s_xyzlist[i], xyzlist_match[i]]), 0)
maxd_ij = np.max(np.abs(aligned_ij[1] - aligned_ij[0]))
if maxd_ij < 1e-3:
print("% .4f" % maxd_ij, "\x1b[92mMatch\x1b[0m")
s_xyzlist[i:] = xyzlist_match[i:]
break
# Print out a message if we had to correct it.
if corrected:
print('\rxyz: error %f max(dx) %f jump %s anchor %f' % (errors[i], maxd1, jmp, w_xref))
if (i%10) == 0:
print("\rWorking on frame %i / %i" % (i, len(xyzlist_guess)), end=' ')
print()
if i > 0:
print('max(dx) %f (new) %f (old) %s%f\x1b[0m (ratio)' % (maxd1, maxd0, "\x1b[91m" if jmp > 3 else "", jmp))
#return_value = (interweave(s_xyzlist), interweave(errors))
return_value = s_xyzlist, errors
return return_value | 68c5bc7cacc0c14d2e9fad49d3017e5154d57a83 | 14,497 |
import argparse
def cli():
"""Define the command line interface of the script."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter
)
parser.add_argument("--output", help="the directory to store the archives")
parser.add_argument("--repeats", type=int, default=2, help="the number of compilations to run")
parser.add_argument("--overwrite", action="store_true", help="ignore existing archives")
parser.add_argument("--log-level", choices=("debug", "info", "warning", "error", "critical"),
default="info", help="the severity of log messages to print")
return parser | a555ea585cc26fe41ce1d9f95b75ee2ca443cdae | 14,498 |
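# Usage sketch for cli():
parser = cli()
args = parser.parse_args(['--output', '/tmp/archives', '--repeats', '3'])
# args.output == '/tmp/archives', args.repeats == 3, args.log_level == 'info'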
import itertools
def estimate_gridsearch_size(model, params):
""" Compute the total number of parameter combinations in a grid search
Parameters
----------
model: str
name of the model to train. The function currently supports feedforward neural networks (model = 'FNN'),
long-short term memory (model = 'LSTM') and naive discriminative learning (model = 'NDL') also commonly known as
Rescorla-Wagner model.
params: dict of lists
parameter set of the grid search:
Returns
-------
int
number of param combinations
"""
### FNN model
if model == 'FNN':
# Extract the dimensions of the pretrained embeddings
pretrain_embed_dim = {}
embed_inputs = params['embedding_input']
for i, e in enumerate(embed_inputs):
if embed_inputs[i] and embed_inputs[i] != 'learn':
pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})
# Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
### Remove impossible combinations
ind_to_remove = []
for i,d in enumerate(grid_full):
# In the case of no hidden layer, no need to set the 'activation' parameter - only 'last_activation' is used
if grid_full[i]['hidden_layers'] == 0:
grid_full[i]['activation'] = None
# In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
# it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
# embeddings to be learned from scratch
if not grid_full[i]['embedding_input']:
grid_full[i]['embedding_dim'] = None
elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
ind_to_remove.append(i)
elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]
# In the case of embeddings, it is essential to set 'max_len' (max_len cannot be None),
# so remove all cases where embeddings are used max_len is not given
if grid_full[i]['embedding_input'] and not grid_full[i]['max_len']:
ind_to_remove.append(i)
# First remove the detected impossible combinations (e.g. 'embedding_input = 'learn', embedding_dim = None')
for ii in sorted(ind_to_remove, reverse = True):
del grid_full[ii]
# Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]
### LSTM model
elif model == 'LSTM':
# Extract the dimensions of the pretrained embeddings
pretrain_embed_dim = {}
embed_inputs = params['embedding_input']
for i, e in enumerate(embed_inputs):
if embed_inputs[i] and embed_inputs[i] != 'learn':
pretrain_embed_dim.update({embed_inputs[i]:extract_embedding_dim(embed_inputs[i])})
### Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
### Remove impossible combinations
ind_to_remove = []
for i,d in enumerate(grid_full):
# In the case of hot encoding or pretrained embedding, no need to set embedding_dim, otherwise,
# it is essential to set embedding_dim, so remove all cases where embedding_dim is not given with
# embeddings to be learned from scratch
if not grid_full[i]['embedding_input']:
grid_full[i]['embedding_dim'] = None
elif grid_full[i]['embedding_input'] == 'learn' and not grid_full[i]['embedding_dim']:
ind_to_remove.append(i)
elif grid_full[i]['embedding_input'] and grid_full[i]['embedding_input'] != 'learn':
grid_full[i]['embedding_dim'] = pretrain_embed_dim[grid_full[i]['embedding_input']]
# First remove the combinations 'embedding_input = 'learn', embedding_dim = None'
for ii in sorted(ind_to_remove, reverse = True):
del grid_full[ii]
# Second remove the duplicated combinations 'embedding_input != 'learn', embedding_dim = None'
grid_full = [dict(t) for t in {tuple(d.items()) for d in grid_full}]
### NDL model
elif model == 'NDL':
### Create a list of dictionaries giving all possible parameter combinations
keys, values = zip(*params.items())
grid_full = [dict(zip(keys, v)) for v in itertools.product(*values)]
# Raise an error if a non-supported model is entered
else:
raise ValueError(f'The entered model "{model}" is not supported')
return len(grid_full) | 8105a457b3ec30c3cdc6c42bb71afd229770b376 | 14,499 |