content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
def weighting_system_z():
"""Z-weighting filter represented as polynomial transfer function.
:returns: Tuple of `num` and `den`.
Z-weighting is 0.0 dB for all frequencies and therefore corresponds to a
multiplication of 1.
"""
numerator = [1]
denominator = [1]
return numerator, denominator | 8d84c572631c23f50f8a57e388e21fa62e316930 | 6,300 |
def shutdown():
"""
Shuts down the API (since there is no legit way to kill the thread)
Pulled from https://stackoverflow.com/questions/15562446/how-to-stop-flask-application-without-using-ctrl-c
"""
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...', 200 | a5c1a226fac7c912c11415abb08200cbe2e6f1e3 | 6,301 |
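A minimal sketch of how a handler like this is typically wired into a Flask app; the `app` object and route path are illustrative, not part of the snippet, and note that `werkzeug.server.shutdown` was removed in newer Werkzeug releases (2.1+), so this trick only works with older versions.
from flask import Flask, request

app = Flask(__name__)

# Assumes the shutdown() handler above is defined in this module.
app.add_url_rule("/shutdown", "shutdown", shutdown, methods=["POST"])

if __name__ == "__main__":
    app.run(port=5000)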
def on_post_request():
"""This function triggers on every POST request to chosen endpoint"""
data_sent = request.data.decode('utf-8')
return Response(return_animal_noise(data_sent), mimetype='text/plain') | c43343c697bfde9751dc4bb36b7ad162e7578049 | 6,302 |
def settings(comid=None, community=None):
"""Modify a community."""
pending_records = \
len(CommunityRecordsCollection(community).filter({'status': 'P'}))
return render_template(
'invenio_communities/settings.html',
community=community,
comid=comid,
pending_records=pending_records) | 702b41348b461876ebaae49187a3543dbdcd7d0d | 6,303 |
import math
def product_of_basins():
"""Return the product of the sizes of the three largest basins."""
max_x = len(values[0]) - 1
max_y = len(values) - 1
def heightmap(x, y):
"""Return the height value in (xth column, yth row)."""
return values[y][x]
def is_lowpoint(x, y):
"""Return True if (x, y) is a lowpoint, else False."""
value = heightmap(x, y)
return all((x == 0 or value < heightmap(x - 1, y), # left
x == max_x or value < heightmap(x + 1, y), # right
y == 0 or value < heightmap(x, y - 1), # up
y == max_y or value < heightmap(x, y + 1))) # down
def basin_size(x, y):
"""Return the basin size of the low point (x, y)."""
if (x, y) in visited or heightmap(x, y) == 9:
return 0
visited.add((x, y))
value = heightmap(x, y)
size = 1
if x > 0 and value <= heightmap(x - 1, y): # left
size += basin_size(x - 1, y)
if x < max_x and value <= heightmap(x + 1, y): # right
size += basin_size(x + 1, y)
if y > 0 and value <= heightmap(x, y - 1): # up
size += basin_size(x, y - 1)
if y < max_y and value <= heightmap(x, y + 1): # down
size += basin_size(x, y + 1)
return size
visited = set()
basin_sizes = []
lowpoints = ((x, y)
for x in range(max_x + 1)
for y in range(max_y + 1)
if is_lowpoint(x, y))
for x, y in lowpoints:
basin_sizes.append(basin_size(x, y))
basin_sizes.sort(reverse=True)
return math.prod(basin_sizes[:3]) | 3624faa5b5d1e991c31f2f0c5f790c68619a0b85 | 6,304 |
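A usage sketch on the well-known 5x10 example heightmap, assuming product_of_basins above lives in this module and reads the module-level `values` grid it expects (an assumption, since the snippet never defines `values`):
grid = """2199943210
3987894921
9856789892
8767896789
9899965678"""

values = [[int(c) for c in row] for row in grid.splitlines()]
print(product_of_basins())  # 1134  (three largest basins: 9 * 14 * 9)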
def singularity26(function):
"""Decorator to set the global singularity version"""
def wrapper(*args, **kwargs):
hpccm.config.g_ctype = container_type.SINGULARITY
hpccm.config.g_singularity_version = StrictVersion('2.6')
return function(*args, **kwargs)
return wrapper | a6cdd7f7a8b000a63fa459a38ef6dd3fa0eec037 | 6,305 |
def denormalize(series, last_value):
"""Denormalize the values for a given series.
This uses the last value available (i.e. the last
closing price of the week before our prediction)
as a reference for scaling the predicted results.
"""
result = last_value * (series + 1)
return result | f4c32aa4248378482f1294c54e706e6ee8d5332d | 6,306 |
import warnings
def tfidf(
s: pd.Series, max_features=None, min_df=1, max_df=1.0, return_feature_names=False
) -> pd.Series.sparse:
"""
Represent a text-based Pandas Series using TF-IDF.
*Term Frequency - Inverse Document Frequency (TF-IDF)* is a formula to
calculate the _relative importance_ of the words in a document, taking
into account the words' occurrences in other documents. It consists of two parts:
The *term frequency (tf)* tells us how frequently a term is present in a document,
so tf(document d, term t) = number of times t appears in d.
The *inverse document frequency (idf)* measures how _important_ or _characteristic_
a term is among the whole corpus (i.e. among all documents).
Thus, idf(term t) = log((1 + number of documents) / (1 + number of documents where t is present)) + 1.
Finally, tf-idf(document d, term t) = tf(d, t) * idf(t).
Different from the `sklearn implementation of tfidf <https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html>`_,
this function does *not* normalize the output in any way,
so the result is exactly what you
get applying the formula described above.
The input Series should already be tokenized. If not, it will
be tokenized before tfidf is calculated.
If working with big pandas Series, you might want to limit
the number of features through the max_features parameter.
Parameters
----------
s : Pandas Series (tokenized)
max_features : int, optional, default to None.
If not None, only the max_features most frequent tokens are used.
min_df : int, optional, default to 1.
When building the vocabulary, ignore terms that have a document
frequency (number of documents a term appears in) strictly lower than the given threshold.
max_df : int or double, optional, default to 1.0
When building the vocabulary, ignore terms that have a document
frequency (number of documents a term appears in) strictly higher than the given threshold. This arguments basically permits to remove corpus-specific stop words. When the argument is a float [0.0, 1.0], the parameter represents a proportion of documents.
return_feature_names: Boolean, optional, default to False
Whether to return the feature (i.e. word) names with the output.
Examples
--------
>>> import texthero as hero
>>> import pandas as pd
>>> s = pd.Series(["Hi Bye", "Test Bye Bye"])
>>> s = hero.tokenize(s)
>>> hero.tfidf(s, return_feature_names=True)
(document
0 [1.0, 1.4054651081081644, 0.0]
1 [2.0, 0.0, 1.4054651081081644]
dtype: object, ['Bye', 'Hi', 'Test'])
"""
# Check if input is tokenized. Else, print warning and tokenize.
if not isinstance(s.iloc[0], list):
warnings.warn(_not_tokenized_warning_message, DeprecationWarning)
s = preprocessing.tokenize(s)
tfidf = TfidfVectorizer(
use_idf=True,
max_features=max_features,
min_df=min_df,
max_df=max_df,
tokenizer=lambda x: x,
preprocessor=lambda x: x,
norm=None, # Disable l1/l2 normalization.
)
tfidf_vectors_csr = tfidf.fit_transform(s)
# Result from sklearn is in Compressed Sparse Row format.
# Pandas Sparse Series can only be initialized from Coordinate format.
tfidf_vectors_coo = coo_matrix(tfidf_vectors_csr)
s_out = pd.Series.sparse.from_coo(tfidf_vectors_coo)
# Map word index to word name and keep original index of documents.
feature_names = tfidf.get_feature_names()
s_out.index = s_out.index.map(lambda x: (s.index[x[0]], feature_names[x[1]]))
s_out.rename_axis(["document", "word"], inplace=True)
# NOTE: Currently: still convert to flat series instead of representation series.
# Will change to return representation series directly in Version 2.
s_out = representation_series_to_flat_series(
s_out, fill_missing_with=0.0, index=s.index
)
if return_feature_names:
return s_out, feature_names
else:
return s_out | 56f07d62254b873fc5581af26160d7bf4fc5d7e6 | 6,307 |
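A hand check of the formula quoted in the docstring, reproducing the numbers from the example above in plain Python (no texthero or sklearn needed):
from math import log

docs = [["Hi", "Bye"], ["Test", "Bye", "Bye"]]
vocab = sorted({t for doc in docs for t in doc})   # ['Bye', 'Hi', 'Test']
n_docs = len(docs)

def idf(term):
    df = sum(term in doc for doc in docs)          # document frequency
    return log((1 + n_docs) / (1 + df)) + 1

tfidf_rows = [[doc.count(t) * idf(t) for t in vocab] for doc in docs]
print(vocab)        # ['Bye', 'Hi', 'Test']
print(tfidf_rows)   # [[1.0, 1.405..., 0.0], [2.0, 0.0, 1.405...]]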
from typing import Tuple
from typing import Any
from typing import Dict
import time
from typing import cast
def decorator(fn: AsyncFn, *, expire: int, maxsize: int) -> AsyncFn:
"""Cache decorator."""
cache = LRUCache(maxsize=maxsize)
@wraps(fn)
async def wrapper(*args: Tuple[Any, ...], **kwds: Dict[str, Any]) -> Any:
"""Wrap the original async `fn`.
Cached results will be returned if cache hit, otherwise
(missing/expired) `fn` will be invoked and its result will be cached.
Args:
args: Positional arguments in function parameters.
kwds: Keyword arguments in function parameters.
Returns:
The (maybe cached) result of `fn(*args, **kwds)`.
"""
key = CacheKey.make(args, kwds)
value = cache[key]
# cache miss/expired
if value is None:
result = await fn(*args, **kwds)
cache[key] = CacheValue(expired=time.monotonic() + expire, data=result)
return result
return value.data
wrapper.__dict__["cache"] = cache
wrapper.__dict__["expire"] = expire
return cast(AsyncFn, wrapper) | 07bcb2181787f5af00098b9de27186a3e6aa1cfa | 6,308 |
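The helpers this decorator relies on (AsyncFn, LRUCache, CacheKey, CacheValue) are not shown in the snippet; below is a minimal, hypothetical sketch of stand-ins consistent with how the decorator uses them, with expiry handled inside the cache lookup exactly as the wrapper expects (a miss or an expired entry both come back as None):
import time
from collections import OrderedDict, namedtuple

CacheValue = namedtuple("CacheValue", "expired data")

class LRUCache:
    def __init__(self, maxsize=128):
        self._data = OrderedDict()
        self._maxsize = maxsize

    def __getitem__(self, key):
        value = self._data.get(key)
        if value is None:
            return None
        if value.expired < time.monotonic():  # treat expired entries as misses
            del self._data[key]
            return None
        self._data.move_to_end(key)  # mark as most recently used
        return value

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        if len(self._data) > self._maxsize:
            self._data.popitem(last=False)  # evict the least recently used entry

class CacheKey:
    @staticmethod
    def make(args, kwds):
        # Hashable key built from positional and keyword arguments.
        return (args, tuple(sorted(kwds.items())))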
import xml.etree.ElementTree as xml
import os
def cleanQrc(uiFile):
"""
Looks for included resources files in provided .ui file
If it doesn't find any, it returns the original file else:
Adds all search paths to Qt
Converts all paths
turns this> :/images/C:/Users/mindd/Desktop/CircleOfFifths.jpg
into this> images:CircleOfFifths.jpg
Removes resources 'include' tag
Creates and returns new _mpi.ui file or original
"""
# uiFile = os.path.join(os.getcwd(),uiFile)
parsed = xml.parse(uiFile)
# No resource.qrc files found
# we return the original file
if parsed.find('resources') is None: return uiFile
# Add search paths
for include in parsed.iter('include'):
location = include.get('location')# qrc file
if 'qrc' in location:
qrcFile = xml.parse(location)
for qresource in qrcFile.iter('qresource'):
prefix = qresource.get('prefix')# prefix
for file in qresource.findall('file'):
QDir.addSearchPath(prefix, os.path.dirname(file.text))
# print(location,prefix, file.text)
# fix resources paths
def fixPath(path):
"""
turns this> :/images/C:/Users/mindd/Desktop/CircleOfFifths.jpg
into this> images:CircleOfFifths.jpg
"""
path = path.replace(':/','')
s = path.index('/')
e = path.rindex('/')
path = path.replace(path[s:e+1],':')
return path
for _any in parsed.iter():
txt = _any.text
if txt:
txt = txt.strip()
if txt != '' and txt.count(':/'):
newTxt = txt
if 'url' in txt:# StyleSheet
# All occurrences of 'url(:' in string
for i in range(len(txt)):
if txt.startswith('url(:', i):
all_from_here = txt[i:]
start = all_from_here.index(':/')
end = all_from_here.index(')')
actual_txt = all_from_here[start:end]
newTxt = newTxt.replace(actual_txt,fixPath(actual_txt))
else:
newTxt = newTxt.replace(txt,fixPath(txt))
_any.text = newTxt
if parsed.find('resources') is not None:
parsed.getroot().remove(parsed.find('resources'))
mpi_file = uiFile.replace('.ui','_mpi.ui')
xml.indent(parsed.getroot())
parsed.write(mpi_file)
return mpi_file | 68d6a175d8080a05cd200ce1830daad0f9920fae | 6,309 |
def pad(data, paddings, mode="CONSTANT", name=None, constant_value=0):
""" PlaidML Pad """
# TODO: use / implement other padding method when required
# CONSTANT -> SpatialPadding ? | Doesn't support first and last axis +
# no support for constant_value
# SYMMETRIC -> Requires implement ?
if mode.upper() != "REFLECT":
raise NotImplementedError("pad only supports mode == 'REFLECT'")
if constant_value != 0:
raise NotImplementedError("pad does not support constant_value != 0")
return plaidml.op.reflection_padding(data, paddings) | b531cb4fe543da346c504c6aaca4b8787473e5d0 | 6,310 |
def taxon_id(_):
"""
Always returns 10090, the mouse taxon id.
"""
return 10090 | 117fe7f8d56eb9be4ee2b0f4d782b806576faedf | 6,311 |
def rthread_if(data, *forms):
"""
Similar to rthread, but each form must be a tuple of (test, fn, ...args),
and fn is only applied if the boolean test is True.
If test is callable, the current value is passed to the callable to decide
whether fn must be executed or not.
Like rthread, arguments are passed as tuples and the value is passed as the
last argument.
Examples:
>>> sk.rthread_if(20, (True, op.div, 2), (False, op.mul, 4), (sk.is_even, op.add, 2))
0.1
See Also:
:func:`thread`
:func:`rthread`
"""
for form in forms:
do_it, func, *args = form
if callable(do_it):
do_it = do_it(data)
if do_it:
try:
data = func(*args, data)
except Exception as ex:
raise _thread_error(ex, func, (*args, data)) from ex
return data | a9afa8576ec3a308d7f1514933e462d4565cf738 | 6,312 |
def decompose_matrices(Ks):
"""
Apply Cholesky decomposition to each matrix in the given list
:param Ks: a list of matrices
"""
Ls = []
for i, K_d in enumerate(Ks):
Ls.append(np.linalg.cholesky(K_d))
return Ls | 6a14363eab0646f59d3664843ef47c5ad34c5537 | 6,313 |
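A standalone check of the Cholesky step used above: build a small symmetric positive definite matrix, factor it, and confirm that L @ L.T recovers it.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
K = A @ A.T + 4 * np.eye(4)        # symmetric positive definite by construction

L = np.linalg.cholesky(K)          # lower-triangular factor
print(np.allclose(L @ L.T, K))     # True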
import os
import asyncio
import json
async def plugin_remove_cmd(client, message):
"""remove an installed plugin.
alemiBot plugins are git repos, cloned into the `plugins` folder as git submodules.
This will call `git submodule deinit -f`, then remove the related folder in `.git/modules` and last remove \
plugin folder and all its content.
If flag `-lib` is added, libraries installed with pip will be removed too (may break dependencies of other plugins!)
"""
if not alemiBot.allow_plugin_install:
return await edit_or_reply(message, "`[!] → ` Plugin management is disabled")
out = message.text.markdown if is_me(message) else f"`→ ` {get_username(message.from_user)} requested plugin removal"
msg = message if is_me(message) else await message.reply(out)
try:
if len(message.command) < 1:
out += "\n`[!] โ ` No input"
return await msg.edit(out)
plugin = message.command[0]
out += f"\n`โ ` Uninstalling `{plugin}`"
if "/" in plugin: # If user passes <user>/<repo> here too, get just repo name
plugin = plugin.split("/")[1]
logger.info(f"Removing plugin \"{plugin}\"")
if message.command["-lib"]:
out += "\n` โ ` Removing libraries"
await msg.edit(out)
if os.path.isfile(f"plugins/{plugin}/requirements.txt"):
proc = await asyncio.create_subprocess_exec(
"pip", "uninstall", "-y", "-r", f"plugins/{plugin}/requirements.txt",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT)
stdout, _stderr = await proc.communicate()
logger.info(stdout.decode())
if b"ERROR" in stdout:
out += " [`WARN`]"
else:
out += f" [`{stdout.count(b'Uninstalling')} del`]"
out += "\n` โ ` Removing source code"
await msg.edit(out)
proc = await asyncio.create_subprocess_shell(
f"git submodule deinit -f plugins/{plugin} && rm -rf .git/modules/plugins/{plugin} && git rm -f plugins/{plugin}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT)
stdout, _stderr = await proc.communicate()
res = cleartermcolor(stdout.decode())
logger.info(res)
if not res.startswith("Cleared"):
logger.error(res)
out += f" [`FAIL`]\n`[!] โ ` Could not deinit `{plugin}`"
return await msg.edit(out)
if f"rm 'plugins/{plugin}'" not in res:
logger.error(res)
out += f" [`FAIL`]\n`[!] โ ` Could not delete `{plugin}`"
return await msg.edit(out)
out += f" [`OK`]\n` โ ` Restarting process"
await msg.edit(out)
with open("data/lastmsg.json", "w") as f:
json.dump({"message_id": msg.message_id,
"chat_id": msg.chat.id}, f)
asyncio.get_event_loop().create_task(client.restart())
except Exception as e:
logger.exception("Error while installing plugin")
out += " [`FAIL`]\n`[!] โ ` " + str(e)
await msg.edit(out) | d46ec0a6c82de918bd29b5b68046b6bdf74bfac9 | 6,314 |
def ndcg_score(y_pre, y_true, k=20):
"""
get NDCG@k
:param y_pre: numpy (batch_size,x)
:param y_true: y_truth: list[batch_size][ground_truth_num]
:param k: k
:return: NDCG@k
"""
dcg = dcg_score(y_pre, y_true, k)
idcg = dcg_score(y_true, y_true, k)
return dcg / idcg | 0e9e513f4c8e7ceba18c0a12d0c84e08d210ddcd | 6,315 |
from typing import Optional
import numpy as np
import numpy.typing as npt
def get_discount_weights(
discount_factor: float, traj_len: int, num_trajs: int = 1
) -> Optional[npt.NDArray[np.float32]]:
"""
Return the trajectory discount weight array if applicable
:param discount_factor: the discount factor by which the displacements corresponding to the k^th timestep will
be discounted
:param traj_len: len of traj
:param optional num_trajs: num of ego trajs, default is set to 1, but it's generalized in case we need to
compare multiple ego trajs with expert
:return array of discount_weights.
"""
discount_weights = None
if discount_factor != 1.0:
# Compute discount_factors
pow_arr = np.tile(np.arange(traj_len), (num_trajs, 1)) # type:ignore
discount_weights = np.power(discount_factor, pow_arr)
return discount_weights | 0d09fcf8228b1e04790d7874e0ecfffeae9a009a | 6,316 |
import random
def touch_to_square(touch_x, touch_y, num_rows, num_cols):
""" Given a touch x and y, convert it to a coordinate on the square. """
x = clamp(maprange((PAD_Y_RANGE_MAX, PAD_Y_RANGE_MIN),
(0, num_rows),
touch_y) + random.randrange(-1, 2),
0, num_rows - 1)
y = clamp(maprange((PAD_X_RANGE_MAX, PAD_X_RANGE_MIN),
(0, num_cols),
touch_x) + random.randrange(-1, 2),
0, num_cols - 1)
return (int(x), int(y)) | f7320e7e9738f7e05b3e675c706b28182a12de9a | 6,317 |
def is_valid_scheme(url):
"""Judge whether url is valid scheme."""
return urlparse(url).scheme in ["ftp", "gopher", "http", "https"] | 4240ec4251e8f937c6f755d123b0b52f88057420 | 6,318 |
def height_to_transmission(height, material, energy, rho=0, photo_only=False,
source='nist'):
"""
Calculates the resulting x-ray transmission of an object based on the given
height (thickness) and for a given material and energy.
Parameters
==========
height: grating height (thickness) [um]
material: chemical formula ('Fe2O3', 'CaMg(CO3)2', 'La1.9Sr0.1CuO4')
energy: x-ray energy [keV]
rho: density in [g/cm3], default=0 (no density given)
photo_only: boolean for returning photo cross-section component only,
default=False
source: material params LUT... default='nist'
Returns
=======
transmission: percentage of resulting x-ray transmission
"""
return 1 - height_to_absorption(height, material, energy, rho, photo_only,
source) | 54e2933b06e0489fdc521c4f173d516038f32ee8 | 6,319 |
def assignModelClusters(keyframe_model, colors):
""" Map each colorspace segment to the closest color in the input.
Parameters
----------
keyframe_model : FrameScorer
colors : numpy array of int, shape (num_colors, 3)
"""
hsv_mean_img = keyframe_model.hsv_means.copy().reshape(1, keyframe_model.n_clusters, 3)
hsv_mean_img_saturated = hsv_mean_img.copy()
hsv_mean_img_saturated[:, :, 1] = 1
hsv_mean_img_saturated[:, :, 2] = 1
rgb_mean_img_saturated = imageprocessing.color.hsv2rgb(hsv_mean_img_saturated)
# rgb_mean_img = imageprocessing.color.hsv2rgb(hsv_mean_img)
# imageprocessing.displayImage(rgb_mean_img)
# imageprocessing.displayImage(rgb_mean_img_saturated)
rgb_means_saturated = rgb_mean_img_saturated.reshape(keyframe_model.n_clusters, 3)
distances = np.array(tuple(
np.linalg.norm(rgb_means_saturated - np.array(rgb_color), axis=1)
for rgb_color in colors
)).T
best_idxs = distances.argmin(axis=1)
keyframe_model.color_mappings = best_idxs
return keyframe_model | d32c093c8931272215bec12d08b4b268da50f184 | 6,320 |
def sort_according_to_ref_list(fixturenames, param_names):
"""
Sorts items in the first list, according to their position in the second.
Items that are not in the second list stay in the same position, the others are just swapped.
A new list is returned.
:param fixturenames:
:param param_names:
:return:
"""
cur_indices = []
for pname in param_names:
try:
cur_indices.append(fixturenames.index(pname))
except (ValueError, IndexError):
# can happen in case of indirect parametrization: a parameter is not in the fixture name.
# TODO we should maybe rather add the pname to fixturenames in this case ?
pass
target_indices = sorted(cur_indices)
sorted_fixturenames = list(fixturenames)
for old_i, new_i in zip(cur_indices, target_indices):
sorted_fixturenames[new_i] = fixturenames[old_i]
return sorted_fixturenames | d4f1ca19b54ccbdbd70c865abceca1817ce5b2c1 | 6,321 |
def calc_ef_from_bases(x,*args):
"""
Calculate energies and forces of every samples using bases data.
"""
global _hl1,_ergs,_frcs,_wgt1,_wgt2,_wgt3,_aml,_bml
#.....initialize variables
if _nl == 1:
_wgt1,_wgt2= vars2wgts(x)
elif _nl == 2:
_wgt1,_wgt2,_wgt3= vars2wgts(x)
es=np.zeros(len(_samples))
fs= []
for smpl in _samples:
fs.append(np.zeros((smpl.natm,3)))
p= mp.Pool(_nprcs)
_hl1= []
_aml= []
_bml= []
if _nprcs == 1:
for ismpl in range(len(_samples)):
smpl= _samples[ismpl]
if _nl == 1:
est,fst,hl1s,ams,bms= calc_ef1(ismpl,x,*args)
_hl1.append(hl1s)
_aml.append(ams)
_bml.append(bms)
elif _nl == 2:
est,fst,hl1s,hl2s,ams,bms,cms= calc_ef2(ismpl,x,*args)
_hl1.append(hl1s)
_hl2.append(hl2s)
_aml.append(ams)
_bml.append(bms)
_cml.append(cms)
es[ismpl]= est
for ia in range(smpl.natm):
fs[ismpl][ia,0] += fst[ia,0]
fs[ismpl][ia,1] += fst[ia,1]
fs[ismpl][ia,2] += fst[ia,2]
else:
func_args=[]
if _nl == 1:
for ismpl in range(len(_samples)):
func_args.append( (calc_ef1,ismpl,x) )
elif _nl == 2:
for ismpl in range(len(_samples)):
func_args.append( (calc_ef2,ismpl,x) )
results= p.map(arg_wrapper,func_args)
p.close()
p.join()
for ismpl in range(len(_samples)):
smpl= _samples[ismpl]
if _nl == 1:
est,fst,hl1s,ams,bms= results[ismpl]
_hl1.append(hl1s)
_aml.append(ams)
_bml.append(bms)
elif _nl == 2:
est,fst,hl1s,hl2s,ams,bms,cms= results[ismpl]
_hl1.append(hl1s)
_hl2.append(hl2s)
_aml.append(ams)
_bml.append(bms)
_cml.append(cms)
es[ismpl]= est
for ia in range(smpl.natm):
fs[ismpl][ia,0] += fst[ia,0]
fs[ismpl][ia,1] += fst[ia,1]
fs[ismpl][ia,2] += fst[ia,2]
# print ' es:'
# print es
_ergs= es
_frcs= fs
return (es,fs) | d54c285c04dd8ae948fb13553251f0675c1af4bc | 6,322 |
def HIadj_post_anthesis(
NewCond_DelayedCDs,
NewCond_sCor1,
NewCond_sCor2,
NewCond_DAP,
NewCond_Fpre,
NewCond_CC,
NewCond_fpost_upp,
NewCond_fpost_dwn,
Crop,
Ksw):
"""
Function to calculate adjustment to harvest index for post-anthesis water
stress
<a href="../pdfs/ac_ref_man_3.pdf#page=119" target="_blank">Reference Manual: harvest index calculations</a> (pg. 110-126)
*Arguments:*
`InitCond`: `InitCondClass` : InitCond object containing model parameters
`Crop`: `CropClass` : Crop object containing Crop parameters
`Ksw`: `KswClass` : Ksw object containing water stress parameters
*Returns:*
`NewCond`: `InitCondClass` : InitCond object containing updated model parameters
"""
## Store initial conditions in a structure for updating ##
# NewCond = InitCond
InitCond_DelayedCDs = NewCond_DelayedCDs*1
InitCond_sCor1 = NewCond_sCor1*1
InitCond_sCor2 = NewCond_sCor2*1
## Calculate harvest index adjustment ##
# 1. Adjustment for leaf expansion
tmax1 = Crop.CanopyDevEndCD - Crop.HIstartCD
DAP = NewCond_DAP - InitCond_DelayedCDs
if (
(DAP <= (Crop.CanopyDevEndCD + 1))
and (tmax1 > 0)
and (NewCond_Fpre > 0.99)
and (NewCond_CC > 0.001)
and (Crop.a_HI > 0)
):
dCor = 1 + (1 - Ksw.Exp) / Crop.a_HI
NewCond_sCor1 = InitCond_sCor1 + (dCor / tmax1)
DayCor = DAP - 1 - Crop.HIstartCD
NewCond_fpost_upp = (tmax1 / DayCor) * NewCond_sCor1
# 2. Adjustment for stomatal closure
tmax2 = Crop.YldFormCD
DAP = NewCond_DAP - InitCond_DelayedCDs
if (
(DAP <= (Crop.HIendCD + 1))
and (tmax2 > 0)
and (NewCond_Fpre > 0.99)
and (NewCond_CC > 0.001)
and (Crop.b_HI > 0)
):
# print(Ksw.Sto)
dCor = np.power(Ksw.Sto, 0.1) * (1 - (1 - Ksw.Sto) / Crop.b_HI)
NewCond_sCor2 = InitCond_sCor2 + (dCor / tmax2)
DayCor = DAP - 1 - Crop.HIstartCD
NewCond_fpost_dwn = (tmax2 / DayCor) * NewCond_sCor2
# Determine total multiplier
if (tmax1 == 0) and (tmax2 == 0):
NewCond_Fpost = 1
else:
if tmax2 == 0:
NewCond_Fpost = NewCond_fpost_upp
else:
if tmax1 == 0:
NewCond_Fpost = NewCond_fpost_dwn
elif tmax1 <= tmax2:
NewCond_Fpost = NewCond_fpost_dwn * (
((tmax1 * NewCond_fpost_upp) + (tmax2 - tmax1)) / tmax2
)
else:
NewCond_Fpost = NewCond_fpost_upp * (
((tmax2 * NewCond_fpost_dwn) + (tmax1 - tmax2)) / tmax1
)
return (
NewCond_sCor1,
NewCond_sCor2,
NewCond_fpost_upp,
NewCond_fpost_dwn,
NewCond_Fpost) | 9101dba12642dc7f1f4b0bbe9f35d7e926f6af0a | 6,323 |
def packify(fmt=u'8', fields=[0x00], size=None, reverse=False):
"""
Packs fields sequence of bit fields into bytearray of size bytes using fmt string.
Each white space separated field of fmt is the length of the associated bit field
If not provided size is the least integer number of bytes that hold the fmt.
If reverse is true reverse the order of the bytes in the byte array before
returning. This is useful for converting between bigendian and littleendian.
Assumes unsigned fields values.
Assumes network big endian so first fields element is high order bits.
Each field in format string is number of bits for the associated bit field
Fields with length of 1 are treated as having boolean truthy field values
that is, nonzero is True and packs as a 1
for 2+ length bit fields the field element is truncated to the number of
low order bits in the bit field
if sum of number of bits in fmt less than size bytes then the last byte in
the bytearray is right zero padded
if sum of number of bits in fmt greater than size bytes an exception is raised
to pad just use 0 value in source field.
example
packify("1 3 2 2", (True, 4, 0, 3)) returns bytearray([0xc3])
"""
tbfl = sum((int(x) for x in fmt.split()))
if size is None:
size = (tbfl // 8) + 1 if tbfl % 8 else tbfl // 8
if not (0 <= tbfl <= (size * 8)):
raise ValueError("Total bit field lengths in fmt not in [0, {0}]".format(size * 8))
n = 0
bfp = 8 * size # starting bit field position
bu = 0 # bits used
for i, bfmt in enumerate(fmt.split()):
bits = 0x00
bfl = int(bfmt)
bu += bfl
if bfl == 1:
if fields[i]:
bits = 0x01
else:
bits = 0x00
else:
bits = fields[i] & (2**bfl - 1) # bit-and mask out high order bits
bits <<= (bfp - bfl) #shift left to bit position less bit field size
n |= bits # bit-or in bits
bfp -= bfl #adjust bit field position for next element
return bytify(n=n, size=size, reverse=reverse, strict=True) | 882d4fd9e3ec626f499f7c4653f6c3864ad64095 | 6,324 |
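A hand check of the docstring example: packing the fields (True, 4, 0, 3) with bit widths "1 3 2 2" into a single byte by masking, shifting, and OR-ing, which is the same arithmetic the loop above performs.
widths = [1, 3, 2, 2]
fields = [True, 4, 0, 3]

packed = 0
pos = 8                                        # one byte, start from the high bit
for width, field in zip(widths, fields):
    pos -= width
    packed |= (int(field) & (2**width - 1)) << pos

print(hex(packed))                             # 0xc3, i.e. 0b1_100_00_11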
def fix_conf_params(conf_obj, section_name):
"""from a ConfigParser object, return a dictionary of all parameters
for a given section in the expected format.
Because ConfigParser defaults to values under [DEFAULT] if present, these
values should always appear unless the file is really bad.
:param configparser_object: ConfigParser instance
:param section_name: string of section name in config file
(e.g. "MyBank" matches "[MyBank]" in file)
:return: dict with all parameters
"""
config = {
"input_columns": ["Input Columns", False, ","],
"output_columns": ["Output Columns", False, ","],
"input_filename": ["Source Filename Pattern", False, ""],
"path": ["Source Path", False, ""],
"ext": ["Source Filename Extension", False, ""],
"regex": ["Use Regex For Filename", True, ""],
"fixed_prefix": ["Output Filename Prefix", False, ""],
"input_delimiter": ["Source CSV Delimiter", False, ""],
"header_rows": ["Header Rows", False, ""],
"footer_rows": ["Footer Rows", False, ""],
"date_format": ["Date Format", False, ""],
"delete_original": ["Delete Source File", True, ""],
"cd_flags": ["Inflow or Outflow Indicator", False, ","],
"payee_to_memo": ["Use Payee for Memo", True, ""],
"plugin": ["Plugin", False, ""],
"api_token": ["YNAB API Access Token", False, ""],
"api_account": ["YNAB Account ID", False, "|"],
}
for key in config:
config[key] = get_config_line(conf_obj, section_name, config[key])
config["bank_name"] = section_name
# quick n' dirty fix for tabs as delimiters
if config["input_delimiter"] == "\\t":
config["input_delimiter"] = "\t"
return config | 55cdb572e2b45f437c583429ed9ee61f0de9b3de | 6,325 |
def sackStringToSack(sackString):
"""
C{sackString} is a C{str}. Returns a L{window.SACK}.
"""
try:
# If not enough args for split, Python raises ValueError
joinedSackList, ackNumberStr = sackString.rsplit('|', 1)
ackNumber = strToIntInRange(ackNumberStr, -1, 2**53)
sackList = tuple(strToNonNegLimit(s, 2**53) for s in joinedSackList.split(',')) if joinedSackList else ()
except ValueError:
raise InvalidSackString("bad sack")
return SACK(ackNumber, sackList) | 9fd5ef91f6e897758f47de006a582b5b1ec99f82 | 6,326 |
def setup_graph(event, sta, chan, band,
tm_shape, tm_type, wm_family, wm_type, phases,
init_run_name, init_iteration, fit_hz=5, uatemplate_rate=1e-4,
smoothing=0, dummy_fallback=False,
raw_signals=False, init_templates=False, **kwargs):
"""
Set up the graph with the signal for a given training event.
"""
s = Sigvisa()
cursor = s.dbconn.cursor()
try:
input_runid = get_fitting_runid(cursor, init_run_name, init_iteration, create_if_new = False)
runids = (input_runid,)
print "input_runid", input_runid
except RunNotFoundException:
runids = ()
sg = SigvisaGraph(template_model_type=tm_type, template_shape=tm_shape,
wiggle_model_type=wm_type, wiggle_family=wm_family,
phases=phases,
runids = runids,
uatemplate_rate=uatemplate_rate,
min_mb=1.0,
dummy_fallback=dummy_fallback,
raw_signals=raw_signals, **kwargs)
filter_str = band
if not raw_signals:
filter_str += ";env"
wave = load_event_station_chan(event.evid, sta, chan, cursor=cursor, exclude_other_evs=True, phases=None if phases=="leb" else phases, pre_s=100.0).filter(filter_str)
cursor.close()
if smoothing > 0:
wave = wave.filter('smooth_%d' % smoothing)
if fit_hz != wave['srate']:
wave = wave.filter('hz_%.2f' % fit_hz)
if len(mask_blocks(wave.data.mask)) > 2:
raise Exception("wave contains missing data")
if (not raw_signals) and (np.sum(wave.data < 0.0001) > 10):
raise Exception("wave contains regions of zeros")
sg.add_wave(wave=wave, init_extra_noise=True)
evnodes = sg.add_event(ev=event)
eid = evnodes["lon"].eid
stddevs = {"time": 2.0, "mb": 0.2}
sg.observe_event(eid=eid, ev=event, stddevs=stddevs)
if init_templates:
fitid = get_previous_fitid(input_runid, event.evid, sta)
set_templates_from_fitid(sg, 1, fitid, wave)
#sg.fix_arrival_times()
phases = sg.ev_arriving_phases(1, wave["sta"])
assert( "P" in phases or "Pg" in phases or "Pn" in phases or "pP" in phases)
return sg | 45add3585b61db404a9edb31fe7363677c6cbaec | 6,327 |
def getLogisticModelNames(config):
"""
Get the names of the models present in the configobj
Args:
config: configobj object defining the model and its inputs.
Returns:
list: list of model names.
"""
names = []
lmodel_space = config
for key, value in lmodel_space.items():
if isinstance(value, str):
continue
else: # this is a model
names.append(key)
return names | f7f82b12eb50a58c92970b5c2a8f99eb01945523 | 6,328 |
def checkfileCopyright(filename):
""" return true if file has already a Copyright in first X lines """
infile = open(filename, 'r')
for x in xrange(6):
x = x
line = infile.readline()
if "Copyright" in line or "copyright" in line:
return True
return False | 567b485a58e46796238a109de935904d747679c7 | 6,329 |
def TopicFormat(topic_name, topic_project=''):
"""Formats a topic name as a fully qualified topic path.
Args:
topic_name: (string) Name of the topic to convert.
topic_project: (string) Name of the project the given topic belongs to.
If not given, then the project defaults to the currently
selected cloud project.
Returns:
Returns a fully qualified topic path of the
form project/foo/topics/topic_name.
"""
return TopicIdentifier(topic_name, topic_project).GetFullPath() | e8a3d28cc81b7a31a2243b68c77aef77449c1b97 | 6,330 |
def mp0(g0):
"""Return 0th order free energy."""
return g0.sum() | 5aa3580fec1322bd7b4e357ec6bee4d52fae592e | 6,331 |
def create_diamond(color=None):
"""
Creates a diamond.
:param color: Diamond color
:type color: list
:return: OpenGL list
"""
# noinspection PyArgumentEqualDefault
a = Point3(-1.0, -1.0, 0.0)
# noinspection PyArgumentEqualDefault
b = Point3(1.0, -1.0, 0.0)
# noinspection PyArgumentEqualDefault
c = Point3(1.0, 1.0, 0.0)
# noinspection PyArgumentEqualDefault
d = Point3(-1.0, 1.0, 0.0)
# noinspection PyArgumentEqualDefault
e = Point3(0.0, 0.0, 1.0)
# noinspection PyArgumentEqualDefault
f = Point3(0.0, 0.0, -1.0)
obj = _gl.glGenLists(1)
_gl.glNewList(obj, _gl.GL_COMPILE)
_gl.glPushMatrix()
if color is not None:
_gl.glColor4fv(color)
_gl.glBegin(_gl.GL_TRIANGLES)
draw_vertex_list_create_normal([a, b, e])
draw_vertex_list_create_normal([b, c, e])
draw_vertex_list_create_normal([c, d, e])
draw_vertex_list_create_normal([d, a, e])
draw_vertex_list_create_normal([b, a, f])
draw_vertex_list_create_normal([c, b, f])
draw_vertex_list_create_normal([d, c, f])
draw_vertex_list_create_normal([a, d, f])
_gl.glEnd()
_gl.glPopMatrix()
_gl.glEndList()
return obj | 421939be392abdba6ccedb8a946a93ebe35fb612 | 6,332 |
import copy
import collections.abc
def merge_dicts(*dicts):
"""
Recursive dict merge.
Instead of updating only top-level keys,
dict_merge recurses down into dicts nested
to an arbitrary depth, updating keys.
"""
assert len(dicts) > 1
dict_ = copy.deepcopy(dicts[0])
for merge_dict in dicts[1:]:
for k, v in merge_dict.items():
if (k in dict_ and isinstance(dict_[k], dict)
and isinstance(merge_dict[k], collections.abc.Mapping)):
dict_[k] = merge_dicts(dict_[k], merge_dict[k])
else:
dict_[k] = merge_dict[k]
return dict_ | 6595343694b80928417c2a1f096cf4587f3dccbc | 6,333 |
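A small usage sketch, assuming merge_dicts above is in scope: nested keys are merged recursively, later dicts win on conflicts, and the first dict is left untouched because it is deep-copied.
base = {"db": {"host": "localhost", "port": 5432}, "debug": False}
override = {"db": {"port": 5433}, "debug": True}

merged = merge_dicts(base, override)
print(merged)   # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
print(base)     # unchanged, since the first dict is deep-copied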
def getAllFWImageIDs(fwInvDict):
"""
gets a list of all the firmware image IDs
@param fwInvDict: the dictionary to search for FW image IDs
@return: list containing string representation of the found image ids
"""
idList = []
for key in fwInvDict:
if 'Version' in fwInvDict[key]:
idList.append(key.split('/')[-1])
return idList | 54bbd28b80905c7b48e5ddc3e61187f5b5ad5f6a | 6,334 |
import os
def create_dataset(dataset_path, batch_size=1, repeat_size=1, max_dataset_size=None,
shuffle=True, num_parallel_workers=1, phase='train', data_dir='testA', use_S=False):
""" create Mnist dataset for train or eval.
dataset_path: Data path
batch_size: The number of data records in each group
repeat_size: The number of replicated data records
num_parallel_workers: The number of parallel workers
"""
# define dataset and apply the transform func
if phase == 'train':
ds = UnalignedDataset(dataset_path, phase, max_dataset_size=max_dataset_size, shuffle=True, use_S=use_S)
column_names = ["image_A", "image_B"]
if use_S:
column_names.append('image_S')
device_num = 1
distributed_sampler = DistributedSampler(len(ds), num_replicas=device_num, rank=0, shuffle=shuffle)
gan_generator_ds = GeneratorDataset(ds, column_names=column_names, sampler=distributed_sampler,
num_parallel_workers=num_parallel_workers)
else:
data_dir = os.path.join(dataset_path, data_dir)
ds = GanImageFolderDataset(data_dir, max_dataset_size=max_dataset_size)
gan_generator_ds = GeneratorDataset(ds, column_names=["image", "image_name"],
num_parallel_workers=num_parallel_workers)
gan_generator_ds = cyclegan_transform.apply_ds(gan_generator_ds,
repeat_size=repeat_size,
batch_size=batch_size,
num_parallel_workers=num_parallel_workers,
shuffle=shuffle,
phase=phase,
use_S=use_S)
dataset_size = len(ds)
return gan_generator_ds, dataset_size | b5dcb8cb507c2e2f928e0becbeb014ce7780ad7f | 6,335 |
def document_uris_from_data(document_data, claimant):
"""
Return one or more document URI dicts for the given document data.
Returns one document uri dict for each document equivalence claim in
document_data.
Each dict can be used to init a DocumentURI object directly::
document_uri = DocumentURI(**document_uri_dict)
Always returns at least one "self-claim" document URI whose URI is the
claimant URI itself.
:param document_data: the "document" sub-object that was POSTed to the API
as part of a new or updated annotation
:type document_data: dict
:param claimant: the URI that the browser was at when this annotation was
created (the top-level "uri" field of the annotation)
:type claimant: unicode
:returns: a list of one or more document URI dicts
:rtype: list of dicts
"""
document_uris = document_uris_from_links(document_data.get("link", []), claimant)
document_uris.extend(
document_uris_from_highwire_pdf(document_data.get("highwire", {}), claimant)
)
document_uris.extend(
document_uris_from_highwire_doi(document_data.get("highwire", {}), claimant)
)
document_uris.extend(document_uris_from_dc(document_data.get("dc", {}), claimant))
document_uris.append(document_uri_self_claim(claimant))
for document_uri in document_uris:
uri = document_uri["uri"]
if uri:
document_uri["uri"] = uri.strip()
document_uris = [d for d in document_uris if d["uri"]]
return document_uris | 70f02c61f8cb1be21dd094c696f257e565d7c04c | 6,336 |
def get_bond_angle_index(edge_index):
"""
edge_index: (2, E)
bond_angle_index: (3, *)
"""
def _add_item(
node_i_indices, node_j_indices, node_k_indices,
node_i_index, node_j_index, node_k_index):
node_i_indices += [node_i_index, node_k_index]
node_j_indices += [node_j_index, node_j_index]
node_k_indices += [node_k_index, node_i_index]
E = edge_index.shape[1]
node_i_indices = []
node_j_indices = []
node_k_indices = []
for edge_i in range(E - 1):
for edge_j in range(edge_i + 1, E):
a0, a1 = edge_index[:, edge_i]
b0, b1 = edge_index[:, edge_j]
if a0 == b0 and a1 == b1:
continue
if a0 == b1 and a1 == b0:
continue
if a0 == b0:
_add_item(node_i_indices, node_j_indices, node_k_indices,
a1, a0, b1)
if a0 == b1:
_add_item(node_i_indices, node_j_indices, node_k_indices,
a1, a0, b0)
if a1 == b0:
_add_item(node_i_indices, node_j_indices, node_k_indices,
a0, a1, b1)
if a1 == b1:
_add_item(node_i_indices, node_j_indices, node_k_indices,
a0, a1, b0)
node_ijk = np.array([node_i_indices, node_j_indices, node_k_indices])
uniq_node_ijk = np.unique(node_ijk, axis=1).astype('int64') # (3, *)
return uniq_node_ijk | 7660b6b27b2a028092d39cac5e1b9dfcf6973984 | 6,337 |
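A minimal usage sketch, assuming the function above is in scope and that np refers to numpy (the snippet uses np without showing the import). For the directed edges of a triangle it yields six (i, j, k) triplets, one ordered pair per shared middle atom j.
import numpy as np

# Directed edges of a triangle: 0 -> 1, 1 -> 2, 2 -> 0.
edge_index = np.array([[0, 1, 2],
                       [1, 2, 0]])

angles = get_bond_angle_index(edge_index)
print(angles.shape)   # (3, 6): six unique (i, j, k) bond-angle triplets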
def get_config(section=None, option=None):
"""Return dpm configuration objects.
:param section:
the name of the section in the ini file, e.g. "index:ckan".
- May be omitted only when no other parameters are provided
- Must be omitted elsewhere
:type section: str
:param option:
the name of the option to be retrieved from the section of the ini file, e.g. 'ckan.api_key'
- Can be omitted if a section is provided
- Must be omitted if no section is provided
:type option: str
:return:
[str, str, .., str] -- The section names of the ini file, when no section and no option are provided
-- e.g. ['dpm', 'index:ckan', 'index:db', 'upload:ckan']
[str, str, .., str] -- The option names of the ini file for a given section
-- e.g.['ckan.url', 'ckan.api_key']
[str] -- The option value if a valid section and a valid option name are given.
-- e.g. ['http://thedatahub.org/api/']
"""
if not section and not option:
return dpm.CONFIG.sections()
elif section and not option:
return dpm.CONFIG.options(section)
elif section and option:
return dpm.CONFIG.get(section, option)
else:
raise ValueError("Please provide no parameters OR just section OR both section and option") | e4910cd804593da8a6fd4b1fae7f3bd3fcd32f2b | 6,338 |
def match_intervals(intervals_from, intervals_to, strict=True):
"""Match one set of time intervals to another.
This can be useful for tasks such as mapping beat timings
to segments.
Each element ``[a, b]`` of ``intervals_from`` is matched to the
element ``[c, d]`` of ``intervals_to`` which maximizes the
Jaccard similarity between the intervals::
max(0, |min(b, d) - max(a, c)|) / |max(d, b) - min(a, c)|
In ``strict=True`` mode, if there is no interval with positive
intersection with ``[a,b]``, an exception is thrown.
In ``strict=False`` mode, any interval ``[a, b]`` that has no
intersection with any element of ``intervals_to`` is instead
matched to the interval ``[c, d]`` which minimizes::
min(|b - c|, |a - d|)
that is, the disjoint interval [c, d] with a boundary closest
to [a, b].
.. note:: An element of ``intervals_to`` may be matched to multiple
entries of ``intervals_from``.
Parameters
----------
intervals_from : np.ndarray [shape=(n, 2)]
The time range for source intervals.
The ``i`` th interval spans time ``intervals_from[i, 0]``
to ``intervals_from[i, 1]``.
``intervals_from[0, 0]`` should be 0, ``intervals_from[-1, 1]``
should be the track duration.
intervals_to : np.ndarray [shape=(m, 2)]
Analogous to ``intervals_from``.
strict : bool
If ``True``, intervals can only match if they intersect.
If ``False``, disjoint intervals can match.
Returns
-------
interval_mapping : np.ndarray [shape=(n,)]
For each interval in ``intervals_from``, the
corresponding interval in ``intervals_to``.
See Also
--------
match_events
Raises
------
ParameterError
If either array of input intervals is not the correct shape
If ``strict=True`` and some element of ``intervals_from`` is disjoint from
every element of ``intervals_to``.
Examples
--------
>>> ints_from = np.array([[3, 5], [1, 4], [4, 5]])
>>> ints_to = np.array([[0, 2], [1, 3], [4, 5], [6, 7]])
>>> librosa.util.match_intervals(ints_from, ints_to)
array([2, 1, 2], dtype=uint32)
>>> # [3, 5] => [4, 5] (ints_to[2])
>>> # [1, 4] => [1, 3] (ints_to[1])
>>> # [4, 5] => [4, 5] (ints_to[2])
The reverse matching of the above is not possible in ``strict`` mode
because ``[6, 7]`` is disjoint from all intervals in ``ints_from``.
With ``strict=False``, we get the following:
>>> librosa.util.match_intervals(ints_to, ints_from, strict=False)
array([1, 1, 2, 2], dtype=uint32)
>>> # [0, 2] => [1, 4] (ints_from[1])
>>> # [1, 3] => [1, 4] (ints_from[1])
>>> # [4, 5] => [4, 5] (ints_from[2])
>>> # [6, 7] => [4, 5] (ints_from[2])
"""
if len(intervals_from) == 0 or len(intervals_to) == 0:
raise ParameterError("Attempting to match empty interval list")
# Verify that the input intervals has correct shape and size
valid_intervals(intervals_from)
valid_intervals(intervals_to)
try:
return __match_intervals(intervals_from, intervals_to, strict=strict)
except ParameterError as exc:
raise ParameterError(
"Unable to match intervals with strict={}".format(strict)
) from exc | a3b523b5aafd77a2fc1026c183ae6a690ec3538c | 6,339 |
def correlate(x, y, margin, method='pearson'):
""" Find delay and correlation between x and each column o y
Parameters
----------
x : `pandas.Series`
Main signal
y : `pandas.DataFrame`
Secondary signals
method : `str`, optional
Correlation method. Defaults to `pearson`. Options: `pearson`,`robust`,`kendall`,`spearman`
Returns
-------
`(List[float], List[int])`
List of correlation coefficients and delays in samples in the same order as y's columns
Notes
-----
Uses the pandas method corrwith (which can return pearson, kendall or spearman coefficients) to correlate. If robust
correlation is used, the mapping presented in [1]_ is used and then Pearson correlation is used. To speedup the lag finding,
the delays are calculated in log intervals and then interpolated by splines, as shown in [2]_, and the lag with maximum correlation
found in this interpolated function is then used as the delay.
References
----------
.. [1] Raymaekers, J., Rousseeuw, P. "Fast Robust Correlation for High-Dimensional Data", Technometrics, vol. 63, Pages 184-198, 2021
.. [2] Sakurai, Yasushi & Papadimitriou, Spiros & Faloutsos, Christos. (2005). BRAID: Stream mining through group lag correlations. Proceedings of the ACM SIGMOD International Conference on Management of Data. 599-610.
"""
beg, end = (x.index.min(), x.index.max())
y = interpolate(y,x.index,margin)
if(method == 'robust'):
method='pearson'
x = pd.Series(z(sig.detrend(x)), index=x.index, name=x.name)
x = x.apply(g)
y = y.apply(lambda s: z(sig.detrend(s))).applymap(g)
N = int(x.size*margin)
l = int(np.log2(N))
b = 4
log_lags = np.array([int(2**i+(j*2**i/b)) for i in range(2,l+1) for j in range(4) if 2**i+(j*2**i/b) < N])
log_lags = list(-1*log_lags)[::-1]+[-3,-2,-1,0,1,2,3]+list(log_lags)
new_lags = list(range(-1*max(log_lags),max(log_lags)+1))
vals = pd.DataFrame([lagged_corr(x,y,lag,method) for lag in log_lags])
vals = vals.apply(lambda s: inter.make_interp_spline(log_lags, abs(s),k=3)(new_lags))
peaks = vals.apply(lambda s: pd.Series([new_lags[i] for i in sig.find_peaks(s)[0]]+[new_lags[max(range(len(s)), key=s.__getitem__)]]).drop_duplicates())
peak_corr = pd.DataFrame(np.array([[x.corr((y[col].shift(int(peak)))[beg:end], method=method) if not pd.isna(peak) else 0 for peak in peaks[col]] for col in peaks]).transpose(), columns=y.columns)
dela = [peak_corr[col].abs().idxmax() for col in peak_corr]
delays = [int(peaks[col].iloc[dela[pos]]) for pos, col in enumerate(peak_corr)]
corrs = [round(peak_corr[col].iloc[dela[pos]],2) for pos, col in enumerate(peak_corr)]
return corrs, delays | 45800fd580ad257a8f4663c06577860f952a9a79 | 6,340 |
def sortList2(head: ListNode) -> ListNode:
"""down2up"""
h, length, intv = head, 0, 1
while h: h, length = h.next, length + 1
res = ListNode(0)
res.next = head
# merge the list in different intv.
while intv < length:
pre, h = res, res.next
while h:
# get the two merge head `h1`, `h2`
h1, i = h, intv
while i and h: h, i = h.next, i - 1
if i: break # no need to merge because the `h2` is None.
h2, i = h, intv
while i and h: h, i = h.next, i - 1
c1, c2 = intv, intv - i # the `c2`: length of `h2` can be small than the `intv`.
# merge the `h1` and `h2`.
while c1 and c2:
if h1.val < h2.val: pre.next, h1, c1 = h1, h1.next, c1 - 1
else: pre.next, h2, c2 = h2, h2.next, c2 - 1
pre = pre.next
pre.next = h1 if c1 else h2
while c1 > 0 or c2 > 0: pre, c1, c2 = pre.next, c1 - 1, c2 - 1
pre.next = h
intv *= 2
return res.next | 02ffae2012847b952197f1ed4c2af2178a552b4d | 6,341 |
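A usage sketch with a hypothetical minimal ListNode plus two helpers, assuming sortList2 above is in scope:
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    head = ListNode(values[0])
    node = head
    for v in values[1:]:
        node.next = ListNode(v)
        node = node.next
    return head

def to_list(head):
    out = []
    while head:
        out.append(head.val)
        head = head.next
    return out

print(to_list(sortList2(build([4, 2, 1, 3]))))   # [1, 2, 3, 4]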
def _inverse_frequency_max(searcher, fieldname, term):
"""
Inverse frequency smooth idf schema
"""
n = searcher.doc_frequency(fieldname, term)
maxweight = searcher.term_info(fieldname, term).max_weight()
return log(1 + (maxweight / n), 10) if n != 0.0 else 0.0 | e24497c2d67600b9744c5fafb7b503853c54d76c | 6,342 |
def ha(data):
"""
Hadamard Transform
This function is very slow. Implement a Fast Walsh-Hadamard Transform
with sequency/Walsh ordering (FWHT_w) for faster transforms.
See:
http://en.wikipedia.org/wiki/Walsh_matrix
http://en.wikipedia.org/wiki/Fast_Hadamard_transform
"""
# implementation is a proof of concept and EXTREMELY SLOW
# determine the order and final size of input vectors
ord = int(np.ceil(np.log2(data.shape[-1]))) # Walsh/Hadamard order
max = 2**ord
# zero fill to power of 2
pad = max - data.shape[-1]
zdata = zf(data,pad)
# Multiple each vector by the hadamard matrix
nat = np.zeros(zdata.shape,dtype=zdata.dtype)
H = hadamard(max)
nat = np.dot(zdata,H)
nat = np.array(nat,dtype=data.dtype)
# Bit-Reversal Permutation
s = [int2bin(x,digits=ord)[::-1] for x in range(max)]
brp = [bin2int(x) for x in s]
brp_data = np.take(nat,brp,axis=-1)
# Gray code permutation (bit-inverse)
gp = gray(ord)
gp_data = np.take(brp_data,gp,axis=-1)
return gp_data | e46eb465e67ffe61872cdb321cbb642fb8a1a094 | 6,343 |
from typing import Dict
def most_repeated_character(string: str) -> str:
"""
Find the most repeated character in a string.
:param string:
:return:
"""
map: Dict[str, int] = {}
for letter in string:
if letter not in map:
map[letter] = 1
else:
map[letter] += 1
return sorted(map.items(), key=lambda item: item[1], reverse=True)[0][0] | c59a1e0a552f12c7561ecdb11530f98f15076cdc | 6,344 |
import json
def read_config(config_filename):
"""Read the expected system configuration from the config file."""
config = None
with open(config_filename, 'r') as config_file:
config = json.loads(config_file.read())
config_checks = []
for config_check in config:
if '_comment' in config_check:
continue
#Config MUST specify a description of the check
description = config_check['description']
write_str("Description: %s" % description, debug=True)
#Config MUST indicate the confidence of the configuration check
confidence = config_check['confidence']
#Config MUST include at least one test obj
tests = config_check['tests']
#Config MUST specify a fix object
assert 'fix' in config_check
assert isinstance(config_check['fix'], dict)
#Fix object must specify at least one of these:
#command, sudo_command, manual
assert ('command' in config_check['fix'] or
'sudo_command' in config_check['fix'] or
'manual' in config_check['fix'])
fix = None
sudo_fix = None
manual_fix = None
if 'command' in config_check['fix']:
fix = config_check['fix']['command']
if 'sudo_command' in config_check['fix']:
sudo_fix = config_check['fix']['sudo_command']
if 'manual' in config_check['fix']:
manual_fix = config_check['fix']['manual']
config_check_obj = ConfigCheck(
tests=tests,
description=description,
confidence=confidence,
fix=fix,
sudo_fix=sudo_fix,
manual_fix=manual_fix)
config_checks.append(config_check_obj)
return config_checks | 31203f77cb5e507d431ced0311423c2aec546a27 | 6,345 |
def transitions(bits):
"""Count the number of transitions in a bit sequence.
>>> assert transitions([0, 0]) == 0
>>> assert transitions([0, 1]) == 1
>>> assert transitions([1, 1]) == 0
>>> assert transitions([1, 0]) == 1
>>> assert transitions([0, 0, 0]) == 0
>>> assert transitions([0, 1, 0]) == 2
>>> assert transitions([1, 1, 0]) == 1
>>> assert transitions([1, 0, 0]) == 1
>>> assert transitions([0, 0, 1]) == 1
>>> assert transitions([0, 1, 1]) == 1
>>> assert transitions([1, 1, 1]) == 0
>>> assert transitions([1, 0, 1]) == 2
"""
transitions = 0
for i in range(0, len(bits)-1):
if bits[i] != bits[i+1]:
transitions += 1
return transitions | bc65f7b57508fc0c34275c4794d73c106bce07fd | 6,346 |
def _convert_code(code):
"""
Convert a JoinQuant-style code into the xalpha form.
:param code:
:return:
"""
no, mk = code.split(".")
if mk == "XSHG":
return "SH" + no
elif mk == "XSHE":
return "SZ" + no | 11ffcde407da7afaaf0eb28a80244d85f5136199 | 6,347 |
def _is_arg_name(s, index, node):
"""Search for the name of the argument. Right-to-left."""
if not node.arg:
return False
return s[index : index+len(node.arg)] == node.arg | b0c995ea553184f266fd968ad60b4c5fb19a55d4 | 6,348 |
import logging
def fitNoise(Sm_bw, L, K, Gamma):
""" Estimate noise parameters
Parameters
----------
Sm_bw : float array
Sufficient statistics from measurements
Sm_fw : float array
Wavefield explained by modeled waves
Returns
-------
BIC : float
Value of the Bayesian information criterion
sigma2_ML : float array
Noise variance estimated from residual signal. One dimensional array of length L.
"""
Sm_fw = zeros(shape(Sm_bw))
(sigma2, SlnGamma_bw) = estimateNoise(Sm_bw, Sm_fw)
LogLikelihood = sum(SlnGamma_bw)
NumParameters = 3 * L
NumPoints = K * L
BIC = -2* LogLikelihood + Gamma *NumParameters *log(NumPoints)
sigma2_ML = sigma2
logging.debug('Additive Gaussian noise fit')
logging.debug('\tLL: {0:.3e} BIC: {1:.3e}'.format(LogLikelihood, BIC))
logging.debug('\tsigma2: {0:.2e} ... {1:.2e} ... {2:.2e}'.format(min(sigma2), mean(sigma2), max(sigma2)))
return(BIC, sigma2_ML) | 49f9c368afc2634fa352da01ffebbe3549c52cd4 | 6,349 |
def run_command(cmd, debug=False):
"""
Execute the given command and return None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: ``sh`` object
"""
if debug:
print_debug('COMMAND', str(cmd))
return cmd() | 605249fbe94e5f1449ed9fdba73e67d4a282a96f | 6,350 |
def goodness(signal, freq_range=None, D=None):
"""Compute the goodness of pitch of a signal."""
if D is None:
D = libtfr.dpss(len(signal), 1.5, 1)[0]
signal = signal * D[0, :]
if freq_range is None:
freq_range = 256
if np.all(signal == 0):
return 0
else:
return np.max(cepstrum(signal)[25:freq_range]) | 00a44a373f56cd07570a89cef9b688f0aae4dd39 | 6,351 |
import os
def checkForRawFile(op, graph, frm, to):
"""
Confirm the source is a raw image.
:param op:
:param graph:
:param frm:
:param to:
:return:
@type op: Operation
@type graph: ImageGraph
@type frm: str
@type to: str
"""
snode = graph.get_node(frm)
exifdata = exif.getexif(os.path.join(graph.dir, snode['file']))
if 'File Type' in exifdata and exifdata['File Type'] in ['AA', 'AAX', 'ACR',
'AI', 'AIT', 'AFM', 'ACFM', 'AMFM',
'PDF', 'PS', 'AVI',
'APE', 'ASF', 'BMP', 'DIB'
'BPG', 'PNG', 'JPEG', 'GIF',
'DIVX', 'DOC', 'DOCX',
'DV', 'EXV',
'F4V', 'F4A', 'F4P', 'F4B',
'EXR', 'HDR', 'FLV', 'FPF', 'FLAC',
'FLA', 'FFF', 'IDML',
'J2C', 'JPC', 'JP2', 'JPF',
'J2K', 'JPX', 'JPM',
'JPE', 'JPG',
'LA', 'LFP',
'MP4', 'MP3',
'M2TS', 'MTS', 'M2T', 'TS',
'M4A', 'M4B', 'M4P', 'M4V',
'MAX', 'MOV', 'QT',
'O', 'PAC', 'MIFF', 'MIF',
'MIE',
'JNG', 'MNG', 'PPT', 'PPS',
'QIF', 'QTI', 'QTIF',
'RIF', 'RIFF', 'SWF',
'VOB', 'TTF', 'TTC', 'SWF',
'SEQ', 'WEBM', 'WEBP']:
return (Severity.ERROR,'Only raw images permitted for this operation')
return None | 35f4a0484f68111f0f3fb003738583b8ed2994fa | 6,352 |
import socket
import fcntl
import struct
def get_ip_address(dev="eth0"):
"""Retrieves the IP address via SIOCGIFADDR - only tested on Linux."""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(s.fileno(),0x8915,struct.pack('256s', dev[:15]))[20:24])
except:
return None | 96f59f17937543ed9cd4652af4703eaf975b8069 | 6,353 |
def connect(**kwargs): # pylint: disable=unused-argument
"""
mock get-a-connection
"""
return MockConn() | a51dd696411e5572344313a73b89d0431bcb5bdf | 6,354 |
def new_organization(request):
"""Creates a new organization."""
if request.method == 'POST':
new_organization_form = OrganizationForm(request.POST)
if new_organization_form.is_valid():
new_organization = new_organization_form.save(commit=False)
new_organization.owner = request.user
new_organization.save()
new_organization.editors.add(request.user)
return redirect(reverse('competencies:organizations'))
new_organization_form = OrganizationForm()
return render_to_response('competencies/new_organization.html',
{'new_organization_form': new_organization_form,},
context_instance=RequestContext(request)) | 2ef5e47a3d42ef3c2ce2dee055cb5311f984496d | 6,355 |
def _build_obs_freq_mat(acc_rep_mat):
"""
build_obs_freq_mat(acc_rep_mat):
Build the observed frequency matrix, from an accepted replacements matrix
The acc_rep_mat matrix should be generated by the user.
"""
# Note: acc_rep_mat should already be a half_matrix!!
total = float(sum(acc_rep_mat.values()))
obs_freq_mat = ObservedFrequencyMatrix(alphabet=acc_rep_mat.alphabet,
build_later=1)
for i in acc_rep_mat:
obs_freq_mat[i] = acc_rep_mat[i] / total
return obs_freq_mat | 8a400ca64c3907ee8c09a5e33f9c45703f267d45 | 6,356 |
def strip(s):
"""strip(s) -> string
Return a copy of the string s with leading and trailing
whitespace removed.
"""
i, j = 0, len(s)
while i < j and s[i] in whitespace: i = i+1
while i < j and s[j-1] in whitespace: j = j-1
return s[i:j] | 7edc91baf8e57e713b464060c05f954510219d34 | 6,357 |
def set_news(title: str, text: str, db_user: User, lang: str, main_page: str) -> dict:
"""
Sets a new news into the news table
:param title: of the news
:param text: of the news
:param db_user: author of the news
:param lang: ui_locales
:param main_page: url
:return:
"""
LOG.debug("Entering set_news function")
author = db_user.firstname
if db_user.firstname != 'admin':
author += ' {}'.format(db_user.surname)
date = arrow.now()
DBDiscussionSession.add(News(title=title, author=author, date=arrow.now(), news=text))
DBDiscussionSession.flush()
transaction.commit()
return_dict = {
'status': 'success',
'title': title,
'date': sql_timestamp_pretty_print(date, lang, False),
'author': author,
'news': text
}
return return_dict | 49ab28d9ef8032f6d39505282920b3757163b609 | 6,358 |
import numpy
def _test_theano_compiled_dtw(input_size, hidden_size, ndim, distance_function, normalize, enable_grads, debug_level,
eps):
"""
Performs a test of a Theano DTW implementation.
:param input_size: The size of the inputs.
:param hidden_size: The size of the hidden values (used only if enable_grads=True).
:param ndim: The number of dimensions to use (2: non-batched, 3: batched).
:param distance_function: The symbolic distance function to use (e.g. a reference to a function in
distance).
:param normalize: Whether the DTW distances should be sequence length normalized.
:param enable_grads: Whether gradients should be computed of a min mean DTW cost function with respect to some
synthetic parameters.
:param debug_level: The debug level to use (see above for explanation).
:param eps: The minimum value to use inside the distance function. Set to the machine epsilon if None.
:return: A compiled Theano function that can be used to compute DTW distances between sequence pairs.
"""
assert 2 <= ndim <= 3
# Create the input variables test values and lengths suitable for testing the implementation.
if ndim == 2:
x1_in, x1 = _var('x1', (4, input_size), 'theano_compiled_dtw', debug_level)
x2_in, x2 = _var('x2', (5, input_size), 'theano_compiled_dtw', debug_level)
x1_lengths_in, x1_lengths = _var('x1_lengths', (), 'theano_compiled_dtw', debug_level, dtype='int32',
test_value_getter=lambda shape: 4)
x2_lengths_in, x2_lengths = _var('x2_lengths', (), 'theano_compiled_dtw', debug_level, dtype='int32',
test_value_getter=lambda shape: 5)
elif ndim == 3:
x1_in, x1 = _var('x1', (5, 4, input_size), 'theano_compiled_dtw', debug_level)
x2_in, x2 = _var('x2', (6, 4, input_size), 'theano_compiled_dtw', debug_level)
if debug_level > 0:
x1.tag.test_value[-1, 0] = 0
x2.tag.test_value[-1, 1] = 0
x1.tag.test_value[-1, 2] = 0
x2.tag.test_value[-1, 2] = 0
x1_lengths_in, x1_lengths = _var('x1_lengths', (2,), 'theano_compiled_dtw', debug_level, dtype='int32',
test_value_getter=lambda shape: numpy.array([4, 5, 4, 5]))
x2_lengths_in, x2_lengths = _var('x2_lengths', (2,), 'theano_compiled_dtw', debug_level, dtype='int32',
test_value_getter=lambda shape: numpy.array([6, 5, 5, 6]))
else:
raise Exception('Unsupported number of dimensions: ' + str(ndim))
if enable_grads:
# Create some synthetic parameters
w = utility.shared_gaussian_random_matrix('w', input_size, hidden_size)
# Transform the inputs using the synthetic parameters
x1 = _debug(theano.dot(x1, w), 'theano_compiled_dtw.z1', debug_level)
x2 = _debug(theano.dot(x2, w), 'theano_compiled_dtw.z2', debug_level)
else:
w = None
# Construct the symbolic expression for DTW
symbolic_dtw = theano_symbolic_dtw(x1, x2, x1_lengths, x2_lengths, distance_function=distance_function,
normalize=normalize, debug_level=debug_level, eps=eps)
outputs = [symbolic_dtw]
if enable_grads:
# Create a min mean DTW cost expression
cost = _debug(tt.mean(symbolic_dtw) if ndim == 3 else symbolic_dtw, 'theano_compiled_dtw.cost', debug_level)
outputs.append(cost)
# Perform symbolic differentiation of the cost expression with respect to the synthetic parameters
outputs.append(_debug(theano.grad(cost, w), 'theano_compiled_dtw.w_grad', debug_level))
return theano.function([x1_in, x2_in, x1_lengths_in, x2_lengths_in], outputs, name='compiled_dtw_' + str(ndim),
on_unused_input='ignore') | 3de3d09027804510c0b9e639aeb640c0e2892e6b | 6,359 |
def test(seriesList):
"""This is a test function"""
return seriesList | 70eb3f5a518533e6243bed74931d5829c9546e2b | 6,360 |
import numpy as np
import xarray as xr
def perform_timeseries_analysis_iterative(dataset_in, intermediate_product=None, no_data=-9999):
"""
Description:
Accumulates per-pixel sums of valid (non-no-data) values and clean-observation
counts over time and computes their normalized mean, optionally updating an
existing intermediate product.
-----
Input:
dataset_in (xarray.DataSet) - dataset with one variable to perform timeseries on
Output:
dataset_out (xarray.DataSet) - dataset containing
variables: normalized_data, total_data, total_clean
"""
data_vars = list(dataset_in.data_vars)
key = data_vars[0]
data = dataset_in[key].astype('float')
processed_data = data.copy(deep=True)
processed_data.values[data.values == no_data] = 0
processed_data_sum = processed_data.sum('time')
clean_data = data.copy(deep=True)
clean_data.values[data.values != no_data] = 1
clean_data.values[data.values == no_data] = 0
clean_data_sum = clean_data.sum('time')
if intermediate_product is None:
processed_data_normalized = processed_data_sum/clean_data_sum
processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
dataset_out = xr.Dataset({'normalized_data': processed_data_normalized,
'total_data': processed_data_sum,
'total_clean': clean_data_sum},
coords={'latitude': dataset_in.latitude,
'longitude': dataset_in.longitude})
else:
dataset_out = intermediate_product.copy(deep=True)
dataset_out['total_data'] += processed_data_sum
dataset_out['total_clean'] += clean_data_sum
processed_data_normalized = dataset_out['total_data'] / dataset_out['total_clean']
processed_data_normalized.values[np.isnan(processed_data_normalized.values)] = 0
dataset_out['normalized_data'] = processed_data_normalized
return dataset_out | 63e3211db70a2ae12db7d1d26a5dad89f308816f | 6,361 |
def getDayOfYear(date):
# type: (Date) -> int
"""Extracts the day of the year from a date.
The first day of the year is day 1.
Args:
date: The date to use.
Returns:
An integer that is representative of the extracted value.
"""
return date.timetuple().tm_yday | 25d7c150a4d7be2e6ae275b10b01e67517ba6cdb | 6,362
import numpy as np
def predict(network, X_test):
    """Takes the weight matrices used by the neural network and the test data,
    and returns the array of predicted labels for the test data.
    Parameter X_test: array holding the 10,000 test images.
    """
    y_pred = []
    for sample in X_test:  # iterate over every image in the test set
        # Forward the image through the network to get the probability of each digit.
        sample_hat = forward(network, sample)
        # The index of the largest probability is the predicted digit.
        sample_pred = np.argmax(sample_hat)
        y_pred.append(sample_pred)  # collect the prediction
    return np.array(y_pred) | 63ac50b7787c6dd89f04532b0a6266fa4d0f7012 | 6,363
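# A minimal evaluation sketch, assuming `network`, `forward`, `X_test` and
# ground-truth labels `y_test` are available as in the MNIST example above.
y_pred = predict(network, X_test)
accuracy = (y_pred == y_test).mean()
print('accuracy:', accuracy)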
def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram):
"""The Best Fit Decreasing (BFD) heuristic for placing VMs on hosts.
:param last_n_vm_cpu: The last n VM CPU usage values to average.
:param hosts_cpu: A map of host names and their available CPU in MHz.
:param hosts_ram: A map of host names and their available RAM in MB.
:param inactive_hosts_cpu: A map of inactive hosts and available CPU MHz.
:param inactive_hosts_ram: A map of inactive hosts and available RAM MB.
:param vms_cpu: A map of VM UUID and their CPU utilization in MHz.
:param vms_ram: A map of VM UUID and their RAM usage in MB.
:return: A map of VM UUIDs to host names, or {} if cannot be solved.
"""
LOG.debug('last_n_vm_cpu: %s', str(last_n_vm_cpu))
LOG.debug('hosts_cpu: %s', str(hosts_cpu))
LOG.debug('hosts_ram: %s', str(hosts_ram))
LOG.debug('inactive_hosts_cpu: %s', str(inactive_hosts_cpu))
LOG.debug('inactive_hosts_ram: %s', str(inactive_hosts_ram))
LOG.debug('vms_cpu: %s', str(vms_cpu))
LOG.debug('vms_ram: %s', str(vms_ram))
vms_tmp = []
for vm, cpu in vms_cpu.items():
if cpu:
last_n_cpu = cpu[-last_n_vm_cpu:]
vms_tmp.append((sum(last_n_cpu) / len(last_n_cpu),
vms_ram[vm],
vm))
else:
LOG.warning('No CPU data for VM: %s - skipping', vm)
vms = sorted(vms_tmp, reverse=True)
hosts = sorted(((v, hosts_ram[k], k)
for k, v in hosts_cpu.items()))
inactive_hosts = sorted(((v, inactive_hosts_ram[k], k)
for k, v in inactive_hosts_cpu.items()))
mapping = {}
for vm_cpu, vm_ram, vm_uuid in vms:
mapped = False
while not mapped:
for _, _, host in hosts:
if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram:
mapping[vm_uuid] = host
hosts_cpu[host] -= vm_cpu
hosts_ram[host] -= vm_ram
mapped = True
break
else:
if inactive_hosts:
activated_host = inactive_hosts.pop(0)
hosts.append(activated_host)
hosts = sorted(hosts)
hosts_cpu[activated_host[2]] = activated_host[0]
hosts_ram[activated_host[2]] = activated_host[1]
else:
break
if len(vms) == len(mapping):
return mapping
return {} | 8e5ab522078384ecef6eb7ef548fc537e411bfae | 6,364 |
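# A hypothetical call with made-up host names and capacities; the function also
# relies on a module-level LOG object from its original package.
mapping = best_fit_decreasing(
    last_n_vm_cpu=2,
    hosts_cpu={'host1': 3000, 'host2': 1000},
    hosts_ram={'host1': 4096, 'host2': 2048},
    inactive_hosts_cpu={},
    inactive_hosts_ram={},
    vms_cpu={'vm-a': [500, 600], 'vm-b': [200, 200]},
    vms_ram={'vm-a': 1024, 'vm-b': 512})
# -> {'vm-a': 'host2', 'vm-b': 'host2'}: both VMs fit on the smaller active host.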
def __sanitize_close_input(x, y):
"""
Makes sure that both x and y are ht.DNDarrays.
Provides copies of x and y distributed along the same split axis (if original split axes do not match).
"""
def sanitize_input_type(x, y):
"""
Verifies that x is either a scalar, or a ht.DNDarray. If a scalar, x gets wrapped in a ht.DNDarray.
Raises TypeError if x is neither.
"""
if not isinstance(x, dndarray.DNDarray):
if np.ndim(x) == 0:
dtype = getattr(x, "dtype", float)
device = getattr(y, "device", None)
x = factories.array(x, dtype=dtype, device=device)
else:
raise TypeError("Expected DNDarray or numeric scalar, input was {}".format(type(x)))
return x
x = sanitize_input_type(x, y)
y = sanitize_input_type(y, x)
# Do redistribution out-of-place
# If only one of the tensors is distributed, unsplit/gather it
if x.split is not None and y.split is None:
t1 = manipulations.resplit(x, axis=None)
return t1, y
elif x.split != y.split:
t2 = manipulations.resplit(y, axis=x.split)
return x, t2
else:
return x, y | 7f3cfc44a47493fcf18c179556c388f9d9e9c643 | 6,365 |
import os
def _is_buildbot_cmdline(cmdline):
"""Returns (bool): True if a process is a BuildBot process.
We determine this by testing if it has the command pattern:
[...] [.../]python [.../]twistd [...]
Args:
cmdline (list): The command line list.
"""
return any((os.path.basename(cmdline[i]) == 'python' and
os.path.basename(cmdline[i+1]) == 'twistd')
for i in xrange(len(cmdline)-1)) | 936d62d4129f1ae4da538d21f143317e4de9016d | 6,366 |
def table_information_one(soup, div_id_name: str = None) -> dict:
""" first method for bringing back table information as a dict.
works on:
parcelInfo
SummaryPropertyValues
SummarySubdivision
"""
table = []
for x in soup.find_all("div", {"id": div_id_name}):
for div in x.find_all("div"):
for row in x.find_all("tr"):
cols = row.find_all("td")
cols = [element.text.strip() for element in cols if element]
table.extend(cols)
it = iter(table)
test_dict = dict(zip(it, it))
if test_dict.get(""):
del test_dict[""]
return test_dict | 3b317faff07bff028d43f20b7cfaa8afa587ca50 | 6,367 |
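# A minimal sketch with made-up markup: a two-column table inside the target
# div is flattened into key/value pairs.
from bs4 import BeautifulSoup
html = """
<div id="parcelInfo"><div><table>
<tr><td>Owner</td><td>Jane Doe</td></tr>
<tr><td>Acreage</td><td>1.25</td></tr>
</table></div></div>
"""
soup = BeautifulSoup(html, 'html.parser')
info = table_information_one(soup, div_id_name='parcelInfo')
# -> {'Owner': 'Jane Doe', 'Acreage': '1.25'}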
import functools
def Eval_point_chan(state, chan, data):
    """External validity, along a channel, where point-data is
    pulled back along the channel."""
# for each element, state.sp.get(*a), of the codomain
vals = [(chan >> state)(*a) ** data(*a) for a in data.sp.iter_all()]
val = functools.reduce(lambda p1, p2: p1 * p2, vals, 1)
return val | 99355101853f3caa5c75b7e3f47aa5439a11aef1 | 6,368 |
import pyranges as pr
def dfi2pyranges(dfi):
"""Convert dfi to pyranges
Args:
dfi: pd.DataFrame returned by `load_instances`
"""
dfi = dfi.copy()
dfi['Chromosome'] = dfi['example_chrom']
dfi['Start'] = dfi['pattern_start_abs']
dfi['End'] = dfi['pattern_end_abs']
dfi['Name'] = dfi['pattern']
dfi['Score'] = dfi['contrib_weighted_p']
dfi['Strand'] = dfi['strand']
return pr.PyRanges(dfi) | 98ce4fbac93f81a6022d1cc012ca5270d7d681f3 | 6,369 |
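# A one-row sketch of the `dfi` frame the converter expects; column names
# follow the function above, the values are made up.
import pandas as pd
dfi_example = pd.DataFrame({
    'example_chrom': ['chr1'],
    'pattern_start_abs': [100],
    'pattern_end_abs': [110],
    'pattern': ['motif_A'],
    'contrib_weighted_p': [0.9],
    'strand': ['+'],
})
gr = dfi2pyranges(dfi_example)  # PyRanges with Chromosome/Start/End/Name/Score/Strand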
def cleared_nickname(nick: str) -> str:
"""Perform nickname clearing on given nickname"""
if nick.startswith(('+', '!')):
nick = nick[1:]
if nick.endswith('#'):
nick = nick[:-1]
if all(nick.rpartition('(')):
nick = nick.rpartition('(')[0]
return nick | f3a5c838f0518a929dfa8b65f83a1d4c6e6dbbe4 | 6,370 |
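# A few example inputs and their cleaned forms:
assert cleared_nickname('+alice#') == 'alice'
assert cleared_nickname('!bob(away)') == 'bob'
assert cleared_nickname('carol') == 'carol'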
def validate_model_on_lfw(
strategy,
model,
left_pairs,
right_pairs,
is_same_list,
) -> float:
"""Validates the given model on the Labeled Faces in the Wild dataset.
### Parameters
strategy: The distribution strategy used to run the model.
model: The model to be tested.
left_pairs, right_pairs: The left and right images of each LFW pair.
is_same_list: Booleans indicating whether each pair shows the same person.
### Returns
(accuracy_mean, accuracy_std, validation_rate, validation_std, far,\
auc, eer) - Accuracy Mean, Accuracy Standard Deviation, Validation Rate,\
Validation Standard Deviation, FAR, Area Under Curve (AUC) and Equal Error\
Rate (EER).
"""
embeddings, is_same_list = _get_embeddings(
strategy,
model,
left_pairs,
right_pairs,
is_same_list,
)
tpr, fpr, accuracy, val, val_std, far = evaluate(embeddings, is_same_list)
auc = metrics.auc(fpr, tpr)
eer = brentq(lambda x: 1.0 - x - interpolate.interp1d(fpr, tpr)(x), 0.0, 1.0)
return np.mean(accuracy), np.std(accuracy), val, val_std, far, auc, eer | ead4ed84c53b0114c86ecf928d44114b5d896373 | 6,371 |
import sys
def get_allsongs():
"""
Get all the songs in your media server
"""
conn = database_connect()
if(conn is None):
return None
cur = conn.cursor()
try:
# Try executing the SQL and get from the database
sql = """select
s.song_id, s.song_title, string_agg(saa.artist_name,',') as artists
from
mediaserver.song s left outer join
(mediaserver.Song_Artists sa join mediaserver.Artist a on (sa.performing_artist_id=a.artist_id)
) as saa on (s.song_id=saa.song_id)
group by s.song_id, s.song_title
order by s.song_id;"""
r = dictfetchall(cur,sql)
print("return val is:")
print(r)
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return r
except:
# If there were any errors, return a NULL row printing an error to the debug
print("Unexpected error getting All Songs:", sys.exc_info()[0])
raise
cur.close() # Close the cursor
conn.close() # Close the connection to the db
return None | fbef7b34930be8f9e87cc5d9307996238b3e2d25 | 6,372 |
import asyncio
from binascii import unhexlify
def nlu_audio(settings, logger):
"""Wrapper for NLU audio"""
speech_args = settings['speech']
loop = asyncio.get_event_loop()
interpretations = {}
with Recorder(loop=loop) as recorder:
interpretations = loop.run_until_complete(understand_audio(
loop,
speech_args['url'],
speech_args['app_id'],
unhexlify(speech_args['app_key']),
# context_tag=credentials['context_tag'],
"master",
speech_args['language'],
recorder=recorder,
logger=logger))
# loop.close()
if interpretations is False:
# The user did not speak
return {}
else:
return interpretations | 4d9c5eeacc0c1c36cad4575cc774b3faded33c23 | 6,373 |
import numpy as np
def _gauss(sigma, n_sigma=3):
"""Discrete, normalized Gaussian centered on zero. Used for filtering data.
Args:
sigma (float): standard deviation of Gaussian
n_sigma (float): extend x in each direction by n_sigma * sigma
Returns:
ndarray: discrete Gaussian curve
"""
x_range = n_sigma * sigma
x = np.arange(-x_range, x_range + 1e-5, 1, dtype=float)
y = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x / sigma)**2)
return y | baea764c15c33d99f26bbe803844e06df97d908e | 6,374 |
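# Usage sketch: smooth a noisy 1-D signal by convolving it with the kernel.
# Because the kernel is normalized, the convolution preserves the signal scale.
kernel = _gauss(sigma=2.0)
noisy = np.random.default_rng(0).normal(size=100)
smoothed = np.convolve(noisy, kernel, mode='same')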
import cv2
import numpy as np
def create_tentacle_mask(points, height, width, buzzmobile_width, pixels_per_m):
"""Creates a mask of a tentacle by drawing the points as a line."""
tentacle_mask = np.zeros((height, width), np.uint8)
for i in range(len(points) - 1):
pt1 = points[i]
pt2 = points[i+1]
cv2.line(tentacle_mask, pt1, pt2, [255, 255, 255],
int(buzzmobile_width * pixels_per_m))
return tentacle_mask | 764251efff298f4b1990910d04c31ea9ed1760fd | 6,375 |
import numpy as np
import scipy.linalg as la
def _matrix_to_real_tril_vec(matrix):
"""Parametrize a positive definite hermitian matrix using its Cholesky decomposition"""
tril_matrix = la.cholesky(matrix, lower=True)
diag_vector = tril_matrix[np.diag_indices(tril_matrix.shape[0])].astype(float)
complex_tril_vector = tril_matrix[np.tril_indices(tril_matrix.shape[0], -1)]
real_tril_vector = _complex_to_real(complex_tril_vector)
return np.concatenate((diag_vector, real_tril_vector)) | c59a0cd9fde6d77619a9681c3989efc9d704c07b | 6,376 |
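# Sketch on a 2x2 positive definite Hermitian matrix, assuming the companion
# helper _complex_to_real maps a length-m complex vector to 2m reals: the
# result then holds 2 diagonal entries plus 2 reals for the off-diagonal term.
A = np.array([[2.0, 0.5 + 0.5j], [0.5 - 0.5j, 1.5]])
params = _matrix_to_real_tril_vec(A)  # expected length: 4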
import os
def is_subdir(path, directory):
"""Check if path is a sub of directory.
Arguments:
path (string):
the path to check
directory (string):
the path to use as relative starting point.
Returns:
bool: True if path is a sub of directory or False otherwise.
"""
try:
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
except ValueError:
# filename and folder are on different mount points
return False | 6d947da6fada3b04f9b75728260b34ddfbbb3724 | 6,377 |
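# Quick checks with POSIX-style paths:
assert is_subdir('/srv/app/static/css', '/srv/app') is True
assert is_subdir('/etc/passwd', '/srv/app') is False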
import tensorflow as tf
def max_pool(pool_size, strides, padding='SAME', name=None):
"""max pooling layer"""
return tf.layers.MaxPooling2D(pool_size, strides, padding, name=name) | 5259160d3f2955b16e039482e7c51cb2f6d777e9 | 6,378 |
def size_as_minimum_int_or_none(size):
"""
:return: int or None, the minimum size across all recurrence entries. For example:
- size = no value, will return: None
- size = simple int value of 5, will return: 5
- size = timed interval(s), like "2@0 22 * * *:24@0 10 * * *", will return: 2
"""
return min(size_as_recurrence_map(size).values()) | 742dc4f2d175a9372cc60e73dad21da9e927dc0c | 6,379 |
import numpy as np
import torch
def args_to_numpy(args):
"""Converts all Torch tensors in a list to NumPy arrays
Args:
args (list): list containing QNode arguments, including Torch tensors
Returns:
list: returns the same list, with all Torch tensors converted to NumPy arrays
"""
res = []
for i in args:
if isinstance(i, torch.Tensor):
if i.is_cuda: # pragma: no cover
res.append(i.cpu().detach().numpy())
else:
res.append(i.detach().numpy())
else:
res.append(i)
# if NumPy array is scalar, convert to a Python float
res = [i.tolist() if (isinstance(i, np.ndarray) and not i.shape) else i for i in res]
return res | fbf01c2ea236cc11f7b1d7a835b0a0ba338ba153 | 6,380 |
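# Example: tensors become NumPy arrays, plain values pass through, and a 0-d
# tensor ends up as a Python float thanks to the final scalar unwrap.
mixed = [torch.tensor([1.0, 2.0]), 0.5, torch.tensor(3.0)]
converted = args_to_numpy(mixed)
# -> [array([1., 2.]), 0.5, 3.0]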
def optimizer_setup(model, params):
"""
creates optimizer, can have layer specific options
"""
if params.optimizer == 'adam':
if params.freeze_backbone:
optimizer = optimizer_handler.layer_specific_adam(model, params)
else:
optimizer = optimizer_handler.plain_adam(model, params)
elif params.optimizer == 'sgd':
if params.freeze_backbone:
optimizer = optimizer_handler.layer_specific_sgd(model, params)
else:
optimizer = optimizer_handler.plain_sgd(model, params)
if params.zero_bn_bias_decay:
optimizer = zero_wdcay_bn_bias(optimizer)
return optimizer | c58427d7da66a02c2a44f92cb7d6350e2b9a83fd | 6,381 |
def charToEmoji(char, spaceCounter=0):
"""
If you insert a space, make sure you have your own
space counter and increment it. Space counter goes from 0 to 3.
"""
if char in emojitable.table:
print(char)
if char == ' ':
emoji = emojitable.table[char][spaceCounter]
else:
emoji = emojitable.table[char]
return emoji | 4943152d932f1529af86cdff827ff069f173fcb3 | 6,382 |
import numpy as np
def averages_area(averages):
"""
Computes the area of the polygon formed by the hue bin averages.
Parameters
----------
averages : array_like, (n, 2)
Hue bin averages.
Returns
-------
float
Area of the polygon.
"""
N = averages.shape[0]
triangle_areas = np.empty(N)
for i in range(N):
u = averages[i, :]
v = averages[(i + 1) % N, :]
triangle_areas[i] = (u[0] * v[1] - u[1] * v[0]) / 2
return np.sum(triangle_areas) | 62ef194172095a9e7ddd6b9cb0cccb6d95fb2c4a | 6,383 |
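# The loop is the shoelace formula; a counter-clockwise unit square has
# signed area 1.0.
square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
assert np.isclose(averages_area(square), 1.0)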
from os import PathLike
from Bio import Phylo
def _tree_cmp(fpath1: PathLike, fpath2: PathLike, tree_format: str = 'newick') -> bool:
"""Returns True if trees stored in `fpath1` and `fpath2` are equivalent, False otherwise.
Args:
fpath1: First tree file path.
fpath2: Second tree file path.
tree_format: Tree format, i.e. ``newick``, ``nexus``, ``phyloxml`` or ``nexml``.
"""
ref_tree = Phylo.read(fpath1, tree_format)
target_tree = Phylo.read(fpath2, tree_format)
# Both trees are considered equal if they have the same leaves and the same distance from each to the root
ref_dists = {leaf.name: ref_tree.distance(leaf) for leaf in ref_tree.get_terminals()}
target_dists = {leaf.name: target_tree.distance(leaf) for leaf in target_tree.get_terminals()}
return ref_dists == target_dists | 0c42386b94d9bf6c157b0d60593413074af772f4 | 6,384 |
from skbio import Sequence
def parse_genemark(input_f, genbank_fp):
""" Extract atypical genes identified by GeneMark
Parameters
----------
input_f: string
file descriptor for GeneMark output gene list (*.lst)
genbank_fp: string
file path to genome in GenBank format
Notes
-----
genbank_fp is the intermediate GenBank file generated by reformat_input.py,
in which multiple sequences are concatenated, instead of the original
GenBank file.
Returns
-------
output: string
gene names (protein_ids) separated by newline
"""
genes = {}
gb = Sequence.read(genbank_fp, format='genbank')
for feature in gb.interval_metadata._intervals:
m = feature.metadata
if m['type'] == 'CDS' and 'protein_id' in m:
protein_id = m['protein_id'].replace('\"', '')
if protein_id not in genes:
strand = m['strand']
start = feature.bounds[0][0] + 1
end = feature.bounds[0][1]
genes[protein_id] = (start, end, strand)
atypical_genes = []
reading = False
for line in input_f:
x = line.strip().split()
if len(x) == 2 and x == ['#', 'Length']:
reading = True
# atypical genes have class '2' in the 6th column
elif reading and len(x) == 6 and x[5] == '2':
(start, end, strand) = (int(x[2].lstrip('<>')),
int(x[3].lstrip('<>')),
x[1])
for (gene, x) in genes.items():
if x[0] == start and x[1] == end and x[2] == strand:
atypical_genes.append(gene)
return '\n'.join(sorted(atypical_genes)) | 3621d81eedad83f66a1be405bc49c2da3ea520d9 | 6,385 |
def get_hex(fh, nbytes=1):
"""
Read nbytes bytes (1 by default) from fh
and return them formatted as hexadecimal.
"""
hstr = ""
for i in range(nbytes):
b = "%02X " % ord(fh.read(1))  # read one byte and format it as hex
hstr += b
return hstr | b1d426f7bfcceffa829c9dcc1150f32be5c48413 | 6,386 |
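# Usage sketch on an in-memory binary stream: read two bytes as hex.
from io import BytesIO
fh = BytesIO(b'\x1f\x8b\x08')
print(get_hex(fh, nbytes=2))  # -> '1F 8B '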
def fetch(pages, per_page, graph):
"""
Get a list of posts from facebook
"""
return [x.replace('\n', '')
for name in pages
for x in fetch_page(name, per_page, graph)] | ea9af2e1d2fe9c2880aebd1148cb8f6457f55bb2 | 6,387 |
import tensorflow as tf
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
not be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = tf.shape(labels)
labels = tf.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = metric_learning.pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = tf.math.equal(labels, tf.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = tf.math.logical_not(adjacency)
batch_size = tf.size(labels)
diff = margin - pairwise_distances
mask = tf.cast(adjacency_not, dtype=tf.dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
row_minimums = tf.math.reduce_min(diff, 1, keepdims=True)
row_negative_maximums = tf.math.reduce_max(
tf.math.multiply(diff - row_minimums, mask), 1,
keepdims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = tf.math.maximum(row_negative_maximums,
tf.transpose(row_negative_maximums))
diff_tiled = tf.tile(diff, [batch_size, 1])
mask_tiled = tf.tile(mask, [batch_size, 1])
max_elements_vect = tf.reshape(tf.transpose(max_elements), [-1, 1])
loss_exp_left = tf.reshape(
tf.math.reduce_sum(
tf.math.multiply(
tf.math.exp(diff_tiled - max_elements_vect), mask_tiled),
1,
keepdims=True), [batch_size, batch_size])
loss_mat = max_elements + tf.math.log(loss_exp_left +
tf.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = tf.cast(
adjacency, dtype=tf.dtypes.float32) - tf.linalg.diag(
tf.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = tf.math.reduce_sum(mask_positives) / 2.0
lifted_loss = tf.math.truediv(
0.25 * tf.math.reduce_sum(
tf.math.square(
tf.math.maximum(
tf.math.multiply(loss_mat, mask_positives), 0.0))),
num_positives)
return lifted_loss | ac6de39c7b4fc204dbf46a716f796434da959134 | 6,388 |
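# A toy call to illustrate the expected shapes, assuming the metric_learning
# module used above is importable: four 8-dimensional embeddings, two classes.
labels = tf.constant([0, 0, 1, 1], dtype=tf.int32)
embeddings = tf.random.normal([4, 8])
loss = lifted_struct_loss(labels, embeddings, margin=1.0)  # scalar tf.float32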
def get_uint8_rgb(dicom_path):
"""
Reads dicom from path and returns rgb uint8 array
where R: min-max normalized, G: CLAHE, B: histogram equalized.
Image size remains original.
"""
dcm = _read_dicom_image(dicom_path)
feats = _calc_image_features(dcm)
return (feats*255).astype(np.uint8) | a81362af49a8c93c2e0224f033260ed3a0e5931f | 6,389 |
from contextlib import closing
import random
import subprocess
import time
import socket
def start(host, port, options, timeout=10):
"""Start an instance of mitmproxy server in a subprocess.
Args:
host: The host running mitmproxy.
port: The port mitmproxy will listen on. Pass 0 for automatic
selection.
options: The selenium wire options.
timeout: The number of seconds to wait for the server to start.
Default 10 seconds.
Returns: A MitmProxy object representing the server.
Raises:
TimeoutError: if the mitmproxy server did not start in the
timeout period.
RuntimeError: if there was some unknown error starting the
mitmproxy server.
"""
for _ in range(RETRIES):
port = port or random.randint(PORT_RANGE_START, PORT_RANGE_END)
proxy = subprocess.Popen([
'mitmdump',
*_get_upstream_proxy_args(options),
'--set',
'confdir={}'.format(options.get('mitmproxy_confdir', DEFAULT_CONFDIR)),
'--set',
'listen_port={}'.format(port),
'--set',
'ssl_insecure={}'.format(str(options.get('verify_ssl', 'true')).lower()),
'--set',
'upstream_cert={}'.format(DEFAULT_UPSTREAM_CERT),
'--set',
'stream_websockets={}'.format(DEFAULT_STREAM_WEBSOCKETS),
'--set',
'termlog_verbosity={}'.format(DEFAULT_TERMLOG_VERBOSITY),
'--set',
'flow_detail={}'.format(DEFAULT_FLOW_DETAIL),
'-s',
__file__
])
try:
proxy.wait(timeout=2)
except subprocess.TimeoutExpired:
# Subprocess has started
break
else:
raise RuntimeError('Error starting mitmproxy - check console output')
start_time = time.time()
while time.time() - start_time < timeout:
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
# Try and connect to mitmproxy to determine whether it's started up
if sock.connect_ex((host, port)) == 0:
return MitmProxy(host, port, proxy)
# Hasn't yet started so wait a bit and try again
time.sleep(0.5)
raise TimeoutError('mitmproxy did not start within {} seconds'.format(timeout)) | 5396f45d05e8c45a66ad0e658de3b22251883c06 | 6,390 |
def query_db_cluster(instanceid):
"""
Querying whether DB is Clustered or not
"""
try:
db_instance = RDS.describe_db_instances(
DBInstanceIdentifier=instanceid
)
return db_instance['DBInstances'][0]['DBClusterIdentifier']
except KeyError:
return False | 6b84db4ff0b3788085ca4313aa397a6bd675e696 | 6,391 |
from typing import Callable, TypeVar
T = TypeVar("T")
def is_nsfw() -> Callable[[T], T]:
"""A :func:`.check` that checks if the channel is a NSFW channel.
This check raises a special exception, :exc:`.ApplicationNSFWChannelRequired`
that is derived from :exc:`.ApplicationCheckFailure`.
.. versionchanged:: 2.0
Raise :exc:`.ApplicationNSFWChannelRequired` instead of generic :exc:`.ApplicationCheckFailure`.
DM channels will also now pass this check.
"""
def pred(ctx: ApplicationContext) -> bool:
ch = ctx.channel
if ctx.guild is None or (isinstance(ch, (discord.TextChannel, discord.Thread)) and ch.is_nsfw()):
return True
raise ApplicationNSFWChannelRequired(ch) # type: ignore
return check(pred) | 5941b74e55c43597f3c3e367434c3a0b54d92209 | 6,392 |
import numpy as np
def calc_MAR(residuals, scalefactor=1.482602218):
"""Return median absolute residual (MAR) of input array. By default,
the result is scaled to the normal distribution."""
return scalefactor * np.median(np.abs(residuals)) | 1691bf7883310562f4ee9e84d07c1fa188fe306b | 6,393 |
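# For roughly normal residuals the scaled MAR approximates the standard
# deviation (1.4826 is the usual MAD-to-sigma consistency constant).
rng = np.random.default_rng(42)
residuals = rng.normal(scale=2.0, size=10_000)
robust_sigma = calc_MAR(residuals)  # close to 2.0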
def resourceimport_redirect():
"""
Returns a redirection action to the main resource importing view,
which is a list of files available for importing.
Returns:
The redirection action.
"""
return redirect(url_for('resourceimportfilesview.index')) | be5c6e7ef9fcc5c369d31a75960c2849cede2b5f | 6,394 |
def valid_attribute(node, whitelist):
"""Check the attribute access validity. Returns True if the member access is valid, False otherwise."""
# TODO: Support more than gast.Name?
if not isinstance(node.value, gast.Name):
if isinstance(node.value, gast.Attribute):
return valid_attribute(node.value, whitelist)
return False
is_valid = False
for elt in whitelist:
if isinstance(elt, str):
continue
if isinstance(elt, Variable):
is_valid_impl = valid_attribute_impl(node, elt)
is_valid = is_valid_impl or is_valid
if is_valid:
return is_valid
return is_valid | 1576a8e0e08b9f9387180a41137f1fffd786a4a5 | 6,395 |
def gen_fov_chan_names(num_fovs, num_chans, return_imgs=False, use_delimiter=False):
"""Generate fov and channel names
Names have the format 'fov0', 'fov1', ..., 'fovN' for fovs and 'chan0', 'chan1', ...,
'chanM' for channels.
Args:
num_fovs (int):
Number of fov names to create
num_chans (int):
Number of channel names to create
return_imgs (bool):
Return 'chanK.tiff' as well if True. Default is False
use_delimiter (bool):
Appends '_otherinfo' to the first fov. Useful for testing fov id extraction from
filenames. Default is False
Returns:
tuple (list, list) or (list, list, list):
If return_imgs is False, only fov and channel names are returned
If return_imgs is True, image names will also be returned
"""
fovs = [f'fov{i}' for i in range(num_fovs)]
if use_delimiter:
fovs[0] = f'{fovs[0]}_otherinfo'
chans = [f'chan{i}' for i in range(num_chans)]
if return_imgs:
imgs = [f'{chan}.tiff' for chan in chans]
return fovs, chans, imgs
else:
return fovs, chans | 417490259c42a52c58aab418fbb63185602e6750 | 6,396 |
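# Example outputs for small inputs:
fovs, chans = gen_fov_chan_names(2, 3)
# fovs  == ['fov0', 'fov1'], chans == ['chan0', 'chan1', 'chan2']
fovs, chans, imgs = gen_fov_chan_names(1, 2, return_imgs=True)
# imgs == ['chan0.tiff', 'chan1.tiff']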
def get_supported():
"""
Returns a list of hints supported by the window manager.
:return: A list of atoms in the _NET_SUPPORTED property.
:rtype: util.PropertyCookie (ATOM[]/32)
"""
return util.PropertyCookie(util.get_property(root, '_NET_SUPPORTED')) | 038e7d74cd6cdf2a0dc1d04a5e54b312a1a44b0e | 6,397 |
def get_exportables():
"""Get all exportables models except snapshot"""
exportables = set(converters.get_exportables().values())
exportables.discard(all_models.Snapshot)
return exportables | 21dddb65f0193d02aae47d88a2743b9c92b3b245 | 6,398 |
import time
def playback(driver, settings, record, output, mode=None): # pylint: disable=W0621,R0912
"""
Playback a given test.
"""
if settings.desc:
output("%s ... " % settings.desc, flush=True)
else:
output("Playing back %s ... " % settings.name, flush=True)
_begin_browsing(driver, settings)
wait_until_loaded(driver)
state = states.OK
err = None
mode = mode or modes.PLAYBACK
try:
for step in record.steps:
step.delayer(driver)
timeout = 0
while timeout < 40:
timeout += 1
if not driver.execute_script(js.isPageChanging(250)): # milliseconds
step.execute(driver, settings, mode)
break
else:
time.sleep(0.25)
if timeout == 40:
raise exc.PlaybackTimeout(
'%s timed out while waiting for the page to be static.' \
% settings.name
)
except Exception as exception: # pylint: disable=W0703
if isinstance(exception, exc.ScreenshotsDiffer):
state = states.FAIL
err = exception
else:
state = states.ERROR
if hasattr(exception, 'msg') and (exception.msg.startswith('element not visible') or
exception.msg.startswith('Element is not currently visible')):
err = exc.ElementNotVisible(
"Element was not visible when expected during playback. If "
"your playback depended on a significant rerender having been "
"done, then make sure you've waited until nothing is changing "
"before taking a screenshot."
)
else:
err = exception
output('%s' % str(state))
if err:
output(': %s' % str(err))
return (state, err) | 36cd718760b76100a9959a1568b8c21f2ea7e334 | 6,399 |