content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
from typing import List
from typing import Dict
def prepare_answer_extraction_samples(context: str, answer_list: List[Dict] = None):
"""
Args:
context: str (assumed to be normalized via normalize_text)
answer_list: [
{'text': str, 'answer_start': int},
{'text': str, 'answer_start': int},
...
]
"""
prepare_target = True if answer_list else False
# split into sentences
sentence_list = sentence_tokenize(context)
num_sentences = len(sentence_list)
if prepare_target:
answer_list_per_sentence = get_answer_list_per_sentence(sentence_list, answer_list)
# prepare sources (and targets)
samples = []
for sentence_ind in range(num_sentences):
source_text = "extract answers:"
if prepare_target:
answer_list = answer_list_per_sentence[sentence_ind]
answer_list = [answer["text"] for answer in answer_list]
if not answer_list:
continue
answer_list = list(dict.fromkeys(answer_list)) # remove duplicate answers without changing the order
target_text = " <sep> ".join(answer_list) + " <sep>"
else:
target_text = None
for sentence_ind2, sentence in enumerate(sentence_list):
if sentence_ind == sentence_ind2:
sentence = f"<hl> {sentence} <hl>"
source_text = f"{source_text} {sentence}"
source_text = source_text.strip()
sample = {"source_text": source_text, "target_text": target_text, "answer_list": answer_list}
if sample["target_text"] is None:
sample
samples.append(sample)
return samples | 3cb431fa2ec6472f3e060cb7f85eb0a52cfbfe6c | 23,309 |
from typing import Optional
from typing import Callable
from typing import List
import abc
from inspect import isfunction
def mix_in(
source: type,
target: type,
should_copy: Optional[Callable[[str, bool], bool]] = None,
) -> List[str]:
"""
    Copy all defined functions from the mixin into the target. It can be
    useful when you cannot inherit from the mixin because of an incompatible
    metaclass. It does not copy abstract functions. If `source` is an
    `ABCMeta`, `target` will be registered with it.
Returns list of copied methods.
"""
mixed_in_methods = []
try:
abstract_methods = source.__abstractmethods__ # type:ignore
except AttributeError:
abstract_methods = set()
target_members = dir(target)
for n in dir(source):
fn = getattr(source, n)
if isfunction(fn) and n not in abstract_methods:
            already_exists = n in target_members
if should_copy is None or should_copy(n, already_exists):
setattr(target, n, fn)
mixed_in_methods.append(n)
if isinstance(source, abc.ABCMeta):
source.register(target)
return mixed_in_methods | 702552f01d4a915f11d6d3d618aaf151bf5b8af3 | 23,310 |
def get_img_num_per_cls(cifar_version, imb_factor=None):
"""
Get a list of image numbers for each class, given cifar version
    Num of imgs follows an exponential distribution
    img max: (5000 or 500) * e^(-lambda * 0);
    img min: (5000 or 500) * e^(-lambda * (int(cifar_version) - 1))
    so exp(-lambda * (int(cifar_version) - 1)) = img_min / img_max = imb_factor
args:
cifar_version: str, '10', '100', '20'
imb_factor: float, imbalance factor: img_min/img_max,
        None if getting the default cifar data number
output:
img_num_per_cls: a list of number of images per class
"""
cls_num = int(cifar_version)
img_max = img_num(cifar_version)
if imb_factor is None:
return [img_max] * cls_num
img_num_per_cls = []
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
return img_num_per_cls | 941be25ed96ef336e11c16bd9b81f2973c25bcf2 | 23,311 |
def xml():
"""
Returns the lti.xml file for the app.
"""
try:
return Response(render_template(
'lti.xml'), mimetype='application/xml'
)
    except Exception:
app.logger.error("Error with XML.")
return return_error('''Error with XML. Please refresh and try again. If this error persists,
please contact support.''') | c40ffb52e4931fd41e6627778331a1c7acf9558b | 23,312 |
def log_get_level(client):
"""Get log level
Returns:
Current log level
"""
return client.call('log_get_level') | 2a3915d8c6576187a0af738d3021d495b4efda21 | 23,313 |
def cal_pivot(n_losses,network_block_num):
"""
Calculate the inserted layer for additional loss
"""
num_segments = n_losses + 1
num_block_per_segment = (network_block_num // num_segments) + 1
pivot_set = []
for i in range(num_segments - 1):
pivot_set.append(min(num_block_per_segment * (i + 1), network_block_num - 1))
return pivot_set | d23324fc39f2f1aeec807a4d65a51234a2b76cde | 23,314 |
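A quick worked example of the pivot arithmetic above, using hypothetical numbers (3 auxiliary losses over a 12-block network); it simply re-applies the same formula outside the function:

```python
# Hypothetical example: 3 auxiliary losses inserted into a 12-block network.
n_losses, network_block_num = 3, 12
num_segments = n_losses + 1                                       # 4 segments
num_block_per_segment = (network_block_num // num_segments) + 1   # 4 blocks per segment
pivot_set = [min(num_block_per_segment * (i + 1), network_block_num - 1)
             for i in range(num_segments - 1)]
print(pivot_set)  # [4, 8, 11] -> extra losses attached after blocks 4, 8 and 11
```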
import asyncio
from ipaddress import IPv4Address
from socket import AddressFamily
import aioping
async def scan_host(
host: IPv4Address,
semaphore: asyncio.Semaphore,
timeout: int,
verbose: bool,
):
"""
Locks the "semaphore" and tries to ping "host" with timeout "timeout" s.
Prints out the result of the ping to the standard output.
"""
async with semaphore:
try:
delay = await aioping.ping(
str(host), timeout, family=AddressFamily.AF_INET
)
print(f"{host} responded after {delay:.4f} ms")
return True
except TimeoutError:
if verbose:
print(f"{host} has not responded")
return False
except OSError as error:
if verbose:
print(
f"Ping to host {host} failed for the following reason: {error}"
)
return False | 66bd0165dbbb7b717fd6ded19c78cb901f89a588 | 23,315 |
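A minimal driver sketch for the coroutine above, assuming the third-party `aioping` package is installed; the subnet, concurrency limit, and timeout are illustrative values only:

```python
import asyncio
from ipaddress import IPv4Network

async def main():
    semaphore = asyncio.Semaphore(8)          # at most 8 concurrent pings
    hosts = IPv4Network("192.168.1.0/29").hosts()
    results = await asyncio.gather(
        *(scan_host(host, semaphore, timeout=1, verbose=True) for host in hosts)
    )
    print(f"{sum(results)} host(s) responded")

asyncio.run(main())
```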
def _emit_post_update_statements(
base_mapper, uowtransaction, cached_connections, mapper, table, update
):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
needs_version_id = (
mapper.version_id_col is not None
and mapper.version_id_col in mapper._cols_by_table[table]
)
def update_stmt():
clauses = BooleanClauseList._construct_raw(operators.and_)
for col in mapper._pks_by_table[table]:
clauses.clauses.append(
col == sql.bindparam(col._label, type_=col.type)
)
if needs_version_id:
clauses.clauses.append(
mapper.version_id_col
== sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type,
)
)
stmt = table.update().where(clauses)
if mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(("post_update", table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, records in groupby(
update,
lambda rec: (rec[3], set(rec[4])), # connection # parameter keys
):
rows = 0
records = list(records)
connection = key[0]
assert_singlerow = (
connection.dialect.supports_sane_rowcount
if mapper.version_id_col is None
else connection.dialect.supports_sane_rowcount_returning
)
assert_multirow = (
assert_singlerow
and connection.dialect.supports_sane_multi_rowcount
)
allow_multirow = not needs_version_id or assert_multirow
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, mapper_rec, connection, params in records:
c = cached_connections[connection].execute(statement, params)
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
rows += c.rowcount
else:
multiparams = [
params
for state, state_dict, mapper_rec, conn, params in records
]
check_rowcount = assert_multirow or (
assert_singlerow and len(multiparams) == 1
)
c = cached_connections[connection].execute(statement, multiparams)
rows += c.rowcount
for state, state_dict, mapper_rec, connection, params in records:
_postfetch_post_update(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched."
% (table.description, len(records), rows)
)
elif needs_version_id:
util.warn(
"Dialect %s does not support updated rowcount "
"- versioning cannot be verified."
% c.dialect.dialect_description
) | 13444703b45030a2d0e1dc1f2371a729cd812c11 | 23,316 |
import copy
from itertools import product
def build_grid_search_config(params_dict):
"""
    Given one JSON config, build the N JSON configs that satisfy a grid search; currently the grid search only applies to the optimization section
:param params_dict:
:return: param_config_list
"""
model_params_dict = params_dict.get("model")
opt_params = model_params_dict.get("optimization", None)
if not opt_params:
raise ValueError("optimization's params can't be none")
    # get the dict to be grid-searched
train_data_params = params_dict.get("dataset_reader").get("train_reader").get("config", None)
if not train_data_params:
raise ValueError("train_reader config's params can't be none")
    # add the dicts pending grid search to need_operate_params
need_operate_params = [opt_params, train_data_params]
all_combination_list = []
all_single_param_dict = []
dict_list_key_num = []
for one_operate_param in need_operate_params:
combination_list, single_param_dict = get_list_params(one_operate_param)
all_combination_list.extend(combination_list)
all_single_param_dict.append(single_param_dict)
dict_list_key_num.append(len(combination_list))
task_param_list = []
for params in product(*all_combination_list):
one_task_param = copy.deepcopy(params_dict)
        # add the grid-searched dicts to be updated to need_update_param; the order must match need_operate_params
need_update_param = [one_task_param["model"]["optimization"],
one_task_param["dataset_reader"]["train_reader"]["config"]]
i = 0
for index, one_single_param in enumerate(all_single_param_dict):
single_param = copy.deepcopy(one_single_param)
for one_grid in params[i:i + dict_list_key_num[index]]:
single_param.update(one_grid)
need_update_param[index].update(single_param)
i += dict_list_key_num[index]
task_param_list.append(one_task_param)
return task_param_list | a5f2b8249f9a50cad7da855d6a49a3225715fb00 | 23,317 |
import numpy as np
def cutoff_countmin_wscore(y, scores, score_cutoff, n_cm_buckets, n_hashes):
""" Learned Count-Min (use predicted scores to identify heavy hitters)
Args:
y: true counts of each item (sorted, largest first), float - [num_items]
scores: predicted scores of each item - [num_items]
score_cutoff: threshold for heavy hitters
n_cm_buckets: number of buckets of Count-Min
n_hashes: number of hash functions
Returns:
loss_avg: estimation error
space: space usage in bytes
"""
    if len(y) == 0:
        return 0, 0  # avoid division by zero when there are no items
y_ccm = y[scores > score_cutoff]
y_cm = y[scores <= score_cutoff]
loss_cf = 0 # put y_ccm into cutoff buckets, no loss
loss_cm = count_min(y_cm, n_cm_buckets, n_hashes)
assert len(y_ccm) + len(y_cm) == len(y)
loss_avg = (loss_cf * np.sum(y_ccm) + loss_cm * np.sum(y_cm)) / np.sum(y)
print('\tloss_cf %.2f\tloss_rd %.2f\tloss_avg %.2f' % (loss_cf, loss_cm, loss_avg))
space = len(y_ccm) * 4 * 2 + n_cm_buckets * n_hashes * 4
return loss_avg, space | 99ab1b6174d49ccf4f4517fe713f98cecd65d978 | 23,318 |
def test_lcc_like_epi():
"""
Takes about 5 mins with epicyclic
If burnin is too short (say 200 steps) won't actually find true solution
"""
TORB_FUNC = trace_epicyclic_orbit
mean_now = np.array([50., -100., 25., 1.1, -7.76, 2.25])
age = 10.
mean = TORB_FUNC(mean_now, times=-age)
dx = 5.
dv = 2.
covmatrix = np.identity(6)
covmatrix[:3,:3] *= dx**2
covmatrix[3:,3:] *= dv**2
true_comp = SphereComponent(
attributes={'mean':mean, 'covmatrix':covmatrix, 'age':age,},
trace_orbit_func=TORB_FUNC,
)
nstars = 1000
tiny_measurement_error = 1e-10
# import ipdb; ipdb.set_trace()
best_comp, chain, lnprob, data_dict = run_fit_helper(
true_comp=true_comp, starcounts=nstars,
measurement_error=tiny_measurement_error,
trace_orbit_func=TORB_FUNC,
run_name='lcc_like',
)
assert np.allclose(true_comp.get_mean(), best_comp.get_mean(),
atol=3.0)
assert np.allclose(true_comp.get_age(), best_comp.get_age(),
atol=1.0)
assert np.allclose(true_comp.get_covmatrix(),
best_comp.get_covmatrix(),
atol=5.0)
comp_filename = 'temp_data/{}_compfitter_lcc_like_true_and_best_comp.npy'.format(
PY_VERS
)
SphereComponent.store_raw_components(comp_filename, [true_comp, best_comp])
return true_comp, best_comp, lnprob | 43583e06741632aba41400ee1a6562cd6fc226be | 23,319 |
import numpy as np
def uniquePandasIndexMapping(inputColumn):
"""quickly mapps the unique name entries back to input entries
Keyword arguments:
inputDataToAssess -- a SINGLE column from a pandas dataframe, presumably with
duplications. Will create a frequency table and a mapping back to the source entries.
"""
inputColumn.sort_values(by=['company'], inplace=True)
sortedInputColumn=inputColumn.reset_index()
sortedInputColumn.rename(columns={"index":"userIndex"},inplace=True)
tableUniqueFullNameCounts=inputColumn.iloc[:,0].value_counts()
tableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
tableUniqueFullNameCounts.rename(columns={"company":"count","index":"company"},inplace=True)
tableUniqueFullNameCounts.sort_values(by=['company'], inplace=True)
sortedTableUniqueFullNameCounts=tableUniqueFullNameCounts.reset_index()
sortedTableUniqueFullNameCounts['inputIndexMapping']=''
currentSum=0
for index, row in sortedTableUniqueFullNameCounts.iterrows():
currentRange=np.arange(currentSum,currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index])
sortedTableUniqueFullNameCounts['inputIndexMapping'].iloc[index]=sortedInputColumn['userIndex'].iloc[currentRange].array
currentSum=currentSum+sortedTableUniqueFullNameCounts['count'].iloc[index]
    return sortedInputColumn, sortedTableUniqueFullNameCounts | c26fce9b8617963737c4b8dd05c0e8429c92daa3 | 23,320 |
def non_max_suppression(boxlist, thresh, max_output_size, scope=None):
"""Non maximum suppression.
This op greedily selects a subset of detection bounding boxes, pruning
away boxes that have high IOU (intersection over union) overlap (> thresh)
with already selected boxes. Note that this only works for a single class ---
to apply NMS to multi-class predictions, use MultiClassNonMaxSuppression.
Args:
boxlist: BoxList holding N boxes. Must contain a 'scores' field
representing detection scores.
thresh: scalar threshold
max_output_size: maximum number of retained boxes
scope: name scope.
Returns:
a BoxList holding M boxes where M <= max_output_size
Raises:
ValueError: if thresh is not in [0, 1]
"""
with tf.name_scope(scope, 'NonMaxSuppression'):
if not 0 <= thresh <= 1.0:
raise ValueError('thresh must be between 0 and 1')
if not isinstance(boxlist, box_list.BoxList):
raise ValueError('boxlist must be a BoxList')
if not boxlist.has_field('scores'):
raise ValueError('input boxlist must have \'scores\' field')
selected_indices = tf.image.non_max_suppression(
boxlist.get(), boxlist.get_field('scores'),
max_output_size, iou_threshold=thresh)
return gather(boxlist, selected_indices) | aa8990ea579ed5d4614561b617717c9e7b73f798 | 23,321 |
def gen_weekly_ccy_df( start,end ):
""" Generate weekly ccy data table
"""
currency_li =[ "USD_Index",
"EURUSD","GBPUSD","AUDUSD","CADUSD",
"JPYUSD",
"CNYUSD","HKDUSD","TWDUSD",
"KRWUSD","THBUSD","SGDUSD","MYRUSD",
"BRLUSD","INRUSD",
"CNY_raw","JPY_raw"
]
currency_df = get_histroical_ccy(start,end)
temp = currency_df[["JPYUSD","CNYUSD"]]
currency_df["EURUSD"] = 1/currency_df["USDEUR"]
currency_df["GBPUSD"] = 1/currency_df["USDGBP"]
currency_df["AUDUSD"] = 1/currency_df["USDAUD"]
currency_df = currency_df/currency_df.iloc[0]
currency_df["CNY_raw"] = temp["CNYUSD"]
currency_df["JPY_raw"] = temp["JPYUSD"]
return currency_df[currency_li],currency_li | cf856535d148378826fd99a6d8afef5e1eb77778 | 23,323 |
import numpy as np
def demean_and_normalise(points_a: np.ndarray,
points_b: np.ndarray):
"""
Independently centre each point cloud around 0,0,0, then normalise
both to [-1,1].
:param points_a: 1st point cloud
:type points_a: np.ndarray
:param points_b: 2nd point cloud
:type points_b: np.ndarray
:return: normalised points clouds, scale factor & translations
"""
translate_a = np.mean(points_a, axis=0)
translate_b = np.mean(points_b, axis=0)
a_demean = points_a - translate_a
b_demean = points_b - translate_b
norm_factor = np.max([np.max(np.abs(a_demean)),
np.max(np.abs(b_demean))])
a_normalised = a_demean / norm_factor
b_normalised = b_demean / norm_factor
scale_matrix = create_scaling_matrix(norm_factor)
translate_a_matrix = create_translation_matrix(translate_a)
translate_b_matrix = create_translation_matrix(translate_b)
return a_normalised, b_normalised, scale_matrix, \
translate_a_matrix, translate_b_matrix | 249bb8edf5ef423613748f0ce599c98e4f437960 | 23,324 |
import yaml
import six
def ParseCustomLevel(api_version):
"""Wrapper around ParseCustomLevel to accept api version."""
def VersionedParseCustomLevel(path):
"""Parse a YAML representation of custom level conditions.
Args:
path: str, path to file containing custom level expression
Returns:
string of CEL expression.
Raises:
ParseError: if the file could not be read into the proper object
"""
data = yaml.load_path(path)
if not data:
raise ParseError(path, 'File is empty')
messages = util.GetMessages(version=api_version)
message_class = messages.Expr
try:
expr = encoding.DictToMessage(data, message_class)
except Exception as err:
raise InvalidFormatError(path, six.text_type(err), message_class)
_ValidateAllCustomFieldsRecognized(path, expr)
return expr
return VersionedParseCustomLevel | e5e414ea29324233d4fc8291f0ea829176805d99 | 23,325 |
from numpy import cumsum, zeros
def specMergeMSA(*msa, **kwargs):
"""Returns an :class:`.MSA` obtained from merging parts of the sequences
of proteins present in multiple *msa* instances. Sequences are matched
based on species section of protein identifiers found in the sequence labels.
Order of sequences in the merged MSA will follow the order of sequences in the
first *msa* instance. Note that protein identifiers that map to multiple
sequences will be excluded."""
if len(msa) <= 1:
raise ValueError('more than one msa instances are needed')
lbl={}
try:
arrs = [m._getArray() for m in msa]
sets = []
labells = []
for m in msa:
aset = set([])
labell = {}
count = m.countLabel
for label in m.iterLabels():
lbl[label]=label.rsplit('_')[1]
if count(label) == 1 and lbl[label] not in aset:
aset.add(lbl[label])
labell[lbl[label]]=label
sets.append(aset)
labells.append(labell)
except AttributeError:
raise TypeError('all msa arguments must be MSA instances')
sets = iter(sets)
common = next(sets)
for aset in sets:
common = common.intersection(aset)
if not common:
return None
lens = [m.numResidues() for m in msa]
rngs = [0]
rngs.extend(cumsum(lens))
rngs = [(start, end) for start, end in zip(rngs[:-1], rngs[1:])]
idx_arr_rng = list(zip([m.getIndex for m in msa], arrs, rngs))
merger = zeros((len(common), sum(lens)), '|S1')
index = 0
labels = []
mapping = {}
    # in Python 3 the comprehension above does not leak `start`/`end`, so set the
    # column split between the two merged MSAs explicitly
    start, end = rngs[1]
    for lbl in common:
merger[index, 0:start]=list(str(msa[0][msa[0].getIndex(labells[0][lbl])]))
merger[index, start:end]=list(str(msa[1][msa[1].getIndex(labells[1][lbl])]))
label = labells[0][lbl]
labels.append(label)
mapping[label] = index
index += 1
merger = MSA(merger, labels=labels, mapping=mapping,
title=' + '.join([m.getTitle() for m in msa]))
return merger | fdb605347bdc88df4c844fa8765640dc5a91b88d | 23,326 |
def parse(filePath):
"""
Returns a full parsed Maya ASCII file.
:type filePath: str
:rtype: mason.asciiscene.AsciiScene
"""
return asciifileparser.AsciiFileParser(filePath).scene | 2f7d50724fcda1d4ef240e362ae6ee3f18bdfacd | 23,327 |
import numpy as np
def d_psi(t):
"""Compute the derivative of the variable transform from Ogata 2005."""
t = np.array(t, dtype=float)
a = np.ones_like(t)
mask = t < 6
t = t[mask]
a[mask] = (np.pi * t * np.cosh(t) + np.sinh(np.pi * np.sinh(t))) / (
1.0 + np.cosh(np.pi * np.sinh(t))
)
return a | 8b0fc652a60d2ba45623bc098b7e9d0fae2c7dbe | 23,330 |
from typing import Dict
from typing import Any
import io
def build_environ(request: HTTPRequest, errors: Errors) -> Dict[str, Any]:
"""
    Build the environ dict following https://www.python.org/dev/peps/pep-3333/
"""
headers = {
f"HTTP_{k.upper().replace('-','_')}": v for k, v in request.header.items()
}
environ = {
        # keep consistent with the Alibaba Cloud Function Compute HTTP trigger
"fc.context": request.context,
"fc.request_uri": request.path,
        # standard WSGI values
"wsgi.version": (1, 0),
"wsgi.url_scheme": "http",
"wsgi.input": io.BytesIO(request.body),
"wsgi.errors": errors,
"wsgi.multithread": False,
"wsgi.multiprocess": False,
"wsgi.run_once": True,
"SERVER_NAME": "127.0.0.1",
"SERVER_PORT": "80",
"SERVER_PROTOCOL": "HTTP/1.0",
"REQUEST_METHOD": request.method,
"SCRIPT_NAME": "",
"PATH_INFO": request.path,
"QUERY_STRING": "?" + "&".join([f"{k}={v}" for k, v in request.query.items()]),
"CONTENT_TYPE": headers.pop("HTTP_CONTENT_TYPE", ""),
"CONTENT_LENGTH": headers.pop("HTTP_CONTENT_LENGTH", ""),
}
environ.update(headers)
return environ | b2e98d726bf256b06d1c2f82679bd75ada7181cf | 23,331 |
import urllib2
def loadPage(url, filename):
    """
    Purpose: send a request to the url and get the server's response file
    url: the url to crawl
    filename: the name of the file being processed
    """
    print "Downloading " + filename
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"}
request = urllib2.Request(url, headers=headers)
return urllib2.urlopen(request).read() | 7250a42d03bc1af7e9f30aa22198f3d4a56605f3 | 23,332 |
def get_exporter_class():
"""Returns exporter class based on preferences and support."""
if _use_xlsx() is True:
return XLSXExporter
else:
return CSVExporter | 3d66e0e0abe8c936e2f86598427ff27180dac6b4 | 23,333 |
def get_twitter_token():
"""This is used by the API to look for the auth token and secret
it should use for API calls. During the authorization handshake
a temporary set of token and secret is used, but afterwards this
function has to return the token and secret. If you don't want
to store this in the database, consider putting it into the
session instead.
"""
return session.get('twitter_token') | 218f16141473e76c4318870bec9516c77f1dfe1b | 23,334 |
def is_text_serializer(serializer):
"""Checks whether a serializer generates text or binary."""
return isinstance(serializer.dumps({}), str) | f08f40662da7fd34f5984028e601d664cac943df | 23,335 |
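A quick illustration of the check above using two standard-library modules that both expose a `dumps` callable: `json` serializes to text, `pickle` to bytes.

```python
import json
import pickle

print(is_text_serializer(json))    # True  -> json.dumps({}) returns '{}'
print(is_text_serializer(pickle))  # False -> pickle.dumps({}) returns bytes
```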
def plot_power(ngroups, mesh_shape, directory, mode="show"):
"""Plot the integrated fission rates from OpenMC and OpenMOC, as well as
the relative and absolute error of OpenMOC relative to OpenMC.
Parameters:
-----------
ngroups: int; number of energy groups
mesh_shape: str; name of the mesh shape
directory: str; path to the data
"""
mode = _check_mode(mode)
directory, shape = _check_params(directory, mesh_shape)
montecarlo_power = np.zeros(shape)
moc_power = np.zeros(shape)
# Integrate over all energy groups
for g in range(ngroups):
rates_name = "fission_{:02d}-of-{}_{}".format(g+1, ngroups, mesh_shape)
fname = directory + "montecarlo_" + rates_name
montecarlo_group_rates = np.loadtxt(fname)
montecarlo_power += montecarlo_group_rates
fname = directory + "moc_" + rates_name
moc_group_rates = np.loadtxt(fname)
moc_power += moc_group_rates
# Filter out results that are essentially zero
mc_mean = np.nanmean(montecarlo_power)*0.1
indices = (montecarlo_power < mc_mean) + (moc_power < mc_mean)
montecarlo_power[indices] = np.nan
moc_power[indices] = np.nan
# Normalize
montecarlo_power /= np.nanmean(montecarlo_power)
moc_power /= np.nanmean(moc_power)
# Find the errors in the normalized distributions
errors = np.divide(moc_power - montecarlo_power, montecarlo_power/100)
pcmerr = (moc_power - montecarlo_power)*100
if mode == "return":
return montecarlo_power, moc_power, errors, pcmerr
# Plot OpenMC's fission rates in the upper left subplot
plt.subplot(231)
plt.imshow(montecarlo_power.squeeze(), interpolation='none', cmap='jet')
plt.title('OpenMC Power Distribution\n{} groups'.format(ngroups))
cmin = min(np.nanmin(montecarlo_power), np.nanmin(moc_power))
cmax = max(np.nanmax(montecarlo_power), np.nanmax(moc_power))
plt.clim(cmin, cmax)
plt.colorbar()
# Plot OpenMOC's fission rates in the upper right subplot
plt.subplot(232)
plt.imshow(moc_power.squeeze(), interpolation='none', cmap='jet')
plt.title('OpenMOC Power Distribution\n{} groups'.format(ngroups))
plt.clim(cmin, cmax)
plt.colorbar()
    # Plot the relative error in the third subplot
plt.subplot(233)
pct = plt.imshow(errors.squeeze(), interpolation='none', cmap='jet')
posmax = np.nanmax(errors)
negmax = np.nanmin(errors)
cmax = np.ceil(max(abs(posmax), abs(negmax)))
plt.clim(-cmax, +cmax)
plt.title('Relative error (%)')
plt.colorbar(pct)
    # Plot the absolute error in the fourth subplot
plt.subplot(234)
pct = plt.imshow(pcmerr.squeeze(), interpolation='none', cmap='jet')
posmax = np.nanmax(pcmerr)
negmax = np.nanmin(pcmerr)
cmax = np.ceil(max(abs(posmax), abs(negmax)))
plt.clim(-cmax, +cmax)
plt.title('Absolute error (%)')
plt.colorbar(pct)
if mode == "show":
plt.show()
elif mode == "save":
# Save and/or show the plot
plt.tight_layout()
fname = directory + "power_{}-groups.pdf".format(ngroups)
plt.savefig(fname)
print("Figure saved to:", fname)
return montecarlo_power, moc_power, errors, pcmerr | e9db852e840d2f3f8013f006acc191276a74e9d7 | 23,336 |
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ):
"""glDeleteFramebuffersEXT( framebuffers ) -> None
"""
if framebuffers is None:
framebuffers = arrays.GLuintArray.asArray( n )
n = arrays.GLuintArray.arraySize( framebuffers )
return baseOperation( n, framebuffers ) | 514fe96d8088210bfe1251a8a9c7c93856f19504 | 23,337 |
def backtostr(dayback=1, format="%Y/%m/%d", thedate=date.today()):
"""Print backto datetime in string format."""
return(backto(dayback=dayback, thedate=thedate).strftime(format)) | c79b10962537d9eef939e7f49697275f31e900e2 | 23,339 |
def no_conflict_require_POST(f):
"""
Catches resource conflicts on save and returns a 409 error.
Also includes require_POST decorator
"""
@require_POST
@wraps(f)
def _no_conflict(*args, **kwargs):
try:
return f(*args, **kwargs)
except ResourceConflict:
return HttpResponse(status=409)
return _no_conflict | 98a28b5fbc2824eaa5d5020f8be1d55878974fc4 | 23,340 |
def Transition_rep(source_State_name, target_State_name):
"""Representation of a transition
:param source_State_name: The sequence of "name" values of State objects referred to by attribute "source" in this Transition
:type source_State_name: Array
:param target_State_name: The sequence of "name" values of State objects referred to by attribute "target" in this Transition
:type target_State_name: Array
"""
return [f' {source_name}--{target_name}' for source_name, target_name in zip(source_State_name, target_State_name)] | 2e5f7048722997e0931fd6ec3a2d9e880a160359 | 23,342 |
from typing import List
from typing import Any
import optuna
def search_model(trial: optuna.trial.Trial) -> List[Any]:
"""Search model structure from user-specified search space."""
model = []
n_stride = 0
MAX_NUM_STRIDE = 5
UPPER_STRIDE = 2 # 5(224 example): 224, 112, 56, 28, 14, 7
n_layers = trial.suggest_int("n_layers", 8, 12)
stride = 1
input_max = 64
imput_min = 32
module_info = {}
    ### the number of layers to stack is also part of the search
for i in range(n_layers):
out_channel = trial.suggest_int(f"{i+1}units", imput_min, input_max)
block = trial.suggest_categorical(
f"m{i+1}", ["Conv", "DWConv", "InvertedResidualv2", "InvertedResidualv3"]
)
repeat = trial.suggest_int(f"m{i+1}/repeat", 1, 5)
m_stride = trial.suggest_int(f"m{i+1}/stride", low=1, high=UPPER_STRIDE)
if m_stride == 2:
stride += 1
if n_stride == 0:
m_stride = 2
if block == "Conv":
activation = trial.suggest_categorical(
f"m{i+1}/activation", ["ReLU", "Hardswish"]
)
# Conv args: [out_channel, kernel_size, stride, padding, groups, activation]
model_args = [out_channel, 3, m_stride, None, 1, activation]
elif block == "DWConv":
activation = trial.suggest_categorical(
f"m{i+1}/activation", ["ReLU", "Hardswish"]
)
# DWConv args: [out_channel, kernel_size, stride, padding_size, activation]
model_args = [out_channel, 3, 1, None, activation]
elif block == "InvertedResidualv2":
c = trial.suggest_int(
f"m{i+1}/v2_c", low=imput_min, high=input_max, step=16
)
t = trial.suggest_int(f"m{i+1}/v2_t", low=1, high=4)
model_args = [c, t, m_stride]
elif block == "InvertedResidualv3":
kernel = trial.suggest_int(f"m{i+1}/kernel_size", low=3, high=5, step=2)
t = round(
trial.suggest_float(f"m{i+1}/v3_t", low=1.0, high=6.0, step=0.1), 1
)
c = trial.suggest_int(f"m{i+1}/v3_c", low=imput_min, high=input_max, step=8)
se = trial.suggest_categorical(f"m{i+1}/v3_se", [0, 1])
hs = trial.suggest_categorical(f"m{i+1}/v3_hs", [0, 1])
# k t c SE HS s
model_args = [kernel, t, c, se, hs, m_stride]
in_features = out_channel
model.append([repeat, block, model_args])
if i % 2:
input_max *= 2
input_max = min(input_max, 160)
module_info[f"block{i+1}"] = {"type": block, "repeat": repeat, "stride": stride}
# last layer
last_dim = trial.suggest_int("last_dim", low=128, high=1024, step=128)
# We can setup fixed structure as well
model.append([1, "Conv", [last_dim, 1, 1]])
model.append([1, "GlobalAvgPool", []])
model.append([1, "FixedConv", [6, 1, 1, None, 1, None]])
return model, module_info | 228180bc1f02c793273a763db271889f4dcd4f26 | 23,344 |
def create_search_forms(name, language_code, script_code):
"""Return a list of names suitable for searching.
Arguments:
name -- string name
language_code -- string code of language
script_code -- string code of script
"""
# QAZ: It would be useful if something could be done here (or
# wherever is most appropriate) to handle the case where names are
# assembled without spaces between the parts (eg, Chinese), since
# this means that whatever part(s) come after the first will not
# be found in a search.
name = str(name)
search_forms = [name]
if script_code == 'Latn':
ascii_form = asciify_name(name)
if ascii_form and ascii_form != name:
search_forms.append(ascii_form)
macron_as_double_form = demacronise_name(name)
if macron_as_double_form != name:
search_forms.append(macron_as_double_form)
abbreviated_form = abbreviate_name(name, language_code, script_code)
if abbreviated_form != name:
search_forms.append(abbreviated_form)
unpunctuated_form = unpunctuate_name(name)
if unpunctuated_form != name:
search_forms.append(unpunctuated_form)
return search_forms | a417fe9ab37e544c094341fcb7f1249feaed43c6 | 23,345 |
def iv_params(*, N_s, T_degC, I_ph_A, I_rs_1_A, n_1, I_rs_2_A, n_2, R_s_Ohm, G_p_S,
minimize_scalar_bounded_options=minimize_scalar_bounded_options_default,
newton_options=newton_options_default):
"""
Compute I-V curve parameters.
Inputs (any broadcast-compatible combination of python/numpy scalars and numpy arrays):
Same as P_mp().
Outputs (device-level, at each combination of broadcast inputs, return type is numpy.float64 for all scalar inputs):
dict containing the outputs of FF() with the addition of:
R_oc_Ohm resistance at open circuit
R_sc_Ohm resistance at short circuit
"""
result = FF(N_s=N_s, T_degC=T_degC, I_ph_A=I_ph_A, I_rs_1_A=I_rs_1_A, n_1=n_1, I_rs_2_A=I_rs_2_A, n_2=n_2,
R_s_Ohm=R_s_Ohm, G_p_S=G_p_S, minimize_scalar_bounded_options=minimize_scalar_bounded_options,
newton_options=newton_options)
R_oc_Ohm = R_oc(N_s=N_s, T_degC=T_degC, I_ph_A=I_ph_A, I_rs_1_A=I_rs_1_A, n_1=n_1, I_rs_2_A=I_rs_2_A, n_2=n_2,
R_s_Ohm=R_s_Ohm, G_p_S=G_p_S, newton_options=newton_options)['R_oc_Ohm']
R_sc_Ohm = R_sc(N_s=N_s, T_degC=T_degC, I_ph_A=I_ph_A, I_rs_1_A=I_rs_1_A, n_1=n_1, I_rs_2_A=I_rs_2_A, n_2=n_2,
R_s_Ohm=R_s_Ohm, G_p_S=G_p_S, newton_options=newton_options)['R_sc_Ohm']
result.update({'R_oc_Ohm': R_oc_Ohm, 'R_sc_Ohm': R_sc_Ohm})
return result | 7998b52f02482d79bae6c0732e99ed5e151326fa | 23,346 |
def update_domain(
uuid, name=None, disabled=None, project_id=None, user_id=None):
"""Update an existing domain."""
res = get_domain(uuid=uuid)
if disabled is not None:
res['disabled'] = disabled
if name is not None:
res['name'] = name
if project_id is not None:
res['project_id'] = project_id
if user_id is not None:
res['user_id'] = user_id
res.save()
return res | 7cdc53c96e17d79dd0998165b87dd745b8afd73e | 23,348 |
def ja_of(tree: Tree) -> str:
"""tree string in the Japanese CCGBank's format
Args:
tree (Tree): tree object
Returns:
str: tree string in Japanese CCGBank's format
"""
def rec(node):
if node.is_leaf:
cat = node.cat
word = normalize(node.word)
token = node.token
poss = [
token.get(pos, '*')
for pos in ('pos', 'pos1', 'pos2', 'pos3')
]
poss = [pos for pos in poss if pos != '*']
pos = '-'.join(poss) if len(poss) else '_'
inflections = [
token.get(i, '*')
for i in ('inflectionForm', 'inflectionType')
]
inflections = [i for i in inflections if i != '*']
inflection = '-'.join(inflections) if len(inflections) else '_'
return f'{{{cat} {word}/{word}/{pos}/{inflection}}}'
else:
children = ' '.join(rec(child) for child in node.children)
return f'{{{node.op_symbol} {node.cat} {children}}}'
return rec(tree) | 7722e9de3b31354be4820cc32a8949ffac333e5f | 23,349 |
def cut_flowlines_at_points(flowlines, joins, points, next_lineID):
"""General method for cutting flowlines at points and updating joins.
Only new flowlines are returned; any that are not cut by points are omitted.
Parameters
----------
flowlines : GeoDataFrame
joins : DataFrame
flowline joins
points : ndarray of MultiPoint or Point geometries
expected to match to flowlines
next_lineID : int
id of next flowline to be created
Returns
-------
(GeoDataFrame, DataFrame, ndarray)
new flowlines, updated joins, remove_ids (original flowline IDs that
need to be removed before merging in returned flowlines)
"""
flowlines = flowlines.copy()
joins = joins.copy()
flowlines["geometry"] = cut_lines_at_multipoints(
flowlines.geometry.values.data, points
)
# discard any that have only one segment; they weren't split and we don't want
# to update them. Split the rest into parts.
ix = pg.get_num_geometries(flowlines.geometry.values.data) > 1
flowlines = explode(
flowlines.loc[ix].reset_index().rename(columns={"lineID": "origLineID"})
).reset_index(drop=True)
# recalculate length and sinuosity
flowlines["length"] = pg.length(flowlines.geometry.values.data).astype("float32")
flowlines["sinuosity"] = calculate_sinuosity(flowlines.geometry.values.data).astype(
"float32"
)
# calculate new ID
flowlines["lineID"] = (flowlines.index + next_lineID).astype("uint32")
### Update flowline joins
# transform new lines to create new joins at the upstream / downstream most
# points of the original line
l = flowlines.groupby("origLineID").lineID
# the first new line per original line is the furthest upstream, so use its
# ID as the new downstream ID for anything that had this origLineID as its downstream
first = l.first().rename("new_downstream_id")
# the last new line per original line is the furthest downstream...
last = l.last().rename("new_upstream_id")
# Update existing joins with the new lineIDs we created at the upstream or downstream
# ends of segments we just created
joins = update_joins(
joins, first, last, downstream_col="downstream_id", upstream_col="upstream_id",
)
### Create new line joins for any that weren't inserted above
# Transform all groups of new line IDs per original lineID
# into joins structure
atts = (
flowlines.groupby("origLineID")[["NHDPlusID", "loop", "HUC4"]]
.first()
.rename(columns={"NHDPlusID": "upstream"})
)
# function to make upstream / downstream side of join
pairs = lambda a: pd.Series(zip(a[:-1], a[1:]))
new_joins = (
l.apply(pairs)
.apply(pd.Series)
.reset_index()
.rename(columns={0: "upstream_id", 1: "downstream_id"})
.join(atts, on="origLineID")
)
# NHDPlusID is same for both sides
new_joins["downstream"] = new_joins.upstream
new_joins["type"] = "internal"
new_joins["marine"] = False
new_joins = new_joins[
[
"upstream",
"downstream",
"upstream_id",
"downstream_id",
"type",
"loop",
"marine",
"HUC4",
]
]
joins = (
joins.append(new_joins, ignore_index=True, sort=False)
.sort_values(["downstream_id", "upstream_id"])
.reset_index(drop=True)
)
remove_ids = flowlines.origLineID.unique()
flowlines = flowlines.drop(columns=["origLineID"]).set_index("lineID")
return flowlines, joins, remove_ids | e535f6d5e073955b74784f5ce3121a929f9bc200 | 23,350 |
def export(gen, directory, file_prefix='{uid}-', **kwargs):
"""
Export a stream of documents to nxstxm_baseline.
.. note::
This can alternatively be used to write data to generic buffers rather
than creating files on disk. See the documentation for the
``directory`` parameter below.
Parameters
----------
gen : generator
expected to yield ``(name, document)`` pairs
directory : string, Path or Manager.
For basic uses, this should be the path to the output directory given
as a string or Path object. Use an empty string ``''`` to place files
in the current working directory.
In advanced applications, this may direct the serialized output to a
memory buffer, network socket, or other writable buffer. It should be
an instance of ``suitcase.utils.MemoryBufferManager`` and
``suitcase.utils.MultiFileManager`` or any object implementing that
        interface. See the suitcase documentation at
https://nsls-ii.github.io/suitcase for details.
file_prefix : str, optional
The first part of the filename of the generated output files. This
string may include templates as in ``{proposal_id}-{sample_name}-``,
which are populated from the RunStart document. The default value is
``{uid}-`` which is guaranteed to be present and unique. A more
descriptive value depends on the application and is therefore left to
the user.
**kwargs : kwargs
        Keyword arguments to be passed through to the underlying I/O library.
Returns
-------
artifacts : dict
dict mapping the 'labels' to lists of file names (or, in general,
whatever resources are produced by the Manager)
Examples
--------
Generate files with unique-identifer names in the current directory.
>>> export(gen, '')
Generate files with more readable metadata in the file names.
>>> export(gen, '', '{plan_name}-{motors}-')
Include the experiment's start time formatted as YY-MM-DD_HH-MM.
>>> export(gen, '', '{time:%%Y-%%m-%%d_%%H:%%M}-')
Place the files in a different directory, such as on a mounted USB stick.
>>> export(gen, '/path/to/my_usb_stick')
"""
with Serializer(directory, file_prefix, **kwargs) as serializer:
for item in gen:
#print('ITEM:', item)
serializer(*item)
return serializer.artifacts | ec1b1237f63fc29c8e8d1b14283b392523e86426 | 23,351 |
def services():
"""
Returns the grader-notebook list used as services in jhub
Response: json
example:
```
{
services: [{"name":"<course-id", "url": "http://grader-<course-id>:8888"...}],
groups: {"formgrade-<course-id>": ["grader-<course-id>"] }
}
```
"""
services = GraderService.query.all()
# format a json
services_resp = []
groups_resp = {}
for s in services:
services_resp.append({
'name': s.name,
'url': s.url,
'oauth_no_confirm': s.oauth_no_confirm,
'admin': s.admin,
'api_token': s.api_token
})
# add the jhub user group
groups_resp.update({f'formgrade-{s.course_id}': [f'grader-{s.course_id}']})
return jsonify(services=services_resp, groups=groups_resp) | b1fbc2c90b344f75027538037c5d6786bda810c8 | 23,353 |
def chunks(l, k):
"""
Take a list, l, and create k sublists.
"""
n = len(l)
return [l[i * (n // k) + min(i, n % k):(i+1) * (n // k) + min(i+1, n % k)] for i in range(k)] | 7cf0c39941ed8f358c576046154af6b3ee54b70a | 23,354 |
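The slicing above distributes the remainder over the first `n % k` sublists; a small standalone check of that behaviour, re-applying the same expression:

```python
# 10 items into 3 sublists: the first sublist absorbs the remainder.
l, k = list(range(10)), 3
n = len(l)
parts = [l[i * (n // k) + min(i, n % k):(i + 1) * (n // k) + min(i + 1, n % k)]
         for i in range(k)]
print(parts)  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```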
import numpy as np
def bfs(adj, src, dst, cache=None):
"""BFS search from source to destination. Check whether a path exists, does
not return the actual path.
Work on directed acyclic graphs where we assume that there is no path to the
node itself.
Args:
adj: Adjacency matrix.
src: Source node index, 0-based.
dst: Destination node index, 0-based.
cache: 2D matrix, cache[i, j] = 1 indicates path exists between two node
i and j. cache[i, j] = -1 indicates path does not exists between two node
i and j. chace[i, j] = 0 indicates unknown.
Returns:
found: A path is found between source and destination.
"""
if src == dst: return False
num_nodes = adj.shape[0]
if num_nodes == 0:
return False
if src >= num_nodes or dst >= num_nodes:
raise Exception("Index must be smaller than the number of nodes.")
if num_nodes == 1:
return False
# Whether a node has been visited, if not negative, the parent.
parent = np.zeros([num_nodes], dtype=np.int64) - 1
nodes_to_visit = [src]
found = False
# BFS loop.
while len(nodes_to_visit) > 0:
cur = nodes_to_visit.pop(0)
if cur == dst:
found = True
break
if cache is not None:
if cache[cur, dst] == 1:
found = True
break
elif cache[cur, dst] == -1:
continue
for jj in range(num_nodes):
if adj[cur, jj] == 1:
if parent[jj] == -1:
nodes_to_visit.append(jj)
parent[jj] = cur
if not found:
# Add the source node to the cache.
if cache is not None:
#log.info(("Setting -1", src, dst), verbose=2)
for ii in range(num_nodes):
if parent[ii] >= 0:
cache[ii, dst] = -1
cache[src, dst] = -1
return False
else:
# Add all the nodes to the cache.
if cache is not None:
# Backtrack.
while cur != src:
cur = parent[cur]
cache[cur, dst] = 1
#log.info(("Setting 1", cur, dst), verbose=2)
cache[src, dst] = 1
#log.info(("Setting 1", src, dst), verbose=2)
return True | 5e224ffa575dd0bd471142023e828a8e36d1782e | 23,355 |
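A tiny sanity check for the reachability search above, on a hand-built 4-node DAG (0 -> 1 -> 2, node 3 isolated); the cache ends up recording both positive and negative reachability:

```python
import numpy as np

adj = np.zeros((4, 4), dtype=np.int64)
adj[0, 1] = 1
adj[1, 2] = 1
cache = np.zeros((4, 4), dtype=np.int64)

print(bfs(adj, 0, 2, cache))  # True  -> cache[0, 2] and cache[1, 2] become 1
print(bfs(adj, 0, 3, cache))  # False -> cache[0, 3], cache[1, 3], cache[2, 3] become -1
```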
import _ast
import ast
def get_source_ast(name: str) -> _ast.Module:
"""
Return ast of source code
"""
with open(name, "r") as f:
data = f.read()
return ast.parse(data) | 8e0826175df538bdd894f05b0c6cf143b0b0f69b | 23,356 |
import math
def floor(base):
"""Get the floor of a number"""
return math.floor(float(base)) | 8b00ffccf30765f55ff024b35de364c617b4b20c | 23,358 |
def TranslateSecureTagsForFirewallPolicy(client, secure_tags):
"""Returns a list of firewall policy rule secure tags, translating namespaced tags if needed.
Args:
client: compute client
secure_tags: array of secure tag values
Returns:
List of firewall policy rule secure tags
"""
ret_secure_tags = []
for tag in secure_tags:
if tag.startswith('tagValues/'):
ret_secure_tags.append(
client.messages.FirewallPolicyRuleSecureTag(name=tag))
else:
ret_secure_tags.append(
client.messages.FirewallPolicyRuleSecureTag(
name=tag_utils.GetTagValueFromNamespacedName(tag).name))
return ret_secure_tags | 5cbf71885c167d5c9d0dcae3294be017512db73a | 23,359 |
from typing import Callable
from typing import Dict
from typing import Any
import functools
def memoize(func: Callable):
"""
A decorator that memoizes a function by storing its inputs and outputs.
Calling the function again with the same arguments will return the cached
output.
This function is somewhat more permissive than
:func:`functools.lru_cache` in what kinds of arguments can be cached,
but incurs a larger runtime overhead as a penalty.
"""
memo: Dict[Any, Any] = {}
@functools.wraps(func)
def memoizer(*args, **kwargs):
key = _hash_args_kwargs(*args, **kwargs)
try:
v = memo[key]
except KeyError:
v = memo[key] = func(*args, **kwargs)
return v
return memoizer | 7ba9500c57c867abcb43bf5ffe58087270ebef08 | 23,361 |
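A minimal usage sketch, assuming the `memoize` decorator above together with the `_hash_args_kwargs` helper it relies on (not shown in this snippet); the hypothetical `slow_square` only does its work once per distinct argument:

```python
@memoize
def slow_square(x: int) -> int:
    print("computing...")  # printed only on the first call for a given x
    return x * x

slow_square(4)  # computes and caches
slow_square(4)  # returned from the cache, nothing printed
```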
def mini_batch(positive_rdd, negative_rdd, num_iterations):
"""get the positive and negative classes with index for mini-batch"""
# mini-batch preparation
pos_num = int(batch_size / 46)
neg_num = pos_num * 45
i = num_iterations % int(74 / pos_num)
# get the new mini-batch rdd for this iteration
new_rdd = positive_rdd. \
filter(lambda x: i * pos_num <= x[1] < (i + 1) * pos_num). \
map(lambda x: (x[0][0], x[0][1])).union(
negative_rdd.filter(
lambda x: i * neg_num <= x[1] < (i + 1) * neg_num).map(
lambda x: (x[0][0], x[0][1])))
return new_rdd | e523c170d66cf9bdabf5f12ab78af417c59333a2 | 23,362 |
import math
def draw_polygon(img, max_sides=8, min_len=32, min_label_len=64):
""" Draw a polygon with a random number of corners and return the position
of the junctions + line map.
Parameters:
max_sides: maximal number of sides + 1
"""
num_corners = random_state.randint(3, max_sides)
min_dim = min(img.shape[0], img.shape[1])
rad = max(random_state.rand() * min_dim / 2, min_dim / 10)
# Center of a circle
x = random_state.randint(rad, img.shape[1] - rad)
y = random_state.randint(rad, img.shape[0] - rad)
# Convert length constrain to pixel if given float number
if isinstance(min_len, float) and min_len <= 1.:
min_len = int(min_dim * min_len)
if isinstance(min_label_len, float) and min_label_len <= 1.:
min_label_len = int(min_dim * min_label_len)
# Sample num_corners points inside the circle
slices = np.linspace(0, 2 * math.pi, num_corners + 1)
angles = [slices[i] + random_state.rand() * (slices[i+1] - slices[i])
for i in range(num_corners)]
points = np.array(
[[int(x + max(random_state.rand(), 0.4) * rad * math.cos(a)),
int(y + max(random_state.rand(), 0.4) * rad * math.sin(a))]
for a in angles])
# Filter the points that are too close or that have an angle too flat
norms = [np.linalg.norm(points[(i-1) % num_corners, :]
- points[i, :]) for i in range(num_corners)]
mask = np.array(norms) > 0.01
points = points[mask, :]
num_corners = points.shape[0]
corner_angles = [angle_between_vectors(points[(i-1) % num_corners, :] -
points[i, :],
points[(i+1) % num_corners, :] -
points[i, :])
for i in range(num_corners)]
mask = np.array(corner_angles) < (2 * math.pi / 3)
points = points[mask, :]
num_corners = points.shape[0]
# Get junction pairs from points
segments = np.zeros([0, 4])
# Used to record all the segments no matter we are going to label it or not.
segments_raw = np.zeros([0, 4])
for idx in range(num_corners):
if idx == (num_corners - 1):
p1 = points[idx]
p2 = points[0]
else:
p1 = points[idx]
p2 = points[idx + 1]
segment = np.concatenate((p1, p2), axis=0)
# Only record the segments longer than min_label_len
seg_len = np.sqrt(np.sum((p1 - p2) ** 2))
if seg_len >= min_label_len:
segments = np.concatenate((segments, segment[None, ...]), axis=0)
segments_raw = np.concatenate((segments_raw, segment[None, ...]),
axis=0)
# If not enough corner, just regenerate one
if (num_corners < 3) or check_segment_len(segments_raw, min_len):
return draw_polygon(img, max_sides, min_len, min_label_len)
# Get junctions from segments
junctions_all = np.concatenate((segments[:, :2], segments[:, 2:]), axis=0)
if junctions_all.shape[0] == 0:
junc_points = None
line_map = None
else:
junc_points = np.unique(junctions_all, axis=0)
# Get the line map
line_map = get_line_map(junc_points, segments)
corners = points.reshape((-1, 1, 2))
col = get_random_color(int(np.mean(img)))
cv.fillPoly(img, [corners], col)
return {
"points": junc_points,
"line_map": line_map
} | 1dc1cdb18424c8d47ee95777d52713c884eedc5d | 23,363 |
def order_budget_update(request, order_id):
"""
Update budget for order
"""
serializer = OrderBudgetSerializer(data=request.data)
if serializer.is_valid(raise_exception=True):
order = get_object_or_404(Order, pk=order_id)
budget = serializer.validated_data['budget']
order.budget = budget
order.save()
serializer = OrderSerializer(order)
return Response(serializer.data, status=status.HTTP_202_ACCEPTED) | adc708d7cbc429ea6e5d81c5f9db60d0fa5f298a | 23,364 |
def analyse_readability_metrics(article_text):
"""
Use the textstat library to report multiple readability measures.
The readability metrics analysed are:
* The Flesch Reading Ease Score. A score from 100 (very easy to read) to 0 (very confusing).
* The grade score using the Flesch-Kincaid Grade Formula. For example a score of 9.3 means that a ninth grader would be able to read the document.
* The FOG index of the given text
* The SMOG index of the given text
* The ARI(Automated Readability Index) which outputs a number that approximates the grade level needed to comprehend the text. For example if the ARI is 6.5, then the grade level to comprehend the text is 6th to 7th grade
* The grade level of the text using the Coleman-Liau Formula
* The grade level using the Lisear Write Formula
* The grade level using the New Dale-Chall Formula.
:param article_text: The article text to operate on.
:return: An object containing all measures
"""
sylls = textstat.syllable_count(article_text)
words = textstat.lexicon_count(article_text)
sents = textstat.sentence_count(article_text)
if article_text != "":
"""
returns the Flesch Reading Ease Score. Following table is helpful to access the ease of readability in a document.
* 90-100 : Very Easy
* 80-89 : Easy
* 70-79 : Fairly Easy
* 60-69 : Standard
* 50-59 : Fairly Difficult
* 30-49 : Difficult
* 0-29 : Very Confusing
"""
flesch = textstat.flesch_reading_ease(article_text)
"""
returns the grade score using the Flesch-Kincaid Grade Formula.
For example a score of 9.3 means that a ninth grader would be able to read the document.
"""
flesch_k = textstat.flesch_kincaid_grade(article_text)
"""
returns the FOG index of the given text.
"""
fog = textstat.gunning_fog(article_text)
"""
return the SMOG index of the given text.
"""
smog = textstat.smog_index(article_text)
"""
returns the ARI(Automated Readability Index) which outputs a number that approximates the grade level needed to comprehend the text.
For example if the ARI is 6.5, then the grade level to comprehend the text is 6th to 7th grade
"""
ari = textstat.automated_readability_index(article_text)
"""
returns the grade level of the text using the Coleman-Liau Formula
"""
coleman_l = textstat.coleman_liau_index(article_text)
"""
returns the grade level using the Lisear Write Formula
"""
linsear_write = textstat.linsear_write_formula(article_text)
"""
Different from other tests, since it uses a lookup table of most commonly used 3000 english words.
Thus it returns the grade level using the New Dale-Chall Formula.
"""
dale_chall = textstat.dale_chall_readability_score(article_text)
"""
Based upon all the above tests returns the best grade level under which the given text belongs to.
"""
overall_consensus = textstat.text_standard(article_text)
return {
"syllable_count": sylls,
"word_count": words,
"sentence_count": sents,
"flesch_reading_ease": flesch,
"flesch_kincaid_grade": flesch_k,
"gunning_fog": fog,
"smog_index": smog,
"automated_readability_index": ari,
"coleman_liau_index": coleman_l,
"linsear_write_formula": linsear_write,
"dale_chall_readability_score": dale_chall,
"overall_consensus_grade": overall_consensus
} | 32a89cf4788d469a164f9cdd606f1675ddf219b8 | 23,365 |
import numpy as np
def dx(data):
"""
Derivative by central difference
Edges are takes as difference between nearest points
Parameters
----------
data : ndarray
Array of NMR data.
Returns
-------
ndata : ndarray
Derivate of NMR data.
"""
z = np.empty_like(data)
z[..., 0] = data[..., 1] - data[..., 0] # first point
z[..., -1] = data[..., -1] - data[..., -2] # last point
z[..., 1:-1] = data[..., 2:] - data[..., :-2] # interior
return z | 6e88618750ff69662ec7f41ecfc50efbaab717db | 23,366 |
def grad(w):
""" Dao ham """
N = Xbar.shape[0]
return 1/N * Xbar.T.dot(Xbar.dot(w) - y) | 8bcce8c10eeb7ffae6e2af3e2f273f03b6984885 | 23,367 |
import random
def random_binary():
"""
    Test the cached view decorator by setting a cache key for this view
:return:
"""
return [random.randrange(0, 2) for i in range(500)] | 3c30014d1222c136cb7d3d2fbe6e0d972decc776 | 23,369 |
def remove_from_end(string, text_to_remove):
"""
Remove a String from the end of a string if it exists
Args:
string (str): string to edit
text_to_remove (str): the text to remove
Returns: the string with the text removed
"""
if string is not None and string.endswith(text_to_remove):
return string[:-len(text_to_remove)]
return string | 19cebd002fcf5aea5290a6998129427363342319 | 23,370 |
def get_node_number(self, node, typ) -> str:
"""Get the number for the directive node for HTML."""
ids = node.attributes.get("ids", [])[0]
if isinstance(self, LaTeXTranslator):
docname = find_parent(self.builder.env, node, "section")
else:
docname = node.attributes.get("docname", "")
# Latex does not have builder.fignumbers
fignumbers = self.builder.env.toc_fignumbers.get(docname, {})
number = fignumbers.get(typ, {}).get(ids, ())
return ".".join(map(str, number)) | f88227ef727d45d14cd9343ee26bdbfb15f6a2fc | 23,372 |
def get_client(client, aws_access_key_id, aws_secret_access_key, region=None):
"""Shortcut for getting an initialized instance of the boto3 client."""
return boto3.client(
client,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
region_name=region
) | 4ebed5da9ca146b79563c0efb5bf04e7bc13f791 | 23,374 |
import logging
import traceback
import json
def main(req: func.HttpRequest) -> func.HttpResponse:
""" main function for status/http """
logging.info('Status processed a request.')
try:
response = get_http_response_by_status(200)
if req.get_body() and len(req.get_body()):
response = get_http_response_by_status(202)
headers = {
"Access-Control-Allow-Origin": "*"
}
return func_json_response(response, headers, "message")
#pylint: disable=broad-except
except Exception as err:
logging.error("Status HTTP error occurred: %s", traceback.format_exc())
msg_error = f"This endpoint encountered an error. {err}"
func_response = json.dumps(jsend.error(msg_error))
return func.HttpResponse(func_response, status_code=500) | ac464fd479c0df9f9d462f37d884542bf670dfef | 23,375 |
def _variable_map_by_name(variables):
"""
Returns Dict,representing referenced variable fields mapped by name.
Keyword Parameters:
variables -- list of 'variable_python_type' Warehouse support DTOs
>>> from pprint import pprint
>>> var1 = { 'column':'frob_hz', 'title':'Frobniz Resonance (Hz)'
... ,'python_type': 'float'
... ,'table': 'foo_fact'}
>>> list1 = [var1]
>>> pprint(_variable_map_by_name(list1))
{'frob_hz': {'column': 'frob_hz',
'python_type': 'float',
'table': 'foo_fact',
'title': 'Frobniz Resonance (Hz)'}}
"""
variable_by_field = {}
for var in variables:
field_name = var['column']
variable_by_field[field_name] = var
return variable_by_field | 91c27ceb84614313d036ec216ef4c4d567a68255 | 23,376 |
import numpy as np
def true_divide(x, y):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x (np.ndarray): input tensor.
y (np.ndarray): another tensor.
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return np.true_divide(x, y) | 3bdcf5052730fd2c3e2e25a395b0e16534d2dcf9 | 23,377 |
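A short behaviour check, assuming the wrapper above: integer inputs are promoted to floating point, matching NumPy's true-division semantics.

```python
import numpy as np

print(true_divide(np.array([1, 2, 3]), 2))    # [0.5 1.  1.5]
print(true_divide(np.int32(7), np.int32(2)))  # 3.5
```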
class ObjectNotFoundError(NDARError):
"""S3 object not found"""
def __init__(self, object):
self.object = object
return
def __str__(self):
return 'Object not found: %s' % self.object | 3cc552f7074f8117ed18fd975bc5ac0b09f8016a | 23,378 |
def hello():
"""
Say hello using a template file.
"""
return render_template('index.html') | b2b09afd651a69fdc270238dbf3f724fa9f40ae4 | 23,379 |
def pause_sale(ctx):
"""
Pause the token sale
:param ctx:GetContext() used to access contract storage
:return:bool Whether pausing the sale was successful
"""
if CheckWitness(TOKEN_OWNER):
Put(ctx, SALE_STATUS_KEY, SALE_PAUSED)
return True
return False | 63e99802a852146f7a20460a28a7d277c4104954 | 23,380 |
def parse_item_hash(value):
"""
Parses the item-hash datatype, e.g. sha-256:5b8e5ee02caedd0a6f3539b19d6b462dd2d08918764e7f476506996024f7b84a
:param value: a string to parse
:return: parsed value
"""
if isinstance(value, ItemHash):
return value
if not isinstance(value, str):
raise ValueError('value must be a str')
return ItemHash(value) | 6275ab41d437728ea3448bf150f068668c3f1819 | 23,381 |
def __convert_swizzle_scale(scale, export_settings):
"""Convert a scale from Blender coordinate system to glTF coordinate system."""
if export_settings[gltf2_blender_export_keys.YUP]:
return Vector((scale[0], scale[2], scale[1]))
else:
return Vector((scale[0], scale[1], scale[2])) | 19c2f62c9cd3c267a3edc2451c5f0eba3208c34f | 23,382 |
from pathlib import Path
from typing import Type
from typing import cast
def load_config_from_expt_dir(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> OptimizerConfig:
"""
Locate a config file in experiment_dir or one of its subdirectories (for a per-seed config).
Config files are now normally in seed subdirectories, as they contain seed values.
"""
config_files = sorted(experiment_dir.glob(f"*/seed*/{CONFIG_FILENAME}")) or [experiment_dir / CONFIG_FILENAME]
config_file = config_files[0]
if not config_file.exists():
raise FileNotFoundError(f"Cannot find {CONFIG_FILENAME} at or under {experiment_dir}") # pragma: no cover
return cast(loop_config, simple_load_config(config_file, config_class=loop_config)) | 5c609b877c6c40f019d234b685caed17b287aca0 | 23,384 |
import numpy as np
def glsadf_delay(order, stage):
"""Delay for glsadf
Parameters
----------
order : int
Order of glsadf filter coefficients
stage : int
-1 / gamma
Returns
-------
delay : array
Delay
"""
return np.zeros(_sptk.glsadf_delay_length(order, stage)) | dacf0a754ba2040ba7ae004658f02df3060c6251 | 23,385 |
def find_next(s: str)->[int]:
"""
input:string
output:the next array of string
"""
if len(s) == 1:
return [-1]
result = [0 for i in range(len(s))]
result[0] = -1
result[1] = 0
i = 2
cn = 0
    while i < len(result):
        if s[i-1] == s[cn]:
            cn += 1
            result[i] = cn
            i = i + 1
        elif cn > 0:
            cn = result[cn]
        else:
            result[i] = 0
            i = i + 1
    return result | 455297eee28360f75a4f714172f62a7645ca49e0 | 23,386 |
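A hand-checked expectation for the next-array routine above: `result[i]` is the length of the longest proper prefix of `s[:i]` that is also its suffix, with `result[0] = -1` by convention (this assumes the fixed `find_next` above is in scope).

```python
assert find_next("a") == [-1]
assert find_next("ababab") == [-1, 0, 0, 1, 2, 3]
assert find_next("aabaab") == [-1, 0, 1, 0, 1, 2]
```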
from datetime import datetime
def _read_date():
""" read date from input; default to today """
# show date
while 1:
dts = prompt("Date", default=str(datetime.date.today()))
try:
datetime.datetime.strptime(dts, "%Y-%m-%d")
break
except ValueError:
continue
return dts | 45cd0e68cc6ca14552c2e2953edd2cc15809f122 | 23,387 |
def handle_pending_submission(self, request, layout=None):
""" Renders a pending submission, takes it's input and allows the
user to turn the submission into a complete submission, once all data
is valid.
This view has two states, a completable state where the form values
are displayed without a form and an edit state, where a form is rendered
to change the values.
Takes the following query parameters for customization::
* ``edit`` render the view in the edit state
* ``return-to`` the view redirects to this url once complete
* ``title`` a custom title (required if external submission)
* ``quiet`` no success messages are rendered if present
"""
collection = FormCollection(request.session)
form = request.get_form(self.form_class, data=self.data)
form.action = request.link(self)
form.model = self
if 'edit' not in request.GET:
form.validate()
if not request.POST:
form.ignore_csrf_error()
elif not form.errors:
collection.submissions.update(self, form)
completable = not form.errors and 'edit' not in request.GET
if completable and 'return-to' in request.GET:
if 'quiet' not in request.GET:
request.success(_("Your changes were saved"))
# the default url should actually never be called
return request.redirect(request.url)
if 'title' in request.GET:
title = request.GET['title']
else:
title = self.form.title
price = get_price(request, form, self)
# retain some parameters in links (the rest throw away)
form.action = copy_query(
request, form.action, ('return-to', 'title', 'quiet'))
edit_link = URL(copy_query(
request, request.link(self), ('title', )))
# the edit link always points to the editable state
edit_link = edit_link.query_param('edit', '')
edit_link = edit_link.as_string()
return {
'layout': layout or FormSubmissionLayout(self, request, title),
'title': title,
'form': form,
'completable': completable,
'edit_link': edit_link,
'complete_link': request.link(self, 'complete'),
'model': self,
'price': price,
'checkout_button': price and request.app.checkout_button(
button_label=request.translate(_("Pay Online and Complete")),
title=title,
price=price,
email=self.email or self.get_email_field_data(form),
locale=request.locale
)
} | b591246498c22d6079181654a94b4cd6290732ec | 23,388 |
import functools
def i18n_view(tpl_base_name=None, **defaults):
"""
Renders a template with locale name as suffix. Unlike the normal view
decorator, the template name should not have an extension. The locale names
are appended to the base template name using underscore ('_') as separator,
and lower-case locale identifier.
Any additional keyword arguments are used as default template variables.
For example::
@i18n_view('foo')
def render_foo():
# Renders 'foo_en' for English locale, 'foo_fr' for French, etc.
return
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
locale = request.locale
tpl_name = '%s_%s' % (tpl_base_name, locale.lower())
except AttributeError:
tpl_name = tpl_base_name
tplvars = defaults.copy()
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, **tplvars)
return result
return wrapper
return decorator | 46c33f2be90fb9cca8059ba1f13f97c3e5a61807 | 23,389 |
def git_file_list(path_patterns=()):
"""Returns: List of files in current git revision matching `path_patterns`.
This is basically git ls-files.
"""
    return exec_output_lines(['git', 'ls-files', '--exclude-standard'] + list(path_patterns), False) | 0c8cf1a3570d39e6d5c7f1658fd85b6ef2938d8a | 23,390
def unreserve_id():
"""
Removes the reservation of a SCSI ID as well as the memo for the reservation
"""
scsi_id = request.form.get("scsi_id")
reserved_ids = get_reserved_ids()["ids"]
reserved_ids.remove(scsi_id)
process = reserve_scsi_ids(reserved_ids)
if process["status"]:
RESERVATIONS[int(scsi_id)] = ""
flash(_(u"Released the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
return redirect(url_for("index"))
flash(_(u"Failed to release the reservation for SCSI ID %(id_number)s", id_number=scsi_id))
flash(process["msg"], "error")
return redirect(url_for("index")) | 6665297f37420a5ad3498f9aa9a705f5b8f1830f | 23,391 |
def integral_sqrt_a2_minus_x2(x, a):
"""Integral of $\sqrt(a^2 - x^2)$ --- see (30) at
http://integral-table.com.
"""
return 0.5*x*np.sqrt(a**2 - x**2) + 0.5*a**2*np.arctan2(x, np.sqrt(a**2 - x**2)) | 778a5dc745c62727f192616448b6c98da3d93b5c | 23,392 |
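A quick numerical check of the antiderivative above, assuming numpy is imported as np in the surrounding module (the value of a is arbitrary): integrating sqrt(a^2 - x^2) from 0 to a should give the quarter-circle area pi * a^2 / 4.

import numpy as np

a = 2.0
area = integral_sqrt_a2_minus_x2(a, a) - integral_sqrt_a2_minus_x2(0.0, a)
assert np.isclose(area, np.pi * a ** 2 / 4)  # quarter-circle area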
def read_length(file_obj): # pragma: no cover
""" Numpy trick to get a 32-bit length from four bytes
Equivalent to struct.unpack('<i'), but suitable for numba-jit
"""
sub = file_obj.read(4)
return sub[0] + sub[1]*256 + sub[2]*256*256 + sub[3]*256*256*256 | 82c311c3a8e2d2e277979c19aaae665b0227f9cd | 23,393 |
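A small check that the byte arithmetic above matches struct.unpack for a little-endian 32-bit value (the test value is arbitrary):

import io
import struct

payload = struct.pack('<I', 0x12345678)  # four little-endian length bytes
assert read_length(io.BytesIO(payload)) == 0x12345678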
import json
def ifttt_comparator_alpha_options():
""" Option values for alphanumeric comparators """
errmsg = check_ifttt_service_key()
if errmsg:
return errmsg, 401
data = {"data": [
{"value": "ignore", "label": "ignore"},
{"value": "equal", "label": "is equal to"},
{"value": "not_equal", "label": "is not equal to"},
{"value": "cont", "label": "contains"},
{"value": "not_cont", "label": "does not contain"},
{"value": "equal_nc", "label": "is equal to (ignore case)"},
{"value": "not_equal_nc", "label": "is not equal to (ignore case)"},
{"value": "cont_nc", "label": "contains (ignore case)"},
{"value": "not_cont_nc", "label": "does not contain (ignore case)"},
{"value": "in", "label": "in [json array]"},
{"value": "not_in", "label": "not in [json array]"},
{"value": "in_nc", "label": "in [json array] (ignore case)"},
{"value": "not_in_nc", "label": "not in [json array] (ignore case)"},
]}
return json.dumps(data) | d04d1f421eeda42324372702b7caf1777dfba964 | 23,396 |
def flat_abs_maximum(data, preserve_sign=True):
"""
Function to return the absolute maximum value in an array. By default,
this function will preserve the sign, meaning that if an array contains [-75, -25, 0, 25, 50]
then the function will return -75 because that value has the highest magnitude but it will return
the original value (preserving the sign).
Removing the sign preservation basically makes this function a composite of abs and max.
:param data: data array source
:param preserve_sign: whether or not to preserve the sign of the output, default is True
:return: largest absolute value in the data array
"""
data = np.asarray(data)
abs_data = np.abs(data)
subset = np.unravel_index(np.argmax(abs_data), data.shape)
return data[subset] if preserve_sign else abs_data[subset] | 6276c874ba9e1dcd047b087f6954a11ee3b680a9 | 23,397 |
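The example values from the docstring above, as a runnable check (assuming numpy is installed):

assert flat_abs_maximum([-75, -25, 0, 25, 50]) == -75
assert flat_abs_maximum([-75, -25, 0, 25, 50], preserve_sign=False) == 75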
def get_image_to_groundplane_homography(P):
"""Given the 3x4 camera projection matrix P, returns the homography
mapping image plane points onto the ground plane."""
return np.linalg.inv(get_groundplane_to_image_homography(P)) | 189bb5b80243c9145065260c591fe00c29a9a947 | 23,398 |
import logging
def create_object_detection_edge_training(
train_object_detection_edge_model_request: TrainImageEdgeModel,
):
"""[Train a Object Detection Model for Edge in AutoML GCP]
Args:
train_object_detection_edge_model_request (TrainImageEdgeModel): [Based on Input Schema]
Raises:
error: [Error]
Returns:
[type]: [description]
"""
try:
logging.info(
f"Create Object Detection Model Router: {train_object_detection_edge_model_request}"
)
return TrainModelController().train_object_detection_edge_model_controller(
request=train_object_detection_edge_model_request
)
except Exception as error:
logging.error(f"{error=}")
raise error | afff20623b96195294740056de6e327add72148f | 23,399 |
from typing import List
def readOneLineFileWithCommas(filepath: str) -> List[str]:
"""
Reads a file that is one line long, separated by commas
"""
try:
with open(filepath) as fp:
s: str = fp.readline()
return s.split(",")
    except OSError as error:
        raise Exception(f"Failed to open {filepath}") from error | 4c181523192fab0ea01ae5da0883c543565119c6 | 23,400
from operator import or_
def package_search(filters, context, limit=None, catalog=False):
"""Search packages with different filters
Catalog param controls the base query creation. Catalog queries
only search packages a user can deploy. Non-catalog queries searches
packages a user can edit.
* Admin is allowed to browse all the packages
* Regular user is allowed to browse all packages belongs to user tenant
and all other packages marked is_public.
Also all packages should be enabled.
* Use marker (inside filters param) and limit for pagination:
The typical pattern of limit and marker is to make an initial limited
request and then to use the ID of the last package from the response
as the marker parameter in a subsequent limited request.
"""
session = db_session.get_session()
pkg = models.Package
query = session.query(pkg)
if catalog:
# Only show packages one can deploy, i.e. own + public
query = query.filter(or_(
pkg.owner_id == context.tenant, pkg.is_public)
)
else:
# Show packages one can edit.
if not context.is_admin:
query = query.filter(pkg.owner_id == context.tenant)
# No else here admin can edit everything.
if not filters.get('include_disabled', '').lower() == 'true':
query = query.filter(pkg.enabled)
if filters.get('owned', '').lower() == 'true':
query = query.filter(pkg.owner_id == context.tenant)
if 'type' in filters.keys():
query = query.filter(pkg.type == filters['type'].title())
if 'category' in filters.keys():
query = query.filter(pkg.categories.any(
models.Category.name.in_(filters['category'])))
if 'tag' in filters.keys():
query = query.filter(pkg.tags.any(
models.Tag.name.in_(filters['tag'])))
if 'class_name' in filters.keys():
query = query.filter(pkg.class_definitions.any(
models.Class.name == filters['class_name']))
if 'fqn' in filters.keys():
query = query.filter(pkg.fully_qualified_name == filters['fqn'])
if 'search' in filters.keys():
fk_fields = {'categories': 'Category',
'tags': 'Tag',
'class_definitions': 'Class'}
conditions = []
for attr in dir(pkg):
if attr.startswith('_'):
continue
if isinstance(getattr(pkg, attr),
attributes.InstrumentedAttribute):
search_str = filters['search']
for delim in ',;':
search_str = search_str.replace(delim, ' ')
for key_word in search_str.split():
_word = '%{value}%'.format(value=key_word)
if attr in fk_fields.keys():
condition = getattr(pkg, attr).any(
getattr(models, fk_fields[attr]).name.like(_word))
conditions.append(condition)
elif isinstance(getattr(pkg, attr)
.property.columns[0].type, sa.String):
conditions.append(getattr(pkg, attr).like(_word))
query = query.filter(or_(*conditions))
sort_keys = [SEARCH_MAPPING[sort_key] for sort_key in
filters.get('order_by', ['name'])]
marker = filters.get('marker')
sort_dir = filters.get('sort_dir')
if marker is not None: # set marker to real object instead of its id
marker = _package_get(marker, session)
query = utils.paginate_query(
query, pkg, limit, sort_keys, marker, sort_dir)
return query.all() | 0d15d2936f713437e3d9dad794cd07faf1ca3090 | 23,401 |
def _jitter_boxes(gt_boxes, jitter=0.05):
"""
"""
jittered_boxes = gt_boxes.copy()
ws = jittered_boxes[:, 2] - jittered_boxes[:, 0] + 1.0
hs = jittered_boxes[:, 3] - jittered_boxes[:, 1] + 1.0
width_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * ws
height_offset = (np.random.rand(jittered_boxes.shape[0]) - 0.5) * jitter * hs
jittered_boxes[:, 0] += width_offset
jittered_boxes[:, 2] += width_offset
jittered_boxes[:, 1] += height_offset
jittered_boxes[:, 3] += height_offset
return jittered_boxes | 570fa7a6bd2f898ce1d64dd9f6e666e50251fcf5 | 23,403 |
def lcm_gcd(a, b):
"""Finds the least common multiple of two integers
Args:
a, b: integers greater than or equal to 1
"""
return a * b//greatest_common_divisor(a, b) | 3b23d04164c8e69eee26e48ab2b1a60e8e99fd14 | 23,405 |
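Illustrative values for the helper above, assuming the module's greatest_common_divisor (not shown here) behaves like math.gcd:

assert lcm_gcd(12, 18) == 36
assert lcm_gcd(7, 5) == 35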
def test_ahocorasick_rs_overlapping(benchmark, test_data):
"""ahocorasick_rs overlapping matches."""
patterns, haystacks = test_data
ac = ahocorasick_rs.AhoCorasick(patterns)
def run():
for haystack in haystacks:
x = ac.find_matches_as_strings(haystack, overlapping=True)
return x
print(benchmark(run)) | 3c53369e8006502a5071fb73a75ace4705421a84 | 23,406 |
import warnings
def merge_frames(frames):
"""
Merge the multiple data files downloaded from the M2M system or the Gold
Copy THREDDS server into a single xarray data set. Keep track of how many
files fail to merge.
:param frames: The data frames to concatenate/merge into a single data set
:return data: The final, merged data set
"""
# merge the list of processed data frames into a single data set
nfiles = len(frames)
nframes = nfiles
bad_files = 0
if nframes > 1:
# try merging all of the frames into a single data set (some frames may be corrupted, and will be skipped)
data, fail = _frame_merger(frames[0], frames)
# if all of the files, except for the first one, failed that would suggest the first file is the problem.
# try the merge again, reset the starting frame to skip the first one.
if nframes - fail == 1:
data, fail = _frame_merger(frames[1], frames[1:])
nframes -= 1
# if we still can't merge the frames, then there probably is something more fundamentally wrong, and trying
# to account for it here is not going to be possible
if nframes - 1 - fail == 1:
message = f"Unable to merge the {len(frames)} files downloaded from the Gold Copy THREDDS server."
warnings.warn(message)
return None
else:
bad_files = nfiles - nframes + fail
else:
# there is just the one
data = frames[0]
if bad_files > 0:
message = "{} of the {} downloaded files failed to merge.".format(bad_files, nfiles)
warnings.warn(message)
data = data.sortby(['deployment', 'time'])
data.attrs['time_coverage_start'] = ('%sZ' % data.time.min().values)
data.attrs['time_coverage_end'] = ('%sZ' % data.time.max().values)
data.attrs['time_coverage_resolution'] = ('P%.2fS' % (np.mean(data.time.diff('time').values).astype(float) / 1e9))
return data | b8b083d8f0e9360df325fbeb812b64fffc8d1d0f | 23,407 |
import re
def sorted_nicely(l):
""" This function sorts the given iterable in the way that is expected
Obtained from:
https://arcpy.wordpress.com/2012/05/11/sorting-alphanumeric-strings-in-python/
:param l: The iterable to be sorted
:return: Sorted iterable
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(l, key=alphanum_key) | c2e398e7a654a1a1ec7cc113fcad500beefd876a | 23,408 |
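An example of the natural ordering produced above; a plain sorted() would place 'item10' before 'item2':

assert sorted_nicely(['item10', 'item2', 'item1']) == ['item1', 'item2', 'item10']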
def run_board(effects: list, audio: np.array, sample_rate: float) -> np.array:
"""Run board on input audio data.
Args:
board (list): List of Pedalboard effects.
audio (np.array): Input audio data.
Returns:
Output (effected) audio data
"""
board = Pedalboard(effects, sample_rate=sample_rate)
return board(audio) | 062f7d34aa7eadad5401e64df1e96857606cbcf6 | 23,409 |
def html_escape( s ):
"""
"""
s = s.replace( '&', '&' )
s = s.replace( '<', '<' )
s = s.replace( '>', '>' )
return s | eb47ba4d4651763cb74f081095b78d53ee9bebc1 | 23,410 |
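A short usage example; the ampersand is escaped first on purpose, otherwise the entities produced for '<' and '>' would be double-escaped:

assert html_escape('a < b & c') == 'a &lt; b &amp; c'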
def model_query(context, model, *args, **kwargs):
"""Query helper.
:param context: context to query under
:param session: if present, the session to use
"""
session = kwargs.get('session') or object_sqla.get_session()
query = session.query(model, *args)
return filter_by_project(context, query) | c6e5fb09b7e9a4d85ab6c6abc1e03e227010591f | 23,411 |
def bert_dropout_model(num_classes,
bert_config,
use_mc_dropout_mha=False,
use_mc_dropout_att=False,
use_mc_dropout_ffn=False,
use_mc_dropout_output=False,
channel_wise_dropout_mha=False,
channel_wise_dropout_att=False,
channel_wise_dropout_ffn=False):
"""Creates a BERT classifier model with MC dropout."""
last_layer_initializer = tf.keras.initializers.TruncatedNormal(
stddev=bert_config.initializer_range)
# Build encoder model.
mc_dropout_bert_encoder = get_mc_dropout_transformer_encoder(
bert_config,
use_mc_dropout_mha=use_mc_dropout_mha,
use_mc_dropout_att=use_mc_dropout_att,
use_mc_dropout_ffn=use_mc_dropout_ffn,
channel_wise_dropout_mha=channel_wise_dropout_mha,
channel_wise_dropout_att=channel_wise_dropout_att,
channel_wise_dropout_ffn=channel_wise_dropout_ffn)
# Build classification model.
mc_dropout_bert_model = DropoutBertClassifier(
mc_dropout_bert_encoder,
num_classes=num_classes,
dropout_rate=bert_config.hidden_dropout_prob,
use_mc_dropout=use_mc_dropout_output,
initializer=last_layer_initializer)
return mc_dropout_bert_model, mc_dropout_bert_encoder | 6ee1d09b2070e54ba631bd6e1b8e3e453960073a | 23,412 |
def calculate_monthly_sales(year: int, month: int, beer_style: str) -> int:
"""Calculates the sales of a particular type of beer in a given month.
    param: year -- the year; month -- an int ranging from 1 to 12; beer_style -- name of the beer style;
return: total_sales
"""
total_sales = 0
for item in data:
if item[2].year == year and item[2].month == month and item[3] == beer_style:
total_sales += int(item[5])
return total_sales | fa448a8e9dfb7186652a6dc3000d3a8465320994 | 23,413 |
def check_canopy_height(region_info, regional_lookup):
"""
Check the regional canopy height.
"""
mean_canopy_height = region_info['mean_canopy_height']
if mean_canopy_height == 'no data':
mean_canopy_height = 0
return mean_canopy_height | 5f04ad71df7f0b1c9ef73e97bbe99bea1916ae5e | 23,414 |
def annotated_var(prs):
"""
Parser for annotated variable in parentheses.
Annotation is parsed with prs.
Parser output is a var token
annotation is stored in attribute 'annotation' of var token.
Sample input to parser:
(x : A)
"""
def trt(acc):
v,ann = acc
if len(ann) > 0:
return c.copy_token(v,{'annotation':ann[0]})
return v
return c.paren(var() + colon_annotation(prs)).treat(trt) | 42acdf6eb09952701d17fab73a2ee8fc20c7dc5e | 23,415 |
def action_from_json(project, value):
"""return a action from the given json
"""
json_type = value.get('type')
for class_ in sftoolbox.engine.action_classes_register:
if json_type == class_.json_type:
return class_.from_json(project, value)
return DummyAction.from_json(project, value) | 69658b53e839c7d112b7509e3ecdf57a82de817a | 23,416 |
def get_springer_doi(node):
"""
:param node:
:return:
"""
for elem in find_key(node, 'occurrence'):
if isinstance(elem, list):
for sub_elem in elem:
if isinstance(sub_elem, dict):
                values = list(sub_elem.values())
if len(values) == 2 and values[0] == 'DOI':
return values[1]
return '' | ca8773f10e6fed6b41064a5a5ad6717afd540bb5 | 23,417 |
def check_versions(versions=[]):
""" Check if there are version to build the changelog. """
if len(versions) == 0:
raise NotEnoughVersionsError()
return True | f9c7f81c02f08a867f27f329554ed85eddc34243 | 23,418 |
def create_fnet(widths, nfeat, nfeato, orthoinit, llbias):
""" Creates feature-generating network, a multi-layer perceptron.
Parameters:
widths: list of widths of hidden layers
nfeat, nfeato: # input and output channels of the convolution
orthoinit: whether to use orthogonal weight initialization
llbias: whether to use bias in the last layer
"""
fnet_modules = []
for k in range(len(widths) - 1):
fnet_modules.append(nn.Linear(widths[k], widths[k + 1]))
if orthoinit: init.orthogonal_(fnet_modules[-1].weight, gain=init.calculate_gain('relu'))
fnet_modules.append(nn.ReLU(True))
fnet_modules.append(nn.Linear(widths[-1], nfeat * nfeato, bias=llbias))
if orthoinit: init.orthogonal_(fnet_modules[-1].weight)
return nn.Sequential(*fnet_modules) | 3bdfdd89d77b6ba172e2ac85df191b11e78ab049 | 23,419 |
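A hypothetical shape check for the factory above, assuming torch.nn / torch.nn.init are imported as nn / init in the surrounding module: with widths [16, 32], nfeat=4 and nfeato=8 the network maps 16 input features to 4 * 8 = 32 outputs.

import torch

fnet = create_fnet([16, 32], nfeat=4, nfeato=8, orthoinit=True, llbias=False)
out = fnet(torch.randn(5, 16))
assert out.shape == (5, 32)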
def pytorch_array_setitem(op):
"""Implementation of array_setitem for pytorch."""
def _impl(array, begin, end, strides, value):
idx = tuple(slice(b, e, s) for b, e, s in zip(begin, end, strides))
ret = array.clone()
ret[idx] = value
return (ret,)
return _impl, op.inputs[1:] | b0c6504b2c0d1971ec16e5fdf198b20a911d4946 | 23,420 |
def time_series_seasonal_test(x: pd.Series, expected_lags: list):
    """
    Compute the autocorrelation of the series at each candidate lag and use
    those correlations to judge the seasonal period of the time series.
    Note: a list of candidate lag values must be supplied.
    :param x: time series data, type: Series
    :param expected_lags: candidate lag values to choose from
    :return: the candidate lags sorted by absolute autocorrelation (ascending)
    """
    acf_scores = []
    for lag in expected_lags:
        acf_score = acf(x.values, nlags=lag, fft=False)[-1]
        acf_scores.append(abs(acf_score))
    sorted_idx = np.argsort(acf_scores)
    return [expected_lags[i] for i in sorted_idx] | 5c0614b986eb8dfe576821245e80ef0244c70c69 | 23,421
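A hypothetical usage sketch, assuming pandas, numpy (as np) and statsmodels' acf are available as in the surrounding module; the lag values are illustrative only:

import numpy as np
import pandas as pd

x = pd.Series(np.sin(2 * np.pi * np.arange(120) / 12))  # clean period-12 signal
ranked = time_series_seasonal_test(x, expected_lags=[5, 7, 12])
# ranked[-1] is expected to be 12, the true period, for this series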
def comment_like():
    """
    - 1. Check whether the user is logged in
    - 2. Get the request parameters
    - 3. Validate the parameters (non-empty check)
    - 4. Validate the action type
    - 5. Fetch the comment object by comment id
    - 6. Check whether the comment object exists
    - 7. Like or unlike the comment according to the action type
    - 8. Return the response
    :return:
    """
    # - 1. Check whether the user is logged in
    if not g.user:
        return jsonify(errno=RET.NODATA, errmsg="User is not logged in")
    # - 2. Get the request parameters
    comment_id = request.json.get("comment_id")
    action = request.json.get("action")
    # - 3. Validate the parameters (non-empty check)
    if not all([comment_id, action]):
        return jsonify(errno=RET.PARAMERR, errmsg="Missing parameters")
    # - 4. Validate the action type
    if action not in ["add", "remove"]:
        return jsonify(errno=RET.DATAERR, errmsg="Invalid action type")
    # - 5. Fetch the comment object by comment id
    try:
        comment = Comment.query.get(comment_id)
    except Exception as e:
        current_app.logger.error(e)
        return jsonify(errno=RET.DBERR, errmsg="Failed to fetch the comment")
    # - 6. Check whether the comment object exists
    if not comment:
        return jsonify(errno=RET.NODATA, errmsg="Comment does not exist")
    try:
        # - 7. Like or unlike the comment according to the action type
        if action == "add":
            # Check whether the user has already liked this comment
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if not comment_like:
                # Create a like object
                comment_like = CommentLike()
                comment_like.user_id = g.user.id
                comment_like.comment_id = comment_id
                # Save the like object to the database
                db.session.add(comment_like)
                db.session.commit()
                # Increase the like count by 1
                comment.like_count += 1
        else:
            # Check whether the user has already liked this comment
            comment_like = CommentLike.query.filter(CommentLike.user_id == g.user.id,
                                                    CommentLike.comment_id == comment_id).first()
            if comment_like:
                # Remove the like object
                db.session.delete(comment_like)
                db.session.commit()
                # Decrease the like count by 1
                if comment.like_count > 0:
                    comment.like_count -= 1
    except Exception as e:
        current_app.logger.error(e)
        db.session.rollback()
        return jsonify(errno=RET.DBERR, errmsg="Operation failed")
    # - 8. Return the response
    return jsonify(errno=RET.OK, errmsg="Operation succeeded") | 09564653f3d843c7d82e16946507c8a081374ce6 | 23,422
def create_relationships(model_cls, data):
"""
Create the relationship dict of the specified model class with the data
:param model_cls:
:param data:
:return:
"""
relationships = model_cls.get_relationships()
relationship_map = {}
for key in relationships.keys():
relationship_cls = relationships[key].mapper.class_
relationship_kwargs = data.get(key)
if isinstance(relationship_kwargs, list): # 1:n
relationship = []
for item in relationship_kwargs:
r_ins = create_instance(relationship_cls, item)
if r_ins is not None:
relationship.append(r_ins)
else:
relationship = create_instance(relationship_cls, relationship_kwargs) # 1:1
if relationship is not None:
relationship_map[key] = relationship
return relationship_map | 6ed811b180141190cde5eaa20d4fca817647c970 | 23,424 |
import requests
def get_news_items_from_web(url):
"""
Calls the Athletics News RSS API, parses the resulting response and returns a list of parsed news_items to be
stored in DynamoDB
:param url: Url for the RSS API for UBCO Heat
:return: Parsed news items in a JSON formatted list
"""
try:
request_response = requests.get(url).text
return feedparser.parse(request_response)["entries"]
except RequestException as e:
LOGGER.error("Error in network request to RSS Feed")
detailed_exception(LOGGER)
return [] | aff75310b155475d185f15c5bbaadeda9902aae3 | 23,425 |
def get_node_model(manager, handle_id=None, node=None):
"""
:param manager: Context manager to handle transactions
:type manager: Neo4jDBSessionManager
:param handle_id: Nodes handle id
:type handle_id: str|unicode
:param node: Node object
:type node: neo4j.v1.types.Node
:return: Node model
:rtype: models.BaseNodeModel or sub class of models.BaseNodeModel
"""
bundle = get_node_bundle(manager, handle_id, node)
for label in bundle.get('labels'):
try:
classname = '{meta_type}{base}Model'.format(meta_type=bundle.get('meta_type'), base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
for label in bundle.get('labels'):
try:
classname = '{base}Model'.format(base=label).replace('_', '')
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
pass
try:
classname = '{base}Model'.format(base=bundle.get('meta_type'))
return getattr(models, classname)(manager).load(bundle)
except AttributeError:
return models.BaseNodeModel(manager).load(bundle) | a8c42b8e72b6ae96e897bd5c7f5a06b5820b4b56 | 23,426 |
from functools import reduce
def convert_hcp_plane(plane: list) -> np.ndarray:
"""
four index notion to three index notion for hcp and rhombohedral plane
Args:
plane (list): four index notion
Returns:
three index notion of plane
"""
u1 = plane[0]
v1 = plane[1]
w1 = plane[3]
plane = [u1, v1, w1]
if reduce(gcd, plane) != 1:
index = reduce(gcd, plane)
plane = [int(round(x / index)) for x in plane]
return np.array(plane) | aa6d7527a55d8b14bd03b2f6660ed94c8cf760a8 | 23,427 |
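Hypothetical examples for the conversion above, assuming gcd and numpy are imported as in the original module: the Miller-Bravais plane (1 1 -2 0) simply drops the redundant third index, and (2 2 -4 0) is additionally reduced by the common divisor.

assert list(convert_hcp_plane([1, 1, -2, 0])) == [1, 1, 0]
assert list(convert_hcp_plane([2, 2, -4, 0])) == [1, 1, 0]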