content | sha1 | id
---|---|---|
stringlengths 35–762k | stringlengths 40 | int64 0–3.66M
def hex_to_64(hexstr):
"""Convert a hex string to a base64 string.
Keyword arguments:
hexstr -- the hex string we wish to convert
"""
B64CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
## internals
# bits contains the bits read off so far that don't make enough for a char
bits = 0
# bits_left tracks how many bits are left until a char is ready to convert
bits_left = 6
# output holds the accrued base64 string thus far
output = ''
# Read each hex char as four bits. Every time 6 are accrued,
# convert them to base64 and continue.
for h in hexstr:
hbits = int(h, 16)
if bits_left == 6:
# h's bits aren't enough. Hold 'em and keep going.
bits = hbits
bits_left = 2
elif bits_left == 4:
# h's bits are just enough. Add 'em to the bits bin and convert.
bits = (bits << 4) | hbits
output += B64CHARS[bits]
bits = 0
bits_left = 6
else:
# h's top two bits finish a set of 6. Convert the set
# and save the last two of h's bits.
bits = (bits << 2) | (hbits >> 2)
output += B64CHARS[bits]
bits = hbits & 3
bits_left = 4
# After reading hexstr, we may need some zeroes for padding.
# We should also add '=' chars for each pair of padding bits.
if bits_left < 6:
output += B64CHARS[bits << bits_left]
output += '=' * (bits_left // 2)
return output
|
ca8c48bedf4ac776288a8faad06ec80c0289c11e
| 32,488 |
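A quick sanity check for `hex_to_64` above, assuming the function is importable; it compares the hand-rolled conversion against the standard library for an even-length hex string.

```python
import base64
import binascii

# Even-length hex input; the expected value comes from the standard library.
hexstr = "49276d"
expected = base64.b64encode(binascii.unhexlify(hexstr)).decode("ascii")
assert hex_to_64(hexstr) == expected  # both yield "SSdt"
```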
def get_frozen_graph(graph_file):
"""Read Frozen Graph file from disk."""
with tf.gfile.FastGFile(graph_file, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
|
e3a7bb3e1abf7eb09e6e11b325176eb95109e634
| 32,489 |
def wrap_functional_unit(dct):
"""Transform functional units for effective logging.
Turns ``Activity`` objects into their keys."""
data = []
for key, amount in dct.items():
if isinstance(key, int):
data.append({"id": key, "amount": amount})
else:
try:
data.append({"database": key[0], "code": key[1], "amount": amount})
except TypeError:
data.append({"key": key, "amount": amount})
return data
|
9c86b5c6c4f360e86f39e2bc59ce4a34804cd7fa
| 32,490 |
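A small illustration of `wrap_functional_unit` above, using hypothetical keys: one `(database, code)` tuple and one integer id.

```python
# Hypothetical functional unit: a (database, code) tuple key and an integer id key.
fu = {("ecoinvent", "abc123"): 1.0, 42: 3.5}
print(wrap_functional_unit(fu))
# [{'database': 'ecoinvent', 'code': 'abc123', 'amount': 1.0},
#  {'id': 42, 'amount': 3.5}]
```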
def full_fuel_requirement(mass: int) -> int:
"""Complete fuel requirements for a single module."""
base_fuel = fuel_requirement(mass)
return base_fuel + sum(additional_fuel_requirements(base_fuel))
|
62c9d7e11afd0805d476216bdd113081285b10c4
| 32,492 |
def is_private(name):
"""Check whether a Python object is private based on its name."""
return name.startswith("_")
|
d04dfd84884bbc8c8be179c6bc5fc1371f426a78
| 32,493 |
import warnings
def get_suitable_output_file_name_for_current_output_format(output_file, output_format):
""" renames the name given for the output_file if the results for current_output format are returned compressed by default
and the name selected by the user does not contain the correct extension.
output_file : str, optional, default None
file name selected by the user
output_format : str, optional, default 'votable'
results format. Available formats in TAP are: 'votable', 'votable_plain',
'fits', 'csv', 'ecsv' and 'json'. Default is 'votable'.
Returned results for formats 'votable' 'ecsv' and 'fits' are compressed
gzip files.
Returns
-------
A string with the new name for the file.
"""
compressed_extension = ".gz"
format_with_results_compressed = ['votable', 'fits', 'ecsv']
output_file_with_extension = output_file
if output_file is not None:
if output_format in format_with_results_compressed:
# In this case we will have to take also into account the .fits format
if not output_file.endswith(compressed_extension):
warnings.warn('By default, results in "votable", "ecsv" and "fits" format are returned in '
f'compressed format therefore your file {output_file} '
f'will be renamed to {output_file}.gz')
if output_format == 'votable':
if output_file.endswith('.vot'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.vot.gz'
elif output_format == 'fits':
if output_file.endswith('.fits'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.fits.gz'
elif output_format == 'ecsv':
if output_file.endswith('.ecsv'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.ecsv.gz'
# the output type is not compressed by default by the TAP SERVER but the users gives a .gz extension
elif output_file.endswith(compressed_extension):
output_file_renamed = output_file.removesuffix('.gz')
warnings.warn(f'The output format selected is not compatible with compression. {output_file}'
f' will be renamed to {output_file_renamed}')
return output_file_with_extension
|
6925d721f06e52c8177c5e4f6404629043642cc4
| 32,494 |
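Two illustrative calls of the function above (file names are hypothetical; the warnings it emits are omitted here).

```python
# 'votable' results come back gzipped, so the missing extension is appended.
get_suitable_output_file_name_for_current_output_format("result", "votable")
# -> 'result.vot.gz'

# 'csv' is not compressed by the server; a warning is raised for the '.gz' suffix,
# but the returned name is left unchanged by the code above.
get_suitable_output_file_name_for_current_output_format("result.csv.gz", "csv")
# -> 'result.csv.gz'
```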
def efficientnet_b1(
num_classes: int = 1000,
class_type: str = "single",
dropout: float = 0.2,
se_mod: bool = False,
) -> EfficientNet:
"""
EfficientNet B1 implementation; expected input shape is (B, 3, 240, 240)
:param num_classes: the number of classes to classify
:param class_type: one of [single, multi] to support multi class training;
default single
:param dropout: the amount of dropout to use while training
:param se_mod: If true, moves squeeze-excite to the end of the block
(after last 1x1)
    :return: The created EfficientNet B1 Module
"""
width_mult = 1.0
depth_mult = 1.1
sec_settings, out_channels = _create_section_settings(
width_mult, depth_mult, se_mod
)
return EfficientNet(
sec_settings=sec_settings,
out_channels=out_channels,
num_classes=num_classes,
class_type=class_type,
dropout=dropout,
)
|
e713946ec937d0beb2069f7a273a311cad4b3305
| 32,495 |
def mark_user_authenticated(user, login):
"""
Modify a User so it knows it is logged in - checked via user.is_authenticated()
"""
setattr(user, ACTIVATED_LOGIN_KEY, login)
return user
|
dcd706204747c526c2128a49fcf24c7ef8e075bd
| 32,496 |
def assignments(bmat, order=1, ntry=10):
"""Make assignments between rows and columns.
    The objective is to find assignments satisfying the following conditions:
    - all associations are allowed in bmat,
    - each row is associated with a unique column,
    - each column is associated with a unique row,
    - all rows are associated.
    A classical use case is to assign students to a defense schedule.
    Parameters
    ----------
    bmat : array of bool or int
        Binary matrix indicating which assignments are allowed.
    order : int, optional
        Order of the greedy search. Default: 1. A higher order can be used for
        small datasets if a solution cannot be found with order 1.
    ntry : int
        Number of random tries used to solve the assignment problem.
    Returns
    -------
    AssignmentsResult
        Attributes are:
        - ``best_assignments`` contains the assignments that solve the problem
          or, failing that, those with the highest number of associated rows.
        - ``not_assigned_rows`` contains the indexes of the rows left unassigned
          in ``best_assignments`` (empty if the problem is solved).
        - ``problematic_rows`` contains tuples of problematic row indexes and
          scores. A higher score indicates a row is more problematic for the
          assignment problem.
"""
min_not_assigned_rows = np.inf
best_assignments = ()
best_not_assigned_rows = ()
not_assigned_rows = []
for _ in range(ntry):
cur_assignments, cur_not_assigned_rows = _assigments_one_try(bmat, order)
if not cur_not_assigned_rows:
return AssignmentsResult(cur_assignments, (), ())
if len(cur_not_assigned_rows) < min_not_assigned_rows:
min_not_assigned_rows = len(cur_not_assigned_rows)
best_assignments = cur_assignments
best_not_assigned_rows = cur_not_assigned_rows
not_assigned_rows.extend(cur_not_assigned_rows)
pb_rows = [
(i, sum(j == i for j in not_assigned_rows) / ntry)
for i in set(not_assigned_rows)
]
pb_rows.sort(key=lambda x: x[1], reverse=True)
return AssignmentsResult(best_assignments, best_not_assigned_rows, tuple(pb_rows))
|
5d183df6b538b13f53cf4a9ec18527525b5d0383
| 32,497 |
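A minimal usage sketch for `assignments` above, assuming the function and its `AssignmentsResult` return type are importable; the matrix is a toy example.

```python
import numpy as np

# Rows could be students, columns defense slots; True marks an allowed pairing.
bmat = np.array([[1, 0, 1],
                 [1, 1, 0],
                 [0, 1, 1]], dtype=bool)

result = assignments(bmat, order=1, ntry=10)
# When a complete assignment exists, result.not_assigned_rows is empty and
# result.best_assignments pairs every row with a distinct allowed column.
```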
from scipy.linalg import inv  # matrix inverse, used in the 'chol'/'lu' branch below
def _tracemin_fiedler(L, X, normalized, tol, method):
"""Compute the Fiedler vector of L using the TraceMIN-Fiedler algorithm.
"""
n = X.shape[0]
if normalized:
# Form the normalized Laplacian matrix and determine the eigenvector of
# its nullspace.
e = sqrt(L.diagonal())
D = spdiags(1. / e, [0], n, n, format='csr')
L = D * L * D
e *= 1. / norm(e, 2)
if not normalized:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= X[:, j].sum() / n
else:
def project(X):
"""Make X orthogonal to the nullspace of L.
"""
X = asarray(X)
for j in range(X.shape[1]):
X[:, j] -= dot(X[:, j], e) * e
if method is None:
method = 'pcg'
if method == 'pcg':
# See comments below for the semantics of P and D.
def P(x):
x -= asarray(x * X * X.T)[0, :]
if not normalized:
x -= x.sum() / n
else:
x = daxpy(e, x, a=-ddot(x, e))
return x
solver = _PCGSolver(lambda x: P(L * P(x)), lambda x: D * x)
elif method == 'chol' or method == 'lu':
# Convert A to CSC to suppress SparseEfficiencyWarning.
A = csc_matrix(L, dtype=float, copy=True)
# Force A to be nonsingular. Since A is the Laplacian matrix of a
# connected graph, its rank deficiency is one, and thus one diagonal
# element needs to modified. Changing to infinity forces a zero in the
# corresponding element in the solution.
i = (A.indptr[1:] - A.indptr[:-1]).argmax()
A[i, i] = float('inf')
solver = (_CholeskySolver if method == 'chol' else _LUSolver)(A)
else:
raise nx.NetworkXError('unknown linear system solver.')
# Initialize.
Lnorm = abs(L).sum(axis=1).flatten().max()
project(X)
W = asmatrix(ndarray(X.shape, order='F'))
while True:
# Orthonormalize X.
X = qr(X)[0]
        # Compute the iteration matrix H.
W[:, :] = L * X
H = X.T * W
sigma, Y = eigh(H, overwrite_a=True)
# Compute the Ritz vectors.
X *= Y
# Test for convergence exploiting the fact that L * X == W * Y.
res = dasum(W * asmatrix(Y)[:, 0] - sigma[0] * X[:, 0]) / Lnorm
if res < tol:
break
# Depending on the linear solver to be used, two mathematically
# equivalent formulations are used.
if method == 'pcg':
# Compute X = X - (P * L * P) \ (P * L * X) where
# P = I - [e X] * [e X]' is a projection onto the orthogonal
# complement of [e X].
W *= Y # L * X == W * Y
W -= (W.T * X * X.T).T
project(W)
# Compute the diagonal of P * L * P as a Jacobi preconditioner.
D = L.diagonal().astype(float)
D += 2. * (asarray(X) * asarray(W)).sum(axis=1)
D += (asarray(X) * asarray(X * (W.T * X))).sum(axis=1)
D[D < tol * Lnorm] = 1.
D = 1. / D
# Since TraceMIN is globally convergent, the relative residual can
# be loose.
X -= solver.solve(W, 0.1)
else:
# Compute X = L \ X / (X' * (L \ X)). L \ X can have an arbitrary
# projection on the nullspace of L, which will be eliminated.
W[:, :] = solver.solve(X)
project(W)
X = (inv(W.T * X) * W.T).T # Preserves Fortran storage order.
return sigma, asarray(X)
|
9b14aa1a8973134846bcf31dabe8bb27d70d36fd
| 32,498 |
def default_char_class_join_with():
""" default join for char_class and combine types """
return ''
|
9d8f15413f56202472f2e1257382babbaa444edc
| 32,499 |
def infer_orf(pos1, pos2, corr_coeff=np.zeros(7)):
"""
Approximation of spatial correlations at seven angles, with borders at
30 degrees.
"""
if np.all(pos1 == pos2):
return 1.
else:
eta = np.arccos(np.dot(pos1, pos2))
idx = np.round( eta / np.pi * 180/30.).astype(int)
return corr_coeff[idx]
|
9f624d42b39424ab14fefe68758403709661987f
| 32,500 |
def draw_bs_reps_ind(data, func, size=1):
"""Draw bootstrap replicates, resampling indices"""
# Set up array of indices
inds = np.arange(len(data))
# Initialize replicates
bs_reps = np.empty(size)
# Generate replicates
for i in range(size):
bs_inds = np.random.choice(inds, size=len(data))
bs_data = data[bs_inds]
bs_reps[i] = func(bs_data)
return bs_reps
|
39be61c2c7f3f1b67887c9f4a6159f63cc9f6c92
| 32,502 |
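A usage sketch for the bootstrap helper above: 1000 bootstrap replicates of the sample mean and a percentile confidence interval (the data values are made up).

```python
import numpy as np

np.random.seed(0)                       # for a reproducible sketch
data = np.array([2.3, 1.9, 3.1, 2.8, 2.2])
reps = draw_bs_reps_ind(data, np.mean, size=1000)
ci = np.percentile(reps, [2.5, 97.5])   # 95% bootstrap confidence interval
```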
def get_committee_assignment(
state: BeaconState,
config: Eth2Config,
epoch: Epoch,
validator_index: ValidatorIndex,
) -> CommitteeAssignment:
"""
Return the ``CommitteeAssignment`` in the ``epoch`` for ``validator_index``.
``CommitteeAssignment.committee`` is the tuple array of validators in the committee
``CommitteeAssignment.shard`` is the shard to which the committee is assigned
``CommitteeAssignment.slot`` is the slot at which the committee is assigned
``CommitteeAssignment.is_proposer`` is a bool signalling if the validator is expected to
propose a beacon block at the assigned slot.
"""
next_epoch = state.next_epoch(config.SLOTS_PER_EPOCH)
if epoch > next_epoch:
raise ValidationError(
f"Epoch for committee assignment ({epoch}) must not be after next epoch {next_epoch}."
)
active_validators = get_active_validator_indices(state.validators, epoch)
committees_per_slot = (
get_committee_count(
len(active_validators),
config.SHARD_COUNT,
config.SLOTS_PER_EPOCH,
config.TARGET_COMMITTEE_SIZE,
)
// config.SLOTS_PER_EPOCH
)
epoch_start_slot = compute_start_slot_of_epoch(epoch, config.SLOTS_PER_EPOCH)
epoch_start_shard = get_start_shard(state, epoch, CommitteeConfig(config))
for slot in range(epoch_start_slot, epoch_start_slot + config.SLOTS_PER_EPOCH):
offset = committees_per_slot * (slot % config.SLOTS_PER_EPOCH)
slot_start_shard = (epoch_start_shard + offset) % config.SHARD_COUNT
for i in range(committees_per_slot):
shard = Shard((slot_start_shard + i) % config.SHARD_COUNT)
committee = get_crosslink_committee(
state, epoch, shard, CommitteeConfig(config)
)
if validator_index in committee:
is_proposer = validator_index == get_beacon_proposer_index(
state.copy(slot=slot), CommitteeConfig(config)
)
return CommitteeAssignment(
committee, Shard(shard), Slot(slot), is_proposer
)
raise NoCommitteeAssignment
|
ad0661cb166c319fe7026199f0175de8939457ab
| 32,503 |
def from_tfp(posterior, var_names=None, *, coords=None, dims=None):
"""Convert tfp data into an InferenceData object."""
return TfpConverter(
posterior=posterior, var_names=var_names, coords=coords, dims=dims
).to_inference_data()
|
b77b8fc6efbef07a3243026fa0baf40912be5950
| 32,504 |
def collection_file_fixture(config, filesystem):
"""Return a test CollectionFile instance"""
collection_name = 'test-collection'
file_name = 'index'
collection_file = CollectionFile(config, filesystem, collection_name, file_name)
collection_file.get_collection().get_path().mkdir(parents=True)
return collection_file
|
f4c8aab35727cdc345a8cd173c7f4646a66e17dc
| 32,505 |
def get_array_of_data():
"""Function helps to collect all filtered data in one array and it returns that"""
array_of_data = []
array_of_messages = gmail_get.get_bodies_of_messages()
for msg in array_of_messages:
if 'новый сотрудник' in msg:
message = get_data_from_text(msg)
array_of_data.append(message)
return array_of_data
|
2990315722ee263922a119cba38c8e0d6e86675e
| 32,506 |
def call_blade_functions (calls):
"""
Call a set of system functions for each blade in the rack. The results of the calls will be
aggregated together for each blade.
:param calls: A list of tuples containing the functions to call, the name of the parameter that
contains the blade ID, and a list of additional parameters to pass to the function.
    :return A list of dictionary aggregations. Each entry contains the results for one blade.
"""
blades = []
for i in range (1, pre_check.get_max_num_systems () + 1):
blades.append (call_blade_system_functions (i, calls))
return blades
|
30acf91d9346b9bcf090388a9d2a02b26b02ce36
| 32,507 |
from typing import Callable
from typing import Optional
def seglearn_wrapper(func: Callable, func_name: Optional[str] = None) -> FuncWrapper:
"""Wrapper enabling compatibility with seglearn functions.
As [seglearn feature-functions](https://github.com/dmbee/seglearn/blob/master/seglearn/feature_functions.py)
are vectorized along the first axis (axis=0), we need to expand our window-data.
This wrapper converts `1D np.array` to a `2D np.array` with all the window-data in
`axis=1`.
Parameters
----------
func: Callable
The seglearn function.
func_name: str, optional
The name for the passed function. This will be used when constructing the output
names.
Returns
-------
FuncWrapper
The wrapped seglearn function that is compatible with tsflex.
"""
def wrap_func(x: np.ndarray):
out = func(x.reshape(1, len(x)))
return out.flatten()
wrap_func.__name__ = "[seglearn_wrapped]__" + _get_name(func)
output_names = _get_name(func) if func_name is None else func_name
    # A bit hacky (hard-coded), because hist is the only feature function that returns multiple values
if hasattr(func, "bins"):
output_names = [output_names+f"_bin{idx}" for idx in range(1, func.bins+1)]
return FuncWrapper(wrap_func, output_names=output_names)
|
d13427f9c696c9ded8c412401b8879c4bbc66073
| 32,508 |
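A sketch of wrapping one seglearn feature with the helper above; it assumes seglearn is installed and that `FuncWrapper` comes from tsflex as in the docstring.

```python
import numpy as np
from seglearn.feature_functions import mean  # assumed available from seglearn

# Wrap the axis-0-vectorized seglearn `mean` so it accepts a single 1D window;
# internally the window is reshaped to (1, len(window)) before the call.
wrapped = seglearn_wrapper(mean, func_name="mean")
# `wrapped` is a tsflex FuncWrapper whose output feature is named "mean".
```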
import scipy.interpolate
def spline_fit(output_wave,input_wave,input_flux,required_resolution,input_ivar=None,order=3,max_resolution=None):
"""Performs spline fit of input_flux vs. input_wave and resamples at output_wave
Args:
output_wave : 1D array of output wavelength samples
input_wave : 1D array of input wavelengths
input_flux : 1D array of input flux density
required_resolution (float) : resolution for spline knot placement (same unit as wavelength)
Options:
input_ivar : 1D array of weights for input_flux
order (int) : spline order
max_resolution (float) : if not None and first fit fails, try once this resolution
Returns:
output_flux : 1D array of flux sampled at output_wave
"""
if input_ivar is not None :
selection=np.where(input_ivar>0)[0]
if selection.size < 2 :
log=get_logger()
log.error("cannot do spline fit because only {0:d} values with ivar>0".format(selection.size))
raise ValueError
w1=input_wave[selection[0]]
w2=input_wave[selection[-1]]
else :
w1=input_wave[0]
w2=input_wave[-1]
res=required_resolution
n=int((w2-w1)/res)
res=(w2-w1)/(n+1)
knots=w1+res*(0.5+np.arange(n))
## check that nodes are close to pixels
dknots = abs(knots[:,None]-input_wave)
mins = np.amin(dknots,axis=1)
w=mins<res
knots = knots[w]
try :
toto=scipy.interpolate.splrep(input_wave,input_flux,w=input_ivar,k=order,task=-1,t=knots)
output_flux = scipy.interpolate.splev(output_wave,toto)
except ValueError as err :
log=get_logger()
if max_resolution is not None and required_resolution < max_resolution :
log.warning("spline fit failed with resolution={}, retrying with {}".format(required_resolution,max_resolution))
return spline_fit(output_wave,input_wave,input_flux,max_resolution,input_ivar=input_ivar,order=3,max_resolution=None)
else :
log.error("spline fit failed")
raise ValueError
return output_flux
|
2a42f6c85bace99bba7897be81ecd5a779373476
| 32,509 |
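A resampling sketch for `spline_fit` above, with synthetic wavelength/flux arrays; the 20-unit knot spacing is an arbitrary choice for illustration.

```python
import numpy as np

input_wave = np.linspace(4000.0, 5000.0, 200)
input_flux = np.sin(input_wave / 50.0) + 0.01 * np.random.randn(input_wave.size)
output_wave = np.linspace(4000.0, 5000.0, 500)

# Fit a cubic spline with ~20-unit knot spacing and evaluate on the finer grid.
output_flux = spline_fit(output_wave, input_wave, input_flux,
                         required_resolution=20.0, order=3)
```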
def park2_3_mf(z, x):
""" Computes the Park2_3 function. """
f = [z[0][0]/5000.0, z[1]/10.0]
return park2_3_z_x(f, x)
|
801da280fd9287de6124c16a526f66d3526d054f
| 32,510 |
def make_batch_sql(
table, datas, auto_update=False, update_columns=(), update_columns_value=()
):
"""
    @summary: Build a batch SQL statement
    ---------
    @param table: table name
    @param datas: table data, a list of dicts [{...}]
    @param auto_update: use "replace into", fully overwriting rows that already exist
    @param update_columns: columns to update. Defaults to all; when set, auto_update is
        ignored and the given columns are updated on a duplicate-key conflict.
    @param update_columns_value: values for the columns to update. Defaults to the matching
        values in datas. Note: string values must be quoted explicitly,
        e.g. update_columns_value=("'test'",)
    ---------
    @result: (sql, values)
"""
if not datas:
return
keys = list(datas[0].keys())
values_placeholder = ["%s"] * len(keys)
values = []
for data in datas:
value = []
for key in keys:
current_data = data.get(key)
current_data = format_sql_value(current_data)
value.append(current_data)
values.append(value)
keys = ["`{}`".format(key) for key in keys]
keys = list2str(keys).replace("'", "")
values_placeholder = list2str(values_placeholder).replace("'", "")
if update_columns:
if not isinstance(update_columns, (tuple, list)):
update_columns = [update_columns]
if update_columns_value:
update_columns_ = ", ".join(
[
"`{key}`={value}".format(key=key, value=value)
for key, value in zip(update_columns, update_columns_value)
]
)
else:
update_columns_ = ", ".join(
["`{key}`=values(`{key}`)".format(key=key) for key in update_columns]
)
sql = "insert into {table} {keys} values {values_placeholder} on duplicate key update {update_columns}".format(
table=table,
keys=keys,
values_placeholder=values_placeholder,
update_columns=update_columns_,
)
elif auto_update:
sql = "replace into {table} {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
else:
sql = "insert ignore into {table} {keys} values {values_placeholder}".format(
table=table, keys=keys, values_placeholder=values_placeholder
)
return sql, values
|
48c990994c226bc370c8c5a62694eae29ab8e6e5
| 32,511 |
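A usage sketch for `make_batch_sql` above; `format_sql_value` and `list2str` are helpers from the same module, assumed here to serialize values and render the parenthesized lists.

```python
datas = [{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]
sql, values = make_batch_sql("users", datas, update_columns=["name"])
# sql is an "insert into users (`id`, `name`) values (%s, %s)
#   on duplicate key update `name`=values(`name`)" style statement,
# and values is a list of per-row value lists suitable for cursor.executemany().
```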
import bot_config
import traceback
def get_dimensions():
"""Returns bot_config.py's get_attributes() dict."""
# Importing this administrator provided script could have side-effects on
# startup. That is why it is imported late.
try:
if _in_load_test_mode():
# Returns a minimal set of dimensions so it doesn't run tasks by error.
dimensions = os_utilities.get_dimensions()
return {
'id': dimensions['id'],
'load_test': ['1'],
}
out = bot_config.get_dimensions()
if not isinstance(out, dict):
raise ValueError('Unexpected type %s' % out.__class__)
return out
except Exception as e:
try:
out = os_utilities.get_dimensions()
out['error'] = [str(e)]
out['quarantined'] = ['1']
return out
except Exception as e:
try:
botid = os_utilities.get_hostname_short()
except Exception as e2:
botid = 'error_%s' % str(e2)
return {
'id': [botid],
'error': ['%s\n%s' % (e, traceback.format_exc()[-2048:])],
'quarantined': ['1'],
}
|
7e8b2c29cf43ee46df77671370fb5c6a0030643a
| 32,512 |
from pathlib import Path
def find_project_root(srcs):
"""Return a directory containing .git, .robocop or pyproject.toml.
That directory will be a common parent of all files and directories
passed in `srcs`.
If no directory in the tree contains a marker that would specify it's the
project root, the root of the file system is returned.
"""
if not srcs:
return Path("/").resolve()
path_srcs = [Path(Path.cwd(), src).resolve() for src in srcs]
# A list of lists of parents for each 'src'. 'src' is included as a
# "parent" of itself if it is a directory
src_parents = [list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs]
common_base = max(
set.intersection(*(set(parents) for parents in src_parents)),
key=lambda path: path.parts,
)
for directory in (common_base, *common_base.parents):
if (
(directory / ".git").exists()
or (directory / "pyproject.toml").is_file()
or (directory / ".robocop").is_file()
):
return directory
return directory
|
87c2b40498879dccc8f5ec18c1b456390b845b44
| 32,513 |
def __virtual__():
"""
Return virtual name of the module.
:return: The virtual name of the module.
"""
if HAS_SQLI:
return __virtualname__
return False, "sqlite3 module is missing"
|
f40a769e6376e284ddc7ab805b094e056db78d9e
| 32,514 |
def filter_contours(flist, contours, dist):
""" Filter out all visual cues with a eucl_dist(P/A ratio) smaller than .05
"""
n, fltr_cnts, bad_cnts = [], [], []
# loop over each frame to verify condition
for i in range(len(flist)):
fltr_areas = list(np.array(contours[i])[np.where(np.array(dist[i]) < .05)])
bad_areas = list(np.array(contours[i])[np.where(np.array(dist[i]) > .05)])
fltr_cnts.append(fltr_areas)
bad_cnts.append(bad_areas)
n.append(len(fltr_areas))
return n, fltr_cnts, bad_cnts
|
4720bd1b1c860dfe45c067602672b042f060d571
| 32,515 |
def count_ents(phrase):
"""
Counts the number of Named Entities in a spaCy Doc/Span object.
    :param phrase: the Doc/Span object whose Named Entities are counted.
:return: The number of Named Entities identified in the input object.
"""
named_entities = list(phrase.ents)
return len(named_entities)
|
7e592442ebaf504320a903c4084454ff73896595
| 32,516 |
def version_command(argv) -> CommandResult:
"""Show FlakeHell version.
"""
print('FlakeHell', colored(flakehell_version, 'green'))
print('Flake8 ', colored(flake8_version, 'green'))
print('For plugins versions use', colored('flakehell plugins', 'green'))
return ExitCode.OK, ''
|
bf541f6c049ed8c385200817912908d7f615bb17
| 32,517 |
import typing
def get_oldest_employees(count_employees: int) -> typing.List[typing.Tuple[str, str]]:
"""
Return a list of pairs in the form of (FirstName, LastName).
The list will consist of the `count` oldest employees from oldest to youngest.
    :param count_employees: Number of employees to return.
    :type count_employees: int
:return: List of (FirstName, LastName) pairings from oldest to youngest.
:rtype: typing.List[typing.Tuple[str, str]]
"""
session = Session()
empl = rockemsocks.Employees
return (
session.query(
empl.FirstName,
empl.LastName,
)
.order_by(db.asc(empl.BirthDate))
.limit(count_employees)
.all()
)
|
9b1518ddb936d40997a0f8c5bc8770b33956e5c2
| 32,518 |
def get_Q_UT_CL_d_t_i(L_CL_d_t_i, Q_T_CL_d_t_i):
"""冷房設備機器の未処理冷房潜熱負荷(MJ/h)(20b)を計算する
Args:
Q_max_CL_d_t_i(ndarray): 最大冷房潜熱出力
L_CL_d_t_i(ndarray): 冷房潜熱負荷
Q_T_CL_d_t_i: returns: 冷房設備機器の未処理冷房潜熱負荷(MJ/h)
Returns:
ndarray: 冷房設備機器の未処理冷房潜熱負荷(MJ/h)
"""
return L_CL_d_t_i - Q_T_CL_d_t_i
|
4f46048e6df08d634153b176b241ac1caaae252f
| 32,519 |
import re
def html_gscholar_to_json(soup):
"""
"""
# Scrape just PDF links
for pdf_link in soup.select('.gs_or_ggsm a'):
pdf_file_link = pdf_link['href']
print(pdf_file_link)
# JSON data will be collected here
data = []
# Container where all needed data is located
for result in soup.select('.gs_ri'):
title = result.select_one('.gs_rt').text
try:
title_link = result.select_one('.gs_rt a')['href']
except:
title_link = ''
publication_info = result.select_one('.gs_a').text
snippet = result.select_one('.gs_rs').text
cited_by = result.select_one('#gs_res_ccl_mid .gs_nph+ a')['href']
related_articles = result.select_one('a:nth-child(4)')['href']
# get the year of publication of each paper
try:
txt_year = result.find("div", class_="gs_a").text
ref_year = re.findall('[0-9]{4}', txt_year)
ref_year = ref_year[0]
except:
ref_year = 0
# get number of citations for each paper
try:
txt_cite = result.find("div", class_="gs_fl").find_all("a")[2].string
citations = txt_cite.split(' ')
citations = (citations[-1])
citations = int(citations)
except:
citations = 0
try:
all_article_versions = result.select_one('a~ a+ .gs_nph')['href']
except:
all_article_versions = None
data.append({
'year': ref_year,
'title': title,
'title_link': title_link,
'publication_info': publication_info,
'snippet': snippet,
'citations': citations,
'cited_by': f'https://scholar.google.com{cited_by}',
'related_articles': f'https://scholar.google.com{related_articles}',
'all_article_versions': f'https://scholar.google.com{all_article_versions}',
})
    return data
|
cf7166650b9114e184896c10e17be4a32bc129a0
| 32,520 |
def csi_psroipooling(
cls_prob, roi, spatial_scale, output_dim, group_size, out_dtype, q_params, layer_name=""
):
"""Quantized psroipooling.
Parameters
----------
Returns
-------
result : relay.Expr
"""
return _make.CSIPSROIPooling(
cls_prob, roi, spatial_scale, output_dim, group_size, out_dtype, q_params, layer_name
)
|
864dc29d469820f6ef09a006910aacaefce7c932
| 32,521 |
import logging
def short_links(link):
"""
    Shorten a long URL into a Bitly short link.
    :param link: the long URL
    :return: the Bitly short link as a string
"""
url = 'https://api-ssl.bitly.com/v4/groups'
res = get(url=url, headers=head)
group = res.json()['groups'][0]['guid']
url = 'https://api-ssl.bitly.com/v4/shorten'
head.pop('Accept')
head['Content-Type'] = 'application/json'
options = {
'group_guid': group,
'long_url': link
}
res = post(url=url, headers=head, json=options)
logging.info(f'Short_links - {res.status_code}')
res.raise_for_status()
return res.json()['link']
|
80b6803812933b0087d5c9d5cb1d3fb3e8f04be4
| 32,522 |
def parse_page_loaded(msg_type, target, logs, log_begin_idx):
"""
Parse WebViewPageLoadedEvent event logs
    :param msg_type: message type
    :param target: event target
:return: an instance of event.WebViewPageLoadedEvent
"""
assert len(logs) == 1
log_info = util.extract_info(logs[0])
plid, package = log_info['plid'], log_info['package']
content = log_info['content']
begin_ts = log_info['ts']
end_ts = log_info['ts']
webview = content['webview']
url = content['url']
classname = content['clientClassname']
return event.WebViewPageLoadedEvent(
plid=plid,
package=package,
msg_type=msg_type,
begin_ts=begin_ts,
end_ts=end_ts,
target=target,
webview=webview,
url=url,
log_begin_idx=log_begin_idx,
client_classname=classname
)
|
2721155f7b38c299100eb75cec2df1c18aaefd31
| 32,523 |
def suggest_mappings_by_content(resource, pathway_id):
"""Return list of top matches based on gene set similarity.
---
tags:
- mappings
parameters:
- name: resource
type: string
description: name of the database
required: true
- name: pathway_id
type: string
description: identifier of the pathway
required: true
responses:
200:
    description: The top 5 most similar pathways by content in JSON
"""
reference_pathway = get_pathway_model_by_id(current_app, resource, pathway_id)
if reference_pathway is None:
return abort(500, "Pathway '{}' not found in manager '{}'".format(pathway_id, resource))
reference_gene_set = reference_pathway.get_gene_set()
# Get all pathway names from each resource
pathways_dict = {
manager: external_manager.get_all_pathways()
for manager, external_manager in current_app.manager_dict.items()
if manager not in BLACK_LIST
}
log.info('Calculating similarity for pathway {} in {}'.format(reference_pathway.name, resource))
similar_pathways = defaultdict(list)
for resource, pathway_list in pathways_dict.items():
for pathway in pathway_list:
if len(pathway.get_gene_set()) == 0:
continue
similarity = calculate_szymkiewicz_simpson_coefficient(reference_gene_set, pathway.get_gene_set())
if similarity == 0:
continue
similar_pathways[resource].append((pathway.resource_id, similarity))
log.info('Calculated for all {} pathways'.format(resource))
results = defaultdict(list)
for resource, pathway_list in similar_pathways.items():
top_matches = get_top_matches(pathway_list, 5)
for pathway_id, similarity in top_matches:
results[resource].append(
[
resource,
pathway_id,
current_app.manager_dict[resource].get_pathway_by_id(pathway_id).name,
round(similarity, 4)
]
)
return jsonify(dict(results))
|
0dec7c0a2759a6aabb1bae5ab22973cd74b30e7f
| 32,524 |
def animation(request):
"""
Controller for the streamflow animation page.
"""
context = {}
return render(request, 'sfpt/animationmap.html', context)
|
d42c4d0e7d51a772c115488636bf84701b9d5ee0
| 32,525 |
def kos_lookup_cmdr_embeds(session, cmdr_name, cmdr_pic=None):
"""
Look up the cmdr in the KOS db, if found return embeds that match (up to 3 closest).
Returns:
[embed, ...]: The discord.py embeds who match the cmdr_name.
[] : No matches in KOS db.
"""
if not cmdr_pic:
cmdr_pic = EMPTY_IMG
kos_cmdrs = cogdb.query.kos_search_cmdr(session, cmdr_name)
embeds = []
for kos in kos_cmdrs[:3]:
embeds += [discord.Embed.from_dict({
'color': KOS_COLORS.get(kos.friendly, KOS_COLORS['default']),
'author': {
'name': "KOS Finder",
'icon_url': cmdr_pic,
},
"fields": [
{'name': 'Name', 'value': kos.cmdr, 'inline': True},
{'name': 'Reg Squadron', 'value': kos.squad if kos.squad else "Indy", 'inline': True},
{'name': 'Is Friendly ?', 'value': kos.friendly, 'inline': True},
{'name': 'Reason', 'value': kos.reason if kos.reason else "No reason.", 'inline': False},
],
})]
return embeds
|
243f46a772e04874fd3a11c77c7ba6d3cdf439d0
| 32,526 |
def decode(s):
"""
Deserialize an MRS object from a SimpleMRS string.
"""
if hasattr(s, 'decode'):
s = s.decode('utf-8')
ms = next(_decode(s.splitlines()))
return ms
|
11a5a914ab53d7de944637337b8f33315dacdb41
| 32,528 |
def fidimag_to_finitedifferencefield(sim):
"""
fidimag_to_finitedifferencefield(sim)
This function takes a Fidimag simulation object, and constructs a
Finite Difference Field object which has the magnetisation configuration
from the simulation at the last time step.
"""
cmin = np.array([sim.mesh.x0, sim.mesh.y0, sim.mesh.z0])*sim.mesh.unit_length
    cmax = tuple(cmin + np.array([sim.mesh.Lx, sim.mesh.Ly, sim.mesh.Lz]) * sim.mesh.unit_length)
cmin = tuple(cmin)
d = tuple(np.array([sim.mesh.dx, sim.mesh.dy, sim.mesh.dz])*sim.mesh.unit_length)
field = finitedifferencefield.Field(cmin, cmax, d)
    numpyfield = sim.spin.copy().reshape((sim.mesh.nx, sim.mesh.ny, sim.mesh.nz, 3))
field.f = numpyfield
return field
|
b612127c457e4d8901a7bf7558210c3ea43a19c4
| 32,529 |
from typing import Tuple
def ERA_IrregularImgGrid(
lons: np.ndarray,
lats: np.ndarray,
bbox: Tuple[float, float, float, float] = None,
) -> CellGrid:
"""
    Create an irregular grid from the passed coordinates.
"""
lons_gt_180 = np.where(lons > 180.0)
lons[lons_gt_180] = lons[lons_gt_180] - 360
grid = BasicGrid(lons.flatten(), lats.flatten())\
.to_cell_grid(cellsize=5.0)
if bbox is not None:
gpis = grid.get_bbox_grid_points(
lonmin=bbox[0], latmin=bbox[1], lonmax=bbox[2], latmax=bbox[3]
)
grid = grid.subgrid_from_gpis(gpis)
return grid
|
461d98eaf7de17fa5b0fb510ce7c24698872e74e
| 32,530 |
import inspect
def create_enforce_validation_serializer(serializer=None, **kwargs):
"""
Public function that creates a copy of a serializer which enforces ``must_validate_fields``.
The difference between this function and ``_create_enforce_validation_serializer``
is that this function can be used both as a direct decorator and decorator with
parameters.
For example::
@create_enforce_validation_serializer
class MySerializer(BaseSerializer): pass
# or
@create_enforce_validation_serializer(param=value)
class MySerializer(BaseSerializer): pass
# or
create_enforce_validation_serializer(
MySerializer,
param=value
)
"""
# used as direct decorator so then simply return new serializer
# e.g. @decorator
# class MySerializer(...)
# or used as regular function
# e.g. function(Serializer, foo=bar)
if inspect.isclass(serializer) and issubclass(serializer, serializers.Serializer):
return _create_enforce_validation_serializer(serializer, **kwargs)
# used as decorator with parameters
# e.g. @decorator(foo=bar)
# class MySerializer(...)
elif serializer is None:
def inner(serializer):
return _create_enforce_validation_serializer(serializer, **kwargs)
return inner
else:
raise TypeError(
'create_enforce_validation_serializer can only be only on serializers. '
'It was called with "{}"'.format(type(serializer))
)
|
63f1e05148aa3a8190316898d4b111334bebf47b
| 32,531 |
def read_FDF_cube(filename):
"""Read in a FDF/RMSF cube. Figures out which axis is Faraday depth and
puts it first (in numpy order) to accommodate the rest of the code.
Returns: (complex_cube, header,FD_axis)
"""
HDULst = pf.open(filename, "readonly", memmap=True)
head = HDULst[0].header.copy()
FDFreal = HDULst[0].data
FDFimag = HDULst[1].data
complex_cube = FDFreal + 1j * FDFimag
#Identify Faraday depth axis (assumed to be last one if not explicitly found)
Ndim=head['NAXIS']
FD_axis=Ndim
#Check for FD axes:
for i in range(1,Ndim+1):
try:
if 'FDEP' in head['CTYPE'+str(i)].upper():
FD_axis=i
except:
pass #The try statement is needed for if the FITS header does not
# have CTYPE keywords.
#Move FD axis to first place in numpy order.
if FD_axis != Ndim:
complex_cube=np.moveaxis(complex_cube,Ndim-FD_axis,0)
#Remove degenerate axes to prevent problems with later steps.
complex_cube=complex_cube.squeeze()
return complex_cube, head,FD_axis
|
25ab8de5ab75b7e6170a897e5bee4ec067a1c4d4
| 32,533 |
import torch
def bert_vector(text, tokenizer, model, args):
"""
BERT-based embedding.
:param text: original text to be embedded.
:param tokenizer: BERT tokenizer.
:param model: BERT model.
:param args: args.
:return: BERT embedded sentence.
"""
text = '[CLS] ' + text + ' [SEP]'
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
tokens_tensor = torch.LongTensor([indexed_tokens]).to(args.device)
layer_output = model(tokens_tensor, output_all_encoded_layers=True)
    encoded_layer = np.array(np.mean(layer_output[0][-2].cpu().detach().numpy()[0], axis=0))  # take the second-to-last layer as the encoded layer.
return encoded_layer
|
cbe2b74327d1e05761b922e4cc1f30cc399251ec
| 32,534 |
from typing import List
from typing import Dict
def compute_benchmark_energies(molecule: tq.chemistry.Molecule,
benchmarks: List[str]) -> Dict:
"""
This uses psi4 to perforom the classical benchmarks.
"""
energy_benchmarks = {}
for method in ["hf", "mp2", "ccsd", "fci"]:
if method in benchmarks:
energy_benchmarks[method + "_energy"] = molecule.compute_energy(method=method)
return energy_benchmarks
|
e8350a36c51418f6dd0cd2a571c43234577ec557
| 32,535 |
def fine_paid_message(message):
"""Returns true if the format of paid message is fine"""
    return (
        (len(message.mentions) > 0 or message.mention_everyone)
        and get_amount(message) is not None
        and message.channel.name == "expenses"
        and (message.content.split(" ")[0] == '!paid'
             or message.content.split(" ")[0] == '<@505263369176219658>')
    )
|
d545310d7606c818f9cc434db7b4caad25da9cab
| 32,536 |
def print_qa(questions, answers_gt, answers_gt_original, answers_pred,
era, similarity=_dirac, path=''):
"""
In:
questions - list of questions
answers_gt - list of answers (after modifications like truncation)
answers_gt_original - list of answers (before modifications)
answers_pred - list of predicted answers
era - current era
similarity - measure that measures similarity between gt_original and prediction;
by default dirac measure
path - path for the output (if empty then stdout is used)
        by default an empty path
Out:
the similarity score
"""
assert(len(questions)==len(answers_gt))
assert(len(questions)==len(answers_pred))
output=['-'*50, 'Era {0}'.format(era)]
score = 0.0
for k, q in enumerate(questions):
a_gt=answers_gt[k]
a_gt_original=answers_gt_original[k]
a_p=answers_pred[k]
        score += similarity(a_p, a_gt_original)
if type(q[0]) is unicode:
tmp = unicode(
'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n')
else:
tmp = 'question: {0}\nanswer: {1}\nanswer_original: {2}\nprediction: {3}\n'
output.append(tmp.format(q, a_gt, a_gt_original, a_p))
score = (score / len(questions))*100.0
output.append('Score: {0}'.format(score))
if path == '':
print('%s' % '\n'.join(map(str, output)))
else:
list2file(path, output)
return score
|
523e474392c41971de1465ae15e1c12ac6b3d359
| 32,538 |
def compute_zhu(props,
zhu_kwargs):
"""
Compute the approximate Zhu-Nakamura hopping probabilities for
each geom in the dataset.
Args:
props (dict): dataset properties
zhu_kwargs (dict): dictionary with information about how
to calculate the hopping rates.
Returns:
zhu_p (torch.Tensor): hopping probabilities
"""
upper_key = zhu_kwargs["upper_key"]
lower_key = zhu_kwargs["lower_key"]
expec_gap_kcal = zhu_kwargs["expec_gap"] * const.AU_TO_KCAL["energy"]
func_type = zhu_kwargs["func_type"]
zhu_p = batch_zhu_p(batch=cat_props(props),
upper_key=upper_key,
lower_key=lower_key,
expec_gap=expec_gap_kcal,
func_type=func_type,
gap_shape=None)
return zhu_p
|
7ad687145e94b8a4b6eb6a3631783583d3cefdbd
| 32,539 |
from dash import dcc, html  # Dash components used below (html.Div/H6, dcc.Dropdown)
def disaster_type_card():
"""
:return: A Div containing dashboard title & descriptions.
"""
return html.Div(
children=[
html.H6("Disaster Type"),
dcc.Dropdown(
options=[
{'label': 'Drought', 'value': 'Drought'},
{'label': 'Flood', 'value': 'Flood'},
{'label': 'Storm', 'value': 'Storm'}
],
value='Drought'
)
]
)
|
8e7ddfc88c075b035be591cd6a0ba6e068f444f8
| 32,540 |
def retrieve_context_topology_node_node_rule_group_inter_rule_group_risk_characteristic_risk_characteristic(uuid, node_uuid, node_rule_group_uuid, inter_rule_group_uuid): # noqa: E501
"""Retrieve risk-characteristic
Retrieve operation of resource: risk-characteristic # noqa: E501
:param uuid: ID of uuid
:type uuid: str
:param node_uuid: ID of node_uuid
:type node_uuid: str
:param node_rule_group_uuid: ID of node_rule_group_uuid
:type node_rule_group_uuid: str
:param inter_rule_group_uuid: ID of inter_rule_group_uuid
:type inter_rule_group_uuid: str
:rtype: List[str]
"""
return 'do some magic!'
|
73a2bcbae8cc023db6dd4378a93fb66c75d1ed11
| 32,541 |
import struct
def txt2domainname(input, canonical_form=False):
"""turn textual representation of a domain name into its wire format"""
if input == ".":
d = b'\x00'
else:
d = b""
for label in input.split('.'):
label = label.encode('ascii')
if canonical_form:
label = label.lower()
length = len(label)
d += struct.pack('B', length) + label
return d
|
d5312ab1333810eaba6bac2f16e15441ea290dc9
| 32,542 |
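A small check of the wire encoding produced by `txt2domainname` above (note that, as written, the function does not append the terminating root label byte for non-root names).

```python
wire = txt2domainname("www.Example.com", canonical_form=True)
assert wire == b"\x03www\x07example\x03com"   # length-prefixed, lowercased labels
```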
def hstack(*args):
"""
Stacks images horizontally
If image depths are mismatched then converts grayscale images to bgr before stacking
"""
depths = [depth(im) for im in args]
gray = [d == 1 for d in depths]
if all(gray):
return np.hstack(args)
    elif not any(gray):  # all colour images: stack directly
return np.hstack(args)
else:
ims = [gray_to_bgr(im) if depth(im) == 1 else im for im in args]
return np.hstack(ims)
|
5e4a2278a9f5283446910d88053b52ee17e1cf6f
| 32,545 |
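A mixed-depth stacking sketch for `hstack` above; `depth` and `gray_to_bgr` are helpers assumed to live in the same module, with `gray_to_bgr` producing a 3-channel image.

```python
import numpy as np

gray = np.zeros((4, 4), dtype=np.uint8)        # single-channel image
color = np.zeros((4, 4, 3), dtype=np.uint8)    # 3-channel image

combined = hstack(gray, color)                 # gray is promoted, result is (4, 8, 3)
```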
def sample(x, n):
"""
    Take the first n rows of x (a deterministic head, not a random sample)
"""
# import random
# print(random.sample(list(x.index), n))
return x.iloc[list(range(n))]
|
3802f976f2a97a08945d8246093784c06fb07e67
| 32,546 |
def floor_div_mul(value, factor):
"""Fuction to get floor number."""
return (((value) // (factor))) *(factor)
|
53adab5c9e0d9d5b27d36e2f77e1e7f0edd05585
| 32,547 |
def dup_div(f, g, K):
"""
Polynomial division with remainder in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ, QQ
>>> R, x = ring("x", ZZ)
>>> R.dup_div(x**2 + 1, 2*x - 4)
(0, x**2 + 1)
>>> R, x = ring("x", QQ)
>>> R.dup_div(x**2 + 1, 2*x - 4)
(1/2*x + 1, 5)
"""
if K.is_Field:
return dup_ff_div(f, g, K)
else:
return dup_rr_div(f, g, K)
|
01541d416a08613b1b400da1c6306fdce1ad29ff
| 32,548 |
def append_Z_trace_vertical(path, new_point, height, ex, middle_layer=None, middle_taper=False):
"""Adds new_point to the path list plus TWO Z or S manhattan interesections.
Args:
path: list of tuples containing necessary info (pya.DPoint, layer, width)
new_point: tuple ((x, y) or pya.DPoint, layer, width)
height: y-coordinate of where to place the inner point,
from 0 to abs(new_point.y - path.y)
ex: orientation of ports
middle_layer (optional): layer of middle trace
middle_taper (default False): Adds a middle point in the Z-shaped trace attempting to avoid collisions and DRC errors.
"""
assert len(path) > 0
ey = rotate90(ex)
P0, l0, w0 = path[-1]
P3, l3, w3 = new_point
height = abs(height)
# assert height <= abs(P0 * ey - P3 * ey)
# Invert sign of height if P3 is below P0
if P3 * ey < P0 * ey:
height = -height
P1 = P0 + height * ey
P2 = P1 * ey * ey + P3 * ex * ex
# selecting middle_layer
if middle_layer is None:
l1, l2 = l0, l3
else:
l1 = l2 = middle_layer
# lmid defined below
# selecting middle widths
w1, w2 = w0, w3
if (P2 - P1).norm() <= w1 + w2:
# w1 = w2 = min(w1, w2)
middle_taper = False # middle taper when points are that close looks weird
if w1 < w2:
wmid = w1
lmid = l1
else:
wmid = w2
lmid = l2
path.append((P1, l1, w1))
# move P2 a little bit to avoid acute corners
delta_w = abs(w2 - w1) / 2
if P3 * ey < P0 * ey:
delta_w = -delta_w
P2 += delta_w * ey
Pmid = (P1 + P2) / 2
if (P1 - P2).norm() <= max(w1, w2):
if (P3 - P2) * ey > max(w1, w2) * 3:
path.append((P2 + ey * max(w1, w2) * 3, l2, w2))
else:
path.append((P3 + ey * max(w1, w2) * 0.2, l3, w3))
else:
if middle_taper:
path.append((Pmid, lmid, wmid))
path.append((P2, l2, w2))
path.append(new_point)
return path
|
17bce8bafd5c627d3e1d8b793e061816348fa086
| 32,549 |
from typing import List
def batch_ext(order: pd.Series, batch: pd.Series, classes: pd.Series,
class_list: List[str], ext: str) -> pd.Series:
"""
get minimum/maximum order of samples of classes in class_list. Auxiliary
function to be used with BatchChecker / FeatureCheckerBatchCorrection
Parameters
----------
order: pandas.Series
run order
batch: pandas.Series
batch number
classes: pandas.Series
sample classes
class_list: list[str]
classes to be considered
ext: {"min", "max"}
Search for the min/max order in each batch.
Returns
-------
pd.Series with the corresponding min/max order with batch as index.
"""
func = {"min": lambda x: x.min(), "max": lambda x: x.max()}
func = func[ext]
ext_order = (order
.groupby([classes, batch])
.apply(func)
.reset_index()
.groupby(classes.name)
.filter(lambda x: x.name in class_list)
.groupby(batch.name)
.apply(func)[order.name])
return ext_order
|
eb48eee2392cb8c1db3691a38e9be97a243dc730
| 32,550 |
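A minimal sketch for `batch_ext` above: the earliest run order of QC samples within each batch (series names and class labels are made up).

```python
import pandas as pd

order = pd.Series([1, 2, 3, 4, 5, 6], name="order")
batch = pd.Series([1, 1, 1, 2, 2, 2], name="batch")
classes = pd.Series(["QC", "sample", "QC", "sample", "QC", "QC"], name="class")

first_qc = batch_ext(order, batch, classes, ["QC"], "min")
# Series indexed by batch: batch 1 -> 1, batch 2 -> 5
```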
from typing import Tuple
from time import time
def chunk_push(params: dict | None = None) -> Tuple[bool | str, ...]:
"""
A helper function to push the data to database in chunks.
:param params: A dictionary that contains below structure.
{
"data_rowcount": data row count,
"chunksize": chunksize,
"data": data,
"table_name": table name,
"engine": engine object,
"db_row_count": database row count,
"table_exists": True, if table already exists, default False
"start_time": start time of the push operation.
}
:return tuple(bool, str)
"""
try:
data_rc = params.get("data_rowcount", None)
chunksize = params.get("chunksize", None)
data = params.get("data", None)
table_name = params.get("table_name", None)
engine = params.get("engine", None)
db_row_count = params.get("db_row_count", None)
tb_exists = params.get("table_exists", False)
st = params.get("start_time", None)
loop_count, leftout = divmod(data_rc, chunksize)
if leftout:
loop_count += 1
for step in range(loop_count):
data[step * chunksize: (step + 1) * chunksize].to_sql(name=table_name,
con=engine,
if_exists="append",
index=False,
chunksize=chunksize)
logger.log(message=f"Pushed {(step + 1) * chunksize} records out of {data_rc}")
n_row_count = pd.read_sql(sql=f"SELECT COUNT(*) FROM {table_name}", con=engine)["count"][0]
if tb_exists:
logger.log(message=f"Expected to push {data_rc} records, pushed {n_row_count - db_row_count} records to database in {round(time() - st, 3)} seconds")
return tuple([True, f"{n_row_count - db_row_count} pushed"])
else:
logger.log(message=f"Expected to push {data_rc} records, pushed {n_row_count} records to database in {round(time() - st, 3)} seconds")
return tuple([True, f"{n_row_count} pushed"])
else:
for step in range(loop_count):
data[step * chunksize: (step + 1) * chunksize].to_sql(name=table_name,
con=engine,
if_exists="append",
chunksize=chunksize)
logger.log(message=f"Pushed {(step + 1) * chunksize} records out of {data_rc}")
n_row_count = pd.read_sql(sql=f"SELECT COUNT(*) FROM {table_name}", con=engine)["count"][0]
if tb_exists:
logger.log(message=f"Expected to push {data_rc} records, pushed {n_row_count - db_row_count} records to database in {round(time() - st, 3)} seconds")
return tuple([True, f"{n_row_count - db_row_count} pushed"])
else:
logger.log(message=f"Expected to push {data_rc} records, pushed {n_row_count} records to database in {round(time() - st, 3)} seconds")
return tuple([True, f"{n_row_count} pushed"])
except Exception as err:
logger.sys_exception(err)
|
a65612d7ffbc83fa0bceb13a8d2c853aa79dcfeb
| 32,551 |
def total_error_avgAx0(y_true, y_pred):
"""
"""
avgY = K.mean(y_true, axis=0, keepdims=True) # 0 is sample axis
return K.sum(K.square(y_true - avgY))
|
6fd12b66b689b4ff5b26ba6f0456927e902ec36b
| 32,552 |
def var_aexp(compiler, node):
""" Variable compilation """
if node.context == 'assign':
gc = GC(compiler)
if compiler.environment.is_exist_local_var(node.name):
var = compiler.environment.get_local_var(node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s]' % Registers.ESP])
compiler.code.add(Commands.MOV, [var_type, Registers.EAX])
compiler.environment.update_local_var_type(node.name, node.type)
compiler.code.add(Commands.MOV, [Registers.EAX, var])
compiler.code.add(Commands.MOV, [Registers.EBX, var_type])
gc.decrement()
else:
var = compiler.environment.add_local_var(node.type, node.name)
var_type = compiler.environment.get_local_var_runtime_type(node.name)
if compiler.environment.defined_object is not None:
compiler.environment.set_link_object(var, compiler.environment.defined_object)
compiler.environment.defined_object = None
compiler.code.add(Commands.MOV, [Registers.EAX, 'dword [%s + 4]' % Registers.ESP])
compiler.code.add(Commands.MOV, [Registers.EBX, 'dword [%s]' % Registers.ESP])
gc.increment()
compiler.code.add(Commands.POP, var_type)
compiler.code.add(Commands.POP, var)
else:
compiler.code.add(Commands.MOV, [Registers.EAX, compiler.environment.get_local_var(node.name)])\
.add(Commands.PUSH, Registers.EAX)
runtime_var_type = compiler.environment.get_local_var_runtime_type(node.name)
compiler.types.set(runtime_var_type)
var_type = compiler.environment.get_local_var_type(node.name)
return var_type
|
003ad8a1db094516b9303e8769ceea48ef67a88b
| 32,553 |
from .model_store import get_model_file
def resnet50_v1b(pretrained=False, root='~/.mxnet/models', ctx=cpu(0), **kwargs):
"""Constructs a ResNetV1b-50 model.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
ctx : Context, default CPU
The context in which to load the pretrained weights.
dilated: bool, default False
        Whether to apply dilation strategy to ResNetV1b, yielding a stride-8 model.
norm_layer : object
Normalization layer used in backbone network (default: :class:`mxnet.gluon.BatchNorm`;
last_gamma : bool, default False
Whether to initialize the gamma of the last BatchNorm layer in each bottleneck to zero.
use_global_stats : bool, default False
Whether forcing BatchNorm to use global statistics instead of minibatch statistics;
optionally set to True if finetuning using ImageNet classification pretrained models.
"""
model = ResNetV1b(BottleneckV1b, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_params(get_model_file('resnet%d_v%db'%(50, 1),
root=root), ctx=ctx)
return model
|
5357c998f5132b4121e0ec96f688177987139ff7
| 32,554 |
def _f_expl_5_cash_karp_adptv(x0, Y0, dx, *args, dYdx=None, econ=0.0001889568, eps=0.1, pgrow=-0.2, pshrink=-0.25, safety=0.9, **kwargs):
"""Explicit adaptive 5th-order Cash-Karp method
Parameters
----------
x0 : Intvar
Integration variable at beginning of scheme
Y0 : Field
Variable to be integrated at the beginning of scheme
dx : IntVar
Stepsize of integration variable
dYdx : Field, optional, default : None
Current derivative. Will be calculated, if not set.
    econ : float, optional, default : 0.0001889568
        Error control parameter for setting stepsize
    eps : float, optional, default : 0.1
        Desired maximum relative error
    pgrow : float, optional, default : -0.2
        Power for increasing step size
    pshrink : float, optional, default : -0.25
        Power for decreasing stepsize
safety : float, optional, default : 0.9
Safety factor when changing step size
args : additional positional arguments
    kwargs : additional keyword arguments
Returns
-------
dY : Field
Delta of variable to be integrated
False if step size too large
Butcher tableau
---------------
0 | 0 0 0 0 0 0
1/5 | 1/5 0 0 0 0 0
3/10 | 3/40 9/40 0 0 0 0
3/5 | 3/10 -9/10 6/5 0 0 0
1 | -11/54 5/2 -70/27 35/27 0 0
7/8 | 1631/55296 175/512 575/13824 44275/110592 253/4096 0
------|----------------------------------------------------------------
| 37/378 0 250/621 125/594 0 512/1771
| 2825/27648 0 18575/48384 13525/55296 277/14336 1/4
"""
k0 = Y0.derivative(x0, Y0) if dYdx is None else dYdx
k1 = Y0.derivative(x0 + c1*dx, Y0 + a10*k0 * dx)
k2 = Y0.derivative(x0 + c2*dx, Y0 + (a20*k0 + a21*k1)*dx)
k3 = Y0.derivative(x0 + c3*dx, Y0 + (a30*k0 + a31*k1 + a32*k2)*dx)
k4 = Y0.derivative(x0 + dx, Y0 + (a40*k0 + a41*k1 + a42*k2 + a43*k3)*dx)
k5 = Y0.derivative(x0 + c5*dx, Y0 + (a50*k0 + a51 *
k1 + a52*k2 + a53*k3 + a54*k4)*dx)
Yscale = np.abs(Y0) + np.abs(dx*k0)
Yscale[Yscale == 0.] = 1.e100 # Deactivate for zero crossings
e = dx*(e0*k0 + e2*k2 + e3*k3 + e4*k4 + e5*k5)
emax = np.max(np.abs(e/Yscale)) / eps
# Integration successful
if emax <= 1.:
# Suggest new stepsize
dxnew = safety*dx*emax**pgrow if econ < emax else 5.*dx
x0.suggest(dxnew)
return dx*(b0*k0 + b2*k2 + b3*k3 + b5*k5)
else:
# Suggest new stepsize
dxnew = np.maximum(safety*dx*emax**pshrink, 0.1*dx)
x0.suggest(dxnew)
return False
|
3d50e50d2c11b1e274c6e1a88b79780132e140ae
| 32,555 |
def issubset(list1, list2):
"""
Examples:
>>> issubset([], [65, 66, 67])
True
>>> issubset([65], [65, 66, 67])
True
>>> issubset([65, 66], [65, 66, 67])
True
>>> issubset([65, 67], [65, 66, 67])
False
"""
n = len(list1)
for startpos in range(len(list2) - n + 1):
if list2[startpos:startpos+n] == list1:
return True
return False
|
2f4acbba54b303b7041febbe485d8960baa6528f
| 32,556 |
def make_start_and_end_images_with_words(start_words, end_words, perm: bool = False, repeat=1, size=150):
"""
Make two images from two sets of words.
:param start_words: Words to use for the first image.
:param end_words: Words to use for the second image
:param perm: Whether the words should be permuted or not
:param repeat: How many times the words should be repeated
:param size: The size of the image (a number or tuple)
:return: An image
"""
start_images = list()
for word in start_words:
start_images.append(image_of_text(word))
end_images = list()
for word in end_words:
end_images.append(image_of_text(word))
start_images = start_images * repeat
end_images = end_images * repeat
if perm:
start_images = permute(start_images)
end_images = permute(end_images)
start_im = concatenate_images(start_images)
end_im = concatenate_images(end_images)
end_im = resize_image_to_be_able_to_fit_shape(end_im, size)
end_im = end_im.resize(start_im.size)
return start_im, end_im
|
7ecb10eec8a447ffbc0f5f17f596dfc55e6e822b
| 32,557 |
def bbox2corners(bbox):
"""
box parameters to four box corners
"""
x, y, w, h, _ = bbox.tolist()
xy_tl = [x - w/2, y - h/2]
xy_dl = [x - w/2, y + h/2]
xy_dr = [x + w/2, y + h/2]
xy_tr = [x + w/2, y - h/2]
return np.array([xy_tl, xy_dl, xy_dr, xy_tr])
|
50ac1dd1172c95b1edcf458e55759c01a593759c
| 32,558 |
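A worked example for `bbox2corners` above, with a box centred at (10, 20), width 4, height 2 (the fifth element of the parameter vector is ignored).

```python
import numpy as np

bbox = np.array([10.0, 20.0, 4.0, 2.0, 0.0])
corners = bbox2corners(bbox)
# [[ 8., 19.],   top-left
#  [ 8., 21.],   down-left
#  [12., 21.],   down-right
#  [12., 19.]]   top-right
```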
def clean_game_data_recent(df: pd.DataFrame, shift=False) -> dict:
"""Clean game data post mid-2018.
Follows a slightly different format than pre-2018 data.
"""
month_game_data = {}
game_type = "tables"
if shift == True: # APR_2021, MAY_2021 had no %diff, so we need to shift
add = 1
else:
add = 0
for row in df.to_records("list"):
row_list = list(row)
game = row_list[1 + add]
if "slot" in str(game).lower() or "cent" in str(game).lower():
game_type = "slots"
month_game_data[clean_game(game, game_type)] = dict(
zip(GAME_KEYS, row_list[2 + add : 7 + add])
)
return month_game_data
|
c2d5127b4f8b6d4e7caa03135ce257c5f6ac02a4
| 32,559 |
from typing import Dict
import re
def process_data(dataframe : pd.DataFrame,
label_to_id: Dict,
vocab : Dict = None,
vocab_size : int = 10_000,
max_tokens : int = 200,
max_token_size : int = 40) -> (tf.Tensor, tf.Tensor, dict, dict):
"""
Will take a dataframe read from ``load_data`` and return indexed data, labels, and vocabulary tables
for that dataset.
Parameters
----------
dataframe : ``pd.DataFrame``
A pandas dataframe containing data to be processed, and from which to build vocabulary.
label_to_id : ``Dict``
A dictionary mapping labels to corresponding ids.
vocab : ``Dict``
(Optional) Dictionary mapping tokens to indices.
vocab_size : ``int``
(Optional) If vocab is ``None`` then this denotes the maximum size of the vocabulary
including padding and unknown tokens.
max_tokens : ``int``
(Optional) Maximum number of tokens (aka. words) per sequence. Sequences will be padded to max_tokens if
their length is less than ``max_tokens``.
max_token_size : ``int``
(Optional) Maximum size of an individual token (i.e. how many characters in a token/word).
Returns:
--------
data : ``tf.Tensor``
Tensor containing indexed sequences of tokens.
labels : ``tf.Tensor``
Tensor containing label for each sequence in ``data``.
vocab : ``dict``
Dictionary mapping tokens to indices.
reverse_vocab : ``dict``
Dictionary mapping indices to tokens.
"""
if vocab is not None:
if PAD_TOKEN not in vocab or UNK_TOKEN not in vocab:
raise ValueError('Both {} token and {} token must be in vocabulary.'.format(PAD_TOKEN, UNK_TOKEN))
else:
vocab_size = len(vocab)
def _process_data_helper(text):
# Tokenize text data
tokens = re.findall(r'\w+|[^\w\s]', re.sub(r'[|]{3}', '', text.strip().lower()))[:max_tokens]
# Padding
tokens += [PAD_TOKEN] * (max_tokens - len(tokens))
return np.asarray(tokens).astype('<U{}'.format(max_token_size))
# Tokenize data and labels
data = tf.convert_to_tensor(dataframe['Text'].apply(_process_data_helper))
labels = tf.convert_to_tensor(dataframe['Answer'].apply(lambda x: np.array(re.sub(r'\s+', '_', x))))
if vocab is None:
# Build vocab
counts = np.unique(data, return_counts=True)
counts = [x[counts[0] != PAD_TOKEN.encode('utf8')] for x in counts]
top_words = counts[0][np.argsort(counts[1])[:vocab_size-2:-1]]
top_words = [byte_string.decode('utf8') for byte_string in top_words]
vocab = dict(zip([PAD_TOKEN, UNK_TOKEN]+top_words, range(min(vocab_size, len(top_words)+2))))
reverse_vocab = dict(zip(range(min(vocab_size, len(top_words)+2)), [PAD_TOKEN, UNK_TOKEN]+top_words))
else:
reverse_vocab = swap_key_values(vocab)
# Map tokens to indices
def index_lookup(token):
token = token.decode('utf8')
return vocab[token] if token in vocab else vocab[UNK_TOKEN]
data = tf.keras.backend.map_fn(np.vectorize(index_lookup), data, dtype=tf.int32)
labels = tf.keras.backend.map_fn(np.vectorize(lambda x: label_to_id[x.decode('utf8')]), labels, dtype=tf.int64)
return data, labels, vocab, reverse_vocab
|
c30903af96f1157e08357daedacf67a146ed101c
| 32,560 |
import numpy as np
def tan_to_eq(xiDeg, etaDeg, ra0Deg, dec0Deg):
"""Convert tangential coordinates to equatorial (RA, Dec) in degrees."""
xi = xiDeg * np.pi / 180.
eta = etaDeg * np.pi / 180.
ra0 = ra0Deg * np.pi / 180.
dec0 = dec0Deg * np.pi / 180.
    ra = np.arctan(xi / (np.cos(dec0) - eta * np.sin(dec0))) + ra0
    dec = np.arctan((np.sin(dec0) + eta * np.cos(dec0)) * np.cos(ra - ra0)
                    / (np.cos(dec0) - eta * np.sin(dec0)))
ra = ra * 180. / np.pi
dec = dec * 180. / np.pi
return ra, dec
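A quick sanity check (a sketch, not part of the original): the tangent point maps back to itself, and a small xi offset shifts RA by roughly xi / cos(dec0).

print(tan_to_eq(0.0, 0.0, 150.0, 30.0))   # ~ (150.0, 30.0)
print(tan_to_eq(0.1, 0.0, 150.0, 30.0))   # roughly (150.115, 30.0)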
|
a0e9bad863192dca391ae324f956562b092066c1
| 32,561 |
def type_from_tensors(tensors):
"""Builds a `tff.Type` from supplied tensors.
Args:
tensors: A nested structure of tensors.
Returns:
The nested TensorType structure.
"""
def _mapping_fn(x):
if not tf.is_tensor(x):
x = tf.convert_to_tensor(x)
return computation_types.TensorType(x.dtype.base_dtype, x.shape)
if isinstance(tensors, anonymous_tuple.AnonymousTuple):
return computation_types.to_type(
anonymous_tuple.map_structure(_mapping_fn, tensors))
else:
return computation_types.to_type(
tf.nest.map_structure(_mapping_fn, tensors))
|
16821ca45a2000107069b992aac9a52638f2c5ea
| 32,563 |
from typing import Any
def is_none_or_empty(obj: Any) -> bool:
"""Determine if object is None or empty
:param obj: object
:return: if object is None or empty
"""
return obj is None or len(obj) == 0
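A couple of quick checks illustrating the helper (works for anything with len(): str, list, dict, ...); these are not part of the original snippet.

assert is_none_or_empty(None) and is_none_or_empty("") and is_none_or_empty([])
assert not is_none_or_empty("x") and not is_none_or_empty([0])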
|
a90245326e4c776ca1ee0066e965a4f656a20014
| 32,565 |
def create_python27_start_cmd(app_name,
login_ip, port, load_balancer_host, xmpp_ip):
""" Creates the start command to run the python application server.
Args:
app_name: The name of the application to run
login_ip: The public IP
port: The local port the application server will bind to
load_balancer_host: The host of the load balancer
xmpp_ip: The IP of the XMPP service
Returns:
A string of the start command.
"""
db_location = DATASTORE_PATH
cmd = [
"/usr/bin/python2",
constants.APPSCALE_HOME + "/AppServer/dev_appserver.py",
"--port " + str(port),
"--admin_port " + str(port + 10000),
"--login_server " + login_ip,
"--skip_sdk_update_check",
"--nginx_host " + str(load_balancer_host),
"--require_indexes",
"--enable_sendmail",
"--xmpp_path " + xmpp_ip,
"--php_executable_path=" + str(PHP_CGI_LOCATION),
"--uaserver_path " + db_location + ":"\
+ str(constants.UA_SERVER_PORT),
"--datastore_path " + db_location + ":"\
+ str(constants.DB_SERVER_PORT),
"/var/apps/" + app_name + "/app",
"--host " + appscale_info.get_private_ip()]
if app_name in TRUSTED_APPS:
cmd.extend([TRUSTED_FLAG])
return ' '.join(cmd)
|
5eca494233ab5c38048cc583f564247167306594
| 32,566 |
from operator import and_
import types
import string
def create_query_constraint():
""" Create a constraint for a query WHERE clause """
op = oneOf("= < > >= <= != <>", caseless=True).setName("operator")
basic_constraint = (var + op + var_val).setResultsName("operator")
between = (
var + Suppress(upkey("between")) + value + Suppress(and_) + value
).setResultsName("between")
is_in = (var + Suppress(upkey("in")) + set_).setResultsName("in")
fxn = (
function("attribute_exists", var)
| function("attribute_not_exists", var)
| function("attribute_type", var, types)
| function("begins_with", var, Group(string))
| function("contains", var, value)
| (function("size", var) + op + value)
).setResultsName("function")
all_constraints = between | basic_constraint | is_in | fxn
return Group(all_constraints).setName("constraint")
|
92e3f2dcf87213126b70af7ae1dd302fc15b32e6
| 32,567 |
def reshape2D(x):
"""
Reshapes x from (batch, channels, H, W) to (batch, channels, H * W)
"""
return x.view(x.size(0), x.size(1), -1)
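A minimal sketch of the reshape (assumes PyTorch is installed):

import torch

x = torch.randn(8, 3, 16, 16)   # (batch, channels, H, W)
y = reshape2D(x)
print(y.shape)                  # torch.Size([8, 3, 256])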
|
29851e54bb816fb45184d3ea20e28d786270f662
| 32,568 |
from operator import add
from operator import mul
from operator import sub
def addNoise(s,x):
""" adds s percent random noise to reduce problems with zero traces """
xdif = max(x)-min(x)
n1,n2,n3 = s1.count,s2.count,s3.count
r = randfloat(n1,n2,n3)
x = add(mul(sub(r,0.5),s*xdif),x)
return x
|
a951f37fd48c4cf63d167189bd5674c4dc219245
| 32,569 |
def calc_cs_face_area(lon_b, lat_b, r_sphere = 6.375e6):
"""Calculate area of cubed-sphere grid cells on one face
Inputs must be in degrees. Edge arrays must be
shaped [N+1 x N+1]
"""
# Convert inputs to radians
lon_b_rad = lon_b * np.pi / 180.0
lat_b_rad = lat_b * np.pi / 180.0
r_sq = r_sphere * r_sphere
n_cs = lon_b.shape[1] - 1
# Allocate output array
cs_area = np.zeros((n_cs,n_cs))
# Ordering
valid_combo = np.array([[1,2,4],[2,3,1],[3,2,4],[4,1,3]]) - 1
for i_lon in range(n_cs):
for i_lat in range(n_cs):
lon_corner = np.zeros(4)
lat_corner = np.zeros(4)
xyz_corner = np.zeros((4,3))
for i_vert in range(4):
x_lon = i_lon + (i_vert > 1)
x_lat = i_lat + (i_vert == 0 or i_vert == 3)
lon_corner[i_vert] = lon_b_rad[x_lon,x_lat]
lat_corner[i_vert] = lat_b_rad[x_lon,x_lat]
for i_vert in range(4):
xyz_corner[i_vert,:] = ll2xyz(lon_corner[i_vert],lat_corner[i_vert])
tot_ang = 0.0
for i_corner in range(4):
curr_combo = valid_combo[i_corner,:]
xyz_mini = np.zeros((3,3))
for i_mini in range(3):
xyz_mini[i_mini,:] = xyz_corner[curr_combo[i_mini],:]
curr_ang = sphere_angle(xyz_mini[0,:],xyz_mini[1,:],xyz_mini[2,:])
tot_ang += curr_ang
cs_area[i_lon,i_lat] = r_sq * (tot_ang - (2.0*np.pi))
return cs_area
|
fdc33180a49552e907d2bf683a7a829f4c1f6c47
| 32,570 |
def yices_new_param_record():
"""Creates a new param object, an opaque object that stores various search parameters and options that control the heuristics used by the solver."""
return libyices.yices_new_param_record()
|
b69bd915d1044703a5308ae1d4670e9905b7a180
| 32,571 |
def parse_multipart(input, boundary):
""" Parse multipart/form-data
Parse mime-encoded multipart/form-data into a
python dictionary. This function returns a tuple
consisting of post vars, and files.
    Arguments:
input -- Input data to be parsed
boundary -- Field boundary as returned in the content type
"""
files = {}
post_args = {}
## skip the ending boundary (\r\n--<boundary>--)
skip = len('\r\n' + '--' + boundary + '--')
parts = input[:-skip].split('--' + boundary + '\r\n')
for part in parts:
end = part.find('\r\n\r\n')
if end == -1:
## if the part does not end in '\r\n\r\n', the
## headers are messed up -- better skip this one
continue
headers = parse_headers(part[:end])
name_header = headers.get('Content-Disposition', '')
if not name_header.startswith('form-data'):
## if the header doesn't begin with form-data
## it is an invalid multipart/form-data header
continue
header_fields = parse_header_fields(name_header)
## strip leading and trailing '\r\n'
data = part[end+4:-2]
if 'filename' in header_fields:
name = header_fields.get('name')
files[name] = {
'filename': header_fields.get('filename'),
'content_type': headers.get('Content-Type', 'application/unknown'),
'size': len(data),
'file': data,
}
else:
name = header_fields.get('name')
post_args[name] = data
return (post_args, files)
|
0eef5559bf725654af6489e79c64e9ddc7658bf8
| 32,572 |
def get_fr_trj_ntrj(rslt, start, end, a_params):
"""Check whether event exhibits "blowup" behavior."""
# get spks during candidate replay event
spks_evt = rslt.spks[(start <= rslt.ts) & (rslt.ts < end), :]
# get mask over trj and non-trj PCs
pc_mask = rslt.ntwk.types_rcr == 'PC'
sgm_cutoff = .5 * (1 + rslt.p['SGM_MAX'])
trj_mask = (rslt.ntwk.sgm * pc_mask.astype(float)) > sgm_cutoff
ntrj_mask = (~trj_mask) & pc_mask
# get trj-PC spks
spks_trj = spks_evt[:, trj_mask]
fr_trj = (spks_trj.sum(0) / (end - start)).mean()
# get non-trj-PC spks
spks_ntrj = spks_evt[:, ntrj_mask]
fr_ntrj = (spks_ntrj.sum(0) / (end - start)).mean()
# return trj-PC and non-trj-PC firing rates
return fr_trj, fr_ntrj
|
7cb2f0262debba36789e4e230b4d26dd096cf950
| 32,573 |
from typing import Sequence
from typing import Any
def gapfilling(circuit: cirq.Circuit, placeholder: Sequence[Any]) -> cirq.Circuit:
"""
    Fill idle qubits in every moment of ``circuit`` with single-qubit gates drawn from ``placeholder``.
    :param circuit: circuit to pad
    :param placeholder: sequence of single-qubit gate types, one consumed per idle qubit per moment (``cirq.I`` entries are skipped)
    :return: a new circuit with the fill gates appended
"""
n_circuit = cirq.Circuit()
all_qubits = sorted(circuit.all_qubits())
i = 0
for m in circuit.moments:
n_circuit.append(m)
occupied_qubits = set()
for g in m:
for q in g.qubits:
occupied_qubits.add(q)
for q in all_qubits:
if q not in occupied_qubits:
if placeholder[i] != cirq.I:
n_circuit.append(placeholder[i](q))
i += 1
return n_circuit
|
23add843a8546bf9177da87c1bb7cb161fedc3cc
| 32,574 |
import logging
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def train_data_from_directory(train_path, val_path, target_size=(224, 224), batch_size=16,
rescale=1. / 255, rotation_range=20, width_shift_range=0.2,
height_shift_range=0.20, zoom_range=0.3, vertical_flip=True,
horizontal_flip=True, brightness_range=(0.7, 1.2), classes=None):
"""从指定数据集生成数据,如果没有验证集请将val_path设置为空"""
train_datagen = ImageDataGenerator(rescale=rescale, rotation_range=rotation_range,
width_shift_range=width_shift_range,
height_shift_range=height_shift_range, brightness_range=brightness_range,
zoom_range=zoom_range, vertical_flip=vertical_flip,
horizontal_flip=horizontal_flip)
val_datagen = ImageDataGenerator(rescale=rescale)
train_gen = train_datagen.flow_from_directory(train_path, classes=classes, target_size=target_size,
batch_size=batch_size)
if val_path:
val_gen = val_datagen.flow_from_directory(val_path, target_size=target_size, classes=classes,
batch_size=batch_size)
if train_gen.class_indices == val_gen.class_indices:
return train_gen, val_gen
else:
logging.info("训练集与验证集类别定义不一致!")
return False
else:
return train_gen
|
d3f6906c5fb8e6eebabc1971a1430ac4d183cfeb
| 32,575 |
def add_to_group(ctx, user, group):
"""Adds a user into a group.
Returns ``True`` if is just added (not already was there).
:rtype: bool
"""
result = ctx.sudo(f'adduser {user} {group}').stdout.strip()
return 'Adding' in result
|
abb7b432f4e4206a3509d1376adca4babb02e9f0
| 32,576 |
def ifft(X):
"""
Inverse FFT with normalization by N, so that x == ifft(fft(x)) within
round-off errors.
"""
N, x = len(X), fft(X, sign=1) # e^{j2\pi/N}
for i in range(N):
x[i] /= float(N)
return x
|
df388803e0a7ad91136ad174dee7d82521e60ce4
| 32,577 |
from typing import Dict, List
def _validate_fields(fields: List[_Field]
) -> List[mapry.validation.SchemaError]:
"""
    Validate the correctness of the corresponding C++ fields of a composite.
Notably, we validate that there are no duplicates and no reserved keywords
in the C++ fields of a composite.
:param fields: C++ fields corresponding to the composite
:return: list of errors, or an empty list if no errors
"""
errs = [] # type: List[mapry.validation.SchemaError]
name_to_fields = dict() # type: Dict[str, _Field]
for field in fields:
if field.name in _KEYWORDS:
errs.append(
mapry.validation.SchemaError(
message=(
"The C++ field identifier {!r} "
"is a keyword in C++").format(field.name),
ref=field.ref))
if field.name in name_to_fields:
errs.append(
mapry.validation.SchemaError(
message=(
"The C++ field identifier {!r} "
"conflicts another field ({})").format(
field.name, name_to_fields[field.name].ref),
ref=field.ref))
else:
name_to_fields[field.name] = field
return errs
|
49dfd85d473f4934b55539e7db6a707500ead547
| 32,578 |
def frames_collection(collection):
"""
returns a list of frames extracted from a collection of NAF files.
:param collection: collection of dictionaries with relevant info extracted from NAF files
:param collection: list
"""
collection_frames = []
for info_dict in collection:
for frame in frames_from_dict(info_dict):
collection_frames.append(frame)
return collection_frames
|
6255a826d24bce0e26d40e7d1dbbd093c1e43a19
| 32,579 |
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.execute(*cmd, **kwargs)
|
185c18c3c30305d302e8582b708e1f80069fb79a
| 32,580 |
async def async_build_devices(hass, zha_gateway, config_entry, cluster_ids):
"""Build a zigpy device for each cluster id.
This will build devices for all cluster ids that exist in cluster_ids.
They get added to the network and then the sensor component is loaded
which will cause sensor entities to get created for each device.
A dict containing relevant device info for testing is returned. It contains
the entity id, zigpy device, and the zigbee cluster for the sensor.
"""
device_infos = {}
counter = 0
for cluster_id in cluster_ids:
# create zigpy device
device_infos[cluster_id] = {"zigpy_device": None}
device_infos[cluster_id]["zigpy_device"] = await async_init_zigpy_device(
hass,
[cluster_id, general.Basic.cluster_id],
[],
None,
zha_gateway,
ieee=f"00:15:8d:00:02:32:4f:0{counter}",
manufacturer=f"Fake{cluster_id}",
model=f"FakeModel{cluster_id}",
)
counter += 1
# load up sensor domain
await hass.config_entries.async_forward_entry_setup(config_entry, DOMAIN)
await hass.async_block_till_done()
# put the other relevant info in the device info dict
for cluster_id in cluster_ids:
device_info = device_infos[cluster_id]
zigpy_device = device_info["zigpy_device"]
device_info["cluster"] = zigpy_device.endpoints.get(1).in_clusters[cluster_id]
zha_device = zha_gateway.get_device(zigpy_device.ieee)
device_info["zha_device"] = zha_device
device_info[ATTR_ENTITY_ID] = await find_entity_id(DOMAIN, zha_device, hass)
await hass.async_block_till_done()
return device_infos
|
5fb6900fe6bdd65f6ed68da6c51df7149679f36a
| 32,581 |
import torch
def load_ckp(model, ckp_path, device, parallel=False, strict=True):
"""Load checkpoint
Args:
ckp_path (str): path to checkpoint
Returns:
int, int: current epoch, current iteration
"""
ckp = torch.load(ckp_path, map_location=device)
if parallel:
model.module.load_state_dict(
ckp['state_dict'], strict=strict)
else:
model.load_state_dict(ckp['state_dict'], strict=strict)
return ckp['epoch'], ckp['iter']
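A minimal round-trip sketch (assumes PyTorch; the checkpoint layout with 'state_dict', 'epoch' and 'iter' keys matches what this loader expects):

import torch

model = torch.nn.Linear(4, 2)
torch.save({'state_dict': model.state_dict(), 'epoch': 3, 'iter': 1200}, 'ckp.pth')
epoch, it = load_ckp(torch.nn.Linear(4, 2), 'ckp.pth', device='cpu')
print(epoch, it)  # 3 1200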
|
4fe4e368d624583216add3eca62293d5d1539182
| 32,583 |
def show_branch_panel(
on_done,
local_branches_only=False,
remote_branches_only=False,
ignore_current_branch=False,
ask_remote_first=False,
local_branch=None,
selected_branch=None):
"""
Show a quick panel with branches. The callback `on_done(branch)` will
be called when a branch is selected. If the panel is cancelled, `None`
will be passed to `on_done`.
on_done: a callable
    ask_remote_first: whether the remote should be asked for before showing the
        branch panel; if `False`, the options will be in the form `remote/branch`
selected_branch: if `ask_remote_first`, the selected branch will be
`{remote}/{selected_branch}`
"""
bp = BranchPanel(
on_done,
local_branches_only,
remote_branches_only,
ignore_current_branch,
ask_remote_first,
selected_branch)
bp.show()
return bp
|
b458185b63281ab1da9d6b87047e3b8145c0d52b
| 32,584 |
import torch
def map_str(string,char_encoding=default_char_encoding):
"""
    Maps each character of ``string`` to its integer code via ``char_encoding`` and returns a LongTensor.
"""
hold = torch.Tensor([char_encoding[x] for x in string])
hold = hold.long()
return hold
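A small usage sketch. default_char_encoding is a module-level mapping the snippet assumes is already defined; here an explicit encoding is passed instead:

import torch

encoding = {'a': 0, 'b': 1, 'c': 2}
print(map_str("abca", char_encoding=encoding))  # tensor([0, 1, 2, 0])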
|
fd69d80d6bfa3fb7f55c17063f1878b3e9216526
| 32,585 |
def parse_section(section):
"""
Works out the component and section from the "Section" field.
Sections like `python` or `libdevel` are in main.
Sections with a prefix, separated with a forward-slash also show the
component.
It returns a list of strings in the form [component, section].
For example, `non-free/python` has component `non-free` and section
`python`.
``section``
Section name to parse.
"""
if '/' in section:
component, thesection = section.split('/', 1)
if component not in ("main", "contrib", "non-free"):
return ['main', section.replace('/', '_')]
else:
return [component, thesection]
else:
return ['main', section]
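Example behaviour (a quick sketch, not from the original source):

print(parse_section('python'))            # ['main', 'python']
print(parse_section('non-free/python'))   # ['non-free', 'python']
print(parse_section('universe/libs'))     # ['main', 'universe_libs']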
|
06ab4f4ebb874e1a5b94c5e6736b4af4a137897f
| 32,586 |
def tpm3_exon1_exon8_t_to_g():
"""Create test fixture for TPM3."""
params = {
"gene": "TPM3",
"chr": "NC_000001.11",
"start": 154192135,
"end": 154170399,
"exon_start": 1,
"exon_end": 8,
"exon_end_offset": 0,
"exon_start_offset": 0,
"transcript": "NM_152263.3",
"strand": -1
}
return GenomicData(**params)
|
75075a328113cf0fe3639139c2f9170e96ae9a47
| 32,587 |
def ecg_rsp(ecg_rate, sampling_rate=1000, method="vangent2019"):
"""**ECG-Derived Respiration (EDR)**
    Extract ECG-Derived Respiration (EDR), a proxy of a respiratory signal based on heart rate.
Note that this implementation is far from being complete, as the information in the related
papers prevents me from getting a full understanding of the procedure. **Help is required to
document, test and validate the function!**
Parameters
----------
ecg_rate : array
The heart rate signal as obtained via ``ecg_rate()``.
sampling_rate : int
The sampling frequency of the signal that contains the R-peaks (in Hz,
i.e., samples/second). Defaults to 1000Hz.
method : str
Can be one of ``"vangent2019"`` (default), ``"soni2019"``, ``"charlton2016"`` or
``"sarkar2015"``.
Returns
-------
array
A Numpy array containing the derived respiratory rate.
Examples
--------
* **Example 1:** Compare to real RSP signal
.. ipython:: python
import neurokit2 as nk
# Get heart rate
data = nk.data("bio_eventrelated_100hz")
rpeaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
ecg_rate = nk.signal_rate(rpeaks, sampling_rate=100, desired_length=len(rpeaks))
# Get ECG Derived Respiration (EDR) and add to the data
data["EDR"] = nk.ecg_rsp(ecg_rate, sampling_rate=100)
# Visualize result
@savefig p_ecg_rsp1.png scale=100%
nk.signal_plot([data["RSP"], data["EDR"]], standardize = True)
* **Example 2:** Methods comparison
.. ipython:: python
data["vangent2019"] = nk.ecg_rsp(ecg_rate, sampling_rate=100, method="vangent2019")
data["sarkar2015"] = nk.ecg_rsp(ecg_rate, sampling_rate=100, method="sarkar2015")
data["charlton2016"] = nk.ecg_rsp(ecg_rate, sampling_rate=100, method="charlton2016")
data["soni2019"] = nk.ecg_rsp(ecg_rate, sampling_rate=100, method="soni2019")
# Visualize results
@savefig p_ecg_rsp2.png scale=100%
nk.signal_plot([data["RSP"], data["vangent2019"], data["sarkar2015"],
data["charlton2016"], data["soni2019"]], standardize = True)
References
----------
* van Gent, P., Farah, H., van Nes, N., & van Arem, B. (2019). HeartPy: A novel heart rate
algorithm for the analysis of noisy signals. Transportation research part F: traffic
psychology and behaviour, 66, 368-378.
* Sarkar, S., Bhattacherjee, S., & Pal, S. (2015). Extraction of respiration signal from ECG for
respiratory rate estimation.
* Charlton, P. H., Bonnici, T., Tarassenko, L., Clifton, D. A., Beale, R., & Watkinson, P. J.
(2016). An assessment of algorithms to estimate respiratory rate from the electrocardiogram
and photoplethysmogram. Physiological measurement, 37(4), 610.
* Soni, R., & Muniyandi, M. (2019). Breath rate variability: a novel measure to study the
meditation effects. International Journal of Yoga, 12(1), 45.
"""
method = method.lower()
if method in [
"sarkar2015"
]: # https://www.researchgate.net/publication/304221962_Extraction_of_respiration_signal_from_ECG_for_respiratory_rate_estimation # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.7, order=6)
elif method in [
"charlton2016"
]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5390977/#__ffn_sectitle
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=4 / 60, highcut=60 / 60, order=6)
elif method in ["soni2019"]: # https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6329220/
rsp = signal_filter(ecg_rate, sampling_rate, highcut=0.5, order=6)
elif method in [
"vangent2019"
]: # https://github.com/paulvangentcom/heartrate_analysis_python/blob/1597e8c0b2602829428b22d8be88420cd335e939/heartpy/analysis.py#L541 # noqa: E501
rsp = signal_filter(ecg_rate, sampling_rate, lowcut=0.1, highcut=0.4, order=2)
else:
raise ValueError(
"NeuroKit error: ecg_rsp(): 'method' should be "
"one of 'sarkar2015', 'charlton2016', 'soni2019' or "
"'vangent2019'."
)
return rsp
|
d0964b33053edaf124933c762f40c1e403f7dfd7
| 32,588 |
def _get_version() -> str:
"""Returns the package version.
Adapted from:
https://github.com/deepmind/dm-haiku/blob/d4807e77b0b03c41467e24a247bed9d1897d336c/setup.py#L22
Returns:
Version number.
"""
path = '__init__.py'
version = '__version__'
with open(path) as fp:
for line in fp:
if line.startswith(version):
g = {}
exec(line, g) # pylint: disable=exec-used
return g[version] # pytype: disable=key-error
raise ValueError(f'`{version}` not defined in `{path}`.')
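A minimal sketch of how the helper behaves, using a throwaway package directory (the relative '__init__.py' path means it must be run from that directory):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, '__init__.py'), 'w') as fp:
        fp.write("__version__ = '1.2.3'\n")
    cwd = os.getcwd()
    os.chdir(tmp)
    try:
        print(_get_version())  # -> 1.2.3
    finally:
        os.chdir(cwd)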
|
4811317fc7a22a11b4d074f41afebc7841577ad3
| 32,589 |
def find_misparsed_node(section_node, label, change, amended_labels):
""" Nodes can get misparsed in the sense that we don't always know where
they are in the tree. This uses label to find a candidate for a mis-parsed
node and creates an appropriate change. """
candidates = find_candidate(section_node, label[-1], amended_labels)
if len(candidates) == 1:
candidate = candidates[0]
change['node'] = candidate
change['candidate'] = True
return change
|
2b6318160b8703dc963335029582f05dece42966
| 32,590 |
def degree_corrected_community_generative_model(num_nodes, class_prob,
bp_mu, bp_alpha, bp_beta,
node_theta,
burnin, end_time, n_cores=1, seed=None):
"""
Degree corrected version of the Community Hawkes Independent Pairs (CHIP) generative model.
Check doc string of `base_community_generative_model` or `base_community_generative_model_parallel`.
"""
if n_cores == 1:
return base_community_generative_model(num_nodes, class_prob, bp_mu, bp_alpha, bp_beta,
node_theta=node_theta, burnin=burnin, end_time=end_time, seed=seed)
return base_community_generative_model_parallel(num_nodes, class_prob, bp_mu, bp_alpha, bp_beta,
node_theta=node_theta, burnin=burnin, end_time=end_time,
n_cores=n_cores, seed=None)
|
7effbf8908ffe8e6b5f1df9ec7c7e52d0f1ce6cb
| 32,591 |
from typing import List
from typing import Any
from typing import Generator
def neighborhood(
iterable: List[Any],
) -> Generator[Any, Any, Any]:
""" """
if not iterable:
return iterable
iterator = iter(iterable)
prev_item = None
current_item = next(iterator) # throws StopIteration if empty.
for next_item in iterator:
yield (prev_item, current_item, next_item)
prev_item = current_item
current_item = next_item
yield (prev_item, current_item, None)
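Example: sliding (previous, current, next) windows over a short list:

print(list(neighborhood([1, 2, 3])))
# [(None, 1, 2), (1, 2, 3), (2, 3, None)]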
|
4d07ee920e4861fce88658b1fc5ce719907ba897
| 32,592 |
def grafieken():
"""Deze functie maakt grafieken en plaatst ze in de template.
:return: de HTML pagina
"""
top_3_organismen_grafiek() # Functie die grafiek aanmaakt over de database
top_10_hoogste_scores() # Functie die grafiek aanmaakt over de database
return render_template('grafieken.html')
|
04dfb1857f0095f135bf2484f738a5db3caf32ed
| 32,593 |
import time
def path():
"""
    Pass in the path(s) to the given image(s); an absolute path is required.
    :return: JSON list with the OCR result (text, confidence, box corners) for each image
    :rtype: flask.Response
"""
jsonres = request.get_json()
    # 'images' is the path to the image(s)
image_dir= jsonres.get('images', None)
    # paths of the images to recognize
image_file_list = get_image_file_list(image_dir)
    # return every image's name, bboxes and recognition results together in one list
images_result = []
for image_file in image_file_list:
img, flag = check_and_read_gif(image_file)
if not flag:
img = cv2.imread(image_file)
if img is None:
logger.info("error in loading image:{}".format(image_file))
continue
starttime = time.time()
        # dt_boxes is a list of bbox coordinates; each point is (x, y), ordered as
        # [top-left, top-right, bottom-right, bottom-left], e.g. [[171. 93.], [626. 93.], [626. 139.], [171. 139.]]
        # rec_res is a list of (recognized text, confidence) pairs, e.g. ['为你定制元气美肌', 0.9992783]
dt_boxes, rec_res = text_sys(img)
elapse = time.time() - starttime
print("Predict time of %s: %.3fs" % (image_file, elapse))
# drop_score = 0.5
# dt_num = len(dt_boxes)
# for dno in range(dt_num):
# text, score = rec_res[dno]
# if score >= drop_score:
# text_str = "%s, %.3f" % (text, score)
# print(text_str)
        # convert dt from a numpy array to a list
every_res = []
for rec, dt in zip(rec_res,dt_boxes):
dt = dt.tolist()
one_res = {
"words": rec[0],
"confidence": str(rec[1]),
"left_top": dt[0],
"right_top": dt[1],
"right_bottom":dt[2],
"left_bottom":dt[3],
}
every_res.append(one_res)
one_data = {
"image_name": image_file,
"ocr_result": every_res
}
images_result.append(one_data)
return jsonify(images_result)
|
c0b8f4f5df05b3153df24790c03e3904d3550f5a
| 32,594 |
from tensorflow.contrib import labeled_tensor as lt  # lt is TF's labeled_tensor module, not operator.lt
def input_pipeline(filenames,
experiment_proto,
final_mbsz,
hps,
num_epochs=None,
num_threads=1):
"""Using Queues create an infinite stream of training minibatches.
Args:
filenames: list of paths to sstables tf.Example protos containing training
data.
experiment_proto: selection_pb2.Experiment describing the experiment.
final_mbsz: minibatch size for returned tensors
hps: optional tf.HParams with hyper-parameters to pass on to preprocess.
num_epochs: optional number of epochs to pass over the data.
num_threads: optional number of threads to use for batching output.
Returns:
A dequeue_many node that produces input/output pairs.
"""
prepro_mbsz = 8 * 8 * 1024
with tf.name_scope('input_pipeline'):
filename_queue = tf.train.string_input_producer(
filenames, num_epochs=num_epochs)
reader = tf.SSTableReader()
_, raw_strs = reader.read_up_to(filename_queue, prepro_mbsz)
strs = lt.LabeledTensor(raw_strs, ['batch'])
input_features = getattr(hps, 'input_features', ())
inputs, outputs = preprocess(
strs,
experiment_proto,
input_features=input_features,
kmer_k_max=hps.kmer_k_max,
ratio_random_dna=hps.ratio_random_dna,
mode=hps.preprocess_mode,
total_reads_defining_positive=hps.total_reads_defining_positive,
additional_output=hps.additional_output.split(','))
args = lt.batch(
list(inputs.values()) + [outputs],
batch_size=final_mbsz,
enqueue_many=True,
capacity=4 * final_mbsz,
num_threads=num_threads,
allow_smaller_final_batch=(num_epochs is not None))
inputs = dict(list(zip(list(inputs.keys()), args[:-1])))
outputs = args[-1]
return inputs, outputs
|
ea5d82b0105fc59201dd2f32b60c3560ad082270
| 32,595 |
def mAP(iou_threshold=0.5, threshold=0.01) -> CallbackProtocol:
"""Build a callback that computes mAP. All kwargs
passed to detector.mAP()"""
# pylint: disable=unused-argument
def callback(detector, summaries, data_dir):
return {
f"{split}_mAP": round(
np.nanmean(
list(
metrics.mAP(
**split_data["collections"],
iou_threshold=iou_threshold,
).values()
)
),
2,
)
for split, split_data in data_dir_to_collections(
data_dir, threshold=threshold, detector=detector
).items()
}
return callback
|
9802814db6a9c65648fd338c4b7dad43b60db25e
| 32,596 |
from pathlib import Path
import time
def run_n_experiment(src_path, target_language='es', n_train=2000, n_to_copy=None, eval_tta=False,
n_test=3000,
do_baseline=True, tta_langs=('et',), **classif_kwargs):
"""Experiment on smaller version of IMBD with different augmentation strategies"""
reference_path = make_small_ds(src_path, None, n_train=n_train, n_test=n_test)
experiment_dir = Path(f'/home/paperspace/text-augmentation/imdb_small_aug_{target_language}')
results = {}
start = time.time()
es_metrics = run_experiment(
target_language, orig_small_data_dir=reference_path, lm_cl=10,
n_to_copy=n_to_copy, **classif_kwargs
)
estime = time.time() - start
results.update({'btrans': es_metrics, 'btrans_time': estime})
if eval_tta:
for tta_lang in tta_langs:
add_aug_files(tta_lang, experiment_dir, subdir='test')
start = time.time()
err_tab, tta_df = run_tta_experiment(experiment_dir / 'models' / 'fwd_clas_1.h5',
experiment_dir / 'tmp' / 'itos.pkl',
experiment_dir)
results.update({'tta_err_tab': err_tab, 'tta_df': tta_df.drop('fnames', 1), 'tta_time': time.time() - start})
if not do_baseline:
return results
start = time.time()
baseline_metrics = run_experiment(target_language, orig_small_data_dir=reference_path,
n_to_copy=0, **classif_kwargs)
base_time = time.time() - start
results.update({'baseline': baseline_metrics, 'baseline_time': base_time, })
return results
|
52430b993954f8310e06e32466b927626dc8af3b
| 32,597 |