content | sha1 | id
---|---|---|
def start_nodenetrunner(nodenet_uid):
"""Starts a thread that regularly advances the given nodenet by one step."""
nodenets[nodenet_uid].is_active = True
if runner['runner'].paused:
runner['runner'].resume()
return True | 7511f217beb64936d403a5f5472036206f446c90 | 16,280 |
def transform_coordinates_3d(coordinates, RT):
"""
Input:
coordinates: [3, N]
RT: [4, 4]
Return
new_coordinates: [3, N]
"""
if coordinates.shape[0] != 3 and coordinates.shape[1]==3:
coordinates = coordinates.transpose()
coordinates = np.vstack([coordinates, np.ones((1, coordinates.shape[1]), dtype=np.float32)])
new_coordinates = RT @ coordinates
new_coordinates = new_coordinates[:3, :]/new_coordinates[3, :]
return new_coordinates | 8a31f97bddd1c84a21d4b396e877c2b327e6890b | 16,281 |
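A minimal, hypothetical usage sketch for the snippet above: a pure translation by (1, 2, 3) should move a point at the origin to exactly that offset.
import numpy as np
RT = np.eye(4, dtype=np.float32)             # rigid transform: identity rotation + translation
RT[:3, 3] = [1.0, 2.0, 3.0]
points = np.zeros((3, 1), dtype=np.float32)  # one point at the origin, shape [3, N]
print(transform_coordinates_3d(points, RT).ravel())  # expected: [1. 2. 3.]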
def _get_misclass_auroc(preds, targets, criterion, topk=1, expected_data_uncertainty_array=None):
"""
Get AUROC for Misclassification detection
:param preds: Prediction probabilities as numpy array
:param targets: Targets as numpy array
:param criterion: Criterion to use for scoring on misclassification detection.
    :param topk: Top-k class probabilities to consider while making predictions.
:param expected_data_uncertainty_array: Expected data uncertainty as numpy array
:return: AUROC on misclassification detection
"""
misclassification_targets = (1 - _misclass_tgt(preds, targets, (topk,))).astype(bool)
if criterion == 'entropy':
criterion_values = np.sum(-preds * np.log(preds), axis=1)
elif criterion == 'confidence':
criterion_values = -preds.max(axis=1)
elif criterion == 'model_uncertainty':
criterion_values = np.sum(-preds * np.log(preds), axis=1) - expected_data_uncertainty_array
else:
raise NotImplementedError
return auroc(misclassification_targets, criterion_values) | 282ef66926092e99a62003152daccf733913b6c2 | 16,282 |
from typing import Iterable
from typing import List
def flatten(l: Iterable) -> List:
"""Return a list of all non-list items in l
:param l: list to be flattened
:return:
"""
rval = []
for e in l:
if not isinstance(e, str) and isinstance(e, Iterable):
if len(list(e)):
rval += flatten(e)
else:
rval.append(e)
return rval | 2d2202c21e6da7064491d55d5519c259d10f42c0 | 16,283 |
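A quick, hypothetical usage example of flatten: nested lists and tuples are flattened recursively, strings are kept whole, and empty nested iterables are silently dropped.
print(flatten([1, [2, (3, 4)], "ab", []]))  # expected: [1, 2, 3, 4, 'ab']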
def create_note(dataset_id, fhir_store_id, note_id): # noqa: E501
"""Create a note
Create a note # noqa: E501
:param dataset_id: The ID of the dataset
:type dataset_id: str
:param fhir_store_id: The ID of the FHIR store
:type fhir_store_id: str
:param note_id: The ID of the note that is being created
:type note_id: str
:rtype: NoteCreateResponse
"""
res = None
status = None
try:
store_name = None
try:
store_name = "datasets/%s/fhirStores/%s" % \
(dataset_id, fhir_store_id)
DbFhirStore.objects.get(name=store_name)
except DoesNotExist:
status = 400
res = Error("The specified FHIR store was not found", status)
return res, status
try:
note_create_request = NoteCreateRequest.from_dict(
connexion.request.get_json())
try:
DbPatient.objects.get(
fhirStoreName=store_name,
identifier=note_create_request.patient_id)
except DoesNotExist:
status = 400
res = Error("The specified patient was not found", status)
return res, status
resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
DbNote(
identifier=note_id,
resourceName=resource_name,
fhirStoreName=store_name,
text=note_create_request.text,
type=note_create_request.type,
patientId=note_create_request.patient_id
).save()
note_resource_name = "%s/fhir/Note/%s" % (store_name, note_id)
res = NoteCreateResponse(name=note_resource_name)
status = 201
except NotUniqueError as error:
status = 409
res = Error("Conflict", status, str(error))
except Exception as error:
status = 500
res = Error("Internal error", status, str(error))
return res, status | 396f81b4a6035a9f295faebdd1aa313131d0da2b | 16,284 |
def load_credential_from_args(args):
"""load credential from command
Args:
args(str): str join `,`
Returns:
list of credential content
"""
    if ',' not in args:
        raise ValueError("args must be two credential file paths joined by ','")
    file_path_list = args.split(',')
    if len(file_path_list) != 2:
        raise ValueError("expected exactly two credential file paths, got {}".format(len(file_path_list)))
if not file_path_list[0].endswith('.key'):
file_path_list[0], file_path_list[1] = file_path_list[1], file_path_list[0]
return [load_credential_from_file(file_path) for file_path in file_path_list] | 4f2e0b1e57ee3baaeb1bab3dc0e7e3874aaeec7c | 16,285 |
def encode(string: str, key: str) -> str:
"""
Encode string using the Caesar cipher with the given key
:param string: string to be encoded
:param key: letter to be used as given shift
:return: encoded string
:raises: ValueError if key len is invalid
"""
if len(key) > 1:
raise ValueError("[ERROR] Length of a key may not exceed 1 for Caesar cipher")
return vigener.encode(string, key) | ddba41c5efc01df06290cd6496ef8eb54dbb28be | 16,286 |
def compile_binary(binary, compiler, override_operator=None, **kw):
"""
    If there are 10 or more elements in the `IN` set, inline them to avoid hitting the limit of \
the number of query arguments in Postgres (1<<15).
""" # noqa: D200
operator = override_operator or binary.operator
if operator is not in_op and operator is not notin_op:
return compiler.visit_binary(binary, override_operator=override_operator, **kw)
if isinstance(binary.right, BindParameter):
right_len = len(binary.right.value)
else:
right_len = 0
if right_len >= 10:
left = compiler.process(binary.left, **kw)
kw["literal_binds"] = True
use_any = getattr(binary, "any_values", False) and compiler.dialect.name == "postgresql"
negate = use_any and operator is notin_op
if use_any:
# ANY(VALUES ...) seems to be performing the best among these three:
# 1. IN (...)
# 2. IN(ARRAY[...])
# 3. IN(VALUES ...)
right = any_(Grouping(Values(
binary.left, literal_binds=True,
).data(TupleWrapper(binary.right.value))))
operator = operators.eq
else:
right = binary.right
right = compiler.process(right, **kw)
sql = left + OPERATORS[operator] + right
if negate:
sql = "NOT (%s)" % sql
return sql
elif operator is in_op and right_len == 1:
# IN (<value>) -> = <value>
return compiler.process(binary.left == binary.right.value[0], **kw)
return compiler.visit_binary(binary, override_operator=override_operator, **kw) | 1798ded35c12d6a3bf2e5edc34dcf11ff70ce697 | 16,287 |
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import TypeVar
import itertools
_T = TypeVar("_T")
_S = TypeVar("_S")
def flat_map(
fn: Callable[[_T], Iterable[_S]], collection: Iterable[_T]
) -> Iterator[_S]:
"""Map a function over a collection and flatten the result by one-level"""
return itertools.chain.from_iterable(map(fn, collection)) | a1a09611f920078cb25a23279004acd00ac23142 | 16,288 |
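A hypothetical usage example of flat_map (with the TypeVar definitions added above): each element is expanded into a small list and the results are chained into a single flat iterator.
print(list(flat_map(lambda n: [n, n * 10], [1, 2, 3])))  # expected: [1, 10, 2, 20, 3, 30]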
def create_vector_clock(node_id, timeout):
"""This method builds the initial vector clock for a new key.
Parameters
----------
node_id : int
the id of one node in the cluster
timeout : int
the expire timeout of the key
Returns
-------
dict
        the vector clock as a dictionary
"""
if node_id is not None and timeout is not None:
return {
"versions": [{"nodeId": node_id, "version": 1}],
"timestamp": timeout
}
else:
raise ValueError("You must gave the node id and the timeout.") | ed6df0e7e493d448f52e5fe47b55df8a1de94543 | 16,289 |
def ParseStateFoldersFromFiles(state_files):
"""Returns list of StateFolder objects parsed from state_files.
Args:
state_files: list of absolute paths to state files.
"""
def CreateStateFolder(folderpath, parent_namespace):
del parent_namespace # Unused by StateFolder.
return state_lib.StateFolder(folderpath)
return _ParseFoldersFromFiles(state_files, base_lib.ComponentType.MULTI_STATE,
CreateStateFolder) | c70421da1f193ca2dc86f12e7cffd84a1011af22 | 16,290 |
def spectral_norm(inputs, epsilon=1e-12, singular_value="left"):
"""Performs Spectral Normalization on a weight tensor.
Details of why this is helpful for GAN's can be found in "Spectral
Normalization for Generative Adversarial Networks", Miyato T. et al., 2018.
[https://arxiv.org/abs/1802.05957].
Args:
inputs: The weight tensor to normalize.
epsilon: Epsilon for L2 normalization.
singular_value: Which first singular value to store (left or right). Use
"auto" to automatically choose the one that has fewer dimensions.
Returns:
The normalized weight tensor.
"""
if len(inputs.shape) < 2:
raise ValueError(
"Spectral norm can only be applied to multi-dimensional tensors")
# The paper says to flatten convnet kernel weights from (C_out, C_in, KH, KW)
# to (C_out, C_in * KH * KW). Our Conv2D kernel shape is (KH, KW, C_in, C_out)
# so it should be reshaped to (KH * KW * C_in, C_out), and similarly for other
# layers that put output channels as last dimension. This implies that w
# here is equivalent to w.T in the paper.
w = tf.reshape(inputs, (-1, inputs.shape[-1]))
# Choose whether to persist the first left or first right singular vector.
# As the underlying matrix is PSD, this should be equivalent, but in practice
# the shape of the persisted vector is different. Here one can choose whether
# to maintain the left or right one, or pick the one which has the smaller
# dimension. We use the same variable for the singular vector if we switch
# from normal weights to EMA weights.
var_name = inputs.name.replace("/ExponentialMovingAverage", "").split("/")[-1]
var_name = var_name.split(":")[0] + "/u_var"
if singular_value == "auto":
singular_value = "left" if w.shape[0] <= w.shape[1] else "right"
u_shape = (w.shape[0], 1) if singular_value == "left" else (1, w.shape[-1])
u_var = tf.get_variable(
var_name,
shape=u_shape,
dtype=w.dtype,
initializer=tf.random_normal_initializer(),
trainable=False)
u = u_var
# Use power iteration method to approximate the spectral norm.
# The authors suggest that one round of power iteration was sufficient in the
# actual experiment to achieve satisfactory performance.
power_iteration_rounds = 1
for _ in range(power_iteration_rounds):
if singular_value == "left":
# `v` approximates the first right singular vector of matrix `w`.
v = tf.math.l2_normalize(
tf.matmul(tf.transpose(w), u), axis=None, epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(w, v), axis=None, epsilon=epsilon)
else:
v = tf.math.l2_normalize(tf.matmul(u, w, transpose_b=True),
epsilon=epsilon)
u = tf.math.l2_normalize(tf.matmul(v, w), epsilon=epsilon)
# Update the approximation.
with tf.control_dependencies([tf.assign(u_var, u, name="update_u")]):
u = tf.identity(u)
# The authors of SN-GAN chose to stop gradient propagating through u and v
# and we maintain that option.
u = tf.stop_gradient(u)
v = tf.stop_gradient(v)
if singular_value == "left":
norm_value = tf.matmul(tf.matmul(tf.transpose(u), w), v)
else:
norm_value = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
norm_value.shape.assert_is_fully_defined()
norm_value.shape.assert_is_compatible_with([1, 1])
w_normalized = w / norm_value
# Deflate normalized weights to match the unnormalized tensor.
w_tensor_normalized = tf.reshape(w_normalized, inputs.shape)
return w_tensor_normalized | eb6961e984fbb8eb5c3d807faa7fa6d016c011b5 | 16,291 |
def rs_for_staff(user_id):
"""Returns simple JSON for research studies in staff user's domain
---
tags:
- User
- ResearchStudy
operationId: research_studies_for_staff
parameters:
- name: user_id
in: path
description: TrueNTH user ID, typically subject or staff
required: true
type: integer
format: int64
produces:
- application/json
responses:
200:
description:
Returns the list of research_studies the requested staff user is
associated with.
schema:
id: nested_research_studies
properties:
research_study:
type: array
items:
type: object
required:
- title
properties:
title:
type: string
description: Research study title
resourceType:
type: string
description: FHIR like resourceType, "ResearchStudy"
401:
description:
if missing valid OAuth token or if the authorized user lacks
permission to view requested user_id
security:
- ServiceToken: []
"""
user = get_user(
user_id, 'view', allow_on_url_authenticated_encounters=True)
if user.has_role(ROLE.PATIENT.value):
abort(
400,
"wrong request path for patient,"
" see /api/patient/<int:user_id>/research_study")
# Assume some staff like role - find all research studies
# in the org tree at, above or below all of the user's orgs
orgs = set()
ot = OrgTree()
for org in user.organizations:
try:
orgs.update(ot.at_and_above_ids(org.id))
        except ValueError as ve:
            raise ValueError(f"Failed at_and_above lookup on {org.id}") from ve
orgs.update(ot.here_and_below_id(org.id))
studies = OrganizationResearchProtocol.query.filter(
OrganizationResearchProtocol.organization_id.in_(
tuple(orgs))).join(
ResearchProtocol,
OrganizationResearchProtocol.research_protocol_id ==
ResearchProtocol.id).with_entities(
ResearchProtocol.research_study_id).order_by(
ResearchProtocol.research_study_id).distinct()
results = [
ResearchStudy.query.get(s.research_study_id).as_fhir()
for s in studies]
return jsonify(research_study=results) | 9d36a02cc4909e336730fb27b3bcfe284bcd5d82 | 16,292 |
import re
import click
def validate_memory(_ctx, _param, value):
"""Validate memory string."""
if value is None:
return None
if not re.search(r'\d+[KkMmGg]$', value):
raise click.BadParameter('Memory format: nnn[K|M|G].')
return value | c050863a974c08ccc18fdaa2f03388c8f6674835 | 16,294 |
def BlockAvg3D( data , blocksize , mask ):
"""
3-D version of block averaging. Mainly applicable to making superpixel averages of datfile traces.
Not sure non-averaging calcs makes sense?
    mask is currently built as a 2d boolean array of the same size as (data.shape[0], data.shape[1]) where pixels to be averaged are True.
"""
rows = data.shape[0]
cols = data.shape[1]
frames = data.shape[2]
if np.mod(rows,blocksize[0]) == 0 and np.mod(cols,blocksize[1]) == 0:
        blockR = rows // blocksize[0]
        blockC = cols // blocksize[1]
else:
print( 'Error, blocksize not evenly divisible into data size.')
return None
output = np.zeros((blockR,blockC,frames))
# Previous algorithm was slow and used annoying looping
    # Improved algorithm that doesn't need any looping; takes about 1.4 seconds instead of 60.
msk = np.array( mask , float )
msk.resize(rows, cols , 1 )
masked = np.array( data , float ) * np.tile( msk , ( 1 , 1 , frames ) )
step1 = masked.reshape(rows , blockC , -1 , frames).sum(2)
step2 = np.transpose(step1 , (1,0,2)).reshape(blockC , blockR , -1 , frames).sum(2)
step3 = np.transpose(step2 , (1,0,2))
mask1 = mask.reshape(rows , blockC , -1 ).sum(2)
count = mask1.transpose().reshape(blockC , blockR , -1).sum(2).transpose()
#mask1 = mask.reshape(rows , blockC , -1 , frames).sum(2)
#count = mask1.transpose().reshape(blockC , blockR , -1 , frames).sum(2).transpose()
output = step3 / count[:,:,np.newaxis]
output[ np.isnan(output) ] = 0
output[ np.isinf(output) ] = 0
return output | 4c0c9cb60c80f47289e7bff3e50ae3e39dd31c63 | 16,295 |
def build(buildconfig: BuildConfig, merge_train_and_test_data: bool = False):
"""Build regressor or classifier model and return it."""
estimator = buildconfig.algorithm.estimator()
if merge_train_and_test_data:
train_smiles, train_y = buildconfig.data.get_merged_sets()
else:
train_smiles, train_y, _, _ = buildconfig.data.get_sets()
train_X = descriptor_from_config(train_smiles, buildconfig.descriptor)
estimator.fit(train_X, train_y)
if merge_train_and_test_data:
train_scores = get_merged_train_score(estimator, buildconfig)
test_scores = None
else:
train_scores, test_scores = get_train_test_scores(estimator, buildconfig)
return estimator, train_scores, test_scores | 9a98f15ae9b966e42cda848169b38a651e727205 | 16,296 |
def stellar_radius(M, logg):
"""Calculate stellar radius given mass and logg"""
if not isinstance(M, (int, float)):
raise TypeError('Mass must be int or float. {} type given'.format(type(M)))
if not isinstance(logg, (int, float)):
raise TypeError('logg must be int or float. {} type given'.format(type(logg)))
if M < 0:
raise ValueError('Only positive stellar masses allowed.')
M = float(M)
return M/(10**(logg-4.44)) | 2afbd991c7461d7861370f18d90df840569da857 | 16,298 |
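A worked sanity check for stellar_radius: the Sun has logg of about 4.44, so a one-solar-mass star with that logg comes out at exactly one solar radius.
print(stellar_radius(1.0, 4.44))  # expected: 1.0 (solar radii)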
def set_plus_row(sets, row):
"""Update each set in list with values in row."""
for i in range(len(sets)):
sets[i].add(row[i])
return sets | 87f448dc3199c8d3137d5811dd184b3d2bd7cbe3 | 16,299 |
from typing import List
from typing import Union
def bytes_to_string(
bytes_to_convert: List[int], strip_null: bool = False
) -> Union[str, None]:
"""
    Literal bytes to string
    :param bytes_to_convert: list of bytes in integer format
    :param strip_null: if True, strip null characters from the resulting string
    :return: resulting string
"""
try:
value = "".join(chr(i) for i in bytes_to_convert)
if strip_null:
return value.strip("\x00")
return value
# AttributeError when None object has no strip attribute
except (ValueError, TypeError, AttributeError):
return None | a04dee89fb8aed33b6069a7ff0ca8c497d0a6062 | 16,300 |
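Two hypothetical calls to bytes_to_string: one decodes a null-padded byte list, the other shows the None fallback when the input is invalid.
print(bytes_to_string([72, 105, 0, 0], strip_null=True))  # expected: 'Hi'
print(bytes_to_string(None))                              # expected: None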
def interpolate(t,y,num_obs=50):
"""
Interpolates each trajectory such that observation times coincide for each one.
Note: initially cubic interpolation gave great power, but this happens as an artifact of the interpolation,
as both trajectories have the same number of observations. Type I error was increased as a result. To avoid
this we settled for a linear interpolation between observations.
Splines were also tried but gave very bad interpolations.
"""
t = np.array([np.sort(row) for row in t])
t = np.insert(t, 0, 0, axis=1)
t = np.insert(t, len(t[0]), 1, axis=1)
y = np.insert(y, 0, y[:,0], axis=1)
y = np.insert(y, len(y[0]), y[:,-1], axis=1)
new_t = np.zeros(num_obs)
new_y = np.zeros(num_obs)
for i in range(len(t)):
f = interp1d(t[i], y[i], kind='linear')
#f = splrep(t[i], y[i])
t_temp = np.random.uniform(low=0.0, high=1.0, size=num_obs)#np.linspace(0.1,0.9,num_obs)
y_temp = f(t_temp)
#y_temp = splev(t_temp, f, der=0)
new_y = np.vstack((new_y, y_temp))
new_t = np.vstack((new_t, t_temp))
return new_t[1:], new_y[1:] | 2418aaf207b214069f45571a21a2b97ecd25f244 | 16,301 |
import re
def locktime_from_duration(duration):
"""
    Parses a duration string and returns the number of seconds it represents
    @param duration: A string representing a duration in the format XXhXXmXXs
    @returns: number of seconds represented by the duration string
"""
if not duration:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
match = re.search(DURATION_REGX_PATTERN, duration)
if not match:
raise ValueError("Duration needs to be in the format {}".format(DURATION_TEMPLATE))
values = match.groupdict()
result = 0
if values['hours']:
result += int(values['hours']) * 60 * 60
if values['minutes']:
result += int(values['minutes']) * 60
if values['seconds']:
result += int(values['seconds'])
return int(result) | c65339ee00e750e4425a68215b0600c71136ee68 | 16,302 |
def black_payers_swaption_value_fhess_by_strike(
init_swap_rate,
option_strike,
swap_annuity,
option_maturity,
vol):
"""black_payers_swaption_value_fhess_by_strike
Second derivative of value of payer's swaption with respect to strike
under black model.
See :py:func:`black_payers_swaption_value`.
.. math::
\\frac{\partial^{2} }{\partial K^{2}}
V_{\mathrm{payer}}(K; S, A, T, \sigma)
= - A\phi(d_{2}(K)) d_{2}^{\prime}(K)
where
:math:`S` is `init_swap_rate`,
:math:`K` is `option_strike`,
:math:`A` is `swap_annuity`,
:math:`T` is `option_maturity`,
:math:`\sigma` is `vol`,
:math:`d_{1}, d_{2}` is defined
in :py:func:`black_payers_swaption_value`,
:math:`\Phi(\cdot)` is c.d.f. of standard normal distribution,
:math:`\phi(\cdot)` is p.d.f. of standard normal distribution.
:param float init_swap_rate: initial swap rate.
:param float option_strike:
:param float swap_annuity:
:param float option_maturity:
:param float vol: volatility. must be non-negative.
:return: value of derivative.
:rtype: float
:raises AssertionError: if volatility is not positive.
"""
assert(vol > 0.0)
value = mafipy.function.black_scholes_call_value_fhess_by_strike(
init_swap_rate, option_strike, 0.0, option_maturity, vol)
return swap_annuity * value | 0645992c65e9e13ee44ad3debfe30fb0b05bfae7 | 16,303 |
def get_resource(cls):
""" gets the resource of a timon class if existing """
if not cls.resources:
return None
resources = cls.resources
assert len(resources) == 1
return TiMonResource.get(resources[0]) | 370f0af23fcfe0bf5da3b39012a5e1e9c29b6f0e | 16,304 |
def _log(x):
"""_log
    to prevent np.log(0), calculate np.log(x + EPS)
Args:
x (array)
Returns:
array: same shape as x, log equals np.log(x + EPS)
"""
if np.any(x < 0):
print("log < 0")
exit()
return np.log(x + EPS) | e7e7b963cf3cec02ace34256ccdf954a2d61dd4a | 16,305 |
import math
def gauss_distribution(x, mu, sigma):
"""
Calculate value of gauss (normal) distribution
Parameters
----------
x : float
Input argument
mu :
Mean of distribution
sigma :
Standard deviation
Returns
-------
float
Probability, values from range [0-1]
"""
    return 1 / (math.sqrt(2 * math.pi) * sigma) * math.exp(-(1 / 2) * ((x - mu) / sigma) ** 2) | 05cf2c14b337b45a81ddbe7655b4d7cf21e352cd | 16,306 |
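A quick check of the corrected normalization above: the standard normal density should peak at 1/sqrt(2*pi), roughly 0.3989, at x = mu.
print(round(gauss_distribution(0.0, 0.0, 1.0), 4))  # expected: 0.3989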
def extend_vocab_OOV(source_words, word2id, vocab_size, max_unk_words):
"""
Map source words to their ids, including OOV words. Also return a list of OOVs in the article.
    WARNING: if the number of OOVs in the source text exceeds max_unk_words, the extra OOVs are ignored and replaced with <unk>
Args:
source_words: list of words (strings)
word2id: vocab word2id
vocab_size: the maximum acceptable index of word in vocab
    Returns:
        src_ext: A list of word ids (integers); OOVs are represented by their temporary article OOV number. If the vocabulary size is 50k and the article has 3 OOVs, then these temporary OOV numbers will be 50000, 50001, 50002.
        oov_dict: A dict mapping each OOV word (string) to its temporary article OOV id.
        oov_list: A list of the OOV words in the article (strings), in the order corresponding to their temporary article OOV numbers.
"""
src_ext = []
oov_dict = {}
for w in source_words:
        if w in word2id and word2id[w] < vocab_size:  # in-vocab: an OOV is either missing from word2id or has id >= vocab_size
src_ext.append(word2id[w])
else:
if len(oov_dict) < max_unk_words:
# e.g. 50000 for the first article OOV, 50001 for the second...
word_id = oov_dict.get(w, len(oov_dict) + vocab_size)
oov_dict[w] = word_id
src_ext.append(word_id)
else:
# exceeds the maximum number of acceptable oov words, replace it with <unk>
word_id = word2id[UNK_WORD]
src_ext.append(word_id)
oov_list = [w for w, w_id in sorted(oov_dict.items(), key=lambda x:x[1])]
return src_ext, oov_dict, oov_list | 2d1b92d9d6b9b3885a7dda6c8d72d80d3b8ecad0 | 16,307 |
def isint(s):
"""**Returns**: True if s is the string representation of an integer
:param s: the candidate string to test
**Precondition**: s is a string
"""
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False | b15598aee937bcce851ee6c39aa2ba96a84a5dd5 | 16,308 |
def create_app(config_name):
"""function creating the flask app"""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(config[config_name])
app.config.from_pyfile('config.py')
app.register_blueprint(v2)
app.register_error_handler(404, not_found)
app.register_error_handler(405, bad_request)
app.register_error_handler(500, internal_server_error)
db_conn.create_tables()
return app | 7e49a1ee9bae07a7628842855c3524794efaa9c5 | 16,309 |
def build_attention_network(features2d,
attention_groups,
attention_layers_per_group,
is_training):
"""Builds attention network.
Args:
features2d: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
attention_groups: (Integer) Number of network groups.
attention_layers_per_group: (Integer) Number of layers per group.
is_training: (Boolean) To indicate training or inference modes.
Returns:
features_embedded: A Tensor of type float32. A 4-D float tensor of shape
[batch_size, height, width, channels].
"""
channels = features2d.shape.as_list()[-1]
with tf.variable_scope("attention_network"):
features_embedded = features2d
for i in range(attention_groups):
filters = channels // 2**(i+1)
for j in range(attention_layers_per_group):
features_embedded = tf.layers.conv2d(
features_embedded,
filters=filters,
kernel_size=3 if j == (attention_layers_per_group-1)
else 1,
strides=1,
dilation_rate=(2, 2) if j == (attention_layers_per_group-1)
else (1, 1),
activation=None,
use_bias=False,
name="features2d_embedding%d_%d" %(i, j),
padding="same")
features_embedded = tf.layers.batch_normalization(
features_embedded, training=is_training,
momentum=MOMENTUM, epsilon=EPS,
name="features2d_embedding%d_%d" %(i, j))
features_embedded = tf.nn.relu(features_embedded)
tf.logging.info("Constructing layer: %s", features_embedded)
return features_embedded | c665994b88027c24ed86e01514fa3fc176a3258a | 16,310 |
def get_catalog_config(catalog):
"""
get the config dict of *catalog*
"""
return resolve_config_alias(available_catalogs[catalog]) | 4d36bc8be8ca2992424f0b97f28d3ac8d852c027 | 16,311 |
def manhatten(type_profile, song_profile):
"""
    Calculate the Manhattan distance between the profile of a specific
    output_columns value (e.g. a specific composer) and the profile of a
    song
"""
# Sort profiles by frequency
type_profile = type_profile.most_common()
song_profile = song_profile.most_common()
flat_type_profile = [ngram for (ngram, freq) in type_profile]
flat_song_profile = [ngram for (ngram, freq) in song_profile]
manhatten = 0
for i in range(len(flat_song_profile)):
ngram = flat_song_profile[i]
if ngram in flat_type_profile:
manhatten += abs(flat_type_profile.index(ngram) - i)
else:
manhatten += abs(len(flat_type_profile) - i)
return manhatten | 4703585f9f60551bf2a5e2762612d45efb47a453 | 16,312 |
def raven(request):
"""lets you know whether raven is being used"""
return {
'RAVEN': RAVEN
} | 3e047db45a597cf808e5227b358a9833fc0a4fc3 | 16,313 |
from typing import Union
def _non_max_suppress_mask(
bbox: np.array,
scores: np.array,
classes: np.array,
masks: Union[np.array, None],
filter_class: int,
iou: float = 0.8,
confidence: float = 0.001,
) -> tuple:
"""Perform non max suppression on the detection output if it is mask.
:param bbox: Bbox outputs.
:param scores: Score outputs.
:param classes: Class outputs.
:param masks: Mask outputs
:param filter_class: The specific class required.
:param iou: The intersection of union value to be considered.
:param confidence: The confidence threshold for scores.
:returns: tuple of suppressed bbox, suppressed scores,
suppressed classes, and suppressed masks.
"""
filter_idx = _filter_class_and_zero_scores(
scores,
classes,
filter_class,
confidence,
)
scores_filter = np.array(np.array(scores)[filter_idx])
bbox_filter = np.array(np.array(bbox)[filter_idx])
classes_filter = np.array(np.array(classes)[filter_idx])
masks_filter = np.array(np.array(masks)[filter_idx])
areas = np.empty(masks_filter.shape[0])
for index, mask in enumerate(masks_filter):
areas[index] = np.count_nonzero(mask)
sorted_scores = scores_filter.argsort()[::-1]
keep = []
while sorted_scores.size > 0:
score = sorted_scores[0]
# keep the largest sorted score (sorted_scores[0] represent the largest score)
keep.append(score)
# with:
# x = [0 0 1 1] and y = [0 1 1 0],
# the intersect is x && y element-wise -> [0 0 1 0]
intersect = np.empty_like(sorted_scores[1:])
for index, others in enumerate(masks_filter[sorted_scores[1:]]):
intersect[index] = np.count_nonzero(
np.logical_and(masks_filter[score], others)
)
overlap = intersect / (
areas[score] + areas[sorted_scores[1:]] - intersect
)
sorted_scores = sorted_scores[
np.union1d(
np.where(overlap <= 1 - iou)[0],
np.where(
classes_filter[sorted_scores[1:]] != classes_filter[score]
),
)
+ 1
]
detection_boxes = list(map(tuple, bbox_filter[keep]))
detection_scores = list(scores_filter[keep])
detection_classes = list(classes_filter[keep])
detection_masks = list(masks_filter[keep])
detection_boxes = [
(float(item[0]), float(item[1]), float(item[2]), float(item[3]))
for item in detection_boxes
]
detection_scores = [float(item) for item in detection_scores]
detection_classes = [int(item) for item in detection_classes]
return (
detection_boxes,
detection_scores,
detection_classes,
detection_masks,
) | 742261f1854f2ad6d01046926c6017b72a1917a4 | 16,314 |
def _mark_untranslated_strings(translation_dict):
"""Marks all untranslated keys as untranslated by surrounding them with
lte and gte symbols.
This function modifies the translation dictionary passed into it in-place
and then returns it.
"""
# This was a requirement when burton was written, but may be an unwanted
# side effect for other projects that adopt burton. We should replace it
# with something more flexible.
for key in translation_dict:
if key is not None and translation_dict[key] is None:
translation_dict[key] = u"\u2264" + key + u"\u2265"
return translation_dict | d15ac2d0fe8d50d5357bcc1e54b9666f7076aefd | 16,316 |
import warnings
import codecs
def build(app, path):
"""
Build and return documents without known warnings
:param app:
:param path:
:return:
"""
with warnings.catch_warnings():
# Ignore warnings emitted by docutils internals.
warnings.filterwarnings(
"ignore",
"'U' mode is deprecated",
DeprecationWarning)
app.build()
#return (app.outdir / path).read_text()
with codecs.open((app.outdir / path), 'r', encoding='utf-8') as content_file:
return content_file.read() | 09049aad0d46d07144c3d564deb0e5aaf1b828ca | 16,317 |
def SMWatConstrained(CSM, ci, cj, matchFunction, hvPenalty = -0.3, backtrace = False):
"""
Implicit Smith Waterman alignment on a binary cross-similarity matrix
with constraints
:param CSM: A binary N x M cross-similarity matrix
:param ci: The index along the first sequence that must be matched to cj
:param cj: The index along the second sequence that must be matched to ci
:param matchFunction: A function that scores matching/mismatching
:param hvPenalty: The amount by which to penalize horizontal/vertical moves
    :returns: A dict with the combined score ('score'), the two dynamic programming matrices ('D1', 'D2'), and, if backtrace is True, the combined alignment path ('path')
"""
res1 = SMWat(CSM[0:ci+1, 0:cj+1], matchFunction, hvPenalty, backtrace = backtrace, backidx = [ci+1, cj+1])
CSM2 = np.fliplr(np.flipud(CSM[ci::, cj::]))
res2 = SMWat(CSM2, matchFunction, hvPenalty, backtrace = backtrace, backidx = [CSM2.shape[0], CSM2.shape[1]])
res = {'score':res1['D'][-1, -1] + res2['D'][-1, -1]}
res['D1'] = res1['D']
res['D2'] = res2['D']
if backtrace:
path2 = [[ci+1+(CSM2.shape[0]+1-x), cj+1+(CSM2.shape[1]+1-y)] for [x, y] in res2['path']]
res['path'] = res1['path'] + path2
return res | a66f17bb40e201a6758c1add4a1590672724dc3e | 16,319 |
def check_images(
coords,
species,
lattice,
PBC=[1, 1, 1],
tm=Tol_matrix(prototype="atomic"),
tol=None,
d_factor=1.0,
):
"""
Given a set of (unfiltered) frac coordinates, checks if the periodic images are too close.
Args:
coords: a list of fractional coordinates
species: the atomic species of each coordinate
lattice: a 3x3 lattice matrix
PBC: the periodic boundary conditions
tm: a Tol_matrix object
tol: a single override value for the distance tolerances
d_factor: the tolerance is multiplied by this amount. Larger values
mean atoms must be farther apart
Returns:
False if distances are too close. True if distances are not too close
"""
# If no PBC, there are no images to check
if PBC == [0, 0, 0]:
return True
# Create image coords from given coords and PBC
coords = np.array(coords)
m = create_matrix(PBC=PBC, omit=True)
new_coords = []
new_species = []
for v in m:
for v2 in coords + v:
new_coords.append(v2)
new_coords = np.array(new_coords)
# Create a distance matrix
dm = distance_matrix(coords, new_coords, lattice, PBC=[0, 0, 0])
# Define tolerances
if tol is None:
tols = np.zeros((len(species), len(species)))
for i, s1 in enumerate(species):
for j, s2 in enumerate(species):
if i <= j:
tols[i][j] = tm.get_tol(s1, s2)
tols[j][i] = tm.get_tol(s1, s2)
tols2 = np.tile(tols, int(len(new_coords) / len(coords)))
if (dm < tols2).any():
return False
else:
return True
    else:
        if (dm < tol).any():
            return False
        else:
            return True | 20f3ada0aa391d989b638a835581226bd79439f7 | 16,320 |
def get_hamming_distances(genomes):
"""Calculate pairwise Hamming distances between the given list of genomes
and return the nonredundant array of values for use with scipy's squareform function.
Bases other than standard nucleotides (A, T, C, G) are ignored.
Parameters
----------
genomes : list
a list of strings corresponding to genomes that should be compared
Returns
-------
list
a list of distinct Hamming distances as a vector-form distance vector
>>> genomes = ["ATGCT", "ATGCT", "ACGCT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
>>> genomes = ["AT-GCT", "AT--CT", "AC--CT"]
>>> get_hamming_distances(genomes)
[0, 1, 1]
"""
# Define an array of valid nucleotides to use in pairwise distance calculations.
# Using a numpy array of byte strings allows us to apply numpy.isin later.
nucleotides = np.array([b'A', b'T', b'C', b'G'])
# Convert genome strings into numpy arrays to enable vectorized comparisons.
genome_arrays = [
np.frombuffer(genome.encode(), dtype="S1")
for genome in genomes
]
# Precalculate positions of valid bases (A, T, C, and G) in each genome to speed up later comparisons.
valid_bases = [
np.isin(genome_array, nucleotides)
for genome_array in genome_arrays
]
# Calculate Hamming distance between all distinct pairs of genomes at valid bases.
# The resulting list is a reduced representation of a symmetric matrix that can be
# converted to a square matrix with scipy's squareform function:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.squareform.html
hamming_distances = []
for i in range(len(genomes)):
# Only compare the current genome, i, with all later genomes.
# This avoids repeating comparisons or comparing each genome to itself.
for j in range(i + 1, len(genomes)):
# Find all mismatches between these two genomes.
mismatches = genome_arrays[i] != genome_arrays[j]
# Count the number of mismatches where both genomes have valid bases.
hamming_distances.append((mismatches & valid_bases[i] & valid_bases[j]).sum())
return hamming_distances | dad2e9583bd7fcbbbb87dd93d180e4de39ea3083 | 16,321 |
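A hypothetical follow-up showing how the condensed vector is expanded into a square distance matrix with SciPy's squareform, as the docstring suggests.
from scipy.spatial.distance import squareform
genomes = ["ATGCT", "ATGCT", "ACGCT"]
print(squareform(get_hamming_distances(genomes)))
# expected: a symmetric 3x3 matrix: [[0, 0, 1], [0, 0, 1], [1, 1, 0]]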
from typing import Dict
def serialize(name: str, engine: str) -> Dict:
"""Get dictionary serialization for a dataset locator.
Parameters
----------
name: string
Unique dataset name.
engine: string
Unique identifier of the database engine (API).
Returns
-------
dict
"""
return {'name': name, 'database': engine} | 9ab11318050caf3feb4664310e491ed48e7e5357 | 16,322 |
import torch
def repackage_hidden(h):
"""
Wraps hidden states in new Variables, to detach them from their history.
"""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(v.detach() for v in h) | 0ab8cffeaafaf6f39e2938ce2005dbca1d3d7496 | 16,323 |
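A hypothetical usage example of repackage_hidden: detaching an LSTM-style (h, c) tuple leaves tensors that no longer require gradients.
h = (torch.zeros(1, 4, requires_grad=True), torch.zeros(1, 4, requires_grad=True))
h = repackage_hidden(h)
print(all(not t.requires_grad for t in h))  # expected: True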
def support_acctgroup_acctproject(version):
"""
Whether this Lustre version supports acctgroup and acctproject
"""
if version.lv_name == "es2":
return False
return True | 858ec772a90e66431731ffcdd145fa7e56daad02 | 16,325 |
def decodeInventoryEntry_level1(document):
"""
Decodes a basic entry such as: '6 lobster cake' or '6' cakes
@param document : NLP Doc object
:return: Status if decoded correctly (true, false), and Inventory object
"""
count = Inventory(str(document))
for token in document:
        if token.pos_ in (u'NOUN', u'NNS', u'NN'):
item = str(token)
for child in token.children:
if child.dep_ == u'compound' or child.dep_ == u'ad':
item = str(child) + str(item)
elif child.dep_ == u'nummod':
count.amount = str(child).strip()
for numerical_child in child.children:
                        # this isn't arithmetic; the number tokens are concatenated as strings
count.amount = str(numerical_child) + str(count.amount).strip()
else:
print "WARNING: unknown child: " + str(child) + ':'+str(child.dep_)
count.item = item
count.unit = item
return count | a283f3630a18cdbb0cc22664e583f00866ff759b | 16,326 |
from typing import Collection
def from_ir_objs(ir_objs: Collection[IrCell]) -> AnnData:
"""\
Convert a collection of :class:`IrCell` objects to an :class:`~anndata.AnnData`.
This is useful for converting arbitrary data formats into
the scirpy :ref:`data-structure`.
{doc_working_model}
Parameters
----------
ir_objs
Returns
-------
:class:`~anndata.AnnData` object with :term:`IR` information in `obs`.
"""
ir_df = pd.DataFrame.from_records(
(_process_ir_cell(x) for x in ir_objs), index="cell_id"
)
adata = AnnData(obs=ir_df, X=np.empty([ir_df.shape[0], 0]))
_sanitize_anndata(adata)
return adata | 55e95b2673d6aec02ae5aa7fb5cec014db17cdc7 | 16,328 |
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/start<br/>"
f"/api/v1.0/start/end"
) | bac64c3b2d2e5d883f627dada658cdd9359b61b0 | 16,330 |
from typing import List
def get_cases_from_input_df(input_df: pd.DataFrame) -> List[Case]:
"""
Get the case attributes
:return:
"""
cases: List[Case] = []
for index, row in input_df.iterrows():
# Create a case object from the row values in the input df
cases.append(Case.from_dict(row.to_dict()))
return cases | 34b820880691456fde3ab260be02646590aeafd7 | 16,331 |
from typing import AnyStr
import unicodedata
def normalize_nfc(txt: AnyStr) -> bytes:
"""
Normalize message to NFC and return bytes suitable for protobuf.
This seems to be bitcoin-qt standard of doing things.
"""
str_txt = txt.decode() if isinstance(txt, bytes) else txt
return unicodedata.normalize("NFC", str_txt).encode() | 12b6e037225878e0bbca1d52d9f58d57abb35746 | 16,333 |
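A hypothetical example of normalize_nfc: a decomposed 'e' plus combining acute accent collapses to the single code point U+00E9 and is returned UTF-8 encoded.
print(normalize_nfc("e\u0301"))           # expected: b'\xc3\xa9' (UTF-8 for 'é')
print(normalize_nfc("e\u0301".encode()))  # bytes input is accepted as well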
from typing import Callable
from typing import Any
import threading
import functools
def synchronized(wrapped: Callable[..., Any]) -> Any:
"""The missing @synchronized decorator
https://git.io/vydTA"""
_lock = threading.RLock()
@functools.wraps(wrapped)
def _wrapper(*args, **kwargs):
with _lock:
return wrapped(*args, **kwargs)
return _wrapper | 39da1efeb93c8dbdba570763d2e66dc8d9d84fc5 | 16,334 |
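A hypothetical usage example of the @synchronized decorator: every call to the wrapped function runs under the same re-entrant lock, so concurrent increments of a shared dict cannot interleave.
@synchronized
def bump(state: dict) -> None:
    state["count"] = state.get("count", 0) + 1

shared = {}
workers = [threading.Thread(target=bump, args=(shared,)) for _ in range(100)]
for w in workers:
    w.start()
for w in workers:
    w.join()
print(shared["count"])  # expected: 100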
def corrgroups60__decision_tree():
""" Decision Tree
"""
return sklearn.tree.DecisionTreeRegressor(random_state=0) | fb2405c54208705a105b225e1dd269d45892b7be | 16,335 |
def auth_required(*auth_methods):
"""
Decorator that protects enpoints through multiple mechanisms
Example::
@app.route('/dashboard')
@auth_required('token', 'session')
def dashboard():
return 'Dashboard'
:param auth_methods: Specified mechanisms.
"""
login_mechanisms = {
'token': lambda: _check_token(),
'basic': lambda: _check_http_auth(),
'session': lambda: current_user.is_authenticated()
}
def wrapper(fn):
@wraps(fn)
def decorated_view(*args, **kwargs):
h = {}
mechanisms = [(method, login_mechanisms.get(method)) for method in auth_methods]
for method, mechanism in mechanisms:
if mechanism and mechanism():
return fn(*args, **kwargs)
elif method == 'basic':
r = _security.default_http_auth_realm
h['WWW-Authenticate'] = 'Basic realm="%s"' % r
if _security._unauthorized_callback:
return _security._unauthorized_callback()
else:
return _get_unauthorized_response(headers=h)
return decorated_view
return wrapper | c6613e594abbb979352fe3ec96018fe52109bab0 | 16,336 |
def _get_default_data_dir_name():
"""
Gets default data directory
"""
return _get_path(DATA_DIR) | b4207e108a9f08a72b47c44ab43b3971e67e8165 | 16,337 |
def point_inside_triangle(p, t, tol=None):
"""
Test to see if a point is inside a triangle. The point is first
projected to the plane of the triangle for this test.
:param ndarray p: Point inside triangle.
:param ndarray t: Triangle vertices.
:param float tol: Tolerance for barycentric coordinate check.
:return: *True* if point is inside triangle, *False* if not.
:rtype: bool
"""
if tol is None:
tol = Settings.ptol
v01 = t[1] - t[0]
v02 = t[2] - t[0]
vp = p - t[0]
d01 = dot(v01, v01)
d12 = dot(v01, v02)
d02 = dot(v02, v02)
dp1 = dot(vp, v01)
dp2 = dot(vp, v02)
denom = d01 * d02 - d12 * d12
if denom == 0.:
return False
u = (d02 * dp1 - d12 * dp2) / denom
v = (d01 * dp2 - d12 * dp1) / denom
if u >= -tol and v >= -tol and u + v <= 1. + tol:
return True
return False | a7a4dd52dfa65fdd9e3cb3ac151c7895acb3abb8 | 16,338 |
from datetime import datetime
def merge_dfs(x, y):
"""Merge the two dataframes and download a CSV."""
df = pd.merge(x, y, on='Collection_Number', how='outer')
indexed_df = df.set_index(['Collection_Number'])
indexed_df['Access_Notes_Regarding_Storage_Locations'].fillna('No note', inplace=True)
    today = datetime.today().strftime('%Y-%m-%d')
output_file = 'storage_locations_' + str(today) + '.csv'
indexed_df.to_csv(output_file)
print('Location report exported as ' + output_file)
return indexed_df | 9856d4394ca628fd7eb0f58e6cc805494410c51e | 16,339 |
def consumer(func):
"""A decorator function that takes care of starting a coroutine automatically on call.
See http://www.dabeaz.com/generators/ for more details.
"""
def start(*args, **kwargs):
cr = func(*args, **kwargs)
next(cr)
return cr
return start | e834a081c1f43545684bb4102a92b186c8825f30 | 16,340 |
def convert_openfermion_op(openfermion_op, n_qubits=None):
"""convert_openfermion_op
Args:
openfermion_op (:class:`openfermion.ops.QubitOperator`)
        n_qubits (:class:`int`):
if None (default), it automatically calculates the number of qubits required to represent the given operator
Returns:
:class:`qulacs.GeneralQuantumOperator`
"""
if n_qubits is None:
_n_qubits = _count_qubit_in_qubit_operator(openfermion_op)
else:
_n_qubits = n_qubits
res = GeneralQuantumOperator(_n_qubits)
for pauli_product in openfermion_op.terms:
coef = float(np.real(openfermion_op.terms[pauli_product]))
pauli_string = ''
for pauli_operator in pauli_product:
pauli_string += pauli_operator[1] + ' ' + str(pauli_operator[0])
pauli_string += ' '
res.add_operator(coef, pauli_string[:-1])
return res | 416eccc82fbd7dbdcf61ba62f5176ca3e12a01db | 16,341 |
def recommend(uid, data, model, top_n = 100):
"""
    Recommend the top_n movies for a given user, based on a trained surprise model.
Parameters
----------
uid : int
user id
data : surprise object with data
The entire system, ratings of users (Constructed with reader from surprise)
    model : surprise object
Trained algorithm
top_n : int
The number of movies to recommend
Returns
-------
pd.DataFrame
recommended movies
    pd.DataFrame
predicted ratings for the recommended movies
data_update
predicted movies and ratings in the movielens format (uid, iid, rating)
"""
all_movie_ids = data.df['iid'].unique()
uid_rated = data.df[data.df['uid'] == uid]['iid']
movies_to_recommend = np.setdiff1d(all_movie_ids, uid_rated)
if len(movies_to_recommend) == 0:
print('NO MOVIES TO RECOMMEND!')
prediction_set = [[uid, iid, 0] for iid in movies_to_recommend] #here 0 is arbitrary, ratings don't matter
predictions = model.test(prediction_set)
pred_ratings = np.array([pred.est for pred in predictions])
top = pred_ratings.argsort()[::-1][:top_n]
data_update = pd.DataFrame([[uid, movies_to_recommend[top][i], pred_ratings[top][i]] for i in range(top_n)], columns = ['uid', 'iid', 'rating'])
return movies_to_recommend[top], pred_ratings[top], data_update | b156826359e3310c8872a07428d0073795ef071b | 16,345 |
def cluster_info(arr):
""" number of clusters (nonzero fields separated by 0s) in array
and size of cluster
"""
data = []
k2coord = []
coord2k = np.empty_like(arr).astype(np.int64)
k = -1
new_cluster = True
for i in range(0,len(arr)):
if arr[i] == 0:
new_cluster = True
coord2k[i] = -1
else:
if new_cluster == True:
k += 1
k2coord.append([i])
data.append(0)
else:
k2coord[k].append(i)
data[k] += 1
coord2k[i] = k
new_cluster = False
Ncl = len(data) # number of clusters
Nk = data # Nk[k] = size of cluster k
return Ncl, Nk, k2coord, coord2k | 23a3d58b13ba4af4977cd25a1dc45d116fd812b5 | 16,347 |
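A hypothetical worked example for cluster_info (assumes NumPy imported as np, as the function body already requires): the array below contains two runs of non-zero values, of sizes 2 and 3.
import numpy as np
Ncl, Nk, k2coord, coord2k = cluster_info(np.array([0, 5, 5, 0, 7, 7, 7]))
print(Ncl)      # expected: 2
print(Nk)       # expected: [2, 3]
print(k2coord)  # expected: [[1, 2], [4, 5, 6]]
print(coord2k)  # expected: [-1  0  0 -1  1  1  1]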
def set_or_none(list_l):
"""Function to avoid list->set transformation to return set={None}."""
if list_l == [None]:
res = None
else:
res = set(list_l)
return res | ee5fb4539e63afc7fd8013610229d9ab784b88c5 | 16,348 |
import re
def case_mismatch(vm_type, param):
"""Return True if vm_type matches a portion of param in a case
insensitive search, but does not equal that portion;
return False otherwise.
The "portions" of param are delimited by "_".
"""
re_portion = re.compile(
"(^(%(x)s)_)|(_(%(x)s)_)|(_(%(x)s)$)" % dict(x=vm_type), re.IGNORECASE
)
found = re_portion.search(param)
if found:
param_vm_type = [x for x in found.groups()[1::2] if x][0]
return param_vm_type != vm_type
else:
return False | e7fb565ac6e10fd15dd62a64fbf7f14a8bcfde6b | 16,349 |
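Hypothetical calls to case_mismatch illustrating the three cases: a case-insensitive hit with different casing, an exact match, and no match at all.
print(case_mismatch("vm", "VM_host"))    # expected: True  ('VM' != 'vm')
print(case_mismatch("vm", "vm_host"))    # expected: False (same casing)
print(case_mismatch("vm", "host_only"))  # expected: False (no portion matches)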
def _async_os(cls):
""" Aliases for aiofiles.os"""
return aiofiles.os | ad37b21f22ed5203451ac8eb4b7a53f4572fec73 | 16,350 |
import torch
def corruption_function(x: torch.Tensor):
""" Applies the Gsaussian blur to x """
return torchdrift.data.functional.gaussian_blur(x, severity=5) | 54b98c6bddb187689c0e70fc2dbf0f3c56e25ad1 | 16,351 |
def filter_by_filename(conn, im_ids, imported_filename):
"""Filter list of image ids by originalFile name
Sometimes we know the filename of an image that has been imported into
OMERO but not necessarily the image ID. This is frequently the case when
we want to annotate a recently imported image. This funciton will help
to filter a list of image IDs to only those associated with a particular
filename.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object
OMERO connection.
im_ids : list of int
List of OMERO image IDs.
imported_filename : str
The full filename (with extension) of the file whose OMERO image
we are looking for. NOT the path of the image.
Returns
-------
filtered_im_ids : list of int
Filtered list of images with originalFile name matching
``imported_filename``.
Notes
-----
This function should be used as a filter on an image list that has been
already narrowed down as much as possible. Note that many different images
in OMERO may share the same filename (e.g., image.tif).
Examples
--------
>>> im_ids = get_image_ids(conn, dataset=303)
>>> im_ids = filter_by_filename(conn, im_ids, "feb_2020.tif")]
"""
q = conn.getQueryService()
params = Parameters()
params.map = {"oname": rstring(imported_filename)}
results = q.projection(
"SELECT i.id FROM Image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" JOIN u.originalFile o"
" WHERE o.name=:oname",
params,
conn.SERVICE_OPTS
)
im_id_matches = [r[0].val for r in results]
return list(set(im_ids) & set(im_id_matches)) | bf9625c06929a80f21a4683b1da687535f296e59 | 16,352 |
def get_count():
"""
    :return: the current counter value
"""
counter = Counters.query.filter(Counters.id == 1).first()
return make_succ_response(0) if counter is None else make_succ_response(counter.count) | be0ab2773e661b8e5e34f685b59f16cfdee6b26d | 16,353 |
import math
def perm(x, y=None):
"""Return the number of ways to choose k items from n items without repetition and with order."""
if not isinstance(x, int) or (not isinstance(y, int) and y is not None):
raise ValueError(f"Expected integers. Received [{type(x)}] {x} and [{type(y)}] {y}")
return math.perm(x, y) | c9ad65c6ce3cc3e5ba488c5f2ddd1aabbdc7da6a | 16,354 |
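Two worked examples for perm (math.perm requires Python 3.8+): ordered selections of 2 out of 5 items, and the full factorial when y is omitted.
print(perm(5, 2))  # expected: 20
print(perm(4))     # expected: 24 (y=None falls back to 4!)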
from typing import Union
def ui(candles: np.ndarray, period: int = 14, scalar: float = 100, source_type: str = "close", sequential: bool = False) -> Union[float, np.ndarray]:
"""
Ulcer Index (UI)
:param candles: np.ndarray
:param period: int - default: 14
:param scalar: float - default: 100
:param source_type: str - default: "close"
:param sequential: bool - default: False
:return: float | np.ndarray
"""
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
highest_close = talib.MAX(source, period)
downside = scalar * (source - highest_close)
downside /= highest_close
d2 = downside * downside
res = np.sqrt(talib.SUM(d2, period) / period)
return res if sequential else res[-1] | 1f99a6ee849094f3a695812e37035a13f36e8c49 | 16,357 |
def _padwithzeros(vector, pad_width, iaxis, kwargs):
"""Pad with zeros"""
vector[: pad_width[0]] = 0
vector[-pad_width[1] :] = 0
return vector | 1a3a9fc4fd3b0fc17a905fa9ecd283d60310655d | 16,358 |
def fill_from_sparse_coo(t,elems):
"""
:param elems: non-zero elements defined in COO format (tuple(indices),value)
:type elems: list[tuple(tuple(int),value)]
"""
for e in elems:
t[e[0]]=e[1]
return t | 73c6892464d7d7cf34f40fe1dde9973950cdef79 | 16,359 |
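A hypothetical usage example of fill_from_sparse_coo (assumes NumPy imported as np): two COO-style (indices, value) pairs are written into a dense 3x3 array.
import numpy as np
t = fill_from_sparse_coo(np.zeros((3, 3)), [((0, 1), 5.0), ((2, 2), 7.0)])
print(t[0, 1], t[2, 2])  # expected: 5.0 7.0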
def download_responses(survey_id):
"""Download survey responses."""
if request.method == 'GET':
csv = survey_service.download_responses(survey_id)
return Response(
csv,
mimetype='text/csv',
headers={'Content-disposition': 'attachment; filename=surveydata.csv'}) | 8513caf582b87bf0cd5db80622c530d1ec1c3ef2 | 16,360 |
from collections import deque
from typing import Iterable
from typing import Deque
def array_shift(data: Iterable, shift: int) -> Deque:
"""
left(-) or right(+) shift of array
>>> arr = range(10)
>>> array_shift(arr, -3)
deque([3, 4, 5, 6, 7, 8, 9, 0, 1, 2])
>>> array_shift(arr, 3)
deque([7, 8, 9, 0, 1, 2, 3, 4, 5, 6])
"""
deq = deque(data)
deq.rotate(shift)
return deq | c14e115808592808bc9b0cf20fa8bc3d5ece7768 | 16,361 |
def convert_to_timetable(trains):
"""
    A function that converts train data into timetable data.
    Args:
        trains (list of list of `Section`): train data
    Returns:
        timetable (list): timetable data
        timetable[from_station][to_station][dep_time] = (from_time, to_time)
        -> the (boarding time, alighting time) pair, in minutes since midnight, when travelling
        from from_station to to_station on the earliest train if the current time is dep_time
"""
max_time = 1 + max([section.to_time for train in trains for section in train])
n_stations = len(set([section.to_station for train in trains for section in train]))
timetable = [[[(max_time, max_time) for _ in range(max_time)] for _ in range(n_stations)] for _ in range(n_stations)]
    # Step 0: build a graph (adj) over (time, station) nodes for the search in the next step
adj = defaultdict(list)
target_time_flag = [0 for _ in range(max_time)]
for train in trains:
for section in train:
adj[(section.from_time, section.from_station)].append((section.to_time, section.to_station))
target_time_flag[section.from_time] = 1
target_time_flag[section.to_time] = 1
target_times = [t for t in range(max_time) if target_time_flag[t] == 1]
for station in range(n_stations):
for from_time, to_time in zip(target_times[:-1], target_times[1:]):
adj[(from_time, station)].append((to_time, station))
    # Step 1: register entries where departure time == boarding time
for train in trains:
for section in train:
            # Find the earliest arrival time at every other station with BFS
min_to_time = [max_time for _ in range(n_stations)]
min_to_time[section.from_station] = section.from_time
que = deque([(section.from_time, section.from_station)])
visited = defaultdict(int)
visited[(section.from_time, section.from_station)] = 1
while len(que) > 0:
from_time, from_station = que.popleft()
for to_time, to_station in adj[(from_time, from_station)]:
if visited[(to_time, to_station)] == 1:
continue
min_to_time[to_station] = min(to_time, min_to_time[to_station])
que.append((to_time, to_station))
visited[(to_time, to_station)] = 1
            # Register entries where departure time == boarding time
for to_station in range(n_stations):
if to_station == section.from_station:
continue
to_time = min_to_time[to_station]
if to_time == max_time:
continue
timetable[section.from_station][to_station][section.from_time] = (section.from_time, to_time)
    # Step 2: register entries where departure time != boarding time.
    # For example, consider the first train from station 1 to station 2, departing at 5:00 (300) and arriving at 5:05 (305).
    # Step 1 registers timetable[1][2][300] = (300, 305).
    # Even if we leave station 1 before 5:00 (300), we would simply wait at station 1 and board the same train,
    # so timetable[1][2][t] = (300, 305) should also hold for t < 300.
    # Step 1 does not fill in these entries, so we fill them in here.
for t in range(max_time - 2, - 1, - 1):
for from_station in range(n_stations):
for to_station in range(n_stations):
timetable[from_station][to_station][t] = \
min(timetable[from_station][to_station][t], timetable[from_station][to_station][t + 1])
return timetable | 042238b090af1b4b4e4a8cf469f9bbcd49edc9af | 16,362 |
import math
def parents(level, idx):
"""
Return all the (grand-)parents of the Healpix pixel idx at level (in nested format)
:param level: Resolution level
:param idx: Pixel index
:return: All the parents of the pixel
"""
assert idx < 12 * 2 ** (2 * level)
plpairs = []
for ind in range(level, 0, -1):
idx = int(math.floor(idx / 4))
plpairs.append(tuple((ind - 1, idx)))
level -= 1
return plpairs[::-1] | 355c3acffa07065de10049059ef064abefdd7ca0 | 16,363 |
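A worked example for parents: nested pixel 7 at level 2 lies inside pixel 1 at level 1, which lies inside pixel 0 at level 0.
print(parents(2, 7))  # expected: [(0, 0), (1, 1)]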
def precise_inst_ht(vert_list, spacing, offset):
"""
Uses a set of Vertical Angle Observations taken to a
levelling staff at regular intervals to determine the
height of the instrument above a reference mark
:param vert_list: List of Vertical (Zenith) Angle Observations (minimum of 3) in Decimal Degrees format
:param spacing: Distance in metres between each vertical angle observation
:param offset: Lowest observed height above reference mark
:return: Instrument Height above reference mark and its standard deviation
"""
if len(vert_list) < 3:
raise ValueError('ValueError: 3 or more vertical angles required')
vert_list.sort(reverse=True)
vert_pairs = [(va1, va2) for va1, va2 in zip(vert_list, vert_list[1:])]
base_ht = []
height_comp = []
for num, pair in enumerate(vert_pairs):
base_ht_pair = offset + num * spacing
base_ht.append(base_ht_pair)
dist_a = sin(radians(pair[1])) * (spacing / (sin(radians(pair[0] - pair[1]))))
delta_ht = dist_a * (sin(radians(pair[0] - 90)))
height_comp.append(delta_ht + base_ht[num])
return round(mean(height_comp), 5), round(stdev(height_comp), 5) | d88cf0dc289f2ef96d4b60dabf17c6e4bd04e549 | 16,364 |
def _parse_transform_set(transform_dict, imputer_string, n_images=None):
"""Parse a dictionary read from yaml into a TransformSet object
Parameters
----------
transform_dict : dictionary
The dictionary as read from the yaml config file containing config
key-value pairs
imputer_string : string
The name of the imputer (could be None)
n_images : int > 0
The number of images being read in. Required because we need to create
a new image transform for each image
Returns
-------
image_transforms : list
A list of image Transform objects
imputer : Imputer
An Imputer object
global_transforms : list
A list of global Transform objects
"""
image_transforms = []
global_transforms = []
if imputer_string in _imputers:
imputer = _imputers[imputer_string]()
else:
imputer = None
if transform_dict is not None:
for t in transform_dict:
if type(t) is str:
t = {t: {}}
key, params = list(t.items())[0]
if key in _image_transforms:
image_transforms.append([_image_transforms[key](**params)
for k in range(n_images)])
elif key in _global_transforms:
global_transforms.append(_global_transforms[key](**params))
return image_transforms, imputer, global_transforms | 47e3bf72c9e70bff22bebee7e73a14c349761116 | 16,365 |
import json
import random
def initialize_train_test_dataset(dataset):
""" Create train and test dataset by random sampling.
pct: percentage of training
"""
pct = 0.80
if dataset in ['reddit', 'gab']:
dataset_fname = './data/A-Benchmark-Dataset-for-Learning-to-Intervene-in-Online-Hate-Speech-master/' + dataset + '.csv'
xlist, ylist, zlist = read_EMNLP2019(dataset_fname)
hate_num = 0
for y in ylist:
for i in y.strip('[]').split(', '):
hate_num += 1
X_text, Y_text = [], []
line_num = 0
for x, y, z in zip(xlist, ylist, zlist):
x = x.strip().split('\n')
for i in y.strip('[]').split(', '):
X_text.append('. '.join(x[int(i) - 1].split('. ')[1:]).strip('\t')) # Only the hate speech line.
temp = []
for j in split_response_func(z):
if j.lower() == 'n/a':
continue
temp.append(j)
Y_text.append(temp)
line_num += 1
elif dataset == 'conan':
all_text = [json.loads(line) for line in open('./data/CONAN/CONAN.json', 'r')]
EN_text = [x for x in all_text[0]['conan'] if x['cn_id'][:2] == 'EN']
X_text = [x['hateSpeech'].strip() for x in EN_text]
Y_text = [[x['counterSpeech'].strip()] for x in EN_text]
hate_num = len(X_text)
random_index = [x for x in range(hate_num)]
random.shuffle(random_index)
train_index = sorted(random_index[:int(pct*len(random_index))])
train_x_text = [X_text[i] for i in range(hate_num) if i in train_index]
train_y_text = [Y_text[i] for i in range(hate_num) if i in train_index]
test_x_text = [X_text[i] for i in range(hate_num) if i not in train_index]
test_y_text = [Y_text[i] for i in range(hate_num) if i not in train_index]
return train_x_text, train_y_text, test_x_text, test_y_text | bac5876be313a85213badcce667af550e8f3f65a | 16,366 |
def load_raw_data_xlsx(files):
"""
Load data from an xlsx file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the xlsx file
+ ('date_column') the name of the date_column in the raw_data
+ ('time_zone') specifier for the timezone the raw data is recorded in
+ ('sheet_name') name or list of names of the sheets that are to be read
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('start_column') Columns between this and ('end_column') are loaded
+ ('end_column')
"""
print('Importing XLSX Data...')
combined_files = []
individual_files = []
for xlsx_file in files:
print('importing ' + xlsx_file['file_name'])
# if isinstance(file_name, str):
# file_name = [file_name,'UTC']
date_column = xlsx_file['date_column']
raw_data = pd.read_excel(INPATH + xlsx_file['file_name'], xlsx_file['sheet_name'],
parse_dates=[date_column])
# convert load data to UTC
if(xlsx_file['time_zone'] != 'UTC'):
raw_data[date_column] = pd.to_datetime(raw_data[date_column]).dt.tz_localize(xlsx_file['time_zone'], ambiguous="infer").dt.tz_convert('UTC').dt.strftime('%Y-%m-%d %H:%M:%S')
else:
if (xlsx_file['dayfirst']):
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%d-%m-%Y %H:%M:%S').dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(raw_data[date_column], format='%Y-%m-%d %H:%M:%S').dt.tz_localize(None)
if(xlsx_file['data_abs']):
raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']] = raw_data.loc[:, xlsx_file['start_column']:xlsx_file['end_column']].abs()
# rename column IDs, specifically Time, this will be used later as the df index
raw_data.rename(columns={date_column: 'Time'}, inplace=True)
raw_data.head() # now the data is positive and set to UTC
raw_data.info()
# interpolating for missing entries created by asfreq and original missing values if any
raw_data.interpolate(method='time', inplace=True)
if(xlsx_file['combine']):
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if(len(combined_files) > 0):
individual_files.append(pd.concat(combined_files))
return individual_files | a2aebdb4d972ef7f46970b3e8fc14ef40ae42bb8 | 16,367 |
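# Hypothetical `files` entry for load_raw_data_xlsx; every name below is a
# placeholder chosen to match the keywords the function reads, not a value
# from the original project.
files = [
    {
        "file_name": "load_data.xlsx",   # expected to live inside INPATH
        "sheet_name": "Sheet1",
        "date_column": "Timestamp",
        "time_zone": "Europe/Berlin",
        "dayfirst": True,                # only consulted when time_zone == 'UTC'
        "data_abs": True,
        "combine": False,
        "start_column": "Load_A",
        "end_column": "Load_C",
    },
]
# dataframes = load_raw_data_xlsx(files)  # requires INPATH and the workbook to exist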
from colorama import Fore
from nornir.core.filter import F

def filter_production_hosts(nr):
"""
Filter the hosts inventory, which match the production
attribute.
:param nr: An initialised Nornir inventory, used for processing.
:return target_hosts: The targeted nornir hosts after being
processed through nornir filtering.
"""
# Execute filter based on hosts being in production
target_hosts = nr.filter(F(production__eq=True))
    # Print separator and header
print("=" * 50)
print("The hosts running in Production are:")
# Iterate over filtered results and printout information
for host, data in target_hosts.inventory.hosts.items():
print(
f"Host: {Fore.CYAN}{host} "
+ Fore.RESET
+ f"- Platform: {Fore.CYAN}{data.platform} "
+ Fore.RESET
+ f"- OS Version: {Fore.CYAN}{data['os_version']} "
+ Fore.RESET
+ f"- Production?: {Fore.CYAN}{data['production']}"
)
    # Print total and separator
print(f"Total: {len(target_hosts.inventory.hosts.items())}")
print("=" * 50)
# Return filtered hosts
return target_hosts | 006524e7b014d3f908955fb81d9f928ac7df25d8 | 16,368 |
import random
def get_lightmap(map_name="random"):
"""
Fetches the right lightmap given command line argument.
"""
assert map_name in ["default", "random"] + list(CONSTANTS.ALL_LIGHTMAPS.keys()), f"Unknown lightmap {map_name}..."
if map_name == "random":
map_name = random.choice(list(CONSTANTS.ALL_LIGHTMAPS.keys()))
elif map_name == "default":
map_name = "Subway_Lights"
lightmap = sl.LightMap(CONSTANTS.ALL_LIGHTMAPS[map_name])
return lightmap | 04ea7e901bbde8ba900469d8ed87b1b3c158809a | 16,369 |
def kill_instance(cook_url, instance, assert_response=True, expected_status_code=204):
"""Kill an instance"""
params = {'instance': [instance]}
response = session.delete(f'{cook_url}/rawscheduler', params=params)
if assert_response:
assert expected_status_code == response.status_code, response.text
return response | 3daa954579b15deedc5a66e77a2178a5682bd1a3 | 16,370 |
def _get_n_batch_from_dataloader(dataloader: DataLoader) -> int:
"""Get a batch number in dataloader.
Args:
dataloader: torch dataloader
Returns:
A batch number in dataloader
"""
n_data = _get_n_data_from_dataloader(dataloader)
n_batch = dataloader.batch_size if dataloader.batch_size else 1
return n_data // n_batch | 182e5566c6b9c83d3dabc3c99f32aedf1e3c21e7 | 16,371 |
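# Quick check of the floor-division behaviour above, assuming
# _get_n_data_from_dataloader returns len(dataloader.dataset): a trailing
# partial batch is not counted, unlike len(dataloader).
import torch
from torch.utils.data import DataLoader, TensorDataset
dataset = TensorDataset(torch.arange(10, dtype=torch.float32))
loader = DataLoader(dataset, batch_size=4)
print(len(dataset) // loader.batch_size)  # 2 -- drops the partial third batch
print(len(loader))                        # 3 -- DataLoader counts it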
def get_hidden() -> list:
"""
Returns places that should NOT be shown in the addressbook
"""
return __hidden_places__ | 8d201c25dd3272b2a3b2292ef3d8fa5293a97967 | 16,372 |
def wait_for_unit_state(reactor, docker_client, unit_name,
expected_activation_states):
"""
Wait until a unit is in the requested state.
:param IReactorTime reactor: The reactor implementation to use to delay.
:param docker_client: A ``DockerClient`` instance.
:param unicode unit_name: The name of the unit.
:param expected_activation_states: Activation states to wait for.
:return: ``Deferred`` that fires when required state has been reached.
"""
def is_in_states(units):
for unit in units:
if unit.name == unit_name:
if unit.activation_state in expected_activation_states:
return True
def check_if_in_states():
responded = docker_client.list()
responded.addCallback(is_in_states)
return responded
return loop_until(reactor, check_if_in_states) | 73278f8762a9b0c5d78ea4d5e098bb7a41b97072 | 16,373 |
def get_list_primitives():
"""Get list of primitive words."""
return g_primitives | 2429b646fbe2fbcc344e08ddffb64ccf2a2d853d | 16,374 |
from collections import defaultdict

def make_graph(edge_list, threshold=0.0, max_connections=10):
"""Return 2 way graph from edge_list based on threshold"""
graph = defaultdict(list)
edge_list.sort(reverse=True, key=lambda x: x[1])
for nodes, weight in edge_list:
a, b = nodes
if weight > threshold:
if len(graph[a]) < max_connections:
graph[a].append(gv.connection(b, weight))
if len(graph[b]) < max_connections:
graph[b].append(gv.connection(a, weight))
print(f'Total graph nodes considered : {len(graph.keys())}')
print(f'Total graph connections considered : {sum(map(len, graph.values()))}')
return graph | c9414a0b8df8b9de46ad444b376c5316f1960cd0 | 16,375 |
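# Toy run of the same threshold/cap logic, with a namedtuple standing in for
# gv.connection (an assumption; the real gv module is not shown in this row).
from collections import defaultdict, namedtuple
Connection = namedtuple("Connection", ["node", "weight"])
edge_list = [(("a", "b"), 0.9), (("a", "c"), 0.4), (("b", "c"), 0.7)]
graph = defaultdict(list)
edge_list.sort(reverse=True, key=lambda x: x[1])
for (a, b), weight in edge_list:
    if weight > 0.5:              # threshold
        if len(graph[a]) < 2:     # max_connections
            graph[a].append(Connection(b, weight))
        if len(graph[b]) < 2:
            graph[b].append(Connection(a, weight))
print(dict(graph))  # a -> [b@0.9], b -> [a@0.9, c@0.7], c -> [b@0.7]; the 0.4 edge is dropped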
def ping(request):
"""Ping view."""
checked = {}
for service in services_to_check:
checked[service.name] = service().check()
if all(item[0] for item in checked.values()):
return HttpResponse(
PINGDOM_TEMPLATE.format(status='OK'),
content_type='text/xml',
)
else:
body = PINGDOM_TEMPLATE.format(status='FALSE')
for service_result in filter(lambda x: x[0] is False, checked.values()):
body += COMMENT_TEMPLATE.format(comment=service_result[1])
return HttpResponse(
body,
status=status.HTTP_500_INTERNAL_SERVER_ERROR,
content_type='text/xml',
) | 09b3bd76c59e4d69678a6ce9c3018f638248ff88 | 16,376 |
import numbers
import numpy as np
def _num_samples(x):
"""Return number of samples in array-like x."""
message = 'Expected sequence or array-like, got %s' % type(x)
if hasattr(x, 'fit') and callable(x.fit):
# Don't get num_samples from an ensembles length!
raise TypeError(message)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError(message)
if hasattr(x, 'shape') and x.shape is not None:
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
# Check that shape is returning an integer or default to len
# Dask dataframes may not return numeric shape[0] value
if isinstance(x.shape[0], numbers.Integral):
            return x.shape[0]
    # Fall back to len() for plain sequences or when shape[0] is not an integer
    return len(x) | 18133457621ec7c79add6d0ff9ab8b1b0c17d524 | 16,377
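# Usage examples, assuming the function above (with the len(x) fallback) is in scope.
import numpy as np
print(_num_samples([1, 2, 3]))         # 3 (plain sequence, no .shape)
print(_num_samples(np.zeros((5, 2))))  # 5 (first dimension of the array)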
def inf_compress_idb(*args):
"""
inf_compress_idb() -> bool
"""
return _ida_ida.inf_compress_idb(*args) | fd4ef3c50b9fef7213d9f37a0326f5e9f06b9822 | 16,378 |
def tokens_history(corpus_id):
""" History of changes in the corpus
:param corpus_id: ID of the corpus
"""
corpus = Corpus.query.get_or_404(corpus_id)
tokens = corpus.get_history(page=int_or(request.args.get("page"), 1), limit=int_or(request.args.get("limit"), 20))
return render_template_with_nav_info('main/tokens_history.html', corpus=corpus, tokens=tokens) | d87e4486cb2141b3c59e86a3483f4c445476ca20 | 16,379 |
def Hidden(request):
"""
    Hidden field with a visible companion field.
"""
schema = schemaish.Structure()
schema.add('Visible', schemaish.String())
schema.add('Hidden', schemaish.String())
form = formish.Form(schema, 'form')
form['Hidden'].widget = formish.Hidden()
return form | 3f5d96339c39c7cf186d4d45d837b0e95402d328 | 16,381 |
def train_and_eval(trial: optuna.Trial, study_dir: str, seed: int):
"""
Objective function for the Optuna `Study` to maximize.
.. note::
Optuna expects only the `trial` argument, thus we use `functools.partial` to sneak in custom arguments.
:param trial: Optuna Trial object for hyper-parameter optimization
:param study_dir: the parent directory for all trials in this study
:param seed: seed value for the random number generators, pass `None` for no seeding
:return: objective function value
"""
# Synchronize seeds between Optuna trials
pyrado.set_seed(seed)
# Load the data
data_set_name = "oscillation_50Hz_initpos-0.5"
data = pd.read_csv(osp.join(pyrado.PERMA_DIR, "time_series", f"{data_set_name}.csv"))
if data_set_name == "daily_min_temperatures":
data = to.tensor(data["Temp"].values, dtype=to.get_default_dtype()).view(-1, 1)
elif data_set_name == "monthly_sunspots":
data = to.tensor(data["Sunspots"].values, dtype=to.get_default_dtype()).view(-1, 1)
elif "oscillation" in data_set_name:
data = to.tensor(data["Positions"].values, dtype=to.get_default_dtype()).view(-1, 1)
else:
raise pyrado.ValueErr(
given=data_set_name,
eq_constraint="'daily_min_temperatures', 'monthly_sunspots', "
"'oscillation_50Hz_initpos-0.5', or 'oscillation_100Hz_initpos-0.4",
)
# Dataset
data_set_hparam = dict(
name=data_set_name,
ratio_train=0.7,
window_size=trial.suggest_int("dataset_window_size", 1, 100),
standardize_data=False,
scale_min_max_data=True,
)
dataset = TimeSeriesDataSet(data, **data_set_hparam)
# Policy
policy_hparam = dict(
dt=0.02 if "oscillation" in data_set_name else 1.0,
hidden_size=trial.suggest_int("policy_hidden_size", 2, 51),
obs_layer=None,
activation_nonlin=fcn_from_str(
trial.suggest_categorical("policy_activation_nonlin", ["to_tanh", "to_sigmoid"])
),
mirrored_conv_weights=trial.suggest_categorical("policy_mirrored_conv_weights", [True, False]),
conv_out_channels=1,
conv_kernel_size=None,
conv_padding_mode=trial.suggest_categorical("policy_conv_padding_mode", ["zeros", "circular"]),
tau_init=trial.suggest_loguniform("policy_tau_init", 1e-2, 1e3),
tau_learnable=True,
kappa_init=trial.suggest_categorical("policy_kappa_init", [0, 1e-4, 1e-2]),
kappa_learnable=True,
potential_init_learnable=trial.suggest_categorical("policy_potential_init_learnable", [True, False]),
init_param_kwargs=trial.suggest_categorical("policy_init_param_kwargs", [None, dict(bell=True)]),
use_cuda=False,
)
policy = NFPolicy(spec=EnvSpec(act_space=InfBoxSpace(shape=1), obs_space=InfBoxSpace(shape=1)), **policy_hparam)
# Algorithm
algo_hparam = dict(
windowed=trial.suggest_categorical("algo_windowed", [True, False]),
max_iter=1000,
optim_class=optim.Adam,
optim_hparam=dict(
lr=trial.suggest_uniform("optim_lr", 5e-4, 5e-2),
eps=trial.suggest_uniform("optim_eps", 1e-8, 1e-5),
weight_decay=trial.suggest_uniform("optim_weight_decay", 5e-5, 5e-3),
),
loss_fcn=nn.MSELoss(),
)
csv_logger = create_csv_step_logger(osp.join(study_dir, f"trial_{trial.number}"))
algo = TSPred(study_dir, dataset, policy, **algo_hparam, logger=csv_logger)
# Train without saving the results
algo.train(snapshot_mode="latest", seed=seed)
# Evaluate
num_init_samples = dataset.window_size
_, loss_trn = TSPred.evaluate(
policy,
dataset.data_trn_inp,
dataset.data_trn_targ,
windowed=algo.windowed,
num_init_samples=num_init_samples,
cascaded=False,
)
_, loss_tst = TSPred.evaluate(
policy,
dataset.data_tst_inp,
dataset.data_tst_targ,
windowed=algo.windowed,
num_init_samples=num_init_samples,
cascaded=False,
)
return loss_trn | 07e1cff3ab9954172ce4c09f673881109df6f08c | 16,382 |
import numpy as np

def random_active_qubits(nqubits, nmin=None, nactive=None):
"""Generates random list of target and control qubits."""
all_qubits = np.arange(nqubits)
np.random.shuffle(all_qubits)
if nactive is None:
nactive = np.random.randint(nmin + 1, nqubits)
return list(all_qubits[:nactive]) | c9bab4d02a0afc569907c6ec838d0020878a345a | 16,383 |
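# Example calls; output depends on the NumPy random state, so the values in
# the comments are illustrative only.
import numpy as np
np.random.seed(0)
print(random_active_qubits(5, nactive=3))  # e.g. [2, 0, 1]
print(random_active_qubits(5, nmin=2))     # 3 or 4 randomly chosen qubits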
import re
import requests
import random
import hashlib
from bs4 import BeautifulSoup
import typing as typ
def main(host: str, username: str, password: str):
"""メイン.
Args:
host: ホスト名又はIPアドレス
username: ユーザ名
password: パスワード
"""
url: str = f"http://{host}/"
rlogintoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Logintoken\", *\"(\d+)\"\)")
rloginchecktoken: re.Pattern = re.compile(r"creatHiddenInput\(\"Frm_Loginchecktoken\", *\"(\d+)\"\)")
s: requests.Session = requests.Session()
res: requests.Response = s.get(url)
m: typ.Optional[re.Match] = rlogintoken.search(res.text)
if m is None:
print("error 1")
return 1
logintoken: str = m[1]
m = rloginchecktoken.search(res.text)
if m is None:
print("error 2")
return 2
loginchecktoken: str = m[1]
pwd_random: int = round(random.random() * 89999999) + 10000000
before_password = hashlib.md5(f"{password}{pwd_random}".encode("utf-8")).hexdigest()
params: typ.Dict[str, str] = {}
params["action"] = "login"
params["Username"] = username
params["Password"] = before_password
params["Frm_Logintoken"] = logintoken
params["UserRandomNum"] = str(pwd_random)
params["Frm_Loginchecktoken"] = loginchecktoken
res2: requests.Response = s.post(url, data=params, allow_redirects=False)
if res2.status_code != 302:
print("error 3")
return 3
res3: requests.Response = s.get(f"{url}getpage.gch?pid=1002&nextpage=pon_status_lan_link_info_t.gch")
if res3.status_code != 200:
print("error 4")
return 4
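    # Column headers exactly as they appear (in Japanese) on the router's status
    # page; they are matched against the <td> text below, so they are kept verbatim.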
columns: typ.List[str] = [
"ポート名",
"受信したデータ量(byte)",
"受信したパケットの総数",
"マルチキャストパケットの受信数",
"ブロードキャストパケットの受信数",
"送信したデータ量(byte)",
"送信されたパケットの総数",
"マルチキャストパケットの送信数",
"ブロードキャストパケットの送信数",
]
indexdic: typ.Dict[str, int] = {}
for i, c in enumerate(columns):
indexdic[c] = i
print(", ".join(columns))
soup = BeautifulSoup(res3.text, "html.parser")
index: int = -1
values: typ.List = []
for td in soup.find_all("td"):
if index != -1:
values[index] = td.text.strip()
index = -1
else:
index = indexdic.get(td.text.strip(), -1)
if index == 0:
if len(values) > 0:
print(", ".join(values))
values = [""] * len(columns)
if len(values) > 0:
print(", ".join(values)) | 36efbd8dc18b891934f690091ef8709e0eddb3ce | 16,384 |
from typing import List
import requests
def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session()) -> List[dict]:
"""
Create a Label and associate it with templates.
If no templates are specified, the label is associated with the first default template of the project.
:param project_id: Project ID where to create the label
:param label_name: Name for the label
:param templates: Templates that use the label
:param session: Session to connect to the server
:return: Label ID in the Konfuzio APP.
"""
url = get_create_label_url()
if len(templates) == 0:
prj_templates = get_project_templates()
default_template = [t for t in prj_templates if t['is_default']][0]
templates_ids = [default_template['id']]
else:
templates_ids = [template.id for template in templates]
data = {"project": project_id, "text": label_name, "templates": templates_ids}
r = session.post(url=url, data=data)
assert r.status_code == requests.codes.created, f'Status of request: {r}'
label_id = r.json()['id']
return label_id | 4dda5f7ac6473be76212c03deb6beb7980b44105 | 16,385 |
def home():
""" Home page """
return render_template("index.html") | 0ac607593cc98871d97c111fc2ca89aa980af83f | 16,386 |
def get_from_parameterdata_or_dict(params,key,**kwargs):
"""
Get the value corresponding to a key from an object that can be either
a ParameterData or a dictionary.
:param params: a dict or a ParameterData object
:param key: a key
:param default: a default value. If not present, and if key is not
present in params, a KeyError is raised, as in params[key]
:return: the corresponding value
"""
if isinstance(params,ParameterData):
params = params.get_dict()
if 'default' in kwargs:
return params.get(key,kwargs['default'])
else:
return params[key] | 864936e9b43c18e4a8dfd7d88c1cedda28fdb23d | 16,388 |
import torch
def test_input_type(temp_files, fsdp_config, input_cls):
"""Test FSDP with input being a list or a dict, only single GPU."""
if torch_version() < (1, 7, 0):
# This test runs multiple test cases in a single process. On 1.6.0 it
# throw an error like this:
# RuntimeError: Container is already initialized! Cannot initialize it twice!
pytest.skip("older pytorch doesn't work well with single process dist_init multiple times")
result = dist_init(rank=0, world_size=1, filename=temp_files[0], filename_rpc=temp_files[1])
assert result, "Dist init failed"
assert isinstance(fsdp_config, dict), str(fsdp_config)
class Model(Module):
def __init__(self):
super().__init__()
self.layer = Linear(4, 4)
def forward(self, input):
if isinstance(input, list):
input = input[0]
else:
assert isinstance(input, dict), input
input = input["in"]
return self.layer(input)
model = FSDP(Model(), **fsdp_config).cuda()
optim = SGD(model.parameters(), lr=0.1)
for _ in range(5):
in_data = torch.rand(64, 4).cuda()
in_data.requires_grad = True
if input_cls is list:
in_data = [in_data]
else:
assert input_cls is dict
in_data = {"in": in_data}
out = model(in_data)
out.sum().backward()
optim.step()
optim.zero_grad()
model.assert_state(TrainingState.IDLE)
teardown() | 6bf7d03f51088518e85d3e6ea8f59bcc86e4a0b4 | 16,389 |
def get_heroesplayed_players(matchs_data, team_longname):
"""Returns a dict linking each player to
- the heroes he/she played
- if it was a win (1) or a loss (0)
"""
picks = get_picks(matchs_data, team_longname)
players = get_players(picks)
results = get_results(matchs_data, team_longname)
heroes_played = {item: [[], []] for item in players}
for pl in players:
i = 0
for rd in picks:
if pl in rd.keys():
heroes_played[pl][0].append(rd[pl])
if results[i] == 1:
heroes_played[pl][1].append(1)
else:
heroes_played[pl][1].append(0)
i += 1
return heroes_played | 53dc68642a4cca7b80ede7b2d54098eb9274b1af | 16,390 |
def autofmt(filename, validfmts, defaultfmt=None):
"""Infer the format of a file from its filename. As a convention all the
format to be forced with prefix followed by a colon (e.g. "fmt:filename").
`validfmts` is a list of acceptable file formats
`defaultfmt` is the format to use if the extension is not on the valid list
returns `filename`,`fmt`
"""
colonix = filename.find(":")
if colonix != -1:
extension = filename[:colonix]
filename = filename[(colonix+1):]
else:
extension = None
for validfmt in validfmts:
if filename.endswith(validfmt):
extension = filename[-len(validfmt):]
return filename, (extension.lower() if extension in validfmts else defaultfmt) | 3e39325f43f8b4a87074a38f7d576d17669151fb | 16,391 |
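# Usage examples, assuming the function above is in scope.
print(autofmt("data.csv", ["csv", "json"]))          # ('data.csv', 'csv')  -- inferred from the suffix
print(autofmt("csv:data.txt", ["csv", "json"]))      # ('data.txt', 'csv')  -- forced by the "csv:" prefix
print(autofmt("data.txt", ["csv", "json"], "json"))  # ('data.txt', 'json') -- falls back to defaultfmt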
def get_or_add_dukaan():
""" Add a new business """
if request.method == "POST":
payload = request.json
# payload = change_case(payload, "lower")
business = db.dukaans.find_one({"name": payload["name"]})
if business is not None:
return (
jsonify(
{
"success": False,
"message": "Business name already exists, Please choose another name.",
}
),
400,
)
for required_key in business_schema:
if required_key not in payload.keys():
return jsonify({"message": f"Missing {required_key} parameter"}), 400
db.dukaans.insert_one(payload)
return jsonify({"success": True, "dukaan": clean_dict_helper(payload)}), 201
dukaans = list(db.dukaans.find({}).limit(5))
for dukaan in dukaans:
if len(dukaan.get("categories", [])) > 0:
dukaan["categories"] = [
db.categories.find_one({"_id": ObjectId(_id)})["name"]
for _id in dukaan["categories"]
]
ratings = list(db.ratings.find({"business": str(dukaan["_id"])}, {"rating": 1}))
if len(ratings) > 0:
ratings_sum = sum([r["rating"] for r in ratings])
dukaan["avg_rating"] = float(ratings_sum) / float(len(ratings))
else:
dukaan["avg_rating"] = 0.0
return jsonify({"success": True, "dukaans": clean_dict_helper(dukaans)}) | e522ac8394b7b70949e2854e10251f3bc51279ae | 16,392 |
import numpy as np

def nearest(a, num):
"""
Finds the array's nearest value to a given num.
Args:
a (ndarray): An array.
num (float): The value to find the nearest to.
Returns:
float. The normalized array.
"""
a = np.array(a, dtype=float)
return a.flat[np.abs(a - num).argmin()] | cadbad68add910ced502a6802592d1c043f1c914 | 16,393 |
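# Examples, assuming the function above is in scope.
print(nearest([1.0, 2.5, 4.0], 2.4))  # 2.5
print(nearest([[1, 2], [7, 9]], 6))   # 7.0 -- nested input is handled via .flat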
def hex_string(data):
"""Return a hex dump of a string as a string.
The output produced is in the standard 16 characters per line hex +
ascii format:
00000000: 40 00 00 00 00 00 00 00 40 00 00 00 01 00 04 80 @....... @.......
00000010: 01 01 00 00 00 00 00 01 00 00 00 00 ........ ....
"""
pos = 0 # Position in data
line = 0 # Line of data
hex = "" # Hex display
ascii = "" # ASCII display
result = ""
while pos < len(data):
# Start with header
if pos % 16 == 0:
hex = "%08x: " % (line * 16)
ascii = ""
# Add character
hex = hex + "%02x " % (ord(data[pos]))
if ord(data[pos]) < 32 or ord(data[pos]) > 176:
ascii = ascii + '.'
else:
ascii = ascii + data[pos]
pos = pos + 1
# Add separator if half way
if pos % 16 == 8:
hex = hex + " "
ascii = ascii + " "
# End of line
if pos % 16 == 0:
result = result + "%s %s\n" % (hex, ascii)
line = line + 1
# Leftover bits
if pos % 16 != 0:
# Pad hex string
for i in range(0, (16 - (pos % 16))):
hex = hex + " "
# Half way separator
if (pos % 16) < 8:
hex = hex + " "
result = result + "%s %s\n" % (hex, ascii)
return result | 7f827b4f8049b43e86d35bd972f5b6aaa2190869 | 16,394 |
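# Example, assuming the function above is in scope; as written it expects a
# str and calls ord() on each character.
print(hex_string("ABCDEFGH12345"))
# prints one line like: "00000000: 41 42 43 44 45 46 47 48  31 32 33 34 35 ... ABCDEFGH 12345"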
def extract_ego_time_point(history: SimulationHistory) -> npt.NDArray[int]:
"""
Extract time point in simulation history.
:param history: Simulation history.
:return An array of time in micro seconds.
"""
time_point = np.array(
[sample.ego_state.time_point.time_us for sample in history.data]
)
return time_point | 4860b2c7032ea232ace2680c704e4a59051b6c5c | 16,396 |