content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import numpy as np
import scipy.sparse as sp
def to_dense(arr):
"""
Convert a sparse array to a dense numpy array. If the
array is already a numpy array, just return it. If the
array passed in is a list, then we recursively apply this
method to its elements.
Parameters
-----------
arr : :obj:`numpy.ndarray`, :obj:`scipy.sparse.spmatrix`, or list
Any matrix (or list of matrices) that must be converted
to a dense numpy array.
Raises
--------
TypeError
If the array provided is not a list, `numpy` array,
or `scipy.sparse` matrix.
Returns
--------
dense_arr : :obj:`numpy.ndarray` or list
The dense numpy array (or list of dense arrays) equivalent to the input.
"""
if isinstance(arr, np.ndarray):
return arr
if isinstance(arr, list):
return [to_dense(el) for el in arr]
# assume it must be a `scipy.sparse.spmatrix`
if isinstance(arr, sp.spmatrix):
return arr.toarray()
error_msg = (
"Can only convert numpy matrices, scipy matrices, or "
"lists of those elements to dense arrays"
)
raise TypeError(error_msg) | 1fa2ccdd184aa4155cfd121310d67e9e73ffff17 | 3,300 |
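A minimal usage sketch for the converter above, assuming numpy and SciPy are available:
import numpy as np
import scipy.sparse as sp
sparse_eye = sp.identity(3, format="csr")        # a SciPy sparse matrix
mixed = [sparse_eye, np.ones((2, 2))]            # a list mixing sparse and dense inputs
print(to_dense(sparse_eye).shape)                # (3, 3)
print(to_dense(mixed)[1].shape)                  # (2, 2)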
def output_results(results, way):
"""Helper method with most of the logic"""
tails = way(results)
heads = len(results) - tails
result = ", ".join([["Heads", "Tails"][flip] for flip in results])
return result + f"\n{heads} Heads; {tails} Tails" | f60716004b11e115fe69a14b70957b5b66080dbc | 3,301 |
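A quick usage sketch for the helper above, assuming flips are encoded as 0 = heads and 1 = tails, and that `way` can be any callable that counts tails (here the built-in `sum`):
flips = [0, 1, 1, 0, 1]
print(output_results(flips, sum))
# Heads, Tails, Tails, Heads, Tails
# 2 Heads; 3 Tails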
def guess_init(model, focal_length, j2d, init_pose):
"""Initialize the camera translation via triangle similarity, by using the torso
joints .
:param model: SMPL model
:param focal_length: camera focal length (kept fixed)
:param j2d: 14x2 array of CNN joints
:param init_pose: 72D vector of pose parameters used for initialization (kept fixed)
:returns: 3D vector corresponding to the estimated camera translation
"""
cids = np.arange(0, 12)
# map from LSP to SMPL joints
j2d_here = j2d[cids]
smpl_ids = [8, 5, 2, 1, 4, 7, 21, 19, 17, 16, 18, 20]
opt_pose = ch.array(init_pose)
_, A_global = global_rigid_transformation(opt_pose, model.J, model.kintree_table, xp=ch)
Jtr = ch.vstack([g[:3, 3] for g in A_global])
Jtr = Jtr[smpl_ids].r
# 9 is L shoulder, 3 is L hip
# 8 is R shoulder, 2 is R hip
diff3d = np.array([Jtr[9] - Jtr[3], Jtr[8] - Jtr[2]])
mean_height3d = np.mean(np.sqrt(np.sum(diff3d**2, axis=1)))
diff2d = np.array([j2d_here[9] - j2d_here[3], j2d_here[8] - j2d_here[2]])
mean_height2d = np.mean(np.sqrt(np.sum(diff2d**2, axis=1)))
est_d = focal_length * (mean_height3d / mean_height2d)
init_t = np.array([0., 0., est_d])
return init_t | ce1ca89bc60500cc59441c97cf9d71ef3d9b528b | 3,302 |
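The core of this initialization is plain similar triangles: depth is roughly focal_length * (3D torso height / 2D torso height). A standalone numeric sketch of that step, with made-up joint distances and an assumed focal length:
import numpy as np
focal_length = 5000.                      # pixels (assumed value)
height3d = np.array([0.55, 0.53])         # metres: left/right shoulder-to-hip distances on the model
height2d = np.array([260., 250.])         # pixels: the same distances measured in the image
est_d = focal_length * height3d.mean() / height2d.mean()
print(np.array([0., 0., est_d]))          # estimated camera translation, roughly [0, 0, 10.6]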
def TCnCom_Dump(*args):
"""
Dump(TCnComV const & CnComV, TStr Desc=TStr())
Parameters:
CnComV: TCnComV const &
Desc: TStr const &
TCnCom_Dump(TCnComV const & CnComV)
Parameters:
CnComV: TCnComV const &
"""
return _snap.TCnCom_Dump(*args) | c2ce258a12074106e4c93e938dfa988b1bc29015 | 3,303 |
import os
def roughness_convert(reference, invert) :
"""
Open an image and modify it to accommodate the way Mitsuba Renderer interprets roughness.
returns the reference for the image.
"""
# Conversion for Mitsuba : no value above .5, and if this is a glossinness map, do an inversion.
# I'm doing a linear inversion, it's closest to the way it seems to behave in 3ds Max.
# However, it's not perfect, there's maybe a need for a custom conversion.
if not pilimported :
print("Pillow doesn't seem to be installed, roughness maps may cause some problems.")
return reference
reference = config.filepath+"export\\textures\\"+reference
input = Image.open(reference)# Convert to luminance
# ri means roughness inverted, r just roughness.
# Different names in case the same texture is used for roughness, revert roughness, and something else.
filename = ".".join(reference.split("\\")[-1].split(".")[:-1])+("_ri."if invert else "_r.")+reference.split(".")[-1]
# Dither it ? There is a loss of precision with halving and reconverting to 8bit channels
# With a simple random function it doesn't work; the lambda seems to work by blocks in the image.
# Random with pixel coordinates as seed should work
if invert : # Linear inversion : -> convert to linear, invert , reconvert to perceptual
# Using 254 instead of 255 is intentional: it ensures that the smoothest surfaces give a pitch-black roughness instead of the greyish tone obtained otherwise.
output = input.point(lambda px : int(.5 * 255. * (max(0, 1 - (float(px)/254.)**(2.2)))**(1./2.2)))
else :
output = input.point(lambda px : int(.5 * 255. * (float(px)/255.)))
savedir = config.filepath+"export\\textures"
if not os.path.exists(savedir) :
os.makedirs(savedir)
output.save(savedir+"\\"+filename)
return "textures\\"+filename | c40812d770b616b1cbdcc469ebe0e4c562b16d41 | 3,304 |
import functools
def get_reparametrize_functions(
params, constraints, scaling_factor=None, scaling_offset=None
):
"""Construct functions to map between internal and external parameters.
All required information is partialed into the functions.
Args:
params (pandas.DataFrame): See :ref:`params`.
constraints (list): List of constraint dictionaries.
scaling_factor (np.ndarray or None): If None, no scaling factor is used.
scaling_offset (np.ndarray or None): If None, no scaling offset is used
Returns:
func: Function that maps an external parameter vector to an internal one
func: Function that maps an internal parameter vector to an external one
"""
params = add_default_bounds_to_params(params)
check_params_are_valid(params)
processed_constraints, processed_params = process_constraints(
constraints=constraints,
params=params,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
# get partialed reparametrize from internal
pre_replacements = processed_params["_pre_replacements"].to_numpy()
post_replacements = processed_params["_post_replacements"].to_numpy()
fixed_values = processed_params["_internal_fixed_value"].to_numpy()
# get partialed reparametrize to internal
internal_free = processed_params["_internal_free"].to_numpy()
partialed_to_internal = functools.partial(
reparametrize_to_internal,
internal_free=internal_free,
processed_constraints=processed_constraints,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
partialed_from_internal = functools.partial(
reparametrize_from_internal,
fixed_values=fixed_values,
pre_replacements=pre_replacements,
processed_constraints=processed_constraints,
post_replacements=post_replacements,
scaling_factor=scaling_factor,
scaling_offset=scaling_offset,
)
return partialed_to_internal, partialed_from_internal | a0d8f283bf44f66fb098c499a6b610174078b980 | 3,305 |
def gaussNewton(P, model, target, targetLandmarks, sourceLandmarkInds, NN, jacobi = True, calcId = True):
"""
Energy function to be minimized for fitting.
"""
# Shape eigenvector coefficients
idCoef = P[: model.idEval.size]
expCoef = P[model.idEval.size: model.idEval.size + model.expEval.size]
# Rotation Euler angles, translation vector, scaling factor
angles = P[model.idEval.size + model.expEval.size:][:3]
R = rotMat2angle(angles)
t = P[model.idEval.size + model.expEval.size:][3: 6]
s = P[model.idEval.size + model.expEval.size:][6]
# Transpose if necessary
if targetLandmarks.shape[0] != 3:
targetLandmarks = targetLandmarks.T
# The eigenmodel vertices, before rigid transformation and scaling
# (kept under a separate name so the `model` object and its attributes stay accessible below)
vertices = model.idMean + np.tensordot(model.idEvec, idCoef, axes = 1) + np.tensordot(model.expEvec, expCoef, axes = 1)
# After rigid transformation and scaling
source = s*np.dot(R, vertices) + t[:, np.newaxis]
# Find the nearest neighbors of the target to the source vertices
# start = clock()
distance, ind = NN.kneighbors(source.T)
targetNN = target[ind.squeeze(axis = 1), :].T
# print('NN: %f' % (clock() - start))
# Calculate residuals
rVert = targetNN - source
rLand = targetLandmarks - source[:, sourceLandmarkInds]
rAlpha = idCoef ** 2 / model.idEval
rDelta = expCoef ** 2 / model.expEval
# Calculate costs
Ever = np.linalg.norm(rVert, axis = 0).sum() / model.numVertices
Elan = np.linalg.norm(rLand, axis = 0).sum() / sourceLandmarkInds.size
Ereg = np.sum(rAlpha) + np.sum(rDelta)
if jacobi:
# start = clock()
drV_dalpha = -s*np.tensordot(R, model.idEvec, axes = 1)
drV_ddelta = -s*np.tensordot(R, model.expEvec, axes = 1)
drV_dpsi = -s*np.dot(dR_dpsi(angles), vertices)
drV_dtheta = -s*np.dot(dR_dtheta(angles), vertices)
drV_dphi = -s*np.dot(dR_dphi(angles), vertices)
drV_dt = -np.tile(np.eye(3), [source.shape[1], 1])
drV_ds = -np.dot(R, vertices)
drR_dalpha = np.diag(2*idCoef / model.idEval)
drR_ddelta = np.diag(2*expCoef / model.expEval)
# Calculate Jacobian
if calcId:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rAlpha, rDelta]
J = np.r_[np.c_[drV_dalpha.reshape((source.size, idCoef.size), order = 'F'), drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_dalpha[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, idCoef.size), order = 'F'), drV_ddelta[:, sourceLandmarkInds, :].reshape((targetLandmarks.size, expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_dalpha, np.zeros((idCoef.size, expCoef.size + 7))], np.c_[np.zeros((expCoef.size, idCoef.size)), drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)
else:
r = np.r_[rVert.flatten('F'), rLand.flatten('F'), rDelta]
J = np.r_[np.c_[drV_ddelta.reshape((source.size, expCoef.size), order = 'F'), drV_dpsi.flatten('F'), drV_dtheta.flatten('F'), drV_dphi.flatten('F'), drV_dt, drV_ds.flatten('F')], np.c_[drV_ddelta[:, sourceLandmarkInds, :].reshape((np.prod(targetLandmarks.shape), expCoef.size), order = 'F'), drV_dpsi[:, sourceLandmarkInds].flatten('F'), drV_dtheta[:, sourceLandmarkInds].flatten('F'), drV_dphi[:, sourceLandmarkInds].flatten('F'), drV_dt[:sourceLandmarkInds.size * 3, :], drV_ds[:, sourceLandmarkInds].flatten('F')], np.c_[drR_ddelta, np.zeros((expCoef.size, 7))]]
# Parameter update (Gauss-Newton)
dP = np.r_[np.zeros(model.idEval.size), -np.linalg.inv(np.dot(J.T, J)).dot(J.T).dot(r)]
# print('GN: %f' % (clock() - start))
return Ever + Elan + Ereg, dP
return Ever + Elan + Ereg | 2b54080bf9f76a8a16e26c10f6209f55bcb0c57f | 3,306 |
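The parameter update above is the standard Gauss-Newton step dP = -(J^T J)^{-1} J^T r. A tiny standalone illustration on random data, using np.linalg.lstsq, which solves the same system with better numerical conditioning:
import numpy as np
rng = np.random.default_rng(0)
J = rng.normal(size=(20, 3))                       # Jacobian of the residuals w.r.t. the parameters
r = rng.normal(size=20)                            # residual vector at the current parameters
dP_normal = -np.linalg.inv(J.T @ J) @ J.T @ r      # the update as written in the function above
dP_lstsq = -np.linalg.lstsq(J, r, rcond=None)[0]   # equivalent least-squares solution
print(np.allclose(dP_normal, dP_lstsq))            # True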
from theano import tensor as T
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1-D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument.
"""
return T.arange(start, stop=stop, step=step, dtype=dtype) | 72f505d7f1928d4e35a7e183a30bdc8cddf2edd7 | 3,307 |
def create_attachable_access_entity_profile(infra, entity_profile, **args):
"""Create an attached entity profile. This provides a template to deploy hypervisor policies on a large set of leaf ports. This also provides the association of a Virtual Machine Management (VMM) domain and the physical network infrastructure. """
args = args['optional_args'] if 'optional_args' in args.keys() else args
infra_attentityp = AttEntityP(infra, entity_profile)
if 'enable_infrastructure_vlan' in args.keys():
if args['enable_infrastructure_vlan'] in [True, 'True', 'true', 't', 'T']:
infra_provacc = ProvAcc(infra_attentityp)
elif args['enable_infrastructure_vlan'] in [False, 'False', 'false', 'f', 'F']:
infra_provacc = ProvAcc(infra_attentityp)
infra_provacc.delete()
if 'domain_profiles' in args.keys() and is_valid(args['domain_profiles']):
for domain in args['domain_profiles']:
if domain['type'] == 'physical':
path = 'uni/phys-'
elif domain['type'] == 'vcenter':
path = 'uni/vmmp-VMware/dom-'
elif domain['type'] == 'layer2':
path = 'uni/l2dom-'
elif domain['type'] == 'layer3':
path = 'uni/l3dom-'
else:
print('Invalid domain type.')
path = ''
infra_rsdomp = RsDomP(infra_attentityp, path+domain['name'])
if is_valid_key(args, 'interface_policy_group'):
infra_funcp = FuncP(infra)
infra_accportgrp = AccPortGrp(infra_funcp, args['interface_policy_group'])
infra_rsattentp = RsAttEntP(infra_accportgrp)
return infra_attentityp | 96c711b8c5de52ca44483edcb478a829986e901a | 3,308 |
from typing import Tuple
from typing import List
import csv
import numpy as np
def tensor_projection_reader(
embedding_file_path: str,
label_file_path: str
) -> Tuple[np.ndarray, List[List[str]]]:
"""
Reads the embedding and labels stored at the given paths and returns an np.ndarray and list of labels
:param str embedding_file_path: Path to the embedding file
:param str label_file_path: Path to the labels file
:return: An embedding and list of labels
:rtype: (numpy.ndarray, List[List[str]])
"""
embedding = np.loadtxt(embedding_file_path, delimiter='\t')
labels: List[List[str]] = []
with open(label_file_path) as f:
csv_reader = csv.reader(f, delimiter='\t')
for label_row in csv_reader:
labels.append(label_row)
return embedding, labels | 7e8cc804181ead221a283b4d8aa95a9e9b7d00ef | 3,309 |
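A round-trip sketch for the reader above, assuming only numpy and the standard library: it writes a 2x3 embedding and matching labels as tab-separated files and reads them back:
import os, tempfile
import numpy as np
tmp = tempfile.mkdtemp()
emb_path = os.path.join(tmp, "embedding.tsv")
lab_path = os.path.join(tmp, "labels.tsv")
np.savetxt(emb_path, np.arange(6.).reshape(2, 3), delimiter="\t")
open(lab_path, "w").write("doc0\tsports\ndoc1\tnews\n")
embedding, labels = tensor_projection_reader(emb_path, lab_path)
print(embedding.shape, labels)    # (2, 3) [['doc0', 'sports'], ['doc1', 'news']]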
def xml_to_dict(xmlobj, saveroot=True):
"""Parse the xml into a dictionary of attributes.
Args:
xmlobj: An ElementTree element or an xml string.
saveroot: Keep the xml element names (ugly format)
Returns:
An ElementDict object or ElementList for multiple objects
"""
if isinstance(xmlobj, basestring):
# Allow for blank (usually HEAD) result on success
if xmlobj.isspace():
return {}
try:
element = ET.fromstring(xmlobj)
except Exception, err:
raise Error('Unable to parse xml data: %s' % err)
else:
element = xmlobj
element_type = element.get('type', '').lower()
if element_type == 'array':
element_list_type = element.tag.replace('-', '_')
return_list = element_containers.ElementList(element_list_type)
for child in element.getchildren():
child_element = xml_to_dict(child, saveroot)
if saveroot and isinstance(child_element, dict):
return_list.append(child_element.values()[0])
else:
return_list.append(child_element)
if saveroot:
return element_containers.ElementDict(element_list_type,
{element_list_type:
return_list})
else:
return return_list
elif element.get('nil') == 'true':
return None
elif element_type in ('integer', 'datetime', 'date',
'decimal', 'double', 'float') and not element.text:
return None
elif element_type == 'integer':
return int(element.text)
elif element_type == 'datetime':
if date_parse:
return date_parse(element.text)
else:
try:
timestamp = calendar.timegm(
time.strptime(element.text, '%Y-%m-%dT%H:%M:%S+0000'))
return datetime.datetime.utcfromtimestamp(timestamp)
except ValueError, err:
raise Error('Unable to parse timestamp. Install dateutil'
' (http://labix.org/python-dateutil) or'
' pyxml (http://pyxml.sf.net/topics/)'
' for ISO8601 support.')
elif element_type == 'date':
time_tuple = time.strptime(element.text, '%Y-%m-%d')
return datetime.date(*time_tuple[:3])
elif element_type == 'decimal':
return decimal.Decimal(element.text)
elif element_type in ('float', 'double'):
return float(element.text)
elif element_type == 'boolean':
if not element.text:
return False
return element.text.strip() in ('true', '1')
elif element_type == 'yaml':
if not yaml:
raise ImportError('PyYaml is not installed: http://pyyaml.org/')
return yaml.safe_load(element.text)
elif element_type == 'base64binary':
return base64.decodestring(element.text)
elif element_type == 'file':
content_type = element.get('content_type',
'application/octet-stream')
filename = element.get('name', 'untitled')
return FileObject(element.text, filename, content_type)
elif element_type in ('symbol', 'string'):
if not element.text:
return ''
return element.text
elif element.getchildren():
# This is an element with children. The children might be simple
# values, or nested hashes.
if element_type:
attributes = element_containers.ElementDict(
underscore(element.get('type', '')), element.items())
else:
attributes = element_containers.ElementDict(singularize(
element.tag.replace('-', '_')), element.items())
for child in element.getchildren():
attribute = xml_to_dict(child, saveroot)
child_tag = child.tag.replace('-', '_')
if saveroot:
# If this is a nested hash, it will come back as
# {child_tag: {key: value}}, we only want the inner hash
if isinstance(attribute, dict):
if len(attribute) == 1 and child_tag in attribute:
attribute = attribute[child_tag]
# Handle multiple elements with the same tag name
if child_tag in attributes:
if isinstance(attributes[child_tag], list):
attributes[child_tag].append(attribute)
else:
attributes[child_tag] = [attributes[child_tag],
attribute]
else:
attributes[child_tag] = attribute
if saveroot:
return {element.tag.replace('-', '_'): attributes}
else:
return attributes
elif element.items():
return element_containers.ElementDict(element.tag.replace('-', '_'),
element.items())
else:
return element.text | 85428aaefc1f48881891ddd910daef1cc4f1547e | 3,310 |
import torch
import numpy
from torch import nn
def conve_interaction(
h: torch.FloatTensor,
r: torch.FloatTensor,
t: torch.FloatTensor,
t_bias: torch.FloatTensor,
input_channels: int,
embedding_height: int,
embedding_width: int,
hr2d: nn.Module,
hr1d: nn.Module,
) -> torch.FloatTensor:
"""Evaluate the ConvE interaction function.
:param h: shape: (batch_size, num_heads, 1, 1, dim)
The head representations.
:param r: shape: (batch_size, 1, num_relations, 1, dim)
The relation representations.
:param t: shape: (batch_size, 1, 1, num_tails, dim)
The tail representations.
:param t_bias: shape: (batch_size, 1, 1, num_tails, 1)
The tail entity bias.
:param input_channels:
The number of input channels.
:param embedding_height:
The height of the reshaped embedding.
:param embedding_width:
The width of the reshaped embedding.
:param hr2d:
The first module, transforming the 2D stacked head-relation "image".
:param hr1d:
The second module, transforming the 1D flattened output of the 2D module.
:return: shape: (batch_size, num_heads, num_relations, num_tails)
The scores.
"""
# repeat if necessary, and concat head and relation, batch_size', num_input_channels, 2*height, width
# with batch_size' = batch_size * num_heads * num_relations
x = broadcast_cat(
[
h.view(*h.shape[:-1], input_channels, embedding_height, embedding_width),
r.view(*r.shape[:-1], input_channels, embedding_height, embedding_width),
],
dim=-2,
).view(-1, input_channels, 2 * embedding_height, embedding_width)
# batch_size', num_input_channels, 2*height, width
x = hr2d(x)
# batch_size', num_output_channels * (2 * height - kernel_height + 1) * (width - kernel_width + 1)
x = x.view(-1, numpy.prod(x.shape[-3:]))
x = hr1d(x)
# reshape: (batch_size', embedding_dim) -> (b, h, r, 1, d)
x = x.view(-1, h.shape[1], r.shape[2], 1, h.shape[-1])
# For efficient calculation, each of the convolved [h, r] rows has only to be multiplied with one t row
# output_shape: (batch_size, num_heads, num_relations, num_tails)
t = t.transpose(-1, -2)
x = (x @ t).squeeze(dim=-2)
# add bias term
return x + t_bias.squeeze(dim=-1) | fadf03905ed5c822df0fe099cb439f481073d202 | 3,311 |
def index():
"""Show Homepage"""
return render_template("index.html") | f05985d10a9699783f6f3c4c4f88c8be48a0a7a9 | 3,312 |
import copy
import logging
def structure_standardization(smi: str) -> str:
"""
Standardization function to clean up smiles with RDKit. First, the input smiles is converted into a mol object.
Unreadable SMILES are written to the log file. The molecule size is checked by the number of non-hydrogen atoms.
If the molecule has more than 100 non-hydrogen atoms, the compound is discarded and written in the log file.
Molecules with number of non-hydrogen atoms <= 100 are standardized with the MolVS toolkit
(https://molvs.readthedocs.io/en/latest/index.html) relying on RDKit. Molecules which failed the standardization
process are saved in the log file. The remaining standardized structures are converted back into their canonical
SMILES format.
:param smi: Input SMILES from the given structure data file T4
:return: smi_clean: Cleaned and standardized canonical SMILES of the given input SMILES.
Args:
smi (str): Non-standardized smiles string
Returns:
str: standardized smiles string
"""
# tautomer.TAUTOMER_TRANSFORMS = update_tautomer_rules()
# importlib.reload(MolVS_standardizer)
# param = ReadConfig()
standardization_param = ConfigDict.get_parameters()["standardization"]
max_num_atoms = standardization_param["max_num_atoms"]
max_num_tautomers = standardization_param["max_num_tautomers"]
include_stereoinfo = standardization_param["include_stereoinfo"]
## Load new tautomer enumerator/canonicalizer
tautomerizer = rdMolStandardize.TautomerEnumerator()
tautomerizer.SetMaxTautomers(max_num_tautomers)
tautomerizer.SetRemoveSp3Stereo(
False
) # Keep stereo information of keto/enol tautomerization
def isotope_parent(mol: Chem.Mol) -> Chem.Mol:
"""
Isotope parent from MOLVS
Return the isotope parent of a given molecule.
The isotope parent has all atoms replaced with the most abundant isotope for that element.
Args:
mol (Chem.Mol): input rdkit mol object
Returns:
Chem.Mol: isotope parent rdkit mol object
"""
mol = copy.deepcopy(mol)
# Replace isotopes with common weight
for atom in mol.GetAtoms():
atom.SetIsotope(0)
return mol
def my_standardizer(mol: Chem.Mol) -> Chem.Mol:
"""
MolVS implementation of standardization
Args:
mol (Chem.Mol): non-standardized rdkit mol object
Returns:
Chem.Mol: standardized rdkit mol object
"""
mol = copy.deepcopy(mol)
Chem.SanitizeMol(mol)
mol = Chem.RemoveHs(mol)
disconnector = rdMolStandardize.MetalDisconnector()
mol = disconnector.Disconnect(mol)
normalizer = rdMolStandardize.Normalizer()
mol = normalizer.normalize(mol)
reionizer = rdMolStandardize.Reionizer()
mol = reionizer.reionize(mol)
Chem.AssignStereochemistry(mol, force=True, cleanIt=True)
# TODO: Check this removes symmetric stereocenters
return mol
mol = MolFromSmiles(smi) # Read SMILES and convert it to RDKit mol object.
if (
mol is not None
): # Check, if the input SMILES has been converted into a mol object.
if (
mol.GetNumAtoms() <= max_num_atoms
): # check size of the molecule based on the non-hydrogen atom count.
try:
mol = rdMolStandardize.ChargeParent(
mol
) # standardize molecules using MolVS and RDKit
mol = isotope_parent(mol)
if include_stereoinfo is False:
Chem.RemoveStereochemistry(mol)
mol = tautomerizer.Canonicalize(mol)
mol_clean = my_standardizer(mol)
smi_clean = MolToSmiles(
mol_clean
) # convert mol object back to SMILES
else:
mol = tautomerizer.Canonicalize(mol)
mol_clean = my_standardizer(mol)
smi_clean = MolToSmiles(mol_clean)
except (ValueError, AttributeError) as e:
smi_clean = np.nan
logging.error(
"Standardization error, " + smi + ", Error Type: " + str(e)
) # write failed molecules during standardization to log file
else:
smi_clean = np.nan
logging.error("Molecule too large, " + smi)
else:
smi_clean = np.nan
logging.error("Reading Error, " + smi)
return smi_clean | ba739eb6c2a822b3584badb2247ec97846c123e1 | 3,313 |
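A much smaller standalone sketch of the same MolVS-style cleanup, assuming RDKit is installed; it skips the config lookups and tautomer limit used above and simply strips the salt, neutralizes and canonicalizes one SMILES:
from rdkit import Chem
from rdkit.Chem.MolStandardize import rdMolStandardize
mol = Chem.MolFromSmiles("[Na+].CC(=O)[O-]")               # sodium acetate
mol = rdMolStandardize.ChargeParent(mol)                   # keep the largest fragment and neutralize it
mol = rdMolStandardize.TautomerEnumerator().Canonicalize(mol)
print(Chem.MolToSmiles(mol))                               # expected: CC(=O)O (acetic acid)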
def with_input_dtype(policy, dtype):
"""Copies "infer" `policy`, adding `dtype` to it.
Policy must be "infer" or "infer_with_float32_vars" (i.e., has no compute dtype).
Returns a new policy with compute dtype `dtype`. The returned policy's
variable dtype is also `dtype` if `policy` is "infer", and is `float32` if
`policy` is "infer_with_float32_vars".
Args:
policy: An "infer" or "infer_with_float32_vars" policy
dtype: The dtype of an input to a layer.
Returns:
A new policy copied from `policy`, but with compute dtype and maybe
variable_dtype set to `dtype`.
"""
assert not policy.compute_dtype
dtype = dtypes.as_dtype(dtype).name
if policy.variable_dtype is None:
return Policy(dtype)
else:
# Policies without a compute dtype are either "infer" or
# "infer_with_float32_vars", so the variable_dtype must be float32 here.
assert policy.variable_dtype == 'float32'
try:
Policy._warn_about_float32_vars = False # pylint: disable=protected-access
return Policy(dtype + '_with_float32_vars')
finally:
Policy._warn_about_float32_vars = True # pylint: disable=protected-access | 32815d4499b57ed8623a55414ef7b6115c450726 | 3,314 |
import io
import warnings
def decode_object_based(effects):
"""
Reads and decodes info about object-based layer effects.
"""
fp = io.BytesIO(effects)
version, descriptor_version = read_fmt("II", fp)
try:
descriptor = decode_descriptor(None, fp)
except UnknownOSType as e:
warnings.warn("Ignoring object-based layer effects tagged block (%s)" % e)
return effects
return ObjectBasedEffects(version, descriptor_version, descriptor) | 6471f6f9987b1817f223fe02a5ba5923ddf8c0c8 | 3,315 |
def example_add(x: int, y: int):
"""
Return the sum of x and y.
"""
return x + y | 88e835e872e2ef4eb54f721e3d556ee7f8db1bbc | 3,316 |
from typing import Optional
def inverse(text: str, reset_style: Optional[bool] = True) -> str:
"""Returns text inverse-colored.
Args:
reset_style: Boolean that determines whether a reset character should
be appended to the end of the string.
"""
return set_mode("inverse", False) + text + (reset() if reset_style else "") | 4d8aceada756386348b68c13dabe4948b15986c3 | 3,317 |
def make():
""" hook function for entrypoints
:return:
"""
return LocalFileSystem | 7e48c7c4a9225f4bd3d7d430b6221005e2787e55 | 3,318 |
def configure():
"""read configuration from command line options and config file values"""
opts = parse_options()
defaults = dict(v.split('=') for v in opts.S or [])
with open(opts.config_file) as config:
targets = read_config(config, defaults, opts.ignore_colon)
if opts.T:
return {opts.T: targets[opts.T]}
else:
return targets | 09c85e8fce3947ee54c1524545e14fe25a4d054e | 3,319 |
from scipy.io import loadmat
def proper_loadmat(file_path):
"""Loads using scipy.io.loadmat, and cleans some of the metadata"""
data = loadmat(file_path)
clean_data = {}
for key, value in data.items():
if not key.startswith("__"):
clean_data[key] = value.squeeze().tolist()
return clean_data | d7cbc547ab47235db2df80fdf2ca9decd3a4c42d | 3,320 |
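A round-trip sketch for the loader above using scipy.io.savemat: the MATLAB metadata keys ("__header__" and friends) are stripped and the arrays come back squeezed to plain Python values:
import os, tempfile
from scipy.io import savemat
path = os.path.join(tempfile.mkdtemp(), "demo.mat")
savemat(path, {"gain": 3.5, "taps": [1, 2, 3]})
print(proper_loadmat(path))    # {'gain': 3.5, 'taps': [1, 2, 3]}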
from typing import List
def _get_time_total(responses: List[DsResponse]) -> List[str]:
"""Get formated total time metrics."""
metric_settings = {
"name": "time_total",
"type": "untyped",
"help": "Returns the total time in seconds (time taken to request, render and download).",
"func": lambda response: __float2str(response.time_total),
}
return _get_metrics(responses, metric_settings) | 641bff0a75d1f61afa7ad1d9e9058faee58c18b8 | 3,321 |
async def list_sessions(
cache: Redis = Depends(depends_redis),
) -> ListSessionsResponse:
"""Get all session keys"""
keylist = []
for key in await cache.keys(pattern=f"{IDPREFIX}*"):
if not isinstance(key, bytes):
raise TypeError(
"Found a key that is not stored as bytes (stored as type "
f"{type(key)!r})."
)
keylist.append(key.decode(encoding="utf-8"))
return ListSessionsResponse(keys=keylist) | 7fce8610a5c53317636da7e5408a582c10faff3c | 3,322 |
def square(x, out=None, where=True, **kwargs):
"""
Return the element-wise square of the input.
Args:
x (numpoly.ndpoly):
Input data.
out (Optional[numpy.ndarray]):
A location into which the result is stored. If provided, it must
have a shape that the inputs broadcast to. If not provided or
`None`, a freshly-allocated array is returned. A tuple (possible
only as a keyword argument) must have length equal to the number of
outputs.
where (Optional[numpy.ndarray]):
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value. Note
that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
kwargs:
Keyword args passed to numpy.ufunc.
Returns:
out (numpoly.ndpoly):
Element-wise `x*x`, of the same shape and dtype as `x`.
This is a scalar if `x` is a scalar.
Examples:
>>> numpoly.square([-1j, 1])
polynomial([(-1-0j), (1+0j)])
>>> numpoly.square(numpoly.sum(numpoly.symbols("x y")))
polynomial(y**2+2*x*y+x**2)
"""
return multiply(x, x, out=out, where=where, **kwargs) | a59297f913433ec870a9eb7d8be5eea21a78cc41 | 3,323 |
def evaluate_tuple(columns,mapper,condition):
"""
"""
if isinstance(condition, tuple):
return condition[0](columns,mapper,condition[1],condition[2])
else:
return condition(columns,mapper) | 5200da50900329431db4ce657e79135534b8469e | 3,324 |
import scipy.misc
import numpy as np
def imread(path, is_grayscale=True):
"""
Read image using its path.
Default value is gray-scale, and image is read by YCbCr format as the paper said.
"""
if is_grayscale:
return scipy.misc.imread(path, flatten=True, mode='YCbCr').astype(np.float)
else:
return scipy.misc.imread(path, mode='YCbCr').astype(np.float) | b32e918583c7d4a3bc3e38994bc4aef7dfdc5206 | 3,325 |
def get_priority_text(priority):
"""
Returns operation priority name by numeric value.
:param int priority: Priority numeric value.
:return: Operation priority name.
:rtype: str | None
"""
if priority == NSOperationQueuePriorityVeryLow:
return "VeryLow"
elif priority == NSOperationQueuePriorityLow:
return "Low"
elif priority == NSOperationQueuePriorityNormal:
return "Normal"
elif priority == NSOperationQueuePriorityHigh:
return "High"
elif priority == NSOperationQueuePriorityVeryHigh:
return "VeryHigh"
return "{}".format(priority) | 02986079f164672d58d7d5476e82463e1343ba9d | 3,326 |
import posixpath
def get_experiment_tag_for_image(image_specs, tag_by_experiment=True):
"""Returns the registry with the experiment tag for given image."""
tag = posixpath.join(experiment_utils.get_base_docker_tag(),
image_specs['tag'])
if tag_by_experiment:
tag += ':' + experiment_utils.get_experiment_name()
return tag | f45898d1f9adb74ca1133be05ab60da5de9df9e6 | 3,327 |
def call_pager():
"""
Convenient wrapper to call Pager class
"""
return _Pager() | 00cba0c47fc18417ab82ff41ae956961dcff9db4 | 3,328 |
def sign_in(request, party_id, party_guest_id):
"""
Sign guest into party.
"""
if request.method != "POST":
return HttpResponse("Endpoint supports POST method only.", status=405)
try:
party = Party.objects.get(pk=party_id)
party_guest = PartyGuest.objects.get(pk=party_guest_id)
except Party.DoesNotExist:
return HttpResponse("Requested Party ID does not exist.", status=404)
except PartyGuest.DoesNotExist:
return HttpResponse("Requested Party Guest does not exist.", status=404)
if not party.is_list_closed():
return HttpResponse("Can't sign in guests before the party starts.", status=403)
if not party_guest.signed_in:
party.sign_in(party_guest)
party.save()
party_guest.save()
return JsonResponse(party_guest.to_json())
return HttpResponse(
"Guest already signed in. Refresh to see updated list.", status=409
) | 2672344a92fb0d029946bf30d1a0a89d33a24a0f | 3,329 |
from datetime import datetime
def mcoolqc_status(connection, **kwargs):
"""Searches for annotated bam files that do not have a qc object
Keyword arguments:
lab_title -- limit search with a lab i.e. Bing+Ren, UCSD
start_date -- limit search to files generated since a date formatted YYYY-MM-DD
run_time -- assume runs beyond run_time are dead (default=24 hours)
"""
start = datetime.utcnow()
check = CheckResult(connection, 'mcoolqc_status')
my_auth = connection.ff_keys
check.action = "mcoolqc_start"
check.brief_output = []
check.full_output = {}
check.status = 'PASS'
# check indexing queue
check, skip = wfr_utils.check_indexing(check, connection)
if skip:
return check
# Build the query (find mcool files)
default_stati = 'released&status=uploaded&status=released+to+project'
stati = 'status=' + (kwargs.get('status') or default_stati)
query = 'search/?file_format.file_format=mcool&{}'.format(stati)
query += '&type=FileProcessed'
query += '&quality_metric.display_title=No+value'
# add date
s_date = kwargs.get('start_date')
if s_date:
query += '&date_created.from=' + s_date
# add lab
lab = kwargs.get('lab_title')
if lab:
query += '&lab.display_title=' + lab
# The search
print(query)
res = ff_utils.search_metadata(query, key=my_auth)
if not res:
check.action_message = 'No action required at this moment'
check.summary = 'All Good!'
return check
check.summary = '{} files need a mcoolqc'. format(len(res))
check.status = 'WARN'
check = wfr_utils.check_runs_without_output(res, check, 'mcoolQC', my_auth, start)
return check | 44273aa0f7441775258e0b390059cfe9778747e2 | 3,330 |
def isValidListOrRulename(word: str) -> bool:
"""test if there are no accented characters in a listname or rulename
so asciiletters, digitis, - and _ are allowed
"""
return bool(reValidName.match(word)) | ec826f31604f8dd43ba044e1f6ffbaaf758bdb88 | 3,331 |
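The module-level pattern `reValidName` is not shown in this snippet; a plausible stand-in that matches the docstring (ASCII letters, digits, '-' and '_' only) would be:
import re
reValidName = re.compile(r"^[A-Za-z0-9_-]+$")    # hypothetical definition, not from the original module
print(isValidListOrRulename("my_rule-1"))        # True
print(isValidListOrRulename("café"))             # False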
def glyph_has_ink(font: TTFont, name: Text) -> bool:
"""Checks if specified glyph has any ink.
That is, that it has at least one defined contour associated.
Composites are considered to have ink if any of their components have ink.
Args:
font: the font
name: The name of the glyph to check for ink.
Returns:
True if the font has at least one contour associated with it.
"""
if 'glyf' in font:
return ttf_glyph_has_ink(font, name)
elif ('CFF ' in font) or ('CFF2' in font):
return cff_glyph_has_ink(font, name)
else:
raise Exception("Could not find 'glyf', 'CFF ', or 'CFF2' table.") | 6450e2ec2ed7158f901c7e50999245042d880dce | 3,332 |
async def async_setup_entry(hass, entry, async_add_entities):
"""
Set up n3rgy data sensor
:param hass: hass object
:param entry: config entry
:return: none
"""
# in-line function
async def async_update_data():
"""
Fetch data from n3rgy API
This is the place to pre-process the data to lookup tables so entities can quickly look up their data
:param: none
:return: power consumption data
"""
return await hass.async_add_executor_job(read_consumption, api, entry)
async def async_initialize():
"""
Initialize objects from n3rgy API
:param: none
:return: data coordinator, device type
"""
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=PLATFORM,
update_method=async_update_data
)
# fetch initial data so we have data when entities subscribe
sensor_name, device_type = await hass.async_add_executor_job(get_device_info, api, entry)
await coordinator.async_refresh()
return (coordinator, sensor_name, device_type)
# initialize n3rgy API
device_type = None
api = init_api_client(entry)
# grant consent options
if GRANT_CONSENT_READY:
# grant consent is enabled for live environment
if process_grant_consent(entry):
coordinator, sensor_name, device_type = await async_initialize()
else:
# grant consent is disabled
coordinator, sensor_name, device_type = await async_initialize()
# add sensor
async_add_entities([N3rgySensor(coordinator, sensor_name, device_type)], False) | e2dd956428eb377c56d104e49889760f6ba9b653 | 3,333 |
def main():
""" Process command line arguments and run the script """
bp = BrPredMetric()
result = bp.Run()
return result | b61a80ee805dfc2d6e146b24ae0564bb5cda6e83 | 3,334 |
def step(init_distr,D):
"""
"""
for k in init_distr.keys():
init_distr[k] = D[init_distr[k]]()
return init_distr | 6270dd2818d2148e7d979d249fbb2a3a596dc2de | 3,335 |
def from_json(data: JsonDict) -> AttributeType:
"""Make an attribute type from JSON data (deserialize)
Args:
data: JSON data from Tamr server
"""
base_type = data.get("baseType")
if base_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'baseType'.")
if base_type == Boolean._tag:
return BOOLEAN
elif base_type == Double._tag:
return DOUBLE
elif base_type == Int._tag:
return INT
elif base_type == Long._tag:
return LONG
elif base_type == String._tag:
return STRING
elif base_type == Array._tag:
inner_type = data.get("innerType")
if inner_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'innerType' for Array type.")
return Array(inner_type=from_json(inner_type))
elif base_type == Map._tag:
inner_type = data.get("innerType")
if inner_type is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'innerType' for Map type.")
return Map(inner_type=from_json(inner_type))
elif base_type == Record._tag:
attributes = data.get("attributes")
if attributes is None:
logger.error(f"JSON data: {repr(data)}")
raise ValueError("Missing required field 'attributes' for Record type.")
return Record(
attributes=tuple([subattribute.from_json(attr) for attr in attributes])
)
else:
logger.error(f"JSON data: {repr(data)}")
raise ValueError(f"Unrecognized 'baseType': {base_type}") | eba662ed1c1c3f32a5b65908fae68d7dd41f89e3 | 3,336 |
def ftduino_find_by_name(name):
"""
Returns the path of the ftDuino with the specified `name`.
:param name: Name of the ftDuino.
:return: The path of the ftDuino or ``None`` if the ftDuino was not found.
"""
for path, device_name in ftduino_iter():
if device_name == name:
return path
return None | 8a03d0b84dc9180fb2885d46fc8f1755cd2c6eed | 3,337 |
import numbers
def spectral_entropy (Sxx, fn, flim=None, display=False) :
"""
Compute different entropies based on the average spectrum, its variance,
and its maxima [1]_ [2]_
Parameters
----------
Sxx : ndarray of floats
Spectrogram (2d).
It is recommended to work with PSD to be consistent with energy conservation
fn : 1d ndarray of floats
frequency vector
flim : tuple (fmin, fmax), optional, default is None
Frequency band used to compute the spectral entropy.
For instance, one may want to compute the spectral entropy for the
biophony bandwidth
display : boolean, optional, default is False
Display the different spectra (mean, variance, covariance, max...)
Returns
-------
EAS : scalar
Entropy of Average Spectrum
ECU : scalar
Entropy of spectral variance (along the time axis for each frequency)
ECV : scalar
Entropy of Coefficient of Variation (along the time axis for each frequency)
EPS : scalar
Entropy of spectral maxima (peaks)
EPS_KURT : scalar
Kurtosis of spectral maxima
EPS_SKEW : scalar
Skewness of spectral maxima
References
----------
.. [1] TOWSEY, Michael W. The calculation of acoustic indices derived from long-duration recordings of the natural environment. 2017. https://eprints.qut.edu.au/110634/1/QUTePrints110634_TechReport_Towsey2017August_AcousticIndices%20v3.pdf
.. [2] QUT : https://github.com/QutEcoacoustics/audio-analysis. Michael Towsey, Anthony Truskinger, Mark Cottman-Fields, & Paul Roe. (2018, March 5). Ecoacoustics Audio Analysis Software v18.03.0.41 (Version v18.03.0.41). Zenodo. http://doi.org/10.5281/zenodo.1188744
Examples
--------
>>> s, fs = maad.sound.load('../data/cold_forest_daylight.wav')
>>> Sxx_power, tn, fn, _ = maad.sound.spectrogram (s, fs)
>>> EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW = maad.features.spectral_entropy(Sxx_power, fn, flim=(2000,10000))
>>> print('EAS: %2.2f / ECU: %2.2f / ECV: %2.2f / EPS: %2.2f / EPS_KURT: %2.2f / EPS_SKEW: %2.2f' % (EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW))
EAS: 0.27 / ECU: 0.49 / ECV: 0.24 / EPS: 1.00 / EPS_KURT: 17.58 / EPS_SKEW: 3.55
"""
if isinstance(flim, numbers.Number) :
print ("WARNING: flim must be a tupple (fmin, fmax) or None")
return
if flim is None : flim=(fn.min(),fn.max())
# select the indices corresponding to the frequency range
iBAND = index_bw(fn, flim)
# force Sxx to be an ndarray
X = np.asarray(Sxx)
# TOWSEY : only on the bio band
# EAS [TOWSEY] #
#### COMMENT : Result a bit different due to different Hilbert implementation
X_mean = mean(X[iBAND], axis=1)
Hf = entropy(X_mean)
EAS = 1 - Hf
#### Entropy of spectral variance (along the time axis for each frequency)
""" ECU [TOWSEY] """
X_Var = var(X[iBAND], axis=1)
Hf_var = entropy(X_Var)
ECU = 1 - Hf_var
#### Entropy of coefficient of variance (along the time axis for each frequency)
""" ECV [TOWSEY] """
X_CoV = var(X[iBAND], axis=1)/mean(X[iBAND], axis=1)
Hf_CoV = entropy(X_CoV)
ECV = 1 - Hf_CoV
#### Entropy of spectral maxima
""" EPS [TOWSEY] """
ioffset = np.argmax(iBAND==True)
Nbins = sum(iBAND==True)
imax_X = np.argmax(X[iBAND],axis=0) + ioffset
imax_X = fn[imax_X]
max_X_bin, bin_edges = np.histogram(imax_X, bins=Nbins, range=flim)
if sum(max_X_bin) == 0 :
max_X_bin = np.zeros(len(max_X_bin))
EPS = float('nan')
#### Kurtosis of spectral maxima
EPS_KURT = float('nan')
#### skewness of spectral maxima
EPS_SKEW = float('nan')
else:
max_X_bin = max_X_bin/sum(max_X_bin)
Hf_fmax = entropy(max_X_bin)
EPS = 1 - Hf_fmax
#### Kurtosis of spectral maxima
EPS_KURT = kurtosis(max_X_bin)
#### skewness of spectral maxima
EPS_SKEW = skewness(max_X_bin)
if display:
fig, ax = plt.subplots()
ax.plot(fn[iBAND], X_mean/max(X_mean),label="Normalized mean")
plt.plot(fn[iBAND], X_Var/max(X_Var),label="Normalized variance")
ax.plot(fn[iBAND], X_CoV/max(X_CoV),label="Normalized covariance")
ax.plot(fn[iBAND], max_X_bin/max(max_X_bin),label="Normalized Spectral max")
ax.set_title('Signals')
ax.set_xlabel('Frequency [Hz]')
ax.legend()
return EAS, ECU, ECV, EPS, EPS_KURT, EPS_SKEW | 533b388781e158b558ee38645271194adb414729 | 3,338 |
def _percentages(self):
"""
An extension method for Counter that
returns a dict mapping the keys of the Counter to their percentages.
:param self: Counter
:return: a dict mapping the keys of the Counter to their percentages
"""
# type: () -> dict[any, float]
length = float(sum(count for count in self.viewvalues()))
return {value: self[value] / length for value in self} | 752781a9697113ebf3297050649a7f4ba1580b97 | 3,339 |
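The method above relies on Python 2's Counter.viewvalues(); the same idea in present-day Python 3, written as a standalone sketch:
from collections import Counter
counts = Counter("aabbbc")
total = sum(counts.values())
shares = {key: n / total for key, n in counts.items()}
print(shares)    # {'a': 0.333..., 'b': 0.5, 'c': 0.166...}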
def find_best_word_n(draw, nb_letters, path):
"""
"""
lexicon = get_lexicon(path, nb_letters)
mask = [is_word_in_draw(draw, word) for word in lexicon["draw"]]
lexicon = lexicon.loc[mask]
return lexicon | ff3e06e69e6c56f59cf278c10e6860c6d0529b87 | 3,340 |
import json
def feature_reader(path):
"""
Reading the feature matrix stored as JSON from the disk.
:param path: Path to the JSON file.
:return out_features: Dict with index and value tensor.
"""
features = json.load(open(path))
features = {int(k): [int(val) for val in v] for k, v in features.items()}
return features | 959e37ae5a3b0b482d67e5e917211e2131b3c643 | 3,341 |
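A round-trip sketch for the reader above using only the standard library; string keys come back as ints and values as lists of ints:
import json, os, tempfile
path = os.path.join(tempfile.mkdtemp(), "features.json")
open(path, "w").write(json.dumps({"0": [3, 7], "1": [2]}))
print(feature_reader(path))    # {0: [3, 7], 1: [2]}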
def locate_all_occurrence(l, e):
"""
Return indices of all element occurrences in given list
:param l: given list
:type l: list
:param e: element to locate
:return: indices of all occurrences
:rtype: list
"""
return [i for i, x in enumerate(l) if x == e] | 95b662f359bd94baf68ac86450d94298dd6b366d | 3,342 |
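Two quick calls showing the expected behaviour, including the empty result when the element is absent:
print(locate_all_occurrence([1, 2, 1, 3, 1], 1))   # [0, 2, 4]
print(locate_all_occurrence([1, 2, 3], 9))         # []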
def UVectorFromAngles(reflection):
"""
Calculate the B&L U vector from bisecting geometry
angles
"""
u = np.zeros((3,), dtype='float64')
# The tricky bit is set again: Busing & Levy's omega is 0 in
# bisecting position. This is why we have to correct for
# stt/2 here
om = np.deg2rad(reflection['om'] - reflection['stt']/2.)
chi = np.deg2rad(reflection['chi'])
phi = np.deg2rad(reflection['phi'])
u[0] = cos(om) * cos(chi) * cos(phi) - sin(om) * sin(phi)
u[1] = cos(om) * cos(chi) * sin(phi) + sin(om) * cos(phi)
u[2] = cos(om) * sin(chi)
return u | fe282e8ac67e5fafb34c63e1745cb9b262602a7a | 3,343 |
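A numeric sanity check of the same formula in plain numpy: in exact bisecting geometry (om = stt/2, chi = phi = 0) the U vector reduces to (1, 0, 0):
import numpy as np
om = np.deg2rad(20.0 - 40.0/2.)    # omega corrected by stt/2, zero in bisecting position
chi, phi = np.deg2rad(0.0), np.deg2rad(0.0)
u = np.array([np.cos(om)*np.cos(chi)*np.cos(phi) - np.sin(om)*np.sin(phi),
np.cos(om)*np.cos(chi)*np.sin(phi) + np.sin(om)*np.cos(phi),
np.cos(om)*np.sin(chi)])
print(u)    # [1. 0. 0.]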
import numbers
import numpy as np
import skimage
from PIL import Image as pil_image
def to_pillow_image(img_array, image_size=None):
"""Convert an image represented as a numpy array back into a
Pillow Image object."""
if isinstance(image_size, (numbers.Integral, np.integer)):
image_size = (image_size, image_size)
img_array = skimage.img_as_ubyte(img_array)
img = pil_image.fromarray(img_array)
if image_size:
img = img.resize((image_size[1], image_size[0]), pil_image.LANCZOS)
return img | 435bfe79afc59f1cbdd250ca9e1558de8921f7b6 | 3,344 |
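A short usage sketch, assuming Pillow, numpy and scikit-image are installed: a random float image is converted and resized, and Pillow reports the size as (width, height):
import numpy as np
arr = np.random.default_rng(0).random((32, 48))   # float image with values in [0, 1)
img = to_pillow_image(arr, image_size=(16, 24))   # requested (rows, cols)
print(img.size)                                   # (24, 16)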
from datetime import datetime
import os
def new_session_dir(rootdir, pid, sid):
"""
Creates a path to a new session directory.
Example:
<DATA_ROOT>/p0/session_2014-08-12_p0_arm1
"""
date_str = datetime.date.today().strftime('%Y-%m-%d')
session_dir = os.path.join(
rootdir,
pid,
'session_' + date_str + '_' + pid + '_' + sid)
return (session_dir, date_str) | 0dd3af50c7ad83478ab8be136b775c63aceca0c4 | 3,345 |
from typing import Iterator
def seq_to_sentence(seq: Iterator[int], vocab: Vocab, ignore: Iterator[int]) -> str:
"""Convert a sequence of integers to a string of (space-separated) words according to a vocabulary.
:param seq: Iterator[int]
A sequence of integers (tokens) to be converted.
:param vocab: Vocab
A Torchtext Vocab object containing a mapping from integers to strings (words).
:param ignore: Iterator[int]
A sequence of integers representing "special tokens" to ignore (convert as blanks).
:return: str
The resulting sentence.
"""
return ' '.join(vocab.itos[i] if vocab.itos[i] not in ignore else '' for i in seq).strip() | 2138bd3454c61b7e2a6e3dad25876fdcc4cabe4e | 3,346 |
from skimage.exposure import histogram, match_histograms
import gc
def estimate_exposures(imgs, exif_exp, metadata, method, noise_floor=16, percentile=10,
invert_gamma=False, cam=None, outlier='cerman'):
"""
Exposure times may be inaccurate. Estimate the correct values by fitting a linear system.
:imgs: Image stack
:exif_exp: Exposure times read from image metadata
:metadata: Internal camera metadata dictionary
:method: Pick from ['gfxdisp', 'cerman']
:noise_floor: All pixels smaller than this will be ignored
:percentile: Use a small percentage of the least noisy pixels for the estimation
:invert_gamma: If the images are gamma corrected, invert to work with linear values
:cam: Camera noise parameters for better estimation
:return: Corrected exposure times
"""
assert method in ('gfxdisp', 'cerman')
num_exp = len(imgs)
assert num_exp > 1, 'Input files not found or invalid: need at least two images'
# Mask out saturated and noisy pixels
black_frame = np.tile(metadata['black_level'].reshape(2, 2), (metadata['h']//2, metadata['w']//2)) \
if metadata['raw_format'] else metadata['black_level']
Y = np.maximum(imgs - black_frame, 1e-6) # Add epsilon since we need log(Y)
if invert_gamma:
max_value = np.iinfo(metadata['dtype']).max
Y = (Y / max_value)**(invert_gamma) * max_value
if method == 'cerman':
'''
L. Cerman and V. Hlavac, “Exposure time estimation for high dynamic range imaging with
hand held camera” in Proc. of Computer Vision Winter Workshop, Czech Republic. 2006.
'''
rows, cols, m, W = np.zeros((4, 0))
for i in range(num_exp - 1):
# Ensure images are sorted in increasing order of exposure time
assert all(e1 <= e2 for e1, e2 in zip(exif_exp[:-1], exif_exp[1:])), \
'Please name the input files in increasing order of exposure time when sorted'
im1, im2 = Y[i], Y[i+1]
mask = np.stack((im1 + black_frame < metadata['saturation_point'],
im2 + black_frame < metadata['saturation_point'],
im1 > noise_floor, im2 > noise_floor)).all(axis=0)
# Match histograms of consecutive exposures
im1_hat = match_histograms(im1, im2)
im2_hat = match_histograms(im2, im1)
# Construct the simple sparse linear system. There are 2 sets for each pair (Eq. 4)
num_pix = np.count_nonzero(mask)
rows = np.concatenate((rows, np.arange(2*num_pix) + len(rows)))
cols = np.concatenate((cols, np.repeat(i, 2*num_pix)))
m = np.concatenate((m, (im1_hat[mask]/im1[mask]), (im2[mask]/im2_hat[mask])))
# Weights are given by sqrt() of histogram counts (Eq. 4)
im1, im2 = im1.astype(np.uint16), im2.astype(np.uint16)
counts, bins = histogram(im1)
weights1 = np.sqrt(counts[np.searchsorted(bins, im1[mask])])
counts, bins = histogram(im2)
weights2 = np.sqrt(counts[np.searchsorted(bins, im2[mask])])
W = np.concatenate((W, weights1, weights2))
num_rows = rows.shape[0]
data = np.ones(num_rows)
O = csr_matrix((data, (rows, cols)), shape=(num_rows, (num_exp - 1)))
elif method == 'gfxdisp':
logger.info(f'Estimate using logarithmic linear system with noise model')
num_pix = int(percentile/100*metadata['h']*metadata['w'])
# If noise parameters is provided, retrieve variances, else use simplified model
L = np.log(Y)
if cam == 'default':
cam = HDRutils.NormalNoise('Sony', 'ILCE-7R', 100, bits=14)
bits = cam.bits if cam else 14
scaled_var = np.stack([(cam.var(y)/y**2) if cam else 1/y**2 for y in Y/(2**bits - 1)])
# Construct logarithmic sparse linear system W.O.e = W.m
logger.info(f'Constructing sparse matrix (O) and vector (m) using {num_pix} pixels')
rows = np.arange(0, (num_exp - 1)*num_pix, 0.5)
cols, data = np.repeat(np.ones_like(rows)[None], 2, axis=0)
data[1::2] = -1
m = np.zeros((num_exp - 1)*num_pix, dtype=np.float32)
W = np.zeros_like(m)
for i in range(num_exp - 1):
cols[i*num_pix*2:(i + 1)*num_pix*2:2] = i
# Collect unsaturated pixels from all longer exposures
for j in range(i + 1, num_exp):
mask = np.stack((Y[i] + black_frame < metadata['saturation_point'],
Y[j] + black_frame < metadata['saturation_point'],
Y[i] > noise_floor, Y[j] > noise_floor)).all(axis=0)
# if mask.sum() < num_pix:
# continue
weights = np.concatenate((W[i*num_pix:(i+1)*num_pix],
(1/(scaled_var[i] + scaled_var[j]) * mask).flatten()))
logdiff = np.concatenate((m[i*num_pix:(i+1)*num_pix], (L[i] - L[j]).flatten()))
selected = np.argsort(weights)[-num_pix:]
W[i*num_pix:(i + 1)*num_pix] = weights[selected]
m[i*num_pix:(i + 1)*num_pix] = logdiff[selected]
cols[i*num_pix*2 + 1:(i + 1)*num_pix*2:2][selected > num_pix] = j
O = csr_matrix((data, (rows, cols)), shape=((num_exp - 1)*num_pix, num_exp))
logger.info('Solving the sparse linear system using least squares')
if outlier == 'cerman':
err_prev = np.finfo(float).max
t = trange(1000, leave=False)
for i in t:
exp = lsqr(diags(W) @ O, W * m)[0]
err = (W*(O @ exp - m))**2
selected = err < 3*err.mean()
W = W[selected]
m = m[selected]
O = O[selected]
if err.mean() < 1e-6 or err_prev - err.mean() < 1e-6:
# assert err_prev - err.mean() > 0
break
err_prev = err.mean()
t.set_description(f'loss={err.mean()}')
del err, selected
gc.collect()
logger.warning(f'Used {O.shape[0]/(num_exp - 1)/num_pix*100}% of the initial pixels')
elif outlier == 'ransac':
assert method == 'gfxdisp'
num_rows = W.shape[0]
# Randomly select 10% of the data
selected = np.zeros(num_rows, dtype=bool)
selected[:num_rows//10] = True
loss = np.finfo(float).max
WO = diags(W) @ O
Wm = W*m
t = trange(100, leave=False)
for i in t:
np.random.shuffle(selected)
exp_i = lsqr(WO[selected], Wm[selected])[0]
exp_i = np.exp(exp_i - exp_i.max()) * exif_exp.max()
reject = np.maximum(exp_i/exif_exp, exif_exp/exp_i) > 3
exp_i[reject] = exif_exp[reject]
err = ((W*(O @ exp_i - m))**2).sum()
if err < loss:
loss = err
exp = np.log(exp_i)
t.set_description(f'loss={err}; i={i}')
else:
exp = lsqr(diags(W) @ O, W * m)[0]
if method == 'cerman':
exp = np.append(exp, exif_exp[-1])
for e in range(num_exp - 2, -1, -1):
exp[e] = exif_exp[e+1]/exp[e]
elif method == 'gfxdisp':
exp = np.exp(exp - exp.max()) * exif_exp.max()
# logger.warning(f'Exposure times in EXIF: {exif_exp}, estimated exposures: {exp}. Outliers removed {i} times')
# reject = np.maximum(exp/exif_exp, exif_exp/exp) > 3
# exp[reject] = exif_exp[reject]
# if reject.any():
# logger.warning(f'Exposure estimation failed {reject}. Try using more pixels')
return exp | db80a45dc30cea86a71688a56447ef0166bb49b2 | 3,347 |
def default_reverse(*args, **kwargs):
"""
Acts just like django.core.urlresolvers.reverse() except that if the
resolver raises a NoReverseMatch exception, then a default value will be
returned instead. If no default value is provided, then the exception will
be raised as normal.
NOTE: Any exception that is not NoReverseMatch will always be raised as
normal, even if a default is provided.
"""
# We're explicitly NOT happy to just re-raise the exception, as that may
# adversely affect stack traces.
if 'default' not in kwargs:
return reverse(*args, **kwargs)
else:
default = kwargs.pop('default', None)
try:
return reverse(*args, **kwargs)
except NoReverseMatch:
return default | cadf9452c309adb4f2a865a3ea97ee2aca5b1acc | 3,348 |
def get_company_periods_up_to(period):
""" Get all periods for a company leading up to the given period, including the given period
"""
company = period.company
return (company.period_set
.filter(company=company, end__lte=period.end)) | 604814f60a58f9155a47faba62561f94d3197fb2 | 3,349 |
from typing import List
def format_count(
label: str, counts: List[int], color: str, dashed: bool = False
) -> dict:
"""Format a line dataset for chart.js"""
ret = {
"label": label,
"data": counts,
"borderColor": color,
"borderWidth": 2,
"fill": False,
}
if dashed:
ret["borderDash"] = [5, 5]
return ret | 40f5aee7ad5d66f57737345b7d82e45a97cf6633 | 3,350 |
def detect_ripples(eeg):
"""Detect sharp wave ripples (SWRs) from single channel eeg (AnalogSignalArray).
"""
# Maggie defines ripples by doing:
# (1) filter 150-250
# (2) hilbert envelope
# (3) smooth with Gaussian (4 ms SD)
# (4) 3.5 SD above the mean for 15 ms
# (5) full ripple defined as window back to mean
assert eeg.n_signals == 1, "only single channel ripple detection currently supported!"
# (1)
ripple_eeg = nel.filtering.sosfiltfilt(eeg, fl=150, fh=250)
# (2, 3)
ripple_envelope = nel.utils.signal_envelope1D(ripple_eeg, sigma=0.004)
# (4, 5)
bounds, maxes, events = nel.utils.get_events_boundaries(
x=ripple_envelope.data,
PrimaryThreshold=ripple_envelope.mean() + 3.5*ripple_envelope.std(), # cm/s
SecondaryThreshold=ripple_envelope.mean(), # cm/s
minThresholdLength=0.015, # threshold crossing must be at least 15 ms long
minLength=0.0, # total ripple duration must be at least XXX ms long
ds = 1/ripple_envelope.fs
)
# convert bounds to time in seconds
timebounds = ripple_envelope.time[bounds]
# add 1/fs to stops for open interval
timebounds[:,1] += 1/eeg.fs
# create EpochArray with bounds
ripple_epochs = nel.EpochArray(timebounds)
# Adjust ripple centers to align to a peak
ripple_centers = np.floor( (ripple_epochs.centers - eeg.time[0])*eeg.fs ).astype(int)
ch = 7 # this was on some of Sibo's data, for CA1
adjusted_centers = [(p-10)+np.argmax(eeg.data[ch,p-10:p+10]) for p in ripple_centers[1:-1].tolist()]
return ripple_epochs | c92190ee6c31e6c1805841258224fa2aa7d4a749 | 3,351 |
def cymdtodoy(str):
"""cymdtodoy(str) -> string
Convert a legal CCSDS time string with the date expressed as a month and day
to a simple time string with the date expressed as a day-of-year."""
try:
(year, mon, day, hour, min, sec) = cymdtoaymd(str)
doy = aymdtodoy(year, mon, day, hour, min, sec)
except TypeError, e:
raise TypeError, e.args[0]
except ValueError, e:
raise ValueError, e.args[0]
return doy | a3f8af2bac6f2d11aad5ec87c22b9e6254256d26 | 3,352 |
def configured_hosts(hass):
"""Return a set of the configured hosts."""
"""For future to use with discovery!"""
out = {}
for entry in hass.config_entries.async_entries(DOMAIN):
out[entry.data[CONF_ADDRESS]] = {
UUID: entry.data[UUID],
CONF_ADDRESS: entry.data[CONF_ADDRESS],
ACCESS_KEY: entry.data[ACCESS_KEY],
SENSORS: entry.data.get(SENSORS, []),
}
return out | 04d24a8011a706d618699528129ba394ec54a590 | 3,353 |
import sys
import importlib
import os
import torch
def load_model(model_name, weights, model_paths, module_name, model_params):
"""Import model and load pretrained weights"""
if model_paths:
sys.path.extend(model_paths)
try:
module = importlib.import_module(module_name)
creator = getattr(module, model_name)
model = creator(**model_params)
except ImportError as err:
if model_paths:
print('Module {} in {} doesn\'t exist. Check import path and name'.format(
model_name, os.pathsep.join(model_paths)))
else:
print('Module {} doesn\'t exist. Check if it is installed'.format(model_name))
sys.exit(err)
except AttributeError as err:
print('ERROR: Module {} contains no class or function with name {}!'
.format(module_name, model_name))
sys.exit(err)
try:
if weights:
model.load_state_dict(torch.load(weights, map_location='cpu'))
except RuntimeError as err:
print('ERROR: Weights from {} cannot be loaded for model {}! Check matching between model and weights'.format(
weights, model_name))
sys.exit(err)
return model | c23362011980d796a43bfd8125070b4fd35d76cf | 3,354 |
def generate_keys(directory: str, pwd: bytes = None) -> (ec.EllipticCurvePrivateKey, ec.EllipticCurvePublicKey):
"""
Generate the public and private keys
Generated keys have a default name; you should rename them
This can be done with os.rename()
:param directory: folder where the keys are made
overwrite the existing keys
:param pwd: password; if not None, the best available encryption is chosen
and the private key is encrypted with the password
:return: private, public keys
"""
private_key = generate_private_key(directory, pwd)
public_key = generate_public_key(directory, private_key)
return private_key, public_key | 28821be8d081e8c8369b889e3ce1a18336ab3c9f | 3,355 |
import os
import subprocess
def test_notebooks():
"""
Run all notebooks in the directories given by the list `notebook_paths`.
The notebooks are run locally using [treon](https://github.com/ReviewNB/treon)
and executed in each directory so that local resources can be imported.
Returns:
num_errors (int): Number of notebooks that failed to run
num_passed (int): Number of notebooks that successfully run
"""
num_errors = 0
num_passed = 0
for nb_path in notebook_paths:
abs_nb_path = os.path.join(SGDIR, nb_path)
cmd_line = f"treon . --threads=2"
print(f"\033[1;33;40m Running {abs_nb_path}\033[0m")
# Add path to PYTHONPATH
environ = dict(os.environ, PYTHONPATH=abs_nb_path)
procout = subprocess.run(
cmd_line,
shell=True,
check=False,
env=environ,
cwd=abs_nb_path,
# stdout=subprocess.PIPE,
# stderr=subprocess.PIPE,
)
if procout.returncode != 0:
num_errors += 1
else:
num_passed += 1
print()
return num_errors, num_passed | 4daec4eb274ef0c25e124704b77ace0a6e87b801 | 3,356 |
def get_storage_config_by_data_type(result_table_id):
"""
Get the list of storage configurations for a result table ID
:param result_table_id: result table ID
:return: response: list of storage configurations
"""
return DataStorageConfig.objects.filter(result_table_id=result_table_id, data_type="raw_data") | 12df282ece7176f003726dfb5ee1c1a1707ef6ad | 3,357 |
from typing import List
def separate_args(args: List[str]) -> (List[str], List[str]):
"""Separate args into preparser args and primary parser args.
Args:
args: Raw command line arguments.
Returns:
A tuple of lists (preparser_args, mainparser_args).
"""
preparser_args = []
if args and args[0].startswith("-"):
cur = 0
while cur < len(args) and args[cur].startswith("-"):
if args[cur] in _repobee.cli.preparser.PRE_PARSER_OPTS:
preparser_args += args[cur : cur + 2]
cur += 2
elif args[cur] in _repobee.cli.preparser.PRE_PARSER_FLAGS:
preparser_args.append(args[cur])
cur += 1
else:
break
return preparser_args, args[len(preparser_args) :] | 49829516f6982d041386d95c20b0028034e066a9 | 3,358 |
def lookup_alive_tags_shallow(repository_id, start_pagination_id=None, limit=None):
""" Returns a list of the tags alive in the specified repository. Note that the tags returned
*only* contain their ID and name. Also note that the Tags are returned ordered by ID.
"""
query = (Tag
.select(Tag.id, Tag.name)
.where(Tag.repository == repository_id)
.order_by(Tag.id))
if start_pagination_id is not None:
query = query.where(Tag.id >= start_pagination_id)
if limit is not None:
query = query.limit(limit)
return filter_to_alive_tags(query) | a0970da049fb2fa7cd3cc69c459fb7917d8185c8 | 3,359 |
def getLesson(request):
"""
Get the JSON representation for a lesson.
"""
print("getLesson called...")
lesson_id = None
if 'lesson_id' in request.matchdict:
lesson_id = request.matchdict['lesson_id']
if lesson_id is None:
# This should return an appropriate error about not finding the
# requested lesson.
pass
lesson = getLessonById(lesson_id)
return lesson | d721ab060462368d9ce3af071faa7e0751b34984 | 3,360 |
import numpy as np
from mayavi import mlab
def vector3d(mode,xdata,ydata,zdata,udata,vdata,wdata,scalardata=None,fig=None,zscale=500.,vector_color=(0,0,0),vector_cmap=None,alpha=1.0,vector_mode='2darrow', scale=1, spacing=8., set_view=None):
"""
fig: integer or string, optional. Figure key will plot data on corresponding mlab figure, if it exists, or create a new one
mode: string; coordinate system of 3D projection. Options are 'rectangle' (default), 'sphere' or 'cylinder'
xdata: 1D array; longitude values for data array
ydata: 1D array; latitude values for data array
zdata: 1D array; depth values for data array
udata: 2D or 3D array; u vector component
vdata: 2D or 3D array; v vector component
wdata: 2D or 3D array; w vector component
zscale: scalar, optional; change vertical scaling for plotting, such that the vertical axis is scaled as topo_z/zscale (assumes topo_z units are m); default zscale is 500
vector_mode: string, optional; style of vector plot
color: colormap or rgb triplet,optional; color of quiver plot default is black (0,0,0).
alpha: float or int, optional; opacity for data surface from 0 to 1, default is 1
scale: float or int, optional; scaling for length of vectors, default is 1.
spacing: int, optional; If supplied, only one out of 'spacing' data points is displayed. This option is useful to reduce the number of points displayed on large datasets Must be an integer (int or long) or None
set_view: array_like, optional; set the mayavi camera angle with input [azimuth, elevation, distance, focal point], default is
"""
#make figure
if fig is None:
mlab.figure(size = (1024,768),bgcolor = (1,1,1))
mlab.clf()
else:
mlab.figure(figure=fig,bgcolor = (1,1,1))
#do coordinate transformation
if xdata is not None and ydata is not None and zdata is not None:
#TODO add an error message if not all data fields are provided
#prep data grid
phi_iso, theta_iso = np.meshgrid(((ydata*np.pi*2)/360.)+np.pi/2.,(xdata*np.pi*2)/360.)
        if mode == 'sphere':
x_iso = np.sin(phi_iso) * np.cos(theta_iso[::-1]) * (1 -zdata/zscale)
y_iso = np.sin(phi_iso) * np.sin(theta_iso[::-1]) * (1 -zdata/zscale)
z_iso = np.cos(phi_iso) * (1 -zdata/zscale)
        elif mode == 'cylinder':
x_iso = np.sin(phi_iso) * np.cos(theta_iso[::-1])
y_iso = np.sin(phi_iso) * np.sin(theta_iso[::-1])
z_iso = zdata/zscale
        elif mode == 'rectangle':
y_iso,z_iso = np.meshgrid(ydata,zdata)
x_iso,z_iso = np.meshgrid(xdata,zdata)
z_iso =-z_iso/zscale
else:
#raise error if all three fields are not provided
print('ERROR: not all data fields are provided. Must provide 1D data x, y and z data points')
#do quiver plot
if scalardata is not None:
m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, scalars=scalardata, scale_mode=None,colormap=vector_cmap,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing)
elif vector_cmap is not None:
m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, colormap=vector_cmap,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing)
else:
m = mlab.quiver3d(x_iso, y_iso, z_iso, udata, vdata, wdata, color=vector_color,mode=vector_mode,opacity=alpha,scale_factor=scale,mask_points=spacing)
#optional: change mayavi camera settings
return m | dcffddd28f966c87801ae98b5da32fe06f5b9b08 | 3,361 |
def make_daysetting_from_data(data):
""" Constructs a new setting from a given dataset. This method will automatically
instantiate a new class matching the type of the given dataset. It will fill
all values provided by the dataset and then return the created instance """
factory = {
"color": ColorType,
"scalar": ScalarType
}
return make_setting_from_factory(data, factory) | d3f78fe67441e555d5b525ce1ca6cb334769942a | 3,362 |
from typing import Optional
def read_report(file) -> Optional[Report]:
"""
Reads the report meta-data section of the file.
:param file: The file being read from.
:return: The report section of the file.
"""
# Use a peeker so we don't read beyond the end of the header section
peeker = line_peeker(file)
# Read each line as a property
properties = {}
while True:
line = next(peeker)
# Finish when we reach a non-report line
if not is_report_line(line):
break
# Skip comment lines
if is_comment_line(line):
continue
# Extract the property name and value from the line
name, value = split_field_line(line)
properties[name] = value
# Return the report (if there was one)
if len(properties) == 0:
return None
else:
return properties_to_report(properties) | 557402ee57675fcc11a0a05da02d554c1b2f13db | 3,363 |
def get_valid_segment(text):
""" Returns None or the valid Loki-formatted urn segment for the given input string. """
if text == '':
return None
else:
# Return the converted text value with invalid characters removed.
valid_chars = ['.', '_', '-']
new_text = ''
for char in text:
if char in valid_chars or char.isalnum():
new_text += char
return new_text | 423c1764b590df635b0794bfe52a0a8479d53fbf | 3,364 |
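A quick check of the filtering behaviour (assumes get_valid_segment above is in scope):

# Quick check of the character filtering.
assert get_valid_segment("") is None
assert get_valid_segment("my segment/01!") == "mysegment01"
assert get_valid_segment("a.b_c-d") == "a.b_c-d"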
import os
import json
def get_aimpoint_offsets():
"""
Get most recent aimpoint offset values
:returns: tuple of dy_acis_i, dz_acis_i, dy_acis_s, dz_acis_s (arcsec)
"""
info_file = os.path.join(opt.data_root, 'info.json')
with open(info_file, 'r') as fh:
info = json.load(fh)
process_time = Time(opt.process_time) if opt.process_time else Time.now()
if (process_time - Time(info['date'])).jd > 14:
logger.info('WARNING: offsets are more than 2 weeks old, last updated {}'
.format(info['date']))
offsets = (info['acisi']['dDY'], info['acisi']['dDZ'],
info['aciss']['dDY'], info['aciss']['dDZ'])
logger.info('Read {} updated {} and found offsets {:.2f}, {:.2f}, {:.2f}, {:.2f}'
.format(info_file, info['date'], *offsets))
return offsets | 08df0254ddb85f57a5b1be75aa2b1d4cf9224f3b | 3,365 |
def mparse(filename, staticObstacleList=list(), **kwargs):
"""
Parses a map file into a list of obstacles
@param filename The file name of the map file
@return A list of obstacles
"""
polyList = kwargs.get("nodes", list())
obstacleList = list()
try:
if filename is not None:
f = open(filename, "r+")
numberOfPolys = int(f.readline())
file_ext = filename.split(".")[-1]
# determine if obstacles are dynamic
if file_ext == "obstacles":
dynamicObstacle = True
else:
dynamicObstacle = False
# loop through file and create PolyObstacle objects
for _ in range(numberOfPolys):
# parse obstacle details
polyList = list()
                line = f.readline().split()[1:]
                intList = [int(float(s)) for s in line]
polyList += [
[
(
mapVal(
intList[2*i],
-29,
29,
0,
con.Configuration.xSize
),
con.Configuration.ySize - mapVal(
intList[2*i + 1],
-29,
29,
0,
con.Configuration.ySize
)
                        ) for i in range(len(intList) // 2)
]
]
# create and append PolyObstacle to obstacleList
obstacleList += [
obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=dynamicObstacle
) for pList in polyList
]
else:
# auto generate dyanmic obstacles
for pList in polyList:
obst = obstacle.PolyObstacle(
pList,
con.Configuration.screen,
dynamic=True,
start_point=kwargs.get("start_point", None),
end_point=kwargs.get("end_point", None)
)
obstacleList.append(obst)
except Exception:
print("Error occured while parsing file [{0}]!".format(filename))
finally:
return obstacleList | ea62ff3e4f42ad9150be248c5a13d3c367f668b2 | 3,366 |
import typing
import logging
def install_pip_packages(python_executable: str,
pip_packages: typing.List[str]) -> bool:
"""Install pip packages for the specified python.
Args:
python_executable: Python executable used to install pip packages.
pip_packages: List of pip packages to install.
Raises:
subprocess.CalledProcessError if package installation fails.
Returns:
True if packages get installed, False otherwise.
"""
if pip_packages:
for package in pip_packages:
if not is_package_installed(package):
logging.info('Package %s not installed.', package)
command = ' '.join(
[python_executable, '-m', 'pip', 'install', '--user'])
command += ' ' + package
logging.info('Install pip package: %s', package)
if not run_and_check_result(command):
return False
return True
logging.debug('no python packages were provided for installation.')
return True | d8316fde5249def1b2d98aa5e247f3713ee40653 | 3,367 |
def sort_f_df(f_df):
"""Sorts f_df by s_idx first then by l_idx.
E.g. for scenario 0, see all decision alternatives in order,
then scenario 1, scenario 2, etc.
Parameters
----------
f_df : pandas.DataFrame
A dataframe of performance values, `f`, with indexes for the
scenario, `s`, and decision alternative, `l`.
Columns: `['s_idx', 'l_idx', '<f1_name>', '<f2_name>', ...]`
"""
    # Sort first by s_idx, then by l_idx, both ascending from 0
    f_df = f_df.sort_values(['s_idx', 'l_idx'], ascending=[True, True])
return f_df | ec82966a7a2fb417312198afe42109ed5883d31d | 3,368 |
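A small illustration of the pandas call involved, on a toy frame whose column names follow the docstring (assumes sort_f_df above is in scope):

# Toy frame illustrating the two-key sort.
import pandas as pd

f_df = pd.DataFrame({
    "s_idx": [1, 0, 1, 0],
    "l_idx": [0, 1, 1, 0],
    "f1":    [0.3, 0.1, 0.4, 0.2],
})
print(sort_f_df(f_df))
# Equivalent direct call:
print(f_df.sort_values(["s_idx", "l_idx"], ascending=[True, True]))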
def get_empath_scores(text):
"""
Obtains empath analysis on the text. Takes the dictionary mapping categories to
scores, which is produced by passing the text to empath, and returns the scores.
Args:
text: string containing text to perform empath analysis on
Returns:
A list of empath scores, such that there is a score in the list for each
of empath's pre-built categories
"""
empath_dict = lexicon.analyze(text, normalize=True)
empath_scores = list(empath_dict.values())
return empath_scores | f68a55a2dc4ba98696e9df3f88aaacf73d81ff2d | 3,369 |
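The module-level `lexicon` object is created elsewhere; a presumed setup, assuming it is the standard Empath client:

# Presumed setup for the module-level `lexicon` (assumption: the empath package).
from empath import Empath

lexicon = Empath()
scores = lexicon.analyze("I love going hiking in the mountains", normalize=True)
print(len(scores))   # one score per built-in Empath category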
def ending_counts(sequences):
"""Return a dictionary keyed to each unique value in the input sequences list
that counts the number of occurrences where that value is at the end of
a sequence.
For example, if 18 sequences end with DET, then you should return a
    dictionary such that your_ending_counts[DET] == 18
    """
    # Initialize the ending_dict
    ending_dict = {}
    # Create the ending_dict with tags as keys
    for tag in data.training_set.tagset:
        ending_dict[tag] = 0
    # Update the values of the ending_dict
for tag_list in data.training_set.Y:
tag = tag_list[-1]
if tag in ending_dict:
ending_dict[tag] += 1
return ending_dict | c12bd2565649d373e86cef31f9e23856f4302fba | 3,370 |
import json
import logging
def sqlite_insert(engine, table_name, data):
"""
Inserts data into a table - either one row or in bulk.
Create the table if not exists.
Parameters
----------
engine: sqlalchemy engine for sqlite
    table_name: string
data: dict
Returns
-------
bool
"""
dtype = type(data)
try:
with session_scope(engine) as session:
try:
conditionally_create_generic_table(engine, table_name)
except TableCreationException:
pass # most likely because it already exists, ignore
if dtype is list:
for row in data:
session.execute('insert into ' + table_name + ' (data) values (:values)',
{'values': json.dumps(row)})
elif dtype is dict:
# investigate: http://docs.sqlalchemy.org/en/latest/faq/performance.html
# Bulk_insert_mappings or use raw sqlite3
row = data
session.execute('insert into ' + table_name + ' (data) values (:values)',
{'values': json.dumps(row)})
return True
except IntegrityError as e:
logging.error(e)
raise DuplicateRowException
except (OperationalError, StatementError) as e:
logging.error(e)
raise InsertException
except Exception as e:
logging.error(e)
raise Exception('not sure what went wrong - could not insert data') | 430e3109d233119043bc6c5f10ba966aa08992c1 | 3,371 |
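session_scope and conditionally_create_generic_table are project helpers not shown here; the raw-SQL JSON insert itself can be sketched with plain SQLAlchemy (the table name and payload below are made up):

# Sketch of the underlying insert with plain SQLAlchemy (names/payload are made up).
import json
from sqlalchemy import create_engine, text

engine = create_engine("sqlite:///:memory:")
with engine.begin() as conn:
    conn.execute(text("create table if not exists events (id integer primary key, data text)"))
    conn.execute(text("insert into events (data) values (:values)"),
                 {"values": json.dumps({"source": "sensor-1", "value": 42})})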
def _fit_solver(solver):
"""
Call ``fit`` on the solver. Needed for multiprocessing.
"""
return solver.fit() | 7007752777445d2cc6d476d7af1f83d6cdfe236b | 3,372 |
def flatten(l):
    """
    recursively turns any nested list into a regular list (using a DFS)
    """
    res = []
    for x in l:
        if isinstance(x, list):
res += flatten(x)
else:
res.append(x)
return res | 5947566b7dfd1d03204c2a39f1e853ce812e18fe | 3,373 |
def build_column_hierarchy(param_list, level_names, ts_columns, hide_levels=[]):
"""For each parameter in `param_list`, create a new column level with parameter values.
Combine this level with columns `ts_columns` using Cartesian product."""
checks.assert_same_shape(param_list, level_names, axis=0)
param_indexes = []
for i in range(len(param_list)):
if level_names[i] not in hide_levels:
param_index = index_fns.index_from_values(param_list[i], name=level_names[i])
param_indexes.append(param_index)
if len(param_indexes) > 1:
param_columns = index_fns.stack_indexes(*param_indexes)
elif len(param_indexes) == 1:
param_columns = param_indexes[0]
else:
param_columns = None
if param_columns is not None:
return index_fns.combine_indexes(param_columns, ts_columns)
return ts_columns | f19d4f93a0cfbc6ebe700285c8761c78ca5f9b1a | 3,374 |
import numpy as np
import numpy.linalg as la
import numdifftools as nd
import scipy.optimize as op
import matplotlib.pyplot as plt
from time import perf_counter
def gradient_descent(f, xk, delta = 0.01, plot=False, F = None, axlim = 10):
"""
f: multivariable function with 1 array as parameter
xk : a vector to start descent
delta : precision of search
plot : option to plot the results or not
F : the function f expressed with 2 arrays in argument (X,Y) representing the colomns xk[0] and xk[1] for ploting issues. used only if plot == True
axlim : limit of the plot 3 axis (x,y,z)
"""
if plot : ax = plt.axes(projection='3d')
A = []
t = perf_counter()
dk = nd.Gradient(f)(xk)
while la.norm(dk) > delta :
if plot and len(A) < 10 : A.append(xk)
xt = xk
phi = lambda s : f(xk - s * dk)
alpha = op.newton(phi, 1)
        xk = xk - alpha * dk  # rebind instead of updating in place so xt keeps the previous iterate
if plot and len(A) < 10 : A.append(xk)
dk = nd.Gradient(f)(xk)
if la.norm(xk - xt) < delta : break
t = perf_counter() - t
print("execution time: ",t)
if plot :
for u in A:
ax.scatter(u[0], u[1], f(u), c = 'b', s = 50)
ax.scatter(xk[0], xk[1], f(xk), c = 'r', s = 50,label="optimum")
x = np.arange(-axlim, axlim, axlim/100)
y = np.arange(-axlim, axlim, axlim/100)
X, Y = np.meshgrid(x, y)
Z = F(X,Y)
ax.set_xlabel('x', labelpad=20)
ax.set_ylabel('y', labelpad=20)
ax.set_zlabel('z', labelpad=20)
surf = ax.plot_surface(X, Y, Z, cmap = plt.cm.cividis)
plt.legend()
plt.title("optimizition with Gradient Descent")
plt.show()
return xk | 99b8da92c6df296c2d02b2f0d14f38b94ea87aef | 3,375 |
def _get_cells(obj):
"""Extract cells and cell_data from a vtkDataSet and sort it by types."""
cells, cell_data = {}, {}
data = _get_data(obj.GetCellData())
arr = vtk2np(obj.GetCells().GetData())
loc = vtk2np(obj.GetCellLocationsArray())
types = vtk2np(obj.GetCellTypesArray())
for typ in VTK_TYP:
if not isinstance(typ, int):
continue
cell_name = VTK_TYP[typ]
n_no = NODE_NO[cell_name]
cell_loc_i = np.where(types == typ)[0]
loc_i = loc[cell_loc_i]
# if there are no cells of the actual type continue
if len(loc_i) == 0:
# if not loc_i:
continue
arr_i = np.empty((len(loc_i), n_no), dtype=int)
for i in range(n_no):
arr_i[:, i] = arr[loc_i + i + 1]
cells[cell_name] = arr_i
cell_data_i = {}
for data_i in data:
cell_data_i[data_i] = data[data_i][cell_loc_i]
if cell_data_i != {}:
cell_data[cell_name] = cell_data_i
return cells, cell_data | 84f603d92e1548b6d9ebe33b31ac4277bed49281 | 3,376 |
def check_X(X, enforce_univariate=False, enforce_min_instances=1):
"""Validate input data.
Parameters
----------
X : pd.DataFrame
enforce_univariate : bool, optional (default=False)
Enforce that X is univariate.
enforce_min_instances : int, optional (default=1)
Enforce minimum number of instances.
Returns
-------
X : pd.DataFrame
Raises
------
ValueError
If X is an invalid input
"""
if not isinstance(X, pd.DataFrame):
raise ValueError(f"X must be a pd.DataFrame, but found: "
f"{(type(X))}")
if enforce_univariate:
_enforce_X_univariate(X)
if enforce_min_instances > 0:
_enforce_min_instances(X, min_instances=enforce_min_instances)
return X | 2022cbccfaec72cc68e2d9692d96fb3241d9991a | 3,377 |
from decimal import Decimal
def parse_amount(value: int) -> Decimal:
"""Return a scaled down amount."""
return Decimal(value) / Decimal(AMOUNT_SCALE_FACTOR) | 66e7668ed5da3d451644de00dc98bfb2bf8745f0 | 3,378 |
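AMOUNT_SCALE_FACTOR is defined elsewhere; a quick check of the scaling, assuming a satoshi-style factor of 10**8 (an assumption, not stated in the source):

# Quick check, assuming AMOUNT_SCALE_FACTOR = 10 ** 8 (not stated in the source).
from decimal import Decimal

AMOUNT_SCALE_FACTOR = 10 ** 8
assert Decimal(123_456_789) / Decimal(AMOUNT_SCALE_FACTOR) == Decimal("1.23456789")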
def svn_ra_do_update2(*args):
"""
svn_ra_do_update2(svn_ra_session_t session, svn_ra_reporter3_t reporter,
void report_baton, svn_revnum_t revision_to_update_to,
char update_target, svn_depth_t depth,
svn_boolean_t send_copyfrom_args, svn_delta_editor_t update_editor,
void update_baton,
apr_pool_t pool) -> svn_error_t
"""
    return _ra.svn_ra_do_update2(*args) | 139fd5b8ea5b86ad70f056d5b53a86cc01ce3952 | 3,379
from jsonschemaplus.schemas import metaschema
from copy import deepcopy
def resolve(schema, copy=False):
"""Resolve schema references.
:param schema: The schema to resolve.
:return: The resolved schema.
"""
_substitutions = {'%25': '%', '~1': '/', '~0': '~'}
def _resolve_refs(schema, root=None, id_acc=None):
"""Resolve schema references and modify supplied
schema as a side effect.
If function parses value that equals schema's root,
_resolve_refs early exits because references have
already been resolved.
:param schema: The schema to resolve.
:param root: The root of the schema.
:side effect: Modifies schema.
:return: None
:TODO: resolve all http ref values
"""
if root is None:
root = schema
ref = '$ref'
id_ = 'id'
if object_(schema):
value = schema.get(id_)
if value and string(value):
if uri(value):
id_acc = value
else:
if id_acc is None:
raise SchemaError('Error resolving schema with id: %s' % value)
else:
id_acc += value
if not uri(id_acc):
raise SchemaError('Error resolving schema with id: %s' % value)
value = schema.get(ref)
if value and string(value):
if uri(value):
schema.pop(ref)
if (value == 'http://json-schema.org/draft-04/schema#'
and root != metaschema):
schema.update(deepcopy(metaschema))
# elif (value == 'http://json-schema.org/draft-04/hyper-schema#'
# and root != hyperschema):
# schema.update(deepcopy(hyperschema))
else:
try:
(url_, path_) = url(value)
data = resolve(get(url_))
schema.update(_path(data, path_))
except:
raise SchemaError('Error resolving schema with $ref: %s' % value)
_resolve_refs(schema, root, id_acc)
elif value[0] == '#':
schema.pop(ref)
subschema = _path(root, value)
if object_(subschema) and ref in subschema and string(subschema[ref]):
_resolve_refs(subschema, root, id_acc)
subschema = _path(root, value)
schema.update(subschema)
elif value.find('.json') != -1:
schema.pop(ref)
(url_, path_) = url(id_acc + value)
data = resolve(get(url_))
schema.update(_path(data, path_))
_resolve_refs(schema, root, id_acc)
else:
raise SchemaError('Error resolving schema with $ref: %s' % value)
for k, v in schema.items():
if k != ref and k != id_ and v != root:
_resolve_refs(v, root, id_acc)
elif array(schema):
for item in schema:
if item != root:
_resolve_refs(item, root, id_acc)
def _path(schema, path):
components = path[1:].split('/')[1:]
subschema = schema
for c in components:
for k, v in _substitutions.items():
if k in c:
c = c.replace(k, v)
if array(subschema):
try:
index = int(c)
subschema = subschema[index]
except:
raise SchemaError('Invalid path %s' % path)
elif object_(subschema):
subschema = subschema.get(c)
else:
raise SchemaError('Invalid path %s' % path)
return subschema
resolve.resolve_refs = _resolve_refs
resolve.path = _path
if copy:
schema_ = deepcopy(schema)
else:
schema_ = schema
_resolve_refs(schema_)
return schema_ | b410d2a7e165b988787ed3041a2eef13f35ea188 | 3,380 |
from ._sentiwords import tag
def sentiwords_tag(doc, output="bag"):
"""Tag doc with SentiWords polarity priors.
Performs left-to-right, longest-match annotation of token spans with
polarities from SentiWords.
Uses no part-of-speech information; when a span has multiple possible
taggings in SentiWords, the mean is returned.
Parameters
----------
doc : document or list of strings
output : string, optional
Output format. Either "bag" for a histogram (dict) of annotated token
span frequencies, or "tokens" a mixed list of strings and (list of
strings, polarity) pairs.
"""
doc = _tokenize_if_needed(fetch(doc))
tagged = tag(doc)
if output == "bag":
d = {}
for ngram, polarity in tagged:
if polarity == 0:
continue
if ngram in d:
d[ngram][1] += 1
else:
d[ngram] = [polarity, 1]
return d
elif output == "tokens":
return [ngram if polarity == 0 else (ngram, polarity)
for ngram, polarity in tagged]
else:
raise ValueError("unknown output format %r" % output) | c4769e82d9b9aff55f7d6e3de08188f5ba6501bb | 3,381 |
def _GetCommandTaskIds(command):
"""Get a command's task ids."""
# A task count is the number of tasks we put in the command queue for this
# command. We cap this number to avoid a single command with large run count
# dominating an entire cluster. If a task count is smaller than a run count,
# completed tasks will be rescheduled as needed.
task_count = min(command.run_count, MAX_TASK_COUNT)
_, request_id, _, command_id = command.key.flat()
return ["%s-%s-%s" % (request_id, command_id, i) for i in range(task_count)] | 98d6ac89e4f5569968740475eba924840170464f | 3,382 |
import uuid
import json
def save_orchestrator_response(url, jsonresponse, dryrun):
"""Given a URL and JSON response create/update the corresponding mockfile."""
endpoint = url.split("/api/")[1].rstrip("/")
try:
path, identifier = endpoint.rsplit("/", maxsplit=1)
except ValueError:
path, identifier = None, endpoint
if any(char in identifier for char in "?&="):
# Skip urls with query parameters for now (can be normalized if it's needed)
print(f"Unsupported URL parameters: {url}")
return
if any(pattern in url for pattern in TO_EXCLUDE):
print(f"Excluding URL {url}")
return
def get_id(string):
"""Defines how final URL component can be used as identifier"""
try:
parsed = uuid.UUID(string)
return str(parsed)[:8]
except ValueError:
if string.isnumeric():
return string
return None
try:
response = json.loads(jsonresponse)
except json.JSONDecodeError as e:
print(f"Invalid JSON response: {url} ({e})")
return
if (parsed_id := get_id(identifier)) is None:
# URL ends on a word "products" or "organisations"
filename = f"{identifier}.json"
else:
# URL ends on UUID or integer
if "/domain-model/" in url:
filename_prefix = "".join(c for c in response["product"]["tag"].lower() if c.isalpha())
else:
filename_prefix = ""
filename = f"{filename_prefix}-{parsed_id}.json" if filename_prefix else f"{parsed_id}.json"
if not path:
# Store in data/
fpath = DATA_ROOT / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' in root directory"
)
else:
# Store in data/<subfolder>/
dpath = DATA_ROOT / path
fpath = dpath / filename
print(
f"{endpoint} -> {'update (if changed)' if fpath.is_file() else 'create'} '{filename}' "
f"in {'new' if not dpath.is_dir() else 'existing'} directory '{path}'"
)
if not dpath.is_dir() and not dryrun:
dpath.mkdir(parents=True)
if not dryrun:
with fpath.open(mode="w") as handle:
json.dump(response, handle, sort_keys=True, indent=4) | aab7d3e925d7b4d695832ba7aa45bd93b3824fb1 | 3,383 |
from sqlalchemy import or_
def aggregate_responses(instrument_ids, current_user, patch_dstu2=False):
"""Build a bundle of QuestionnaireResponses
:param instrument_ids: list of instrument_ids to restrict results to
:param current_user: user making request, necessary to restrict results
to list of patients the current_user has permission to see
"""
# Gather up the patient IDs for whom current user has 'view' permission
user_ids = OrgTree().visible_patients(current_user)
annotated_questionnaire_responses = []
questionnaire_responses = QuestionnaireResponse.query.filter(
QuestionnaireResponse.subject_id.in_(user_ids)).order_by(
QuestionnaireResponse.authored.desc())
if instrument_ids:
instrument_filters = (
QuestionnaireResponse.document[
("questionnaire", "reference")
].astext.endswith(instrument_id)
for instrument_id in instrument_ids
)
questionnaire_responses = questionnaire_responses.filter(or_(*instrument_filters))
patient_fields = ("careProvider", "identifier")
for questionnaire_response in questionnaire_responses:
subject = questionnaire_response.subject
encounter = questionnaire_response.encounter
encounter_fhir = encounter.as_fhir()
questionnaire_response.document["encounter"] = encounter_fhir
questionnaire_response.document["subject"] = {
k: v for k, v in subject.as_fhir().items() if k in patient_fields
}
if subject.organizations:
questionnaire_response.document["subject"]["careProvider"] = [
Reference.organization(org.id).as_fhir()
for org in subject.organizations
]
# Hack: add missing "resource" wrapper for DTSU2 compliance
# Remove when all interventions compliant
if patch_dstu2:
questionnaire_response.document = {
'resource': questionnaire_response.document,
# Todo: return URL to individual QuestionnaireResponse resource
'fullUrl': url_for(
'.assessment',
patient_id=questionnaire_response.subject_id,
_external=True,
),
}
annotated_questionnaire_responses.append(questionnaire_response.document)
bundle = {
'resourceType': 'Bundle',
'updated': FHIR_datetime.now(),
'total': len(annotated_questionnaire_responses),
'type': 'searchset',
'entry': annotated_questionnaire_responses,
}
return bundle | 4a829cceec49a583aa69676ec2847a60ce5979da | 3,384 |
import math
import numpy as np
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
use_bp=True):
"""Computes BLEU score of translated segments against one or more references.
Args:
reference_corpus: list of references for each translation. Each
reference should be tokenized into a list of tokens.
translation_corpus: list of translations to score. Each translation
should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
use_bp: boolean, whether to apply brevity penalty.
Returns:
BLEU score.
"""
reference_length = 0
translation_length = 0
bp = 1.0
geo_mean = 0
matches_by_order = [0] * max_order
possible_matches_by_order = [0] * max_order
precisions = []
for (references, translations) in zip(reference_corpus, translation_corpus):
reference_length += len(references)
translation_length += len(translations)
ref_ngram_counts = _get_ngrams_with_counter(references, max_order)
translation_ngram_counts = _get_ngrams_with_counter(translations, max_order)
overlap = dict((ngram,
min(count, translation_ngram_counts[ngram]))
for ngram, count in ref_ngram_counts.items())
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for ngram in translation_ngram_counts:
possible_matches_by_order[len(ngram) - 1] += translation_ngram_counts[
ngram]
precisions = [0] * max_order
smooth = 1.0
for i in range(0, max_order):
if possible_matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[i]
if matches_by_order[i] > 0:
precisions[i] = float(matches_by_order[i]) / possible_matches_by_order[
i]
else:
smooth *= 2
precisions[i] = 1.0 / (smooth * possible_matches_by_order[i])
else:
precisions[i] = 0.0
if max(precisions) > 0:
p_log_sum = sum(math.log(p) for p in precisions if p)
geo_mean = math.exp(p_log_sum / max_order)
if use_bp:
ratio = translation_length / reference_length
bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0
bleu = geo_mean * bp
return np.float32(bleu) | e0978e3b05513c32ac55f72b298e8752eb887fb1 | 3,385 |
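_get_ngrams_with_counter is not shown above; a minimal sketch consistent with how it is used (a Counter keyed by n-gram tuples up to max_order) — an assumption, not the source's own helper:

# Minimal sketch of the missing helper, consistent with its usage above (an assumption).
import collections

def _get_ngrams_with_counter(segment, max_order):
    """Count all n-grams up to max_order in a token list."""
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(0, len(segment) - order + 1):
            ngram = tuple(segment[i:i + order])
            ngram_counts[ngram] += 1
    return ngram_counts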
def flip(inputDirection: direction) -> direction:
"""
Chooses what part of the general pointer to flip, by DP%2 == CC rule, providing the following flow:
(0,0) -> (0,1)
(0,1) -> (1,1)
(1,1) -> (1,0)
(1,0) -> (2,0)
(2,0) -> (2,1)
(2,1) -> (3,1)
(3,1) -> (3,0)
(3,0) -> (0,0)
:param inputDirection: Original state of the pointers
:return: Tuple of ints containing new pointers
"""
if inputDirection.pointers[0] % 2 == inputDirection.pointers[1]:
return direction((inputDirection.pointers[0], flipCC(inputDirection.pointers[1])))
return direction((flipDP(inputDirection.pointers[0]), inputDirection.pointers[1])) | 44797849c14736de7380c5169e29bc9095f11a45 | 3,386 |
def save(request):
"""Update the column levels in campaign_tree table with the user's input from the data warehouse frontend."""
if any(request["changes"]):
query = 'UPDATE campaign_tree SET '
query += ', '.join([f"""levels[{index + 1}] = trim(regexp_replace(%s, '\s+', ' ', 'g'))"""
for index, change in enumerate(request["changes"])
if change])
where_clause, variables = _build_where_clause(request)
query += ' ' + where_clause
with mara_db.postgresql.postgres_cursor_context('mara') as cursor: # type: psycopg2.extensions.cursor
cursor.execute(query, tuple([change for change in request['changes'] if change] + variables))
return f'Successfully updated {cursor.rowcount} rows: <tt>{str(cursor.query.decode("utf-8"))}</tt>'
else:
return 'No changes to be made' | 7cc75024db3de8596bc685439d02a1023bfbae25 | 3,387 |
def _print_model(server, user_key, device_type_model):
"""
Print the model for a given device type
:param device_type_model: Device type ID to print the model for
"""
name = None
model = []
parameters = _get_parameters(server, user_key)
parameters = parameters['deviceParams']
try:
device_type_model = int(device_type_model)
except:
print(Color.RED + 'Please provide an integer device type.' + Color.END + '\n')
return 0
if device_type_model == 22 or device_type_model == 23 or device_type_model == 24:
if device_type_model == 22:
name = 'Web Camera'
elif device_type_model == 23:
name = 'Android Camera'
elif device_type_model == 24:
name = 'iOS Camera'
model = ['accessCameraSettings', 'audioStreaming', 'videoStreaming', 'ppc.hdStatus', 'ppc.rapidMotionStatus', 'batteryLevel', 'ppc.charging', 'motionStatus', 'selectedCamera', 'ppc.autoFocus', 'ppc.recordSeconds', 'ppc.motionSensitivity', 'version', 'ppc.robotConnected', 'ppc.robotMotionDirection', 'ppc.robotOrientation', 'ppc.robotVantageSphericalCoordinates', 'ppc.robotVantageTimer', 'ppc.robotVantageConfigurationStatus', 'ppc.robotVantageName', 'ppc.robotVantageSequence', 'ppc.robotVantageMoveToIndex', 'ppc.availableBytes', 'twitterAutoShare', 'twitterDescription', 'ppc.twitterReminder', 'ppc.twitterStatus', 'ppc.motionCountDownTime', 'ppc.blackoutScreenOn', 'ppc.warningStatus', 'ppc.warningText', 'ppc.recordFullDuration', 'ppc.flashOn', 'streamError', 'ppc.streamStatus', 'model', 'timeZoneId', 'ppc.motionActivity', 'ppc.outputVolume', 'ppc.captureImage', 'recordStatus', 'ppc.alarm', 'ppc.countdown', 'ppc.playSound', 'ppc.motionAlarm', 'ppc.cameraName', 'ppc.throttleStatus']
elif device_type_model == 31:
name = 'Gateway'
model = ['firmware', 'ipAddress', 'manufacturer', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel', 'reboot', 'cloud', 'firmwareUpdateStatus', 'firmwareUrl', 'firmwareChecksum']
elif device_type_model == 130:
name = 'LintAlert PRO Plus'
model = ['sig.led', 'sig.pressure', 'sig.wciPressure', 'sig.status', 'sig.runtime', 'sig.maxled', 'sig.curMaxLed', 'sig.type', 'sig.table', 'sig.clean', 'waterLeak', 'version', 'rssi']
elif device_type_model == 4200:
name = 'Netatmo Healthy Home Coach'
model = ['degC', 'co2', 'relativeHumidity', 'noise', 'firmware', 'wifiSignal', 'pressure', 'nam.healthIdx']
elif device_type_model == 4201:
name = 'Netatmo Weather Station Indoor Module'
model = ['degC', 'co2', 'relativeHumidity', 'noise', 'pressure', 'firmware', 'wifiSignal']
elif device_type_model == 4202:
name = 'Netatmo Weather Station Outdoor Module'
model = ['degC', 'relativeHumidity', 'firmware', 'signalStrength', 'batteryLevel']
elif device_type_model == 4204:
name = 'Netatmo Welcome'
model = ['status', 'ipc.sdStatus', 'ppc.charging', 'ipc.mainVideoUrl']
elif device_type_model == 4220:
name = 'Sensibo'
model = ['degC', 'relativeHumidity', 'powerStatus', 'systemMode', 'coolingSetpoint', 'fanMode', 'swingMode', 'systemModeValues', 'fanModeValues', 'swingValues', 'tempValues']
elif device_type_model == 9001:
name = 'GE Dimmer Switch'
model = ['currentLevel', 'state', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9002:
name = 'Siren'
model = ['ppc.alarmWarn', 'ppc.alarmDuration', 'ppc.alarmStrobe', 'ppc.alarmSquawk', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9003:
name = 'Temperature & Humidity Sensor'
model = ['relativeHumidity', 'degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9006:
name = 'Fire Alarm'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9007:
name = 'Smoke Detector'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9008:
name = 'Heat Detector'
model = ['alarmStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 9010:
name = 'Smart Lock'
model = ['degC', 'lockStatus', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10014:
name = 'Entry Sensor'
model = ['doorStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10017:
name = 'Water Sensor'
model = ['waterLeak', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10019:
name = 'Touch Sensor'
model = ['vibrationStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10031:
name = 'Gateway'
model = ['firmware', 'ipAddress', 'model', 'numberOfChildren', 'permitJoining', 'zbChannel']
elif device_type_model == 10033:
name = 'Temperature Sensor'
model = ['degC', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10034:
name = 'Humidity Sensor'
model = ['relativeHumidity', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10035:
name = 'Smart Plug'
model = ['power', 'energy', 'outletStatus', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10036:
name = 'Smart Bulb'
model = ['currentLevel', 'state', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10037:
name = 'Thermostat'
model = ['degC', 'fanModeSequence', 'systemMode', 'controlSequenceOfOperation', 'coolingSetpoint', 'heatingSetpoint', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
elif device_type_model == 10038:
name = 'Motion Sensor'
model = ['motionStatus', 'batteryLevel', 'batteryVoltage', 'lqi', 'rssi', 'model', 'manufacturer']
if len(model) > 0:
print(Color.GREEN + name + Color.END)
for m in model:
description = ''
for p in parameters:
if p['name'] == m:
description = '('
if 'systemUnit' in p:
description += p['systemUnit'] + ', '
if p['numeric']:
description += 'numeric'
else:
description += 'non-numeric'
if 'description' in p:
description += ', ' + p['description']
description += ')'
print(' ' + Color.BOLD + m + Color.END + ' ' + description)
else:
print(Color.RED + 'This device type does not yet have a model defined.' + Color.END)
return | b901cb21d39ac0fce1d8a60c3926d8b274ce5189 | 3,388 |
def parse_xiaomi(self, data, source_mac, rssi):
"""Parser for Xiaomi sensors"""
# check for adstruc length
i = 9 # till Frame Counter
msg_length = len(data)
if msg_length < i:
_LOGGER.debug("Invalid data length (initial check), adv: %s", data.hex())
return None
# extract frame control bits
frctrl = data[4] + (data[5] << 8)
frctrl_mesh = (frctrl >> 7) & 1 # mesh device
frctrl_version = frctrl >> 12 # version
frctrl_auth_mode = (frctrl >> 10) & 3
frctrl_solicited = (frctrl >> 9) & 1
frctrl_registered = (frctrl >> 8) & 1
frctrl_object_include = (frctrl >> 6) & 1
frctrl_capability_include = (frctrl >> 5) & 1
frctrl_mac_include = (frctrl >> 4) & 1 # check for MAC address in data
frctrl_is_encrypted = (frctrl >> 3) & 1 # check for encryption being used
frctrl_request_timing = frctrl & 1 # old version
# Check that device is not of mesh type
if frctrl_mesh != 0:
_LOGGER.debug("Xiaomi device data is a mesh type device, which is not supported. Data: %s", data.hex())
return None
# Check that version is 2 or higher
if frctrl_version < 2:
_LOGGER.debug("Xiaomi device data is using old data format, which is not supported. Data: %s", data.hex())
return None
# Check that MAC in data is the same as the source MAC
if frctrl_mac_include != 0:
i += 6
if msg_length < i:
_LOGGER.debug("Invalid data length (in MAC check), adv: %s", data.hex())
return None
xiaomi_mac_reversed = data[9:15]
xiaomi_mac = xiaomi_mac_reversed[::-1]
if xiaomi_mac != source_mac:
_LOGGER.debug("Xiaomi MAC address doesn't match data MAC address. Data: %s", data.hex())
return None
else:
xiaomi_mac = source_mac
# determine the device type
device_id = data[6] + (data[7] << 8)
try:
device_type = XIAOMI_TYPE_DICT[device_id]
except KeyError:
if self.report_unknown == "Xiaomi":
_LOGGER.info(
"BLE ADV from UNKNOWN Xiaomi device: RSSI: %s, MAC: %s, ADV: %s",
rssi,
to_mac(source_mac),
data.hex()
)
_LOGGER.debug("Unknown Xiaomi device found. Data: %s", data.hex())
return None
packet_id = data[8]
sinfo = 'MiVer: ' + str(frctrl_version)
sinfo += ', DevID: ' + hex(device_id) + ' : ' + device_type
sinfo += ', FnCnt: ' + str(packet_id)
if frctrl_request_timing != 0:
sinfo += ', Request timing'
if frctrl_registered != 0:
sinfo += ', Registered and bound'
else:
sinfo += ', Not bound'
if frctrl_solicited != 0:
sinfo += ', Request APP to register and bind'
if frctrl_auth_mode == 0:
sinfo += ', Old version certification'
elif frctrl_auth_mode == 1:
sinfo += ', Safety certification'
elif frctrl_auth_mode == 2:
sinfo += ', Standard certification'
# check for MAC presence in sensor whitelist, if needed
if self.discovery is False and xiaomi_mac not in self.sensor_whitelist:
_LOGGER.debug("Discovery is disabled. MAC: %s is not whitelisted!", to_mac(xiaomi_mac))
return None
# check for unique packet_id and advertisement priority
try:
prev_packet = self.lpacket_ids[xiaomi_mac]
except KeyError:
# start with empty first packet
prev_packet = None
if device_type in ["LYWSD03MMC", "CGG1", "MHO-C401", "CGDK2"]:
# Check for adv priority and packet_id for devices that can also send in ATC format
adv_priority = 19
try:
prev_adv_priority = self.adv_priority[xiaomi_mac]
except KeyError:
# start with initial adv priority
prev_adv_priority = 0
if adv_priority > prev_adv_priority:
# always process advertisements with a higher priority
self.adv_priority[xiaomi_mac] = adv_priority
elif adv_priority == prev_adv_priority:
# only process messages with same priority that have a unique packet id
if prev_packet == packet_id:
if self.filter_duplicates is True:
return None
else:
pass
else:
pass
else:
# do not process advertisements with lower priority (ATC advertisements will be used instead)
prev_adv_priority -= 1
self.adv_priority[xiaomi_mac] = prev_adv_priority
return None
else:
if prev_packet == packet_id:
if self.filter_duplicates is True:
# only process messages with highest priority and messages with unique packet id
return None
self.lpacket_ids[xiaomi_mac] = packet_id
# check for capability byte present
if frctrl_capability_include != 0:
i += 1
if msg_length < i:
_LOGGER.debug("Invalid data length (in capability check), adv: %s", data.hex())
return None
capability_types = data[i - 1]
sinfo += ', Capability: ' + hex(capability_types)
if (capability_types & 0x20) != 0:
i += 1
if msg_length < i:
_LOGGER.debug("Invalid data length (in capability type check), adv: %s", data.hex())
return None
capability_io = data[i - 1]
sinfo += ', IO: ' + hex(capability_io)
# check that data contains object
if frctrl_object_include != 0:
# check for encryption
if frctrl_is_encrypted != 0:
sinfo += ', Encryption'
firmware = "Xiaomi (MiBeacon V" + str(frctrl_version) + " encrypted)"
if frctrl_version <= 3:
payload = decrypt_mibeacon_legacy(self, data, i, xiaomi_mac)
else:
payload = decrypt_mibeacon_v4_v5(self, data, i, xiaomi_mac)
else: # No encryption
# check minimum advertisement length with data
firmware = "Xiaomi (MiBeacon V" + str(frctrl_version) + ")"
sinfo += ', No encryption'
if msg_length < i + 3:
_LOGGER.debug("Invalid data length (in non-encrypted data), adv: %s", data.hex())
return None
payload = data[i:]
else:
# data does not contain Object
_LOGGER.debug("Advertisement doesn't contain payload, adv: %s", data.hex())
return None
result = {
"rssi": rssi,
"mac": ''.join(f'{i:02X}' for i in xiaomi_mac),
"type": device_type,
"packet": packet_id,
"firmware": firmware,
"data": False,
}
if payload is not None:
result.update({"data": True})
sinfo += ', Object data: ' + payload.hex()
# loop through parse_xiaomi payload
payload_start = 0
payload_length = len(payload)
# assume that the data may have several values of different types
while payload_length >= payload_start + 3:
obj_typecode = payload[payload_start] + (payload[payload_start + 1] << 8)
obj_length = payload[payload_start + 2]
next_start = payload_start + 3 + obj_length
if payload_length < next_start:
_LOGGER.debug("Invalid payload data length, payload: %s", payload.hex())
break
dobject = payload[payload_start + 3:next_start]
if obj_length != 0:
resfunc = xiaomi_dataobject_dict.get(obj_typecode, None)
if resfunc:
if hex(obj_typecode) in ["0x1001", "0xf", "0xb"]:
result.update(resfunc(dobject, device_type))
else:
result.update(resfunc(dobject))
else:
if self.report_unknown == "Xiaomi":
_LOGGER.info("%s, UNKNOWN dataobject in payload! Adv: %s", sinfo, data.hex())
payload_start = next_start
return result | 2891ecaa538f7eb2c70b9c9cf0073fc39f1cc3d8 | 3,389 |
def parse_ipv6_addresses(text):
"""."""
addresses = ioc_grammars.ipv6_address.searchString(text)
return _listify(addresses) | 6177bc5fcb3b6613e945a7c63931d88a12d372cd | 3,390 |
def as_jenks_caspall_sampled(*args, **kwargs):
"""
Generate Jenks-Caspall Sampled classes from the provided queryset. If the queryset
is empty, no class breaks are returned. For more information on the Jenks
Caspall Sampled classifier, please visit:
U{http://pysal.geodacenter.org/1.2/library/esda/mapclassify.html#pysal.esda.mapclassify.Jenks_Caspall_Sampled}
@type queryset: QuerySet
@param queryset: The query set that contains the entire distribution of
data values.
@type field: string
@param field: The name of the field on the model in the queryset that
contains the data values.
@type nclasses: integer
@param nclasses: The number of class breaks desired.
@type geofield: string
@param geofield: The name of the geometry field. Defaults to 'geom'.
@rtype: L{sld.StyledLayerDescriptor}
@returns: An SLD object that represents the class breaks.
"""
return _as_classification(Jenks_Caspall_Sampled, *args, **kwargs) | 7a271d3c48b6d813cacc9502f214d2045d8accfe | 3,391 |
def positive_dice_parse(dice: str) -> str:
"""
:param dice: Formatted string, where each line is blank or matches
t: [(t, )*t]
t = (0|T|2A|SA|2S|S|A)
(note: T stands for Triumph here)
:return: Formatted string matching above, except tokens are replaced
with their corresponding values in the 4-tuple system,
(successes, advantages, triumphs, despairs)
"""
return dice.replace("0", "(0, 0, 0, 0)")\
.replace("T", "(1, 0, 1, 0)")\
.replace("2A", "(0, 2, 0, 0)")\
.replace("SA", "(1, 1, 0, 0)")\
.replace("2S", "(2, 0, 0, 0)")\
.replace("S", "(1, 0, 0, 0)")\
.replace("A", "(0, 1, 0, 0)") | 5b266a4025706bfc8f4deabe67735a32f4b0785d | 3,392 |
def fmt_title(text):
"""Article title formatter.
Except functional words, first letter uppercase. Example:
"Google Killing Annoying Browsing Feature"
    Except for function words, the first letter of each English word is capitalized.
"""
text = text.strip()
if len(text) == 0: # if empty string, return it
return text
else:
text = text.lower() # lower all char
# delete redundant empty space
chunks = [chunk for chunk in text.split(" ") if len(chunk) >= 1]
new_chunks = list()
for chunk in chunks:
if chunk not in _function_words:
chunk = chunk[0].upper() + chunk[1:]
new_chunks.append(chunk)
new_chunks[0] = new_chunks[0][0].upper() + new_chunks[0][1:]
return " ".join(new_chunks) | 44474f8f92888904a56f63bdcf1031f2d7c472e1 | 3,393 |
def insn_add_off_drefs(*args):
"""
insn_add_off_drefs(insn, x, type, outf) -> ea_t
"""
return _ida_ua.insn_add_off_drefs(*args) | 684af1cea2deafffc33b007f064a67fb89ffd54f | 3,394 |
def lambda_handler(event, context):
"""Lambda function that responds to changes in labeling job status, updating
the corresponding dynamo db tables and publishing to sns after a job is cancelled.
Parameters
----------
event: dict, required API gateway request with an input SQS arn, output SQS arn
context: object, required Lambda Context runtime methods and attributes
Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
Lambda Output Format: dict
Return doc: https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html
"""
log.log_request_and_context(event, context)
job_status = event["status"]
job_arns = event["job_arns"]
if len(job_arns) != 1:
raise ValueError("incorrect number of job arns in event: ", job_arns)
job_arn = job_arns[0]
# We received a new status for the job_arn.
process_new_status(job_arn, job_status, context.invoked_function_arn)
return "success" | 3640bded7316a573d0740084e8006a876bb7300c | 3,395 |
from typing import Union
from ipaddress import IPv4Address, IPv6Address
import dns.message
import pydnstest.mock_client
def get_answer(question: Union[dns.message.Message, bytes],
server: Union[IPv4Address, IPv6Address],
port: int = 53,
tcp: bool = False,
timeout: int = pydnstest.mock_client.SOCKET_OPERATION_TIMEOUT) -> dns.message.Message:
"""Get an DNS message with answer with specific query"""
sock = pydnstest.mock_client.setup_socket(str(server), port, tcp=tcp)
with sock:
pydnstest.mock_client.send_query(sock, question)
return pydnstest.mock_client.get_dns_message(sock, timeout=timeout) | 95493396393ce09e4610fb32c063d0d9676b14ea | 3,396 |
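A hedged usage sketch: the question is built with dnspython; the server address below is a placeholder, and the actual call needs a reachable resolver plus the pydnstest helpers, so it is left commented.

# Usage sketch (server address is a placeholder; sending requires a reachable resolver).
from ipaddress import IPv4Address
import dns.message
import dns.rdatatype

question = dns.message.make_query("example.com.", dns.rdatatype.A)
# answer = get_answer(question, IPv4Address("192.0.2.53"), port=53, tcp=True)
# print(answer.answer)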
import numpy as np
def intersectionPoint(line1, line2):
"""
    Determine the intersection point between two lines given in Hesse normal form, r = x*cos(theta) + y*sin(theta)
"""
y = (line2[0][0]*np.cos(line1[0][1]) - line1[0][0]*np.cos(line2[0][1]))/(np.sin(line2[0][1])*np.cos(line1[0][1]) - np.sin(line1[0][1])*np.cos(line2[0][1]))
x = (line1[0][0] - y*np.sin(line1[0][1]))/np.cos(line1[0][1])
return [x,y] | f23ec1960de85b72724388747ec8157925eefae1 | 3,397 |
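A quick numerical check in Hesse normal form: theta = 0 gives the line x = 1 and theta = pi/2 gives y = 1, so they should meet at (1, 1) (assumes intersectionPoint above is in scope):

# Quick check: x = 1 (theta = 0) and y = 1 (theta = pi/2) intersect at (1, 1).
import numpy as np

line1 = [(1.0, 0.0)]          # r = 1, theta = 0    -> x = 1
line2 = [(1.0, np.pi / 2)]    # r = 1, theta = pi/2 -> y = 1
x, y = intersectionPoint(line1, line2)
assert np.isclose(x, 1.0) and np.isclose(y, 1.0)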
def _remove_keywords(d):
"""
copy the dict, filter_keywords
Parameters
----------
d : dict
"""
    return {k: v for k, v in d.items() if k not in RESERVED} | 0eb7ed59898c3ec323574d0068af745250aef63b | 3,398
def build_trib_exp(trib_identifier, trib_key_field):
"""Establishes a SQL query expresion associating a given tributary id"""
return '"{0}"'.format(trib_key_field) + " LIKE '%{0}%'".format(trib_identifier) | 792d5e4237268410f050323ff1748246a5cdee5d | 3,399 |