content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
def get_statement_at_line(source: str, lineno: int, checker):
"""Get statements at line *lineno* from a source string.
:param source: The source to get the statements from.
:param lineno: Line number which the statement must include. Counted from 1.
:param checker: A function that checks each statement. It must return *None* if the check
fails. If anything else is returned, that becomes the return value of this function.
    :returns: A list of tuples of strings with the found statements and an offset between the
        beginning of the match and *lineno*.
"""
module = ast_utils.cached_parse(source)
for stmt in module.body:
position = ast_utils.get_position(source, stmt)
if position.lineno <= lineno <= position.end_lineno:
res = checker(stmt, source)
if res is not None:
return res
raise RuntimeError('Statement not found.') | d2066f5fafa1c20c4b5276e44d82ae95ffa2f59b | 15,300 |
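A usage sketch for get_statement_at_line above. The checker below is hypothetical and assumes ast_utils.get_position returns an object with a lineno attribute, as the function body suggests; whatever the checker returns becomes the function's return value:
def source_checker(stmt, source):
    # Return the first source line of the matched statement (hypothetical helper).
    pos = ast_utils.get_position(source, stmt)
    return source.splitlines()[pos.lineno - 1]
# get_statement_at_line("x = 1\ny = 2\n", 2, source_checker) would yield "y = 2"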
def ptrace(Q, sel):
"""
Partial trace of the Qobj with selected components remaining.
Parameters
----------
Q : :class:`qutip.Qobj`
Composite quantum object.
sel : int/list
An ``int`` or ``list`` of components to keep after partial trace.
Returns
-------
oper : :class:`qutip.Qobj`
Quantum object representing partial trace with selected components
remaining.
Notes
-----
This function is for legacy compatibility only. It is recommended to use
the ``ptrace()`` Qobj method.
"""
if not isinstance(Q, Qobj):
raise TypeError("Input is not a quantum object")
return Q.ptrace(sel) | a98e7bea41cff00b44534cecac7f86958ef47ebb | 15,301 |
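A minimal usage sketch for the legacy ptrace wrapper above, assuming qutip is installed; the partial trace keeps only the selected subsystem:
import qutip
rho = qutip.ket2dm(qutip.tensor(qutip.basis(2, 0), qutip.basis(2, 1)))  # two-qubit density matrix
reduced = ptrace(rho, 0)  # 2x2 density matrix of the first qubit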
from concurrent.futures import ThreadPoolExecutor
import openmm
from openmm import app, unit
from openff.toolkit.topology import Molecule, Topology
def createConformations(outputfile, forcefield, smiles, sid):
"""Generate the conformations for a molecule and save them to disk."""
    print(f'Generating {sid}: {smiles}')
try:
mol = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
fftop = Topology()
fftop.add_molecule(mol)
mmtop = fftop.to_openmm()
system = forcefield.create_openmm_system(fftop)
    except Exception:
print(' failed to parametrize')
return
# Generate 10 diverse starting points. Run MD from each one to generate a total
# of 100 high energy conformations.
mol.generate_conformers(n_conformers=10, rms_cutoff=0*unit.nanometers)
assert len(mol.conformers) == 10
def simulate(pos):
integrator = openmm.LangevinMiddleIntegrator(500*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
simulation.context.setPositions(pos)
simulation.minimizeEnergy()
simulation.context.setVelocitiesToTemperature(500*unit.kelvin)
states = []
for i in range(10):
simulation.step(10000)
state = simulation.context.getState(getPositions=True, getEnergy=True)
if state.getPotentialEnergy() < 1e4*unit.kilojoules_per_mole:
states.append(state)
return states
futures = []
with ThreadPoolExecutor() as executor:
for pos in mol.conformers:
futures.append(executor.submit(simulate, pos))
states = []
for future in futures:
states += future.result()
# Select 25 that are most different from each other.
if len(states) < 25:
print(' failed to generate states')
return
states = filterByRMSD(states, mmtop)
# Create a nearby, lower energy conformation from each one.
integrator = openmm.LangevinMiddleIntegrator(100*unit.kelvin, 1/unit.picosecond, 0.001*unit.picosecond)
simulation = app.Simulation(mmtop, system, integrator, openmm.Platform.getPlatformByName('Reference'))
for state in states[:]:
simulation.context.setState(state)
simulation.minimizeEnergy(maxIterations=5)
simulation.context.setVelocitiesToTemperature(100*unit.kelvin)
simulation.step(1000)
states.append(simulation.context.getState(getPositions=True))
saveToFile(outputfile, mol, states, sid) | fce6cb1c7620b755a500e76822aa3ac27b7a12f4 | 15,302 |
import numpy
import math
def two_angle_circular_correlation_coef(angles1, angles2, mean1, mean2):
"""
    Circular correlation coefficient (Jammalamadaka & SenGupta 2001).
"""
centered_a = angles1-mean1
centered_b = angles2-mean2
sin_centered_a = numpy.sin(centered_a)
sin_centered_b = numpy.sin(centered_b)
sin2_a = sin_centered_a*sin_centered_a
sin2_b = sin_centered_b*sin_centered_b
return numpy.dot(sin_centered_a, sin_centered_b) / math.sqrt(numpy.dot(sin2_a, sin2_b)) | 6a95f8726f45105c68b9c0b4f8f13191a88734e2 | 15,303 |
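A short usage sketch for the correlation above; the circular means are computed from the resultant vector (a standard definition, assumed compatible with this measure):
import numpy
a = numpy.array([0.10, 0.50, 1.20, 2.00])
b = numpy.array([0.15, 0.55, 1.10, 2.10])
mean_a = numpy.arctan2(numpy.sin(a).mean(), numpy.cos(a).mean())
mean_b = numpy.arctan2(numpy.sin(b).mean(), numpy.cos(b).mean())
r = two_angle_circular_correlation_coef(a, b, mean_a, mean_b)  # close to +1 for these aligned angles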
from typing import Union
import yaml
def format_data(data: Union[dict, list]) -> str:
"""
:param data: input data
:return: pretty formatted yaml representation of a dictionary
"""
return yaml.dump(data, sort_keys=False, default_flow_style=False) | b4e79a8957995fb8e2eaa549a6a208a48574a598 | 15,304 |
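For example, with PyYAML's block style:
print(format_data({"name": "demo", "steps": [1, 2]}))
# name: demo
# steps:
# - 1
# - 2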
import os
import logging
import numpy as np
import Bio.AlignIO
import Bio.Align.Applications
import Bio.Alphabet
import Bio.Alphabet.IUPAC
logger = logging.getLogger(__name__)
def fasta2select(fastafilename, is_aligned=False,
ref_resids=None, target_resids=None,
ref_offset=0, target_offset=0, verbosity=3,
alnfilename=None, treefilename=None, clustalw="clustalw2"):
"""Return selection strings that will select equivalent residues.
The function aligns two sequences provided in a FASTA file and
constructs MDAnalysis selection strings of the common atoms. When
these two strings are applied to the two different proteins they
will generate AtomGroups of the aligned residues.
`fastafilename` contains the two un-aligned sequences in FASTA
format. The reference is assumed to be the first sequence, the
target the second. ClustalW_ produces a pairwise
alignment (which is written to a file with suffix ``.aln``). The
output contains atom selection strings that select the same atoms
in the two structures.
Unless `ref_offset` and/or `target_offset` are specified, the resids
in the structure are assumed to correspond to the positions in the
un-aligned sequence, namely the first residue has resid == 1.
In more complicated cases (e.g., when the resid numbering in the
input structure has gaps due to missing parts), simply provide the
sequence of resids as they appear in the topology in `ref_resids` or
`target_resids`, e.g. ::
target_resids = [a.resid for a in trj.select_atoms('name CA')]
(This translation table *is* combined with any value for
`ref_offset` or `target_offset`!)
Parameters
----------
fastafilename : str, path to filename
FASTA file with first sequence as reference and
second the one to be aligned (ORDER IS IMPORTANT!)
is_aligned : bool (optional)
``False`` (default)
run clustalw for sequence alignment;
``True``
use the alignment in the file (e.g. from STAMP) [``False``]
ref_offset : int (optional)
add this number to the column number in the FASTA file
to get the original residue number, default: 0
target_offset : int (optional)
add this number to the column number in the FASTA file
to get the original residue number, default: 0
ref_resids : str (optional)
sequence of resids as they appear in the reference structure
target_resids : str (optional)
sequence of resids as they appear in the target
alnfilename : str (optional)
filename of ClustalW alignment (clustal format) that is
produced by *clustalw* when *is_aligned* = ``False``.
default ``None`` uses the name and path of *fastafilename* and
        substitutes the suffix with '.aln'.
treefilename: str (optional)
filename of ClustalW guide tree (Newick format);
        if ``None`` (default), the filename is generated from *alnfilename*
with the suffix '.dnd' instead of '.aln'
clustalw : str (optional)
path to the ClustalW (or ClustalW2) binary; only
        needed for `is_aligned` = ``False``, default: "clustalw2"
Returns
-------
select_dict : dict
dictionary with 'reference' and 'mobile' selection string
that can be used immediately in :class:`AlignTraj` as
``select=select_dict``.
See Also
--------
:func:`sequence_alignment`, which does not require external
programs.
.. _ClustalW: http://www.clustal.org/
.. _STAMP: http://www.compbio.dundee.ac.uk/manuals/stamp.4.2/
"""
protein_gapped = Bio.Alphabet.Gapped(Bio.Alphabet.IUPAC.protein)
if is_aligned:
logger.info("Using provided alignment {}".format(fastafilename))
with open(fastafilename) as fasta:
alignment = Bio.AlignIO.read(
fasta, "fasta", alphabet=protein_gapped)
else:
if alnfilename is None:
filepath, ext = os.path.splitext(fastafilename)
alnfilename = filepath + '.aln'
if treefilename is None:
filepath, ext = os.path.splitext(alnfilename)
treefilename = filepath + '.dnd'
run_clustalw = Bio.Align.Applications.ClustalwCommandline(
clustalw,
infile=fastafilename,
type="protein",
align=True,
outfile=alnfilename,
newtree=treefilename)
logger.debug(
"Aligning sequences in %(fastafilename)r with %(clustalw)r.",
vars())
logger.debug("ClustalW commandline: %r", str(run_clustalw))
try:
stdout, stderr = run_clustalw()
        except Exception:
logger.exception("ClustalW %(clustalw)r failed", vars())
logger.info(
"(You can get clustalw2 from http://www.clustal.org/clustal2/)")
raise
with open(alnfilename) as aln:
alignment = Bio.AlignIO.read(
aln, "clustal", alphabet=protein_gapped)
logger.info(
"Using clustalw sequence alignment {0!r}".format(alnfilename))
logger.info(
"ClustalW Newick guide tree was also produced: {0!r}".format(treefilename))
nseq = len(alignment)
if nseq != 2:
raise ValueError(
"Only two sequences in the alignment can be processed.")
    # implicit assertion that we only have two sequences in the alignment
orig_resids = [ref_resids, target_resids]
offsets = [ref_offset, target_offset]
for iseq, a in enumerate(alignment):
# need iseq index to change orig_resids
if orig_resids[iseq] is None:
# build default: assume consecutive numbering of all
# residues in the alignment
GAP = a.seq.alphabet.gap_char
length = len(a.seq) - a.seq.count(GAP)
orig_resids[iseq] = np.arange(1, length + 1)
else:
orig_resids[iseq] = np.asarray(orig_resids[iseq])
# add offsets to the sequence <--> resid translation table
seq2resids = [resids + offset for resids, offset in zip(
orig_resids, offsets)]
del orig_resids
del offsets
def resid_factory(alignment, seq2resids):
"""Return a function that gives the resid for a position ipos in
the nseq'th alignment.
resid = resid_factory(alignment,seq2resids)
r = resid(nseq,ipos)
It is based on a look up table that translates position in the
alignment to the residue number in the original
sequence/structure.
        The first index of resid() is the alignment number, the
second the position in the alignment.
seq2resids translates the residues in the sequence to resid
numbers in the psf. In the simplest case this is a linear map
        but if whole parts such as loops are omitted from the protein
the seq2resids may have big gaps.
Format: a tuple of two numpy arrays; the first array is for
        the reference, the second for the target. The index in each
array gives the consecutive number of the amino acid in the
sequence, the value the resid in the structure/psf.
Note: assumes that alignments have same length and are padded if
necessary.
"""
# could maybe use Bio.PDB.StructureAlignment instead?
nseq = len(alignment)
t = np.zeros((nseq, alignment.get_alignment_length()), dtype=int)
for iseq, a in enumerate(alignment):
GAP = a.seq.alphabet.gap_char
t[iseq, :] = seq2resids[iseq][np.cumsum(np.where(
np.array(list(a.seq)) == GAP, 0, 1)) - 1]
# -1 because seq2resid is index-1 based (resids start at 1)
def resid(nseq, ipos, t=t):
return t[nseq, ipos]
return resid
resid = resid_factory(alignment, seq2resids)
res_list = [] # collect individual selection string
# could collect just resid and type (with/without CB) and
# then post-process and use ranges for continuous stretches, eg
# ( resid 1:35 and ( backbone or name CB ) ) or ( resid 36 and backbone )
# should be the same for both seqs
GAP = alignment[0].seq.alphabet.gap_char
if GAP != alignment[1].seq.alphabet.gap_char:
raise ValueError(
"Different gap characters in sequence 'target' and 'mobile'.")
for ipos in range(alignment.get_alignment_length()):
aligned = list(alignment[:, ipos])
if GAP in aligned:
continue # skip residue
template = "resid %i"
if 'G' not in aligned:
# can use CB
template += " and ( backbone or name CB )"
else:
template += " and backbone"
template = "( " + template + " )"
res_list.append([template % resid(iseq, ipos) for iseq in range(nseq)])
sel = np.array(res_list).transpose()
ref_selection = " or ".join(sel[0])
target_selection = " or ".join(sel[1])
return {'reference': ref_selection, 'mobile': target_selection} | b259f15cae11fa3a7678b3d143613996d49c61f3 | 15,305 |
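A minimal usage sketch with hypothetical file names, assuming MDAnalysis is installed and the ClustalW binary is on the PATH; the returned dictionary plugs straight into AlignTraj:
import MDAnalysis as mda
from MDAnalysis.analysis.align import AlignTraj
seldict = fasta2select("sequences.fasta", is_aligned=False)
ref = mda.Universe("reference.pdb")
mobile = mda.Universe("target.pdb", "target.xtc")
AlignTraj(mobile, ref, select=seldict).run()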
import jax
from flax.training import common_utils
def eval_on_dataset(
model, state, dataset,
pmapped_eval_step):
"""Evaluates the model on the whole dataset.
Args:
model: The model to evaluate.
    state: Current state associated with the model (contains the batch norm
      moving averages).
    dataset: Dataset on which the model should be evaluated. Should already
      be batched.
pmapped_eval_step: A pmapped version of the `eval_step` function (see its
documentation for more details).
Returns:
    A dictionary containing the loss and error rate on the dataset. These metrics
are averaged over the samples.
"""
eval_metrics = []
total_num_samples = 0
for eval_batch in dataset:
# Load and shard the TF batch.
eval_batch = load_and_shard_tf_batch(eval_batch)
# Compute metrics and sum over all observations in the batch.
metrics = pmapped_eval_step(model, state, eval_batch)
eval_metrics.append(metrics)
    # Number of samples seen is num_replicas * per_replica_batch_size.
total_num_samples += (
eval_batch['label'].shape[0] * eval_batch['label'].shape[1])
# Metrics are all the same across all replicas (since we applied psum in the
# eval_step). The next line will fetch the metrics on one of them.
eval_metrics = common_utils.get_metrics(eval_metrics)
# Finally, we divide by the number of samples to get the mean error rate and
# cross entropy.
eval_summary = jax.tree_map(lambda x: x.sum() / total_num_samples,
eval_metrics)
return eval_summary | dd2296f80db37687de6fc8a4bcf0046d43cda115 | 15,306 |
import os
def create_directory(path):
"""Creates the given directory and returns the path."""
if not os.path.isdir(path):
os.makedirs(path)
return path | 7e1d254276b3f4fd4560e206d7a77b11c6dfdfae | 15,307 |
def factorize(n):
""" Prime factorises n """
    # Loop up to sqrt(n) and check for factors
ret = []
sqRoot = int(n ** 0.5)
    for f in range(2, sqRoot + 1):
if n % f == 0:
e = 0
while n % f == 0:
                n, e = n // f, e + 1
ret.append((f, e))
if n > 1:
ret.append((n, 1))
return ret | bc4b4a26010f2f18c9989acd2b7d81615b21f8db | 15,308 |
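For example, 360 = 2**3 * 3**2 * 5, so:
factorize(360)  # -> [(2, 3), (3, 2), (5, 1)]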
import random
def createSimpleDataSet( numOfAttr, numOfObj ):
"""
This creates a simple data base with 3 attributes
The second one is 2 times the first one with some
Gauss noise. The third one is just random noise.
"""
database = []
for i in range(numOfObj):
data = dataObject(numOfAttr)
w=[random.gauss(2.0, 2.0)]
w.append(w[0]*3+random.gauss(0.0, 0.05))
w.append(random.random()*6)
data.setAttributes(w)
database.append(data)
return database | dd4e8005634bd49411a785982fe3112acaf8e544 | 15,309 |
import numpy as np
import warnings
from tqdm import tqdm
def clean_data(
data,
isz=None,
r1=None,
dr=None,
edge=0,
bad_map=None,
add_bad=None,
apod=True,
offx=0,
offy=0,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
*,
mask=None,
):
"""Clean data.
Parameters:
-----------
`data` {np.array} -- datacube containing the NRM data\n
`isz` {int} -- Size of the cropped image (default: {None})\n
`r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
`dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {0})\n
Returns:
--------
`cube` {np.array} -- Cleaned datacube.
"""
n_im = data.shape[0]
cube_cleaned = [] # np.zeros([n_im, isz, isz])
l_bad_frame = []
bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
for i in tqdm(range(n_im), ncols=100, desc="Cleaning", leave=False):
img0 = data[i]
img0 = _apply_edge_correction(img0, edge=edge)
if bad_map is not None:
img1 = fix_bad_pixels(img0, bad_map[i], add_bad=add_bad[i])
else:
img1 = img0.copy()
img1 = _remove_dark(img1, darkfile=darkfile, verbose=verbose)
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
center = find_max(img1, filtmed=filtmed, f=f_kernel)
else:
center = None
if sky and (r1 is not None or mask is not None):
img_biased = sky_correction(
img1, r1=r1, dr=dr, verbose=verbose, center=center, mask=mask
)[0]
elif sky:
warnings.warn(
"sky is set to True, but r1 and mask are set to None. Skipping sky correction",
RuntimeWarning,
)
img_biased = img1.copy()
else:
img_biased = img1.copy()
img_biased[img_biased < 0] = 0 # Remove negative pixels
if isz is not None:
# Get expected center for sky correction
filtmed = f_kernel is not None
im_rec_max = crop_max(
img_biased, isz, offx=offx, offy=offy, filtmed=filtmed, f=f_kernel
)[0]
else:
im_rec_max = img_biased.copy()
if (
(im_rec_max.shape[0] != im_rec_max.shape[1])
or (isz is not None and im_rec_max.shape[0] != isz)
or (isz is None and im_rec_max.shape[0] != img0.shape[0])
):
l_bad_frame.append(i)
else:
if apod and window is not None:
img = apply_windowing(im_rec_max, window=window)
elif apod:
warnings.warn(
"apod is set to True, but window is None. Skipping apodisation",
RuntimeWarning,
)
img = im_rec_max.copy()
else:
img = im_rec_max.copy()
cube_cleaned.append(img)
if verbose:
print("Bad centering frame number:", l_bad_frame)
cube_cleaned = np.array(cube_cleaned)
return cube_cleaned | d50cb5b723661925c81f215e3bba903b4f9bb56c | 15,310 |
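A hypothetical call to clean_data on a datacube loaded from a FITS file (astropy assumed; parameter values are illustrative, not recommendations):
from astropy.io import fits
data = fits.getdata("cube.fits")  # shape (n_im, ny, nx)
cube = clean_data(data, isz=256, r1=100, dr=20, apod=True, window=80, verbose=True)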
import bpy
def select_points():
""" Select points (empty) objects.
Parameters:
None
Returns:
list: Empty objects or None.
"""
selected = bpy.context.selected_objects
if selected:
return [object for object in selected if object.type == 'EMPTY']
print('***** Point (empty) objects were not selected *****')
return None | 4134277f427518da188d8bcac4d5023d0b39e55a | 15,311 |
import sys
def _get_best_encoding(stream):
"""Returns the default stream encoding if not found."""
rv = getattr(stream, 'encoding', None) or sys.getdefaultencoding()
if _is_ascii_encoding(rv):
return 'utf-8'
return rv | c27739e48395e32457ad7955dcc70a3c0a9973bb | 15,312 |
def mask_conv1d1(in_channels,
out_channels,
strides=1,
groups=1,
use_bias=False,
data_format="channels_last",
**kwargs):
"""
Masked 1-dim kernel version of the 1D convolution layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
use_bias : bool, default False
Whether the layer uses a bias vector.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return MaskConv1d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
groups=groups,
use_bias=use_bias,
data_format=data_format,
**kwargs) | 0c06482e36ef55322ed3b52e68f321750843ef01 | 15,313 |
import decimal
def decimal_from_tuple(signed, digits, expo):
"""Build `Decimal` objects from components of decimal tuple.
Parameters
----------
signed : bool
True for negative values.
digits : iterable of ints
digits of value each in [0,10).
expo : int or {'F', 'n', 'N'}
exponent of decimal.
Returns
-------
y : Decimal
corresponding decimal object.
"""
    # Get everything into the correct type because the Py3 decimal package is strict about input types
signed = int(signed)
digits = ensure_tuple_of_ints(digits)
expo = expo if expo in ("F", "n", "N") else int(expo)
y = decimal.Decimal(decimal.DecimalTuple(signed, digits, expo))
return y | c3b67505440600b5e9f3ce944c9018539b32bbf7 | 15,314 |
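For example (sign flag, digit tuple, exponent):
decimal_from_tuple(False, (3, 1, 4), -2)  # -> Decimal('3.14')
decimal_from_tuple(True, (5,), 0)         # -> Decimal('-5')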
from typing import Dict
def metadata_update(
repo_id: str,
metadata: Dict,
*,
repo_type: str = None,
overwrite: bool = False,
token: str = None,
) -> str:
"""
Updates the metadata in the README.md of a repository on the Hugging Face Hub.
Example:
>>> from huggingface_hub import metadata_update
>>> metadata = {'model-index': [{'name': 'RoBERTa fine-tuned on ReactionGIF',
... 'results': [{'dataset': {'name': 'ReactionGIF',
... 'type': 'julien-c/reactiongif'},
... 'metrics': [{'name': 'Recall',
... 'type': 'recall',
... 'value': 0.7762102282047272}],
... 'task': {'name': 'Text Classification',
... 'type': 'text-classification'}}]}]}
    >>> metadata_update("julien-c/reactiongif-roberta", metadata)
Args:
repo_id (`str`):
The name of the repository.
metadata (`dict`):
A dictionary containing the metadata to be updated.
repo_type (`str`, *optional*):
Set to `"dataset"` or `"space"` if updating to a dataset or space,
`None` or `"model"` if updating to a model. Default is `None`.
overwrite (`bool`, *optional*, defaults to `False`):
If set to `True` an existing field can be overwritten, otherwise
attempting to overwrite an existing field will cause an error.
token (`str`, *optional*):
The Hugging Face authentication token.
Returns:
`str`: URL of the commit which updated the card metadata.
"""
filepath = hf_hub_download(
repo_id,
filename=REPOCARD_NAME,
repo_type=repo_type,
use_auth_token=token,
force_download=True,
)
existing_metadata = metadata_load(filepath)
for key in metadata:
# update model index containing the evaluation results
if key == "model-index":
if "model-index" not in existing_metadata:
existing_metadata["model-index"] = metadata["model-index"]
else:
# the model-index contains a list of results as used by PwC but only has one element thus we take the first one
existing_metadata["model-index"][0][
"results"
] = _update_metadata_model_index(
existing_metadata["model-index"][0]["results"],
metadata["model-index"][0]["results"],
overwrite=overwrite,
)
# update all fields except model index
else:
if key in existing_metadata and not overwrite:
if existing_metadata[key] != metadata[key]:
raise ValueError(
f"""You passed a new value for the existing meta data field '{key}'. Set `overwrite=True` to overwrite existing metadata."""
)
else:
existing_metadata[key] = metadata[key]
# save and push to hub
metadata_save(filepath, existing_metadata)
return HfApi().upload_file(
path_or_fileobj=filepath,
path_in_repo=REPOCARD_NAME,
repo_id=repo_id,
repo_type=repo_type,
identical_ok=False,
token=token,
) | 1faf2ae158d598a7538f86ce328ea22b55308507 | 15,315 |
def convertStringToArabic(numStr, stripChars=0):
"""
Convert a string to an arabic number;
Always returns numeric!
12-09-2004: Changed default stripChars to 0, because otherwise a roman I was
stripped before processing! Need to watch for programs that need
to now explicitly set stripChars to 1
>>> convertStringToArabic("IV")
4
>>> convertStringToArabic("123")
123
>>> convertStringToArabic("MC")
1100
"""
num = 0
    if isinstance(numStr, int):
num = numStr # already numeric, arabic
elif isinstance(numStr, str):
numStr = trimPunctAndSpaces(numStr)
if stripChars:
numStr = trimNonDigits(numStr)
if numStr != "":
if isRoman(numStr.upper()):
num = convRomanToArabic(numStr.upper())
else:
try:
num = int(numStr)
except Exception as e:
raise ValueError("Cannot convert: %s" % numStr)
else:
# try not causing an exception on this. If there's no numeric, then return None.
logger.debug("Empty String. convertStringToArabic Conversion error")
else:
try:
num = int(numStr)
except Exception as e:
raise ValueError(e)
if num is None:
num = 0
raise ValueError("Cannot convert: %s" % numStr)
return num | 17020a3131b91aeb8cfc63ee5c04d827c8585478 | 15,316 |
import logging
def set_logging():
"""Sets additional logging to file for debug."""
logger_migrator = logging.getLogger('migrator')
logger_migrator.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(message)s - \n '
'[in %(pathname)s:%(lineno)d]')
fh = logging.FileHandler('migrator.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger_migrator.addHandler(fh)
logger_matcher = logging.getLogger('cds_dojson.matcher.dojson_matcher')
logger_matcher.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(message)s - \n '
'[in %(pathname)s:%(lineno)d]')
fh = logging.FileHandler('matcher.log')
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
logger_matcher.addHandler(fh)
return logger_migrator | 8de4def9910d2ffb94631b6505c26e2a2dbcb632 | 15,317 |
def system_types():
"""
    System types (workspace types).
:return:
"""
return Workspace.sys_types().values() | 968fbf7993d4ad645fe741ac48702440ba01a2e3 | 15,318 |
import numpy as np
def get_rnd_simplex(dimension, random_state):
"""
Uniform random point on a simplex, i.e. x_i >= 0 and sum of the coordinates is 1.
Donald B. Rubin, The Bayesian bootstrap Ann. Statist. 9, 1981, 130-134.
https://cs.stackexchange.com/questions/3227/uniform-sampling-from-a-simplex
Parameters
----------
dimension: int
Dimensionality of the simplex
random_state: optional, RandomState object
Returns
-------
numpy array corresponding to random sample in dimension of space
"""
t = random_state.uniform(0, 1, dimension - 1)
t = np.append(t, [0, 1])
t.sort()
return np.array([(t[i + 1] - t[i]) for i in range(len(t) - 1)]) | d5e1105655192fe13bcad5e3dd08a7247461d8bf | 15,319 |
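A quick sanity check (arbitrary seed): every coordinate is non-negative and the coordinates sum to 1.
rs = np.random.RandomState(42)
x = get_rnd_simplex(4, rs)
assert x.min() >= 0 and abs(x.sum() - 1.0) < 1e-12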
from django.conf import settings
from django.http import Http404, HttpResponse, HttpResponseForbidden
from django.template import loader
def backup_generate_metadata(request, created_at='', secret=''):
"""
Generates metadata code for the backup.
Meant to be called by the local handler only with shared secret (not directly).
"""
if not secret == settings.GAEBAR_SECRET_KEY:
return HttpResponseForbidden()
backup = models.GaebarBackup.all().filter('created_at = ', timestamp_to_datetime(created_at)).get()
if not backup:
raise Http404
context = dict(backup = backup)
response = HttpResponse(loader.render_to_string('gaebar/metadata.py', context), 'text/plain')
response['Content-Disposition'] = 'attachment; filename=metadata.py'
return response | 30ae381be8454a1df6b47fd5fc55af68f10e8b1f | 15,320 |
import torch
def contains_conv(module: torch.nn.Module) -> bool:
""" Returns `True` if given `torch.nn.Module` contains at least one convolution module/op (based on `deepcv.meta.nn.is_conv` for convolution definition) """
    return any(is_conv(m) for m in module.modules())
def _construct_cell(empty=False):
"""Constructs a test cell."""
cell = scheduler.Cell('top')
if empty:
return cell
rack1 = scheduler.Bucket('rack:rack1', traits=0, level='rack')
rack2 = scheduler.Bucket('rack:rack2', traits=0, level='rack')
cell.add_node(rack1)
cell.add_node(rack2)
srv1 = scheduler.Server('srv1', [10, 20, 30], traits=1,
valid_until=1000, label='part')
srv2 = scheduler.Server('srv2', [10, 20, 30], traits=3,
valid_until=2000, label='part')
srv3 = scheduler.Server('srv3', [10, 20, 30], traits=0,
valid_until=3000, label='_default')
srv4 = scheduler.Server('srv4', [10, 20, 30], traits=0,
valid_until=4000, label='_default')
rack1.add_node(srv1)
rack1.add_node(srv2)
rack2.add_node(srv3)
rack2.add_node(srv4)
tenant1 = scheduler.Allocation()
cell.partitions['_default'].allocation.add_sub_alloc('t1', tenant1)
tenant11 = scheduler.Allocation()
tenant1.add_sub_alloc('t11', tenant11)
alloc1 = scheduler.Allocation([10, 10, 10], rank=100, traits=0)
tenant11.add_sub_alloc('a1', alloc1)
tenant2 = scheduler.Allocation()
cell.partitions['part'].allocation.add_sub_alloc('t2', tenant2)
alloc2 = scheduler.Allocation([10, 10, 10], rank=100, traits=3)
tenant2.add_sub_alloc('a2', alloc2)
return cell | c1b8016b8ff048ab0ecad8c69f960ce3d099bd8c | 15,322 |
import numpy as np
def gaussian_kernel(F: np.ndarray) -> np.ndarray:
    """Compute a similarity (affinity) matrix from pairwise squared distances using a Gaussian kernel."""
D = squared_dists(F)
return np.exp(-D/np.mean(D)) | 62f97009c791213255d8bdb4efc0fcfa60c20bb0 | 15,323 |
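For intuition, a sketch with an assumed squared_dists helper returning pairwise squared Euclidean distances (not part of the original excerpt):
from scipy.spatial.distance import cdist
def squared_dists(F: np.ndarray) -> np.ndarray:
    # Pairwise squared Euclidean distances between rows of F.
    return cdist(F, F, metric="sqeuclidean")
F = np.random.rand(5, 3)
K = gaussian_kernel(F)  # 5x5 matrix, ones on the diagonal, values in (0, 1]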
import isoweek
def _parse_yearweek(yearweek):
    """Utility function to convert internal string representations of calendar weeks into datetime objects. Uses strings of format `<year>-KW<week>`. Weeks are 1-based."""
year, week = yearweek_regex.search(yearweek).groups()
# datetime.combine(isoweek.Week(int(year), int(week)).wednesday(),time(0))
return isoweek.Week(int(year), int(week)) | 319166595c506a73d125ed53a11433976aa4f106 | 15,324 |
import numpy as np
import healpy as hp
import esutil
def get_subpixel_indices(galtable, hpix=[], border=0.0, nside=0):
"""
Routine to get subpixel indices from a galaxy table.
Parameters
----------
galtable: `redmapper.Catalog`
A redmapper galaxy table master catalog
hpix: `list`, optional
Healpix number (ring format) of sub-region. Default is [] (full catalog)
border: `float`, optional
Border around hpix (in degrees) to find pixels. Default is 0.0.
Only works if hpix is a single-length list
nside: `int`, optional
Nside of healpix subregion. Default is 0 (full catalog).
Returns
-------
indices: `np.array`
Integer array of indices of galaxy table pixels in the subregion.
"""
if len(hpix) == 0 or nside == 0:
return np.arange(galtable.filenames.size)
theta, phi = hp.pix2ang(galtable.nside, galtable.hpix)
ipring_big = hp.ang2pix(nside, theta, phi)
_, indices = esutil.numpy_util.match(hpix, ipring_big)
# Ignore border if using full catalog
if border > 0.0 and len(hpix) > 0:
if len(hpix) != 1:
raise NotImplementedError("Cannot do boundary around a pixel list.")
# now we need to find the extra boundary...
boundaries = hp.boundaries(nside, hpix[0], step=galtable.nside // nside)
inhpix = galtable.hpix[indices]
for i in range(boundaries.shape[1]):
pixint = hp.query_disc(galtable.nside, boundaries[:, i],
border*np.pi/180., inclusive=True, fact=8)
inhpix = np.append(inhpix, pixint)
inhpix = np.unique(inhpix)
_, indices = esutil.numpy_util.match(inhpix, galtable.hpix)
return indices | 5a2d18f79ef8cc478752ef8059c71a512efced9f | 15,325 |
def is_common_secret_key(key_name: str) -> bool:
"""Return true if the key_name value matches a known secret name or pattern."""
if key_name in COMMON_SECRET_KEYS:
return True
    return any(
        key_name.lower().endswith(key_suffix)
        for key_suffix in COMMON_SECRET_KEY_SUFFIXES
    )
from functools import wraps
def check_md5(func):
    """ A decorator that checks if a file has been changed.
    Note: `path`, `original_path` and `_check_md5` are expected to be defined
    in the enclosing module. """
@wraps(func)
def wrapper(*args, **kwargs):
ret = func(*args, **kwargs)
assert _check_md5(path, original_path), 'The file has been changed after {}().'.format(func.__name__)
return ret
return wrapper | bb7dcae110be816af1796cdd66c7b9d74e7c265d | 15,327 |
def showgraphwidth(context, mapping):
"""Integer. The width of the graph drawn by 'log --graph' or zero."""
# just hosts documentation; should be overridden by template mapping
return 0 | 6e2fad8c80264a1030e5a113d66233c3adc28af8 | 15,328 |
def diff_last_filter(trail, key=lambda x: x['pid']):
""" Filter out trails with last two key different
"""
return trail if key(trail[-1]) != key(trail[-2]) else None | 82e67a98a1b09e11f2f1ebd76f470969b2dd1a51 | 15,329 |
def cpu_times():
"""Return a named tuple representing the following system-wide
CPU times:
(user, nice, system, idle, iowait, irq, softirq [steal, [guest,
[guest_nice]]])
Last 3 fields may not be available on all Linux kernel versions.
"""
procfs_path = get_procfs_path()
set_scputimes_ntuple(procfs_path)
with open_binary('%s/stat' % procfs_path) as f:
values = f.readline().split()
fields = values[1:len(scputimes._fields) + 1]
fields = [float(x) / CLOCK_TICKS for x in fields]
return scputimes(*fields) | 70dd518296bc873add8a7164446e908d80e74174 | 15,330 |
import numpy as np
from sklearn import cluster
def calc_centeroid(x, network: DTClustering, n_clusters: int):
    """Compute the cluster centers.
    Notes:
        Input x: [batch, sequence, feature, 1]
        Output: [n_clusters, hidden sequence, hidden feature, 1]
    """
code = network.encode(x)
feature = code.view(code.shape[0], -1) # [batch, sequence * feature]
feature = feature.detach().cpu().numpy()
km = cluster.KMeans(n_clusters=n_clusters, n_init=10)
km.fit(feature)
centers = km.cluster_centers_.reshape(n_clusters, code.shape[1], code.shape[2], 1)
centers = centers.astype(np.float32)
return centers | cf0a158d86105e34ad476dbfb7bc6ff911a65e52 | 15,331 |
from flask import request
def start_run():
    """
    Starts a test run.
    TODO: all of it.
    """
uuid = request.form.get('uuid', default='none', type=str)
print('Starting a run: %s' % uuid)
return "ok" | 49b304801cb1ff9875f16c9f66253ff282087692 | 15,332 |
def softXrayMono1(eV, k, m, c, rb_mm, bounce, inOff_deg, outOff_deg, verbose):
"""
# calculate premirror and grating angles for NSLS-II soft xray monos
# eV: energy
# k: central line density in mm-1
# m: diffraction order
# c: cff 0 < cff < infinity
# bounce = 'up' or 'down'
# inOff_deg - input beam angle relative to horizontal, NSLSII sense
# outOff_deg - output beam angle relative to horizontal, NSLSII sense
"""
# correct for several energies for Centurion
# correctly reverses sign of angles if geometry is flipped upside-down
# consider bounce direction
if bounce == "up":
a = -1
elif bounce == "down":
a = +1
else:
a = float("nan")
# calculate angles, no offsets
alpha_deg = ruben2005eqn8m(eV, c, k, m)
beta_deg = getBetaDeg(eV, alpha_deg, k, m)
# include offsets
thetaPMinc_deg = abs(
+0.5 * (outOff_deg - inOff_deg + a * (180.0 - alpha_deg + beta_deg))
)
thetaPM_deg = +0.5 * (outOff_deg + inOff_deg + a * (180.0 - alpha_deg + beta_deg))
thetaGR_deg = a * (90.0 + beta_deg) + outOff_deg
disp = getLinDisp(alpha_deg, beta_deg, k, m, rb_mm)
if verbose:
# alpha, beta both relative to normal and surface
print("eV=", eV, "c=", c)
print("alpha=", alpha_deg, 90.0 - alpha_deg)
print("beta=", beta_deg, (90 + beta_deg))
print("incident angle on pm=", thetaPMinc_deg)
print("dispersion (eV/mm) =", disp)
# grating and premirror rotation angles
print("rotation angles relative to horizontal")
print(" premirror", thetaPM_deg)
print(" grating", thetaGR_deg)
return (thetaPM_deg, thetaGR_deg, alpha_deg, beta_deg, thetaPMinc_deg, disp) | 3309b8ec7e3f5433025c4c676bf2966281df4d02 | 15,333 |
def createHelmholtz3dExteriorCalderonProjector(
context, hminusSpace, hplusSpace, waveNumber,
label=None, useInterpolation=False, interpPtsPerWavelength=5000):
"""
Create and return the exterior Calderon projector for the
Helmholtz equation in 3D.
*Parameters:*
- context (Context)
A Context object to control the assembly of the weak form of the
newly constructed operator.
- hminusSpace (Space)
Function space representing functions in H^{-1/2}.
- hplusSpace (Space)
Function space representing functions in H^{+1/2}.
- waveNumber (float or complex)
Wave number, i.e. the number k in the Helmholtz equation
nabla^2 u + k^2 u = 0.
- label (string)
Textual label of the operator. If set to None (default), a unique
label will be generated automatically.
- useInterpolation (bool)
If set to False (default), the standard exp() function will be used
to evaluate the exponential factor occurring in the kernel. If set
to True, the exponential factor will be evaluated by piecewise-cubic
interpolation of values calculated in advance on a regular
grid. This normally speeds up calculations, but might result in a
loss of accuracy. This is an experimental feature: use it at your
own risk.
- interpPtsPerWavelength (int)
If useInterpolation is set to True, this parameter determines the
number of points per "effective wavelength" (defined as 2 pi /
abs(waveNumber)) used to construct the interpolation grid. The
default value (5000) is normally enough to reduce the relative or
absolute error, *whichever is smaller*, below 100 * machine
precision. If useInterpolation is set to False, this parameter is
ignored.
*Returns* a newly constructed BoundaryOperator_BasisFunctionType_ResultType
object, with BasisFunctionType and ResultType determined automatically from
the context argument and equal to either float32, float64, complex64 or
complex128.
"""
basisFunctionType = context.basisFunctionType()
if (basisFunctionType != hminusSpace.basisFunctionType() or
basisFunctionType != hplusSpace.basisFunctionType()):
raise TypeError("BasisFunctionType of context and all spaces must be the same")
resultType = context.resultType()
# construct object
if not label:
label = ""
result = _constructObjectTemplatedOnBasis(
core, 'helmholtz3dExteriorCalderonProjector', basisFunctionType,
context, hminusSpace, hplusSpace, waveNumber, label,
useInterpolation, interpPtsPerWavelength)
result._context = context
result._hminusSpace = hminusSpace
result._hplusSpace = hplusSpace
return result | bfb6c139787355d07cb8c82475f34dcc349c55f3 | 15,334 |
import h5py
import numpy as np
import dask.array as da
def prepare_xrf_map(data, chunk_pixels=5000, n_chunks_min=4):
"""
Convert XRF map from it's initial representation to properly chunked Dask array.
Parameters
----------
data: da.core.Array, np.ndarray or RawHDF5Dataset (this is a custom type)
Raw XRF map represented as Dask array, numpy array or reference to a dataset in
HDF5 file. The XRF map must have dimensions `(ny, nx, ne)`, where `ny` and `nx`
define image size and `ne` is the number of spectrum points
chunk_pixels: int
The number of pixels in a single chunk. The XRF map will be rechunked so that
each block contains approximately `chunk_pixels` pixels and contain all `ne`
spectrum points for each pixel.
n_chunks_min: int
Minimum number of chunks. The algorithm will try to split the map into the number
of chunks equal or greater than `n_chunks_min`. If HDF5 dataset is not chunked,
then the whole map is treated as one chunk. This should happen only to very small
files, so parallelism is not important.
Returns
-------
data: da.core.Array
XRF map represented as Dask array with proper chunk size. The XRF map may be loaded
block by block when processing using `dask.array.map_blocks` and `dask.array.blockwise`
functions with Dask multiprocessing scheduler.
file_obj: h5py.File object
File object that points to HDF5 file. `None` if input parameter `data` is Dask or
numpy array. Note, that `file_obj` must be kept alive until processing is completed.
Closing the file will invalidate references to the dataset in the respective
Dask array.
Raises
------
TypeError if input parameter `data` is not one of supported types.
"""
file_obj = None # It will remain None, unless 'data' is 'RawHDF5Dataset'
if isinstance(data, da.core.Array):
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=data.chunksize[0:2],
data_shape=data.shape[0:2],
n_chunks_min=n_chunks_min,
)
data = data.rechunk(chunks=(*chunk_size, data.shape[2]))
elif isinstance(data, np.ndarray):
data = _array_numpy_to_dask(data, chunk_pixels=chunk_pixels, n_chunks_min=n_chunks_min)
elif isinstance(data, RawHDF5Dataset):
fpath, dset_name = data.abs_path, data.dset_name
# Note, that the file needs to remain open until the processing is complete !!!
file_obj = h5py.File(fpath, "r")
dset = file_obj[dset_name]
if dset.ndim != 3:
raise TypeError(
f"Dataset '{dset_name}' in file '{fpath}' has {dset.ndim} dimensions: 3D dataset is expected"
)
ny, nx, ne = dset.shape
if dset.chunks:
chunk_size = _compute_optimal_chunk_size(
chunk_pixels=chunk_pixels,
data_chunksize=dset.chunks[0:2],
data_shape=(ny, nx),
n_chunks_min=n_chunks_min,
)
else:
# The data is not chunked. Process data as one chunk.
chunk_size = (ny, nx)
data = da.from_array(dset, chunks=(*chunk_size, ne))
else:
raise TypeError(f"Type of parameter 'data' is not supported: type(data)={type(data)}")
return data, file_obj | a8c4b442f367759237f77571c51e98bd1cc9d53a | 15,335 |
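A small demonstration of prepare_xrf_map with an in-memory numpy cube (shapes chosen arbitrarily):
xrf = np.random.rand(40, 60, 1024)  # (ny, nx, ne)
darr, file_obj = prepare_xrf_map(xrf, chunk_pixels=500, n_chunks_min=4)
print(darr.chunksize)  # each block holds roughly 500 pixels with full spectra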
def colorbar_factory(cax, mappable, **kwargs):
"""
Create a colorbar on the given axes for the given mappable.
.. note::
This is a low-level function to turn an existing axes into a colorbar
axes. Typically, you'll want to use `~.Figure.colorbar` instead, which
automatically handles creation and placement of a suitable axes as
well.
Parameters
----------
cax : `~matplotlib.axes.Axes`
The `~.axes.Axes` to turn into a colorbar.
mappable : `~matplotlib.cm.ScalarMappable`
The mappable to be described by the colorbar.
**kwargs
Keyword arguments are passed to the respective colorbar class.
Returns
-------
`.Colorbar`
The created colorbar instance.
"""
return Colorbar(cax, mappable, **kwargs) | 37b0198ea77db887d92ee4fd45e6df73d49f4223 | 15,336 |
import time
import gurobipy as gp
from gurobipy import GRB
"""
fourth_measurer_I_R: computes the measure I_R that is based on the minimal number of tuples that should
be removed from the database for the constraints to hold.
The measure is computed via an ILP and the Gurobi optimizer is used to solve the ILP.
- There is a binary variable x for every tuple in the database.
    - The constraints are of the form x + y >= 1 where x and y represent two tuples that jointly violate a constraint.
- The objective function is to minimize the sum of all x's.
Parameters
----------
uniquePairsDf : dataframe
the result of the query that finds all pairs of tuples that jointly violate a constraint.
Returns
-------
list of two int variables:
database_measurer.objVal is the minimal number of tuples that should be removed for the constraints to hold.
end1 - start is the running time of the function.
"""
start = time.time()
rows_violations = uniquePairsDf.values
varsDict2 = {}
database_measurer = gp.Model('Minimal deletions of tuples')
database_measurer.setParam('OutputFlag', 0) # do not show any comments on the screen
# variables
for i in rows_violations :
varsDict2[i[0]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
varsDict2[i[1]] = database_measurer.addVar(vtype=GRB.BINARY, name="x")
# constraints
for i in rows_violations :
database_measurer.addConstr(varsDict2[i[0]]+varsDict2[i[1]]>=1, name='con')
vars= []
for i in varsDict2:
vars.append(varsDict2[i])
# objective function
database_measurer.setObjective(sum(vars), GRB.MINIMIZE)
    database_measurer.optimize()  # optimizes in place; Model.optimize() returns None
end1 = time.time()
return database_measurer.objVal , end1 - start | a8e29e0a70dfd2e2a4c151ca25b2f7fd528e25f3 | 15,337 |
def is_sublist_equal(list_one, list_two):
"""
Compare the values of two lists of equal length.
:param list_one: list - A list
:param list_two: list - A different list
:return EQUAL or UNEQUAL - If all values match, or not.
>>> is_sublist_equal([0], [0])
EQUAL
>>> is_sublist_equal([1], [0])
UNEQUAL
Iterate over values in each list and compare them
Assumes lists are of equal sizes
"""
for index, value in enumerate(list_one):
if value != list_two[index]:
return UNEQUAL
# Otherwise, all values matched, so it's equal
return EQUAL | 717b4287e212498ef85719fbf4d8e5437f16db48 | 15,338 |
def black_box_function(x, y):
"""Function with unknown internals we wish to maximize.
This is just serving as an example, for all intents and
purposes think of the internals of this function, i.e.: the process
which generates its output values, as unknown.
"""
return -x ** 2 - (y - 1) ** 2 + 1 | 962c0dd5638ac71ee375f4bb1ba07b2bd241a6e8 | 15,339 |
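This is the toy objective from the bayesian-optimization package; a typical maximization run (package assumed installed) looks like:
from bayes_opt import BayesianOptimization
optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={"x": (2, 4), "y": (-3, 3)},
    random_state=1,
)
optimizer.maximize(init_points=2, n_iter=3)
print(optimizer.max)  # best parameters and target found so far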
def file_extension(path):
"""Lower case file extension."""
return audeer.file_extension(path).lower() | 264f8afd0a2328d342693b2ec893706760b5c7ae | 15,340 |
import os
def locate(verbose):
"""Print location of the current workspace.
:param verbose: Unused.
"""
if not os.path.islink(ws_file):
print('no current workspace found, see "ros-get ws-create --help" how to create one')
return 1
else:
print(os.path.realpath(ws_file)) | 75e9d90cba4c8394fdbc18e505fe19120bd1c5dd | 15,341 |
import math
def motion(x, u, dt):
    """
    Motion model: advance state x = [x, y, yaw, v, omega]
    with control input u = [v, omega] over time step dt.
    """
    x[2] += u[1] * dt  # update heading (yaw)
    x[0] += u[0] * math.cos(x[2]) * dt  # advance x along the new heading
    x[1] += u[0] * math.sin(x[2]) * dt  # advance y along the new heading
    x[3] = u[0]  # store linear velocity
    x[4] = u[1]  # store yaw rate
    return x
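A short usage sketch for the motion model above: drive at 1 m/s while turning at 0.1 rad/s for one second (state layout as commented in the function):
state = [0.0, 0.0, 0.0, 0.0, 0.0]  # [x, y, yaw, v, omega]
for _ in range(10):
    state = motion(state, [1.0, 0.1], 0.1)
print(state[:2])  # position after a gentle left arc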
from typing import Union
from typing import Callable
def sweep(
sweep: Union[dict, Callable], entity: str = None, project: str = None,
) -> str:
"""Initialize a hyperparameter sweep.
To generate hyperparameter suggestions from the sweep and use them
to train a model, call `wandb.agent` with the sweep_id returned by
this command. For command line functionality, see the command line
tool `wandb sweep` (https://docs.wandb.ai/ref/cli/wandb-sweep).
Args:
sweep: dict, SweepConfig, or callable. The sweep configuration
(or configuration generator). If a dict or SweepConfig,
should conform to the W&B sweep config specification
(https://docs.wandb.ai/guides/sweeps/configuration). If a
callable, should take no arguments and return a dict that
conforms to the W&B sweep config spec.
entity: str (optional). An entity is a username or team name
where you're sending runs. This entity must exist before you
can send runs there, so make sure to create your account or
team in the UI before starting to log runs. If you don't
specify an entity, the run will be sent to your default
entity, which is usually your username. Change your default
entity in [Settings](wandb.ai/settings) under "default
location to create new projects".
project: str (optional). The name of the project where you're
sending the new run. If the project is not specified, the
run is put in an "Uncategorized" project.
Returns:
sweep_id: str. A unique identifier for the sweep.
Examples:
Basic usage
<!--yeadoc-test:one-parameter-sweep-->
```python
import wandb
sweep_configuration = {
"name": "my-awesome-sweep",
"metric": {"name": "accuracy", "goal": "maximize"},
"method": "grid",
"parameters": {
"a": {
"values": [1, 2, 3, 4]
}
}
}
def my_train_func():
# read the current value of parameter "a" from wandb.config
wandb.init()
a = wandb.config.a
wandb.log({"a": a, "accuracy": a + 1})
sweep_id = wandb.sweep(sweep_configuration)
# run the sweep
wandb.agent(sweep_id, function=my_train_func)
```
"""
if callable(sweep):
sweep = sweep()
"""Sweep create for controller api and jupyter (eventually for cli)."""
if entity:
env.set_entity(entity)
if project:
env.set_project(project)
# Make sure we are logged in
wandb_login._login(_silent=True)
api = InternalApi()
sweep_id, warnings = api.upsert_sweep(sweep)
handle_sweep_config_violations(warnings)
print("Create sweep with ID:", sweep_id)
sweep_url = _get_sweep_url(api, sweep_id)
if sweep_url:
print("Sweep URL:", sweep_url)
return sweep_id | 50ba0d79a8fca5d5eba08b4e739845b797c0c839 | 15,343 |
def connection_end_point(topo_uuid, node_uuid, nep_uuid, cep_uuid):
    """Retrieve ConnectionEndPoint by ID
    :param topo_uuid: ID of Topology
    :type topo_uuid: str
:param node_uuid: ID of Node
:type node_uuid: str
:param nep_uuid: ID of NodeEdgePoint
:type nep_uuid: str
:param cep_uuid: ID of ConnectionEndPoint
:type cep_uuid: str
:rtype: ConnectionEndPoint
"""
for topo in context.topology_context.topology:
if topo.uuid == topo_uuid:
for node in topo.node:
if node.uuid == node_uuid:
for nep in node.owned_node_edge_point:
if nep.uuid == nep_uuid:
for cep in nep.cep_list.connection_end_point:
if cep.uuid == cep_uuid:
return cep | 76dc345732d3209730b6022ba12cb2ca191e4a40 | 15,344 |
import numpy as np
def remove_melt_from_perplex(perplex, melt_percent=-1):
""" Extrapolate high temperature values to remove melt content using sub-solidus values.
The assumption is that alpha and beta are constant and temperature-independent at high temperature."""
Tref = 273
Pref = 0
rho = perplex.rho.reshape( int(perplex.np), int(perplex.nt))
rhoresidual = perplex.rhoresidual.reshape( int(perplex.np), int(perplex.nt))
rhomelt = perplex.rhomelt.reshape( int(perplex.np), int(perplex.nt))
T = perplex.T.reshape( int(perplex.np), int(perplex.nt))
P = perplex.P.reshape( int(perplex.np), int(perplex.nt))
alpha = perplex.alpha.reshape(int(perplex.np), int(perplex.nt))
alpharesidual = perplex.alpharesidual.reshape(int(perplex.np), int(perplex.nt))
alphamelt = perplex.alphamelt.reshape(int(perplex.np), int(perplex.nt))
beta = perplex.beta.reshape( int(perplex.np), int(perplex.nt))
betaresidual = perplex.betaresidual.reshape( int(perplex.np), int(perplex.nt))
betamelt = perplex.betamelt.reshape( int(perplex.np), int(perplex.nt))
cp = perplex.cp.reshape( int(perplex.np), int(perplex.nt))
cpmelt = perplex.cpmelt.reshape( int(perplex.np), int(perplex.nt))
cpresidual = perplex.cpresidual.reshape( int(perplex.np), int(perplex.nt))
melt = perplex.melt.reshape( int(perplex.np), int(perplex.nt))
    # smooth alpha and beta along the melt boundary to avoid unsuitable vertical discontinuities
n_smooth = 3
rho_smooth = []
rhomelt_smooth = []
rhoresidual_smooth = []
alpha_smooth = []
beta_smooth = []
cp_smooth = []
alphamelt_smooth = []
betamelt_smooth = []
cpmelt_smooth = []
alpharesidual_smooth = []
betaresidual_smooth = []
cpresidual_smooth = []
i_smooth = 0
i_int = 0
#alpha_beta_values = False
for j in range(0,int(perplex.np)):
if (melt_percent<0):
are_values = False
for i in range(int(perplex.nt)-1,-1,-1):
#print('None T {} P {} melt {}'.format(T[j,i],P[j,i],melt[j,i]))
if ( melt[j,i] > 0.0e0 ):
#print('None T {} P {}'.format(T[j,i],P[j,i]))
pass
else:
if (i_smooth<n_smooth):
alpha_smooth.append(alpha[j,i])
beta_smooth.append(beta[j,i])
cp_smooth.append(cp[j,i])
cpmelt_smooth.append(cpmelt[j,i])
cpresidual_smooth.append(cpresidual[j,i])
alphamelt_smooth.append(alphamelt[j,i])
betamelt_smooth.append(betamelt[j,i])
alpharesidual_smooth.append(alpharesidual[j,i])
betaresidual_smooth.append(betaresidual[j,i])
rho_smooth.append(rho[j,i])
rhomelt_smooth.append(rhomelt[j,i])
rhoresidual_smooth.append(rhoresidual[j,i])
i_smooth = i_smooth + 1
else:
alpha_smooth[i_int] = alpha[j,i]
beta_smooth[i_int] = beta[j,i]
cp_smooth[i_int] = cp[j,i]
cpmelt_smooth[i_int] = cpmelt[j,i]
cpresidual_smooth[i_int] = cpresidual[j,i]
alphamelt_smooth[i_int] = alphamelt[j,i]
betamelt_smooth[i_int] = betamelt[j,i]
alpharesidual_smooth[i_int] = alpharesidual[j,i]
betaresidual_smooth[i_int] = betaresidual[j,i]
rho_smooth[i_int] = rho[j,i]
rhomelt_smooth[i_int] = rhomelt[j,i]
rhoresidual_smooth[i_int] = rhoresidual[j,i]
i_int = i_int + 1
if (i_int>=n_smooth):
i_int = 0
alpha_used = sum(alpha_smooth)/len(alpha_smooth)
beta_used = sum(beta_smooth)/len(beta_smooth)
cp_used = sum(cp_smooth)/len(cp_smooth)
rho_ref = sum(rho_smooth)/len(rho_smooth) / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
alpha_used_melt = sum(alphamelt_smooth)/len(alphamelt_smooth)
beta_used_melt = sum(betamelt_smooth)/len(betamelt_smooth)
cp_used_melt = sum(cpmelt_smooth)/len(cpmelt_smooth)
rho_ref_melt = sum(rhomelt_smooth)/len(rhomelt_smooth) / ( (1+beta_used_melt*(P[j,i]-Pref)) * (1-alpha_used_melt*(T[j,i]-Tref)) )
alpha_used_residual = sum(alpharesidual_smooth)/len(alpharesidual_smooth)
beta_used_residual = sum(betaresidual_smooth)/len(betaresidual_smooth)
cp_used_residual = sum(cpresidual_smooth)/len(cpresidual_smooth)
rho_ref_residual = sum(rhoresidual_smooth)/len(rhoresidual_smooth) / ( (1+beta_used_residual*(P[j,i]-Pref)) * (1-alpha_used_residual*(T[j,i]-Tref)) )
#if ( not alpha_beta_values):
# # we use low pressure value for alpha and beta - upper-bound estimation of it then
# alpha_used = alpha[j,i]
# beta_used = beta[j,i]
# alpha_beta_values = True
#rho_ref = rho[j,i] / ( (1+beta_used*(P[j,i]-Pref)) * (1-alpha_used*(T[j,i]-Tref)) )
melt_ref = 0.0e0
are_values = True
break
if (are_values):
for i in range(int(perplex.nt)-1,-1,-1):
if ( melt[j,i] > 0.0e0 ):
# rho[j,i] = rho_ref*(1+beta_used*(P[j,i]-Pref))*(1-alpha_used*(T[j,i]-Tref))
rho[j,i] = rho_ref*(1+betaresidual[j,i]*(P[j,i]-Pref))*(1-alpharesidual[j,i]*(T[j,i]-Tref))
#alpha[j,i] = alpha_used
#beta[j,i] = beta_used
# we do not extrapolate alpha and beta but only rho_ref
# we keep alpha and beta from residual in order to keep them P,T dependant
alpha[j,i] = alpharesidual[j,i]
beta[j,i] = betaresidual[j,i]
cp[j,i] = cpresidual[j,i]
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
else:
melt[j,i] = melt_ref
rhomelt[j,i] = float('nan')
alphamelt[j,i] = float('nan')
betamelt[j,i] = float('nan')
cpmelt[j,i] = float('nan')
break
else:
for i in range(int(perplex.nt)-1,-1,-1):
# print('melt[j,i] {}'.format(melt[j,i]))
if (melt[j,i]>melt_percent/100.0e0):
melt[j,i] = melt_percent/100.0e0
rho[j,i] = rhoresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + rhomelt[j,i]*melt_percent/100.0e0
alpha[j,i] = alpharesidual[j,i]*(100.0e0-melt_percent)/100.0e0 + alphamelt[j,i]*melt_percent/100.0e0
beta[j,i] = betaresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + betamelt[j,i]*melt_percent/100.0e0
cp[j,i] = cpresidual[j,i]*(100.0e0-melt_percent)/100.0e0 + cpmelt[j,i]*melt_percent/100.0e0
if (np.isnan(rho[j,i])):
print('NaN melt {} rho {} rhoresidual {} rhomelt {} alpha {} beta {}'.format(
melt[j,i],rho[j,i],rhoresidual[j,i], rhomelt[j,i], alpha[j,i], beta[j,i]))
quit()
perplex.rho = rho.reshape(perplex.np*perplex.nt)
perplex.T = T.reshape(perplex.np*perplex.nt)
perplex.P = P.reshape(perplex.np*perplex.nt)
perplex.alpha = alpha.reshape(perplex.np*perplex.nt)
perplex.beta = beta.reshape(perplex.np*perplex.nt)
perplex.cp = cp.reshape(perplex.np*perplex.nt)
perplex.melt = melt.reshape(perplex.np*perplex.nt)
perplex.melt = np.zeros_like(perplex.melt)
perplex.rhomelt = rhomelt.reshape(perplex.np*perplex.nt)
perplex.alphamelt = alphamelt.reshape(perplex.np*perplex.nt)
perplex.betamelt = betamelt.reshape(perplex.np*perplex.nt)
perplex.cpmelt = cpmelt.reshape(perplex.np*perplex.nt)
perplex.rhoresidual = rhoresidual.reshape(perplex.np*perplex.nt)
perplex.alpharesidual = alpharesidual.reshape(perplex.np*perplex.nt)
perplex.betaresidual = betaresidual.reshape(perplex.np*perplex.nt)
perplex.cpresidual = cpresidual.reshape(perplex.np*perplex.nt)
return perplex | 6d2473d7147cdecdcd64cbb7e3beafd3b5df5c6a | 15,345 |
def similarity_score(text_small, text_large, min_small = 10, min_large = 50):
"""
complexity: len(small) * len(large)
    @param text_small: the smaller text
    (in this case the text whose validity is being checked)
    @param text_large: the larger text (in this case the scientific study)
returns: a number (-1 <= n <= 100) representing the similarity
-1 if the data isn't populated enough for reliability
"""
# cleaning text:
filtered_small = clean(text_small)
filtered_large = clean(text_large)
fSmallLen = len(filtered_small)
fLargeLen = len(filtered_large)
if (fSmallLen < min_small) or (fLargeLen < min_large): return -1
max_rating = fLargeLen * fSmallLen
hits = 0
for sm_word in filtered_small:
for big_word in filtered_large:
if sm_word == big_word: hits += 1
return 100. * hits / max_rating | 8449b5273909382225f9de43d8fb936424d1a43e | 15,346 |
import matplotlib.pyplot as plt
def abline(a_coords, b_coords, ax=None, **kwargs):
"""Draw a line connecting a point `a_coords` with a point `b_coords`.
Parameters
----------
a_coords : array-like, shape (2,)
xy coordinates of the start of the line.
    b_coords : array-like, shape (2,)
        xy coordinates of the end of the line.
ax : matplotlib axis
Axe to plot the line
**kwargs : dict
Arguments to pass along to the matplotlib `plot` function.
"""
if ax is None:
ax = plt.gca()
    xs, ys = zip(a_coords, b_coords)  # transpose to ((x1, x2), (y1, y2))
    line, = ax.plot(xs, ys, **kwargs)
return line | e262b689046ac5dd75152b8472a841c7a1e5db29 | 15,347 |
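For example, a dashed black line from (0, 0) to (1, 2) on a fresh axes:
fig, ax = plt.subplots()
abline((0, 0), (1, 2), ax=ax, linestyle="--", color="k")
plt.show()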
def get_extensions():
"""
Returns supported extensions of the DCC
:return: list(str)
"""
return ['.hip', '.hiplc', '.hipnc', '.hip*'] | 414391db5cd4f8989967100bae347e741ca4b46c | 15,348 |
import datetime
import os
def precheck_data_format(idir, hlsp_name):
"""
Generates parameter file for check_metadata_format based on file endings.
:param idir: The directory containing HLSP files to check.
:type idir: str
:param hlsp_name: The name of the HLSP.
:type hlsp_name: str
"""
# Start logging to an output file.
logname = "precheck_data_format.log"
precheck_log = new_logger(logname)
precheck_log.info('Started at ' + datetime.datetime.now().isoformat())
# Initialize a new HLSPFile object.
new_file = HLSPFile(name=hlsp_name)
new_file.update_filepaths(input=os.path.realpath(idir))
# Get unique set of file endings.
all_file_endings = get_all_file_endings(idir)
# Sort these based on the extension type.
file_endings = set([x.split('.')[-1] for x in all_file_endings])
for fe in all_file_endings:
new_type = FileType(fe)
new_file.add_filetype(new_type)
precheck_log.info("Found the following file type: {0}".format(fe))
# Create the output file, based on the name of the HLSP.
new_file.toggle_ingest(1, state=True)
filename = new_file.save(caller=__file__)
#make_parameter_file(filename, file_endings, all_file_endings, idir)
precheck_log.info('Finished at ' + datetime.datetime.now().isoformat())
return filename | 4e8c99fa83bee879ebba73c3ef0229ffd32acd40 | 15,349 |
def calc_spatially_diffusion_factors(
regions,
fuel_disagg,
real_values,
low_congruence_crit,
speed_con_max,
p_outlier
):
"""
Calculate spatial diffusion values
Arguments
---------
regions : dict
Regions
fuel_disagg : dict
Disaggregated fuel per region
real_values : dict
Real values
p_outlier : float
Percentage of min and max outliers are flattened
    Returns
    -------
    f_reg : dict
        Spatial diffusion factors per region (not normed, only considering
        differences in speed and congruence values)
    f_reg_norm : dict
        Diffusion factors normalised with population
    f_reg_norm_abs : dict
        Diffusion values with normed population. If no value
        is larger than 1, the total sum of all shares calculated
        for every region is identical to the defined scenario variable.
Explanation
============
(I) Load diffusion values
(II) Calculate diffusion factors
(III) Calculate sigmoid diffusion values for technology
specific enduse service shares for every region
"""
# -----
# I. Diffusion diffusion values
# -----
spatial_diff_values = spatial_diffusion_values(
regions=regions,
real_values=real_values,
speed_con_max=speed_con_max,
low_congruence_crit=low_congruence_crit,
p_outlier=p_outlier)
# -----
# II. Calculation of diffusion factors (Not weighted with demand)
# -----
# Not weighted with demand
max_value_diffusion = max(list(spatial_diff_values.values()))
f_reg = {}
for region in regions:
f_reg[region] = spatial_diff_values[region] / max_value_diffusion
# Weighted with demand
f_reg_norm_abs, f_reg_norm = calc_diffusion_f(
regions,
f_reg,
spatial_diff_values,
[fuel_disagg['residential'], fuel_disagg['service'], fuel_disagg['industry']])
return f_reg, f_reg_norm, f_reg_norm_abs | 95361bb3f8ba5d3d47cd1a4ad065ec857e291f7b | 15,350 |
import pandas as pd
def get_set(path):
"""Returns a matrix of data given the path to the CSV file. The heading row and NaN values are excluded."""
df = pd.read_csv(path, sep=';', encoding='latin')
return df.dropna(subset=['PMID1', 'PMID2', 'Authorship'], how='any').values | aa701f440a9535d534826a50e8803fa0095bda25 | 15,351 |
from typing import Callable
from typing import Any
import os
import logging
def _client_get(client_create_fn: Callable[..., Any], params: ClientGetParams) -> Any:
"""
:param client_create_fn: the `boto3.client` or `boto3.resource` function
"""
which_service = params.boto3_client_name
endpoint_url = os.getenv(params.endpoint_url_key)
access_key_id = os.getenv(params.access_key_id_key)
access_key_secret = os.getenv(params.access_key_secret_key)
access_session_token = os.getenv(params.access_session_token)
# AWS_REGION is Fargate-specific, most AWS stuff uses AWS_DEFAULT_REGION.
region = os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION")
if not region:
raise FromEnvException("Please set AWS_REGION= or AWS_DEFAULT_REGION=")
# Not needed long term, more to help migrate to `env_helpers`.
# Notably, when `is_local` is not set, it won't break anything.
is_local = os.getenv("IS_LOCAL", None)
# Unlike Rust FromEnv, we rely on boto3's built in region handling.
if _running_in_localstack():
return _localstack_client(client_create_fn, params)
elif all((endpoint_url, access_key_id, access_key_secret)):
# Local, all are passed in from docker-compose.yml
logging.info(f"Creating a local client for {which_service}")
assert (
is_local != False
), f"You must pass in credentials for a local {which_service} client"
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
aws_access_key_id=access_key_id,
aws_secret_access_key=access_key_secret,
aws_session_token=access_session_token,
region_name=region,
)
elif endpoint_url and not any((access_key_id, access_key_secret)):
# Local or AWS doing cross-region stuff
return client_create_fn(
params.boto3_client_name,
endpoint_url=endpoint_url,
region_name=region,
)
elif not any((endpoint_url, access_key_id, access_key_secret)):
# AWS
logging.info("Creating a prod client")
assert (
is_local != True
), f"You can't pass in credentials for a prod {which_service} client"
return client_create_fn(
params.boto3_client_name,
region_name=region,
)
else:
raise FromEnvException(
f"You specified access key but not endpoint for {params.boto3_client_name}?"
) | 19f3ff3238768a3bea4884a7b5be21e69a749ebc | 15,352 |
import os
from datetime import datetime
def parse_metadata_from_sensorcommunity_csv_filename(filename):
"""Parse sensor id, sensor type and date from a raw luftdaten.info AQ .csv
filename.
Parameters:
filename (path): the file to parse. Format of the file is expected to be
the one used by the luftdaten.info project and saved by
sensor.community, for example as in the one below:
https://archive.sensor.community/2020-01-13/
2020-01-12_sds011_sensor_35233.csv
Return:
tuple: (sensor_id, sensor_type, date) if possible,
(None, None, None) otherwise
"""
tokens = os.path.basename(os.path.splitext(filename)[0]).split("_")
if len(tokens) == 4 and tokens[2] == "sensor":
try:
date = datetime.strptime(tokens[0], "%Y-%m-%d")
except ValueError:
date = None
sensor_type = tokens[1]
sensor_id = int(tokens[3])
return (sensor_id, sensor_type, date)
# failure
return (None, None, None) | 3b16a945930311fdbb0094956c8a58fa5f4b5a70 | 15,353 |
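A quick usage check against the filename format described in the docstring:
sensor_id, sensor_type, date = parse_metadata_from_sensorcommunity_csv_filename(
    '2020-01-12_sds011_sensor_35233.csv')
# (35233, 'sds011', datetime(2020, 1, 12, 0, 0))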
import numpy as np
import torch as th
def gaussian_target(img_shape, t, MAX_X=0.85, MIN_X=-0.85, MAX_Y=0.85, MIN_Y=-0.85, sigma2=10):
"""
Create a gaussian bivariate tensor for target or robot position.
:param t: (th.Tensor) Target position (or robot position)
"""
X_range = img_shape[1]
Y_range = img_shape[2]
XY_range = np.arange(X_range*Y_range)
for i in range(t.size(0)):
X_t = int((MAX_X+t[i][1])*(img_shape[1]/(MAX_X-MIN_X)))
Y_t = int((MAX_Y-t[i][0])*(img_shape[2]/(MAX_Y-MIN_Y)))
bi_var_gaussian = -0.5 * (((XY_range // X_range)- X_t)**2 + (XY_range - (XY_range//Y_range)*Y_range - Y_t)**2)/sigma2
img_target = th.from_numpy((np.exp(bi_var_gaussian)/(2*np.pi*sigma2)).reshape(X_range, Y_range))
img_target = img_target[None,...][None,...]
        if i == 0:
            output = img_target
        else:
            output = th.cat([output, img_target], 0)
return output | 47fbb46e2e46b1a4cc2cec3906e9c0dfb5282c0e | 15,354 |
def XMLToPython (pattern):
"""Convert the given pattern to the format required for Python
regular expressions.
@param pattern: A Unicode string defining a pattern consistent
with U{XML regular
expressions<http://www.w3.org/TR/xmlschema-2/index.html#regexs>}.
@return: A Unicode string specifying a Python regular expression
that matches the same language as C{pattern}."""
new_pattern_elts = []
new_pattern_elts.append('^')
position = 0
while position < len(pattern):
cg = MaybeMatchCharacterClass(pattern, position)
if cg is None:
new_pattern_elts.append(pattern[position])
position += 1
else:
(cps, position) = cg
new_pattern_elts.append(cps.asPattern())
new_pattern_elts.append('$')
return ''.join(new_pattern_elts) | 14072879e11ea0425903be314fdba6fb8bfd2538 | 15,355 |
import fcntl
import termios
import struct
def __termios(fd):
"""Try to discover terminal width with fcntl, struct and termios."""
#noinspection PyBroadException
try:
cr = struct.unpack('hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except Exception:
return None
return cr | 78f3450d65a453cfd22c575bbddb77fcfbef1496 | 15,356 |
def encode(numbers, GCE=GCE):
"""
do extended encoding on a list of numbers for the google chart api
>>> encode([1690, 90,1000])
'chd=e:aaBaPo'
"""
encoded = []
for number in numbers:
if number > 4095: raise ValueError('too large')
first, second = divmod(number, len(GCE))
encoded.append("%s%s" % (GCE[first], GCE[second]))
return "chd=e:%s" % ''.join(encoded) | 9a15134e0266a0dfc60b654b9da8d2a6169bee7f | 15,357 |
import numpy as np
def compute_f_mat(mat_rat, user_count, movie_count):
    """
    compute the f matrix
    :param mat_rat: user's rating matrix ([user number, movie number]) where 1 means the user likes the indexed movie.
    :param user_count: number of movies each user has watched.
    :param movie_count: number of users that have rated each movie.
    :return: f matrix
    """
temp = (mat_rat / user_count.reshape([-1,1]) )/ movie_count.reshape([1,-1])
D = np.dot(mat_rat.T, temp)
f = np.dot(D, mat_rat.T).T
return f | ff13544c28dde9025630878aed0844f56453e08e | 15,358 |
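A small worked example with toy data (shapes chosen for illustration only):
mat_rat = np.array([[1., 0., 1.], [0., 1., 1.]])  # 2 users x 3 movies
user_count = mat_rat.sum(axis=1)    # movies watched per user
movie_count = mat_rat.sum(axis=0)   # users who rated each movie
f = compute_f_mat(mat_rat, user_count, movie_count)  # shape (2, 3)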
import json
def metadata(path="xdress/metadata.json"):
"""Build a metadata file."""
md = {}
md.update(INFO)
# FIXME: Add the contents of CMakeCache.txt to the metadata dictionary
# write the metadata file
with open(path, 'w') as f:
json.dump(md, f, indent=2)
return md | cbc1981ecd9c7146f082321a1b05c094cfdd0cc6 | 15,359 |
import logging
from tqdm import tqdm
def query_assemblies(organism, output, quiet=False):
"""from a taxid or a organism name, download all refseq assemblies
"""
logger = logging.getLogger(__name__)
assemblies = []
genomes = Entrez.read(Entrez.esearch(
"assembly",
term=f"{organism}[Organism]",
retmax=10000))["IdList"]
logger.info(
f"Found {len(genomes)} organisms in ncbi assemblies for {organism}")
logger.info("Downloading the assemblies. Please be patient.")
for id in tqdm(genomes, disable=quiet):
try:
entrez_assembly = Entrez.read(
Entrez.esummary(
db="assembly",
id=id))["DocumentSummarySet"]["DocumentSummary"][0]
except KeyError as e:
entrez_assembly = Entrez.read(
Entrez.esummary(db="assembly", id=id))["DocumentSummarySet"]
print(entrez_assembly.keys())
raise
else:
assembly = Assembly(entrez_assembly)
output_file = f"{output}/{assembly.accession}.fasta"
download(assembly.ftp_refseq, output_file)
assemblies.append(assembly)
return assemblies | 48b4f4946f4368865b2934f1ac30ce650a00b5d0 | 15,360 |
def main(params):
"""Loads the file containing the collation results from Wdiff.
Then, identifies various kinds of differences that can be observed.
Assembles this information for each difference between the two texts."""
print("\n== coleto: running text_analyze. ==")
difftext = get_difftext(params["wdiffed_file"])
analysisresults = analyse_diffs(difftext, params)
analysissummary = save_summary(difftext, analysisresults,
params["analysissummary_file"])
save_analysis(analysisresults, params["analysis_file"])
return analysissummary | 425aa27fb7ba1ee0b1aa23c2489e8431b4a57726 | 15,361 |
from copy import deepcopy
def matrix_horizontal_stack(matrices: list, _deepcopy: bool = True):
"""
stack matrices horizontally.
:param matrices: (list of Matrix)
:param _deepcopy: (bool)
:return: (Matrix)
"""
assert matrices
for _i in range(1, len(matrices)):
assert matrices[_i].basic_data_type() == matrices[0].basic_data_type()
assert matrices[_i].size()[0] == matrices[0].size()[0]
if _deepcopy:
_matrices = deepcopy(matrices)
else:
_matrices = matrices
_kernel = []
for _i in range(_matrices[0].size()[0]):
_kernel.append([])
for _j in range(len(_matrices)):
for _k in range(_matrices[_j].size()[1]):
_kernel[_i].append(_matrices[_j].kernel[_i][_k])
return Matrix(_kernel) | 83419b0f77fc055145026b61ab8a0200172fcb62 | 15,362 |
import numpy as np
from numba import prange  # behaves like plain range unless the function is numba-compiled
def inds_to_invmap_as_array(inds: np.ndarray):
"""
Returns a mapping that maps global indices to local ones
as an array.
Parameters
----------
inds : numpy.ndarray
An array of global indices.
Returns
-------
numpy.ndarray
Mapping from global to local.
"""
res = np.zeros(inds.max() + 1, dtype=inds.dtype)
for i in prange(len(inds)):
res[inds[i]] = i
return res | 25dc5fa9f1225cb9da64a513ebea3dff935c3c44 | 15,363 |
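A quick check of the inverse mapping with toy indices (assumes Numba is available for the import above):
inv = inds_to_invmap_as_array(np.array([4, 2, 7, 0]))
# inv[4] == 0, inv[2] == 1, inv[7] == 2, inv[0] == 3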
from django.urls import include, path
def install_plugin_urls():
"""
urlpatterns - bCTF original urlpatterns
"""
urls = []
for plugin in list_plugins():
urls.append(path('{0}/'.format(plugin), include('plugins.{0}.urls'.format(plugin))))
return urls | 798706e47b1e79d2af99583b209eb7078db95902 | 15,364 |
def ident_keys(item, cfg):
"""Returns the list of keys in item which gives its identity
:param item: dict with type information
:param cfg: config options
:returns: a list of fields for item that give it its identity
:rtype: list
"""
try:
return content.ident_keys(item)
except Exception as e:
logger.error('Failed to extract ident keys for %s' % (item), e)
raise e | e911ea9bb0dbccbdf4d0ab5cdfeb5742297ae9e8 | 15,365 |
def playlists_by_date(formatter, albums):
"""Returns a single playlist of favorite tracks from albums
sorted by decreasing review date.
"""
sorted_tracks = []
sorted_albums = sorted(albums, key=lambda x: x["date"], reverse=True)
for album in sorted_albums:
if album["picks"] is None:
continue
tracks = [
{
"artist_tag": album["artist_tag"],
"album_tag": album["album_tag"],
"artist": album["artist"],
"album": album["album"],
"track": album["tracks"][p],
}
for p in album["picks"]
]
sorted_tracks.extend(tracks)
return formatter.parse_list(sorted_tracks, formatter.format_track) | 0448fd941c0219f6e854a15df62e4811c1cecf3e | 15,366 |
def merge_two_sorted_array(l1, l2):
"""
Time Complexity: O(n+m)
Space Complexity: O(n+m)
:param l1: List[int]
:param l2: List[int]
:return: List[int]
"""
if not l1:
return l2
if not l2:
return l1
merge_list = []
i1 = 0
i2 = 0
l1_len = len(l1) - 1
l2_len = len(l2) - 1
while i1 <= l1_len and i2 <= l2_len:
if l1[i1] < l2[i2]:
merge_list.append(l1[i1])
i1 += 1
else:
merge_list.append(l2[i2])
i2 += 1
while i1 <= l1_len:
merge_list.append(l1[i1])
i1 += 1
while i2 <= l2_len:
merge_list.append(l2[i2])
i2 += 1
return merge_list | 2671d21707056741bbdc4e3590135e7e1be4c7e9 | 15,367 |
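Example calls with toy lists:
merge_two_sorted_array([1, 3, 5], [2, 4, 6])   # [1, 2, 3, 4, 5, 6]
merge_two_sorted_array([], [2, 4])             # [2, 4]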
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
def regression_metrics(y_true, y_pred):
    """
    param1: pandas.Series/pandas.DataFrame/numpy.darray
    param2: pandas.Series/pandas.DataFrame/numpy.darray
    return: dictionary
    Function accepts the actual labels from the dataset and the values predicted by the model, and uses
    these two inputs to calculate the r2 score, mean absolute error, mean squared error, and root mean squared error, adding each to the result dictionary.
    Finally returns the result dictionary.
    """
result=dict()
result['R2']=round(r2_score(y_true, y_pred),3)
result['MAE']=round(mean_absolute_error(y_true, y_pred),3)
result['MSE']=round(mean_squared_error(y_true, y_pred),3)
result['RMSE']=round(mean_squared_error(y_true, y_pred,squared=False),3)
return result | 085bf6d9006443c752f5b665480fce4f24e5f850 | 15,368 |
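A usage sketch with toy values (rounded results shown for illustration):
y_true = [3.0, -0.5, 2.0, 7.0]
y_pred = [2.5, 0.0, 2.0, 8.0]
regression_metrics(y_true, y_pred)
# {'R2': 0.949, 'MAE': 0.5, 'MSE': 0.375, 'RMSE': 0.612}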
def convert_from_quint8(arr):
"""
Dequantize a quint8 NumPy ndarray into a float one.
:param arr: Input ndarray.
"""
assert isinstance(arr, np.ndarray)
assert (
"mgb_dtype" in arr.dtype.metadata
and arr.dtype.metadata["mgb_dtype"]["name"] == "Quantized8Asymm"
), "arr should be a ndarray with quint8 dtype"
scale, zp = (
arr.dtype.metadata["mgb_dtype"]["scale"],
arr.dtype.metadata["mgb_dtype"]["zero_point"],
)
return (arr.astype(np.float32) - zp) * scale | 50143a309108bf68cb65b266e2aec84090eb30e6 | 15,369 |
def classpartial(*args, **kwargs):
"""Bind arguments to a class's __init__."""
cls, args = args[0], args[1:]
class Partial(cls):
__doc__ = cls.__doc__
        def __new__(cls_):
            return cls(*args, **kwargs)
Partial.__name__ = cls.__name__
return Partial | 7cdc96e314a2ce3c658ecb886922df4d7bda5b99 | 15,370 |
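A usage sketch with a hypothetical class:
class Greeter:
    def __init__(self, name, greeting='Hello'):
        self.message = '{}, {}!'.format(greeting, name)

HiBob = classpartial(Greeter, 'Bob', greeting='Hi')
print(HiBob().message)  # Hi, Bob!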
import os
from lxml import objectify
def load_xml(xml_path):
    """
    Loads an XML file into an etree.ElementTree.
    """
if os.path.exists(xml_path):
xml_io = open(xml_path, 'rb')
else:
raise ValueError(xml_path)
xml = objectify.parse(xml_io)
xml_io.close()
return xml | ef02b163fbcabca797ba8346c5fc0bf0bd185365 | 15,371 |
import numpy as np
def alphabetize_concat(input_list):
"""
Takes a python list.
List can contain arbitrary objects with .__str__() method
(so string, int, float are all ok.)
Sorts them alphanumerically.
Returns a single string with result joined by underscores.
"""
array = np.array(input_list, dtype=str)
array.sort()
return '_'.join(array) | 4bac3712696fd776b96ca8501f696c505c05e699 | 15,372 |
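Example (note that digits sort before letters in NumPy's string ordering):
alphabetize_concat(['banana', 'apple', 10])  # '10_apple_banana'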
import numpy as np
import tensorflow as tf
def kick(state, ai, ac, af, cosmology=cosmo, dtype=np.float32, name="Kick",
         **kwargs):
"""Kick the particles given the state
Parameters
----------
state: tensor
Input state tensor of shape (3, batch_size, npart, 3)
    ai, ac, af: float
        Scale factors at the initial, intermediate (kick) and final steps
    """
with tf.name_scope(name):
state = tf.convert_to_tensor(state, name="state")
        fac = 1 / (ac ** 2 * E(cosmology, ac)) * (Gf(cosmology, af) - Gf(cosmology, ai)) / gf(cosmology, ac)
indices = tf.constant([[1]])
#indices = tf.constant([1])
Xjl = tf.multiply(fac, state[2])
update = tf.expand_dims(Xjl, axis=0)
shape = state.shape
update = tf.scatter_nd(indices, update, shape)
state = tf.add(state, update)
return state | e7deeca9001fccc078f2f8ab7e51ac38d72a1125 | 15,373 |
def check_similarity(var1, var2, error):
    """
    Check the similarity between two numbers, considering an error margin.
    Parameters:
    -----------
    var1: float
    var2: float
    error: float
    Returns:
    -----------
    similarity: boolean
    """
    return (var2 - error) <= var1 <= (var2 + error) | 305fd08cf4d8b1718d8560315ebf7bd03a4c7e2a | 15,374
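Example calls:
check_similarity(10.2, 10.0, 0.5)  # True
check_similarity(10.6, 10.0, 0.5)  # False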
def model_type_by_code(algorithm_code):
"""
    Method which returns the algorithm type for a given algorithm code.
    algorithm_code MUST be a value convertible to int
:param algorithm_code: code of algorithm
:return: algorithm type name by algorithm code or None
"""
# invalid algorithm code case
if algorithm_code not in ALGORITHM[ALGORITHM_CODE].keys():
return None
return ALGORITHM[TYPE][algorithm_code] | bcd811e200855cc026134ce05b67add807e176ca | 15,375 |
def getCasing(word):
""" Returns the casing of a word"""
if len(word) == 0:
return 'other'
elif word.isdigit(): #Is a digit
return 'numeric'
elif word.islower(): #All lower case
return 'allLower'
elif word.isupper(): #All upper case
return 'allUpper'
elif word[0].isupper(): #is a title, initial char upper, then all lower
return 'initialUpper'
return 'other' | 2af70926c0cbbde6310abb573ccc3ee8260b86bd | 15,376 |
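Example covering each casing class:
[getCasing(w) for w in ['2024', 'word', 'WORD', 'Word', 'wOrd']]
# ['numeric', 'allLower', 'allUpper', 'initialUpper', 'other']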
def normalize_angle(deg):
"""
Take an angle in degrees and return it as a value between 0 and 360
:param deg: float or int
:return: float or int, value between 0 and 360
"""
angle = deg
while angle > 360:
angle -= 360
    while angle < 0:
angle += 360
return angle | cd4788819bbc8fce17ca7c7b1b320499a3893dee | 15,377 |
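Example calls:
normalize_angle(725)   # 5
normalize_angle(-30)   # 330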
import datetime
from dateutil import tz
import math
def current_global_irradiance(site_properties, solar_properties, timestamp):
"""Calculate the clear-sky POA (plane of array) irradiance for a specific time (seconds timestamp)."""
dt = datetime.datetime.fromtimestamp(timestamp=timestamp, tz=tz.gettz(site_properties.tz))
n = dt.timetuple().tm_yday
sigma = math.radians(solar_properties.tilt)
rho = solar_properties.get('rho', 0.0)
C = 0.095 + 0.04 * math.sin(math.radians((n - 100) / 365))
sin_sigma = math.sin(sigma)
cos_sigma = math.cos(sigma)
altitude = get_altitude(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
beta = math.radians(altitude)
sin_beta = math.sin(beta)
cos_beta = math.cos(beta)
azimuth = get_azimuth(latitude_deg=site_properties.latitude, longitude_deg=site_properties.longitude, when=dt)
phi_s = math.radians(180 - azimuth)
phi_c = math.radians(180 - solar_properties.azimuth)
phi = phi_s - phi_c
cos_phi = math.cos(phi)
    # Workaround for a quirk of pysolar: the airmass for a sun elevation (altitude)
    # of zero is infinite, and very small values close to zero result in NaNs being
    # returned rather than zero
if altitude < 0.0:
altitude = -1.0
cos_theta = cos_beta * cos_phi * sin_sigma + sin_beta * cos_sigma
ib = get_radiation_direct(when=dt, altitude_deg=altitude)
ibc = ib * cos_theta
idc = C * ib * (1 + cos_sigma) / 2
irc = rho * ib * (sin_beta + C) * ((1 - cos_sigma) / 2)
igc = ibc + idc + irc
# If we still get a bad result just return 0
if math.isnan(igc):
igc = 0.0
return igc | d8e180b9768d5cf6c3064a7d30a2e7d918307366 | 15,378 |
from datetime import datetime
def date_formatting(format_date, date_selected):
"""Date formatting management.
Arguments:
format_date {str} -- Date
date_selected {str} -- Date user input
Returns:
str -- formatted date
"""
if len(date_selected) == 19:
date_selected = datetime.strptime(
date_selected, "%d/%m/%Y %H:%M:%S")
elif len(date_selected) == 10:
date_selected = datetime.strptime(date_selected, "%d/%m/%Y")
try:
if "yyyy" in format_date:
format_date = format_date.replace(
"yyyy", date_selected.strftime("%Y"))
elif "yy" in format_date:
format_date = format_date.replace(
"yy", date_selected.strftime("%y"))
if "mm" in format_date:
format_date = format_date.replace(
"mm", date_selected.strftime("%m"))
if "dd" in format_date:
format_date = format_date.replace(
"dd", date_selected.strftime("%d"))
if "hh" in format_date:
format_date = format_date.replace(
"hh", date_selected.strftime("%H"))
if "nn" in format_date:
format_date = format_date.replace(
"nn", date_selected.strftime("%M"))
if "ss" in format_date:
format_date = format_date.replace(
"ss", date_selected.strftime("%S"))
return (format_date, None)
except AttributeError:
return (
None,
_("Date entry error, format is dd/mm/yyyy or dd/mm/yyyy hh:mm:ss")
) | 48ff4cb59de3e8f75238420d8d211812148db34c | 15,379 |
def parse_activity_from_metadata(metadata):
"""Parse activity name from metadata
Args:
metadata: List of metadata from log file
Returns
Activity name from metadata"""
return _parse_type_metadata(metadata)[1] | c583d34a8cb0db8ddf26ff79d1a0885aab5c6af9 | 15,380 |
def mask_data_by_FeatureMask(eopatch, data_da, mask):
"""
    Creates a copy of the array and inserts 0 where the data is masked.
    :param eopatch: EOPatch holding the mask feature
    :param data_da: dataarray
    :type data_da: xarray.DataArray
    :param mask: name of the mask feature in eopatch
    :return: dataarray
    :rtype: xarray.DataArray
"""
mask = eopatch[FeatureType.MASK][mask]
if len(data_da.values.shape) == 4:
mask = np.repeat(mask, data_da.values.shape[-1], -1)
else:
mask = np.squeeze(mask, axis=-1)
data_da = data_da.copy()
data_da.values[~mask] = 0
return data_da | 6639cc2cbf4956edbd637f07308fb33f00fcb8af | 15,381 |
def makeFields(prefix, n):
"""Generate a list of field names with this prefix up to n"""
    return [prefix + str(i) for i in range(1, n + 1)] | 435571557ef556b99c4729500f372cc5c9180052 | 15,382
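Example:
makeFields('field', 3)  # ['field1', 'field2', 'field3']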
def process_input_dir(input_dir):
"""
Find all image file paths in subdirs, convert to str and extract labels from subdir names
:param input_dir Path object for parent directory e.g. train
:returns: list of file paths as str, list of image labels as str
"""
file_paths = list(input_dir.rglob('*.png'))
file_path_strings = [str(path) for path in file_paths]
label_strings = [path.parent.name for path in file_paths]
return file_path_strings, label_strings | 569d4539368888c91a12538156c611d311da03b6 | 15,383 |
def fak(n):
""" Berechnet die Fakultaet der ganzen Zahl n. """
erg = 1
for i in range(2, n+1):
erg *= i
return erg | 9df6f4fa912a25535369f4deb0a06baef8e6bdcc | 15,384 |
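Example calls:
fak(5)  # 120
fak(0)  # 1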
import csv
import logging
log = logging.getLogger(__name__)
def save(data, destination_path, **kwargs):
"""Generate a csv file from a datastructure.
:param data: Currently data must be a list of dicts.
:type data: list of dict
:param destination_path: Path of the resulting CSV file.
:type destination_path: str
:raises ValueError: If the format of data cannot be determined.
:return: Returns True on success.
:rtype: bool
:Keyword Arguments:
Currently None
"""
if isinstance(data, list):
if isinstance(data[0], dict):
with open(destination_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=data[0].keys(), dialect=csv.excel)
writer.writeheader()
writer.writerows(data)
log.debug(
f"successfully written {len(data)} lines of data to {destination_path}"
)
return True
raise ValueError(f"csv save not implemented for list of {type(data[0])}")
raise ValueError(f"csv save not implemented for {type(data)}") | b9b1d33ab72602a4887b0929f86d59e283d4193c | 15,385 |
import re
def create_sequences_sonnets(sonnets):
"""
This creates sequences as done in Homework 6, by mapping each word
to an integer in order to create a series of sequences. This function
specifically makes entire sonnets into individual sequences
and returns the list of processed sonnets back to be used in the basic
HMM notebook for generation.
"""
sequences = []
obs_counter = 0
obs_map = {}
for sonnet in sonnets:
sequence = []
for i, line in enumerate(sonnet):
split = line.split()
for word in split:
word = re.sub(r'[^\w]', '', word).lower()
if word not in obs_map:
# Add unique words to the observations map.
obs_map[word] = obs_counter
obs_counter += 1
# Add the encoded word.
sequence.append(obs_map[word])
# Add the encoded sequence.
sequences.append(sequence)
return obs_map, sequences | 56087140fe5ed8934b64a18567b4e9023ddc6f59 | 15,386 |
import pathlib
import sys
import shutil
def detect_venv_command(command_name: str) -> pathlib.Path:
"""Detect a command in the same venv as the current utility."""
venv_path = pathlib.Path(sys.argv[0]).parent.absolute()
expected_command_path = venv_path / command_name
if expected_command_path.is_file():
result = expected_command_path
else:
# assume command in the PATH when run...
found = shutil.which(command_name)
if found:
result = pathlib.Path(found)
else:
raise CommandError(
"Command not in users path or venv, {0}".format(command_name)
)
return result | fbc7cbda5a4bf4ba9c2d3654176c2bc92692e884 | 15,387 |
import numpy as np
from skimage import color
def l_to_rgb(img_l):
"""
Convert a numpy array (l channel) into an rgb image
:param img_l:
:return:
"""
    gray = np.squeeze(255 * (img_l + 1) / 2)
    return color.gray2rgb(gray) / 255 | 362a1ef926e780b311902c3637a5299afbce4c6a | 15,388
def blockchain_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp):
"""
Checks if current time in millis exceeds the time specified in condition
"""
try:
expected_mili_time = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
current_time = timestamp
if current_time <= expected_mili_time:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None | 7b3ce8801239a524b150c9191b49eb24575b3fbb | 15,389 |
def show_aip(mets_file):
"""Show a METS file"""
mets_instance = METS.query.filter_by(metsfile='%s' % (mets_file)).first()
level = mets_instance.level
original_files = mets_instance.metslist
dcmetadata = mets_instance.dcmetadata
divs = mets_instance.divs
filecount = mets_instance.originalfilecount
aip_uuid = mets_file
for element in dcmetadata:
tag = element.get('element')
if tag and tag == 'ark identifier':
aip_uuid = element['value']
break
return render_template(
'aip.html', original_files=original_files,
mets_file=mets_file, level=level, dcmetadata=dcmetadata, divs=divs,
filecount=filecount, aip_uuid=aip_uuid
) | 606dfd4dba45fe8dae918f795f27bfeddb0fcd70 | 15,390 |
def testCartesianEphemeris(
ephemeris_actual,
ephemeris_desired,
position_tol=1*u.m,
velocity_tol=(1*u.mm/u.s),
magnitude=True,
raise_error=True
):
"""
Tests that the two sets of cartesian ephemeris are within the desired absolute tolerances
of each other. The absolute difference is calculated as |actual - desired|.
Parameters
----------
ephemeris_actual : `~numpy.ndarray` (N, 3) or (N, 6)
Array of ephemeris to compare to the desired ephemeris, may optionally
include velocities.
Assumed units for:
positions : AU,
velocities : AU per day
ephemeris_desired : `~numpy.ndarray` (N, 3) or (N, 6)
Array of desired ephemeris to which to compare the actual ephemeris to, may optionally
include velocities.
Assumed units for:
positions : AU,
velocities : AU per day
position_tol : `~astropy.units.quantity.Quantity` (1)
Absolute tolerance positions need to satisfy (x, y, z, r).
velocity_tol : `~astropy.units.quantity.Quantity` (1)
Absolute tolerance velocity need to satisfy. (vx, vy, vz, v).
    magnitude : bool
        Test the magnitude of the position difference
        and velocity difference vectors as opposed to testing per individual coordinate.
    raise_error : bool
        If True, raise an AssertionError when any difference exceeds its tolerance.
Raises
------
AssertionError:
If |ephemeris_actual - ephemeris_desired| > tolerance.
ValueError:
If ephemeris shapes are not equal.
ValueError:
If coordinate dimensions are not one of 3 or 6.
Returns
-------
    differences : dict
        Per-coordinate absolute differences.
    statistics : dict
        Summary statistics of the differences.
    error : bool
        Result of the most recent tolerance check.
"""
any_error = False
error_message = "\n"
differences = {}
statistics = {}
if ephemeris_actual.shape != ephemeris_desired.shape:
err = (
"The shapes of the actual and desired ephemeris should be the same."
)
raise ValueError(err)
N, D = ephemeris_actual.shape
if D not in (3, 6):
err = (
"The number of coordinate dimensions should be one of 3 or 6.\n"
"If 3 then the expected inputs are x, y, z positions in AU.\n"
"If 6 then the expected inputs are x, y, z postions in AU\n"
"and vx, vy, vz velocities in AU per day."
)
raise ValueError(err)
# Test positions
if magnitude:
names = ["r"]
else:
names = ["x", "y", "z"]
diff, stats, error = _evaluateDifference(
ephemeris_actual[:, :3],
ephemeris_desired[:, :3],
u.AU,
position_tol,
magnitude=magnitude
)
for i, n in enumerate(names):
differences[n] = diff[:, i]
statistics[n] = {k : v[i] for k, v in stats.items()}
# If any of the differences between desired and actual are
# greater than the allowed tolerance set any_error to True
# and build the error message
if error:
any_error = True
error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, position_tol)
error_message = __statsToErrorMessage(
stats,
error_message
)
if D == 6:
# Test velocities
if magnitude:
names = ["v"]
else:
names = ["vx", "vy", "vz"]
diff, stats, error = _evaluateDifference(
ephemeris_actual[:, 3:],
ephemeris_desired[:, 3:],
(u.AU / u.d),
velocity_tol,
magnitude=magnitude
)
for i, n in enumerate(names):
differences[n] = diff[:, i]
statistics[n] = {k : v[i] for k, v in stats.items()}
# If any of the differences between desired and actual are
# greater than the allowed tolerance set any_error to True
# and build the error message
if error:
any_error = True
error_message += "{} difference (|actual - desired|) is not within {}.\n".format(names, velocity_tol)
error_message = __statsToErrorMessage(
stats,
error_message
)
if any_error and raise_error:
raise AssertionError(error_message)
return differences, statistics, error | 5ab38140ed7ff446f0f961e147e7d8af3e6c97e0 | 15,391 |
import requests
def get_longitude_latitude(city_info, station):
    """
    Query the longitude and latitude of a metro station with the AMap (Gaode Maps) API.
    The key below has to be requested from the AMap website:
    https://lbs.amap.com/api/webservice/guide/api/georegeo
    :param city_info: metro system of a specific city, e.g. 广州市地铁 (Guangzhou Metro)
    :param station: name of the metro station, e.g. 珠江新城站 (Zhujiang New Town station)
    :return: longitude and latitude
    """
    addr = city_info + station
    print('*Looking up location: ' + addr)
    parameters = {'address': addr, 'key': '98a3444618af14c0f20c601f5a442000'}
    base = 'https://restapi.amap.com/v3/geocode/geo'
    response = requests.get(base, parameters, timeout=10)  # 10 s timeout; can be slow through a global proxy
    if response.status_code == 200:
        answer = response.json()
        x, y = answer['geocodes'][0]['location'].split(',')
        coor = (float(x), float(y))
        print('*Coordinates of ' + station + ':', coor)
        return coor
    else:
        return (None, None) | 9b0132702e14af9dec1ce65724139af0188b14a0 | 15,392
def make_resource_object(resource_type, credentials_path):
"""Creates and configures the service object for operating on resources.
Args:
resource_type: [string] The Google API resource type to operate on.
credentials_path: [string] Path to credentials file, or none for default.
"""
try:
api_name, resource = resource_type.split('.', 1)
except ValueError:
raise ValueError('resource_type "{0}" is not in form <api>.<resource>'
.format(resource_type))
version = determine_version(api_name)
service = make_service(api_name, version, credentials_path)
path = resource.split('.')
node = service
for elem in path:
try:
node = getattr(node, elem)()
except AttributeError:
path_str = '.'.join(path[0:path.index(elem)])
raise AttributeError('"{0}{1}" has no attribute "{2}"'.format(
api_name, '.' + path_str if path_str else '', elem))
return node | 018cac83513b61c8bc99e06a07ded004685016b2 | 15,393 |
def AreBenchmarkResultsDifferent(result_dict_1, result_dict_2, test=MANN,
significance_level=0.05):
"""Runs the given test on the results of each metric in the benchmarks.
Checks if the dicts have been created from the same benchmark, i.e. if
metric names match (e.g. first_non_empty_paint_time). Then runs the specified
statistical test on each metric's samples to find if they vary significantly.
Args:
result_dict_1: Benchmark result dict of format {metric: list of values}.
result_dict_2: Benchmark result dict of format {metric: list of values}.
test: Statistical test that is used.
significance_level: The significance level the p-value is compared against.
Returns:
test_outcome_dict: Format {metric: (bool is_different, p-value)}.
"""
AssertThatKeysMatch(result_dict_1, result_dict_2)
test_outcome_dict = {}
for metric in result_dict_1:
is_different, p_value = AreSamplesDifferent(result_dict_1[metric],
result_dict_2[metric],
test, significance_level)
test_outcome_dict[metric] = (is_different, p_value)
return test_outcome_dict | d9e1eaa16c2329511dd3e1fc7e5cad63cab0c208 | 15,394 |
import os
import numpy as np
import xml.etree.ElementTree as ET
def _load_pascal_annotation(image_index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
#image_index = _load_image_set_index()
classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
num_classes = len(classes)
    _class_to_ind = dict(zip(classes, range(num_classes)))
_data_path = "/var/services/homes/kchakka/py-faster-rcnn/VOCdevkit/VOC2007"
image_index = [image_index]
for index in image_index:
filename = os.path.join(_data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if True:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
##
# commented below by chaitu
##
#overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
##
# commented above by chaitu
##
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = _class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
#overlaps[ix, cls] = 1.0
#seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
#overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes' : boxes, 'gt_classes' : gt_classes} | e656d78003d17ca7b8dae204ed9ea2812e5871dc | 15,395 |
def create_global_var(shape,
value,
dtype,
persistable=False,
force_cpu=False,
name=None):
"""
This function creates a new tensor variable with value in the global block(block 0).
Parameters:
shape (list of int): Shape of the variable
value (float): The value of the variable. The new created
variable will be filled with it.
dtype (str): Data type of the variable
persistable (bool, optional): If this variable is persistable.
Default: False
force_cpu (bool, optional): Force this variable to be on CPU.
Default: False
name (str, optional): For detailed information, please refer to
:ref:`api_guide_Name` . Usually name is no need to set and None by default.
Returns:
Variable: The created Variable
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
var = layers.create_global_var(shape=[2,3], value=1.0, dtype='float32',
persistable=True, force_cpu=True, name='new_var')
"""
helper = LayerHelper("global_var", **locals())
var = helper.create_global_variable(
dtype=dtype,
shape=shape,
persistable=persistable,
name=name,
stop_gradient=True)
helper.set_variable_initializer(
var, initializer=Constant(
value=float(value), force_cpu=force_cpu))
return var | 0d64b4bd15c97b2f32058bfe298249c3e528ec16 | 15,396 |
import numpy as np
def select(population, fitness_val):
    """
    Selection step of a genetic algorithm, implemented as roulette-wheel selection.
    :param population: genotypes of the population
    :param fitness_val: fitness values of the population
    :return selected_pop: population after selection
    """
f_sum = sum(fitness_val)
cumulative = []
for i in range(1, len(fitness_val)+1):
cumulative.append(sum(fitness_val[:i]) / f_sum)
selected_pop = []
for i in range(len(fitness_val)):
rand = np.random.rand()
prand = [(c - rand) for c in cumulative]
j = 0
while prand[j] < 0:
j = j+1
selected_pop.append(population[j])
return selected_pop | dd3529eaf6ac35801c589152078e7fe1dd0ed9fe | 15,397 |
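A usage sketch with a toy population (the result is random, but fitter genotypes are drawn more often):
population = ['00', '01', '10', '11']
fitness_val = [1.0, 2.0, 3.0, 4.0]
new_pop = select(population, fitness_val)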
def _nan_helper(y, nan=False, inf=False, undef=None):
"""
Helper to handle indices and logical indices of NaNs, Infs or undefs.
Definition
----------
def _nan_helper(y, nan=False, inf=False, undef=None):
Input
-----
y 1d numpy array with possible missing values
Optional Input
--------------
At least one of the following has to be given
nan if True, check only for NaN and not Inf.
inf if True, check only for Inf and not NaN.
undef if given then check for undef value rather than NaN and Inf.
Output
------
ind logical indices of missing values
find function, with signature indices = find(ind),
to convert logical indices of NaNs to 'equivalent' indices
Examples
--------
>>> # linear interpolation of NaNs
>>> y = np.array([1, np.nan, 3])
>>> nans, z = _nan_helper(y, nan=True)
>>> y[nans] = np.interp(z(nans), z(~nans), y[~nans])
History
-------
Written, Matthias Cuntz, Jul 2013 - modified from
http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array
Modified, Matthias Cuntz, Apr 2014 - assert
Matthias Cuntz, Sep 2021 - code refactoring
"""
assert not ((not nan) & (not inf) & (undef is None)), (
'at least one of nan, inf or undef has to be given.')
out = np.zeros(y.shape, dtype=bool)
if nan:
out = out | np.isnan(y)
if inf:
out = out | np.isinf(y)
if undef is not None:
out = out | (y == undef)
return out, lambda ind: ind.nonzero()[0] | 742433c2140f4827f11e79c691e3a16be124ef99 | 15,398 |
def unpacking(block_dets, *, repeat=False, **_kwargs):
"""
Identify name unpacking e.g. x, y = coord
"""
unpacked_els = block_dets.element.xpath(ASSIGN_UNPACKING_XPATH)
if not unpacked_els:
return None
title = layout("""\
### Name unpacking
""")
summary_bits = []
for unpacked_el in unpacked_els:
unpacked_names = [
name_el.get('id') for name_el in unpacked_el.xpath('elts/Name')]
if not unpacked_names:
continue
nice_str_list = gen_utils.get_nice_str_list(unpacked_names, quoter='`')
summary_bits.append(layout(f"""\
Your code uses unpacking to assign names {nice_str_list}
"""))
summary = ''.join(summary_bits)
if not repeat:
unpacking_msg = get_unpacking_msg()
else:
unpacking_msg = ''
message = {
conf.Level.BRIEF: title + summary,
conf.Level.EXTRA: unpacking_msg,
}
return message | 512238569efe4c17ef7afd3a26e2bc17a0f77cfb | 15,399 |