content | sha1 | id |
---|---|---|
import numpy as np
import qutip as qtp
def seepage_from_unitary(U):
    """
    Calculates seepage by summing over all input and output states in the
    leakage subspace.
    L1 = 1 - sum_i sum_j |<phi_i|U|phi_j>|**2
    """
sump = 0
for i in range(2):
for j in range(2):
bra_i = qtp.tensor(qtp.ket([i], dim=[2]),
qtp.ket([2], dim=[3])).dag()
ket_j = qtp.tensor(qtp.ket([j], dim=[2]),
qtp.ket([2], dim=[3]))
p = np.abs((bra_i*U*ket_j).data[0, 0])**2
sump += p
sump /= 2 # divide by dimension of comp subspace
L1 = 1-sump
return L1 | 8bd4185a69d7280868871dc3c62bb10abac1579c | 5,300 |
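A minimal usage sketch (added here, not part of the original snippet), assuming QuTiP 4.x and the qtp/np imports above; the identity unitary should give zero seepage:

U_identity = qtp.tensor(qtp.qeye(2), qtp.qeye(3))  # qubit (2 levels) x qutrit (3 levels)
print(seepage_from_unitary(U_identity))  # expected: 0.0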
def auto_get(*args):
"""
auto_get(type, lowEA, highEA) -> ea_t
    Retrieve an address from the queues according to their priority. Returns
    'BADADDR' if no address in the range ['lowEA', 'highEA') is found in the
    queues. Otherwise, *type will hold the queue type.
@param type (C++: atype_t *)
@param lowEA (C++: ea_t)
@param highEA (C++: ea_t)
"""
return _ida_auto.auto_get(*args) | 810ea49a414e3a044bc94cac4d780c7b624433a2 | 5,301 |
def isLineForUser(someLine=None, username=None):
"""determins if a raw output line is for a user"""
doesMatch = False
try:
doesMatch = utils.isLineForMatch(someLine, username)
except Exception as matchErr:
logs.log(str(type(matchErr)), "Error")
logs.log(str(matchErr), "Error")
logs.log(str((matchErr.args)), "Error")
matchErr = None
del matchErr
doesMatch = False
return doesMatch | dbf6b92976c8419fc3b9271eb87871c8c7cf6a1b | 5,302 |
import importlib
def get_view_class(callback):
"""
Try to get the class from given callback
"""
if hasattr(callback, 'view_class'):
return callback.view_class
if hasattr(callback, 'cls'):
return callback.cls
# TODO: Below code seems to not do anything..
mod = importlib.import_module(callback.__module__)
cls = getattr(mod, callback.__name__)
return cls | a705917f680bd35643f57377d430cdaef02228a9 | 5,303 |
def create_multipart_upload(s3_obj, bucketname, object_key):
"""
Initiates Multipart Upload
Args:
s3_obj (obj): MCG or OBC object
        bucketname (str): Name of the bucket on which the multipart upload is to be initiated
object_key (str): Unique object Identifier
Returns:
str : Multipart Upload-ID
"""
mpu = s3_obj.s3_client.create_multipart_upload(Bucket=bucketname, Key=object_key)
upload_id = mpu["UploadId"]
return upload_id | 375d7d04aefa0ef4f91a42e2478ae624057e1bee | 5,304 |
import sqlite3
def cn(DB):
"""Return the cursor and connection object."""
conn = sqlite3.connect(DB)
c = conn.cursor()
return (c,conn) | 76abbec283d45732213f8b94031242146cdb4ee0 | 5,305 |
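A small usage sketch (added for illustration); it uses an in-memory SQLite database, so it has no external dependencies:

c, conn = cn(":memory:")
c.execute("CREATE TABLE t (x INTEGER)")
c.execute("INSERT INTO t VALUES (1)")
conn.commit()
print(c.execute("SELECT x FROM t").fetchall())  # [(1,)]
conn.close()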
def _build_category_tree(slug, reference=None, items=None):
"""
Builds a recursive tree with category relations as children.
"""
if items is None:
items = []
for key in reference:
category = reference[key]
if category["parent"] == slug:
children = _build_category_tree(category["nicename"],
reference=reference)
category["children"] = children
items.append(category)
return items | d06cb736b12025b862363a724b2497a71a8a8a30 | 5,306 |
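A usage sketch with hypothetical data (the reference dict and slugs are invented for illustration); categories whose "parent" matches the given slug become children in the returned tree:

reference = {
    "electronics": {"nicename": "electronics", "parent": ""},
    "laptops": {"nicename": "laptops", "parent": "electronics"},
    "phones": {"nicename": "phones", "parent": "electronics"},
}
tree = _build_category_tree("", reference=reference)
# tree[0]["nicename"] == "electronics", with "laptops" and "phones" as its children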
import copy
def partially_matched_crossover(random, mom, dad, args):
"""Return the offspring of partially matched crossover on the candidates.
This function performs partially matched crossover (PMX). This type of
crossover assumes that candidates are composed of discrete values that
are permutations of a given set (typically integers). It produces offspring
that are themselves permutations of the set.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
if random.random() < crossover_rate:
size = len(mom)
points = random.sample(range(size), 2)
x, y = min(points), max(points)
bro = copy.copy(dad)
bro[x:y+1] = mom[x:y+1]
sis = copy.copy(mom)
sis[x:y+1] = dad[x:y+1]
for parent, child in zip([dad, mom], [bro, sis]):
for i in range(x, y+1):
if parent[i] not in child[x:y+1]:
spot = i
while x <= spot <= y:
spot = parent.index(child[spot])
child[spot] = parent[i]
return [bro, sis]
else:
return [mom, dad] | b0d5132cf4ca14095f3d7c637cb50db3fe37d244 | 5,307 |
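A quick usage sketch (added for illustration); the standard random module works as the generator, and both offspring remain permutations of the parents' elements:

import random
random.seed(0)
mom = [0, 1, 2, 3, 4, 5]
dad = [5, 4, 3, 2, 1, 0]
bro, sis = partially_matched_crossover(random, mom, dad, {})
assert sorted(bro) == sorted(mom) and sorted(sis) == sorted(dad)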
import re
def regex_trim(input, regex, replace=''):
"""
Trims or replaces the regex match in an input string.
input (string): the input string to search for matches
regex (string): regex to match
replace (string - optional): a string to replace any matches with. Defaults to trimming the match.
"""
return re.sub(regex, replace, input) | 169bfaa0d2bfd7a1f32c1e05a63b41993f82bf4b | 5,308 |
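Two illustrative calls (not from the original source):

print(regex_trim("order-00123", r"\d+"))       # "order-"
print(regex_trim("order-00123", r"\d+", "N"))  # "order-N"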
import os
import glob
def getENVIframeDir(strPathScene, sSubDir=None):
""" Return directory containing envi frames
frame bsqs in this dir are named FR_yyyy.mm.dd_X.bsq
    Optional subdirectory name sSubDir; workaround for non-standard directory organization.
"""
strWild = strPathScene + r'SEQhdr\ENVI_FR*'
if sSubDir is not None:
strWild = strWild + os.sep + sSubDir
lstDirs = [f for f in glob.glob(strWild) if os.path.isdir(f)]
if len(lstDirs) == 0:
raise Exception('No match found for: ' + strWild)
elif len(lstDirs) > 1:
        raise Exception('Multiple matches found for: ' + strWild)
return lstDirs[0] | 4dacf64418f97d734f87ecfa5cd7632b7707b5e3 | 5,309 |
def LoadAllSuitesOfProject(project_name):
"""Loads all of the suites of a project."""
project_key = db.Key.from_path(bite_project.BiteProject.kind(),
project_name)
return BiteSuite.all().ancestor(project_key) | 7a1a27229542ed364dddd2cc9a9cd3343c1d934d | 5,310 |
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
TMIN, TAVG, and TMAX
"""
return session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all() | 567bc943ecfe34c0604427e0fa8cea11f10c7205 | 5,311 |
import logging
from typing import Callable
from typing import Any
from typing import Tuple
def __check_rse_usage(rse: RseData, greedy: bool = False, logger: "Callable[..., Any]" = logging.log) -> 'Tuple[int, bool]':
"""
Internal method to check RSE usage and limits.
    :param rse: The RSE data object to check.
:param greedy: If True, needed_free_space will be set to 1TB regardless of actual rse usage.
:returns: needed_free_space, only_delete_obsolete.
"""
# First of all check if greedy mode is enabled for this RSE
if greedy:
return 1000000000000, False
rse.ensure_loaded(load_limits=True, load_usage=True, load_attributes=True)
# Get RSE limits
min_free_space = rse.limits.get('MinFreeSpace', 0)
# Check from which sources to get used and total spaces
# Default is storage
source_for_total_space = rse.attributes.get('source_for_total_space', 'storage')
source_for_used_space = rse.attributes.get('source_for_used_space', 'storage')
logger(logging.DEBUG, 'RSE: %s, source_for_total_space: %s, source_for_used_space: %s',
rse.name, source_for_total_space, source_for_used_space)
# Get total, used and obsolete space
total_space_entry = None
used_space_entry = None
obsolete_entry = None
for entry in rse.usage:
if total_space_entry and used_space_entry and obsolete_entry:
break
entry_source = entry['source']
if not total_space_entry and entry_source == source_for_total_space:
total_space_entry = entry
if not used_space_entry and entry_source == source_for_used_space:
used_space_entry = entry
if not obsolete_entry and entry_source == 'obsolete':
obsolete_entry = entry
obsolete = 0
if obsolete_entry:
obsolete = obsolete_entry['used']
# If no information is available about disk space, do nothing except if there are replicas with Epoch tombstone
needed_free_space = 0
if not total_space_entry:
if not obsolete:
return needed_free_space, False
return obsolete, True
if not used_space_entry:
return needed_free_space, False
# Extract the total and used space
total, used = total_space_entry['total'], used_space_entry['used']
free = total - used
if min_free_space:
needed_free_space = min_free_space - free
# If needed_free_space negative, nothing to delete except if some Epoch tombstoned replicas
if needed_free_space <= 0:
return obsolete, True
else:
return needed_free_space, False | a097443530ef5fa7c122426ef180a697cf89b5a7 | 5,312 |
def train_model(ad, rsrc_loc, algo='IR', log_dir=None):
"""
Train a CellO model based on the genes of an
input dataset.
Parameters
----------
ad : AnnData object
Expression matrix of n cells by m genes
algo : String
The name of the algorithm used to train the model. 'IR'
trains a model using isotonic regression. 'CLR' trains
a model using cascaded logistic regression.
rsrc_loc: String
The location of the "resources" directory downloaded
via the ''
log_dir : String
Path to a directory in which to write logging information
Returns
-------
A trained CellO model
"""
_download_resources(rsrc_loc)
genes = ad.var.index
# Load the training data
r = load_training_data.load(UNITS, rsrc_loc)
og = r[0]
label_graph = r[1]
label_to_name = r[2]
the_exps = r[3]
exp_to_index = r[4]
exp_to_labels = r[5]
exp_to_tags = r[6]
exp_to_study = r[7]
study_to_exps = r[8]
exp_to_ms_labels = r[9]
X = r[10]
all_genes = r[11]
# Match genes in test data to those in training
# data
train_genes, gene_to_indices = _match_genes(
genes,
all_genes,
rsrc_loc,
log_dir=log_dir
)
# Take a subset of the columns for the training-genes. Note
# that if a given gene in the test set maps to multiple training
# genes, then we sum over the training genes.
X_train = []
for gene in train_genes:
indices = gene_to_indices[gene]
X_train.append(np.sum(X[:,indices], axis=1))
X_train = np.array(X_train).T
assert X_train.shape[1] == len(train_genes)
# Train the model on these genes
print('Training model...')
mod = model.train_model(
ALGO_TO_INTERNAL[algo],
ALGO_TO_PARAMS[algo],
X_train,
the_exps,
exp_to_labels,
label_graph,
item_to_group=exp_to_study,
features=train_genes,
preprocessor_names=PREPROCESSORS,
preprocessor_params=PREPROCESSOR_PARAMS
)
print('done.')
return mod | 46c7736a4f0127ec882d54075564fd447336a332 | 5,313 |
def ps_roi_max_align_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size, sampling_ratio=None
):
"""Position Sensitive Region of Interest (ROI) Max align function.
This function computes position sensitive max value of input spatial patch
    with the given regions of interest. Each ROI is split into
:math:`(group\_size, group\_size)` regions, and position sensitive values
in each region is computed.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
after pooled: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
Channel parameter is used to assert the input shape.
spatial_scale (float): Scale of the roi is resized.
group_size (int): Position sensitive group size.
sampling_ratio ((int, int) or int): Sampling step for the alignment.
It must be an integer over :math:`1` or :obj:`None`, and the value
is automatically decided when :obj:`None` is passed. Use of
different ratio in height and width axis is also supported by
passing tuple of int as ``(sampling_ratio_h, sampling_ratio_w)``.
``sampling_ratio=s`` and ``sampling_ratio=(s, s)`` are equivalent.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
See the original paper proposing ROIAlign:
`Mask R-CNN <https://arxiv.org/abs/1703.06870>`_.
"""
return PSROIMaxAlign2D(
outsize, spatial_scale,
group_size, sampling_ratio)(x, rois, roi_indices) | 394879a014d855dd71786cb0941a3aefd30b70b8 | 5,314 |
def received_date_date(soup):
"""
Find the received date in human readable form
"""
return utils.date_text(history_date(soup, date_type="received")) | 5fbba6129da8d6facc66f9ec21e9b6f45fcb399a | 5,315 |
from datetime import datetime
def create_observation_from_inat_data(inaturalist_data):
"""Creates an observation in our local database according to the data from iNaturalist API.
:returns: the observation (instance of Nest or Individual) created.
Raises:
TaxonMatchError
"""
observation_time = dateparser.parse(inaturalist_data['observed_on_string'],
settings={'TIMEZONE': inaturalist_data['observed_time_zone']})
if observation_time is None:
# Sometimes, dateparser doesn't understand the string but we have the bits and pieces in
# inaturalist_data['observed_on_details']
details = inaturalist_data['observed_on_details']
observation_time = datetime(year=details['year'],
month=details['month'],
day=details['day'],
hour=details['hour']) # in the observed cases, we had nothing more precise than the
# hour
    # Sometimes the time is naive (even when specifying it to dateparser) because, for the detected
    # cases at least, the time is 00:00:00. In that case we make it aware to avoid Django warnings
    # (in the local time zone, since all observations occur in Belgium).
    if is_naive(observation_time):
        observation_time = make_aware(observation_time)
if observation_time:
# Reconcile the taxon
try:
taxon = get_taxon_from_inat_taxon_id(inaturalist_data['taxon']['id'])
except Taxon.DoesNotExist:
raise TaxonMatchError
inat_vv_confirmed = inat_data_confirms_vv(inaturalist_data)
# Check if it has the vespawatch_evidence observation field value and if it's set to "nest"
if 'ofvs' in inaturalist_data:
is_nest_ofv = next((item for item in inaturalist_data['ofvs'] if item["field_id"] == settings.VESPAWATCH_EVIDENCE_OBS_FIELD_ID), None)
else:
is_nest_ofv = None
if is_nest_ofv and is_nest_ofv['value'] == "nest":
created = Nest.objects.create(
inat_vv_confirmed=inat_vv_confirmed,
originates_in_vespawatch=False,
inaturalist_id=inaturalist_data['id'],
taxon=taxon,
latitude=inaturalist_data['geojson']['coordinates'][1],
longitude=inaturalist_data['geojson']['coordinates'][0],
observation_time=observation_time) # TODO: What to do with iNat observations without (parsable) time?
else: # Default is specimen
created = Individual.objects.create(
inat_vv_confirmed=inat_vv_confirmed,
originates_in_vespawatch=False,
inaturalist_id=inaturalist_data['id'],
taxon=taxon,
latitude=inaturalist_data['geojson']['coordinates'][1],
longitude=inaturalist_data['geojson']['coordinates'][0],
observation_time=observation_time) # TODO: What to do with iNat observations without (parsable) time?
for photo in inaturalist_data['photos']:
created.assign_picture_from_url(photo['url'])
return created
else:
raise ParseDateError | 7f122a37b37c9bdc045e5e93da16f9fcfa6bc132 | 5,316 |
import uuid
def get_cert_sha256_by_openssl(certraw: str) -> str:
"""calc the sha1 of a certificate, return openssl result str"""
res: str = None
tmpname = None
try:
tmpname = tmppath / f"{uuid.uuid1()}.crt"
while tmpname.exists():
tmpname = tmppath / f"{uuid.uuid1()}.crt"
tmpname.write_text(certraw, encoding="utf-8")
cmd = f"openssl x509 -in {tmpname} -fingerprint -noout -sha256"
res = exec_openssl(cmd)
except Exception as ex:
raise Exception(f"Parse ssl data error, err:{ex}")
finally:
if tmpname is not None:
tmpname.unlink()
return res | 9cd095bd8d2d710b1cf7e4a3155a0b4fc08587f5 | 5,317 |
def analytic_pi(x, c, w, h):
"""Analytic response function for an even pair of Lorentz distributions.
    Corresponds to
    .. math::
        \\Pi(x) = \\int_{-\\infty}^{\\infty}
        \\frac{\\omega^2}{\\omega^2+x^2}\\sigma(\\omega)\\,d\\omega
where :math:`\\sigma(\\omega)` is :func:`~even_lorentzian`.
Args:
        x (array): Matsubara frequencies at which the response function is evaluated
        c (float): Center of the distribution (+ or -)
        w (float): Width of the distribution (variance)
        h (float): Height/weight of the distribution (area under the curve)
Returns:
array: Values of the integral at imaginary `x`
"""
return 2*h*c/(c**2+(x+w)**2) | fc622e79a6692105c15e05ea353ba925b8378831 | 5,318 |
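An illustrative call (added here; assumes NumPy is available):

import numpy as np
x = np.array([0.0, 1.0, 2.0])
print(analytic_pi(x, c=1.0, w=0.5, h=1.0))  # response values at these Matsubara points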
def run(canvas):
""" This function runs the rules of game through all points, and changes their status accordingly.(in the same canvas)
@Args:
--
canvas : canvas of population to run the rules on.
@returns:
--
None
"""
canvas = np.array(canvas)
next_gen_canvas = np.array(create_canvas(canvas.shape[0]))
for r, row in enumerate(canvas):
for c, pt in enumerate(row):
# print(r-1,r+2,c-1,c+2)
next_gen_canvas[r][c] = __judge_point(
pt, canvas[r - 1: r + 2, c - 1: c + 2]
)
canvas = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
return canvas.tolist() | 4a7ae84cf245755f51c3c7a5c22e646e452e6d7a | 5,319 |
def initialize_vocabulary(vocabulary_path):
"""Initialize vocabulary from file.
We assume the vocabulary is stored one-item-per-line, so a file:
dog
cat
will result in a vocabulary {"dog": 0, "cat": 1}, and this function will
also return the reversed-vocabulary ["dog", "cat"].
Args:
vocabulary_path: path to the file containing the vocabulary.
Returns:
a pair: the vocabulary (a dictionary mapping string to integers), and
the reversed vocabulary (a list, which reverses the vocabulary mapping).
Raises:
ValueError: if the provided vocabulary_path does not exist.
"""
if tf.gfile.Exists(vocabulary_path):
rev_vocab = []
with tf.gfile.GFile(vocabulary_path, mode="rb") as f:
rev_vocab.extend(f.readlines())
rev_vocab = [line.strip() for line in rev_vocab]
vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])
return vocab, rev_vocab
else:
raise ValueError("Vocabulary file %s not found.", vocabulary_path) | bf56054aab47ac959fc38929445fc15a1d59b8a9 | 5,320 |
def ravel_space(space):
"""
Convert the space into a Discrete space.
"""
dims = _nested_dim_helper(space)
return Discrete(dims[0]) | baa04d3dd16e1c797bbdb83ff5f42474e77c57b4 | 5,321 |
def _add_col(dataframe, metadata, col_limits, families, weights, random_state):
"""Add a new column to the end of the dataframe by sampling a distribution
from ``families`` according to the column limits and distribution weights
and sampling the required number of values from that distribution."""
nrows, ncols = dataframe.shape
if isinstance(col_limits[1], tuple):
family_counts = get_family_counts(metadata, families)
while len(dataframe.columns) != ncols + 1:
family = random_state.choice(families, p=weights)
idx = families.index(family)
if family_counts[family] < col_limits[1][idx]:
pdf = family.make_instance(random_state)
dataframe[ncols] = pdf.sample(nrows, random_state)
metadata.append(pdf)
dataframe = _rename(dataframe)
return dataframe, metadata
family = random_state.choice(families, p=weights)
pdf = family.make_instance(random_state)
dataframe[ncols] = pdf.sample(nrows, random_state)
metadata.append(pdf)
dataframe = _rename(dataframe)
return dataframe, metadata | b0a711a132f78188cc8b40fe3fb907d022aaa37a | 5,322 |
import struct
def read_and_decrypt_mylogin_cnf(f):
"""Read and decrypt the contents of .mylogin.cnf.
This decryption algorithm mimics the code in MySQL's
mysql_config_editor.cc.
The login key is 20-bytes of random non-printable ASCII.
It is written to the actual login path file. It is used
to generate the real key used in the AES cipher.
:param f: an I/O object opened in binary mode
:return: the decrypted login path file
:rtype: io.BytesIO or None
"""
# Number of bytes used to store the length of ciphertext.
MAX_CIPHER_STORE_LEN = 4
LOGIN_KEY_LEN = 20
# Move past the unused buffer.
buf = f.read(4)
if not buf or len(buf) != 4:
logger.error('Login path file is blank or incomplete.')
return None
# Read the login key.
key = f.read(LOGIN_KEY_LEN)
# Generate the real key.
rkey = [0] * 16
for i in range(LOGIN_KEY_LEN):
try:
rkey[i % 16] ^= ord(key[i:i+1])
except TypeError:
# ord() was unable to get the value of the byte.
logger.error('Unable to generate login path AES key.')
return None
rkey = struct.pack('16B', *rkey)
# Create a decryptor object using the key.
decryptor = _get_decryptor(rkey)
# Create a bytes buffer to hold the plaintext.
plaintext = BytesIO()
while True:
# Read the length of the ciphertext.
len_buf = f.read(MAX_CIPHER_STORE_LEN)
if len(len_buf) < MAX_CIPHER_STORE_LEN:
break
cipher_len, = struct.unpack("<i", len_buf)
# Read cipher_len bytes from the file and decrypt.
cipher = f.read(cipher_len)
plain = _remove_pad(decryptor.update(cipher))
if plain is False:
continue
plaintext.write(plain)
if plaintext.tell() == 0:
logger.error('No data successfully decrypted from login path file.')
return None
plaintext.seek(0)
return plaintext | 84b0831a139db80e0a8d48c7fbaacaef377c93e9 | 5,323 |
def list_files(tag=None, inst_id=None, data_path=None, format_str=None,
supported_tags=None, file_cadence=dt.timedelta(days=1),
two_digit_year_break=None, delimiter=None, file_type=None):
"""Return a Pandas Series of every file for chosen Instrument data.
Parameters
----------
tag : string or NoneType
Denotes type of file to load. Accepted types are <tag strings>.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
supported_tags : dict or NoneType
        Keys are inst_id, each containing a dict keyed by tag,
        where the values are file format template strings. (default=None)
file_cadence : dt.timedelta or pds.DateOffset
        pysat assumes a daily file cadence, but some instrument data files
        contain longer periods of time. This parameter allows the specification
of regular file cadences greater than or equal to a day (e.g., weekly,
monthly, or yearly). (default=dt.timedelta(days=1))
two_digit_year_break : int or NoneType
If filenames only store two digits for the year, then '1900' will be
added for years >= two_digit_year_break and '2000' will be added for
years < two_digit_year_break. If None, then four-digit years are
assumed. (default=None)
delimiter : string or NoneType
Delimiter string upon which files will be split (e.g., '.'). If None,
filenames will be parsed presuming a fixed width format. (default=None)
file_type : str or NoneType
File format for Madrigal data. Load routines currently accepts 'hdf5',
'simple', and 'netCDF4', but any of the Madrigal options may be used
here. If None, will look for all known file types. (default=None)
Returns
-------
out : pds.Series
A pandas Series containing the verified available files
"""
# Initialize the transitional variables
list_file_types = file_types.keys() if file_type is None else [file_type]
sup_tags = {inst_id: {tag: supported_tags[inst_id][tag]}}
out_series = list()
# Cycle through each requested file type, loading the requested files
for ftype in list_file_types:
if supported_tags[inst_id][tag].find('{file_type}') > 0:
sup_tags[inst_id][tag] = supported_tags[inst_id][tag].format(
file_type=file_types[ftype])
out_series.append(pysat.instruments.methods.general.list_files(
tag=tag, inst_id=inst_id, data_path=data_path,
format_str=format_str, supported_tags=sup_tags,
file_cadence=file_cadence,
two_digit_year_break=two_digit_year_break, delimiter=delimiter))
# Combine the file lists, ensuring the files are correctly ordered
if len(out_series) == 0:
out = pds.Series(dtype=str)
elif len(out_series) == 1:
out = out_series[0]
else:
out = pds.concat(out_series).sort_index()
return out | e8dd3c25c953fb6d8ccef6cbef4fd31c579826fd | 5,324 |
def is_on_top(bb1, bb2):
""" For obj 1 to be on top of obj 2:
- obj1 must be above obj 2
- the bottom of obj 1 must be close to the top of obj 2
"""
bb1_min, _ = bb1
_, bb2_max = bb2
x1,y1,z1 = bb1_min
x2,y2,z2 = bb2_max
return z1 < z2 + ONTOP_EPSILON and is_above(bb1, bb2) | 666b7a424fc1b8769cc436c07981ae134e6241a9 | 5,325 |
def prepare_definitions(defs, prefix=None):
"""
    prepares definitions from a dictionary
    Takes a provided dictionary of definitions in key-value pairs and builds them
    into a definition list. For example, if a dictionary contains a key ``foo``
    with a value ``bar``, the returned definitions will be a list with the value
``['foo=bar']``. If a key contains a value of ``None``, the key will be
ignored and will not be part of the final definition list. If a ``prefix``
value is provided, each definition entry will be prefixed with the provided
value.
Args:
defs: the arguments to process
prefix (optional): prefix value to prefix each definition
Returns:
list of arguments
"""
final = []
if defs:
for key, val in defs.items():
if val is None:
continue
if prefix:
key = prefix + key
if val:
final.append('{}={}'.format(key, val))
else:
final.append(key)
return final | ddc6d14cc18f8afba766efee65ab365df1d226c2 | 5,326 |
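A usage sketch (the keys and prefix are invented for illustration):

defs = {"DEBUG": None, "VERSION": "1.2", "ENABLE_FOO": ""}
print(prepare_definitions(defs, prefix="-D"))
# ['-DVERSION=1.2', '-DENABLE_FOO']  (DEBUG is skipped because its value is None)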
def load_training_data(mapsize=512, grfized=False, exclude_fid=False,
dense_grid=False, random_split=False,
from_files=False):
"""Load data for different training scenarios."""
if not grfized and (not dense_grid) and (not random_split):
        # the default data to load
X_train, X_test, y_train, y_test = load_sparse_grid(imsize=mapsize,
from_files=from_files)
elif grfized:
        # equivalent gaussian random field maps
assert not from_files
X_train, X_test, y_train, y_test = load_grf_sparse_grid()
elif dense_grid:
assert not from_files
# data with additional points around a cosmology
X_train, X_test, y_train, y_test = load_dense_grid(imsize=mapsize)
elif random_split:
# random train and test split
X_train, X_test, y_train, y_test = load_randomsplit_grid(
imsize=mapsize, from_files=from_files)
    # always predict new idf, why not, it takes no time
    # anyway we will not use it in the experiments
fn = '../../data/columbia_data_fiducial_new_idf_pix'+str(mapsize)+'.npy'
X_new_idf = np.load(fn)
y_new_idf = np.ones((len(y_test),2))
y_new_idf[:,0], y_new_idf[:,1] = 0.309, 0.816
if exclude_fid: # exclude fiducial cosmo params if asked for
idx = (y_train[:,0] == 0.309) & (y_train[:,1] == 0.816)
X_train, y_train = X_train[~idx], y_train[~idx]
return X_train, X_test, X_new_idf, y_train, y_test, y_new_idf | 2e2ddf93311c315f070c91af8bcc1a0df5e94343 | 5,327 |
import xarray as xr
def concat_features(args, feature_dim_name='feature'):
"""Concatenate Xs along a set of feature dimensions
Parameters
----------
args : iterable
list of tuples of the form (dims, DataArray) where dims is a tuple of dimensions that will be considered feature dimensions
Returns
-------
stacked : xr.DataArray
The output where the data has been stacked along the feature_dim_name
"""
indexes = []
arrays = []
for dims, xarr in args:
stacked_arr = xarr.stack(**{feature_dim_name: dims})
indexes.append(stacked_arr.indexes[feature_dim_name])
arrays.append(stacked_arr)
index = concat_multi_indexes(indexes)
return xr.concat(arrays, dim=index) | d7b44931a5b8a626ca81e5260566c60379d64fc2 | 5,328 |
def _inspect_mixin(
self, geoctx=None, format="pyarrow", file=None, timeout=30, client=None, **params
):
"""
Quickly compute this proxy object using a low-latency, lower-reliability backend.
Inspect is meant for getting simple computations out of Workflows, primarily for interactive use.
It's quicker but less resilient, won't be retried if it fails, and has no progress updates.
If you have a larger computation (longer than ~30sec), or you want to be sure the computation will succeed,
use `~.compute` instead. `~.compute` creates a `.Job`, which runs asynchronously, will be retried if it fails,
and stores its results for later retrieval.
Parameters
----------
geoctx: `.scenes.geocontext.GeoContext`, `~.workflows.types.geospatial.GeoContext`, or None
The GeoContext parameter under which to run the computation.
Almost all computations will require a `~.workflows.types.geospatial.GeoContext`,
but for operations that only involve non-geospatial types,
this parameter is optional.
format: str or dict, default "pyarrow"
The serialization format for the result.
See the `formats
<https://docs.descarteslabs.com/descarteslabs/workflows/docs/formats.html#output-formats>`_
documentation for more information.
If "pyarrow" (the default), returns an appropriate Python object, otherwise returns raw bytes.
file: path or file-like object, optional
If specified, writes results to the path or file instead of returning them.
timeout: int, optional, default 30
The number of seconds to wait for the result.
Raises `~descarteslabs.workflows.models.JobTimeoutError` if the timeout passes.
client: `.workflows.inspect.InspectClient`, optional
Allows you to use a specific InspectClient instance with non-default
auth and parameters
**params: Proxytype
Parameters under which to run the computation.
Returns
-------
result: Python object or bytes
When ``format="pyarrow"`` (the default), returns an appropriate Python object representing
the result, either as a plain Python type, or object from `descarteslabs.workflows.result_types`.
For other formats, returns raw bytes. Consider using `file` in that case to save the results to a file.
"""
if geoctx is not None:
params["geoctx"] = GeoContext._promote(geoctx)
if client is None:
client = _get_global_inspect_client()
return client.inspect(self, format=format, file=file, timeout=timeout, **params) | fd66a1728ca99806dc2c5056cf4a612ca7cac79b | 5,329 |
def list_dvs(service_instance):
"""
Returns a list of distributed virtual switches associated with a given service instance.
service_instance
The Service Instance Object from which to obtain distributed virtual switches.
"""
return utils_common.list_objects(service_instance, vim.DistributedVirtualSwitch) | 2223fe68c13868bea2884b292318e21cb1c2b99c | 5,330 |
from typing import Optional
def gdb_cli_args(request: FixtureRequest) -> Optional[str]:
"""
Enable parametrization for the same cli option
"""
return getattr(request, 'param', None) or request.config.getoption('gdb_cli_args', None) | 635c5cfd397fe286003add99e094778f835a88d9 | 5,331 |
import datetime
def coerce_rfc_3339_date(input_date):
    """Parse an RFC 3339 date string into a datetime; return False if no date is given."""
    if input_date:
        return datetime.datetime.strptime(input_date, "%Y-%m-%dT%H:%M:%SZ")
return False | 83cc2c32b74ad896d79db1a91f4a0fd88b26731e | 5,332 |
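Illustrative calls (added, not from the original):

print(coerce_rfc_3339_date("2021-06-01T12:00:00Z"))  # 2021-06-01 12:00:00
print(coerce_rfc_3339_date(""))                      # False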
def extract_job_url(job):
"""
parse the job data and extract the str for the URL of the job posted
params:
job str: html str representation from bs4
returns:
url str: relative URL path of the job ad
"""
return job.a["href"] | 7517badcc2814e641c04a8f880353d897d434b7f | 5,333 |
import sh
def commit(experiment_name, time):
"""
Try to commit repo exactly as it is when starting the experiment for reproducibility.
"""
try:
sh.git.commit('-a',
m='"auto commit tracked files for new experiment: {} on {}"'.format(experiment_name, time),
allow_empty=True
)
commit_hash = sh.git('rev-parse', 'HEAD').strip()
return commit_hash
except:
return '<Unable to commit>' | a5a75cad77d605ef60905e8b36c6df9913b7bd3c | 5,334 |
def weighted_loss(class_weights):
"""
Create a weighted loss function. Penalise the misclassification
of classes more with the higher usage
"""
weight_values = list(class_weights.values())
def weighted_binary_crossentropy(y_true, y_pred):
# add another dimension to compute dot product
expanded_weights = K.expand_dims(weight_values, axis=-1)
return K.dot(K.binary_crossentropy(y_true, y_pred), expanded_weights)
return weighted_binary_crossentropy | 804a643dff3916f376545a9f481edc418ebf5d8e | 5,335 |
def delete_cluster(resource_root, name):
"""
Delete a cluster by name
@param resource_root: The root Resource object.
@param name: Cluster name
@return: The deleted ApiCluster object
"""
resp = resource_root.delete("%s/%s" % (CLUSTERS_PATH, name))
return ApiCluster.from_json_dict(resp, resource_root) | 2ed12d7f927d6579cbea81765b353f0eecae8f4a | 5,336 |
import numpy as np
import pandas as pd
from scipy.stats import norm
def mk_test(x, alpha=0.05):
"""This perform the MK (Mann-Kendall) test to check if there is any trend present in
data or not
Args:
x: a vector of data
alpha: significance level
Returns:
trend: tells the trend (increasing, decreasing or no trend)
h: True (if trend is present) or False (if trend is absence)
        p: p value of the significance test
z: normalized test statistics
Examples::
>>> x = np.random.rand(100)
>>> trend = mk_test(x,0.05)
>>> print(trend.trend)
increasing
Credit: http://pydoc.net/Python/ambhas/0.4.0/ambhas.stats/
"""
n = len(x)
ta = n*(n-1)/2
# calculate S
s = 0
    for k in range(n-1):
        for j in range(k+1, n):
s += np.sign(x[j] - x[k])
# calculate the unique data
unique_x = np.unique(x)
g = len(unique_x)
# calculate the var(s)
if n == g: # there is no tie
var_s = (n*(n-1)*(2*n+5))/18
else: # there are some ties in data
tp = np.zeros(unique_x.shape)
        for i in range(len(unique_x)):
tp[i] = sum(unique_x[i] == x)
var_s = (n*(n-1)*(2*n+5) - np.sum(tp*(tp-1)*(2*tp+5)))/18
if s>0:
z = (s - 1)/np.sqrt(var_s)
elif s == 0:
z = 0
elif s<0:
z = (s + 1)/np.sqrt(var_s)
else:
z = 0
# calculate the p_value
p = 2*(1- norm.cdf(abs(z))) # two tail test
h = abs(z) > norm.ppf(1-alpha/2)
if (z<0) and h:
trend = 'decreasing'
elif (z>0) and h:
trend = 'increasing'
else:
trend = 'no trend'
return pd.Series({'trend':trend, 'varS':round(var_s,3), 'p':round(p,3), 'z':round(z,3), 's':round(s,3), 'n':n, 'ta':ta}) | 8586c7ee5cf71ea79db9f57ebc6cc77d942962f7 | 5,337 |
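A quick usage sketch on synthetic data (added for illustration; uses the imports added above):

x = np.arange(100) + np.random.rand(100)  # strong upward trend plus noise
result = mk_test(x, alpha=0.05)
print(result['trend'])  # 'increasing'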
from typing import List
def convert_event_to_boxes(event: Event) -> List[EventBox]:
"""Takes in an event and converts this into a list of boxes that when combined completely cover the time allocated
to this event. Usually, this list will contain a single EventBox as many events start and end on the same day, but
any events split across multiple day boundaries will be split into multiple boxes."""
start_date = event.start_datetime.date()
end_date = event.end_datetime.date()
start_time_float = time_to_float(event.start_datetime.time())
end_time_float = time_to_float(event.end_datetime.time())
days_spanned = (end_date - start_date).days + 1
boxes = []
if days_spanned == 1:
boxes.append(EventBox(0, start_time_float, end_time_float))
else:
boxes.append(EventBox(0, start_time_float, 24.0))
for i in range(max(0, days_spanned - 2)):
boxes.append(EventBox(i + 1, 0.0, 24.0))
boxes.append(EventBox(days_spanned - 1, 0.0, end_time_float))
return boxes | c8f93fb2480792540e9052dd79c654e835021030 | 5,338 |
import copy
import numpy as np
def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
"""Reduce the outputs of consecutive layers with multiplier.
Args:
conv_defs: Mobilenet conv_defs.
start_id: 0-based index of the starting conv_def to be reduced.
end_id: 0-based index of the last conv_def to be reduced.
multiplier: The multiplier by which to reduce the conv_defs.
Returns:
Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
inclusive, are reduced by multiplier.
Raises:
ValueError if any layer to be reduced does not have the 'num_outputs'
attribute.
"""
defs = copy.deepcopy(conv_defs)
for d in defs['spec'][start_id:end_id+1]:
d.params.update({
            'num_outputs': int(np.round(d.params['num_outputs'] * multiplier))
})
return defs | ffcfad4956f72cf91aeaa5e795e9568d0808417f | 5,339 |
def ajax_save_content(request):
""" Save front end edited content """
site = get_current_site(request)
content_name = request.POST['content_name']
cms_content = CmsContent.objects.get(site=site, name=content_name)
cms_content.content = request.POST['content']
cms_content.save()
return HttpResponse('SUCCESS') | f99bcfaa7ff5773870ed6ba76bfb0cc97fab248b | 5,340 |
def add_regional_group_costs(ws, data_sheet):
"""
"""
ws.sheet_properties.tabColor = "92D050"
    # Hide gridlines (white background)
    ws.sheet_view.showGridLines = False
#Set blue and red border strips
set_cell_color(ws, 'A1:AZ1', "004C97")
set_cell_color(ws, 'A2:AZ2', "C00000")
ws = bar_chart(ws, "Estimates!$C$64:$C$71", "Estimates!$B$65:$B$71", "Total Cost by Region", 'Cost ($Bn)', "B4")
ws = bar_chart(ws, "Estimates!$F$64:$F$71", "Estimates!$E$65:$E$71", "Mean Annual 10-Year GDP by Region",'GDP ($Bn)', "L4")
ws = bar_chart(ws, "Estimates!$I$64:$I$71", "Estimates!$H$65:$H$71", "Initial Investment by Region",'Cost ($Bn)', "B20")
ws = bar_chart(ws, "Estimates!$C$75:$C$82", "Estimates!$B$76:$B$82", "GDP Share by Region",'Percent of GDP (%)', "L20")
return ws | a325b33b705819be81725e6fb6c4f277c6add097 | 5,341 |
def normal(loc=0.0, scale=1.0, size=(1,1), sparsity=1.0):
"""
Draw random samples from a normal (Gaussian) distribution.
Parameters
----------
loc: Mean ("centre") of the distribution.
scale: Standard deviation (spread or "width") of the distribution.
size: Output shape (only tuple of length 2, i.e. (m, n), supported).
sparsity: Sparsity (between 0.0 and 1.0).
Examples
--------
>>> import systemml as sml
>>> import numpy as np
>>> sml.setSparkContext(sc)
>>> from systemml import random
>>> m1 = sml.random.normal(loc=3, scale=2, size=(3,3))
>>> m1.toNumPyArray()
array([[ 3.48857226, 6.17261819, 2.51167259],
[ 3.60506708, -1.90266305, 3.97601633],
[ 3.62245706, 5.9430881 , 2.53070413]])
"""
if len(size) != 2:
raise TypeError('Incorrect type for size. Expected tuple of length 2')
INPUTS = []
rows = asStr(size[0])
cols = asStr(size[1])
loc = asStr(loc)
scale = asStr(scale)
sparsity = asStr(sparsity)
# loc + scale*standard normal
return constructSamplingNode(INPUTS, [OUTPUT_ID, ' = ', loc,' + ', scale,' * random.normal(', rows, ',', cols, ',', sparsity, ')\n']) | cda973e6fa0ed2dcb0046cb1d5ef99dd5efbaf3c | 5,342 |
def random_data(num):
""" will return json random float, hex, int and a random password
{0: {
'float': 186.66541583209647,
'hex': '43435c553c722359e386804f6b28d2c2ee3754456c38f5e7e68f',
'int': 851482763158959204,
'password': '5AJ]-02X0J'
}
}"""
data = {}
count = 0
while count < num:
data.update(
{
count: {
"hex": random_hex(),
"int": randint(1, 10**18),
"float": uniform(0.1, 10**3.01),
"password": randPwStr()
}
}
)
count += 1
return data | 108ebabe8b156218a452cbade729dc09356d2d0b | 5,343 |
def denied(request):
"""Authentication failed and user was denied."""
return render(request, 'djangosaml2/denied.html') | 9341e694163de3d8cd63d448ac39294003046dac | 5,344 |
import os
import traceback
import sys
def getFontCoverage(f, glyphCache=None):
"""
Calculate a weighted average of all glyph coverages.
Use frequencies of multiple languages to average out language specific bias.
    So it does not use all the glyphs, just the A-Z, a-z for the languages we have frequencies for.
"""
total = []
if glyphCache is None:
glyphCache = {}
supportedLanguages = coverage.data.checkLanguages(f)
# make a prioritised list of glyphnames:
# - only the glyphs we need for the tables
# - and the components they need
# - then do the glyphs without components first
    # - so that remove-overlap work will propagate to the components, saving work
availableGlyphs = []
for name in coverage.data.coverageNames:
if not name in f:
continue
g = f[name]
availableGlyphs.append(name)
if not supportedLanguages:
return None
for lang in supportedLanguages:
table = coverage.data.frequencies[lang]
languageTotal = 0
for glyphName in availableGlyphs:
if not glyphName in table:
continue
weight = table[glyphName]
if glyphName in f:
g = f[glyphName]
else:
continue
try:
a = calculateGlyphCoverage(g, f, cache=glyphCache)
except:
if f.path is not None:
fontName = os.path.basename(f.path)
else:
fontName = "object: %s-%s" % (f.info.familyName, f.info.styleName)
print("failed calculating the coverage for %s in %s" % (g.name, fontName))
traceback.print_exc(file=sys.stdout)
a = 0
if a > 0:
languageTotal += a * weight
total.append(languageTotal / len(table))
return sum(total) / len(supportedLanguages) | 76b5fa4a3eea809e4c68ea72187ef4168a092219 | 5,345 |
import time
def monday_of_week(year, week):
"""
Returns a datetime for the monday of the given week of the given year.
"""
str_time = time.strptime('{0} {1} 1'.format(year, week), '%Y %W %w')
date = timezone.datetime(year=str_time.tm_year, month=str_time.tm_mon,
day=str_time.tm_mday, tzinfo=timezone.utc)
if timezone.datetime(year, 1, 4).isoweekday() > 4:
# ISO 8601 where week 1 is the first week that has at least 4 days in
# the current year
date -= timezone.timedelta(days=7)
return date | 886c16df011f86eaa95254e360062d5530e05512 | 5,346 |
from typing import Mapping
from typing import Any
from typing import Dict
def object_meta(metadata: Metadata) -> Mapping[str, Any]:
"""
Return a minimal representation of an ObjectMeta with the supplied information.
Spec: https://github.com/argoproj/argo-workflows/blob/v3.0.4/docs/fields.md#objectmeta
"""
meta: Dict[str, Any] = {}
if metadata.generate_name_from_prefix:
meta["generateName"] = metadata.name
else:
meta["name"] = metadata.name
if metadata.annotations:
meta["annotations"] = metadata.annotations
if metadata.labels:
meta["labels"] = metadata.labels
if metadata.namespace:
meta["namespace"] = metadata.namespace
return meta | fc4d30954f9c61c90511fbfc00b403017f41f6c9 | 5,347 |
from time import time
def vectorize_timing(n_targets):
"""
Calculate the rise time of ``n_targets`` targets, return the
run time in seconds.
"""
vega_coord = SkyCoord(279.23473479*u.degree, 38.78368896*u.degree)
vega = FixedTarget(name="Vega", coord=vega_coord)
target_list = n_targets*[vega]
t = Time("2008-02-27 22:00:00")
obs = Observer(location=EarthLocation(10*u.deg, 20*u.deg, 0*u.m))
start = time()
obs.target_rise_time(t, target_list)
end = time()
return end-start | 287f723d66efedc9eaa874e3b1db9d6724598c10 | 5,348 |
import json
def get_game(name, all=False):
"""
Get the game information for a particular game.
For response object structure, see:
https://dev.twitch.tv/docs/v5/reference/search/#search-games
May throw exceptions on network/Twitch error.
"""
search_opts = {
'query': name,
'type': 'suggest',
'live': 'false',
}
headers = {
'Client-ID': config['twitch_clientid'],
'Accept': 'application/vnd.twitchtv.v5+json',
}
res = common.http.request("https://api.twitch.tv/kraken/search/games", search_opts, headers=headers)
res = json.loads(res)
if all:
return res['games'] or []
else:
for game in res['games'] or []:
if game['name'] == name:
return game
return None | 0946516ca7062087d0dc01daa89b328a26367145 | 5,349 |
def compute_correlation_prob_class_target(candidates_per_query_target):
"""This function computes the overall correlation between the probability of being in
the positive class and the value of the target column
"""
probs_per_query_target = []
gains_per_query_target = []
for key in candidates_per_query_target.keys():
candidates = candidates_per_query_target[key].keys()
tmp_probs = [candidates_per_query_target[key][candidate]['pred_prob'] for candidate in candidates]
tmp_gains = [candidates_per_query_target[key][candidate][TARGET_COLUMN] for candidate in candidates]
probs_per_query_target += tmp_probs
gains_per_query_target += tmp_gains
return pearsonr(probs_per_query_target, gains_per_query_target) | e0412cfa3940149d88c75f680aab55dece9b36a2 | 5,350 |
def get(sql: str):
""" execute select SQL and return unique result.
select count(1) form meters
or
select lass(ts) from meters where tag = 'xxx'
:return: only value
"""
result = _query(sql)
try:
value = result.next()
except StopIteration:
return None
except taos.error.OperationalError:
return None
if len(value) == 1:
return value[0]
else:
raise MultiColumnsError('Expect only one column.') | 10b03b64c0a18b4cd5a3c83e6d101d05566b251c | 5,351 |
def _get_parse_pauli_sums():
"""Helper function to obtain the generator of the sampled list of the pauli
sum coefficients after parsing pauli sums."""
# TODO(jaeyoo) : this will be c++ op
def _parse_pauli_sums(pauli_sums, n_programs, n_ops):
"""Helper function to parse given pauli sums to collect observable
coefficients.
Currently `cirq.PauliSum` is not subscriptable, which means it is not
possible to construct a uniform-shape tensor whose elements are
consistently matched to `cirq.PauliString` inside of given `PauliSum`
because the order of `PauliString`'s can be different whenever accessed.
So, the current version of _parse_pauli_sums only consider a `PauliSum`
to be sampled, not a `PauliString`. The observable coefficients are then
sum of the absolute value of coefficients of `PauliString`'s in the
`PauliSum`.
Args:
pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops]
representing output observables for each program.
n_programs: `tf.Tensor` of the number of programs.
n_ops: `tf.Tensor` of the number of pauli sums.
Returns:
observable_coeff_: `tf.Tensor` of real numbers. This involves the
coefficients of Pauli sum terms of the first PauliString.
It is directly used to calculate probabilities.
[n_programs, n_ops]
"""
pauli_sums = util.from_tensor(pauli_sums)
def get_pauli_sum_coeff(i):
def get_i_pauli_sum_coeff(j):
# Because PauliSum object is not subscriptable, use for-loop.
# pauli_sums[i][j] : j-th `PauliSum` of i-th program.
return tf.reduce_sum(
tf.abs([
pstring.coefficient.real for pstring in pauli_sums[i][j]
]))
return tf.map_fn(get_i_pauli_sum_coeff,
tf.range(n_ops),
dtype=tf.float32)
observable_coeff = tf.map_fn(get_pauli_sum_coeff,
tf.range(n_programs),
dtype=tf.float32)
return observable_coeff
def parse_pauli_sums_generator(pauli_sums, n_programs, n_ops):
"""tf.py_function wrapper generator of _parse_programs()."""
# observable_coeff has the shape of [n_programs, n_ops]
observable_coeff = tf.py_function(func=_parse_pauli_sums,
inp=[
tf.stop_gradient(pauli_sums),
tf.stop_gradient(n_programs),
tf.stop_gradient(n_ops),
],
Tout=tf.float32)
return observable_coeff
return parse_pauli_sums_generator | 4914cb42b8286455ffe0306ec8256e63925111ba | 5,352 |
import tensorflow as tf
def is_fully_defined(x):
"""Returns True iff `x` is fully defined in every dimension.
For more details, see `help(tf.TensorShape.is_fully_defined)`.
Args:
x: object representing a shape; convertible to `tf.TensorShape`.
Returns:
is_fully_defined: `bool` indicating that the shape is fully known.
"""
return tf.TensorShape(x).is_fully_defined() | d3d419864fb9d6168adce54afae84f089c9a680c | 5,353 |
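Illustrative calls (added; assume TensorFlow is installed):

print(is_fully_defined([2, 3]))     # True
print(is_fully_defined([None, 3]))  # False
print(is_fully_defined(None))       # False (unknown rank)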
def make_shell_context():
"""
Creates a python REPL with several default imports
in the context of the current_app
:return:
"""
return dict(current_app=current_app) | 8db290ccfa51681ac63e8e5d88b29c4e82176f36 | 5,354 |
def recommend_hybrid_user(
df, model, interactions, user_id, user_dict,
item_dict, topn, new_only=True, threshold=3,
show=True):
"""Function to produce user recommendations. Hybrid version of
recommend_known_user
Args:
model: trained matrix factorization model
interactions: dataset used for training the model
user_id: user ID for which we need to generate recommendation
user_dict: Dictionary type input containing user_id as key and
interaction_index as value
item_dict: Dictionary type input containing item_id as key and
item_name as value
threshold: value above which the rating is favorable in interaction
matrix
topn: Number of output recommendation needed
new_only: whether to only recommend items that users have not visited
show: whether to show the result of function
Returns:
Prints list of items the given user has already visited
Prints list of N recommended items which user hopefully will be
interested in
"""
print('Recommending items for user {}...'.format(user_id))
n_users, n_items = interactions.shape
user_features, item_features, user_x, _ = feature_matrix(
df, user_id=user_id)
scores = pd.Series(model.predict(
user_x, interactions.values[user_x, :], user_features=user_features,
item_features=item_features))
scores.index = interactions.columns
scores = list(pd.Series(scores.sort_values(ascending=False).index))
known_items = list(pd.Series(
interactions.loc[user_id, :]
[interactions.loc[user_id, :] > threshold].index).sort_values(
ascending=False))
if new_only:
scores = [x for x in scores if x not in known_items]
item_list = scores[:topn]
known_items = list(pd.Series(known_items).apply(lambda x: item_dict[x]))
recommended_items = list(pd.Series(item_list).apply(
lambda x: item_dict[x]))
if show is True:
print("Known Likes:")
counter = 1
for i in known_items:
print(str(counter) + '- ' + i)
counter += 1
print("Recommended Items:")
counter = 1
for i in recommended_items:
print(str(counter) + '- ' + i)
counter += 1
return item_list | f6f1bb5486dcd3a7848ca006998587a8efce4939 | 5,355 |
def i(mu_i, mu_ij, N) :
"""Calcule le tableau I[i, j]"""
return [[I_ij(i, j, mu_i, mu_ij, N) for j in range(0, N)] for i in range(0, N)] | 518609bbe91088d94267515ccd07b3fa16525d4f | 5,356 |
import dateutil.parser
def format_datetime(this, date, date_format=None):
"""Convert datetime to a required format."""
date = dateutil.parser.isoparse(date)
if date_format is None:
date_format = "%d-%m-%Y"
return date.strftime(date_format) | 0311eb918540dbb0c5751244b89de220073b9dcd | 5,357 |
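Example calls (added for illustration; the first argument mirrors a template-helper context and is unused here):

print(format_datetime(None, "2021-03-05T12:30:00Z"))              # 05-03-2021
print(format_datetime(None, "2021-03-05T12:30:00Z", "%Y/%m/%d"))  # 2021/03/05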
import numpy
def _estimate_melting_levels(latitudes_deg, valid_time_unix_sec):
"""Estimates melting level at each point.
This estimate is based on linear regression with respect to latitude. There
is one set of regression coefficients for each month.
:param latitudes_deg: numpy array of latitudes (deg N).
:param valid_time_unix_sec: Valid time.
:return: melting_levels_m_asl: numpy array of melting levels (metres above
sea level), with same shape as `latitudes_deg`.
"""
month_index = int(
time_conversion.unix_sec_to_string(valid_time_unix_sec, '%m')
)
return (
MELT_LEVEL_INTERCEPT_BY_MONTH_M_ASL[month_index - 1] +
MELT_LEVEL_SLOPE_BY_MONTH_M_DEG01[month_index - 1] *
numpy.absolute(latitudes_deg)
) | d72ac1cf0c23eadb49fc15e55c2a71e273120500 | 5,358 |
def edit_municipality(self, request, form):
""" Edit a municipality. """
layout = EditMunicipalityLayout(self, request)
if form.submitted(request):
form.update_model(self)
request.message(_("Municipality modified."), 'success')
return redirect(layout.success_url)
if not form.errors:
form.apply_model(self)
return {
'layout': layout,
'form': form,
'button_text': _("Save"),
'cancel': layout.cancel_url,
} | 4d233f97cbc7672b38348eb982cecc68f88ade17 | 5,359 |
import pandas as pd
from matplotlib import pyplot as plt
def plot_period_transactions(
model,
max_frequency=7,
title="Frequency of Repeat Transactions",
xlabel="Number of Calibration Period Transactions",
ylabel="Customers",
**kwargs
):
"""
Plot a figure with period actual and predicted transactions.
Parameters
----------
model: lifetimes model
A fitted lifetimes model.
max_frequency: int, optional
The maximum frequency to plot.
title: str, optional
Figure title
xlabel: str, optional
Figure xlabel
ylabel: str, optional
Figure ylabel
kwargs
Passed into the matplotlib.pyplot.plot command.
Returns
-------
axes: matplotlib.AxesSubplot
"""
labels = kwargs.pop("label", ["Actual", "Model"])
n = model.data.shape[0]
simulated_data = model.generate_new_data(size=n)
model_counts = pd.DataFrame(model.data["frequency"].value_counts().sort_index().iloc[:max_frequency])
simulated_counts = pd.DataFrame(simulated_data["frequency"].value_counts().sort_index().iloc[:max_frequency])
combined_counts = model_counts.merge(simulated_counts, how="outer", left_index=True, right_index=True).fillna(0)
combined_counts.columns = labels
ax = combined_counts.plot(kind="bar", **kwargs)
plt.legend()
plt.title(title)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
return ax | 2428c585dcd117302382b3788be9d763f23c1b3a | 5,360 |
import copy
def operate_monitor(params):
""" different apps has different required params"""
ret_obj = copy.deepcopy(RET_OBJ)
group_id = params.get("group_id", type=int, default=None)
app_name = params.get("app_name")
operate = "update" if key_exist(group_id, app_name) else "insert"
valid_key = "_".join([app_name, operate])
if valid_key not in param_valids:
raise CustomException("operate_monitor Not found corresponding valid function for %s" % app_name, 1005)
params = param_valids[valid_key](params)
api_create(params) if operate == "insert" else api_update(params)
ret_obj['msg'] = operate + " monitor successfully."
return ret_obj | 8d78d61dc44acf2fdcc85a8e3cd4d6fd68c47bf6 | 5,361 |
import numpy as np
def construct_rgba_vector(img, n_alpha=0):
"""
Construct RGBA vector to be used to color faces of pcolormesh
    This function was taken from Flamingo.
----------
Args:
img [Mandatory (np.ndarray)]: NxMx3 RGB image matrix
n_alpha [Mandatory (float)]: Number of border pixels
to use to increase alpha
----------
Returns:
rgba [Mandatory (np.ndarray)]: (N*M)x4 RGBA image vector
"""
alpha = np.ones(img.shape[:2])
if n_alpha > 0:
for i, a in enumerate(np.linspace(0, 1, n_alpha)):
alpha[:, [i, -2-i]] = a
rgb = img[:, :-1, :].reshape((-1, 3)) # we have 1 less faces than grid
rgba = np.concatenate((rgb, alpha[:, :-1].reshape((-1, 1))), axis=1)
if np.any(img > 1):
rgba[:, :3] /= 255.0
return rgba | 028275930ad4d2a3b98ce32e48021da8ff1e6c43 | 5,362 |
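A shape-oriented usage sketch on a synthetic image (added for illustration):

img = np.random.rand(4, 5, 3)                 # small RGB image with values in [0, 1]
rgba = construct_rgba_vector(img, n_alpha=2)
print(rgba.shape)                             # (16, 4): 4 rows x 4 faces, each as RGBA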
def nice(val):
"""Make sure this value is nice"""
if pd.isna(val):
return None
return val | a2d0c3c64c7c2e01d66d171902e85a3d0056cc73 | 5,363 |
import logging
import requests
import json
def retrieve_tree(issue_id):
"""Retrieve a tree of issues from Redmine, starting at `issue_id`."""
logging.info(f" Retrieving issue #{issue_id} ...")
params = {
'issue_id': issue_id
}
response = requests.get(ISSUES_ENDPOINT, params=params, headers=HEADERS)
data = json.loads(response.text)
issue = data['issues'][0]
issue['children'] = retrieve_children(issue_id)
return issue | 928d2f3d68e5b9033a062d5e24d3f34f74781357 | 5,364 |
from typing import Sequence
from typing import Dict
def cmdline_args(argv: Sequence[str], options: Sequence[Option], *, process: callable = None,
error: callable = None, results: dict = None) -> (Dict, Sequence[str]):
"""
Take an array of command line args, process them
:param argv: argument array
:param options: sequence of options to parse
:param process: process function
:param error: error function
:param results: optional dict to contain results (alternative to process callable)
:return: parsed results, remaining unprocessed arguments
"""
def select_option(short_opt, long_opt):
selected_option = None
for current_opt in options:
if short_opt is not None and short_opt == current_opt.short:
selected_option = current_opt
break
elif long_opt is not None and current_opt.long is not None:
if current_opt.long.startswith(long_opt) or long_opt.startswith(current_opt.long):
selected_option = current_opt
break
else:
if error is not None:
if short_opt:
error(f"unknown short option '-{short_opt}'")
else:
error(f"unknown long option '--{long_opt}'")
return selected_option
def dispatch_option(_option: Option, _opt: str, _args):
if _option.fn is not None:
return _option.fn(_option, _opt, _args) if callable(_option.fn) else _option.fn
if process:
tmp = process(_option, _opt, _args)
if tmp is not None:
return tmp
return _args if _option.has_arg else True
if results is None:
results = dict()
index = skip_count = 0
saved_args = []
for index, arg in enumerate(argv):
if skip_count:
skip_count -= 1
elif arg.startswith('--'): # long arg
skip_count = 0
longopt = arg[2:]
option = select_option(None, longopt)
if option is None:
saved_args.append(f"--{longopt}")
else:
args = None
if option.has_arg:
if '=' in longopt:
longopt, args = longopt.split('=', maxsplit=1)
else:
skip_count += 1
args = argv[index + skip_count]
results[option.long] = dispatch_option(option, longopt, args)
elif arg.startswith('-'):
skip_count = 0
for opt in arg[1:]:
option = select_option(opt, None)
if option is None:
saved_args.append(f"-{opt}")
else:
if option.has_arg:
skip_count += 1
args = argv[index + skip_count] if option.has_arg else None
results[option.long] = dispatch_option(option, opt, args)
else:
break
return results, saved_args + [arg for arg in argv[index + skip_count:]] | c9c78d5a6b5fb6147a8b392647ec9a7e4abc2800 | 5,365 |
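A usage sketch; the Option class below is a hypothetical stand-in (the real Option type is defined elsewhere in the original module) exposing only the attributes the parser accesses (short, long, has_arg, fn):

from dataclasses import dataclass
from typing import Callable, Optional as Opt

@dataclass
class Option:  # hypothetical stand-in for the real Option type
    short: Opt[str] = None
    long: Opt[str] = None
    has_arg: bool = False
    fn: Callable = None

opts = [Option(short='v', long='verbose'), Option(short='o', long='output', has_arg=True)]
results, rest = cmdline_args(['-v', '--output=out.txt', 'input.bin'], opts)
print(results)  # {'verbose': True, 'output': 'out.txt'}
print(rest)     # ['input.bin']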
def trailing_zeroes(value):
# type: (int) -> int
"""Count the number of trailing zeros in a given 8-bit integer"""
return CTZ_TABLE[value] | a98968aa38c886de9aa38bae71e52d0e012c432c | 5,366 |
def _calc_WaterCirculation(heat_load, CT_design, WBT, DBT, fixedCWT_ctrl, pump_ctrl, ignore_CT_eff, max_CT_eff=0.85):
"""Calculates the water circulation loop. Used by simulate_CT().
Parameters:
Returns:
All (time x CT) arrays as
HWT Hot water temp [pint, C]
CWT Cold water temp [pint, C]
waterflow Water mass flow rate [pint, kg/s]. This is the input water stream to the CTs.
Notes:
1) This routine determines the temperatures of the water circuit (HWT, CWT) and the water flow rate to
transfer the heat load to the CT.
2) The WBT serves as a lower limit to CWT.
(variables: WBT is an iterable (length nTime), whereas WBT2 is a 2d array (time x CT))
"""
nTime = len(WBT)
nCT = CT_design.shape[0]
# .......................................................... 1) Calc CWT (based on WBT) and approach
# i) CWT
if fixedCWT_ctrl:
raise NotImplementedError
# This ctrl is not as simple as setting CWT to rated, because what if ambient WBT + min approach is above this?
# CWT fixed at design value
# CWT = Q_(np.tile(CT_design['CWT [°C]'].values, (Nsimul, 1)), 'degC')
else:
# CWT from CT performance curves
perf_m = CT_design['CT perf slope'].values
perf_b = CT_design['CT perf y-int'].values
# time x CT
CWT = Q_(np.outer(WBT, perf_m) + np.tile(perf_b, (nTime, 1)), 'degC')
# ii) Approach
WBT2 = Q_(np.transpose(np.tile(WBT, (nCT, 1))), 'degC')
approach = CWT - WBT2
# .......................................................... 2) Calc water circulation loop
# (calc deltaT, waterflow, assuming loaded)
# Forms a time-invariant array with shape (time x CT) and as a Pint quantity
tile_and_pint = lambda arr, units: Q_(np.tile(arr, (nTime, 1)), units)
HWT_r = tile_and_pint(CT_design['HWT [°C]'].values, 'degC')
waterflow_r = tile_and_pint(CT_design['water flow [kg/s]'].values, 'kg/s')
if pump_ctrl == 'fixed HWT':
deltaT = HWT_r - CWT
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
elif pump_ctrl == 'range limit':
# Calc range as if HWT = HWT_r
deltaT = HWT_r - CWT
# i) Adjust deltaT
deltaT_min = np.tile(CT_design['Min Range [C°]'].values, (nTime, 1))
deltaT = Q_(np.clip((deltaT).magnitude, deltaT_min, None), 'delta_degC')
# ii) Calc water flow
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
elif pump_ctrl == 'c':
# Calc range & water flow as if HWT = HWT_r
deltaT = HWT_r - CWT
waterflow = (heat_load / (cp_water * deltaT)).to_base_units()
waterflow_units = waterflow.units
# i) Adjust water flow
# Clip violating values
waterflow_ub = np.tile((CT_design['Max per unit water flow'] * CT_design['water flow [kg/s]']).values,
(nTime, 1))
waterflow_lb = np.tile((CT_design['Min per unit water flow'] * CT_design['water flow [kg/s]']).values,
(nTime, 1))
_wf = np.clip(waterflow.magnitude, waterflow_lb, waterflow_ub)
# Back to pint
waterflow = Q_(_wf, waterflow_units)
# ii) Calc deltaT
deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')
else:
waterflow = waterflow_r
deltaT = (heat_load / (cp_water * waterflow)).to('delta_degC')
# .......................................................... 3) No-load fix
    # This part is necessary for all control modes because the operational limits applied
    # in step 2 assumed loaded operation. After this step, water flow and deltaT are final.
CT_load_mask = (heat_load != 0).astype('int') # 0 if no load, 1 otherwise
waterflow = waterflow * CT_load_mask
deltaT = deltaT * CT_load_mask
HWT = CWT + deltaT
# .......................................................... 4) HWT and CWT adjustment
# HWT cannot be less than DBT; in which case, HWT is limited to DBT and CWT rises.
# Vectorize DBT into (time x CT)
DBT = np.tile(DBT, (nCT, 1)).transpose()
HWT = Q_(np.maximum(HWT.magnitude, DBT), 'degC')
CWT = HWT - deltaT
# .......................................................... 5) Checks and return
assert waterflow.units == ureg.kg / ureg.s
assert deltaT.units == ureg.delta_degC, deltaT.units
# Check that CT efficiency is realistic. In practice, efficiency is 65-70% (normal operating conditions)
CT_eff = deltaT / (deltaT + approach)
assert ignore_CT_eff or np.all(CT_eff < max_CT_eff), \
"CT efficiency exceeded the limit: {}".format(CT_eff)
assert all(obj.shape == (nTime, nCT) for obj in (HWT, CWT, waterflow, deltaT, approach, CT_eff))
# Check energy balance
assert np.allclose(heat_load.magnitude, (cp_water * deltaT * waterflow).to(heat_load.units).magnitude)
res = {
'HWT': HWT,
'CWT': CWT,
'water flow': waterflow,
'range': deltaT,
'approach': approach,
'CT_eff': CT_eff,
}
return res | 46d4d7e8fb1c718821b9f64fe86fa268e05c459a | 5,367 |
import zipfile
import os
from distutils import log
from distutils.dir_util import mkpath
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=None,
                 mode='w'
                 ):
    """Create a zip file from all the files under 'base_dir'. The output
    zip file will be named 'zip_filename'. Uses the "zipfile" Python
    module. Returns the name of the output zip file.
    """
    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
    def visit(z, dirname, names):
        for name in names:
            path = os.path.normpath(os.path.join(dirname, name))
            if os.path.isfile(path):
                p = path[len(base_dir)+1:]
                if not dry_run:
                    z.write(path, p)
                log.debug("adding '%s'" % p)
    if compress is None:
        compress = True
    compression = [zipfile.ZIP_STORED, zipfile.ZIP_DEFLATED][bool(compress)]
    if not dry_run:
        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
        # os.path.walk was removed in Python 3; os.walk provides the same traversal
        for dirname, _subdirs, names in os.walk(base_dir):
            visit(z, dirname, names)
        z.close()
    else:
        for dirname, _subdirs, names in os.walk(base_dir):
            visit(None, dirname, names)
    return zip_filename | 97a24dfac85af54918ecda934ef854e4606ada0b | 5,368
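# Usage sketch for make_zipfile above (paths are hypothetical):
#   make_zipfile('dist/mypkg.zip', 'build/mypkg', compress=True)
#   # -> 'dist/mypkg.zip', containing every regular file under build/mypkg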
import pandas as pd
import tensorflow as tf
def read_dataset_from_csv(data_type, path):
    """Read dataset from csv
    Args:
        data_type (str): train/valid/test
        path (str): path prefix; combined with data_type to glob the csv file
    Returns:
        pd.DataFrame: data
"""
data = pd.read_csv(tf.io.gfile.glob(path + data_type + "*")[0])
return data | 3b5fb8318d6b7297166b381d199fe206f4240d84 | 5,369 |
from acora import AcoraBuilder
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s) | bf093c3864353278a9ff91267b0806bc1e2362a3 | 5,370 |
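# Usage sketch for search_ignore_case above; acora's findall returns a list of
# (keyword, position) tuples:
#   search_ignore_case("The CAT sat on the mat", "cat", "mat")
#   # -> matches for "cat" and "mat" regardless of case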
import logging
from logging import Formatter
from typing import Union
from typing import Iterable
def _logger_setup(logger: logging.Logger,
                  header_label: str,
                  formatter: Formatter = None,
                  level: int = logging.INFO,
                  level_normalized: int = logging.INFO,
                  handler_delegate: Union[logging.StreamHandler, Iterable[logging.StreamHandler]] = logging.StreamHandler,
                  **handler_kwargs) -> logging.Logger:
"""A convenience function for creating well named loggers with optional custom formatter.
This function will implement an already defined formatter for you if the formatter param is
None. For an example of a good general use logger see the example code at the bottom of this
file.
SPECIAL NOTE:
Although this function's signature allows the caller to pass virtually any logging.Handler
subclass into the handler_delegate parameter, I've only tested this functionality against
the logging.StreamHandler class. If you wish to use it for others you will likely encounter
bugs. But if you are up to the task it seems like it could potentially be a helpful tool for
allowing the instantiation of a wide array of utility loggers under a single interface.
    :param header_label: The label used in the header line of each log message, typically
                         naming the component or task the logger handles. The logger which
                         uses this label will set its level, handlers, and formatters
                         according to your inputs, allowing you to specify many different
                         loggers to manage data output in a form that suits you.
    :type header_label: A string
    :param logger: an initialized logger object that needs to have its formatter and optional handlers set.
    :param formatter: An optional parameter that specifies the manner in which you wish to format
                      the available logging-event data as well as how you wish to present the
                      message data for your log events. The default formatter will take one of
                      two styles, based on the given `level`.
                      For level==logging.INFO (20) and below:
log messages will be presented in two parts.
* First, a header line that's formatted to be bold, and underlined,
that gives the time-stamp for when the log was submitted, the
child_name, the model and function and line number from which the
log-message was originated
* Followed by an indented new-line where the log-message will be
be printed. The message
For level==logging.INFO+1 (21) and above:
log messages will be presented in two parts.
                          * First, a header line that's formatted to be bold, and underlined,
                            that gives the time-stamp for when the log was submitted, the
                            header label, and the module, function, and line number from which
                            the log-message originated
                          * Followed, on the same line as the header, by the log-message. This
                            distinction, as opposed to the indented new-line in lower level
                            messages, is made because when higher level messages occur, there
                            are often very many of them. Forcing each one to be a multi-line
                            message actually makes it much harder to visually parse.
                          * Special note: In order to aid in automated parsing of these
                            log-messages, the header details and log message will be separated
                            by the following character key:
`::>`
:type formatter: an instance of logging.Formatter
    :param handler_delegate: An optional parameter that specifies the type of handler you want to
                             associate with the logger instance you've passed in. The handler will
                             be set up inside of this function; this parameter simply allows you to
                             indicate the way you wish to have your output handled
                             (e.g. to std_out, std_err, or some file output stream).
:type handler_delegate: This should be a delegate function of your desired handler's
constructor, DEFAULT=logging.StreamHandler
    :param level: Specifies the desired logging level of the resulting logger.
                  DEFAULT=logging.INFO
    :type level: An int, must be in the range of [0,0xffffffff]
    :param level_normalized: The level used to select the default color/formatting style
                             (via log_id_map); if None, `level` is used. DEFAULT=logging.INFO
    :type level_normalized: An int
:type handler_kwargs: Any additional keywords that should be passed into the construction of the
handler. These are necessary for the instantiation of handlers that will
output to anything other than sys.std_out, and sys.std_err.
    :return: a reference to the passed-in logger, now configured with the requested
             formatter, level, and handler(s)
    :rtype: logging.Logger
"""
logger.propagate = False
level_normalized = level if level_normalized is None else level_normalized
try:
colr_id = log_id_map[level_normalized]
except KeyError:
# level_normalized isn't in our custom log_id_map
if level_normalized in logging._levelToName:
# but it has been registered with the logging library
LEVELS_STR2INT[logging._levelToName[level_normalized]] = level_normalized
for lvl,color_code in sorted(log_id_map.items(),key=lambda tpl:tpl[0]):
if lvl<=level_normalized:
colr_id = color_code
else:
break
else:
colr_id = log_id_map["NOTSET"]
log_id_map[level_normalized] = colr_id
if formatter is None:
formatter = _build_default_formatter(level,header_label,colr_id,formatter)
logger.addHandler(_ensured_configure_handler(formatter, level, handler_delegate, handler_kwargs))
return logger | 20c5147d28e9afd7f6f4c9600f9d43ec68aeacb0 | 5,371 |
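# Hedged usage sketch for _logger_setup above (the default-formatter helpers such as
# _build_default_formatter and log_id_map live elsewhere in this module):
#   log = _logger_setup(logging.getLogger("myapp.worker"), "worker",
#                       level=logging.DEBUG, handler_delegate=logging.StreamHandler)
#   log.debug("worker logger configured")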
from os import PathLike
from typing import Tuple
from typing import Union
from typing import List
import pathlib
def _prepare_directory(data_dir: Union[str, PathLike],
                       ignore_bad: bool = True,
                       confirm_uids: bool = True) -> Tuple[List[str], List[str]]:
"""
Reorganizes PPMI `data_dir` to a structure compatible with ``heudiconv``
PPMI data starts off with a sub-directory structure that is not conducive
to use with ``heudiconv``. By default, scans are grouped by scan type
rather than by session, and there are a number of redundant sub-directories
that we don't need. This script reorganizes the data, moving things around
so that the general hierarchy is {subject}/{session}/{scan}, which makes
for a much easier time converting the PPMI dataset into BIDS format.
An added complication is that a minority of the scans in the PPMI database
are "bad" to some degree. For most, it is likely that there was some issue
with exporting/uploading the DICOM files. For others, the conversion
process we intend to utilize (``heudiconv`` and ``dcm2niix``) fails to
appropriately convert the files due to some idiosyncratic reason that could
be fixed but we don't have the patience to fix at the current juncture.
Nonetheless, these scans need to be removed so that we can run the batch of
subjects through ``heudiconv`` without any abrupt failures. By default,
these scans are moved to a sub-directory of `data_dir`; setting
`ignore_bad` to False will retain these scans (but be warned!)
Parameters
----------
data_dir : str or pathlib.Path
Filepath to PPMI dataset, as downloaded from https://ppmi-info.org
ignore_bad : bool, optional
Whether to ignore "bad" scans (i.e., ones that are known to fail
conversion or reconstruction)
confirm_uids : bool, optional
Whether to check that DICOM study instance UIDs for provided subject
are all consistent for a given session. Only applicable if `pydicom`
is installed. Default: True
Returns
-------
subjects : list
List of subjects who are ready to be converted / reconstructed with
``heudiconv``
coerce : list
List of paths to data directories where subjects / sessions may have
had inconsistent study instance UIDs that should be coerced
"""
if isinstance(data_dir, str):
data_dir = pathlib.Path(data_dir).resolve()
# location where "bad" scans will be moved
if ignore_bad:
timeout = data_dir / 'bad'
timeout.mkdir(exist_ok=True)
else:
timeout = None
subjects, coerce = [], []
for subj_dir in sorted(data_dir.glob('*')):
if not subj_dir.is_dir() or subj_dir.name == 'bad':
continue
subj, force = _prepare_subject(subj_dir, timeout=timeout,
confirm_uids=confirm_uids)
subjects.append(subj)
coerce.extend(force)
return subjects, coerce | 2220dec499f875501ab7fd80e4bdcf25c61c641d | 5,372 |
import re
def find_log_for(tool_code, form_id, log_f):
"""Returns an array of lines from log for
given tool code (P1,N3,...) and form_id. The
form_id is taken from runner - thus we search for
formula number ``form_id+1``
"""
log = open(log_f,'r')
current_f = -1
    formula = re.compile(r'.*ltl:(\d+): (.*)$')
    tool = re.compile(r'.*\[([PN]\d+)\]: (.*)$')
    gather = re.compile(r'Performing sanity checks and gathering statistics')
output = []
for line in log:
m_form = formula.match(line)
if m_form:
current_f = int(m_form.group(1))
curr_tool = ''
if current_f < form_id+1:
continue
if current_f > form_id+1:
break
m_tool = tool.match(line)
if m_tool:
curr_tool = m_tool.group(1)
if gather.match(line):
curr_tool = 'end'
if curr_tool == tool_code:
output.append(line.strip())
log.close()
return output | c28659ce832dcc8ad372188a556699f20c9116db | 5,373 |
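# Usage sketch for find_log_for above (the log file name is hypothetical):
#   lines = find_log_for('P1', 2, 'ltlcross.log')
#   # -> log lines produced by tool [P1] while checking formula ltl:3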
import json
import subprocess
def get_images(repository_name):
"""Call ecr to get available images"""
eprint("obtaining available images")
try:
out = json.loads( run_command([
"aws", "ecr", "describe-images",
"--repository-name", repository_name,
"--no-paginate"
]).stdout)
except subprocess.CalledProcessError as e:
err(f"failed to get availabe images from repository: {e}" )
return list(sorted(out["imageDetails"], key=lambda image: image["imagePushedAt"], reverse=True)) | 6c523e071b81e24871cb9908677ed69299d22360 | 5,374 |
import shlex
import inspect
import subprocess
def exe_cmd_and_poll_output(cmd, encoding='UTF-8', is_capture_output=False):
"""
    Print the command's output to stdout in real time.
    :param is_capture_output: whether to also collect the output lines and return them
    :param cmd: command line
    :param encoding: character encoding
    :return: list of stdout lines (empty unless is_capture_output is True)
"""
func_name = inspect.stack()[0][3]
hlog.enter_func(func_name)
hlog.trace("cmd=%s" % cmd)
output = list()
cmd = shlex.split(cmd)
with subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) as p:
while p.poll() is None:
line = p.stdout.readline()
line = str(line, encoding=encoding)
print(line, end='')
if is_capture_output:
output.append(line)
if p.returncode != 0:
hlog.error('Command execution failed')
hlog.exit_func(func_name)
return output | cdc7fe1d6412d722a70e14f1fb1576fbfd16ff4b | 5,375 |
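# Usage sketch for exe_cmd_and_poll_output above: stream a command's output and also
# capture it (the command shown is just an example):
#   lines = exe_cmd_and_poll_output('ls -l', is_capture_output=True)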
def create_patric_boolean_dict(genome_dict,all_ECs):
"""
Create new dict of dicts to store genome names
:param genome_dict: dict of key=genome_id, value=dict of genome's name, id, ec_numbers
:param all_ECs: set of all ECs found across all genomes
"""
## new format: key=genome, value={EC:0 or 1}
## This makes it easy to write to file with pandas
boolean_genome_dict = {}
for genome_id in genome_dict:
boolean_genome_dict[genome_id] = {}
boolean_genome_dict[genome_id]['genome_name'] = genome_dict[genome_id]['genome_name']
boolean_genome_dict[genome_id]['genome_name_with_id'] = genome_dict[genome_id]['genome_name_with_id']
boolean_genome_dict[genome_id]['duplicate'] = genome_dict[genome_id]['duplicate']
for EC in all_ECs:
if EC in genome_dict[genome_id]['ECs']:
boolean_genome_dict[genome_id][EC] = 1
else:
boolean_genome_dict[genome_id][EC] = 0
return boolean_genome_dict | 7ab3554bbf705ee8ce99d1d99ff453b06e3d2b53 | 5,376 |
def append_ast_if_req(field):
""" Adds a new filter to template tags that for use in templates. Used by writing {{ field | append_ast_if_req }}
@register registers the filter into the django template library so it can be used in template.
:param Form.field field:
a field of a form that you would like to return the label and potentially an asterisk for.
:returns:
The field label and, if it's a required field, an asterisk
:rtype: string
"""
if field.field.required:
return field.label + '*'
else:
return field.label | 76e36ead3387729b0536bf84f288c400f376a041 | 5,377 |
def getPileupMixingModules(process):
"""
Method returns two lists:
1) list of mixing modules ("MixingModule")
2) list of data mixing modules ("DataMixingModules")
The first gets added only pileup files of type "mc", the
second pileup files of type "data".
"""
mixModules, dataMixModules = [], []
prodsAndFilters = {}
prodsAndFilters.update(process.producers)
prodsAndFilters.update(process.filters)
for key, value in prodsAndFilters.items():
if value.type_() in ["MixingModule", "DataMixingModule", "PreMixingModule"]:
mixModules.append(value)
if value.type_() == "DataMixingModule":
dataMixModules.append(value)
return mixModules, dataMixModules | 4ee3cc5f7b11e4ad6a846f14dc99e4f82bd04905 | 5,378 |
from typing import Hashable
from typing import Type
from typing import Any
from typing import ForwardRef
import typing
def GetFirstTypeArgImpl_(type_: Hashable, parentClass: Type[Any]) -> Type[Any]:
""" Returns the actual type, even if type_ is a string. """
if isinstance(type_, type):
return type_
if not isinstance(type_, str):
# It's not a type and it's not a str.
# We don't know what to do with it.
raise ValueError("Bad type argument: {}".format(type_))
forwardRef = ForwardRef(type_, is_argument=False)
# pylint: disable=protected-access
evaluated = forwardRef._evaluate(GetClassNamespace_(parentClass), None)
if evaluated is None:
raise RuntimeError("Unable to resolve type {}".format(type_))
if isinstance(evaluated, typing._GenericAlias): # type: ignore
if isinstance(
evaluated.__args__[0], typing._GenericAlias): # type: ignore
# Now use the origin to retrieve the default value type.
return evaluated.__args__[0].__origin__
return evaluated.__args__[0]
return evaluated | f6fd63c4080af886de24465d866f87e716b49992 | 5,379 |
import jax
from jax import tree_structure
from typing import Callable
from typing import Any
from typing import Sequence
def tree_map_zipped(fn: Callable[..., Any], nests: Sequence[Any]):
"""Map a function over a list of identical nested structures.
Args:
fn: the function to map; must have arity equal to `len(list_of_nests)`.
nests: a list of identical nested structures.
Returns:
a nested structure whose leaves are outputs of applying `fn`.
"""
if not nests:
return nests
tree_def = tree_structure(nests[0])
if any([tree_structure(x) != tree_def for x in nests[1:]]):
raise ValueError('All elements must share the same tree structure.')
return jax.tree_unflatten(
tree_def, [fn(*d) for d in zip(*[jax.tree_leaves(x) for x in nests])]) | 8117efd93402fb7ab5e34b4015950c77a24dc038 | 5,380 |
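# Usage sketch for tree_map_zipped above (jnp stands for jax.numpy, imported separately):
#   tree_map_zipped(lambda a, b: a + b,
#                   [{'w': jnp.ones(3)}, {'w': jnp.ones(3)}])
#   # -> {'w': array of [2., 2., 2.]}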
def square_area(side):
"""Returns the area of a square"""
# You have to code here
# REMEMBER: Tests first!!!
return pow(side,2) | e3cc1a0d404c62a9b1d50de63ea924087c77066a | 5,381 |
def match_collision_name_to_mesh_name(properties):
"""
    This function matches the selected collision to the selected mesh.
:param object properties: The property group that contains variables that maintain the addon's correct state.
:return str: The changed collision name.
"""
collisions = get_from_collection(properties.collision_collection_name, 'MESH', properties)
meshes = get_from_collection(properties.mesh_collection_name, 'MESH', properties)
if collisions and meshes:
selected_mesh = [mesh for mesh in meshes if mesh.select_get()][0]
selected_collision = [collision for collision in collisions if collision.select_get()][0]
name = f'{selected_collision.name.split("_")[0]}_{selected_mesh.name}'
selected_collision.name = name
return name
return '' | 3658435cdaa21408664a511e3555f3976c1b3614 | 5,382 |
def _handle_event_colors(color_dict, unique_events, event_id):
"""Create event-integer-to-color mapping, assigning defaults as needed."""
default_colors = dict(zip(sorted(unique_events), cycle(_get_color_list())))
# warn if not enough colors
if color_dict is None:
if len(unique_events) > len(_get_color_list()):
warn('More events than default colors available. You should pass '
'a list of unique colors.')
else:
custom_colors = dict()
for key, color in color_dict.items():
if key in unique_events: # key was a valid event integer
custom_colors[key] = color
elif key in event_id: # key was an event label
custom_colors[event_id[key]] = color
else: # key not a valid event, warn and ignore
warn('Event ID %s is in the color dict but is not '
'present in events or event_id.' % str(key))
# warn if color_dict is missing any entries
unassigned = sorted(set(unique_events) - set(custom_colors))
if len(unassigned):
unassigned_str = ', '.join(str(e) for e in unassigned)
warn('Color was not assigned for event%s %s. Default colors will '
'be used.' % (_pl(unassigned), unassigned_str))
default_colors.update(custom_colors)
return default_colors | 31ae730af0a184b5de469687b74334960c2939ef | 5,383 |
import logging
import warnings
def redirect_logs_and_warnings_to_lists(
used_logs: list[logging.LogRecord], used_warnings: list
) -> RedirectedLogsAndWarnings:
"""For example if using many processes with multiprocessing, it may be beneficial to log from one place.
It's possible to log to variables (logs as well as warnings), pass it to the main process and then log it
with workings filter etc.
To log stored logs and warnings, use
Args:
used_logs (list): List where logs will be stored
used_warnings (list): List where warnings will be stored
Returns:
RedirectedLogsAndWarnings: Object, where you can reset redirect. Logs and warnings you already have
from inserted parameters.
"""
showwarning_backup = warnings.showwarning
OUTPUT_backup = config.OUTPUT
STREAM_backup = config.STREAM
def custom_warn(message, category, filename, lineno, file=None, line=None):
used_warnings.append(
{
"message": message,
"category": category,
"filename": filename,
"lineno": lineno,
"file": file,
"line": line,
}
)
warnings.showwarning = custom_warn
config.OUTPUT = None
config.STREAM = None
config.TO_LIST = used_logs
return RedirectedLogsAndWarnings(
logs=used_logs,
warnings=used_warnings,
showwarning_backup=showwarning_backup,
OUTPUT_backup=OUTPUT_backup,
STREAM_backup=STREAM_backup,
) | 31cda3f036c8438371811b6421a8af2b0f6ac215 | 5,384 |
import frappe
def get_file_picker_settings():
"""Return all the data FileUploader needs to start the Google Drive Picker."""
google_settings = frappe.get_single("Google Settings")
if not (google_settings.enable and google_settings.google_drive_picker_enabled):
return {}
return {
"enabled": True,
"appId": google_settings.app_id,
"developerKey": google_settings.api_key,
"clientId": google_settings.client_id
} | 3b1840e22512e1f9112f9fa4dfb6697299aa248a | 5,385 |
import os
import codecs
import re
def find_version(*file_paths):
"""
    Args:
        *file_paths: path components, relative to this file, of the file containing `__version__`
    Returns:
        str: the version string
"""
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
Args:
*parts:
Returns:
"""
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.") | e4723ef772f210ea0ac9e8c318ddc597b25a8440 | 5,386 |
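# Usage sketch for find_version above (package path is hypothetical):
#   __version__ = find_version("mypackage", "__init__.py")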
from openeye import oechem
def match_subset(pattern: oechem.OEMol, target: oechem.OEMol):
"""Check if target is a subset of pattern."""
# Atoms are equal if they have same atomic number (so explicit Hydrogens are needed as well for a match)
atomexpr = oechem.OEExprOpts_AtomicNumber
# single or double bonds are considered identical (resonance,chirality fix)
bondexpr = oechem.OEExprOpts_EqSingleDouble
ss = oechem.OESubSearch(pattern, atomexpr, bondexpr )
oechem.OEPrepareSearch(target, ss)
return ss.SingleMatch(target) | 99d8d5d73f465f929b6710ec53b5e01f92c1e229 | 5,387 |
def get_quad_strike_vector(q):
"""
Compute the unit vector pointing in the direction of strike for a
quadrilateral in ECEF coordinates. Top edge assumed to be horizontal.
Args:
q (list): A quadrilateral; list of four points.
Returns:
Vector: The unit vector pointing in strike direction in ECEF coords.
"""
P0, P1, P2, P3 = q
p0 = Vector.fromPoint(P0) # fromPoint converts to ECEF
p1 = Vector.fromPoint(P1)
v1 = (p1 - p0).norm()
return v1 | 169b8043a5a385843b92225cba8677ef39bb43a5 | 5,388 |
# imports assumed: numpy, numpy.linalg.svd, and scikit-learn's randomized_svd (named in the docstring)
import numpy as np
from numpy.linalg import svd
from sklearn.utils.extmath import randomized_svd
def CP_to_TT(
cp_cores,
max_rank,
eps=1e-8,
final_round=None,
rsvd_kwargs=None,
verbose=False,
):
"""
Approximate a CP tensor by a TT tensor.
All cores of the TT are rounded to have a TT-rank of most `max_rank`, and singular values of at
most `eps` times the largest singular value. For the first core and last core this rounding is
done using SVD, for all other cores a randomized SVD is employed. Uses
`sklearn.utils.extmath.randomized_svd¶` for randomized SVD. After forming the TT, it is
optionally rounded again with an accuracy of `final_round`.
Parameters
----------
cp_cores: list<np.ndarray>
List of CP cores
max_rank: int
eps: float (default: 1e-8)
rsvd_kwargs: dict (optional)
keyword arguments to pass to the randomized svd method.
verbose: bool (default: False)
"""
d = len(cp_cores)
tt_cores = [None] * d
prev_rank = 1
if rsvd_kwargs is None:
rsvd_kwargs = {}
for alpha in range(d):
core = cp_cores[alpha]
dim = core.shape[0]
if alpha == 0:
U, S, V = svd(cp_cores[0], full_matrices=False)
elif alpha < d - 1: # Use randomized SVD for middle cores
core = np.einsum("ik,jk->ijk", SV, core)
core_mat = core.reshape(
core.shape[0] * core.shape[1], core.shape[2]
)
U, S, V = randomized_svd(
core_mat, n_components=max_rank, **rsvd_kwargs
)
else: # alpha = d - 1
core = np.einsum("ik,jk->ij", SV, core)
U, S, V = svd(core)
        r = max(1, min(max_rank, np.sum(S > eps)))
U = U[:, :r]
S = S[:r]
V = V[:r, :]
SV = (S * V.T).T
if alpha == d - 1:
tt_cores[alpha - 1] = np.einsum(
"ijk,kl->ijl", tt_cores[alpha - 1], U
)
tt_cores[alpha] = SV.reshape(SV.shape + (1,))
else:
tt_cores[alpha] = U.reshape((prev_rank, dim, r))
if verbose:
print(
f"feature {alpha+1}/{d}, compressed TT core size is {tt_cores[alpha].shape}"
)
prev_rank = r
if verbose:
print("Orthogonalizing")
tt = TensorTrain(tt_cores, is_orth=True)
if final_round is not None:
if verbose:
print(f"Rounding to {final_round}...")
tt.round(eps=final_round)
if verbose:
print(f"Final TT rank: {tt.tt_rank}")
return tt | 54c30dec3f18271050150dfcc443fcbfe74c4df5 | 5,389 |
def _create_key_list(entries):
"""
Checks if entries are from FieldInfo objects and extracts keys
:param entries: to create key list from
:return: the list of keys
"""
if len(entries) == 0:
return []
if all(isinstance(entry, FieldInfo) for entry in entries):
return [entry.key for entry in entries]
# this should be a regular list of strings
return entries | bb87bbfbfc1856d4041c12d8babaaa8d8ce42249 | 5,390 |
def compose_rule_hierarchies(rule_hierarchy1, lhs_instances1, rhs_instances1,
rule_hierarchy2, lhs_instances2, rhs_instances2):
"""Compose two rule hierarchies."""
if len(rule_hierarchy1["rules"]) == 0:
return rule_hierarchy2, lhs_instances2, rhs_instances2
if len(rule_hierarchy2["rules"]) == 0:
return rule_hierarchy1, lhs_instances1, rhs_instances1
graphs = set(rule_hierarchy1["rules"].keys()).union(
rule_hierarchy2["rules"].keys())
homomorphisms = set(rule_hierarchy1["rule_homomorphisms"].keys()).union(
rule_hierarchy2["rule_homomorphisms"].keys())
new_rule_hierarchy = {
"rules": {},
"rule_homomorphisms": {}
}
new_lhs_instances = {}
new_rhs_instances = {}
composition_data = {}
# Compose rules
for graph in graphs:
if graph in rule_hierarchy1["rules"]:
rule1 = rule_hierarchy1["rules"][graph]
lhs_instance1 = lhs_instances1[graph]
rhs_instance1 = rhs_instances1[graph]
else:
rule1 = Rule.identity_rule()
lhs_instance1 = {}
rhs_instance1 = {}
if graph in rule_hierarchy2["rules"]:
rule2 = rule_hierarchy2["rules"][graph]
lhs_instance2 = lhs_instances2[graph]
rhs_instance2 = rhs_instances2[graph]
else:
rule2 = Rule.identity_rule()
lhs_instance2 = {}
rhs_instance2 = {}
new_rule, new_lhs_instance, new_rhs_instance, data = compose_rules(
rule1, lhs_instance1, rhs_instance1,
rule2, lhs_instance2, rhs_instance2, return_all=True)
new_rule_hierarchy["rules"][graph] = new_rule
new_lhs_instances[graph] = new_lhs_instance
new_rhs_instances[graph] = new_rhs_instance
composition_data[graph] = data
# Compute rule homomorphisms
for source, target in homomorphisms:
lhs_hom1, p_hom1, rhs_hom1 = rule_hierarchy1["rule_homomorphisms"][
(source, target)]
lhs_hom2, p_hom2, rhs_hom2 = rule_hierarchy2["rule_homomorphisms"][
(source, target)]
source_data = composition_data[source]
target_data = composition_data[target]
# H_G -> H_T
h_hom = get_unique_map_from_pushout(
source_data["h"].nodes(),
source_data["rhs1_h"],
source_data["lhs2_h"],
compose(rhs_hom1, target_data["rhs1_h"]),
compose(lhs_hom2, target_data["lhs2_h"])
)
# P*G_1 -> P*T_1
p1_p_hom = get_unique_map_to_pullback_complement(
target_data["p1_p1_p"], target_data["p1_p_h"],
p_hom1, source_data["p1_p1_p"],
compose(source_data["p1_p_h"], h_hom))
# P*G_2 -> P*T_2
p2_p_hom = get_unique_map_to_pullback_complement(
target_data["p2_p2_p"], target_data["p2_p_h"],
p_hom2, source_data["p2_p2_p"],
compose(source_data["p2_p_h"], h_hom))
# Pi_G -> Pi_T
pi_hom = get_unique_map_to_pullback(
new_rule_hierarchy["rules"][target].p.nodes(),
target_data["pi_p1_p"], target_data["pi_p2_p"],
compose(source_data["pi_p1_p"], p1_p_hom),
compose(source_data["pi_p2_p"], p2_p_hom))
# L_G -> L_T
lambda_hom = get_unique_map_from_pushout(
new_rule_hierarchy["rules"][source].lhs.nodes(),
source_data["lhs1_lambda"], source_data["p1_p_lambda"],
compose(lhs_hom1, target_data["lhs1_lambda"]),
compose(p1_p_hom, target_data["p1_p_lambda"]))
# R_G -> R_T
rho_hom = get_unique_map_from_pushout(
new_rule_hierarchy["rules"][source].rhs.nodes(),
source_data["p2_p_rho"], source_data["rhs2_rho"],
compose(p2_p_hom, target_data["p2_p_rho"]),
compose(rhs_hom2, target_data["rhs2_rho"]))
new_rule_hierarchy["rule_homomorphisms"][(source, target)] = (
lambda_hom, pi_hom, rho_hom
)
return new_rule_hierarchy, new_lhs_instances, new_rhs_instances | 0cfab451af31bfc7b41d610381efb47e8c5c0fb5 | 5,391 |
import tensorflow as tf
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value=value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims) | 50adf652fff47418d1f8f1250a2a6d01f712da76 | 5,392 |
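# Usage sketch for expand_tile above: a [2, 3] tensor becomes shape [4, 2, 3].
#   tiled = expand_tile(tf.ones([2, 3]), 4)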
import logging
import requests
import time
import json
import arrow
import pandas as pd
import pytz
def fetch_exchange(zone_key1='DK-DK1', zone_key2='DK-DK2', session=None,
target_datetime=None, logger=logging.getLogger(__name__)):
"""
Fetches 5-minute frequency exchange data for Danish bidding zones
from api.energidataservice.dk
"""
r = session or requests.session()
sorted_keys = '->'.join(sorted([zone_key1, zone_key2]))
# pick the correct zone to search
if 'DK1' in sorted_keys and 'DK2' in sorted_keys:
zone = 'DK1'
elif 'DK1' in sorted_keys:
zone = 'DK1'
elif 'DK2' in sorted_keys:
zone = 'DK2'
elif 'DK-BHM' in sorted_keys:
zone = 'DK2'
else:
raise NotImplementedError(
'Only able to fetch exchanges for Danish bidding zones')
exch_map = {
'DE->DK-DK1': '"ExchangeGermany"',
'DE->DK-DK2': '"ExchangeGermany"',
'DK-DK1->DK-DK2': '"ExchangeGreatBelt"',
'DK-DK1->NO-NO2': '"ExchangeNorway"',
'DK-DK1->NL': '"ExchangeNetherlands"',
'DK-DK1->SE': '"ExchangeSweden"',
'DK-DK1->SE-SE3': '"ExchangeSweden"',
'DK-DK2->SE': '("ExchangeSweden" - "BornholmSE4")', # Exchange from Bornholm to Sweden is included in "ExchangeSweden"
'DK-DK2->SE-SE4': '("ExchangeSweden" - "BornholmSE4")', # but Bornholm island is reported separately from DK-DK2 in eMap
'DK-BHM->SE': '"BornholmSE4"',
}
if sorted_keys not in exch_map:
raise NotImplementedError(
'Exchange {} not implemented'.format(sorted_keys))
timestamp = arrow.get(target_datetime).strftime('%Y-%m-%d %H:%M')
# fetch real-time/5-min data
sqlstr = 'SELECT "Minutes5UTC" as timestamp, {0} as "netFlow" \
from "{1}" WHERE "PriceArea" = \'{2}\' AND \
"Minutes5UTC" >= (timestamp\'{3}\'-INTERVAL \'24 hours\') AND \
"Minutes5UTC" <= timestamp\'{3}\' \
ORDER BY "Minutes5UTC" ASC'.format(exch_map[sorted_keys],
ids['real_time'],
zone,
timestamp)
url = 'https://api.energidataservice.dk/datastore_search_sql?sql={}'.format(sqlstr)
response = r.get(url)
# raise errors for responses with an error or no data
retry_count = 0
while response.status_code in [429, 403, 500]:
retry_count += 1
if retry_count > 5:
raise Exception('Retried too many times..')
# Wait and retry
        logger.warning('Retrying..')
time.sleep(5 ** retry_count)
response = r.get(url)
if response.status_code != 200:
j = response.json()
if 'error' in j and 'info' in j['error']:
error = j['error']['__type']
text = j['error']['info']['orig']
msg = '"{}" fetching exchange data for {}: {}'.format(
error, sorted_keys, text)
else:
msg = 'error while fetching exchange data for {}: {}'.format(
sorted_keys, json.dumps(j))
raise requests.exceptions.HTTPError(msg)
if not response.json()['result']['records']:
raise ParserException(
"DK.py", 'API returned no data', zone_key=sorted_keys)
df = pd.DataFrame(response.json()['result']['records'])
df = df.set_index('timestamp')
df.index = pd.DatetimeIndex(df.index)
# drop empty rows
df.dropna(how='all', inplace=True)
# all exchanges are reported as net import,
# where as eMap expects net export from
# the first zone in alphabetical order
if 'DE' not in sorted_keys:
df['netFlow'] = -1 * df['netFlow']
# Format output
output = []
for dt in df.index:
data = {
'sortedZoneKeys': sorted_keys,
'datetime': None,
'netFlow': None,
'source': 'api.energidataservice.dk'
}
data['datetime'] = dt.to_pydatetime()
data['datetime'] = data['datetime'].replace(tzinfo=pytz.utc)
data['netFlow'] = df.loc[dt, 'netFlow']
output.append(data)
return output | 93488b4bc24a6a899232a5a1fd0e694d0747ad12 | 5,393 |
from types import FunctionType
def snitch(func):
"""
This method is used to add test function to TestCase classes.
snitch method gets test function and returns a copy of this function
with 'test_' prefix at the beginning (to identify this function as
an executable test).
It provides a way to implement a storage (python module that
contains non-executable test functions) for tests and to include
different set of functions into different test cases.
"""
    return FunctionType(func.__code__, func.__globals__,
                        'test_' + func.__name__, closure=func.__closure__) | b8b54d55269951cb3db4c1f45c375ac36cbd3bdf | 5,394
import h5py as h5
def average_h5(path, path_dc):
"""Return averaged data from HDF5 DC measurements.
Subtracts dark current from the signal measurements.
Args:
- path, path_dc: paths to signal and dark measurement files.
Returns:
- 2D array containing averaged and DC-subtracted measurement.
"""
with h5.File(path, 'r') as f:
with h5.File(path_dc, 'r') as fdc:
arr = (f['data'][...].mean(axis=0) -
fdc['data'][...].mean(axis=0))
return arr | 8daaa7efcdbaf7137d320407b64a96b73f847289 | 5,395 |
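# Usage sketch for average_h5 above (file names are hypothetical; each file is expected
# to hold a 'data' dataset of shape (n_frames, height, width)):
#   frame = average_h5('signal.h5', 'signal_dark.h5')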
def edit_distance(hypothesis, truth, normalize=True, name="edit_distance"):
"""Computes the Levenshtein distance between sequences.
This operation takes variable-length sequences (`hypothesis` and `truth`),
each provided as a `SparseTensor`, and computes the Levenshtein distance.
You can normalize the edit distance by length of `truth` by setting
`normalize` to true.
For example, given the following input:
```python
# 'hypothesis' is a tensor of shape `[2, 1]` with variable-length values:
# (0,0) = ["a"]
# (1,0) = ["b"]
hypothesis = tf.SparseTensor(
[[0, 0, 0],
[1, 0, 0]],
["a", "b"],
(2, 1, 1))
# 'truth' is a tensor of shape `[2, 2]` with variable-length values:
# (0,0) = []
# (0,1) = ["a"]
# (1,0) = ["b", "c"]
# (1,1) = ["a"]
truth = tf.SparseTensor(
[[0, 1, 0],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0]],
["a", "b", "c", "a"],
(2, 2, 2))
normalize = True
```
This operation would return the following:
```python
# 'output' is a tensor of shape `[2, 2]` with edit distances normalized
# by 'truth' lengths.
output ==> [[inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.5, 1.0]] # (1,0): addition, (1,1): no hypothesis
```
Args:
hypothesis: A `SparseTensor` containing hypothesis sequences.
truth: A `SparseTensor` containing truth sequences.
normalize: A `bool`. If `True`, normalizes the Levenshtein distance by
length of `truth.`
name: A name for the operation (optional).
Returns:
A dense `Tensor` with rank `R - 1`, where R is the rank of the
`SparseTensor` inputs `hypothesis` and `truth`.
Raises:
TypeError: If either `hypothesis` or `truth` are not a `SparseTensor`.
"""
if not isinstance(
hypothesis,
(sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Hypothesis must be a SparseTensor.")
if not isinstance(
truth, (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
raise TypeError("Truth must be a SparseTensor.")
return gen_array_ops.edit_distance(
hypothesis.indices,
hypothesis.values,
hypothesis.dense_shape,
truth.indices,
truth.values,
truth.dense_shape,
normalize=normalize,
name=name) | 9dfcebb6f49de41c5d5d6bfcc849873f14e2b3f9 | 5,396 |
import os
from datetime import datetime
def file_timestamp(path):
"""
Returns a datetime.datetime() object representing the given path's "latest"
timestamp, which is calculated via the maximum (newest/youngest) value
between ctime and mtime. This accounts for platform variations in said
values. If the path doesn't exist, the earliest timestamp supported by the
system is returned -- typically the epoch.
"""
try:
st = os.stat(path)
timestamp = max(st.st_mtime, st.st_ctime)
except OSError:
timestamp = 0
    return datetime.fromtimestamp(timestamp) | f75d381d10072f2ada8c62f333d32f4fc8d6ccf3 | 5,397
import numpy as np
def kl_bernoulli(p: np.ndarray, q: np.ndarray) -> np.ndarray:
"""
Compute KL-divergence between 2 probabilities `p` and `q`. `len(p)` divergences are calculated
simultaneously.
Parameters
----------
p
Probability.
q
Probability.
Returns
-------
Array with the KL-divergence between `p` and `q`.
"""
m = np.clip(p, 0.0000001, 0.9999999999999999).astype(float)
n = np.clip(q, 0.0000001, 0.9999999999999999).astype(float)
return m * np.log(m / n) + (1. - m) * np.log((1. - m) / (1. - n)) | 91567169da22ae42bd90c15292f1699f53a184ab | 5,398 |
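# Usage sketch for kl_bernoulli above: elementwise KL divergence between Bernoulli parameters.
#   kl_bernoulli(np.array([0.5, 0.9]), np.array([0.5, 0.1]))
#   # -> array([0.0, ~1.76])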
def _incremental_mean_and_var(X, last_mean, last_variance, last_sample_count):
"""Calculate mean update and a Youngs and Cramer variance update.
last_mean and last_variance are statistics computed at the last step by the
function. Both must be initialized to 0.0. In case no scaling is required
last_variance can be None. The mean is always required and returned because
necessary for the calculation of the variance. last_n_samples_seen is the
number of samples encountered until now.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
last_mean : array-like, shape: (n_features,)
last_variance : array-like, shape: (n_features,)
last_sample_count : array-like, shape (n_features,)
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
If None, only mean is computed
updated_sample_count : array, shape (n_features,)
Notes
-----
NaNs are ignored during the algorithm.
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: analysis and recommendations, The American Statistician, Vol. 37, No. 3,
pp. 242-247
Also, see the sparse implementation of this in
`utils.sparsefuncs.incr_mean_variance_axis` and
`utils.sparsefuncs_fast.incr_mean_variance_axis0`
"""
# old = stats until now
# new = the current increment
# updated = the aggregated stats
last_sum = last_mean * last_sample_count
new_sum = _safe_accumulator_op(np.nansum, X, axis=0)
new_sample_count = np.sum(~np.isnan(X), axis=0)
updated_sample_count = last_sample_count + new_sample_count
updated_mean = (last_sum + new_sum) / updated_sample_count
if last_variance is None:
updated_variance = None
else:
new_unnormalized_variance = (
_safe_accumulator_op(np.nanvar, X, axis=0) * new_sample_count)
last_unnormalized_variance = last_variance * last_sample_count
with cupyx.errstate(divide=None, invalid=None):
last_over_new_count = last_sample_count / new_sample_count
updated_unnormalized_variance = (
last_unnormalized_variance + new_unnormalized_variance +
last_over_new_count / updated_sample_count *
(last_sum / last_over_new_count - new_sum) ** 2)
zeros = last_sample_count == 0
updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
updated_variance = updated_unnormalized_variance / updated_sample_count
return updated_mean, updated_variance, updated_sample_count | 8fac3715bed8431f0910bbf7a37d3924afece9c0 | 5,399 |