content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
import numpy as np
from skimage.exposure import rescale_intensity
def scale_intensity(data, out_min=0, out_max=255):
"""Scale intensity of data in a range defined by [out_min, out_max], based on the 2nd and 98th percentiles."""
p2, p98 = np.percentile(data, (2, 98))
return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max)) | 57df2200fbefa4ab6f1c91f46063b1b1f147301e | 23,078 |
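A minimal usage sketch for scale_intensity above, assuming numpy and scikit-image are available and the function is in scope; the random image is only illustrative:
import numpy as np

img = np.random.randint(0, 256, size=(64, 64)).astype(np.uint8)
stretched = scale_intensity(img)          # clip at the 2nd/98th percentiles, stretch to 0..255
print(stretched.min(), stretched.max())   # roughly 0 and 255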
def raises_regex_op(exc_cls, regex, *args):
"""
self.assertRaisesRegex(
ValueError, "invalid literal for.*XYZ'$", int, "XYZ"
)
asserts.assert_fails(lambda: int("XYZ"),
        ".*?ValueError.*invalid literal for.*XYZ'$")
"""
# print(args)
# asserts.assert_fails(, f".*?{exc_cls.value.value}")
invokable = _codegen.code_for_node(
cst.Call(
func=args[0].value,
args=[
a.with_changes(
whitespace_after_arg=cst.SimpleWhitespace(value="")
)
for a in args[1:]
],
)
)
regex = f'".*?{exc_cls.value.value}.*{regex.value.evaluated_value}"'
return cst.parse_expression(
f"asserts.assert_fails(lambda: {invokable}, {regex})"
) | 9b0e6aa0692d2285467578083f76c888de9874c1 | 23,079 |
def getParInfo(sourceOp, pattern='*', names=None,
includeCustom=True, includeNonCustom=True):
"""
Returns parInfo dict for sourceOp. Filtered in the following order:
pattern is a pattern match string
names can be a list of names to include, default None includes all
includeCustom to include custom parameters
includeNonCustom to include non-custom parameters
    parInfo is {<parName>:[par.val, par.expr, par.mode string, par.bindExpr,
    par.default]...}
"""
parInfo = {}
for p in sourceOp.pars(pattern):
if (names is None or p.name in names) and \
((p.isCustom and includeCustom) or \
(not p.isCustom and includeNonCustom)):
parInfo[p.name] = [p.val, p.expr if p.expr else '', p.mode.name,
p.bindExpr, p.default]
return parInfo | 01eafb065ef98e1fd4676898aeb8d0c5a7a74b9d | 23,080 |
def generate_crontab(config):
"""Generate a crontab entry for running backup job"""
command = config.cron_command.strip()
schedule = config.cron_schedule
if schedule:
schedule = schedule.strip()
schedule = strip_quotes(schedule)
if not validate_schedule(schedule):
schedule = config.default_crontab_schedule
else:
schedule = config.default_crontab_schedule
return f'{schedule} {command}\n' | d958c47e0673d19dbd8d8eb2493995cdc2ada7ff | 23,081 |
import attr
def to_dict(observation: Observation):
"""Convert an Observation object back to dict format"""
return _unprefix_attrs(attr.asdict(observation)) | 4ffd5ad24fee6bd983d7cb85ac7d1b9eeb56e751 | 23,085 |
def _consolidate_extrapolated(candidates):
"""Get the best possible derivative estimate, given an error estimate.
Going through ``candidates`` select the best derivative estimate element-wise using
the estimated candidates, where best is defined as minimizing the error estimate
from the Richardson extrapolation.
See https://tinyurl.com/ubn3nv5 for corresponding code in numdifftools and
https://tinyurl.com/snle7mb for an explanation of how errors of Richardson
extrapolated derivative estimates can be estimated.
Args:
candidates (dict): Dictionary containing different derivative estimates and
their error estimates.
Returns:
        consolidated (np.ndarray): Array of same shape as input derivative estimates.
        updated_candidates (tuple): Pair (candidate_der_dict, candidate_err_dict) holding the
            best derivative estimate and the corresponding error for each method.
    """
# first find minimum over steps for each method
candidate_der_dict = {}
candidate_err_dict = {}
for key in candidates.keys():
_der = candidates[key]["derivative"]
_err = candidates[key]["error"]
derivative, error = _select_minimizer_along_axis(_der, _err)
candidate_der_dict[key] = derivative
candidate_err_dict[key] = error
# second find minimum over methods
candidate_der = np.stack(list(candidate_der_dict.values()))
candidate_err = np.stack(list(candidate_err_dict.values()))
consolidated, _ = _select_minimizer_along_axis(candidate_der, candidate_err)
updated_candidates = (candidate_der_dict, candidate_err_dict)
return consolidated, updated_candidates | 2641a56d852ed9e4065c7dfad4b1fd51ef581b91 | 23,086 |
import torch
from torch import nn
def build_wideresnet_hub(
num_class: int,
name='wide_resnet50_2',
pretrained=True):
"""[summary]
Normalized
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
Args:
name (str, optional): [description]. Defaults to 'wide_resnet50_2'.
pretrained (bool, optional): [description]. Defaults to True.
"""
model = torch.hub.load(
'pytorch/vision:v0.6.0',
name,
pretrained=pretrained)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_class)
return model | 39f977a9ab368bd9fa15fb36c600c350afca7f53 | 23,088 |
from astropy.io import fits
from astropy.utils.data import download_file
def get_phoenix_model_wavelengths(cache=True):
"""
Return the wavelength grid that the PHOENIX models were computed on,
transformed into wavelength units in air (not vacuum).
"""
wavelength_url = ('ftp://phoenix.astro.physik.uni-goettingen.de/v2.0/'
'HiResFITS/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits')
wavelength_path = download_file(wavelength_url, cache=cache, timeout=30)
wavelengths_vacuum = fits.getdata(wavelength_path)
# Wavelengths are provided at vacuum wavelengths. For ground-based
# observations convert this to wavelengths in air, as described in
# Husser 2013, Eqns. 8-10:
sigma_2 = (10**4 / wavelengths_vacuum)**2
f = (1.0 + 0.05792105/(238.0185 - sigma_2) + 0.00167917 /
(57.362 - sigma_2))
wavelengths_air = wavelengths_vacuum / f
return wavelengths_air | ff5632086ffb3aa3eb6655c3ba18e182f0724bc4 | 23,089 |
def accuracy_boundingbox(data, annotation, method, instance): ## NOT IMPLEMENTED
"""
Calculate how far off each bounding box was
Parameters
----------
data: color_image, depth_image
annotation: pascal voc annotation
method: function(instance, *data)
instance: instance of object
Returns
-------
(int, int, int) boxes_found, boxes_missed, boxes_extra.
"""
FOUND_THRESHOLD = 5 # pixels
##
bounding_boxes = method(instance, *data)
##
boxes_found, boxes_missed, boxes_extra = 0, 0, 0
for value in annotation.findall('object'):
annotation_bounding_box = value.find('bndbox')
ax1, ay1, ax2, ay2 = [int(annotation_bounding_box.find(param).text) for param in ['xmin', 'ymin', 'xmax', 'ymax']]
for bounding_box in bounding_boxes:
X, Y, Z = [], [], []
for x, y in bounding_box.vertices:
X.append(x)
Y.append(y)
X, Y = np.unique(X), np.unique(Y)
bx1, by1, bx2, by2 = min(X), min(Y), max(X), max(Y)
##
x1_close = bx1 - FOUND_THRESHOLD <= ax1 <= bx1 + FOUND_THRESHOLD
y1_close = by1 - FOUND_THRESHOLD <= ay1 <= by1 + FOUND_THRESHOLD
x2_close = bx2 - FOUND_THRESHOLD <= ax2 <= bx2 + FOUND_THRESHOLD
y2_close = by2 - FOUND_THRESHOLD <= ay2 <= by2 + FOUND_THRESHOLD
if all((x1_close, y1_close, x2_close, y2_close)):
boxes_found += 1
boxes_missed = len(annotation.findall('object')) - boxes_found
boxes_extra = len(bounding_boxes) - boxes_found
return boxes_found, boxes_missed, boxes_extra | 78fa63d5e2cbdad843feaddd277b98886789a517 | 23,091 |
import logging
def test_kbd_gpios():
"""Test keyboard row & column GPIOs.
Note, test only necessary on 50pin -> 50pin flex
These must be tested differently than average GPIOs as the servo side logic,
a 4to1 mux, is responsible for shorting colX to rowY where X == 1|2 and Y
= 1|2|3. To test the flex traces I'll set the row to both high and low
and examine that the corresponding column gets shorted correctly.
Returns:
errors: integer, number of errors encountered while testing
"""
errors = 0
# disable everything initially
kbd_off_cmd = 'kbd_m1_a0:1 kbd_m1_a1:1 kbd_m2_a0:1 kbd_m2_a1:1 kbd_en:off'
for col_idx in xrange(2):
if not set_ctrls(kbd_off_cmd):
logging.error('Disabling all keyboard rows/cols')
errors += 1
break
mux_ctrl = KBD_MUX_COL_IDX[col_idx]
kbd_col = 'kbd_col%d' % (col_idx + 1)
for row_idx in xrange(3):
kbd_row = 'kbd_row%d' % (row_idx + 1)
cmd = '%s1:%d %s0:%d ' % (mux_ctrl, row_idx>>1, mux_ctrl,
row_idx & 0x1)
cmd += 'kbd_en:on %s' % (kbd_col)
(retval, ctrls) = get_ctrls(cmd, timeout=30)
if not retval:
logging.error('ctrls = %s', ctrls)
errors += 1
for set_val in [GPIO_MAPS[ctrls[kbd_col]], ctrls[kbd_col]]:
cmd = '%s:%s sleep:0.2 %s' % (kbd_row, set_val, kbd_col)
(retval, ctrls) = get_ctrls(cmd)
if not retval:
logging.error('ctrls = %s', ctrls)
errors += 1
if ctrls[kbd_col] != set_val:
logging.error('After setting %s, %s != %s', kbd_row,
kbd_col, set_val)
errors += 1
return errors | 237f26a5da5711c480ef9dadbaa46170ca97c884 | 23,093 |
def fields_for_model(model):
"""
This function returns the fields for a schema that matches the provided
nautilus model.
Args:
model (nautilus.model.BaseModel): The model to base the field list on
Returns:
(dict<field_name: str, graphqlType>): A mapping of field names to
graphql types
"""
# the attribute arguments (no filters)
args = {field.name.lower() : convert_peewee_field(field) \
for field in model.fields()}
# use the field arguments, without the segments
return args | 9eb6f1a51513ff6b42ab720a1196cea1402cac23 | 23,094 |
def _landstat(landscape, updated_model, in_coords):
"""
Compute the statistic for transforming coordinates onto an existing
"landscape" of "mountains" representing source positions. Since the
landscape is an array and therefore pixellated, the precision is limited.
Parameters
----------
landscape: nD array
synthetic image representing locations of sources in reference plane
updated_model: Model
transformation (input -> reference) being investigated
in_coords: nD array
input coordinates
Returns
-------
float:
statistic representing quality of fit to be minimized
"""
def _element_if_in_bounds(arr, index):
try:
return arr[index]
except IndexError:
return 0
out_coords = updated_model(*in_coords)
if len(in_coords) == 1:
out_coords = (out_coords,)
out_coords2 = tuple((coords - 0.5).astype(int) for coords in out_coords)
result = sum(_element_if_in_bounds(landscape, coord[::-1]) for coord in zip(*out_coords2))
################################################################################
# This stuff replaces the above 3 lines if speed doesn't hold up
# sum = np.sum(landscape[i] for i in out_coords if i>=0 and i<len(landscape))
# elif len(in_coords) == 2:
# xt, yt = out_coords
# sum = np.sum(landscape[iy,ix] for ix,iy in zip((xt-0.5).astype(int),
# (yt-0.5).astype(int))
# if ix>=0 and iy>=0 and ix<landscape.shape[1]
# and iy<landscape.shape[0])
################################################################################
return -result | 0205654ef8580a0d6731155d7d0c2b2c1a360e9c | 23,095 |
def presence(label):
"""Higher-order function to test presence of a given label
"""
return lambda x, y: 1.0 * ((label in x) == (label in y)) | 49c7e0b4b7af69c808917af7ab4d6b56a7a4ef89 | 23,096 |
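A small illustration of the higher-order presence above; the label sets are made up:
has_cat = presence("cat")
print(has_cat({"cat", "dog"}, {"cat"}))   # 1.0, both sides contain "cat"
print(has_cat({"dog"}, {"cat"}))          # 0.0, only one side contains "cat"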
def make_formula(formula_str, row, col, first_data_row=None):
# noinspection SpellCheckingInspection
"""
A cell will be written as a formula if the HTML tag has the attribute "data-excel" set.
Note that this function is called when the spreadsheet is being created. The cell it applies to knows where it
is and what the first data row is.
Allowed formula strings:
"SUM ROW A-C": sum the current row from A-C
"SUM ROW A,C": sum cells A and C in the current row
"SUM COL": sums current col from first_row to row - 1
"FORMULA RAW IF(F13 > 0, (F13-E13)/F13, '')": uses formula as is
"FORMULA RELATIVE IF(colm001rowp000 > 0, (colm001rowp0-colm002rowp000)/colm001rowp001, '')": creates the
formula relative to the current location. colm002 means two cols to the left of the current cell.
rowp000 means the current row plus 0 (e.g. the current row)
:param formula_str: the value of the "data-excel" tag containing params for generating the formula
:param row: cell row
:param col: cell column
:param first_data_row: for column formulas
:return: a string
"""
parts = formula_str.split(' ')
func = parts[0]
args = parts[-1]
formula = ''
if func == 'SUM':
func_modifier = parts[1]
if func_modifier == 'ROW':
if '-' in args:
cols = args.split('-')
formula = '=SUM({}{}:{}{})'.format(cols[0], row + 1, cols[1], row + 1)
elif ',' in args:
cols = map(methodcaller('strip'), args.split(','))
# Put the row number after each col letter and then add them together
cols = '+'.join(map(lambda x: x + str(row + 1), cols))
formula = '=SUM({})'.format(cols)
elif func_modifier == 'COL':
formula = '=SUM({}:{})'.format(xl_rowcol_to_cell(first_data_row, col), xl_rowcol_to_cell(row - 1, col))
elif func == 'FORMULA':
func_modifier = parts[1]
formula_str = ' '.join(parts[2:])
if func_modifier == 'RAW':
formula = '=' + formula_str
elif func_modifier == 'RELATIVE':
formula = '=' + locate_cells(formula_str, row, col)
return formula | d9a41a2906151a050afa78e099278b7d5462faa9 | 23,098 |
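A worked example of the 'SUM ROW' branch of make_formula above; the other branches additionally rely on operator.methodcaller, xl_rowcol_to_cell and locate_cells being importable:
# row index 4 corresponds to spreadsheet row 5
print(make_formula('SUM ROW A-C', row=4, col=0))   # '=SUM(A5:C5)'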
def select(population, to_retain):
"""Go through all of the warroirs and check which ones are best fit to breed and move on."""
#This starts off by sorting the population then gets all of the population dived by 2 using floor divison I think
#that just makes sure it doesn't output as a pesky decimal. Then it takes one half of memebers which shall be females.
# which tend to be not as strong as males(Not being sexist just science thats how we are built.) So the front half will be
#The lower digits because we sorted it then the upper half will be males. Then it finishes off by getting the strongest males and
#females and returns them.
sorted_pop = sorted(population)
to_keep_by_sex = to_retain//2
members = len(sorted_pop)//2
females = sorted_pop[:members]
males = sorted_pop[members:]
strong_females = females[-to_keep_by_sex:]
strong_males = males[-to_keep_by_sex:]
return strong_males, strong_females | 4dc1251f09e6bd976d170017bbd328563e9ef786 | 23,099 |
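A quick numeric sketch of select above, using plain fitness scores as the population (illustrative data only):
population = [3, 9, 1, 7, 5, 8, 2, 6]
strong_males, strong_females = select(population, to_retain=4)
print(strong_females)   # [3, 5], top 2 of the lower half
print(strong_males)     # [8, 9], top 2 of the upper half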
import numpy
from math import exp, log
def normal_function( sigma, width ):
    """
    Default fitting function; returns values from a normal distribution
    """
log2 = log(2)
sigma2 = float(sigma)**2
lo, hi = width, width+1
def normal_func(value, index):
return value * exp( -index*index/sigma2 * log2 )
values = [ normal_func(1, x) for x in range(-lo, hi) ]
values = numpy.array( values )
return lo, hi, values | 797b4eb00db5a0d4675b547664982f537da9e6ab | 23,100 |
def remove_duplicates(l):
"""
Remove any duplicates from the original list.
Return a list without duplicates.
"""
new_l = l[:]
tmp_l = new_l[:]
for e in l:
tmp_l.remove(e)
if e in tmp_l:
new_l.remove(e)
return new_l | 81132e3b23592589c19ddb11f661e80be6984782 | 23,102 |
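Example behaviour of remove_duplicates above: repeated elements keep only their final occurrence, everything else keeps its order:
print(remove_duplicates([1, 2, 1, 3, 2]))   # [1, 3, 2]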
import functools
def in_boudoir(callback):
"""Décorateur : commande utilisable dans un boudoir uniquement.
Lors d'une invocation de la commande décorée hors d'un boudoir
(enregistré dans :class:`.bdd.Boudoir`), affiche un message d'erreur.
Ce décorateur n'est utilisable que sur une commande définie dans un
Cog.
"""
@functools.wraps(callback)
async def new_callback(self, ctx, *args, **kwargs):
try:
Boudoir.from_channel(ctx.channel)
except ValueError:
await ctx.reply("Cette commande est invalide en dehors "
"d'un boudoir.")
else:
# if ctx.authors
return await callback(self, ctx, *args, **kwargs)
return new_callback | ed086805f2d865331f559406218f9a9ecd4a7194 | 23,103 |
import numpy as np
def xyz_to_pix(position, bounds, pixel_size):
"""Convert from 3D position to pixel location on heightmap."""
u = int(np.round((position[1] - bounds[1, 0]) / pixel_size))
v = int(np.round((position[0] - bounds[0, 0]) / pixel_size))
return (u, v) | d38b45d573a689f72fda4a7ed477be831bea26a8 | 23,105 |
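A hypothetical call to xyz_to_pix above, assuming a 1 m x 1 m workspace with 5 mm pixels; the bounds layout and numbers are made up:
import numpy as np

bounds = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 0.3]])   # x, y, z bounds in metres
print(xyz_to_pix((0.25, 0.50, 0.10), bounds, pixel_size=0.005))   # (100, 50)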
import random
def get_random_lb():
""" Selects a random location from the load balancers file.
Returns:
A string specifying a load balancer IP.
"""
with open(LOAD_BALANCERS_FILE) as lb_file:
return random.choice([':'.join([line.strip(), str(PROXY_PORT)])
for line in lb_file]) | 2f8620a213bfc87dd3eae662ace409a31597931b | 23,106 |
def enlarge(n):
"""
Multiplies a number by 100
Param: n (numeric) the number to enlarge
Return the enlarged number(numeric)
"""
return n * 100 | 6685af169c8e321ceabc0086d1835d459a627a59 | 23,107 |
import re
def ResolveWikiLinks(html):
"""Given an html file, convert [[WikiLinks]] into links to the personal wiki:
<a href="https://z3.ca/WikiLinks">WikiLinks</a>"""
wikilink = re.compile(r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]')
def linkify(match):
wiki_root = 'https://z3.ca'
wiki_name = match.group(1).replace('\n', ' ')
wiki_slug = wiki_name.replace(' ', '_')
return f'<a class="wiki" href="{wiki_root}/{wiki_slug}">{wiki_name}</a>'
return wikilink.sub(linkify, html) | bef3e309aa2489e720a1742e327e9dd4edf6d720 | 23,108 |
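Example of ResolveWikiLinks above, covering both the bare and the piped [[target|label]] forms; note the piped form links the label text, not the target:
html = "See [[Some Page]] and [[target|Display Text]]."
print(ResolveWikiLinks(html))
# See <a class="wiki" href="https://z3.ca/Some_Page">Some Page</a> and <a class="wiki" href="https://z3.ca/Display_Text">Display Text</a>.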
def handle_new_favorite(query_dict):
"""Does not handle multi-part data properly.
Also, posts don't quite exist as they should."""
for required in POST_REQUIRED_PARAMS:
if required not in query_dict:
return False
# not yet safe to use.
post_id = str(string_from_interwebs(query_dict["post"][0])).strip()
author_id = str(string_from_interwebs(query_dict["user"][0])).strip()
with Connection('localhost', 27017) as connection:
favorite = check_favorite(author_id, post_id, connection)
if favorite is not None:
delete_favorite(favorite, connection)
update_post(post_id, connection)
return True
return False | 7caed7d280870cb7a67b1bfd53200ec3486a4f41 | 23,109 |
def steam_ratings(html_text):
"""Tries to get both 'all' and 'recent' ratings."""
return {
"overall": steam_all_app_rating(html_text),
"recent": steam_recent_app_rating(html_text),
} | 71cb3e85e9a1f01e5d4b080372b24c2f848bf7cf | 23,110 |
import numpy as np
def separation_cos_angle(lon0, lat0, lon1, lat1):
"""Evaluate the cosine of the angular separation between two
direction vectors."""
return (np.sin(lat1) * np.sin(lat0) + np.cos(lat1) * np.cos(lat0) *
np.cos(lon1 - lon0)) | a7e1a7ecdfd0ab7f1dc58b99190cc9eeab7fcf20 | 23,111 |
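A quick numerical check of separation_cos_angle above (angles in radians, numpy assumed): two points 90 degrees apart on the equator give a cosine of roughly zero:
import numpy as np

print(separation_cos_angle(0.0, 0.0, np.pi / 2, 0.0))   # ~0.0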
def get_band_params(meta, fmt='presto'):
"""
Returns (fmin, fmax, nchans) given a metadata dictionary loaded from
a specific file format.
"""
if fmt == 'presto':
fbot = meta['fbot']
nchans = meta['nchan']
ftop = fbot + nchans * meta['cbw']
fmin = min(fbot, ftop)
fmax = max(fbot, ftop)
elif fmt == 'sigproc':
raise ValueError("Cannot parse observing band parameters from data in sigproc format")
else:
raise ValueError(f"Unknown format: {fmt}")
return fmin, fmax, nchans | 61e9b0781559de431e5189b89f69a0763b039d8f | 23,113 |
import functools
def logging(f):
"""Decorate a function to log its calls."""
@functools.wraps(f)
def decorated(*args, **kwargs):
sargs = map(str, args)
skwargs = (f'{key}={value}' for key, value in kwargs.items())
print(f'{f.__name__}({", ".join([*sargs, *skwargs])})...')
try:
value = f(*args, **kwargs)
except Exception as cause:
print(f'! {cause}')
raise
print(f'=> {value}')
return value
return decorated | 25822434fe331c59ce64b6f9cd5ec89b70b2542a | 23,114 |
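Usage sketch for the logging decorator above; the add function is just a stand-in:
@logging
def add(a, b):
    return a + b

add(2, b=3)
# prints:
# add(2, b=3)...
# => 5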
from functools import partial
def yandex_mean_encoder(columns=None, n_jobs=1, alpha=100, true_label=None):
"""
Smoothed mean-encoding with custom smoothing strength (alpha)
http://learningsys.org/nips17/assets/papers/paper_11.pdf
"""
    builder = partial(
        build_yandex_mean_encoder,
        alpha=alpha,
    )
    return TargetCategoryEncoder(builder, columns, n_jobs, true_label) | b38ac44cb2ff12c33f415d716cc4e13006eabf0b | 23,115 |
from typing import Optional
import math
from typing import Counter
def cure_sample_part(
X: np.ndarray,
k: int,
c: int = 3,
alpha: float = 0.3,
u_min: Optional[int] = None,
f: float = 0.3,
d: float = 0.02,
p: Optional[int] = None,
q: Optional[int] = None,
n_rep_finalclust: Optional[int] = None,
plotting: bool = True,
):
"""
CURE algorithm variation for large datasets.
Partition the sample space into p partitions, each of size len(X)/p, then partially cluster each
partition until the final number of clusters in each partition reduces to n/(pq). Then run a second
clustering pass on the n/q partial clusters for all the partitions.
:param X: input data array.
:param k: desired number of clusters.
:param c: number of representatives for each cluster.
:param alpha: parameter that regulates the shrinking of representative points toward the centroid.
:param u_min: size of the smallest cluster u.
:param f: percentage of cluster points (0 <= f <= 1) we would like to have in the sample.
:param d: (0 <= d <= 1) the probability that the sample contains less than f*|u| points of cluster u is less than d.
:param p: the number of partitions.
:param q: the number >1 such that each partition reduces to n/(pq) clusters.
:param n_rep_finalclust: number of representatives to use in the final assignment phase.
:param plotting: if True, plots all intermediate steps.
    :return: (clusters, rep, CURE_df): the clusters dictionary, the dictionary of representatives, and the CURE matrix.
"""
if ((p is None) and (q is not None)) or ((q is None) and (p is not None)):
raise ValueError("p and q must be both specified if not None.")
    # choose the parameters suggested by the paper if the user doesn't provide input parameters
if u_min is None:
u_min = round(len(X) / k)
if n_rep_finalclust is None:
n_rep_finalclust = c
_, df_nonan = build_initial_matrices(X)
# this is done to ensure that the algorithm starts even when input params are bad
while True:
print("new f: ", f)
print("new d: ", d)
n = math.ceil(chernoffBounds(u_min=u_min, f=f, N=len(X), k=k, d=d))
if n <= len(df_nonan):
b_sampled = df_nonan.sample(n, random_state=42)
break
else:
if f >= 0.19:
f = f - 0.1
else:
d = d * 2
b_notsampled = df_nonan.loc[
[str(i) for i in range(len(df_nonan)) if str(i) not in b_sampled.index], :
]
# find the best p and q according to the paper
if (p is None) and (q is None):
def g(x):
res = (x[1] - 1) / (x[0] * x[1]) + 1 / (x[1] ** 2)
return res
results = {}
for i in range(2, 15):
for j in range(2, 15):
results[(i, j)] = g([i, j])
p, q = max(results, key=results.get)
print("p: ", p)
print("q: ", q)
if (n / (p * q)) < 2 * k:
print("n/pq is less than 2k, results could be wrong.")
if k * d >= 1:
print("k*d is greater or equal to 1, results could be wrong.")
# form the partitions
lin_sp = np.linspace(0, n, p + 1, dtype="int")
# lin_sp
b_partitions = []
for num_p in range(p):
# try:
b_partitions.append(b_sampled.iloc[lin_sp[num_p]: lin_sp[num_p + 1]])
# except:
# b_partitions.append(b_sampled.iloc[lin_sp[num_p]:])
k_prov = round(n / (p * q))
# perform clustering on each partition separately
partial_clust = []
partial_rep = []
partial_CURE_df = []
for i in range(p):
print("\n")
print(i)
clusters, rep, CURE_df = cure(
b_partitions[i].values,
k=k_prov,
c=c,
alpha=alpha,
plotting=plotting,
partial_index=b_partitions[i].index,
)
partial_clust.append(clusters)
partial_rep.append(rep)
partial_CURE_df.append(CURE_df)
# merging all data into single components
# clusters
clust_tot = {}
for d in partial_clust:
clust_tot.update(d)
# representatives
rep_tot = {}
for d in partial_rep:
rep_tot.update(d)
# mat CURE_df
diz = {i: len(b_partitions[i]) for i in range(p)}
num_freq = Counter(diz.values()).most_common(1)[0][0]
bad_ind = [k for k, v in diz.items() if v != num_freq]
for ind in bad_ind:
partial_CURE_df[ind]["{0}x".format(diz[ind])] = [np.nan] * k_prov
partial_CURE_df[ind]["{0}y".format(diz[ind])] = [np.nan] * k_prov
CURE_df_tot = partial_CURE_df[0].append(partial_CURE_df[1])
for i in range(1, len(partial_CURE_df) - 1):
CURE_df_tot = CURE_df_tot.append(partial_CURE_df[i + 1])
# mat Xdist
X_dist_tot = dist_mat_gen_cure(rep_tot)
# final_clustering
prep_data = [clust_tot, rep_tot, CURE_df_tot, X_dist_tot]
clusters, rep, CURE_df = cure(
b_sampled.values,
k=k,
c=c,
alpha=alpha,
preprocessed_data=prep_data,
partial_index=b_sampled.index,
n_rep_finalclust=n_rep_finalclust,
not_sampled=b_notsampled.values,
plotting=plotting,
not_sampled_ind=b_notsampled.index,
)
return clusters, rep, CURE_df | b85d2f23bd1b64a0f17abc5178cfc25e442419b5 | 23,116 |
from typing import Optional
import pickle
def deserialize_result(r: bytes, *, deserializer: Optional[Deserializer] = None) -> JobResult:
"""Given bytes, deserializes them into a JobResult object.
:param r: bytes to deserialize.
:param deserializer: Optional serializer to use for deserialization. If not set, pickle is used.
:return: A JobResult object.
:raises DeserializationError: If bytes cannot be converted to JobResult.
"""
if deserializer is None:
deserializer = pickle.loads
try:
d = deserializer(r)
return JobResult(
job_try=d['t'],
function=d['f'],
args=d['a'],
kwargs=d['k'],
enqueue_time=ms_to_datetime(d['et']),
score=None,
success=d['s'],
result=d['r'],
start_time=ms_to_datetime(d['st']),
finish_time=ms_to_datetime(d['ft']),
)
except Exception as e:
raise DeserializationError('unable to deserialize job result') from e | 049a453a7277f30a38019ca59bedbc458fbaf84c | 23,118 |
def form_symb_dCdU():
"""Form a symbolic version of dCdU"""
dCdU = form_nd_array("dCdU",[3,3,8*12])
for I in range(3):
for J in range(3):
for K in range(3,8*12):
dCdU[I,J,K] = 0
return dCdU | ec802da453dd7c522bf5725fd70fd16a2406c12e | 23,119 |
import torch
def predict(X, y, clf, onehot_encoder, params):
"""
Runs a forward pass for a SINGLE sample and returns the output prediction.
Arguments:
X (list[int]) : a list of integers with each integer an input class of step
y (list[int]) : a list of integers with each integer an output class of step
Returns:
y_pred (list[int]) : a list of integers with each integer the prediction of each step
"""
scalar = params['scalar']
output_dim = params['output_dim']
X = torch.tensor(X).cuda() # shape(seq_len,)
X = X.unsqueeze(0) # shape(batch_size=1, seq_len)
if scalar is True:
seq_len = [X.shape[1]]
else:
seq_len = [len(y)] # 2d list
if scalar is True:
y = torch.tensor([[y]]).cuda().float().cuda() # shape(1,)
else:
y = lists2onehottensors([y], output_dim, onehot_encoder)
# change to 1-hot
y_pred = clf.forward(X, seq_len)
loss = clf.compute_loss(y_pred, y, seq_len)
loss = loss.item()
if scalar is False:
# convert softmax y_pred and y to 1d list
y_pred = onehottensors2classlist(y_pred, seq_len)[0]
return y_pred | 8e0aa3c687a3ad24e02f730a5dca31f4fd36c6ad | 23,120 |
def parse_modules_and_elabs(raw_netlist, net_manager):
"""
Parses a raw netlist into its IvlModule and IvlElab objects.
Returns a tuple: (modules, elabs)
modules is a list of IvlModule objects.
elabs is a list of IvlElab objects.
"""
sections = parse_netlist_to_sections(raw_netlist)
modules_lines = group_lines(sections['SCOPES'])
elab_bundles_lines = group_lines(sections['ELABORATED NODES'])
modules = [parse_module_lines(lines, net_manager)
for lines in modules_lines]
elabs = [parse_elab_bundle_lines(lines, net_manager)
for lines in elab_bundles_lines]
return modules, elabs | 029d5a86de450eb6c104cc2582e21fe854c557e7 | 23,121 |
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn | 853959c073f45be0ffc97dfd8733d2b10a837a32 | 23,122 |
def by_uri(uri):
"""A LicenseSelector-less means of picking a License from a URI."""
if _BY_URI_CACHE.has_key(uri):
return _BY_URI_CACHE[uri]
for key, selector in cc.license.selectors.SELECTORS.items():
if selector.has_license(uri):
license = selector.by_uri(uri)
_BY_URI_CACHE[uri] = license
return license
return None | 1dc3dfe4070857984768e1af6462927fd08daf77 | 23,123 |
def read_S(nameIMGxml):
"""
This function extract the images's center from the xml file.
Parameters
----------
nameIMGxml : str
the name of the file generated by MM3D.
Usually, it is "Orientation-Im[n°i].JPG.xml"
Returns
-------
numpy.ndarray: the center of the IMG (size 1*3)
"""
tree = etree.parse(nameIMGxml)
for user in tree.xpath("/ExportAPERO/OrientationConique/Externe/Centre"):
# print(user.text)
S = user.text.split(" ")
center = np.array(S, float)
return np.transpose(center) | f4054827c8ecfa6d81ac752e8ac46e4cccbc5245 | 23,124 |
def AddBatchJob(client):
"""Add a new BatchJob to upload operations to.
Args:
client: an instantiated AdWordsClient used to retrieve the BatchJob.
Returns:
The new BatchJob created by the request.
"""
# Initialize appropriate service.
batch_job_service = client.GetService('BatchJobService', version='v201601')
# Create a BatchJob.
batch_job_operations = [{
'operand': {},
'operator': 'ADD'
}]
return batch_job_service.mutate(batch_job_operations)['value'][0] | 6c48997e4739f05fe6df826654a45b6f7deafc1b | 23,125 |
def bellmanFord(obj,source):
"""Determination of minimum distance between vertices using Bellman Ford Algorithm."""
validatePositiveWeight(obj)
n = CountVertices(obj)
minDist = dict()
for vertex in obj.vertexList:
if vertex == source:
minDist[vertex] = 0
else:
minDist[vertex] = float("inf")
#Comparing if minDist[i]+edge_weight(i,j)<minDist[j]
for i in range(n-1):
for vertex in obj.adjList:
for nbrVertex in obj.adjList[vertex]:
if minDist[nbrVertex]>minDist[vertex]+obj.weightList[vertex][obj.adjList[vertex].index(nbrVertex)]:
minDist[nbrVertex] = minDist[vertex]+obj.weightList[vertex][obj.adjList[vertex].index(nbrVertex)]
return minDist | 83cdbab547741a070ef694b4cea8f16355eb4af5 | 23,126 |
from typing import Callable
import inspect
import click
def auto_default_option(*param_decls, **attrs) -> Callable[[_C], _C]:
"""
Attaches an option to the command, with a default value determined from the decorated function's signature.
All positional arguments are passed as parameter declarations to :class:`click.Option`;
all keyword arguments are forwarded unchanged (except ``cls``).
This is equivalent to creating an :class:`click.Option` instance manually
and attaching it to the :attr:`click.Command.params` list.
.. versionadded:: 0.7.0
:param cls: the option class to instantiate. This defaults to :class:`click.Option`.
"""
def decorator(f: _C) -> _C:
option_attrs = attrs.copy()
if "help" in option_attrs:
option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
OptionClass = option_attrs.pop("cls", click.Option)
option = OptionClass(param_decls, **option_attrs)
_param_memo(f, option)
_get_default_from_callback_and_set(f, option)
return f
return decorator | e1813faf2c0d936333edf4d2a1b111dec6c7a376 | 23,127 |
def zCurve(seq):
"""Return 3-dimensional Z curve corresponding to sequence.
zcurve[n] = zcurve[n-1] + zShift[n]
"""
zcurve = np.zeros((len(seq), 3), dtype=int)
zcurve[0] = zShift(seq, 0)
for pos in range(1, len(seq)):
zcurve[pos] = np.add(zcurve[pos - 1], zShift(seq, pos))
return zcurve | 4118274fc3bee084777847553dcfa9c4dc92c6c9 | 23,129 |
def getgeo():
""" Grabbing and returning the zones """
data = request.args.get('zone_name', None)
print data
#Check if data is null - get all zones
out = []
if data:
rec = mongo.db.zones.find({'zone_name':data})
else:
rec = mongo.db.zones.find()
for r in rec:
r.pop('_id')
out.append(r['loc'])
jsonOut = json.dumps(out)
print jsonOut
return Response(response=jsonOut,
status=200,
mimetype="application/json") | 822e8e995ae47d887340a2750eeda5646dfa9d5b | 23,130 |
def photo_upload(request):
"""AJAX POST for uploading a photo for any given application."""
response = None
if request.is_ajax() and request.method == 'POST':
form = PhotoForm(
data=request.POST, files=request.FILES, use_required_attribute=False,
)
if form.is_valid():
ct = request.POST.get('content_type')
oid = request.POST.get('oid')
if ct and oid:
ct = ContentType.objects.get(pk=ct)
mod = ct.model_class()
try:
instance = mod.objects.get(pk=oid)
phile = form.save(commit=False)
phile.content_object = instance
phile.save()
response = render(
request,
'dashboard/view_photo.ajax.html',
{'photo': phile, 'ct': ct, 'oid': oid},
)
except Exception as error:
msg = "Fail: {0}".format(str(error))
else:
msg = "Fail: No Content Type or Object ID Provided"
else:
msg = "Fail: {0}".format(form.errors)
else:
msg = "AJAX POST required"
if not response:
response = HttpResponse(msg, content_type='text/plain; charset=utf-8')
return response | e760570d07f43800c05f4d1d8b36b9cb84804003 | 23,131 |
def table_str(bq_target):
# type: (BigqueryTarget) -> str
"""Given a BigqueryTarget returns a string table reference."""
t = bq_target.table
return "%s.%s.%s" % (t.project_id, t.dataset_id, t.table_id) | 95053c839d2bc1e4d628261d669a73a6b9dcb309 | 23,132 |
def any_to_any_translate_back(content, from_='zh-CN', to_='en'):
"""
    Chinese->English and English->Chinese back-translation (round trip)
    :param content: str, user input, up to 4891 characters
:param from_: str, original language
:param to_: str, target language
:return: str, result of translate
"""
translate_content = any_to_any_translate(content, from_=from_, to_=to_)
result = any_to_any_translate(translate_content, from_=to_, to_=from_)
return result | def5100d73712fd1f244913aca725328cbe02b4d | 23,133 |
def to_short_site_cname(user, site):
"""
    Display name of a feed source, at most 10 Chinese characters; user-defined names are supported
"""
if isinstance(site, dict):
site_id = site['id']
site_cname = site['cname']
else:
site_id = site.id
site_cname = site.cname
if user:
cname = get_user_site_cname(user.oauth_id, site_id)
if cname:
return cut_to_short(cname, 20)
return cut_to_short(site_cname, 20) | 70b4874af06e4185a72a45ff82838b2d00cdcec6 | 23,134 |
def load_roed_data(full_output=False):
""" Load master table with all labels """
mtab = load_master_table()
df1 = table.Table.read("roed14_stars.fits").to_pandas()
def renamer(x):
""" match master table Star with Name """
x = x.strip()
if x.startswith("BD") or x.startswith("CD"): return x.replace(" ","_")
return x.replace(" ","")
df1.index = map(renamer, df1["Name"])
star_labels = ["Teff", "logg", "Vt", "__M_H_"] # Should include some other columns eventually
# to test how individual stars might affect things
star_labels += ["SN3950","SN4550","SN5200","SN6750","Cl"]
for label in star_labels:
mtab.add_column(mtab.Column(df1.loc[mtab["Star"]][label], label))
mtab.rename_column("__M_H_", "[M/H]")
# Set up abundances
df2 = table.Table.read("roed14_abunds.fits").to_pandas()
df2["Name"] = map(renamer, df2["Name"])
df2.loc[:,"log_e_"][df2["l_log_e_"] == "<"] = np.nan
df2.loc[:,"__X_Fe_"][df2["l_log_e_"] == "<"] = np.nan
all_ions = np.unique(df2["Ion"])
groups = df2.groupby("Ion")
_abunds = []
for ion in all_ions:
tdf = groups.get_group(ion)
xfe = pd.Series(tdf["__X_Fe_"], name=ion.strip(), copy=True)
xfe.index = tdf["Name"]
_abunds.append(xfe)
abunds = pd.DataFrame(_abunds).transpose()
for ion in all_ions:
ion = ion.strip()
if "(" in ion and ")" in ion:
newname = ion.split("(")[1][:-1]
assert len(newname) == 2, newname
newname = "[{}/Fe]".format(newname)
elif "Fe " in ion:
newname = "[{}/H]".format(ion)
else:
newname = "[{}/Fe]".format(ion)
mtab.add_column(mtab.Column(abunds.loc[mtab["Star"]][ion], name=newname, copy=True))
if full_output:
return mtab, df1, abunds
return mtab | 8698b478abb9a3e617ce9a23feb89fecf220e341 | 23,135 |
import math
def calculate_distance(p1, p2):
"""
Calculate distance between two points
param p1: tuple (x,y) point1
param p2: tuple (x,y) point2
return: distance between two points
"""
x1, y1 = p1
x2, y2 = p2
d = math.sqrt(pow(x2 - x1, 2) + pow(y2 - y1, 2))
return d | 756b609a91e17299eb879e27e83cd663800e46dd | 23,136 |
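Sanity check for calculate_distance above with a 3-4-5 right triangle:
print(calculate_distance((0, 0), (3, 4)))   # 5.0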
def statistics(request, network):
""" some nice statistics for the whole pool """
# some basic statistics
days = 1
current_height, all_blocks, pool_blocks, pool_blocks_percent, bbp_mined = get_basic_statistics(network, days)
miners_count = get_miner_count(network, days)
graph_days = 7
top_miners = get_top_miners(network)
# the solution and block statistics
share_statistics = get_solution_statistics(network, days=graph_days)
block_statistics = list(get_block_statistics(network, days=graph_days))
statistics = []
# now we join the statistics
for i, share_stat in enumerate(list(share_statistics)):
statistics.append([share_stat[0], share_stat[1], block_statistics[i][1]])
# and finally the forecast for the blocks
blocks_two_days = statistics[-2][2] + statistics[-1][2]
blocks_per_hour = blocks_two_days / (24 + timezone.now().hour)
forecast_blocks = int(round(blocks_per_hour * 24))
last_blocks = Block.objects.filter(network=network, pool_block=True).values('height', 'inserted_at').order_by('-height')[0:50]
return render(request, 'purepool/statistics.html', {
'network': network,
'statistics': statistics,
'top_miners': top_miners,
'last_blocks': last_blocks,
'forecast_blocks': forecast_blocks,
'days': days,
'current_height': current_height,
'all_blocks': all_blocks,
'pool_blocks': pool_blocks,
'pool_blocks_percent': pool_blocks_percent,
'bbp_mined': bbp_mined,
'miners_count': miners_count,
}) | d44033323bfe041ee53109a9274ff2fd9c9d9df3 | 23,137 |
from textwrap import dedent
def package_load_instructions(inst_distributions):
"""Load instructions, displayed in the package notes"""
per_package_inst = ''
for dist in inst_distributions:
if dist.type == 'zip':
per_package_inst += dedent(
"""
# Loading the ZIP Package
Zip packages are compressed, so large resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
elif dist.type == 'csv':
per_package_inst += dedent(
"""
# Loading the CSV Package
CSV packages load resources individually, so small resources may load faster.
import metapack as mp
pkg = mp.open_package('{url}')
""".format(url=dist.package_url.inner))
if per_package_inst:
return '\n---\n'+per_package_inst
else:
return '' | 321a7486f27a3cb327ae7556e317bc53c24726ac | 23,138 |
def specialize_transform(graph, args):
"""Specialize on provided non-None args.
Parameters that are specialized on are removed.
"""
mng = graph.manager
graph = transformable_clone(graph, relation=f'sp')
mng.add_graph(graph)
for p, arg in zip(graph.parameters, args):
if arg is not None:
mng.replace(p, Constant(arg))
new_parameters = [p for p, arg in zip(graph.parameters, args)
if arg is None]
mng.set_parameters(graph, new_parameters)
return graph | 44b892312ff677bdc5bee84bf5df1e1dc4bd5ba5 | 23,139 |
from functools import reduce
import operator
import itertools
def Multiplication(k):
"""
Generate a function that performs a polynomial multiplication and return coefficients up to degree k
"""
assert isinstance(k, int) and k > 0
def isum(factors):
init = next(factors)
return reduce(operator.iadd, factors, init)
def mul_function(x1, x2):
# prepare indices for convolution
l1, l2 = len(x1), len(x2)
M = min(k + 1, l1 + l2 - 1)
indices = [[] for _ in range(M)]
for (i, j) in itertools.product(range(l1), range(l2)):
if i + j >= M:
continue
indices[i + j].append((i, j))
# wrap with log-tensors for stability
X1 = [LogTensor(x1[i]) for i in range(l1)]
X2 = [LogTensor(x2[i]) for i in range(l2)]
# perform convolution
coeff = []
for c in range(M):
coeff.append(isum(X1[i] * X2[j] for (i, j) in indices[c]).torch())
return coeff
return mul_function | 23a663231e44b09cd446e9ba1d269e7b123efc1d | 23,140 |
import numpy as np
def safe_divide(a, b):
"""
Avoid divide by zero
http://stackoverflow.com/questions/26248654/numpy-return-0-with-divide-by-zero
"""
with np.errstate(divide='ignore', invalid='ignore'):
c = np.true_divide(a, b)
c[c == np.inf] = 0
c = np.nan_to_num(c)
return c | 104970a64f5d77f674a46f9da08b039345fa546a | 23,141 |
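Example of safe_divide above (numpy assumed): zero denominators give 0 instead of inf or NaN:
import numpy as np

a = np.array([1.0, 2.0, 0.0])
b = np.array([2.0, 0.0, 0.0])
print(safe_divide(a, b))   # [0.5 0.  0. ]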
import cv2
def read_image(filepath, gray=False):
"""
read image
:param filepath:
:param gray:
:return:
"""
if gray:
return cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2GRAY)
else:
return cv2.cvtColor(cv2.imread(filepath), cv2.COLOR_BGR2RGB) | d5743c8ad517f5c274e3ac64d6082ea36539cfe3 | 23,143 |
from ...hubble.helper import parse_hub_uri
def mixin_hub_pull_parser(parser):
"""Add the arguments for hub pull to the parser
:param parser: the parser configure
"""
def hub_uri(uri: str) -> str:
parse_hub_uri(uri)
return uri
parser.add_argument(
'uri',
type=hub_uri,
help='The URI of the executor to pull (e.g., jinahub[+docker]://NAME)',
)
mixin_hub_pull_options_parser(parser) | 9f62462baecc744ab7b7e3e78b5446a3d5347569 | 23,144 |
from typing import Tuple
from typing import List
import yaml
from typing import Dict
def load_config() -> Tuple[List, List]:
"""Get configuration from config file.
Returns repo_paths and bare_repo_dicts.
"""
if config_file.exists():
with open(config_file, "r") as ymlfile:
config = yaml.load(ymlfile, Loader=yaml.Loader)
repo_paths = flatten_list(
[expand_path(i) for i in config.get('repo_paths', [])]
)
bare_repo_dicts: List[Dict] = config.get('bare_repos', [])
bare_repo: Dict[str, str]
for bare_repo in bare_repo_dicts:
bare_repo['git_dir'] = expand_path(bare_repo['git_dir'])[0]
bare_repo['work_tree'] = expand_path(bare_repo['work_tree'])[0]
else:
repo_paths = []
bare_repo_dicts = []
return repo_paths, bare_repo_dicts | e9f483c6cc3ff1335a5d9866cc577e72a9a8084f | 23,145 |
def deindented_source(src):
"""De-indent source if all lines indented.
This is necessary before parsing with ast.parse to avoid "unexpected
indent" syntax errors if the function is not module-scope in its
original implementation (e.g., staticmethods encapsulated in classes).
Parameters
----------
src : str
input source
Returns
-------
str :
de-indented source; the first character of at least one line is
non-whitespace, and all other lines are deindented by the same
"""
lines = src.splitlines()
n_chars = float("inf")
for line in lines:
len_line = len(line)
idx = 0
# we're Python 3, so we assume you're not mixing tabs and spaces
while idx < n_chars and idx < len_line and line[idx] in [" ", '\t']:
idx += 1
if len_line > idx:
n_chars = min(idx, n_chars)
lines = [line[n_chars:] for line in lines]
src = "\n".join(lines)
return src | 227d5e8e35b251f02ce5e9237f8120d2dd9c7e4b | 23,146 |
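Illustration of deindented_source above: a uniformly indented snippet is shifted back to column zero:
src = "    def f(x):\n        return x + 1"
print(deindented_source(src))
# def f(x):
#     return x + 1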
def home():
"""Render the home page."""
form = SearchForm()
search_results = None
if form.validate_on_submit():
search_term = form.username.data
cur = conn.cursor()
cur.execute(f"SELECT * FROM student WHERE name = '{search_term}';")
search_results = cur.fetchall()
cur.close()
return render_template(
"home.html", form=form, search_results=search_results) | d578e4ba95af57828dfa6f483ad9aa0aeac8ea92 | 23,147 |
def capacity():
"""
Returns the raw capacity of the filesystem
Returns:
filesystem capacity (int)
"""
return hdfs.capacity() | c9e220b19a1a1a200d2393bb98116be1767370b9 | 23,148 |
async def deploy(current_user: User = Depends(auth.get_current_user)):
""" This function is used to deploy the model of the currently trained chatbot """
response = mongo_processor.deploy_model(bot=current_user.get_bot(), user=current_user.get_user())
return {"message": response} | cb7d53f605616e8a979dd144b786313c99f7a244 | 23,149 |
import numpy as np
def find_rocks(img, rgb_thresh=(100, 100, 60)):
""" Find rock in given image frame"""
color_select = np.zeros_like(img[:,:,0])
    # Require that each pixel be above the R and G thresholds and below
    # the B threshold (rocks appear yellowish). above_thresh will now
    # contain a boolean array with "True" where the threshold test was met
above_thresh = (img[:,:,0] > rgb_thresh[0]) \
& (img[:,:,1] > rgb_thresh[1]) \
& (img[:,:,2] < rgb_thresh[2])
# Index the array of zeros with the boolean array and set to 1
color_select[above_thresh] = 1
return color_select | f0bffadfdf826f1f649029f3aaf224d07681589e | 23,150 |
from pathlib import Path
def maybe_start_with_home_prefix(p: Path) -> Path:
"""
If the input path starts with the home directory path string, then return
a path that starts with the home directory and points to the same location.
Otherwise, return the path unchanged.
"""
try:
return Path("~", p.relative_to(Path.home()))
except ValueError:
return p | 6ee4e49e8dfb9bc68a1c10f5ea792715fb5d5336 | 23,151 |
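Illustration of maybe_start_with_home_prefix above: paths under the home directory get a '~' prefix, others come back unchanged (the concrete paths are only examples):
from pathlib import Path

print(maybe_start_with_home_prefix(Path.home() / "projects" / "demo"))   # ~/projects/demo
print(maybe_start_with_home_prefix(Path("/tmp/demo")))                   # /tmp/demo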
from collections import defaultdict
def parse_nrc_lexicon():
"""Extract National Resource Council Canada emotion lexicon from http://saifmohammad.com/WebPages/lexicons.html
Returns:
{str: [str]} A defaultdict of emotion to list of associated words
"""
emotion2words = defaultdict(list)
with open(NRC_LEXICON) as lexicon_file:
lexicon_file.__next__()
for line in lexicon_file:
word, emotion, associated = line.split()
if associated == '1':
emotion2words[emotion].append(word)
return emotion2words | 869988934a7ab6a1b0b601f96472ff85a2686975 | 23,152 |
def rouge_2_fscore(predictions, labels, **unused_kwargs):
"""ROUGE-2 F1 score computation between labels and predictions.
This is an approximate ROUGE scoring method since we do not glue word pieces
or decode the ids and tokenize the output.
Args:
predictions: tensor, model predictions
labels: tensor, gold output.
Returns:
rouge2_fscore: approx rouge-2 f1 score.
"""
outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
# Convert the outputs and labels to a [batch_size, input_length] tensor.
outputs = tf.squeeze(outputs, axis=[-1, -2])
labels = tf.squeeze(labels, axis=[-1, -2])
rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
return rouge_2_f_score, tf.constant(1.0) | 1c0ab9b514c36cf9947b31624e8a2cf308cdfe6b | 23,153 |
def enhancedFeatureExtractorDigit(datum):
"""
Your feature extraction playground.
You should return a util.Counter() of features
for this datum (datum is of type samples.Datum).
## DESCRIBE YOUR ENHANCED FEATURES HERE...
##
"""
features = basicFeatureExtractorDigit(datum)
"*** YOUR CODE HERE ***"
return features | 564e324cd12b2cb98bd65cb053b5acea2f4d5831 | 23,155 |
def tested_function(x):
"""
    Tested function
    All sorts of junk can be written here
"""
freq = 1
damp_fac = 0.1
val = np.sin(freq * x)
damp = np.exp(-1 * damp_fac * abs(x))
return val * damp | 95808b55ace5b0536104f02de874344aa02d7033 | 23,157 |
def parse_line(line):
"""
Parses a (non-comment) line of a GFF3 file. The attribute field is parsed into a dict.
:param line: line to parse as string
:return: dict with for each column (key) the corresponding value
"""
parts = line.strip().split('\t')
output = {}
if len(parts) != len(COLUMNS):
raise Exception('Incorrect number of columns in line.', parts, COLUMNS)
for key, value in zip(COLUMNS, parts):
if key == 'attributes':
output[key] = parse_attributes(value)
elif key == 'start' or key == 'stop':
output[key] = int(value)
else:
output[key] = value
return output | 0ea071c5a4165fd2bbbe77798bec09b033250c72 | 23,158 |
import requests
from datetime import datetime
def get_time_string(place: str = "Europe/Moscow"):
"""
Get time data from worldtimeapi.org and return simple string
Parameters
----------
place : str
Location, i.e. 'Europe/Moscow'.
Returns
-------
string
Time in format '%Y-%m-%d %H:%M:%S'
Examples
--------
>>> get_time_string()
2021-08-16 16:03:34
"""
url = "http://worldtimeapi.org/api/timezone/" + place
data = requests.get(url).json()
date = datetime.fromisoformat(data["datetime"])
string = date.strftime("%Y-%m-%d %H:%M:%S")
return string | f15ef5a843317c55d3c60bf2ee8c029258e1cd78 | 23,159 |
from typing import Type
from typing import Dict
from typing import Callable
def new_worker_qthread(
Worker: Type[WorkerProtocol],
*args,
_start_thread: bool = False,
_connect: Dict[str, Callable] = None,
**kwargs,
):
"""This is a convenience function to start a worker in a Qthread.
In most cases, the @thread_worker decorator is sufficient and preferable.
But this allows the user to completely customize the Worker object.
However, they must then maintain control over the thread and clean up
appropriately.
It follows the pattern described here:
https://www.qt.io/blog/2010/06/17/youre-doing-it-wrong
and
https://doc.qt.io/qt-5/qthread.html#details
see also:
https://mayaposch.wordpress.com/2011/11/01/how-to-really-truly-use-qthreads-the-full-explanation/
A QThread object is not a thread! It should be thought of as a class to
*manage* a thread, not as the actual code or object that runs in that
thread. The QThread object is created on the main thread and lives there.
Worker objects which derive from QObject are the things that actually do
the work. They can be moved to a QThread as is done here.
.. note:: Mostly ignorable detail
While the signals/slots syntax of the worker looks very similar to
standard "single-threaded" signals & slots, note that inter-thread
signals and slots (automatically) use an event-based QueuedConnection,
while intra-thread signals use a DirectConnection. See `Signals and
Slots Across Threads
<https://doc.qt.io/qt-5/threads-qobject.html#signals-and-slots-across-threads>`_
Parameters
----------
Worker : QObject
QObject type that implements a `work()` method. The Worker should also
emit a finished signal when the work is done.
_start_thread : bool
If True, thread will be started immediately, otherwise, thread must
be manually started with thread.start().
_connect : dict, optional
Optional dictionary of {signal: function} to connect to the new worker.
for instance: _connect = {'incremented': myfunc} will result in:
worker.incremented.connect(myfunc)
*args
will be passed to the Worker class on instantiation.
**kwargs
will be passed to the Worker class on instantiation.
Returns
-------
worker : WorkerBase
The created worker.
thread : QThread
The thread on which the worker is running.
Examples
--------
Create some QObject that has a long-running work method:
.. code-block:: python
class Worker(QObject):
finished = Signal()
increment = Signal(int)
def __init__(self, argument):
super().__init__()
self.argument = argument
@Slot()
def work(self):
# some long running task...
import time
for i in range(10):
time.sleep(1)
self.increment.emit(i)
self.finished.emit()
worker, thread = new_worker_qthread(
Worker,
'argument',
_start_thread=True,
_connect={'increment': print},
)
"""
if _connect and not isinstance(_connect, dict):
raise TypeError("_connect parameter must be a dict")
thread = QThread()
worker = Worker(*args, **kwargs)
worker.moveToThread(thread)
thread.started.connect(worker.work)
worker.finished.connect(thread.quit)
worker.finished.connect(worker.deleteLater)
thread.finished.connect(thread.deleteLater)
if _connect:
[getattr(worker, key).connect(val) for key, val in _connect.items()]
if _start_thread:
thread.start() # sometimes need to connect stuff before starting
return worker, thread | f607799bd7abf4b275d90bc4523dc9f0e8d2d200 | 23,160 |
import fnmatch
def contains(filename, value=None, fnvalue=None):
""" If a string is contained within a yaml (and is not a comment or key), return where we found it """
if filename in ALL_STRINGS:
for el in ALL_STRINGS[filename]:
if (value and value in el[0]) or (fnvalue and fnmatch.fnmatch(el[0], fnvalue)):
return el[1].strip() | 2c21aee4fe7121ad26e588b33dfa2f09f1d4066b | 23,161 |
def plot_with_overview(
ds,
tn,
forcing_vars=["dqdt_adv", "dtdt_adv"],
domain_var="q",
overview_window_width=4,
):
"""
Produce a forcing plot with timestep `tn` highlighted together with
overview plots of domain data variable `domain_var`. The width over the
overview plot is set with `overview_window_width` (in degrees)
"""
ds_forcing = ds
ds_domain = domain_load.load_data(
root_data_path="data", name=ds_forcing.domain_name
)
ds_traj = trajectory_load.load_data(
root_data_path="data", name=ds_forcing.trajectory_name
)
N_vars = len(forcing_vars)
figwidth = 12
subplot_height = 3
traj_color = "red"
N_vars = len(forcing_vars)
figsize = (figwidth, 4 + subplot_height * N_vars)
fig = plt.figure(figsize=figsize)
gs = GridSpec(2 + N_vars, 2)
ds_forc_tn = ds_forcing.isel(time=tn)
lat0, lon0 = ds_forc_tn.lat, ds_forc_tn.lon
domain_window = dict(
lat=slice(lat0 - overview_window_width / 2, lat0 + overview_window_width / 2),
lon=slice(lon0 - overview_window_width / 2, lon0 + overview_window_width / 2),
time=ds_forc_tn.time,
)
da_domain = ds_domain[domain_var].sel(**domain_window).sum(dim="level")["q"]
ax_domain = _add_overview_axes(fig=fig, gs=gs[:2, 0])
da_domain.plot(ax=ax_domain)
ax_satellite = _add_overview_axes(fig=fig, gs=gs[0:2, 1])
ax_satellite.set_extent(ax_domain.get_extent())
traj_plot_kwargs = dict(
ds=ds_traj,
add_ref="eurec4a_circle",
color=traj_color,
reference_time=ds_forc_tn.time,
)
trajectory_plot.main(ax=ax_domain, **traj_plot_kwargs)
trajectory_plot.main(ax=ax_satellite, **traj_plot_kwargs)
ax = None
for n, v in enumerate(forcing_vars):
ax = fig.add_subplot(gs[n + 2, :], sharex=ax)
ds_forcing[v].plot(ax=ax, y="level")
# .item() doesn't return a np.datetime64 object sadly, so we have to
# make our own...
t0 = np.datetime64(ds_forc_tn[v].time.item(), "ns")
ax.axvline(x=t0, color="black", linestyle="--", alpha=0.5)
fig.tight_layout()
title = f"{ds.name} {ds.trajectory_type} trajectory\n{ds.domain_name} domain\n"
if hasattr(ds, "velocity_method"):
title += (
f"{ds.velocity_method} velocity method using "
"{ds.velocity_method_kwargs_height}m height\n"
)
plt.suptitle(title, y=1.01)
return fig | e02be6157853b5a0a409fe45a02f52d685913e22 | 23,162 |
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# Flatten last layer and training labels
logits = tf.reshape(nn_last_layer, (-1, num_classes))
labels = tf.reshape(correct_label, (-1, num_classes))
# Create cross entropy loss function and optimizer
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
# Include regularizers to the loss function
l2_loss = tf.losses.get_regularization_losses()
cross_entropy_loss += tf.reduce_sum(l2_loss)
adam_op = tf.train.AdamOptimizer(learning_rate)
# Create training operation with defined optimizer and loss function
train_op = adam_op.minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss | b618626f7e458e1ef3ffda74af7532d93668a9cb | 23,163 |
def expanded_indexer(key, ndim):
"""Given a key for indexing an ndarray, return an equivalent key which is a
tuple with length equal to the number of dimensions.
The expansion is done by replacing all `Ellipsis` items with the right
number of full slices and then padding the key with full slices so that it
reaches the appropriate dimensionality.
"""
if not isinstance(key, tuple):
# numpy treats non-tuple keys equivalent to tuples of length 1
key = (key,)
new_key = []
# handling Ellipsis right is a little tricky, see:
# http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
found_ellipsis = False
for k in key:
if k is Ellipsis:
if not found_ellipsis:
new_key.extend((ndim + 1 - len(key)) * [slice(None)])
found_ellipsis = True
else:
new_key.append(slice(None))
else:
new_key.append(k)
if len(new_key) > ndim:
raise IndexError("too many indices")
new_key.extend((ndim - len(new_key)) * [slice(None)])
return tuple(new_key) | a4fa3b1e4a106350348c128ef5ce8b86dac1f0c0 | 23,164 |
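Two quick examples of expanded_indexer above for a 3-dimensional array; short keys and Ellipsis are padded out with full slices:
print(expanded_indexer(0, 3))          # (0, slice(None, None, None), slice(None, None, None))
print(expanded_indexer((..., 2), 3))   # (slice(None, None, None), slice(None, None, None), 2)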
def downsample(
data, sampling_freq=None, target=None, target_type="samples", method="mean"
):
"""Downsample pandas to a new target frequency or number of samples
using averaging.
Args:
data: (pd.DataFrame, pd.Series) data to downsample
sampling_freq: (float) Sampling frequency of data in hertz
target: (float) downsampling target
target_type: type of target can be [samples,seconds,hz]
method: (str) type of downsample method ['mean','median'],
default: mean
Returns:
out: (pd.DataFrame, pd.Series) downsmapled data
"""
if not isinstance(data, (pd.DataFrame, pd.Series)):
raise ValueError("Data must by a pandas DataFrame or Series instance.")
if not (method == "median") | (method == "mean"):
raise ValueError("Metric must be either 'mean' or 'median' ")
if target_type == "samples":
n_samples = target
elif target_type == "seconds":
n_samples = target * sampling_freq
elif target_type == "hz":
n_samples = sampling_freq / target
else:
raise ValueError('Make sure target_type is "samples", "seconds", ' ' or "hz".')
idx = np.sort(np.repeat(np.arange(1, data.shape[0] / n_samples, 1), n_samples))
# if data.shape[0] % n_samples:
if data.shape[0] > len(idx):
idx = np.concatenate([idx, np.repeat(idx[-1] + 1, data.shape[0] - len(idx))])
if method == "mean":
return data.groupby(idx).mean().reset_index(drop=True)
elif method == "median":
return data.groupby(idx).median().reset_index(drop=True) | 4dca048a77ac1f20d0ee1bac702c48e4311f8900 | 23,165 |
def available_commands(mod, ending="_command"):
"""Just returns the available commands, rather than the whole long list."""
commands = []
for key in mod.__dict__:
if key.endswith(ending):
commands.append(key.split(ending)[0])
return commands | 38a96ca9485e9814ba161550613d3d96db126693 | 23,166 |
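Sketch of available_commands above using a stand-in namespace object instead of a real module; the command names are made up:
from types import SimpleNamespace

mod = SimpleNamespace(start_command=None, stop_command=None, helper=None)
print(sorted(available_commands(mod)))   # ['start', 'stop']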
def retrieve_browse(browse_location, config):
""" Retrieve browse image and get the local path to it.
If location is a URL perform download.
"""
# if file_name is a URL download browse first and store it locally
validate = URLValidator()
try:
validate(browse_location)
input_filename = abspath(get_storage_path(
basename(browse_location), config=config))
logger.info("URL given, downloading browse image from '%s' to '%s'.",
browse_location, input_filename)
if not exists(input_filename):
start = time()
try:
# timeout in seconds
setdefaulttimeout(120)
remote_browse = urlopen(browse_location)
with open(input_filename, "wb") as local_browse:
local_browse.write(remote_browse.read())
except HTTPError, error:
raise IngestionException("HTTP error downloading '%s': %s"
% (browse_location, error.code))
except URLError, error:
raise IngestionException("URL error downloading '%s': %s"
% (browse_location, error.reason))
logger.info(
"Retrieved %s %dB in %.3fs", browse_location,
getsize(input_filename), time() - start,
)
else:
raise IngestionException("File to download already exists locally "
"as '%s'" % input_filename)
except ValidationError:
input_filename = abspath(get_storage_path(browse_location,
config=config))
logger.info("Filename given, using local browse image '%s'.",
input_filename)
# check that the input filename is valid -> somewhere under the storage dir
storage_path = get_storage_path()
if commonprefix((input_filename, storage_path)) != storage_path:
raise IngestionException("Input path '%s' points to an invalid "
"location." % browse_location)
try:
models.FileNameValidator(input_filename)
except ValidationError, error:
raise IngestionException("%s" % str(error), "ValidationError")
return input_filename | 6c5b5e916542db20584205588e5e2929df692b38 | 23,167 |
def add_suffix(input_dict, suffix):
"""Add suffix to dict keys."""
return dict((k + suffix, v) for k,v in input_dict.items()) | 7dbedd523d24bfdf194c999b8927a27b110aad3e | 23,168 |
from autots.tools.transform import GeneralTransformer
from autots.tools.transform import simple_context_slicer
import datetime
def ModelPrediction(
df_train,
forecast_length: int,
transformation_dict: dict,
model_str: str,
parameter_dict: dict,
frequency: str = 'infer',
prediction_interval: float = 0.9,
no_negatives: bool = False,
constraint: float = None,
future_regressor_train=[],
future_regressor_forecast=[],
holiday_country: str = 'US',
startTimeStamps=None,
grouping_ids=None,
random_seed: int = 2020,
verbose: int = 0,
n_jobs: int = None,
):
"""Feed parameters into modeling pipeline
Args:
df_train (pandas.DataFrame): numeric training dataset of DatetimeIndex and series as cols
forecast_length (int): number of periods to forecast
transformation_dict (dict): a dictionary of outlier, fillNA, and transformation methods to be used
model_str (str): a string to be direct to the appropriate model, used in ModelMonster
frequency (str): str representing frequency alias of time series
prediction_interval (float): width of errors (note: rarely do the intervals accurately match the % asked for...)
no_negatives (bool): whether to force all forecasts to be > 0
constraint (float): when not None, use this value * data st dev above max or below min for constraining forecast values.
future_regressor_train (pd.Series): with datetime index, of known in advance data, section matching train data
future_regressor_forecast (pd.Series): with datetime index, of known in advance data, section matching test data
holiday_country (str): passed through to holiday package, used by a few models as 0/1 regressor.
startTimeStamps (pd.Series): index (series_ids), columns (Datetime of First start of series)
n_jobs (int): number of processes
Returns:
PredictionObject (autots.PredictionObject): Prediction from AutoTS model object
"""
transformationStartTime = datetime.datetime.now()
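    # older transformation dicts may not carry grouping/hierarchy keys; fall back to simple defaults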
try:
coerce_integer = transformation_dict['coerce_integer']
grouping = transformation_dict['grouping']
if grouping == 'user' and grouping_ids is None:
grouping = 'kmeans5'
transformation_dict['grouping'] = 'kmeans5'
reconciliation = transformation_dict['reconciliation']
except Exception:
coerce_integer = False
grouping = None
grouping_ids = None
reconciliation = None
transformer_object = GeneralTransformer(
outlier_method=transformation_dict['outlier_method'],
outlier_threshold=transformation_dict['outlier_threshold'],
outlier_position=transformation_dict['outlier_position'],
fillna=transformation_dict['fillna'],
transformation=transformation_dict['transformation'],
detrend=transformation_dict['detrend'],
second_transformation=transformation_dict['second_transformation'],
transformation_param=transformation_dict['transformation_param'],
third_transformation=transformation_dict['third_transformation'],
transformation_param2=transformation_dict['transformation_param2'],
fourth_transformation=transformation_dict['fourth_transformation'],
discretization=transformation_dict['discretization'],
n_bins=transformation_dict['n_bins'],
grouping=grouping,
grouping_ids=grouping_ids,
reconciliation=reconciliation,
coerce_integer=coerce_integer,
).fit(df_train)
df_train_transformed = transformer_object.transform(df_train)
# slice the context, ie shorten the amount of data available.
if transformation_dict['context_slicer'] not in [None, 'None']:
df_train_transformed = simple_context_slicer(
df_train_transformed,
method=transformation_dict['context_slicer'],
forecast_length=forecast_length,
)
# make sure regressor has same length. This could be a problem if wrong size regressor is passed.
if len(future_regressor_train) > 0:
future_regressor_train = future_regressor_train.tail(
df_train_transformed.shape[0]
)
transformation_runtime = datetime.datetime.now() - transformationStartTime
# from autots.evaluator.auto_model import ModelMonster
model = ModelMonster(
model_str,
parameters=parameter_dict,
frequency=frequency,
prediction_interval=prediction_interval,
holiday_country=holiday_country,
random_seed=random_seed,
verbose=verbose,
forecast_length=forecast_length,
n_jobs=n_jobs,
)
model = model.fit(df_train_transformed, future_regressor=future_regressor_train)
df_forecast = model.predict(
forecast_length=forecast_length, future_regressor=future_regressor_forecast
)
if df_forecast.forecast.isnull().all(axis=0).astype(int).sum() > 0:
raise ValueError(
"Model {} returned NaN for one or more series".format(model_str)
)
transformationStartTime = datetime.datetime.now()
# Inverse the transformations
df_forecast.forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.lower_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.lower_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.upper_forecast = pd.DataFrame(
transformer_object.inverse_transform(df_forecast.upper_forecast)
) # , index = df_forecast.forecast_index, columns = df_forecast.forecast_columns)
df_forecast.transformation_parameters = transformation_dict
# Remove negatives if desired
# There's df.where(df_forecast.forecast > 0, 0) or df.clip(lower = 0), not sure which faster
if no_negatives:
df_forecast.lower_forecast = df_forecast.lower_forecast.clip(lower=0)
df_forecast.forecast = df_forecast.forecast.clip(lower=0)
df_forecast.upper_forecast = df_forecast.upper_forecast.clip(lower=0)
if constraint is not None:
if verbose > 2:
print("Using constraint.")
constraint = float(constraint)
train_std = df_train.std(axis=0)
train_min = df_train.min(axis=0) - (constraint * train_std)
train_max = df_train.max(axis=0) + (constraint * train_std)
df_forecast.forecast = df_forecast.forecast.clip(lower=train_min, axis=1)
df_forecast.forecast = df_forecast.forecast.clip(upper=train_max, axis=1)
transformation_runtime = transformation_runtime + (
datetime.datetime.now() - transformationStartTime
)
df_forecast.transformation_runtime = transformation_runtime
return df_forecast | afb3dccc20c2399a50c2564aeb0d1f214d80dd6a | 23,169 |
def make_year_key(year):
"""A key generator for sorting years."""
if year is None:
return (LATEST_YEAR, 12)
year = str(year)
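    # a bare 4-digit year sorts with month 12; 6-digit keys are YYYYMM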
if len(year) == 4:
return (int(year), 12)
if len(year) == 6:
return (int(year[:4]), int(year[4:]))
raise ValueError('invalid year %s' % year) | ced5617772af14a3e438cb268f58ceee3895083d | 23,170 |
def set_stereo_from_geometry(gra, geo, geo_idx_dct=None):
""" set graph stereo from a geometry
(coordinate distances need not match connectivity -- what matters is the
relative positions at stereo sites)
"""
gra = without_stereo_parities(gra)
last_gra = None
atm_keys = sorted(atom_keys(gra))
geo_idx_dct = (geo_idx_dct if geo_idx_dct is not None
else {atm_key: idx for idx, atm_key in enumerate(atm_keys)})
# set atom and bond stereo, iterating to self-consistency
atm_keys = set()
bnd_keys = set()
while last_gra != gra:
last_gra = gra
atm_keys.update(stereogenic_atom_keys(gra))
bnd_keys.update(stereogenic_bond_keys(gra))
gra = _set_atom_stereo_from_geometry(gra, atm_keys, geo, geo_idx_dct)
gra = _set_bond_stereo_from_geometry(gra, bnd_keys, geo, geo_idx_dct)
return gra | d26248883b90a561c8d70bb8a12be68affe40c2a | 23,171 |
def multiply_MPOs(op0, op1):
"""Multiply two MPOs (composition along physical dimension)."""
# number of lattice sites must agree
assert op0.nsites == op1.nsites
L = op0.nsites
# physical quantum numbers must agree
assert np.array_equal(op0.qd, op1.qd)
# initialize with dummy tensors and bond quantum numbers
op = MPO(op0.qd, (L+1)*[[0]])
# combine virtual bond quantum numbers
for i in range(L + 1):
op.qD[i] = qnumber_flatten([op0.qD[i], op1.qD[i]])
for i in range(L):
# multiply physical dimensions and reorder dimensions
op.A[i] = np.tensordot(op0.A[i], op1.A[i], (1, 0)).transpose((0, 3, 1, 4, 2, 5))
# merge virtual bonds
s = op.A[i].shape
assert len(s) == 6
op.A[i] = op.A[i].reshape((s[0], s[1], s[2]*s[3], s[4]*s[5]))
# consistency check
assert is_qsparse(op.A[i], [op.qd, -op.qd, op.qD[i], -op.qD[i+1]]), \
'sparsity pattern of MPO tensor does not match quantum numbers'
return op | 62e9e250c46d281ccbb39c0ddf1082d45386a1d3 | 23,172 |
import json
from typing import OrderedDict
def build_list_of_dicts(val):
"""
Converts a value that can be presented as a list of dict.
    If the top-level item is not a list, it is wrapped in a list.
    Valid values examples:
    - Valid dict: {"k": "v", "k2": "v2"}
    - List of dicts: [{"k": "v"}, {"k2": "v2"}]
    - JSON decodable string: '{"k": "v"}', or '[{"k": "v"}]'
    - List of JSON decodable strings: ['{"k": "v"}', '{"k2": "v2"}']
Invalid values examples:
- ["not", "a", "dict"]
- [123, None],
- [["another", "list"]]
:param val: Input value
:type val: Union[list, dict, str]
:return: Converted(or original) list of dict
:raises: ValueError in case value cannot be converted to a list of dict
"""
if val is None:
return []
if isinstance(val, str):
# use OrderedDict to preserve order
val = json.loads(val, object_pairs_hook=OrderedDict)
if isinstance(val, dict):
val = [val]
for index, item in enumerate(val):
if isinstance(item, str):
# use OrderedDict to preserve order
val[index] = json.loads(item, object_pairs_hook=OrderedDict)
if not isinstance(val[index], dict):
raise ValueError("Expected a list of dicts")
return val | dfd92f619ff1ec3ca5cab737c74af45c86a263e0 | 23,173 |
def add_borders_to_DataArray_U_points(da_u, da_v):
"""
A routine that adds a column to the "right" of the 'u' point
DataArray da_u so that every tracer point in the tile
will have a 'u' point to the "west" and "east"
After appending the border the length of da_u in x
will be +1 (one new column)
This routine is pretty general. Any tiles can be in the da_u and
da_v DataArrays but if the tiles to the "right" of the da_u tiles
are not available then the new rows will be filled with nans.
Parameters
----------
da_u : DataArray
The `DataArray` object that has tiles of a u-point variable
        Tiles of the DataArray must be in their original llc layout.
da_v : DataArray
The `DataArray` object that has tiles of the v-point variable that
corresponds with da_u. (e.g., VVEL corresponds with UVEL)
        Tiles of the DataArray must be in their original llc layout.
Returns
-------
da_u_new: DataArray
a new `DataArray` object that has the appended values of 'u' along
its right edge. The lon_u and lat_u coordinates are lost but all
other coordinates remain.
"""
#%%
# the i_g dimension will be incremented by one.
i_g = np.arange(1, len(da_u.i_g)+2)
# the j dimension is unchanged.
j = da_u['j'].values
llcN = len(j)
# the k dimension, if it exists, is unchanged.
if 'k' in da_u.dims:
nk = len(da_u.k)
k = da_u['k'].values
else:
nk = 0
# the time dimension, if it exists, is unchanged
if 'time' in da_u.dims:
time = da_u['time'].values
#%%
#print "\n>>> ADDING BORDERS TO U POINT TILES\n"
# tiles whose tile to the right are rotated 90 degrees counter clockwise
# to add borders from tiles 4, 5, or 6 we need to use the da_v fields
rot_tiles = {4, 5, 6}
# the new arrays will be one longer in the j direction, +1 column
pad_j = 1 # add one to the second dimension (x)
pad_i = 0 # we do not pad in first dimension (y)
# set the number of processed tiles counter to zero
num_proc_tiles = 0
# find the number of non-tile dimensions
if 'tile' in da_u.dims:
num_dims = da_u.ndim - 1
else:
num_dims = da_u.ndim
# loop through all tiles in da_u
for tile_index in da_u.tile.values:
# find out which tile is to the right of this da_u tile
right_tile_index, top_tile_index, corner_tile_index = \
get_llc_tile_border_mapping(tile_index)
# if 'tile' exists as a dimension, select and copy the proper da_u tile
if 'tile' in da_u.dims:
ref_arr = deepcopy(da_u.sel(tile=tile_index))
else:
# otherwise we have a single da_u tile so make a copy of it
ref_arr = deepcopy(da_u)
# the append_border flag will be true if we have a tile to the right.
append_border = False
#print '\ncurrent da_u tile ', tile_index
#print 'right tile index ', right_tile_index
# check to see if there even is a tile to the right of da_u tile_index
# tiles 10 and 13 don't have one!
if right_tile_index > 0:
#print 'there is a tile to the right of da_u tile ', tile_index
# determine whether the tile to the right is rotated relative
# to da_u tile_index. if so we'll need da_v!
if tile_index in rot_tiles:
#print 'append with da_v tile ', right_tile_index
if right_tile_index in da_v.tile.values:
#print 'we have da_v tile ', right_tile_index
# see if we have multiple da_v tiles.
if len(da_v.tile) > 1:
# pull out the one we need.
right_arr = da_v.sel(tile=right_tile_index)
append_border = True
#print 'appending from da_v tile ', right_tile_index
# there is only one da_v tile
elif da_v.tile == right_tile_index:
# it is the one we need.
right_arr = da_v
append_border = True
#print 'appending from da_v tile ', right_tile_index
# something may have gone wrong.
else:
print('something is wrong with the da_v tile')
# if we do not have the da_v tile, then we can't append!
else:
print('we do not have da_v tile ', right_tile_index)
            # the values to append to the right come from another da_u tile
else:
#print 'append with da_u tile ', right_tile_index
# see if we have the required da_u tile
if right_tile_index in da_u.tile.values:
#print 'we have da_u tile ', right_tile_index
# see if we have multiple da_u tiles
if len(da_u.tile) > 1:
# pull out the one we need.
right_arr = da_u.sel(tile=right_tile_index)
append_border = True
#print 'appending from da_u tile ', right_tile_index
# if we only have one tile then something is wrong because
# the tile to the right of this da_u tile cannot be itself
else:
print('tile to the right cannot be tile_index')
# we do not have the required da_u tile.
else:
print('we do not have da_u tile ', right_tile_index)
# there is no tile to the right
#else:
# print 'there is no tile to the right of da_u tile ', tile_index
# if we have found a tile to the right we can do the appending
if append_border:
new_arr=append_border_to_tile(ref_arr, tile_index,
'u', llcN,
right = right_arr)
# if not then we will append an array of nans
else:
if num_dims == 2:
pad = ((0, pad_i), (0, pad_j))
elif num_dims == 3:
pad = ((0, 0), (0, pad_i), (0, pad_j))
elif num_dims == 4:
pad = ((0, 0), (0, 0), (0, pad_i), (0, pad_j))
new_arr = np.pad(ref_arr, pad_width = pad, mode='constant',
constant_values = np.nan)
# create a new DataArray
if num_dims == 2:
new_coords = [('j', j), ('i_g', i_g)]
elif num_dims == 3 and nk > 0:
new_coords = [('k', k), ('j', j), ('i_g', i_g)]
elif num_dims == 3 and nk == 0:
new_coords = [('time', time),('j', j), ('i_g', i_g)]
elif num_dims == 4:
new_coords = [('time', time), ('k', k), ('j', j), ('i_g',i_g)]
tmp_DA = xr.DataArray(new_arr, name = da_u.name, coords=new_coords)
# give the new DataArray the same attributes as da_u
tmp_DA.attrs = da_u.attrs
# give the new DataArray a tile coordinate
tmp_DA.coords['tile'] = tile_index
# increment the number of processed tiles counter by one
num_proc_tiles += 1
# set da_u_new equal to tmp_DA if this is the first processed tile
if num_proc_tiles == 1:
da_u_new = tmp_DA
# otherwise, concatentate tmp_DA with da_u_new along the 'tile' dim
else:
da_u_new = xr.concat([da_u_new, tmp_DA],'tile')
# reset tmp_DA
tmp_DA = []
# add all time coordinates to_da_u_new from da_u.
for idx, var in enumerate(da_u.coords):
if 'tim' in var:
da_u_new[var] = da_u[var]
da_u_new.attrs['padded'] = True
#%%
return da_u_new
#%% | e9b3e057fb56a998821e84a96b77e00bac4e0923 | 23,174 |
def arg(prevs, newarg):
""" Joins arguments to list """
retval = prevs
if not isinstance(retval, list):
retval = [retval]
return retval + [newarg] | 8d591595add095542ad697b4bd54642a4a14a17c | 23,175 |
def quote_plus(s, safe='', encoding=None, errors=None):
"""Quote the query fragment of a URL; replacing ' ' with '+'"""
if ' ' in s:
s = quote(s, safe + ' ', encoding, errors)
return s.replace(' ', '+')
return quote(s, safe, encoding, errors) | e0a5ba9237550856b695e236e7a457c34f053ba0 | 23,176 |
def mod(x, y):
"""Implement `mod`."""
return x % y | f19c019ed3cf072b1b5d3ec851c14a824c14edb5 | 23,177 |
from typing import List
def _lower_batch_matmul(op: relay.Call, inputs: List[te.Tensor]) -> te.Tensor:
"""Lower a batch_matmul using cuBLAS."""
return cublas.batch_matmul(
inputs[0],
inputs[1],
transa=op.attrs["transpose_a"],
transb=op.attrs["transpose_b"],
dtype=op.checked_type.dtype,
) | 2fa7be16c558e0edf9233d699139c004b44a93c2 | 23,178 |
import mxnet as mx
from mxnet import symbol as S
def cross_entropy_loss(inputs, labels, rescale_loss=1):
""" cross entropy loss with a mask """
criterion = mx.gluon.loss.SoftmaxCrossEntropyLoss(weight=rescale_loss)
loss = criterion(inputs, labels)
mask = S.var('mask')
loss = loss * S.reshape(mask, shape=(-1,))
return S.make_loss(loss.mean()) | 7151ace5b1ac93439defe93a4d6e45002cbfb8a6 | 23,179 |
def as_wrapping_formatters(objs, fields, field_labels, formatters, no_wrap=None, no_wrap_fields=[]):
"""This function is the entry point for building the "best guess"
word wrapping formatters. A best guess formatter guesses what the best
columns widths should be for the table celldata. It does this by collecting
various stats on the celldata (min, max average width of column celldata) and from
this celldata decides the desired widths and the minimum widths.
Given a list of formatters and the list of objects (objs), this function
first determines if we need to augment the passed formatters with word wrapping
formatters. If the no_wrap parameter or global no_wrap flag is set,
then we do not build wrapping formatters. If any of the formatters within formatters
is a word wrapping formatter, then it is assumed no more wrapping is required.
:param objs:
:param fields:
:param field_labels:
:param formatters:
:param no_wrap:
:param no_wrap_fields:
:return: When no wrapping is required, the formatters parameter is returned
-- effectively a NOOP in this case
When wrapping is required, best-guess word wrapping formatters are returned
with original parameter formatters embedded in the word wrapping formatters
"""
no_wrap = is_nowrap_set(no_wrap)
if not needs_wrapping_formatters(formatters, no_wrap):
return formatters
format_spec = build_best_guess_formatters_using_average_widths(objs, fields, field_labels, formatters, no_wrap_fields)
formatters = build_wrapping_formatters(objs, fields, field_labels, format_spec)
return formatters | 1f60c9ebaebb919ab8d4478e029aed649931df8a | 23,180 |
import torch
def classification_metrics(n_classes: int = 2):
"""Function to set up the classification metrics"""
logger.info(f"Setting up metrics for: {n_classes}")
metrics_dict_train = torch.nn.ModuleDict(
{
"accuracy": Accuracy(),
"recall": Recall(),
"precision": Precision(),
"F1": F1(),
}
)
metrics_dict_val = torch.nn.ModuleDict(
{
"accuracy": Accuracy(),
"recall": Recall(),
"precision": Precision(),
"F1": F1(),
}
)
return metrics_dict_train, metrics_dict_val | b876c7ac3da006cf54bc04e91f13de5a35103dab | 23,181 |
def ping(device,
address,
ttl=None,
timeout=None,
tos=None,
dscp=None,
size=None,
count=None,
source=None,
rapid=False,
do_not_fragment=False,
validate=False,
vrf=None,
command=None,
output=None):
""" execute ping and parse ping result and return structure data
Args:
device ('obj'): Device object
address ('str'): Address value
tos ('int'): type of service value
dscp (`str`): DSCP value
size ('str'): data bytes expected
ttl ('int'): Not supported
timeout ('int'): timeout interval
count ('int'): repeat count
source ('str'): source address or interface, default: None
rapid ('bool'): Not supported
do_not_fragment ('bool'): enable do not fragment bit in IP header, default: False
validate (`bool`): validate reply data, default: False
vrf ('str'): VRF name
command (`str`): ping command. This will ignore all other arguments
output (`str`): ping command output. no parser call involved
Returns:
Boolean
Raises:
None
"""
try:
obj = Ping(device=device)
return obj.parse(addr=address,
vrf=vrf,
tos=tos,
dscp=dscp,
size=size,
ttl=ttl,
timeout=timeout,
count=count,
source=source,
rapid=rapid,
do_not_fragment=do_not_fragment,
validate=validate,
command=command,
output=output)
except SchemaEmptyParserError:
log.info('parsed_output was empty')
return {}
except Exception as e:
log.warning(e)
return {} | 1e13d1af7e9678bc8650bc3173858b148fbedc86 | 23,182 |
import numpy
import scipy
def _subSquare(vectors, var, full=False):
"""
given a series of vectors, this function calculates:
(variances,vectors)=numpy.linalg.eigh(vectors.H*vectors)
    it's a separate function because if there are fewer vectors
    than dimensions the process can be accelerated; it just takes some dancing
it is based on this:
>>> vectors=Matrix(helpers.ascomplex(numpy.random.randn(
... numpy.random.randint(1,10),numpy.random.randint(1,10),2
... )))
>>> cov = vectors.H*vectors
>>> Xcov = vectors*vectors.H
>>> (Xval,Xvec) = numpy.linalg.eigh(Xcov)
>>> vec = Xvec.H*vectors
>>> assert vec.H*vec == cov
"""
vectors = Matrix(vectors)
shape = vectors.shape
if not all(shape):
val = numpy.zeros([0])
vec = numpy.zeros([0, shape[1]])
return (val, vec)
eig = numpy.linalg.eigh
if shape[0] >= shape[1] or full or not vectors.any() or (var < 0).any():
scaled = Matrix(var[:, None]*numpy.array(vectors))
cov = vectors.H*scaled
(val, vec) = eig(cov)
vec = vec.H
elif not var.any():
cov = vectors.H*vectors
(_,vec) = eig(cov)
vec = vec.H
val = numpy.zeros(vec.shape[0])
else:
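        # fewer vectors than dimensions: eigendecompose the smaller Gram matrix (X X.H) and map back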
scaled = Matrix(scipy.sqrt(var)[:, None]*numpy.array(vectors))
Xcov = scaled*scaled.H
#Xcov = var[:,None]*numpy.array(vectors)*vectors.H
(_, Xvec) = eig(Xcov)
Xscaled = (Xvec.H*scaled)
val = helpers.mag2(Xscaled)
vec = numpy.array(Xscaled)/scipy.sqrt(val[:, numpy.newaxis])
return (val, vec) | 8f588d3f64eaf892a1481983436c13d7c5010f12 | 23,184 |
def to_pickle(data):
"""
This prepares data on arbitrary form to be pickled. It handles any nested
structure and returns data on a form that is safe to pickle (including
having converted any database models to their internal representation).
We also convert any Saver*-type objects back to their normal
representations, they are not pickle-safe.
"""
def process_item(item):
"Recursive processor and identification of data"
dtype = type(item)
if dtype in (basestring, int, long, float, bool):
return item
elif dtype == tuple:
return tuple(process_item(val) for val in item)
elif dtype in (list, _SaverList):
return [process_item(val) for val in item]
elif dtype in (dict, _SaverDict):
return dict((process_item(key), process_item(val)) for key, val in item.items())
elif dtype in (set, _SaverSet):
return set(process_item(val) for val in item)
        elif hasattr(item, '__iter__'):
# we try to conserve the iterable class, if not convert to list
try:
return item.__class__([process_item(val) for val in item])
except (AttributeError, TypeError):
return [process_item(val) for val in item]
return pack_dbobj(item)
return process_item(data) | e52dddec911b0ac548daed81452d041aee41f548 | 23,185 |
import requests
def spot_silver_benchmark_sge() -> pd.DataFrame:
"""
    Shanghai Gold Exchange - Data & Information - Shanghai Silver Benchmark Price - historical data
    https://www.sge.com.cn/sjzx/mrhq
    :return: historical data
:rtype: pandas.DataFrame
"""
url = "https://www.sge.com.cn/graph/DayilyShsilverJzj"
payload = {}
r = requests.post(url, data=payload)
data_json = r.json()
temp_df = pd.DataFrame(data_json["wp"])
temp_df.columns = [
"交易时间",
"晚盘价",
]
temp_df["交易时间"] = pd.to_datetime(temp_df["交易时间"], unit="ms").dt.date
temp_zp_df = pd.DataFrame(data_json["zp"])
temp_zp_df.columns = [
"交易时间",
"早盘价",
]
temp_zp_df["交易时间"] = pd.to_datetime(temp_zp_df["交易时间"], unit="ms").dt.date
temp_df["早盘价"] = temp_zp_df["早盘价"]
return temp_df | 8bfcc5d24116231835a41447fb284f346586628e | 23,186 |
def requires_all_permissions(permission, login_url=None, raise_exception=False):
"""
Decorator for views that defines what permissions are required, and also
adds the required permissions as a property to that view function.
The permissions added to the view function can then be used by the sidebar
template to know whether to render the sidebar menu item that links to that
view function
"""
def decorator(function):
if isinstance(permission, str):
permissions = (permission, )
else:
permissions = permission
function.permissions = permissions
@wraps(function)
@permission_required(permission, login_url, raise_exception)
def wrap(request, *args, **kwargs):
return function(request, *args, **kwargs)
return wrap
return decorator | e2368c7f32185ebe1ee7cec50625a72b0fe9ec03 | 23,187 |
def hasTable(cur, table):
"""checks to make sure this sql database has a specific table"""
cur.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='table_name'")
rows = cur.fetchall()
if table in rows:
return True
else:
return False | dfdb3db0901832330083da8b645ae90e28cfb26d | 23,188 |
def _check_wkt_load(x):
"""Check if an object is a loaded polygon or not. If not, load it."""
if isinstance(x, str):
try:
x = loads(x)
except WKTReadingError:
warn('{} is not a WKT-formatted string.'.format(x))
return x | 457a02cffa7f56e05ad7ca3a8df83f5f719346b7 | 23,189 |
def _yielddefer(function, *args, **kwargs):
"""
Called if a function decorated with :func:`yieldefer` is invoked.
"""
try:
retval = function(*args, **kwargs)
except:
return defer.fail()
if isinstance(retval, defer.Deferred):
return retval
if not (hasattr(retval, '__iter__') and
hasattr(retval, 'next') and
hasattr(retval, 'send') and
hasattr(retval, 'throw')):
return defer.succeed(retval)
iterator = retval
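    # drive the generator as a coroutine: wrap each yielded value in a Deferred and feed its result back in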
def maybe_deferred(val):
# We don't want exceptions to become twisted failures
# because exceptions thrown by the generator methods
# indicate exceptions thrown by the code _between_ the
# yield statements or it indicates the end of the
# iteration.
if isinstance(val, defer.Deferred):
return val
else:
return defer.succeed(val)
def success(value):
try:
d = maybe_deferred(iterator.send(value))
d.addCallbacks(success, fail)
return d
except StopIteration:
return None
except defer._DefGen_Return as e:
return e.value
def fail(failure):
try:
d = maybe_deferred(failure.throwExceptionIntoGenerator(iterator))
#d = iterator.throw(failure.value)
d.addCallbacks(success, fail)
return d
except StopIteration:
return None
except defer._DefGen_Return as e:
return e.value
try:
d = maybe_deferred(iterator.next())
d.addCallbacks(success, fail)
except StopIteration:
d = defer.succeed(None)
except defer._DefGen_Return as e:
d = defer.succeed(e.value)
except:
d = defer.fail()
return d | b226984e1b4845783dc47f187d24482e040cc6c0 | 23,190 |
import scipy
import warnings
def dimension_parameters(time_series, nr_steps=100, literature_value=None,
plot=False, r_minmin=None, r_maxmax=None,
shortness_weight=0.5, literature_weight=1.):
""" Estimates parameters r_min and r_max for calculation of correlation
dimension using the algorithm by Grassberger and Procaccia and uses them
to calculate it.
This experimental function performs a simple grid search on r_min and r_max
in the intervall given by r_minmin, r_maxmax and nr_steps. The performance
of the parameters is measured by a combination of NRMSE, a penalty for small
intervalls relative to given r_minmin and r_maxmax and a quadratic penalty
for the difference from the literature value if given.
For calculating the dimension of a high number of similar time_series in a
row it is advisable to use this function only once to get the parameters
and then use the function dimension with them in the subsequent computations.
Might fail for short time_series or unreasonable choices of parameters.
It is recommended to use the plot option to double check the plausibility
of the results.
Args:
time_series (np.ndarray): time series to calculate dimension of, shape (T, d)
r_minmin (float): minimum radius in grid search
r_maxmax (float): maximum radius in grid search
nr_steps (int): number of steps in grid search
plot (boolean): flag for plotting loglog plot
Returns:
tuple: 3-element tuple containing:
- **best_r_min** (*float*): Estimation for r_min
- **best_r_max** (*float*): Estimation for r_max
- **dimension** (*float*): Estimation for dimension using
the parameters best_r_min and best_r_max
"""
if r_maxmax is None:
expansion = []
for d in range(time_series.shape[1]):
expansion.append(np.max(time_series[:, d] - np.min(time_series[:, d])))
r_maxmax = np.max(expansion)
if r_minmin is None:
r_minmin = 0.001 * r_maxmax
literature_cost = 0
nr_points = float(time_series.shape[0])
radii = np.logspace(np.log10(r_minmin), np.log10(r_maxmax), nr_steps)
tree = scipy.spatial.cKDTree(time_series)
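    # correlation sums: neighbour pairs within each radius, normalised by the number of points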
N_r = np.array(tree.count_neighbors(tree, radii), dtype=float) / nr_points
N_r = np.vstack((radii, N_r))
loss = None
for start_index in range(nr_steps - 1):
for end_index in range(start_index + 1, nr_steps):
# print(str(start_index)+', '+ str(end_index))
current_N_r = N_r[:, start_index:end_index]
current_r_min = radii[start_index]
current_r_max = radii[end_index]
# linear fit based on loglog scale, to get slope/dimension:
slope, intercept = np.polyfit(np.log(current_N_r[0]),
np.log(current_N_r[1]), deg=1)[0:2]
dimension = slope
estimated_line = intercept + slope * np.log(current_N_r[0])
error = rmse(np.log(current_N_r[1]), estimated_line,
normalization="historic")
shortness_cost = nr_steps / (end_index - start_index) ** 3
            if literature_value is not None:
                # quadratic penalty for deviating from the literature value (see docstring)
                literature_cost = (literature_value - dimension) ** 2
new_loss = error + shortness_weight * shortness_cost + literature_weight * literature_cost * 5.
if loss is None:
loss = new_loss
best_r_min = current_r_min
best_r_max = current_r_max
best_slope = slope
best_intercept = intercept
elif new_loss < loss:
loss = new_loss
best_r_min = current_r_min
best_r_max = current_r_max
best_slope = slope
best_intercept = intercept
dimension = best_slope
# ###plotting
# if plot:
#
# plt.loglog(N_r[0], N_r[1], 'x', basex=10., basey=10.,label='data')
# plt.loglog(N_r[0], best_intercept + best_slope*N_r[1],
# label='fit: r_min ='+str(round(best_r_min,3))+', r_max = '+
# str(round(best_r_max,3)))
# plt.axvline(x=best_r_min)
# plt.axvline(x=best_r_max)
# plt.title('loglog plot of the N_r(radius), slope/dim = ' + str(dimension))
# plt.legend()
# plt.show()
if plot:
warn_string = "Plotting was removed in the entirety of the rescomp package.\n" \
"The 'plot' paramter will be removed in future releases as well."
warnings.warn(warn_string, UserWarning)
return best_r_min, best_r_max, dimension | abec1b064b42083b9e60bb2cb4827d7fe8f7b2e9 | 23,191 |
def seir_model_with_soc_dist(init_vals, params, t):
"""
SEIR infection model with social distancing.
rho = social distancing factor.
"""
S_0, E_0, I_0, R_0 = init_vals
S, E, I, R = [S_0], [E_0], [I_0], [R_0]
alpha, beta, gamma, rho = params
dt = t[1] - t[0]
for _ in t[1:]:
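        # forward-Euler update of the SEIR ODEs; rho scales the effective transmission rate beta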
next_S = S[-1] - (rho * beta * S[-1] * I[-1]) * dt
next_E = E[-1] + (rho * beta * S[-1] * I[-1] - alpha * E[-1]) * dt
next_I = I[-1] + (alpha * E[-1] - gamma * I[-1]) * dt
next_R = R[-1] + (gamma * I[-1]) * dt
S.append(next_S)
E.append(next_E)
I.append(next_I)
R.append(next_R)
return np.stack([S, E, I, R]).T | dae5ace760055f5bbb78f079b660e8d55587b2fe | 23,193 |
import torch
def greeq(data, transmit=None, receive=None, opt=None, **kwopt):
"""Fit a non-linear relaxometry model to multi-echo Gradient-Echo data.
Parameters
----------
data : sequence[GradientEchoMulti]
Observed GRE data.
transmit : sequence[PrecomputedFieldMap], optional
Map(s) of the transmit field (b1+). If a single map is provided,
it is used to correct all contrasts. If multiple maps are
provided, there should be one for each contrast.
receive : sequence[PrecomputedFieldMap], optional
Map(s) of the receive field (b1-). If a single map is provided,
it is used to correct all contrasts. If multiple maps are
provided, there should be one for each contrast.
If no receive map is provided, the output `pd` map will have
a remaining b1- bias field.
opt : GREEQOptions or dict, optional
Algorithm options.
{'preproc': {'register': True}, # Co-register contrasts
'optim': {'nb_levels': 1, # Number of pyramid levels
'max_iter_rls': 10, # Max reweighting iterations
'max_iter_gn': 5, # Max Gauss-Newton iterations
'max_iter_cg': 32, # Max Conjugate-Gradient iterations
'tolerance_rls': 1e-05, # Tolerance for early stopping (RLS)
'tolerance_gn': 1e-05, ""
'tolerance_cg': 1e-03}, ""
'backend': {'dtype': torch.float32, # Data type
'device': 'cpu'}, # Device
'penalty': {'norm': 'jtv', # Type of penalty: {'tkh', 'tv', 'jtv', None}
'factor': {'r1': 10, # Penalty factor per (log) map
'pd': 10,
'r2s': 2,
'mt': 2}},
'verbose': 1}
Returns
-------
pd : ParameterMap
Proton density
r1 : ParameterMap
Longitudinal relaxation rate
r2s : ParameterMap
Apparent transverse relaxation rate
mt : ParameterMap, optional
Magnetisation transfer saturation
        Only returned if MT-weighted data is provided.
"""
opt = GREEQOptions().update(opt, **kwopt)
dtype = opt.backend.dtype
device = opt.backend.device
backend = dict(dtype=dtype, device=device)
# --- estimate noise / register / initialize maps ---
data, transmit, receive, maps = preproc(data, transmit, receive, opt)
vx = spatial.voxel_size(maps.affine)
has_mt = hasattr(maps, 'mt')
# --- prepare penalty factor ---
lam = opt.penalty.factor
if isinstance(lam, dict):
lam = [lam.get('pd', 0), lam.get('r1', 0),
lam.get('r2s', 0), lam.get('mt', 0)]
lam = core.utils.make_vector(lam, 4, **backend) # PD, R1, R2*, MT
# --- initialize weights (RLS) ---
if str(opt.penalty.norm).lower() == 'none' or all(lam == 0):
opt.penalty.norm = ''
opt.penalty.norm = opt.penalty.norm.lower()
mean_shape = maps[0].shape
rls = None
sumrls = 0
if opt.penalty.norm in ('tv', 'jtv'):
rls_shape = mean_shape
if opt.penalty.norm == 'tv':
rls_shape = (len(maps),) + rls_shape
rls = torch.ones(rls_shape, **backend)
sumrls = 0.5 * core.py.prod(rls_shape)
if opt.penalty.norm:
print(f'With {opt.penalty.norm.upper()} penalty:')
print(f' - PD: {lam[0]:.3g}')
print(f' - R1: {lam[1]:.3g}')
print(f' - R2*: {lam[2]:.3g}')
if has_mt:
print(f' - MT: {lam[3]:.3g}')
else:
print('Without penalty')
if opt.penalty.norm not in ('tv', 'jtv'):
# no reweighting -> do more gauss-newton updates instead
opt.optim.max_iter_gn *= opt.optim.max_iter_rls
opt.optim.max_iter_rls = 1
printer = CritPrinter(max_levels=opt.optim.nb_levels,
max_rls=opt.optim.max_iter_rls,
max_gn=opt.optim.max_iter_gn,
penalty=opt.penalty.norm,
verbose=opt.verbose)
printer.print_head()
shape0 = shape = maps.shape[1:]
aff0 = aff = maps.affine
vx0 = vx = spatial.voxel_size(aff0)
vol0 = vx0.prod()
vol = vx.prod() / vol0
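    # relative voxel volume (recomputed per pyramid level below); used to scale penalty terms consistently across levels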
for level in range(opt.optim.nb_levels, 0, -1):
printer.level = level
if opt.optim.nb_levels > 1:
aff, shape = _get_level(level, aff0, shape0)
vx = spatial.voxel_size(aff)
vol = vx.prod() / vol0
maps, rls = _resize(maps, rls, aff, shape)
if opt.penalty.norm in ('tv', 'jtv'):
sumrls = 0.5 * vol * rls.reciprocal().sum(dtype=torch.double)
# --- compute derivatives ---
nb_prm = len(maps)
nb_hes = nb_prm * (nb_prm + 1) // 2
grad = torch.empty((nb_prm,) + shape, **backend)
hess = torch.empty((nb_hes,) + shape, **backend)
ll_rls = []
ll_max = core.constants.ninf
max_iter_rls = max(opt.optim.max_iter_rls // level, 1)
for n_iter_rls in range(max_iter_rls):
# --- Reweighted least-squares loop ---
printer.rls = n_iter_rls
multi_rls = rls if opt.penalty.norm == 'tv' else [rls] * len(maps)
# --- Gauss Newton loop ---
ll_gn = []
for n_iter_gn in range(opt.optim.max_iter_gn):
printer.gn = n_iter_gn
crit = 0
grad.zero_()
hess.zero_()
# --- loop over contrasts ---
for contrast, b1m, b1p in zip(data, receive, transmit):
# compute gradient
crit1, g1, h1 = _nonlin_gradient(contrast, maps, b1m, b1p, opt)
# increment
if hasattr(maps, 'mt') and not contrast.mt:
# we optimize for mt but this particular contrast
# has no information about mt so g1/h1 are smaller
# than grad/hess.
grad[:-1] += g1
hind = list(range(nb_prm-1))
cnt = nb_prm
for i in range(nb_prm):
for j in range(i+1, nb_prm):
if i != nb_prm-1 and j != nb_prm-1:
hind.append(cnt)
cnt += 1
hess[hind] += h1
crit += crit1
else:
grad += g1
hess += h1
crit += crit1
del g1, h1
# --- penalty ---
reg = 0.
if opt.penalty.norm:
for i, (map, weight, l) in enumerate(zip(maps, multi_rls, lam)):
if not l:
continue
reg1, g1 = _nonlin_reg(map.fdata(**backend), vx, weight, l * vol)
reg += reg1
grad[i] += g1
del g1
# --- gauss-newton ---
if not hess.isfinite().all():
print('WARNING: NaNs in hess')
if not grad.isfinite().all():
                    print('WARNING: NaNs in grad')
if opt.penalty.norm:
hess = hessian_sym_loaddiag(hess, 1e-5, 1e-8)
deltas = _nonlin_solve(hess, grad, multi_rls, lam * vol, vx, opt)
else:
hess = hessian_sym_loaddiag(hess, 1e-3, 1e-4)
deltas = hessian_sym_solve(hess, grad)
if not deltas.isfinite().all():
print('WARNING: NaNs in delta')
for map, delta in zip(maps, deltas):
map.volume -= delta
if map.min is not None or map.max is not None:
map.volume.clamp_(map.min, map.max)
del deltas
# --- Compute gain ---
ll = crit + reg + sumrls
ll_max = max(ll_max, ll)
ll_prev = ll_gn[-1] if ll_gn else ll_max
gain = (ll_prev - ll) / (ll_max - ll_prev)
ll_gn.append(ll)
printer.print_crit(crit, reg, sumrls, gain)
if gain < opt.optim.tolerance_gn:
print('GN converged: ', ll_prev.item(), '->', ll.item())
break
# --- Update RLS weights ---
if opt.penalty.norm in ('tv', 'jtv'):
del multi_rls
rls = _nonlin_rls(maps, lam, opt.penalty.norm)
sumrls = (0.5 * vol) * rls.sum(dtype=torch.double)
eps = core.constants.eps(rls.dtype)
rls = rls.clamp_min_(eps).reciprocal_()
# --- Compute gain ---
# (we are late by one full RLS iteration when computing the
# gain but we save some computations)
ll = ll_gn[-1]
ll_prev = ll_rls[-1][-1] if ll_rls else ll_max
ll_rls.append(ll_gn)
gain = (ll_prev - ll) / (ll_max - ll_prev)
if abs(gain) < opt.optim.tolerance_rls:
print(f'RLS converged ({gain:7.2g})')
break
del grad
if opt.uncertainty:
multi_rls = rls if opt.penalty.norm == 'tv' else [rls] * len(maps)
uncertainty = _nonlin_uncertainty(hess, multi_rls, lam * vol, vx, opt)
maps.pd.uncertainty = uncertainty[0]
maps.r1.uncertainty = uncertainty[1]
maps.r2s.uncertainty = uncertainty[2]
if hasattr(maps, 'mt'):
maps.mt.uncertainty = uncertainty[3]
# --- Prepare output ---
return postproc(maps) | 4190b82de68f6362bf6cec48e4e88419bab7b0da | 23,194 |