content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
def _find_nearest_idx(a, a0):
"""Element idx in nd array `a` closest to the scalar value `a0`."""
if isinstance(a, list):
a = np.array(a)
idx = np.abs(a - a0).argmin()
return idx | c681b96ee8d3629daacd15650428a36041189739 | 12,000 |
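A minimal usage sketch for _find_nearest_idx above; the array values and expected index are illustrative, not from the source.
import numpy as np

# Find the index of the element closest to 0.72 in an evenly spaced array.
a = np.linspace(0.0, 1.0, 11)        # [0.0, 0.1, ..., 1.0]
idx = _find_nearest_idx(a, 0.72)     # |a - 0.72| is smallest at a[7] == 0.7
print(idx, a[idx])                   # 7 0.7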
def build_argument_parser():
"""
Builds the argument parser
:return: the argument parser
:rtype: ArgumentParser
"""
opts = ArgumentParser()
opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*')
naming_opts = opts.add_argument_group("Naming")
naming_opts.add_argument('-t', '--type', dest='type', default=None,
help='The suggested file type: movie, episode. If undefined, type will be guessed.')
naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=None,
help='Parse files as name only, considering "/" and "\\" like other separators.')
naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None,
help='If short date is found, consider the first digits as the year.')
naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None,
help='If short date is found, consider the second digits as the day.')
naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', default=None,
help='Allowed language (can be used multiple times)')
naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', default=None,
help='Allowed country (can be used multiple times)')
naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number',
default=None,
help='Guess "serie.213.avi" as the episode 213. Without this option, '
'it will be guessed as season 2, episode 13')
naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', default=None,
help='Expected title to parse (can be used multiple times)')
naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', default=None,
help='Expected release group (can be used multiple times)')
input_opts = opts.add_argument_group("Input")
input_opts.add_argument('-f', '--input-file', dest='input_file', default=None,
help='Read filenames from an input text file. File should use UTF-8 charset.')
output_opts = opts.add_argument_group("Output")
output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=None,
help='Display debug output')
output_opts.add_argument('-P', '--show-property', dest='show_property', default=None,
help='Display the value of a single property (title, series, video_codec, year, ...)')
output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=None,
help='Display advanced information for filename guesses, as json output')
output_opts.add_argument('-s', '--single-value', dest='single_value', action='store_true', default=None,
help='Keep only first value found for each property')
output_opts.add_argument('-l', '--enforce-list', dest='enforce_list', action='store_true', default=None,
help='Wrap each found value in a list even when property has a single value')
output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=None,
help='Display information for filename guesses as json output')
output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None,
help='Display information for filename guesses as yaml output')
conf_opts = opts.add_argument_group("Configuration")
conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None,
help='Filepath to the configuration file. Configuration contains the same options as '
'those command line options, but option names have "-" characters replaced with "_". '
                           'If not defined, guessit tries to read a default configuration file at '
'~/.guessit/options.(json|yml|yaml) and ~/.config/guessit/options.(json|yml|yaml). '
'Set to "false" to disable default configuration file loading.')
conf_opts.add_argument('--no-embedded-config', dest='no_embedded_config', action='store_true',
default=None,
help='Disable default configuration.')
information_opts = opts.add_argument_group("Information")
information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=None,
help='Display properties that can be guessed.')
information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=None,
help='Display property values that can be guessed.')
information_opts.add_argument('--version', dest='version', action='store_true', default=None,
help='Display the guessit version.')
return opts | c4194d065ac1751e723e1be6e6968712e6e3cb4f | 12,001 |
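A usage sketch for build_argument_parser, assuming it is invoked from guessit's CLI entry point; the sample release name and option values are illustrative.
# Parse a release name plus a couple of naming options from an argv-style list.
parser = build_argument_parser()
args = parser.parse_args(['-t', 'episode', '-L', 'en', 'Show.Name.S02E13.720p.mkv'])
print(args.type)                # 'episode'
print(args.allowed_languages)   # ['en']
print(args.filename)            # ['Show.Name.S02E13.720p.mkv']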
def does_user_have_product(product, username):
"""Return True/False if a user has the specified product."""
try:
instance = adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return instance.has_product(product) | eda5a4a983fac8fa089c575788982685836b4b87 | 12,002 |
import datetime
def items_for_result(cl, result, form):
"""
Generate the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
elif isinstance(f, (models.BooleanField)):
row_classes.append('checkmark-td')
if value:
row_classes.append('positive')
else:
row_classes.append('negative')
elif isinstance(f, models.FileField):
row_classes.append('file-td')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = result_repr
# format_html(
# '<a href="{}"{}>{}</a>',
# url,
# format_html(
# ' data-popup-opener="{}"', value
# ) if cl.is_popup else '',
# result_repr)
yield format_html('<{}{}>{}</{}>', table_tag, row_class, link_or_text, table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(str(bf.errors) + str(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
info = (result._meta.app_label, result._meta.model_name)
admin_url = reverse('admin:%s_%s_change' % info, args=(result.pk,))
yield format_html(f'<td><a href={admin_url}></a></td>')
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', form[cl.model._meta.pk.name]) | 99ec81a16f833d095de880f459dcd97dce056ccd | 12,003 |
def system_to_ntp_time(timestamp):
"""Convert a system time to a NTP time.
Parameters:
timestamp -- timestamp in system time
Returns:
corresponding NTP time
"""
return timestamp + NTP_DELTA | 2f0081e6c473b05302a5c08dc1818cea0c500caa | 12,004 |
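system_to_ntp_time relies on a module-level NTP_DELTA constant that is not shown here; a common definition is the number of seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01), as sketched below.
import time

# Seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01):
# 70 years including 17 leap days -> (70 * 365 + 17) * 86400 == 2208988800.
NTP_DELTA = 2208988800
ntp_now = system_to_ntp_time(time.time())   # seconds since 1900-01-01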
from torch import nn
def bcewithlogits_loss(weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None):
"""Creates a criterion that combines a `Sigmoid` layer and the `BCELoss` in one single
class
Arguments:
        weight (Tensor, optional) : A manual rescaling weight given to the loss of each batch element.
size_average (bool, optional) : By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple
elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False.
(default: True)
reduce (bool, optional) : By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is
False, returns a loss per batch element instead and ignores size_average.
(default: True)
reduction (string, optional) : Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
(default: 'mean')
pos_weight (Tensor, optional) : a weight of positive examples. Must be a vector with length equal to the number of classes.
Returns:
BCEWithLogitsLoss
"""
return nn.BCEWithLogitsLoss(weight, size_average, reduce, reduction, pos_weight) | 57c125abb1df39d6e8b77bc741b31aa9bbaba9fc | 12,005 |
from typing import Optional
def transition_matrix(
adata: AnnData,
vkey: str = "velocity",
backward: bool = False,
weight_connectivities: Optional[float] = None,
sigma_corr: Optional[float] = None,
scale_by_variances: bool = False,
var_key: Optional[str] = "velocity_graph_uncertainties",
var_min: float = 0.1,
use_negative_cosines: bool = True,
self_transitions: bool = False,
perc: Optional[float] = None,
threshold: Optional[float] = None,
density_normalize: bool = True,
) -> KernelExpression:
"""
Compute a transition matrix based on a combination of RNA Velocity and transcriptomic similarity.
To learn more about the way in which the transition matrices are computed, see
:class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and
:class:`cellrank.tl.kernels.ConnectivityKernel` for the transcriptomic-similarity-based transition matrix.
Params
------
adata: :class:`anndata.AnnData`
Annotated data object.
vkey
Key from :paramref:`adata` `.layers` to access the velocities.
backward
Direction of the process.
weight_connectivities
Weight given to transcriptomic similarities as opposed to velocities. Must be in `[0, 1]`.
use_negative_cosines
Whether to use correlations with cells that have an angle > 90 degree with :math:`v_i`.
sigma_corr
Scaling parameter for the softmax. Larger values will lead to a more concentrated distribution (more peaked).
Default is to use 1 / median_velocity_correlation.
scale_by_variances
Use velocity variances to scale the softmax.
var_key
        Key from `adata.uns` to access velocity variances.
var_min
Variances are clipped to this value at the lower end.
self_transitions
Assigns elements to the diagonal of the velocity-graph based on a confidence measure
perc
Quantile of the distribution of exponentiated velocity correlations. This is used as a threshold to set
smaller values to zero.
threshold
Set a threshold to remove exponentiated velocity correlations smaller than :paramref:`threshold`.
density_normalize
Whether to use density correction when computing the transition probabilities.
Density correction is done as by [Haghverdi16]_.
Returns
-------
:class:`cellrank.tl.KernelExpression`
A kernel expression object.
"""
# initialise the velocity kernel and compute transition matrix
vk = VelocityKernel(
adata,
backward=backward,
vkey=vkey,
use_negative_cosines=use_negative_cosines,
var_key=var_key,
)
vk.compute_transition_matrix(
sigma_corr=sigma_corr,
scale_by_variances=scale_by_variances,
var_min=var_min,
self_transitions=self_transitions,
perc=perc,
threshold=threshold,
density_normalize=density_normalize,
)
if weight_connectivities is not None:
if 0 < weight_connectivities < 1:
logg.info(
f"Using a connectivity kernel with weight `{weight_connectivities}`"
)
ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(
density_normalize=density_normalize
)
final = (1 - weight_connectivities) * vk + weight_connectivities * ck
elif weight_connectivities == 0:
final = vk
elif weight_connectivities == 1:
final = ConnectivityKernel(
adata, backward=backward
).compute_transition_matrix(density_normalize=density_normalize)
else:
raise ValueError(
f"The parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`."
)
else:
final = vk
final.write_to_adata()
return final | fde82fe71e6e63a5842cf041de60afa10316442d | 12,006 |
from typing import Union
from typing import Any
from datetime import datetime, timedelta
def create_access_token(
subject: Union[str, Any], expires_delta: timedelta = None, is_superuser: bool = False
) -> str:
"""
generate jwt token
:param subject: subject need to save in token
:param expires_delta: expires time
:return: token
"""
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE)
to_encode = {"exp": expire, "sub": str(subject)}
# superuser token can always access
if is_superuser:
to_encode.pop("exp")
encoded_jwt = jwt.encode(
to_encode, settings.SECRET_KEY, algorithm=settings.TOKEN_ALGORITHMS
)
return encoded_jwt | b07c94ae32311f71737daec6e168ef9deb12ef38 | 12,007 |
import numpy
def transparency(wavelength):
"""Returns the sky transparency in [0, 1] for wavelength in [m]"""
wavelength = wavelength / 10**-9
idx = numpy.argmin(numpy.abs(
data_transparency['wavelength'] * 1000 - wavelength))
return data_transparency['fraction'][idx] | 168691af8b8772203493df146b4b9629100e1002 | 12,008 |
import re
import glob
import os
def split_fortran_files(source_dir, subroutines=None):
"""Split each file in `source_dir` into separate files per subroutine.
Parameters
----------
source_dir : str
Full path to directory in which sources to be split are located.
subroutines : list of str, optional
Subroutines to split. (Default: all)
Returns
-------
fnames : list of str
List of file names (not including any path) that were created
in `source_dir`.
Notes
-----
This function is useful for code that can't be compiled with g77 because of
type casting errors which do work with gfortran.
Created files are named: ``original_name + '_subr_i' + '.f'``, with ``i``
starting at zero and ending at ``num_subroutines_in_file - 1``.
"""
if subroutines is not None:
subroutines = [x.lower() for x in subroutines]
def split_file(fname):
with open(fname, 'rb') as f:
lines = f.readlines()
subs = []
need_split_next = True
# find lines with SUBROUTINE statements
for ix, line in enumerate(lines):
            m = re.match(b'^\\s+subroutine\\s+([a-z0-9_]+)\\s*\\(', line, re.I)
if m and line[0] not in b'Cc!*':
if subroutines is not None:
subr_name = m.group(1).decode('ascii').lower()
subr_wanted = (subr_name in subroutines)
else:
subr_wanted = True
if subr_wanted or need_split_next:
need_split_next = subr_wanted
subs.append(ix)
# check if no split needed
if len(subs) <= 1:
return [fname]
# write out one file per subroutine
new_fnames = []
num_files = len(subs)
for nfile in range(num_files):
new_fname = fname[:-2] + '_subr_' + str(nfile) + '.f'
new_fnames.append(new_fname)
with open(new_fname, 'wb') as fn:
if nfile + 1 == num_files:
fn.writelines(lines[subs[nfile]:])
else:
fn.writelines(lines[subs[nfile]:subs[nfile+1]])
return new_fnames
exclude_pattern = re.compile('_subr_[0-9]')
source_fnames = [f for f in glob.glob(os.path.join(source_dir, '*.f'))
if not exclude_pattern.search(os.path.basename(f))]
fnames = []
for source_fname in source_fnames:
created_files = split_file(source_fname)
if created_files is not None:
for cfile in created_files:
fnames.append(os.path.basename(cfile))
return fnames | aa709fcd2b73921b19c8d1e3235a30867112f2ea | 12,009 |
def parse_all_moves(moves_string):
""" Parse a move string """
moves = []
if not moves_string:
raise ValueError("No Moves Given")
moves_strings = moves_string.split(" ")
for move_string in moves_strings:
move = CubeMove.parse(move_string)
moves.append(move)
return moves | ddc063574b8cfe5a2a7bb604033976e505f85df2 | 12,010 |
import itertools
def merge_samples(samples, nchannels, weight_table=None):
"""
    Merges multiple samples into a single sample
:param samples: the samples, must have the same sample rate and channel count
:param nchannels: the number of channels
:param weight_table: adds a specific weight to each sample when merging the sound
:return: the merged sample
"""
    zipped = itertools.zip_longest(*samples, fillvalue=tuple(0 for _ in range(nchannels)))
mapped = map(lambda x:
(__weighted_avg(itertools.islice(itertools.chain(*x), c, len(samples), nchannels), weight_table,
len(samples)) for c in range(nchannels)),
zipped)
return mapped | dcae84c506ea13cc130c3846cbe250d59b6f5115 | 12,011 |
def get_spam_info(msg: Message, max_score=None) -> (bool, str):
"""parse SpamAssassin header to detect whether a message is classified as spam.
Return (is spam, spam status detail)
The header format is
```X-Spam-Status: No, score=-0.1 required=5.0 tests=DKIM_SIGNED,DKIM_VALID,
DKIM_VALID_AU,RCVD_IN_DNSWL_BLOCKED,RCVD_IN_MSPIKE_H2,SPF_PASS,
URIBL_BLOCKED autolearn=unavailable autolearn_force=no version=3.4.2```
"""
spamassassin_status = msg["X-Spam-Status"]
if not spamassassin_status:
return False, ""
return get_spam_from_header(spamassassin_status, max_score=max_score) | e11f806a8b8007d25898545b9b16f9ec2207fa89 | 12,012 |
def _check(isamAppliance, id=None):
"""
Check if the last created user has the exact same id or id exists
:param isamAppliance:
    :param id: the id to look for
:return:
"""
ret_obj = get_all(isamAppliance)
    if id is not None:
for users in ret_obj['data']:
if users['id'] == id:
return True
return False | 36ccb79bf303a6cf0c3eb7a33e7b2c8c1d1090d7 | 12,013 |
import calendar
import datetime
import matplotlib.pyplot as plt
from matplotlib import patches
def plot_week_timeseries(time, value, normalise=True,
label=None, h=0.85, value2=None,
label2=None, daynames=None,
xfmt="%1.0f", ax=None):
"""
Shows a timeseries dispatched by days as bars.
@param time dates
@param value values to display as bars.
@param normalise normalise data before showing it
@param label label of the series
    @param value2 second series to show as a line
@param label2 label of the second series
@param daynames names to use for week day names (default is English)
@param xfmt format number of the X axis
@param ax existing axis
@return axis
.. plot::
import datetime
import matplotlib.pyplot as plt
from mlinsights.timeseries.datasets import artificial_data
from mlinsights.timeseries.agg import aggregate_timeseries
from mlinsights.timeseries.plotting import plot_week_timeseries
dt1 = datetime.datetime(2019, 8, 1)
dt2 = datetime.datetime(2019, 9, 1)
data = artificial_data(dt1, dt2, minutes=15)
print(data.head())
agg = aggregate_timeseries(data, per='week')
plot_week_timeseries(
agg['weektime'], agg['y'], label="y",
value2=agg['y']/2, label2="y/2", normalise=False)
plt.show()
"""
if time.shape[0] != value.shape[0]:
raise AssertionError("Dimension mismatch") # pragma: no cover
def coor(ti):
days = ti.days
x = days
y = ti.seconds
return x, y
max_value = value.max()
if value2 is not None:
max_value = max(max_value, value2.max())
value2 = value2 / max_value
value = value / max_value
input_maxy = 1.
if ax is None:
ax = plt.gca()
# bars
delta = None
maxx, maxy = None, None
first = True
for i in range(time.shape[0]):
ti = time[i]
if i < time.shape[0] - 1:
ti1 = time[i + 1]
delta = (ti1 - ti) if delta is None else min(delta, ti1 - ti)
if delta == 0:
raise RuntimeError( # pragma: no cover
"The timeseries contains duplicated time values.")
else:
ti1 = ti + delta
x1, y1 = coor(ti)
x2, y2 = coor(ti1)
if y2 < y1:
x2, y2 = coor(ti + delta)
y2 = y1 + (y2 - y1) * h
if first and label:
ax.plot([x1, x1 + value[i] * 0.8], [y1, y1],
'b', alpha=0.5, label=label)
first = False
if maxx is None:
maxx = (x1, x1 + input_maxy)
maxy = (y1, y2)
else:
maxx = (min(x1, maxx[0]), # pylint: disable=E1136
max(x1 + input_maxy, maxx[1])) # pylint: disable=E1136
maxy = (min(y1, maxy[0]), # pylint: disable=E1136
max(y2, maxy[1])) # pylint: disable=E1136
rect = patches.Rectangle((x1, y1), value[i] * h, y2 - y1,
linewidth=1, edgecolor=None,
facecolor='b', fill=True,
alpha=0.5)
ax.add_patch(rect)
# days border
xticks = []
if daynames is None:
daynames = list(calendar.day_name)
maxx = [(maxx[0] // 7) * 7, maxx[1]]
new_ymin = maxy[0] - (maxy[1] * 0.025 + maxy[0] * 0.975 - maxy[0])
for i in range(int(maxx[0]), int(maxx[1] + 0.1)):
x1i = maxx[0] + input_maxy * i
x2i = x1i + input_maxy
xticks.append(x1i)
ax.plot([x1i, x1i + input_maxy], [new_ymin, new_ymin], 'k', alpha=0.5)
ax.plot([x1i, x1i + input_maxy], [maxy[1], maxy[1]], 'k', alpha=0.5)
ax.plot([x1i, x1i], [maxy[0], maxy[1]], 'k', alpha=0.5)
ax.plot([x2i, x2i], [maxy[0], maxy[1]], 'k', alpha=0.5)
ax.text(x1i, new_ymin, daynames[i])
# invert y axis
ax.invert_yaxis()
# change y labels
nby = len(ax.get_yticklabels())
ys = ax.get_yticks()
ylabels = []
for i in range(nby):
dh = ys[i]
dt = datetime.timedelta(seconds=dh)
tx = "%dh%02d" % (dt.seconds // 3600,
60 * (dt.seconds / 3600 - dt.seconds // 3600))
ylabels.append(tx)
ax.set_yticklabels(ylabels)
# change x labels
xs = ax.get_xticks()
xticks = []
xlabels = []
for i in range(0, len(xs) - 1):
if xs[i] < 0:
continue
dx = xs[i] - int(xs[i] / input_maxy) * input_maxy
xlabels.append(dx if normalise else (dx * max_value))
xticks.append(xs[i])
dx = (xs[i] + xs[i + 1]) / 2
dx = dx - int(dx / input_maxy) * input_maxy
xlabels.append(dx if normalise else (dx * max_value))
xticks.append((xs[i] + xs[i + 1]) / 2)
if len(xticks) < len(xlabels):
xticks.append(xs[-1])
ax.set_xticks(xticks)
ax.set_xticklabels(
[xfmt % x for x in xlabels] if xfmt else xlabels)
ax.tick_params(axis='x', rotation=30)
# value2
if value2 is not None:
value = value2.copy()
if normalise:
value = value / max_value
first = True
xs = []
ys = []
for i in range(time.shape[0]):
ti = time[i]
if i < time.shape[0] - 1:
ti1 = time[i + 1]
else:
ti1 = ti + delta
x1, y1 = coor(ti)
x2, y2 = coor(ti1)
if y2 < y1:
x2, y2 = coor(ti + delta)
y2 = y1 + (y2 - y1) * h
x2 = x1 + value[i] * h
if len(ys) > 0 and y2 < ys[-1]:
if first and label2 is not None:
ax.plot(xs, ys, color='orange', linewidth=2, label=label2)
first = False
else:
ax.plot(xs, ys, color='orange', linewidth=2)
xs, ys = [], []
xs.append(x2)
ys.append((y1 + y2) / 2)
if len(xs) > 0:
ax.plot(xs, ys, color='orange', linewidth=2)
return ax | 051ba850567e56f69d45e6cb33c7148cb5faba31 | 12,014 |
import cv2
def plot_boxes_on_image(image, boxes, color=(0, 255, 255), thickness=2):
"""
Plot the boxes onto the image.
For the boxes a center, size representation is expected: [cx, cy, w, h].
:param image: The image onto which to draw.
:param boxes: The boxes which shall be plotted.
:return: An image with the boxes overlayed over the image.
"""
for box in boxes:
start_point = tuple([int(x) for x in box[:2] - box[2:] // 2])
end_point = tuple([int(x) for x in box[:2] + box[2:] // 2])
image = cv2.rectangle(image, start_point, end_point, color, thickness)
return image | 9bd58bb72e8b37b446f342eb145203af50175732 | 12,015 |
import re
from spacy.tokens import Token
def get_text(markup: str) -> str:
"""Remove html tags, URLs and spaces using regexp"""
text = re.sub(r"<.*?>", "", markup)
url_pattern = r"(http|ftp)s?://(?:[a-zA-Z]|[0-9]|[$-_@.&#+]|[!*\(\),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+"
text = re.sub(url_pattern, "", text)
text = re.sub(r"\s+", " ", text)
return text.strip()
def preprocess_token(token: Token) -> str:
"""Remove grave accents and return lemmatized token lower case"""
result = remplace_accents(token.lemma_.strip().lower())
return result
def is_token_allowed(token: Token) -> bool:
"""No Stop words, No Punctuations or len token >= 3"""
# Avoid token: inmiscuyéndose lemma_ "inmiscuir el"
if (
not token
or token.is_space
or token.is_stop
or token.is_punct
or len(token) < 3
or " " in token.lemma_.strip()
):
return False
return True | 76b1c86cd852f82fecaa6011aa5e828bae0b4b45 | 12,016 |
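A small usage sketch for get_text above; the markup string is illustrative.
markup = "<p>Visit   <b>our</b> site at https://example.com/docs for more.</p>"
print(get_text(markup))  # "Visit our site at for more."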
def intercept_channel(channel, *interceptors):
"""Intercepts a channel through a set of interceptors.
This is an EXPERIMENTAL API.
Args:
channel: A Channel.
interceptors: Zero or more objects of type
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
Interceptors are given control in the order they are listed.
Returns:
A Channel that intercepts each invocation via the provided interceptors.
Raises:
TypeError: If interceptor does not derive from any of
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
"""
from grpc import _interceptor # pylint: disable=cyclic-import
return _interceptor.intercept_channel(channel, *interceptors) | 0f56be58125afcad9afd7aefd5ba55c6ca3b2970 | 12,017 |
def create_static_ip(compute, project, region, name):
"""Create global static IP
:param compute: GCE compute resource object using googleapiclient.discovery
:param project: string, GCE Project Id
:param region: string, GCE region
:param name: string, Static IP name
:return: Operation information
:rtype: dict
"""
return compute.addresses().insert(project=project, region=region, body={
'name': name,
}).execute() | 9f2f608ab3878c1534b3af47421866411d1ca523 | 12,018 |
import torch
import torch.nn.functional as F
def ctc_loss(encoder_outputs, labels, frame_lens, label_lens, reduction, device):
"""
All sorts of stupid restrictions from documentation:
In order to use CuDNN, the following must be satisfied:
1. targets must be in concatenated format,
2. all input_lengths must be T.
3. blank=0
4. target_lengths \leq 256,
5. the integer arguments must be of dtype torch.int32.
"""
assert (frame_lens[1:] - frame_lens[:-1] >= 0).all() # assert in increasing len
# req (5)
labels, frame_lens, label_lens = transform_data(lambda data: torch.tensor(data, dtype=torch.int32).to(device),
labels, frame_lens, label_lens)
# req (4)
skipped_indices, working_indices = filter_data_on_len(label_lens, max_len=256)
if len(skipped_indices) > 0:
print('some labels too long, unable to compute CTC...')
if len(working_indices) == 0:
print('skipping entire batch')
return None
print('skipping indices in batch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
# frame_lens 1, 1, 2, 3, 3, 3, 4
# frame_len[1:] 1, 2, 3, 3, 3, 4
# frame_lebs[:-1] 1, 1, 2, 3, 3, 3
# diff 0, 1, 1, 0, 0, 1
# nonzero_idx 1, 2, 5
# change_points 2, 3, 6
change_points = (frame_lens[1:] - frame_lens[:-1]).nonzero().squeeze(dim=-1) + 1
change_points = torch.cat([change_points, torch.LongTensor([len(frame_lens)]).to(device)]) # add last portion
# req 2
prev_change_point = 0
total_loss = 0
count = 0
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens = encoder_outputs, labels, frame_lens, label_lens
for change_point in change_points:
# we call this a minibatch
minibatch_size = len(frame_lens)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data[prev_change_point:change_point],
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens)
# req 3; moves up so that we leave idx=0 to blank
labels = labels + 1
# req 1
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
if torch.isinf(loss):
print('inf CTC loss occurred...')
skipped_indices, working_indices = ctc_fallback(encoder_outputs, labels, frame_lens, label_lens, 0)
if len(working_indices) == 0:
print('skipping the entire minibatch')
continue
print('skipping indices in minibatch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
minibatch_size = len(working_indices)
if reduction == 'mean':
loss *= minibatch_size
count += minibatch_size
total_loss += loss
prev_change_point = change_point
if total_loss == 0:
# all data points failed
return None
return total_loss / count if reduction == 'mean' else total_loss | 9907e1890669da7843efea080f5da51f64a82c1a | 12,019 |
import re
def expand_parameters(host, params):
"""Expand parameters in hostname.
Examples:
* "target{N}" => "target1"
* "{host}.{domain} => "host01.example.com"
"""
pattern = r"\{(.*?)\}"
def repl(match):
param_name = match.group(1)
return params[param_name]
return re.sub(pattern, repl, host) | 04f62924fdc77b02f3a393e5cc0c5382d1d4279a | 12,020 |
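A usage sketch for expand_parameters, mirroring the examples in its docstring; the parameter values are illustrative.
params = {"N": "1", "host": "host01", "domain": "example.com"}
print(expand_parameters("target{N}", params))         # "target1"
print(expand_parameters("{host}.{domain}", params))   # "host01.example.com"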
import math
import time
import numpy as np
import matplotlib.pyplot as plt
def search_s1(saturation, size, startTime):
"""
First stage for sequential adsorption.
    Returns list of circles, current saturation, list of saturations and
    list of times.
Keyword arguments:
size -- radius of single circle
saturation -- max saturation
startTime -- start time of algorithm
"""
D = size*2
rC = size*5
com_sat = 0
N = 0
ntimeList = []
satList = []
circles = [plt.Circle((np.random.rand(),np.random.rand()), size)]
while(com_sat < saturation and N <= 1000):
N += 1
newX = np.random.rand()
newY = np.random.rand()
neighborList = neighbors(newX, newY, circles, rC)
if len(neighborList) != 0:
for e in neighborList:
circleX = circles[e].get_center()[0]
circleY = circles[e].get_center()[1]
if (math.sqrt((newX - circleX)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY-V)**2) < D):
collision = 1
break
else:
collision = 0
if (collision == 0):
circles.append(plt.Circle((newX, newY), size))
com_sat = math.pi * size**2 * len(circles) * 100
ntimeList.append(time.time() - startTime)
satList.append(com_sat)
N = 0
else:
circles.append(plt.Circle((newX, newY), size))
return circles, com_sat, satList, ntimeList | 386909b285a5504a075f496edeb6e45bb41b6bc3 | 12,021 |
import numpy as np
import cv2
def draw_labeled_bounding_boxes(img, labeled_frame, num_objects):
"""
Starting from labeled regions, draw enclosing rectangles in the original color frame.
"""
# Iterate through all detected cars
for car_number in range(1, num_objects + 1):
# Find pixels with each car_number label value
rows, cols = np.where(labeled_frame == car_number)
# Find minimum enclosing rectangle
x_min, y_min = np.min(cols), np.min(rows)
x_max, y_max = np.max(cols), np.max(rows)
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=(255, 0, 0), thickness=6)
return img | 3eeacafb08a15a98ec70567361c2b7d807af156c | 12,022 |
import re
def _skip_comments_and_whitespace(lines, idx):
###############################################################################
"""
Starting at idx, return next valid idx of lines that contains real data
"""
if (idx == len(lines)):
return idx
comment_re = re.compile(r'^[#!]')
lines_slice = lines[idx:]
for line in lines_slice:
line = line.strip()
if (comment_re.match(line) is not None or line == ""):
idx += 1
else:
return idx
return idx | b2b794681859eaa22dfc1807211bf050423cd107 | 12,023 |
def named_payload(name, parser_fn):
"""Wraps a parser result in a dictionary under given name."""
return lambda obj: {name: parser_fn(obj)} | 259525b93d056e045b0f8d5355d4028d67bfac45 | 12,024 |
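A usage sketch for named_payload, with a hypothetical parser function to show how the wrapper composes.
# Hypothetical parser that extracts a length from an object.
parse_length = lambda obj: len(obj)

length_payload = named_payload("length", parse_length)
print(length_payload("hello"))   # {'length': 5}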
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
def IsZero(g):
# Some introspection to check if the gradient is feeding zeros
if context.executing_eagerly():
# TODO(apassos) add an efficient way to detect eager zeros here.
return False
if g.op.type in ("ZerosLike", "Zeros"):
return True
const_fill_value = tensor_util.constant_value(g)
return const_fill_value is not None and (const_fill_value == 0).all()
logits = op.inputs[0]
if grad_grad is not None and not IsZero(grad_grad):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits)) | c5e82b224aa427e1898f037e0be9a42261ef0505 | 12,025 |
def prod_finished(job):
"""Check if prod stage is finished."""
try:
step = "prod" + str(job.doc.prod_replicates_done - 1)
except (KeyError, AttributeError):
step = "prod" + "0"
run_file = job.ws + "/run.{}".format(step)
if job.isfile("run.{}".format(step)):
with open(run_file) as myfile:
return "Program ended" in myfile.read()
else:
return False | 4ce509bb6656555a26384f733ccc91b974636d5f | 12,026 |
def PrimaryCaps(layer_input, name, dim_capsule, channels, kernel_size=9, strides=2, padding='valid'):
""" PrimaryCaps layer can be seen as a convolutional layer with a different
activation function (squashing)
:param layer_input
:param name
:param dim_capsule
:param channels
:param kernel_size
"""
assert channels % dim_capsule == 0, "Invalid size of channels and dim_capsule"
    # I.e. each primary capsule contains 8 convolutional units with a 9x9 kernel and a stride of 2.
num_filters = channels * dim_capsule
conv_layer = layers.Conv2D(
name=name,
filters=num_filters,
kernel_size=kernel_size,
strides=strides,
        activation=None,  # We apply squashing later, therefore no activation function is needed here
padding=padding)(layer_input)
    # In total PrimaryCapsules has [32x6x6] capsule outputs (each output is an 8D vector) and each
    # capsule in the [6x6] grid shares its weights with the others
# See https://keras.io/layers/core/#reshape
reshaped_conv = layers.Reshape(target_shape=(-1, dim_capsule))(conv_layer)
# Now lets apply the squashing function
return layers.Lambda(squashing)(reshaped_conv) | 41bb066e6b2fdac74f3002ae63055096228bd386 | 12,027 |
def get_font(args):
"""
Gets a font.
:param args: Arguments (ttf and ttfsize).
:return: Font.
"""
try:
return ImageFont.truetype(args.ttf, args.ttfsize)
except:
return ImageFont.load_default() | aee4761c93ef177f26b1bfd81ad5149186e32d47 | 12,028 |
def zeros(shape):
"""
Creates and returns a new array with the given shape which is filled with zeros.
"""
mat = empty(shape)
return fill(mat, 0.0) | 7cbe68c9928094e3588560643b8029867fa51ab7 | 12,029 |
def unpack_puzzle_input(dir_file: str) -> tuple[list, list]:
"""
Args:
dir_file (str): location of .txt file to pull data from
Returns:
bingo numbers and bingo cards in list format
"""
with open(dir_file, "r") as file:
content = file.read().splitlines()
bingo_numbers = [int(i) for i in content[0].split(",")]
bingo_cards = []
for index in range(2, len(content)):
if content[index-1] == '':
bingo_cards.append([[int(i) for i in content[index].split()]])
elif content[index] != '':
bingo_cards[-1].append([int(i) for i in content[index].split()])
return bingo_numbers, bingo_cards | 47ea8846233aabf1bc8e07f22e9993b7a5a328e1 | 12,030 |
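A sketch of the expected input file layout for unpack_puzzle_input (the numbers are illustrative): the first line holds the comma-separated bingo numbers, and each blank-line-separated block that follows is one bingo card.
# puzzle.txt (illustrative):
# 7,4,9,5,11
#
# 22 13 17 11  0
#  8  2 23  4 24
# 21  9 14 16  7
#  6 10  3 18  5
#  1 12 20 15 19
numbers, cards = unpack_puzzle_input("puzzle.txt")
print(numbers)      # [7, 4, 9, 5, 11]
print(cards[0][0])  # [22, 13, 17, 11, 0]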
def blank_response():
"""Fixture that constructs a response with a blank body."""
return build_response(data="") | 5ffe8c5b0b775db68c626f1e9cce8a83c66978ff | 12,031 |
import torch
import einops
def dense_image_warp(image:torch.Tensor, flow:torch.Tensor) -> torch.Tensor:
"""Image warping using per-pixel flow vectors.
See [1] for the original reference (Note that the tensor shape is different, etc.).
[1] https://www.tensorflow.org/addons/api_docs/python/tfa/image/dense_image_warp
Parameters
----------
image : torch.Tensor [shape=(batch, channels, height, width)]
flow : torch.Tensor [shape=(batch, 2, height, width)]
Returns
-------
warped_image : torch.Tensor [shape=(batch, channels, height, width)]
"""
batch_size, channels, height, width = image.shape
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
y_range = torch.arange(0., height, device=image.device, requires_grad=False)
x_range = torch.arange(0., width, device=image.device, requires_grad=False)
y_grid, x_grid = torch.meshgrid(y_range, x_range)
stacked_grid = torch.stack((y_grid, x_grid), dim=0) # shape=(2, height, width)
batched_grid = stacked_grid.unsqueeze(0) # shape=(1, 2, height, width)
query_points_on_grid = batched_grid - flow # shape=(batch_size, 2, height, width)
query_points_flattened = einops.rearrange(query_points_on_grid, 'b x h w -> b (h w) x') # shape=(batch_size, height * width, 2)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened) # shape=(batch_size, channels, n_queries)
interpolated = einops.rearrange(interpolated, 'b c (h w) -> b c h w', h=height, w=width)
return interpolated | 324be2c28dceeeb8079b4a89d94908e1a208c0a1 | 12,032 |
def validateRange(rangeStr : str) -> bool:
"""Validates the range argument"""
# type cast and compare
try:
# get range indices
ranges = rangeStr.split(",", 1)
rangeFrom = 0 if ranges[0] == "" else int(ranges[0])
rangeTo = 0 if ranges[1] == "" else int(ranges[1])
# check first if both ranges are not set
# using the -r , hack
if ranges == ["", ""]:
return False
# check if any of the range param is set
# and do testing per side
# if either range start/end is set and is <= 0:
if (ranges[0] != "" and rangeFrom < 0) or\
(ranges[1] != "" and rangeTo < 0):
return False
elif (ranges[0] != "") and (ranges[1] != ""):
# if both are set, do conditions here
# if from == to or from > to or from,to <=0, fail
if (rangeFrom == rangeTo) or\
(rangeFrom > rangeTo) or\
((rangeFrom <= 0) or (rangeTo <= 0)):
return False
except (ValueError, IndexError, AttributeError):
return False
return True | 375d80ef61c429a4e22df7321d223fe18939f597 | 12,033 |
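A few illustrative calls to validateRange showing which range strings pass.
print(validateRange("1,5"))    # True  (valid from/to pair)
print(validateRange("5,1"))    # False (from > to)
print(validateRange(","))      # False (neither side set)
print(validateRange("3,"))     # True  (only the start is set)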
from operator import add
def update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ipsrc_level_analysis_perpoint):
"""
Account for unique IPAddresses, BGP prefixes, origin_asn per ingress/egress points.
:param flow_ingress_asn:
:param flow_ip:
:param origin_asn:
:param ip_prefix:
:param d_ipsrc_level_analysis_perpoint:
:return: dict of dict {'1234': {('10.10.10.1', 23456, '10.0.0.0/8'): [1]},
'5678': {('181.3.50.1', 98765, '181.3.50.0/20'): [1]}, ...}
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_ipsrc_level_analysis_perpoint.keys():
d_ipsrc_level_analysis_perpoint[flow_ingress_asn] = dict()
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
if k not in d_ipsrc_level_analysis_perpoint[flow_ingress_asn]:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
            d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = list(map(add, d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k], values))
return d_ipsrc_level_analysis_perpoint | ad6ccefd62b11f3cf1a7b5e452789ddf22fcad55 | 12,034 |
import yaml
def _config_from_file(configfile):
"""Return a dict containing all of the config values found in the given
configfile.
"""
conf = {}
# set from config if possible
if configfile:
with open(configfile, 'r') as fp:
            config_yaml = yaml.safe_load(fp)
conf = config_yaml
# in the config yaml, 'years' is a map of years to styles; in the config
# dict used in this module, 'year_styles' is that map and 'years' is
# simply a list of the years to graph
conf['year_styles'] = conf.pop('years', {})
conf['years'] = list(conf['year_styles'].keys())
return conf | 2ab4d779fd18c13054e3c40ae411106856fae9ae | 12,035 |
import numpy as np
from scipy import odr
def odr_planar_fit(points, rand_3_estimate=False):
"""
Fit a plane to 3d points.
Orthogonal distance regression is performed using the odrpack.
Parameters
----------
points : list of [x, y, z] points
rand_3_estimate : bool, optional
First estimation of the plane using 3 random points from the input points list.
Default is False which implies a regular least square fit for the first estimation.
Returns
-------
ndarray
"""
def f_3(beta, xyz):
""" implicit definition of the plane"""
return beta[0] * xyz[0] + beta[1] * xyz[1] + beta[2] * xyz[2] + beta[3]
# # Coordinates of the 2D points
x = points[:, 0]
y = points[:, 1]
z = points[:, 2]
# x = np.r_[9, 35, -13, 10, 23, 0]
# y = np.r_[34, 10, 6, -14, 27, -10]
# z = np.r_[100, 101, 101, 100, 101, 101]
if rand_3_estimate:
# initial guess for parameters
# select 3 random points
i = np.random.choice(len(x), size=3, replace=False)
# Form the 3 points
r_point_1 = np.r_[x[i[0]], y[i[0]], z[i[0]]]
r_point_2 = np.r_[x[i[1]], y[i[1]], z[i[1]]]
r_point_3 = np.r_[x[i[2]], y[i[2]], z[i[2]]]
# Two vectors on the plane
v_1 = r_point_1 - r_point_2
v_2 = r_point_1 - r_point_3
# normal to the 3-point-plane
u_1 = np.cross(v_1, v_2)
# Construct the first estimation, beta0
d_0 = u_1[0] * r_point_1[0] + u_1[1] * r_point_1[1] + u_1[2] * r_point_1[2]
beta0 = np.r_[u_1[0], u_1[1], u_1[2], d_0]
else:
beta0 = lstsq_planar_fit(points)
# Create the data object for the odr. The equation is given in the implicit form 'a*x + b*y + c*z + d = 0' and
# beta=[a, b, c, d] (beta is the vector to be fitted). The positional argument y=1 means that the dimensionality
# of the fitting is 1.
lsc_data = odr.Data(np.row_stack([x, y, z]), y=1)
# Create the odr model
lsc_model = odr.Model(f_3, implicit=True)
# Create the odr object based on the data, the model and the first estimation vector.
lsc_odr = odr.ODR(lsc_data, lsc_model, beta0)
# run the regression.
lsc_out = lsc_odr.run()
return lsc_out.beta / lsc_out.beta[3] | d4b542f1527fc89937be92e3ff1ffc827cb5cced | 12,036 |
import numpy as np
def adjust_learning_rate_lrstep(epoch, opt):
"""Sets the learning rate to the initial LR decayed by decay rate every steep step"""
steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))
if steps > 0:
new_lr = opt.lr_init * (opt.lr_decay_rate ** steps)
return new_lr
return opt.lr_init | e3af0a5e654595f309a2a202d0620b57e5968530 | 12,037 |
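A usage sketch for adjust_learning_rate_lrstep, using a hypothetical options namespace with the fields the function reads.
from types import SimpleNamespace

# Hypothetical hyper-parameters: decay by 10x after epochs 30 and 60.
opt = SimpleNamespace(lr_init=0.1, lr_decay_rate=0.1, lr_decay_epochs=[30, 60])
print(adjust_learning_rate_lrstep(10, opt))   # 0.1    (no decay step passed)
print(adjust_learning_rate_lrstep(45, opt))   # ~0.01  (one step)
print(adjust_learning_rate_lrstep(90, opt))   # ~0.001 (two steps)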
def subplot(n, m, k):
"""
Create a subplot command
Example::
import numpy as np
x = np.linspace(-5, 5, 1000)
figure(1)
subplot(2, 1, 1)
plot(x, np.sin(x), "r+")
subplot(2, 1, 2)
plot(x, np.cos(x), "g-")
show()
"""
global _current_axes
    lig = (k - 1) // m
col = (k - 1) % m
fig = gcf()
axe = fig.get_axes(lig, col)
_current_axes = axe
return axe | d7793d099a57ad8d825e16e737dc72f28dd0456c | 12,038 |
from typing import Any
from typing import cast
def _create_gdcm_image(src: bytes, **kwargs: Any) -> "gdcm.Image":
"""Return a gdcm.Image from the `src`.
Parameters
----------
src : bytes
The raw image frame data to be encoded.
**kwargs
Required parameters:
* `rows`: int
* `columns`: int
* `samples_per_pixel`: int
* `number_of_frames`: int
* `bits_allocated`: int
* `bits_stored`: int
* `pixel_representation`: int
* `photometric_interpretation`: str
Returns
-------
gdcm.Image
An Image containing the `src` as a single uncompressed frame.
"""
rows = kwargs['rows']
columns = kwargs['columns']
samples_per_pixel = kwargs['samples_per_pixel']
number_of_frames = kwargs['number_of_frames']
pixel_representation = kwargs['pixel_representation']
bits_allocated = kwargs['bits_allocated']
bits_stored = kwargs['bits_stored']
photometric_interpretation = kwargs['photometric_interpretation']
pi = gdcm.PhotometricInterpretation.GetPIType(
photometric_interpretation
)
# GDCM's null photometric interpretation gets used for invalid values
if pi == gdcm.PhotometricInterpretation.PI_END:
raise ValueError(
"An error occurred with the 'gdcm' plugin: invalid photometric "
f"interpretation '{photometric_interpretation}'"
)
# `src` uses little-endian byte ordering
ts = gdcm.TransferSyntax.ImplicitVRLittleEndian
image = gdcm.Image()
image.SetNumberOfDimensions(2)
image.SetDimensions((columns, rows, 1))
image.SetPhotometricInterpretation(
gdcm.PhotometricInterpretation(pi)
)
image.SetTransferSyntax(gdcm.TransferSyntax(ts))
pixel_format = gdcm.PixelFormat(
samples_per_pixel,
bits_allocated,
bits_stored,
bits_stored - 1,
pixel_representation
)
image.SetPixelFormat(pixel_format)
if samples_per_pixel > 1:
# Default `src` is planar configuration 0 (i.e. R1 G1 B1 R2 G2 B2)
image.SetPlanarConfiguration(0)
# Add the Pixel Data element and set the value to `src`
elem = gdcm.DataElement(gdcm.Tag(0x7FE0, 0x0010))
elem.SetByteStringValue(src)
image.SetDataElement(elem)
return cast("gdcm.Image", image) | d0844d92d3b0c6b62164019c56864cc498c51334 | 12,039 |
def _4_graphlet_contains_3star(adj_mat):
"""Check if a given graphlet of size 4 contains a 3-star"""
return (4 in [a.sum() for a in adj_mat]) | 307f03707d1a7032df0ccb4f7951eec0c75832fe | 12,040 |
import re
import subprocess
import os
def _one_day(args):
"""Prompts for index file update
`_need_to_index` has already filtered jpg preferred over other
formats, that is, if there is a duplicate name, it will list the
jpg, not the arw, pcd, etc. in the index.
Args:
args (list): files to index
"""
def _extract_jpg(image):
for e, s in (
# You can view arw files by modifying the camera type:
# exiftool -sonymodelid="ILCE-7M2" -ext ARW
# but better to extract the jpg preview and not modify the
# camera type
('arw', ['exiftool', '-b', '-PreviewImage', image]),
# Suffix [5] produces an image 3072 by 2048 ("16 Base")
('pcd', ['convert', image + '[5]']),
):
if not image.endswith('.' + e):
continue
p = re.sub(f'\\.{e}$', '.jpg', image)
if e == 'pcd':
s.append(p)
i = subprocess.check_output(s)
with open(p, 'wb') as f:
f.write(i)
return p
return image
if not args:
return
cwd = os.getcwd()
simple_msg = None
for a in args:
img = os.path.basename(a)
d = os.path.dirname(a)
if d:
os.chdir(d)
if not os.path.exists(img):
continue
preview = _extract_jpg(img)
if simple_msg:
msg = simple_msg
else:
if common.MOVIE_SUFFIX.search(img):
subprocess.check_call(['open', '-a', 'QuickTime Player.app', img])
else:
subprocess.check_call(['open', '-a', 'Preview.app', preview])
msg = input(a + ': ')
if not msg:
status = False
break
if msg == '?':
simple_msg = msg
if os.path.exists(img):
if msg == '!':
os.remove(img)
if preview != img:
os.remove(preview)
print(a + ': removed')
else:
with open('index.txt', 'a') as f:
f.write(preview + ' ' + msg + '\n')
else:
print(a + ': does not exist')
if d:
os.chdir(cwd)
try:
os.remove('index.txt~')
except Exception:
pass
return | 408d75736d1bf891cae2ffd38b204ba18a9bc265 | 12,041 |
from pathlib import Path
def af4_path() -> Path:
"""Return the abspath of Go bio-target-rna-fusion binary. Builds the binary if necessary"""
global AF4_PATH
if not AF4_PATH:
af4_label = "//go/src/github.com/grailbio/bio/cmd/bio-fusion"
build([af4_label])
AF4_PATH = go_executable(af4_label)
return AF4_PATH | f70b97661bf17eb4e3b67af20c5437ebd6123266 | 12,042 |
def get_sentence_content(sentence_token):
"""Extrac sentence string from list of token in present in sentence
Args:
sentence_token (tuple): contains length of sentence and list of all the token in sentence
Returns:
str: setence string
"""
sentence_content = ''
for word in sentence_token[1]:
sentence_content += word.text
return sentence_content | 4f6f1bb557bb508e823704fc645c2901e5f8f03f | 12,043 |
import os
def _parse_filename(filename):
"""Parse meta-information from given filename.
Parameters
----------
filename : str
A Market 1501 image filename.
Returns
-------
(int, int, str, str) | NoneType
Returns a tuple with the following entries:
* Unique ID of the individual in the image
* Index of the camera which has observed the individual
* Filename without extension
* File extension
Returns None if the given filename is not a valid filename.
"""
filename_base, ext = os.path.splitext(filename)
if '.' in filename_base:
# Some images have double filename extensions.
filename_base, ext = os.path.splitext(filename_base)
if ext != ".jpg":
return None
person_id, cam_seq, frame_idx, detection_idx = filename_base.split('_')
return int(person_id), int(cam_seq[1]), filename_base, ext | 61d8b721a594a802de8abc1c30a316fd1995a14e | 12,044 |
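A usage sketch for _parse_filename with a typical Market-1501 style filename; the example filename is illustrative.
print(_parse_filename("0002_c1s1_000451_03.jpg"))
# (2, 1, '0002_c1s1_000451_03', '.jpg')
print(_parse_filename("Thumbs.db"))   # None (not a .jpg file)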
def sequence_generator(data, look_back = 50):
"""\
Description:
------------
Input data for LSTM: Convert to user trajectory (maximum length: look back)
"""
train,test, valid = [],[],[]
unique_users = set(data[:,0])
items_per_user = {int(user):[0 for i in range(look_back)] for user in unique_users}
for (idx,row) in enumerate(data):
user,item,time = int(row[0]),int(row[1]),row[2]
items_per_user[user] = items_per_user[user][1:]+[item+1]
current_items = items_per_user[user]
if row[3]==0:
train.append([current_items[:-1],current_items[-1]])
elif row[3]==2:
test.append([current_items[:-1],current_items[-1]])
else:
valid.append([current_items[:-1],current_items[-1]])
return train,test | 688e572edf1b6d2dea2f069742b01c10ec36f928 | 12,045 |
from sklearn import base, svm
def prefit_clf__svm(gamma: float = 0.001) -> base.ClassifierMixin:
"""Returns an unfitted SVM classifier object.
:param gamma: ...
:return:
"""
return svm.SVC(gamma=gamma) | 481409fdd7b2970d3595a7f60e411e71ebb00ac0 | 12,046 |
def option_not_exist_msg(option_name, existing_options):
""" Someone is referencing an option that is not available in the current package
options
"""
result = ["'options.%s' doesn't exist" % option_name]
result.append("Possible options are %s" % existing_options or "none")
return "\n".join(result) | 7ffa0afa81483d78a1ed0d40d68831e09710b7e1 | 12,047 |
def elslib_CylinderD2(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param Pos:
:type Pos: gp_Ax3
:param Radius:
:type Radius: float
:param P:
:type P: gp_Pnt
:param Vu:
:type Vu: gp_Vec
:param Vv:
:type Vv: gp_Vec
:param Vuu:
:type Vuu: gp_Vec
:param Vvv:
:type Vvv: gp_Vec
:param Vuv:
:type Vuv: gp_Vec
:rtype: void
"""
return _ElSLib.elslib_CylinderD2(*args) | af11eee7a0a429ead43d40ed678f932abd2313f7 | 12,048 |
def get_version_message(version: str):
"""Get the message for the zygrader version from the changelog"""
changelog = load_changelog()
msg = [f"zygrader version {version}", ""]
version_index = 0
for line in changelog:
if line == version:
version_index = changelog.index(line) + 1
line = changelog[version_index]
while line:
msg.append(line)
version_index += 1
line = changelog[version_index]
return msg | 8530180c9d2dc413a1057c2ca255aa0e3dddd72c | 12,049 |
def get_arity(p, b_addr):
"""
    Retrieves the arity by inspecting a function call
:param p: angr project
:param b_addr: basic block address
:return: arity of the function
"""
return len(get_ord_arguments_call(p, b_addr)) | 47b7721421a226d969aada8d873c43f8f58810e9 | 12,050 |
import numpy as np
import matplotlib.pyplot as plt
def draw_des3_plot():
"""
This function is to draw the plot of DES 3.
"""
objects = ('Singapore', 'Uruguay', 'Chile', 'Belgium', 'Denmark', 'Qatar', 'Portugal', 'Canada', 'Spain', 'Ireland')
y_pos = np.arange(len(objects))
performance = [71, 69, 68, 66, 65, 65, 64, 63, 63, 62]
plt.xkcd()
fig = plt.figure(figsize=(9, 6), dpi=35)
fig.suptitle('Number of people fully vaccinated by country')
ax = fig.add_subplot(111)
ax.barh(y_pos, performance, align='center', alpha=0.5)
plt.yticks(y_pos, objects)
ax.set_xlabel('Share of people fully vaccinated')
return fig | 6cc1bb3331e9eed57a700596d133d2bf00c3398e | 12,051 |
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id") | 12a1e9c0805e8549be7861247b97f52defd576d9 | 12,052 |
import numpy as np
from mpi4py import MPI
def gather_from_processes(chunk, split_sizes, displacements, comm=MPI.COMM_WORLD):
"""Gather data chunks on rank zero
:param chunk: Data chunks, living on ranks 0, 1, ..., comm.size-1
:type chunk: np.ndarray
:param split_sizes: Chunk lenghts on individual ranks
:type split_sizes: np.ndarray
:param displacements: Chunk displacements (compare scatter_to_processes)
:type displacements: np.ndarray
:return: Dataset gathered again, living on rank 0
:type return: np.ndarray
Inspired by: https://stackoverflow.com/a/36082684
Licensed under the Academic Free License version 3.0
"""
comm.Barrier()
total_length = np.array(chunk.shape[0])
gathered = np.empty((comm.allreduce(total_length), chunk.shape[1]), dtype=chunk.dtype)
comm.Gatherv(chunk, [gathered, split_sizes, displacements, MPI.DOUBLE], root=0)
return gathered | 8f696241e0dc61bbb5dd9867f307e29e8358d69e | 12,053 |
import os
import typing as tp
from pathlib import Path
async def publish_file_as_upload(
background_tasks: BackgroundTasks, file_data: UploadFile = File(...)
) -> tp.Union[IpfsPublishResponse, GenericResponse]:
"""
Publish file to IPFS using local node (if enabled by config) and / or pin to Pinata pinning cloud (if enabled by config).
File is accepted as an UploadFile (multipart form data)
"""
try:
# temporary fix using on disk caching, need to be reworked to work without saving data on the disk
cache_dir = "cache"
if not os.path.exists(cache_dir):
os.mkdir(cache_dir)
path = f"{cache_dir}/{file_data.filename}"
with open(path, "wb") as f:
f.write(file_data.file.read())
cid, uri = await publish_file(Path(path), background_tasks)
message = f"File {file_data.filename} published"
logger.info(message)
return IpfsPublishResponse(status=status.HTTP_200_OK, details=message, ipfs_cid=cid, ipfs_link=uri)
except Exception as e:
message = f"An error occurred while publishing file to IPFS: {e}"
logger.error(message)
return GenericResponse(status=status.HTTP_500_INTERNAL_SERVER_ERROR, details=message) | 4e4f931e61257d86233c07ddd476d9ea548dc0ac | 12,054 |
import json
def edit_collab() :
"""
Endpoint to edit a specified collaboration's member variables. This endpoint requires the requesting user to be an
authenticated user to properly function.
Request Body Parameters:
id: string, JSON, required
owner: string, JSON, optional
size: int, JSON, optional
members: array of strings, JSON, optional
date: int, JSON, optional
duration: int, JSON, optional
location, string, JSON, optional
status: bool, JSON, optional
title: string, JSON, optional
description: string, JSON, optional
classes: array of strings, JSON, optional
skills: array of strings, JSON, optional
applicants: array of strings, JSON, optional
This endpoint queries the database for the specified collaboration. If the collaboration is found, other variables
included, if any, are updated. If the search fails, an appropriate error message is returned.
"""
data = request.get_json()
collab_id = data['id']
    record = collabDB.find_one({'_id': ObjectId(collab_id)})
if record is None:
return json.dumps({'error': "No collaborations to update matched id", 'code': 996})
else:
try:
            # Map each optional field to the type its value must have before it is written back.
            field_types = {
                'owner': str, 'size': int, 'members': list, 'date': int,
                'duration': int, 'location': str, 'status': bool, 'title': str,
                'description': str, 'classes': list, 'skills': list, 'applicants': list,
            }
            for field, expected_type in field_types.items():
                if field in data and isinstance(data[field], expected_type):
                    record = collabDB.update_one(
                        {"_id": ObjectId(collab_id)},
                        {"$set": {field: data[field]}}
                    )
            return json.dumps({'success': True})
except Exception as e:
print(e)
return json.dumps({'error': "Error while trying to update existing doc.", 'code': 997}) | f205302c6f1368542c870f3ab89bbde374e0957b | 12,055 |
def create_lkas_ui(packer, main_on, enabled, steer_alert, defog, ahbc, ahbcramping, config, noipma, stats, persipma, dasdsply, x30, daschime, lines):
"""Creates a CAN message for the Ford Steer Ui."""
values = {
"PersIndexIpma_D_Actl": persipma,
"DasStats_D_Dsply": dasdsply,
"Set_Me_X30": x30,
"Lines_Hud": lines,
"Hands_Warning_W_Chime": steer_alert,
"CamraDefog_B_Req": defog,
"AhbHiBeam_D_Rq": ahbc,
"AhbcRampingV_D_Rq": ahbcramping,
"FeatConfigIpmaActl": config,
"FeatNoIpmaActl": noipma,
"CamraStats_D_Dsply": stats,
"DasWarn_D_Dsply": daschime,
}
return packer.make_can_msg("Lane_Keep_Assist_Ui", 0, values) | b9c855b6210b34fde8943eaa8293d691ee291527 | 12,056 |
import torch
def _contextual_loss(x, y, reduction='mean'):
"""Contextual loss
"""
loss = -torch.log(_contextual_similarity(x, y))
if reduction == 'mean':
loss = loss.mean()
return loss | 3dee81131c0b1468e822cdc36f1817204ec9eba3 | 12,057 |
def _actual_center(pos, angle):
"""
Calculate the position of the geometric center of the agent
The value of self.cur_pos is the center of rotation.
"""
dir_vec = get_dir_vec(angle)
return pos + (CAMERA_FORWARD_DIST - (ROBOT_LENGTH / 2)) * dir_vec | bd354e1ef64cd14132944da9e436df2a4696d0fe | 12,058 |
def load_one_batch_mnist(batch_size=64, shuffle=True):
"""Return a single batch (inputs, labels) of MNIST data."""
dataloader = get_mnist_dataloder(batch_size, shuffle)
X, y = next(iter(dataloader))
return X, y | 2a2036a72cecf957fd7c8a5344c85a59935fb442 | 12,059 |
import re
def replace_math_functions(input_string):
""" FIXME: Temporarily replace std:: invocations of math functions with non-std:: versions to prevent linker errors
NOTE: This can lead to correctness issues when running tests, since the correct version of the math function (exp/expf) might not get called.
Plan is to remove this function once HIP supports std:: math function calls inside device code
"""
output_string = input_string
output_string = re.sub("std::exp\(", "::exp(", output_string)
output_string = re.sub("std::log\(", "::log(", output_string)
output_string = re.sub("std::pow\(", "::pow(", output_string)
return output_string | 09562b7bb173e44877e18368b0283b3c3f590004 | 12,060 |
def KmeansInterCompare(k, data, nbTests):
"""Réalisation d'un nombre donné de classification Kmeans.
Le meilleur résultat selon le critère d'inertie inter-groupe est affiché"""
KmeansResults = []
for i in range(0, nbTests):
KmeansResults.append(Kmeans(k, data))
    # maximize the between-group inertia, i.e. favour separation between the clusters
best_kmeans = 0
for i in range(1, nbTests):
if inerInter(KmeansResults[best_kmeans][0], KmeansResults[best_kmeans][1]) < inerInter(KmeansResults[i][0], KmeansResults[i][1]):
best_kmeans = i
return KmeansResults[best_kmeans] | 42d8d900ad5e7d6e80bbd16736178fe5a14f878c | 12,061 |
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from requests_html import HTMLSession
# is_valid is a project helper used to filter out malformed URLs,
# assumed to be defined elsewhere in this module.
def get_all_reports_url(url_1,url_2, headers=None):
""" Returns all reports URLs on a single 'url' """
if headers == None:
header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'}
else:
header = headers
url = urljoin(url_1, url_2)
# initialize the session
session = HTMLSession()
# make the HTTP request and retrieve response
response = session.get(url, headers=header)
# execute Javascript with a timeout of 20 seconds
# response.html.render(timeout=20) ## pyppeteer.errors.TimeoutError: Navigation Timeout Exceeded: 20000 ms exceeded.
# construct the soup parser
soup = BeautifulSoup(response.html.html, "html.parser")
urls = []
table = soup.find("table", class_="ms-rteTable-5")
for report, name in zip(table.find_all("td", class_="ms-rteTableEvenCol-5"), table.find_all("td", class_="ms-rteTableOddCol-5")) :
report_url = report.find("a").attrs.get("href")
name = ((''.join(name.text.split())).replace("/", "-")).replace(" ", "").replace("\u200b", "")
if not report_url:
            # if the cell does not contain a link, just skip this report
continue
# make the URL absolute by joining domain with the URL that is just extracted
report_url = urljoin(url_1, report_url)
try:
pos = report_url.index("?")
report_url = report_url[:pos]
except ValueError:
pass
# finally, if the url is valid
if is_valid(report_url):
urls.append({'url':report_url, 'name':name})
# close the session to end browser process
session.close()
    # print the total number of reports found at the URL
print(f"Total {len(urls)} Reports Found!")
return urls | 705047c4d21167f9ecde23f163e383e9f6aa00b2 | 12,062 |
import os
from flask import url_for
def generate_url_with_signature(endpoint, signature):
"""Generate a url for an endpoint with a signature.
Args:
endpoint: An endpoint referencing a method in the backend.
        signature: A signature serialized with relevant data and the secret
salt.
Returns:
url for the given endpoint with signature attached.
"""
if os.environ.get('FLASK_ENV', 'development') == 'production':
return url_for(
endpoint, signature=signature, _external=True, _scheme='https'
)
return url_for(endpoint, signature=signature, _external=True) | 491b3ef932bd9fd6e68d21112282e1f660a56a0f | 12,063 |
def left_shift(k, n=32):
"""
Returns the n*n matrix corresponding to the operation
lambda v: vec_from_int(int_from_vec(v) << k, n)
>>> print_mat(left_shift(2, 6))
000000
000000
100000
010000
001000
000100
>>> int_from_vec(left_shift(2) * vec_from_int(42)) == 42 << 2
True
"""
D = set(range(n))
return Mat((D, D), {(j + k, j): one for j in range(n - k)}) | 216011184a8b6a02675524fd2906f092553396c6 | 12,064 |
from typing import List
from datetime import datetime
from ignite.engine import Engine, Events
# In newer pytorch-ignite releases the TensorBoard handlers live in
# ignite.handlers.tensorboard_logger instead of ignite.contrib.handlers.
from ignite.contrib.handlers import global_step_from_engine
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler
# ThreeEvaluators is a project-local (train, validation, test) evaluator container,
# assumed to be imported from elsewhere in this code base.
def get_tensorboard_logger(
trainer: Engine, evaluators: ThreeEvaluators, metric_names: List[str]
) -> TensorboardLogger:
"""
creates a ``tensorboard`` logger which read metrics from given evaluators and attaches it to a given trainer
:param trainer: an ``ignite`` trainer to attach to
:param evaluators: a triple of train, validation, and test evaluators to get metrics from
:param metric_names: a list of metrics to log during validation and testing
"""
tb_logger = TensorboardLogger(
log_dir=f"runs/{datetime.now()}", flush_secs=1
)
training_loss = OutputHandler(
"training",
["running_loss"],
global_step_transform=global_step_from_engine(trainer),
)
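    # Logs the running training loss once per epoch, using the trainer's epoch counter
    # as the TensorBoard global step.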
tb_logger.attach(trainer, training_loss, Events.EPOCH_COMPLETED)
validation_loss = OutputHandler(
"validation",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.validation, validation_loss, Events.COMPLETED)
test_loss = OutputHandler(
"test",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.test, test_loss, Events.COMPLETED)
return tb_logger | 30a920c056cf10df4656cbda2a1e92fb28388f06 | 12,065 |
def get_vocabs(datasets):
"""Build vocabulary from an iteration of dataset objects
Args:
dataset: a list of dataset objects
Returns:
two sets of all the words and tags respectively in the dataset
"""
print("Building vocabulary...")
vocab_words = set()
vocab_tags = set()
for dataset in datasets:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
return vocab_words, vocab_tags | 1fce7fe7b9dbdd3216802d0816ac5fabc542b859 | 12,066 |
def check_row_uniqueness(board: list) -> bool:
"""
Return True if each row has no repeated digits.
Return False otherwise.
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
True
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 5 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
False
"""
global NUMBER
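    # NUMBER is a module-level board dimension (e.g. 9 for a standard sudoku grid),
    # assumed to be defined elsewhere in this module.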
for row in board:
count = 0
row_set = set()
for char in row:
if char.isdigit():
if int(char) in range(1, NUMBER + 1):
count += 1
row_set.add(char)
if len(row_set) != count:
return False
return True | b4d937f14de0da90e694621fb2c70e9a59e80f0a | 12,067 |
def calculate_distance_to_divide(
grid, longest_path=True, add_to_grid=False, clobber=False
):
"""Calculate the along flow distance from drainage divide to point.
This utility calculates the along flow distance based on the results of
running flow accumulation on the grid. It will use the connectivity
used by the FlowAccumulator (e.g. D4, D8, Dinf).
Parameters
----------
grid : ModelGrid
longest_path : bool, optional
Take the longest (or shortest) path to a drainage divide. Default is
true.
add_to_grid : boolean, optional
Flag to indicate if the stream length field should be added to the
grid. Default is False. The field name used is ``distance_to_divide``.
clobber : boolean, optional
Flag to indicate if adding the field to the grid should not clobber an
existing field with the same name. Default is False.
Returns
-------
    distance_to_divide : float ndarray
        The along-flow distance from the drainage divide to each node of the
        grid.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'D8')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
Now, let's change to MFD the flow_director method, which routes flow to
multiple nodes.
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'MFD')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
The distance_to_divide utility can also work on irregular grids. For the
example we will use a Hexagonal Model Grid, a special type of Voroni Grid
that has regularly spaced hexagonal cells.
>>> from landlab import HexModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> dx = 1
>>> hmg = HexModelGrid((5, 3), dx)
>>> _ = hmg.add_field(
... "topographic__elevation",
... hmg.node_x + np.round(hmg.node_y),
... at="node",
... )
>>> hmg.status_at_node[hmg.boundary_nodes] = hmg.BC_NODE_IS_CLOSED
>>> hmg.status_at_node[0] = hmg.BC_NODE_IS_FIXED_VALUE
>>> fr = FlowAccumulator(hmg, flow_director = 'D4')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... hmg,
... add_to_grid=True,
... clobber=True,
... )
>>> hmg.at_node['distance_to_divide']
array([ 3., 0., 0.,
0., 2., 1., 0.,
0., 1., 1., 0., 0.,
0., 0., 0., 0.,
0., 0., 0.])
"""
    # check that the flow__receiver_node field exists
if "flow__receiver_node" not in grid.at_node:
raise FieldError(
"A 'flow__receiver_node' field is required at the "
"nodes of the input grid."
)
if "flow__upstream_node_order" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
if "drainage_area" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
    # get the receiver nodes; depending on whether routing is to-one or to-multiple,
# we'll need to get a different at-node field.
if grid.at_node["flow__receiver_node"].size != grid.size("node"):
to_one = False
else:
to_one = True
flow__receiver_node = grid.at_node["flow__receiver_node"]
drainage_area = grid.at_node["drainage_area"]
# get the upstream node order
flow__upstream_node_order = grid.at_node["flow__upstream_node_order"]
# get downstream flow link lengths, result depends on type of grid.
if isinstance(grid, RasterModelGrid):
flow_link_lengths = grid.length_of_d8[
grid.at_node["flow__link_to_receiver_node"]
]
else:
flow_link_lengths = grid.length_of_link[
grid.at_node["flow__link_to_receiver_node"]
]
    # create an array that represents the distance to the divide.
distance_to_divide = np.zeros(grid.nodes.size)
if not longest_path:
distance_to_divide[:] = 2 * grid.size("node") * np.max(flow_link_lengths)
# iterate through the flow__upstream_node_order backwards.
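    # Reversing the downstream-to-upstream stack means each node is visited only after all
    # nodes upstream of it, so its own distance is final before being pushed to its receiver(s).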
for node in reversed(flow__upstream_node_order):
        # if the drainage area equals the node's cell area, set the distance to zero;
# this should handle the drainage divide cells as boundary cells have
# their area set to zero.
if drainage_area[node] == grid.cell_area_at_node[node]:
distance_to_divide[node] = 0
        # get flow receivers
reciever = flow__receiver_node[node]
if to_one:
# if not processing an outlet node.
if reciever != node:
if longest_path:
cond = (
distance_to_divide[reciever]
< distance_to_divide[node] + flow_link_lengths[node]
)
else:
cond = (
distance_to_divide[reciever]
> distance_to_divide[node] + flow_link_lengths[node]
)
if cond:
distance_to_divide[reciever] = (
distance_to_divide[node] + flow_link_lengths[node]
)
else:
            # non-existent links are coded with -1
useable_receivers = np.where(reciever != grid.BAD_INDEX)[0]
for idx in range(len(useable_receivers)):
r = reciever[useable_receivers][idx]
fll = flow_link_lengths[node][useable_receivers][idx]
# if not processing an outlet node.
if r != node:
if longest_path:
cond = distance_to_divide[r] < distance_to_divide[node] + fll
else:
cond = distance_to_divide[r] > distance_to_divide[node] + fll
if cond:
distance_to_divide[r] = distance_to_divide[node] + fll
# store on the grid
if add_to_grid:
grid.add_field(
"distance_to_divide", distance_to_divide, at="node", clobber=clobber
)
return distance_to_divide | 99afc09e88aee3ea7486a9dfddc45d98358d0bd7 | 12,068 |
import matplotlib.pyplot as plt
import numpy as np
import textwrap
import os
import time
import zhinst.utils
def run_example(device_id, do_plot=False):
"""
Run the example: Connect to a Zurich Instruments UHF Lock-in Amplifier or
UHFAWG, UHFQA, upload and run a basic AWG sequence program. It then demonstrates
how to upload (replace) a waveform without changing the sequencer program.
Requirements:
UHFLI with UHF-AWG Arbitrary Waveform Generator Option.
Hardware configuration: Connect signal output 1 to signal input 1 with a
BNC cable.
Arguments:
device_id (str): The ID of the device to run the example with. For
example, `dev2006` or `uhf-dev2006`.
do_plot (bool, optional): Specify whether to plot the signal measured by the scope
output. Default is no plot output.
Returns:
data: Data structure returned by the Scope
Raises:
Exception: If the UHF-AWG Option is not installed.
RuntimeError: If the device is not "discoverable" from the API.
See the "LabOne Programing Manual" for further help, available:
- On Windows via the Start-Menu:
Programs -> Zurich Instruments -> Documentation
- On Linux in the LabOne .tar.gz archive in the "Documentation"
sub-folder.
"""
# Settings
apilevel_example = 6 # The API level supported by this example.
err_msg = "This example can only be ran on either a UHFAWG, UHFQA or a UHF with the AWG option enabled."
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
(daq, device, _) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='UHF',
required_options=['AWG'], required_err_msg=err_msg)
zhinst.utils.api_server_version_check(daq)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# Now configure the instrument for this experiment. The following channels
# and indices work on all device configurations. The values below may be
# changed if the instrument has multiple input/output channels and/or either
# the Multifrequency or Multidemodulator options installed.
out_channel = 0
out_mixer_channel = 3
in_channel = 0
osc_index = 0
awg_channel = 0
frequency = 1e6
amplitude = 1.0
exp_setting = [
['/%s/sigins/%d/imp50' % (device, in_channel), 1],
['/%s/sigins/%d/ac' % (device, in_channel), 0],
['/%s/sigins/%d/diff' % (device, in_channel), 0],
['/%s/sigins/%d/range' % (device, in_channel), 1],
['/%s/oscs/%d/freq' % (device, osc_index), frequency],
['/%s/sigouts/%d/on' % (device, out_channel), 1],
['/%s/sigouts/%d/range' % (device, out_channel), 1],
['/%s/sigouts/%d/enables/%d' % (device, out_channel, out_mixer_channel), 1],
['/%s/sigouts/%d/amplitudes/*' % (device, out_channel), 0.],
['/%s/awgs/0/outputs/%d/amplitude' % (device, awg_channel), amplitude],
['/%s/awgs/0/outputs/0/mode' % device, 0],
['/%s/awgs/0/time' % device, 0],
['/%s/awgs/0/userregs/0' % device, 0]
]
daq.set(exp_setting)
daq.sync()
# Number of points in AWG waveform
AWG_N = 2000
# Define an AWG program as a string stored in the variable awg_program, equivalent to what would
# be entered in the Sequence Editor window in the graphical UI.
    # This example demonstrates four methods of defining waveforms via the API
# - (wave w0) loaded directly from programmatically generated CSV file wave0.csv.
# Waveform shape: Blackman window with negative amplitude.
# - (wave w1) using the waveform generation functionalities available in the AWG Sequencer language.
# Waveform shape: Gaussian function with positive amplitude.
# - (wave w2) using the vect() function and programmatic string replacement.
# Waveform shape: Single period of a sine wave.
# - (wave w3) directly writing an array of numbers to the AWG waveform memory.
# Waveform shape: Sinc function. In the sequencer language, the waveform is initially
# defined as an array of zeros. This placeholder array is later overwritten with the
# sinc function.
awg_program = textwrap.dedent("""\
const AWG_N = _c1_;
wave w0 = "wave0";
wave w1 = gauss(AWG_N, AWG_N/2, AWG_N/20);
wave w2 = vect(_w2_);
wave w3 = zeros(AWG_N);
while(getUserReg(0) == 0);
setTrigger(1);
setTrigger(0);
playWave(w0);
playWave(w1);
playWave(w2);
playWave(w3);
""")
# Define an array of values that are used to write values for wave w0 to a CSV file in the module's data directory
waveform_0 = -1.0 * np.blackman(AWG_N)
# Redefine the wave w1 in Python for later use in the plot
width = AWG_N/20
waveform_1 = np.exp(-(np.linspace(-AWG_N/2, AWG_N/2, AWG_N))**2/(2*width**2))
# Define an array of values that are used to generate wave w2
waveform_2 = np.sin(np.linspace(0, 2*np.pi, AWG_N))
# Fill the waveform values into the predefined program by inserting the array
# as comma-separated floating-point numbers into awg_program
awg_program = awg_program.replace('_w2_', ','.join([str(x) for x in waveform_2]))
# Do the same with the integer constant AWG_N
awg_program = awg_program.replace('_c1_', str(AWG_N))
# Create an instance of the AWG Module
awgModule = daq.awgModule()
awgModule.set('awgModule/device', device)
awgModule.execute()
# Get the modules data directory
data_dir = awgModule.getString('awgModule/directory')
# All CSV files within the waves directory are automatically recognized by the AWG module
wave_dir = os.path.join(data_dir, "awg", "waves")
if not os.path.isdir(wave_dir):
# The data directory is created by the AWG module and should always exist. If this exception is raised,
# something might be wrong with the file system.
raise Exception("AWG module wave directory {} does not exist or is not a directory".format(wave_dir))
# Save waveform data to CSV
csv_file = os.path.join(wave_dir, "wave0.csv")
np.savetxt(csv_file, waveform_0)
# Transfer the AWG sequence program. Compilation starts automatically.
awgModule.set('awgModule/compiler/sourcestring', awg_program)
# Note: when using an AWG program from a source file (and only then), the compiler needs to
# be started explicitly with awgModule.set('awgModule/compiler/start', 1)
while awgModule.getInt('awgModule/compiler/status') == -1:
time.sleep(0.1)
if awgModule.getInt('awgModule/compiler/status') == 1:
# compilation failed, raise an exception
raise Exception(awgModule.getString('awgModule/compiler/statusstring'))
if awgModule.getInt('awgModule/compiler/status') == 0:
print("Compilation successful with no warnings, will upload the program to the instrument.")
if awgModule.getInt('awgModule/compiler/status') == 2:
print("Compilation successful with warnings, will upload the program to the instrument.")
print("Compiler warning: ", awgModule.getString('awgModule/compiler/statusstring'))
# Wait for the waveform upload to finish
time.sleep(0.2)
i = 0
while (awgModule.getDouble('awgModule/progress') < 1.0) and (awgModule.getInt('awgModule/elf/status') != 1):
print("{} awgModule/progress: {:.2f}".format(i, awgModule.getDouble('awgModule/progress')))
time.sleep(0.5)
i += 1
print("{} awgModule/progress: {:.2f}".format(i, awgModule.getDouble('awgModule/progress')))
if awgModule.getInt('awgModule/elf/status') == 0:
print("Upload to the instrument successful.")
if awgModule.getInt('awgModule/elf/status') == 1:
raise Exception("Upload to the instrument failed.")
# Replace the waveform w3 with a new one.
waveform_3 = np.sinc(np.linspace(-6*np.pi, 6*np.pi, AWG_N))
# The set command below on awgs/0/waveform/index defines the index of the waveform in the sequencer program to
# replace with the data that is written to awgs/0/waveform/data.
# Let N be the total number of waveforms and M>0 be the number of waveforms defined from CSV file. Then the index of
# the waveform to be replaced is defined as following:
# - 0,...,M-1 for all waveforms defined from CSV file alphabetically ordered by filename,
# - M,...,N-1 in the order that the waveforms are defined in the sequencer program.
# For the case of M=0, the index is defined as:
# - 0,...,N-1 in the order that the waveforms are defined in the sequencer program.
# Of course, for the trivial case of 1 waveform, use index=0 to replace it.
# Here we replace waveform w3, the 4th waveform defined in the sequencer program. Using 0-based indexing the
# index of the waveform we want to replace (w3, a vector of zeros) is 3:
index = 3
daq.setInt('/' + device + '/awgs/0/waveform/index', index)
daq.sync()
# Write the waveform to the memory. For the transferred array, floating-point (-1.0...+1.0)
# as well as integer (-32768...+32768) data types are accepted.
# For dual-channel waves, interleaving is required.
daq.vectorWrite('/' + device + '/awgs/0/waveform/data', waveform_3)
# Configure the Scope for measurement
# 'channels/0/inputselect' : the input channel for the scope:
# 0 - signal input 1
daq.setInt('/%s/scopes/0/channels/0/inputselect' % (device), in_channel)
# 'time' : timescale of the wave, sets the sampling rate to 1.8GHz/2**time.
# 0 - sets the sampling rate to 1.8 GHz
# 1 - sets the sampling rate to 900 MHz
# ...
# 16 - sets the sampling rate to 27.5 kHz
daq.setInt('/%s/scopes/0/time' % device, 0)
# 'single' : only get a single scope shot.
# 0 - take continuous shots
# 1 - take a single shot
# Disable the scope.
daq.setInt('/%s/scopes/0/enable' % device, 0)
# Configure the length of the scope shot.
daq.setInt('/%s/scopes/0/length' % device, 10000)
# Now configure the scope's trigger to get aligned data
# 'trigenable' : enable the scope's trigger (boolean).
daq.setInt('/%s/scopes/0/trigenable' % device, 1)
# Specify the trigger channel:
#
# Here we trigger on the signal from UHF signal input 1. If the instrument has the DIG Option installed we could
# trigger the scope using an AWG Trigger instead (see the `setTrigger(1);` line in `awg_program` above).
# 0: Signal Input 1
# 192: AWG Trigger 1
trigchannel = 0
daq.setInt('/%s/scopes/0/trigchannel' % device, trigchannel)
if trigchannel == 0:
# Trigger on the falling edge of the negative blackman waveform `w0` from our AWG program.
daq.setInt('/%s/scopes/0/trigslope' % device, 2)
daq.setDouble('/%s/scopes/0/triglevel' % device, -0.600)
# Set hysteresis triggering threshold to avoid triggering on noise
# 'trighysteresis/mode' :
# 0 - absolute, use an absolute value ('scopes/0/trighysteresis/absolute')
# 1 - relative, use a relative value ('scopes/0trighysteresis/relative') of the trigchannel's input range
# (0.1=10%).
daq.setDouble('/%s/scopes/0/trighysteresis/mode' % device, 0)
daq.setDouble('/%s/scopes/0/trighysteresis/relative' % device, 0.025)
# Set a negative trigdelay to capture the beginning of the waveform.
trigdelay = -1.0e-6
daq.setDouble('/%s/scopes/0/trigdelay' % device, trigdelay)
else:
# Assume we're using an AWG Trigger, then the scope configuration is simple: Trigger on rising edge.
daq.setInt('/%s/scopes/0/trigslope' % device, 1)
# Set trigdelay to 0.0: Start recording from when the trigger is activated.
trigdelay = 0.0
daq.setDouble('/%s/scopes/0/trigdelay' % device, trigdelay)
trigreference = 0.0
# The trigger reference position relative within the wave, a value of 0.5 corresponds to the center of the wave.
daq.setDouble('/%s/scopes/0/trigreference' % device, trigreference)
# Set the hold off time in-between triggers.
daq.setDouble('/%s/scopes/0/trigholdoff' % device, 0.025)
# Set up the Scope Module.
scopeModule = daq.scopeModule()
scopeModule.set('scopeModule/mode', 1)
scopeModule.subscribe('/' + device + '/scopes/0/wave')
daq.setInt('/%s/scopes/0/single' % device, 1)
scopeModule.execute()
# Start the AWG in single-shot mode.
    # This is the preferred method of using the AWG: run it in single mode; continuous waveform playback
    # is best achieved by using an infinite loop (e.g., while (true)) in the sequencer program.
daq.set([['/' + device + '/awgs/0/single', 1],
['/' + device + '/awgs/0/enable', 1]])
daq.sync()
# Start the scope...
daq.setInt('/%s/scopes/0/enable' % device, 1)
daq.sync()
time.sleep(1.0)
daq.setInt('/%s/awgs/0/userregs/0' % device, 1)
# Read the scope data with timeout.
local_timeout = 2.0
records = 0
while (records < 1) and (local_timeout > 0):
time.sleep(0.1)
local_timeout -= 0.1
records = scopeModule.getInt("scopeModule/records")
# Disable the scope.
daq.setInt('/%s/scopes/0/enable' % device, 0)
data_read = scopeModule.read(True)
wave_nodepath = '/{}/scopes/0/wave'.format(device)
    assert wave_nodepath in data_read, "Error: The subscribed data `{}` was not returned.".format(wave_nodepath)
data = data_read[wave_nodepath][0][0]
f_s = 1.8e9 # sampling rate of scope and AWG
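    # Only the last enabled scope channel's samples are kept in x_measured/y_measured and
    # used for the comparison against the expected waveform below.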
for n in range(0, len(data['channelenable'])):
p = data['channelenable'][n]
if p:
y_measured = data['wave'][n]
x_measured = np.arange(-data['totalsamples'], 0)*data['dt'] + \
(data['timestamp'] - data['triggertimestamp'])/f_s
# Compare expected and measured signal
full_scale = 0.75
y_expected = np.concatenate((waveform_0, waveform_1, waveform_2, waveform_3))*full_scale*amplitude
x_expected = np.linspace(0, 4*AWG_N/f_s, 4*AWG_N)
# Correlate measured and expected signal
corr_meas_expect = np.correlate(y_measured, y_expected)
index_match = np.argmax(corr_meas_expect)
if do_plot:
# The shift between measured and expected signal depends among other things on cable length.
# We simply determine the shift experimentally and then plot the signals with an according correction
# on the horizontal axis.
x_shift = index_match/f_s - trigreference*(x_measured[-1] - x_measured[0]) + trigdelay
print('Plotting the expected and measured AWG signal.')
x_unit = 1e-9
plt.figure(1)
plt.clf()
plt.title('Measured and expected AWG Signals')
plt.plot(x_measured/x_unit, y_measured, label='measured')
plt.plot((x_expected + x_shift)/x_unit, y_expected, label='expected')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
plt.legend(loc='upper left')
plt.xlabel('Time, relative to trigger (ns)')
plt.ylabel('Voltage (V)')
plt.draw()
plt.show()
# Normalize the correlation coefficient by the two waveforms and check they
# agree to 95%.
norm_correlation_coeff = corr_meas_expect[index_match]/np.sqrt(sum(y_measured**2)*sum(y_expected**2))
assert norm_correlation_coeff > 0.95, \
("Detected a disagreement between the measured and expected signals, "
"normalized correlation coefficient: {}.".format(norm_correlation_coeff))
print("Measured and expected signals agree, normalized correlation coefficient: ",
norm_correlation_coeff, ".", sep="")
return data_read | 925954c576cc21c32ed94386d62a3e4dcfebdfe3 | 12,069 |
def string_unquote(value: str):
"""
Method to unquote a string
Args:
value: the value to unquote
Returns:
unquoted string
"""
if not isinstance(value, str):
return value
return value.replace('"', "").replace("'", "") | e062c012fc43f9b41a224f168de31732d885b21f | 12,070 |
def translate(tx, ty, tz):
"""Translate."""
return affine(t=[tx, ty, tz]) | f3b11f6bcf0f77b39423d8aac3d34149ae8b93a7 | 12,071 |
import numpy as np
import torch
from sklearn.utils import check_random_state
def randomized_svd_gpu(M, n_components, n_oversamples=10, n_iter='auto',
transpose='auto', random_state=0, lib='pytorch',tocpu=True):
"""Computes a truncated randomized SVD on GPU. Adapted from Sklearn.
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) `n_iter` in which case is set to 7.
This improves precision with few components.
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
lib : {'cupy', 'pytorch'}, str optional
Chooses the GPU library to be used.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
        # Pick a sensible default number of power iterations when not explicitly specified
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
M = M.T # this implementation is a bit faster with smaller shape[1]
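    # Note: only the PyTorch code path is implemented in this snippet; calling it with
    # lib='cupy' would fall through and return None.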
if lib == 'pytorch':
M_gpu = torch.Tensor.cuda(torch.from_numpy(M.astype('float32')))
# Generating normal random vectors with shape: (M.shape[1], n_random)
Q = torch.cuda.FloatTensor(M_gpu.shape[1], n_random).normal_()
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
Q = torch.mm(M_gpu, Q)
Q = torch.mm(torch.transpose(M_gpu, 0, 1), Q)
# Sample the range of M using by linear projection of Q. Extract an orthonormal basis
Q, _ = torch.qr(torch.mm(M_gpu, Q))
# project M to the (k + p) dimensional space using the basis vectors
B = torch.mm(torch.transpose(Q, 0, 1), M_gpu)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = torch.svd(B)
del B
U = torch.mm(Q, Uhat)
if transpose:
# transpose back the results according to the input convention
U, s, V=(torch.transpose(V[:n_components, :], 0, 1),s[:n_components],torch.transpose(U[:, :n_components], 0, 1))
else:
U, s, V=( U[:, :n_components], s[:n_components], V[:n_components, :])
if tocpu is True:
return np.array(U.cpu()).astype('float'), np.array(s.cpu()).astype('float'), np.array(V.cpu()).astype('float')
else:
return U, s, V | 31483d2f83b66d8cd4bc15a929ecf657c67d0af2 | 12,072 |
import re
import pandas as pd
from janitor import clean_names
def clean_column_names(df: pd.DataFrame) -> pd.DataFrame:
"""Cleans the column names of the given dataframe by applying the following steps
after using the janitor `clean_names` function:
* strips any 'unnamed' field, for example 'Unnamed: 0'
* replaces the first missing name with 'is_away'
* coverts '#' to '_nbr'
* converts '%' to '_pct'
Args:
df (pd.DataFrame): The dataframe to clean the column names of.
Returns:
pd.DataFrame: The dataframe with cleaned column names.
"""
df = clean_names(df)
cols = df.columns
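    # clean_names leaves placeholders such as "unnamed_0_level_1" behind for blank
    # multi-index header levels; strip those fragments from the column names.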
cols = [re.sub("unnamed_[0-9]+_level_[0-9]", "", x).strip("_") for x in cols]
# away will always be the first empty string following cleaning step above
cols[cols.index("")] = "is_away"
cols = [x.replace("#", "_nbr") for x in cols]
cols = [x.replace("%", "_pct") for x in cols]
cols = ["is_active" if x == "status" else x for x in cols]
cols = ["is_start" if x == "gs" else x for x in cols]
df.columns = cols
return df | 37c41b02846cacab1e412d402b50d94e18e1e20e | 12,073 |
import numpy as np
from collections import defaultdict
# Variable is the project's automatic-differentiation class, assumed to be defined
# or imported elsewhere in this module.
def cos(x):
"""Return the cosine.
INPUTS
x (Variable object or real number)
RETURNS
if x is a Variable, then return a Variable with val and der.
if x is a real number, then return the value of np.cos(x).
EXAMPLES
>>> x = Variable(0, name='x')
>>> t = cos(x)
>>> print(t.val, t.der['x'])
1.0 0.0
"""
try:
val = np.cos(x.val)
ders = defaultdict(float)
sec_ders = defaultdict(float)
for key in x.der:
ders[key] += -np.sin(x.val) * (x.der[key])
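            # second derivative via the chain rule: (cos x)'' = -x''*sin(x) - (x')**2*cos(x)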
sec_ders[key] += -x.sec_der[key]*np.sin(x.val)+(x.der[key]**2)*(-np.cos(x.val))
return Variable(val, ders, sec_ders)
except AttributeError:
return np.cos(x) | 472bfb53345c14545a8f1bf75deb5679fe1916f8 | 12,074 |
def _get_regions(connection):
""" Get list of regions in database excluding GB. If no regions are found,
a ValueError is raised.
"""
query_regions = connection.execute(
db.select([models.Region.code]).where(models.Region.code != "GB")
)
regions = [r[0] for r in query_regions]
if not regions:
raise ValueError("NPTG data not populated yet.")
return regions | ffb58c5c695a8dd669497f8dccdf5ef8202e5a21 | 12,075 |
def word_embedding_forward(x, W):
"""
Forward pass for word embeddings. We operate on minibatches of size N where
each sequence has length T. We assume a vocabulary of V words, assigning each
to a vector of dimension D.
Inputs:
- x: Integer array of shape (N, T) giving indices of words. Each element idx
      of x must be in the range 0 <= idx < V.
- W: Weight matrix of shape (V, D) giving word vectors for all words.
Returns a tuple of:
- out: Array of shape (N, T, D) giving word vectors for all input words.
"""
out = None
##############################################################################
# TODO: Implement the forward pass for word embeddings. #
# #
# HINT: This should be very simple. #
##############################################################################
    out = W[x]  # integer-array indexing gathers the D-dimensional vector for every index in x
##############################################################################
# END OF YOUR CODE #
##############################################################################
return out | 232096cc2c90a16fdc1c428d98ebe2db184df368 | 12,076 |
def retrieve_from_stream(iden, interval=60):
"""
Return messages from a stream.
:param iden: Identifier of the stream.
:param interval: defaults to messages of last 60 seconds.
"""
return stm_str.get_messages(str(UID), str(TOKEN), interval, iden) | 166918258ff3bc8ff972164027df4bf93b4a280e | 12,077 |
import tensorflow as tf  # TensorFlow 1.x API (tf.train.AdamOptimizer, tf.get_collection)
def train_op(tot_loss, lr, var_opt, name):
"""
When only the discriminator is trained, the learning rate is set to be 0.0008
When the generator model is also trained, the learning rate is set to be 0.0004
    Since there are batch-normalization layers in the model, the UPDATE_OPS collection must be run
    alongside the optimizer so that the train/test moving averages of the batch-norm parameters stay up to date.
"""
# optimizer = tf.train.RMSPropOptimizer(learning_rate = lr)
epsilon = 1e-4 # added on 18th of July
optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon, name=name)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads = optimizer.compute_gradients(tot_loss, var_list=var_opt)
print("================================================")
print("I am printing the non gradient")
for grad, var in grads:
if grad is None:
print("no gradient", grad, var)
print("================================================")
opt = optimizer.apply_gradients(grads)
return opt | 52dc967b9adac561f210fe4305d69b8724841607 | 12,078 |
import math
def logadd(x, y):
"""Adds two log values.
Ensures accuracy even when the difference between values is large.
"""
if x < y:
temp = x
x = y
y = temp
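    # with x >= y, log(exp(x) + exp(y)) = x + log(1 + exp(y - x)), which avoids overflow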
z = math.exp(y - x)
logProb = x + math.log(1.0 + z)
if logProb < _MinLogExp:
return _MinLogExp
else:
return logProb | d1993f9e3d9fd5f44938509df12d818fb5eb7f3d | 12,079 |
def getResourceNameString(ro_config, rname, base=None):
"""
    Returns a string value corresponding to a URI indicated by the supplied parameter.
    Relative references are assumed to be paths relative to the supplied base URI or,
    if no base is supplied, relative to the current directory.
"""
rsplit = rname.split(":")
if len(rsplit) == 2:
# Try to interpret name as CURIE
for rpref in ro_config["annotationPrefixes"]:
if rsplit[0] == rpref:
rname = ro_config["annotationPrefixes"][rpref]+rsplit[1]
if urlparse.urlsplit(rname).scheme == "":
if base:
rname = resolveUri(rname, base)
else:
rname = resolveFileAsUri(rname)
return rname | f5840a7effb4ac91a77810531f10d323a03490ce | 12,080 |
import os
import numpy as np
from astropy.io import fits
# `nmf` is assumed to be the non-negative matrix factorization solver module
# (e.g. NonnegMFPy's nmf), imported elsewhere in this code base.
def NMFcomponents(ref, ref_err = None, n_components = None, maxiters = 1e3, oneByOne = False, path_save = None):
"""Returns the NMF components, where the rows contain the information.
    Input: ref and ref_err should be (N * p) where N is the number of references, p is the number of pixels in each reference.
path_save (string): a path to save intermediate results to calculate additional componetns with previous calculated information. Default: None.
    Output: NMF components (n_components * p).
"""
if ref_err is None:
ref_err = np.sqrt(ref)
if (n_components is None) or (n_components > ref.shape[0]):
n_components = ref.shape[0]
ref[ref < 0] = 0
ref_err[ref <= 0] = np.nanpercentile(ref_err, 95)*10 #Setting the err of <= 0 pixels to be max error to reduce their impact
ref_columnized = ref.T #columnize ref, making the columns contain the information
ref_err_columnized = ref_err.T # columnize ref_err, making the columns contain the information
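    # Placeholder for the component matrix; it is overwritten with the normalized
    # NMF basis (columns of W) once the solve below finishes.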
components_column = 0
if not oneByOne:
if path_save is not None:
print('path_save is only supported when oneByOne == True.')
g_img = nmf.NMF(ref_columnized, V=1.0/ref_err_columnized**2, n_components=n_components)
chi2, time_used = g_img.SolveNMF(maxiters=maxiters)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
print("Building components one by one...")
if path_save is None:
for i in range(n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == 0):
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
print('\t path_save provided, you might want to load data and continue previous component calculation')
print('\t\t loading from ' + path_save + '_comp.fits for components.')
if not os.path.exists(path_save + '_comp.fits'):
print('\t\t ' + path_save + '_comp.fits does not exist, calculating from scratch.')
for i in range(n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == 0):
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits')
fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits')
fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
else:
W_assign = fits.getdata(path_save + '_comp.fits')
H_assign = fits.getdata(path_save + '_coef.fits')
if W_assign.shape[1] >= n_components:
print('You have already had ' + str(W_assign.shape[1]) + ' components while asking for ' + str(n_components) + '. Returning to your input.')
components_column = W_assign/np.sqrt(np.nansum(W_assign**2, axis = 0))
                    # components = decolumnize(components_column, mask=mask)  # disabled: `decolumnize` and `mask` are undefined here and the result was never used
else:
print('You are asking for ' + str(n_components) + ' components. Building the rest based on the ' + str(W_assign.shape[1]) + ' provided.')
for i in range(W_assign.shape[1], n_components):
print("\t" + str(i+1) + " of " + str(n_components))
n = i + 1
if (i == W_assign.shape[1]):
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(W_assign)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(H_assign)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
else:
W_ini = np.random.rand(ref_columnized.shape[0], n)
W_ini[:, :(n-1)] = np.copy(g_img.W)
W_ini = np.array(W_ini, order = 'F') #Fortran ordering, column elements contiguous in memory.
H_ini = np.random.rand(n, ref_columnized.shape[1])
H_ini[:(n-1), :] = np.copy(g_img.H)
H_ini = np.array(H_ini, order = 'C') #C ordering, row elements contiguous in memory.
g_img = nmf.NMF(ref_columnized, V = 1.0/ref_err_columnized**2, W = W_ini, H = H_ini, n_components= n)
chi2 = g_img.SolveNMF(maxiters=maxiters)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits')
fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True)
print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits')
fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True)
components_column = g_img.W/np.sqrt(np.nansum(g_img.W**2, axis = 0)) #normalize the components
return components_column.T | ca986498797a635a3b4a85ba6ac432a4ce2954d3 | 12,081 |
import argparse
def parse_args():
"""
Parses arguments provided through the command line.
"""
# Initialize
parser = argparse.ArgumentParser()
# Arguments
parser.add_argument("meme_file", metavar="motifs.meme")
parser.add_argument("tomtom_file", metavar="tomtom.txt")
parser.add_argument("clusters_file", metavar="clusters.txt")
parser.add_argument(
"--cluster",
default=None,
help="cluster (defaut: all)",
type=int,
)
parser.add_argument(
"--out-dir",
default="./",
help="output directory (default: ./)",
)
return(parser.parse_args()) | cc2e38e563ef5e94009f171e757454a3b59d8588 | 12,082 |
def get_mean_from_protobin(filename):
"""Get image mean from protobinary and return ndarray with skimage format.
"""
img = read_caffe_protobin(filename)
size = (img.channels, img.height, img.width)
img = caffe.io.blobproto_to_array(img).reshape(size)
img = img.transpose([1, 2, 0])
return img | ba03cdeb534d00885c1c6bee22bee15af3880a85 | 12,083 |
def has_key(key):
"""
Check if key is in the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.has_key <mykey>
"""
store = load()
return key in store | 7d551773da4c7d98f090d7ff9b489bcbecbec32e | 12,084 |
def __create_setting(ingest):
"""Creates the setting for a particular family"""
signer, addresser, auth_keys, threshold = ingest
settings = Settings(
auth_list=','.join(auth_keys),
threshold=threshold)
return (
signer,
addresser,
SettingPayload(
action=SettingPayload.CREATE,
dimension=addresser.family,
data=settings.SerializeToString())) | a74d7c5a109555de591d26f5e123c31a5d0b4a7a | 12,085 |
def paser_bs(sent):
"""Convert compacted bs span to triple list
Ex:
"""
sent=sent.strip('<sos_b>').strip('<eos_b>')
sent = sent.split()
belief_state = []
domain_idx = [idx for idx,token in enumerate(sent) if token in all_domain]
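    # positions where a new domain block starts within the flattened belief-state span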
for i,d_idx in enumerate(domain_idx):
next_d_idx = len(sent) if i+1 == len(domain_idx) else domain_idx[i+1]
domain = sent[d_idx]
sub_span = sent[d_idx+1:next_d_idx]
sub_s_idx = [idx for idx,token in enumerate(sub_span) if token in all_slots]
for j,s_idx in enumerate(sub_s_idx):
next_s_idx = len(sub_span) if j == len(sub_s_idx) - 1 else sub_s_idx[j+1]
slot = sub_span[s_idx]
value = ' '.join(sub_span[s_idx+1:next_s_idx])
bs = " ".join([domain,slot,value])
belief_state.append(bs)
return list(set(belief_state)) | b517d58e7a7958b9186c7f9216cb33e506c148f7 | 12,086 |
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps."""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf) | d493112d714e0303b807304b8be11d0d8b8c5b37 | 12,087 |
import traceback
def retrieve_succinct_traceback() -> str:
"""
    A utility that retrieves a succinct traceback digest from a complete traceback string.
"""
tb = traceback.format_exc()
return "\n".join(pg.splitlines()[-1] for pg in split_paragraphs(tb)) | 882e190138fd51be807d37f014c22aead57f88ba | 12,088 |
def get_canonical_format_name(format_name):
"""
Get the canonical format name for a possible abbreviation
Args:
format_name (str): Format name or abbreviation
Returns:
The canonical name from CANONICAL_FORMATS, or None if the format is
not recognized.
"""
try:
return CANONICAL_FORMATS[format_name.lower()]
except KeyError:
return None | ae95d0321e2f8880ccbd7710e48666991208a470 | 12,089 |
def build_protoc_args(
ctx,
plugin,
proto_infos,
out_arg,
extra_options = [],
extra_protoc_args = [],
short_paths = False,
resolve_tools = True):
"""
Build the args for a protoc invocation.
This does not include the paths to the .proto files, which should be done external to this function.
Args:
ctx: The Bazel rule execution context object.
plugin: The ProtoPluginInfo for the plugin to use.
proto_infos: The list of ProtoInfo providers.
out_arg: The path to provide as the output arg to protoc, usually the generation root dir.
extra_options: An optional list of extra options to pass to the plugin.
extra_protoc_args: An optional list of extra args to add to the command.
short_paths: Whether to use the .short_path instead of .path when creating paths. The short_path is used when
making a test/executable and referencing the runfiles.
resolve_tools: Whether to resolve and add the tools to returned inputs.
Returns:
- The list of args.
- The inputs required for the command.
- The input manifests required for the command.
"""
# Specify path getter
get_path = _short_path if short_paths else _path
# Build inputs and manifests list
inputs = []
input_manifests = []
if plugin.tool and resolve_tools:
plugin_runfiles, plugin_input_manifests = ctx.resolve_tools(tools = [plugin.tool])
inputs += plugin_runfiles.to_list()
input_manifests += plugin_input_manifests
inputs += plugin.data
# Get plugin name
plugin_name = plugin.name
if plugin.protoc_plugin_name:
plugin_name = plugin.protoc_plugin_name
# Build args
args_list = []
# Load all descriptors (direct and transitive) and remove dupes
descriptor_sets = depset([
descriptor_set
for proto_info in proto_infos
for descriptor_set in proto_info.transitive_descriptor_sets.to_list()
]).to_list()
inputs += descriptor_sets
# Add descriptors
pathsep = ctx.configuration.host_path_separator
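    # protoc expects the full set of descriptor files as one --descriptor_set_in value,
    # joined with the host path separator (":" on Unix, ";" on Windows).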
args_list.append("--descriptor_set_in={}".format(pathsep.join(
[get_path(f) for f in descriptor_sets],
)))
# Add --plugin if not a built-in plugin
if plugin.tool_executable:
# If Windows, mangle the path. It's done a bit awkwardly with
        # `host_path_separator` as there is no simple way to figure out what's
# the current OS.
if ctx.configuration.host_path_separator == ";":
plugin_tool_path = get_path(plugin.tool_executable).replace("/", "\\")
else:
plugin_tool_path = get_path(plugin.tool_executable)
args_list.append("--plugin=protoc-gen-{}={}".format(plugin_name, plugin_tool_path))
# Add plugin --*_out/--*_opt args
plugin_options = list(plugin.options)
plugin_options.extend(extra_options)
if plugin_options:
opts_str = ",".join(
[option.replace("{name}", ctx.label.name) for option in plugin_options],
)
if plugin.separate_options_flag:
args_list.append("--{}_opt={}".format(plugin_name, opts_str))
else:
out_arg = "{}:{}".format(opts_str, out_arg)
args_list.append("--{}_out={}".format(plugin_name, out_arg))
# Add any extra protoc args provided or that plugin has
args_list.extend(extra_protoc_args)
if plugin.extra_protoc_args:
args_list.extend(plugin.extra_protoc_args)
return args_list, inputs, input_manifests | 24796ea7d817dd3a11ec2c1b54de23573c6da275 | 12,090 |
def cycle_left(state):
"""Rotates the probabilityfunction, translating each discrete left by one site.
The outcome is the same as if the probabilityfunction was fully local, with n_discretes
indices, and the indices were permuted with (1, 2, ..., n_discretes-1, 0).
Args:
state: The probabilityfunction to which the rotation is applied.
Returns:
        The rotated probabilityfunction.
"""
# TODO Can we do this more easily using the kwargs available for
# pswapaxes/all_to_all?
dim = LOCAL_DIM
pmap_index = pops.AXIS_NAME
n_global_discretes, n_local_discretes = number_of_discretes(state)
if n_local_discretes < 8:
msg = ("cycle_left isn't supported for less than 8 local discretes, you "
f"provided {n_local_discretes}.")
raise NotImplementedError(msg)
# Number of discretes that don't really take part in the process.
num_discretes_leftover = n_local_discretes - n_global_discretes - 1
orig_shape = state.shape
# REDACTED Make a diagram illustrating what is going on here.
state = state.reshape((dim, dim**n_global_discretes, dim**num_discretes_leftover))
state = jax.lax.pswapaxes(state, pmap_index, 1)
state = state.transpose((1, 0, 2))
state = state.reshape((dim, dim**n_global_discretes, dim**num_discretes_leftover))
state = jax.lax.pswapaxes(state, pmap_index, 1)
state = state.reshape((dim**8, dim**(n_local_discretes - 8)))
state = state.transpose((1, 0))
state = state.reshape((dim**(n_local_discretes - 7), dim**7))
state = state.transpose((1, 0))
return state.reshape(orig_shape) | 37f6f3626abe66682b339e9a74fb439917cba9ce | 12,091 |
def make_rare_deleterious_variants_filter(sample_ids_list=None):
""" Function for retrieving rare, deleterious variants """
and_list = [
{
"$or":
[
{"cadd.esp.af": {"$lt": 0.051}},
{"cadd.esp.af": {"$exists": False}}
]
},
{
"$or":
[
{"func_knowngene": "exonic"},
{"func_knowngene": "splicing"}
]
},
{"cadd.phred": {"$gte": 10}},
{"exonicfunc_knowngene": {"$ne": "synonymous SNV"}},
{"1000g2015aug_all": {"$lt": 0.051}}
]
result = _append_sample_id_constraint_if_needed(and_list, sample_ids_list)
return result | 2a97e5a0aa96b2a221c32639dde20fcf8de09bce | 12,092 |
def PluginCompleter(unused_self, event_object):
"""Completer function that returns a list of available plugins."""
ret_list = []
if not IsLoaded():
return ret_list
    if '-h' not in event_object.line:
ret_list.append('-h')
plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()
for plugin_cls in plugins_list.GetKeyPlugins(RegCache.hive_type):
        plugin_object = plugin_cls(reg_cache=RegCache.reg_cache)
        plugin_name = plugin_object.plugin_name
if plugin_name.startswith('winreg'):
plugin_name = plugin_name[7:]
if plugin_name == 'default':
continue
ret_list.append(plugin_name)
return ret_list | c03b765ed39c1e43d3b67aaf9486c0f015b7bc4e | 12,093 |
import implicit
def train_model(item_user_data) -> []:
    """Return the trained ALS model."""
model = implicit.als.AlternatingLeastSquares(factors=50)
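    # Note: in implicit < 0.5 fit() expects an item-user sparse matrix (as the parameter
    # name suggests); newer releases expect a user-item matrix instead.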
model.fit(item_user_data)
return model | d3917cb422707bebdd519bab04b9332c1056ec7a | 12,094 |
def refresh_blind_balances(wallet, balances, storeback=True):
""" Given a list of (supposedly) unspent balances, iterate over each one
        and verify its status on the blockchain. Each balance failing
        this verification has its status updated in the database (if storeback is True).
Returns a list of TRULY unspent balances.
"""
rpc = wallet.rpc
unspent = [ ]
for balance in balances:
result = rpc.get_blinded_balances([balance["commitment"]])
if len(result) == 0:
if storeback:
wallet.modifyBlindBalance(balance["commitment"], used=True)
else:
unspent.append(balance)
return unspent | 2d468827ae32d359b323921d5933796ada22d627 | 12,095 |
import argparse
from typing import Union
def command_up(
stairlight: StairLight, args: argparse.Namespace
) -> Union[dict, "list[dict]"]:
"""Execute up command
Args:
stairlight (StairLight): Stairlight class
args (argparse.Namespace): CLI arguments
Returns:
Union[dict, list]: Upstairs results
"""
return search(
func=stairlight.up,
args=args,
tables=find_tables_to_search(stairlight=stairlight, args=args),
) | 042214e94a95b998a0f416ab51c973338eba34ba | 12,096 |
import numpy as np
from scipy.spatial import ConvexHull, QhullError  # in older SciPy releases QhullError lives in scipy.spatial.qhull
def hull_area(par, llhs, above_min=1):
"""Estimate projected area of llh minimum for single parameter
Parameters
----------
par : np.ndarray
the parameter values
llhs : np.ndarray
the llh values
Returns
-------
float
"""
min_llh = llhs.min()
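    # Keep only the points whose llh lies within `above_min` of the minimum and take the
    # area (the "volume" of a 2-D convex hull) that they enclose.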
try:
Hull = ConvexHull(np.stack([par, llhs]).T[llhs < min_llh+above_min])
return Hull.volume
except QhullError:
return np.inf | 3284a9742dfd9889fff67fd6f68ab9435858a521 | 12,097 |
from OpenSSL import crypto
# patch_certificate is a project-local helper assumed to be imported elsewhere.
def assemble_chain(leaf, store):
"""Assemble the trust chain.
This assembly method uses the certificates subject and issuer common name and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
store_dict = {}
for cert in store:
store_dict[cert.get_subject().CN] = cert
chain = [leaf]
current = leaf
try:
while current.get_issuer().CN != current.get_subject().CN:
chain.append(store_dict[current.get_issuer().CN])
current = store_dict[current.get_issuer().CN]
except KeyError:
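        # The issuer is missing from the store: append a stub certificate carrying only
        # the missing issuer's subject so the gap is visible in the returned chain.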
invalid = crypto.X509()
patch_certificate(invalid)
invalid.set_subject(current.get_issuer())
chain.append(invalid)
chain.reverse()
return chain | c59025ddcbb777f4f5358f8d89e05191c22eb780 | 12,098 |
import os
import aiida_twinpy
from pymatgen.io.vasp import Poscar
# get_cell_from_pymatgen_structure is assumed to come from the twinpy helper utilities,
# imported elsewhere in this module.
def hcp_mg_relax_cell() -> tuple:
"""
HCP Mg relax cell, wyckoff='c'.
"""
aiida_twinpy_dir = os.path.dirname(
os.path.dirname(aiida_twinpy.__file__))
filename = os.path.join(aiida_twinpy_dir,
'tests',
'data',
'HCP_Mg_relax.poscar')
pos = Poscar.from_file(filename)
cell = get_cell_from_pymatgen_structure(pos.structure)
return cell | 54c3e169be518aa2cba39954eb5d8115f400634e | 12,099 |