content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
import math
def sin(c):
"""
sin(a+x)= sin(a) cos(x) + cos(a) sin(x)
"""
if not isinstance(c,pol): return math.sin(c)
    a0, p = c.separate()
lst=[math.sin(a0),math.cos(a0)]
for n in range(2,c.order+1):
lst.append( -lst[-2]/n/(n-1))
return phorner(lst,p) | a6ec312df4362c130343133dae9a09b377f56cf5 | 10,700 |
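# A minimal sketch of the same Taylor-series idea as `sin` above, without the
# `pol`/`phorner` helpers assumed by that snippet: the coefficients of
# sin(a0 + x) follow the recurrence c[n] = -c[n-2] / (n * (n - 1)), and the
# series is evaluated with Horner's rule.
import math

def sin_series(a0, x, order=10):
    coeffs = [math.sin(a0), math.cos(a0)]
    for n in range(2, order + 1):
        coeffs.append(-coeffs[-2] / n / (n - 1))
    acc = 0.0
    for c in reversed(coeffs):      # Horner evaluation of sum(coeffs[n] * x**n)
        acc = acc * x + c
    return acc

print(sin_series(1.0, 0.05), math.sin(1.05))  # the two values agree closely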
def _calc_metadata() -> str:
"""
Build metadata MAY be denoted by appending a plus sign
and a series of dot separated identifiers
immediately following the patch or pre-release version.
Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
"""
if not is_appveyor:
return "local-build"
is_pr = PR_NUM in env
assert (PR_NUM in env) == (PR_BRANCH in env)
assert VER in env
if is_pr:
return "{VER}.pr{PR_NUM}-{PR_BRANCH}".format(**env)
else:
if env[BRANCH] != "master":
# Shouldn't happen, since side branches are not built.
return "{VER}.{BRANCH}".format(**env)
else:
return "{VER}".format(**env) | ccbd4912622808b5845d8e30546d6eb27e299342 | 10,701 |
import functools
def authorization_required(func):
    """Return a 401 response if the user is not logged in when requesting a URL with a user ndb.Key in it,
    or a 403 response if the logged-in user's ndb.Key differs from the ndb.Key given in the requested URL.
"""
@functools.wraps(func)
def decorated_function(*pa, **ka): # pylint: disable=missing-docstring
if auth.is_authorized(ndb.Key(urlsafe=ka['key'])):
return func(*pa, **ka)
if not auth.is_logged_in():
return abort(401)
return abort(403)
return decorated_function | 12c0d645b0b26bf419e413866afaf1b4e7a19869 | 10,702 |
import torch
def pad_col(input, val=0, where='end'):
    """Adds a column of `val` at the start or end of `input`."""
if len(input.shape) != 2:
raise ValueError(f"Only works for `phi` tensor that is 2-D.")
pad = torch.zeros_like(input[:, :1])
if val != 0:
pad = pad + val
if where == 'end':
return torch.cat([input, pad], dim=1)
elif where == 'start':
return torch.cat([pad, input], dim=1)
raise ValueError(f"Need `where` to be 'start' or 'end', got {where}") | 77caa028bb76da922ba12492f077811d2344c2a9 | 10,703 |
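# A small usage sketch for pad_col above (assuming PyTorch is available):
# append a column of ones at the end, or a column of zeros at the start.
phi = torch.tensor([[0.2, 0.3], [0.5, 0.1]])
print(pad_col(phi, val=1.0, where='end'))  # shape (2, 3), last column is ones
print(pad_col(phi, where='start'))         # shape (2, 3), first column is zeros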
from typing import List
import itertools
def seats_found_ignoring_floor(data: List[List[str]], row: int, col: int) -> int:
"""
    Search each cardinal direction until we hit a wall or a seat.
If a seat is hit, determine if it's occupied.
"""
total_seats_occupied = 0
cardinal_direction_operations = itertools.product([-1, 0, 1], repeat=2)
for row_modifier, col_modifier in cardinal_direction_operations:
if row_modifier or col_modifier:
total_seats_occupied += next_seat_on_path_occupied(
data, row, col, row_modifier, col_modifier
)
return total_seats_occupied | e5442d757df6304da42f817c975969723ad0abca | 10,704 |
def product_design_space() -> ProductDesignSpace:
"""Build a ProductDesignSpace for testing."""
alpha = RealDescriptor('alpha', lower_bound=0, upper_bound=100, units="")
beta = RealDescriptor('beta', lower_bound=0, upper_bound=100, units="")
gamma = CategoricalDescriptor('gamma', categories=['a', 'b', 'c'])
dimensions = [
ContinuousDimension(alpha, lower_bound=0, upper_bound=10),
ContinuousDimension(beta, lower_bound=0, upper_bound=10),
EnumeratedDimension(gamma, values=['a', 'c'])
]
return ProductDesignSpace(name='my design space', description='does some things', dimensions=dimensions) | 93468cc7aaeb6a6bf7453d2f3e974bc28dece31f | 10,705 |
def compute_percents_of_labels(label):
"""
    Compute the ratio/percentage size of the labels in a labeled image
:param label: the labeled 2D image
:type label: numpy.ndarray
:return: An array of relative size of the labels in the image. Indices of the sizes in the array \
    correspond to the labels in the labeled image. E.g. output [0.2, 0.5, 0.3] means label 0's size \
    is 0.2 of the labeled image, label 1's size is 0.5 of the labeled image, and label 2's size is 0.3 of \
the labeled image.
:rtype: numpy.ndarray
"""
# Get the bins of the histogram. Since the last bin of the histogram is [label, label+1]
# We add 1 to the number of different labels in the labeled image when generating bins
num_labels = np.arange(0, len(np.unique(label)) + 1)
# Histogramize the label image and get the frequency array percent_of_dominance
(percent_of_dominance, _) = np.histogram(label, bins=num_labels)
# Convert the dtype of frequency array to float
percent_of_dominance = percent_of_dominance.astype("float")
# Normalized by the sum of frequencies (number of pixels in the labeled image)
percent_of_dominance /= percent_of_dominance.sum()
return percent_of_dominance | 6dfe34b7da38fa17a5aa4e42acc5c812dd126f77 | 10,706 |
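# A usage sketch for compute_percents_of_labels above, assuming numpy is
# imported as np in the surrounding module: label 0 covers 5 of 9 pixels,
# labels 1 and 2 cover 2 pixels each.
label_image = np.array([[0, 0, 1],
                        [0, 0, 2],
                        [1, 2, 0]])
print(compute_percents_of_labels(label_image))  # approx. [0.556, 0.222, 0.222]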
def removepara(H,M,Hmin = '1/2',Hmax = 'max',output=-1,kwlc={}):
    """ Retrieve the linear contribution to the cycle and remove it from the cycle.
    **H** and **M** correspond to the entire cycle (two branches), i.e. **H**
    starts and ends at the same value (or an approximate value).
    The M vs H cycle is split into its two branches, H1,M1 and H2,M2, defined by::
        H1,M1: branch with dH/dt < 0. The field decreases with time.
        H2,M2: branch with dH/dt > 0. The field increases with time.
    With the global variable FIGS = True, intermediate states of the
    determination and linear-contribution removal process are shown.
    The figure shows the **Hmin** and **Hmax** positions in the cycle.
    output: kind of output, (0 or -1) out.params or (1) out. (v 0.210304)
        Note: output is set to -1 as default to achieve backward
        compatibility. But it should be changed to 1 in the future.
    kwlc = dictionary with kwargs to be passed to linealcontribution.
    Returns:
        if output = -1: H1,M1,H2,M2,[pend,salto,desp]
        if output = 1:
            returns a plain object with the previous attributes and others.
    """
if PRINT:
print('**********************************************************')
print('removepara ')
print('**********************************************************')
if Hmax == 'max':
Hmax = max(abs(H))
if Hmin == '1/2':
Hmin = 0.5*max(abs(H))
H1,M1,H2,M2 = splitcycle(H,M)
o1 = linealcontribution(H1,M1,[Hmax,Hmin],label='dH/dt < 0',output=output,**kwlc)
o2 = linealcontribution(H2,M2,[Hmax,Hmin],label='dH/dt > 0',output=output,**kwlc)
if output == 1:
p1 = o1.params
p2 = o2.params
elif output == -1:
p1 = o1
p2 = o2
Ms = (p1['Ms'].value + p2['Ms'].value)*0.5
if p1['Ms'].stderr == None or p2['Ms'].stderr == None:
eMs = None
else:
eMs = (p1['Ms'].stderr + p2['Ms'].stderr)*0.5
    # End of the fits
if PRINT:
print('slope 1:',p1['Xi'])
print('slope 2:',p2['Xi'])
print('Ms 1 :',p1['Ms'])
print('Ms 2 :',p2['Ms'])
print('Ms :%s +/- %s'%(Ms,eMs))
print('offset 1 :',p1['offset'])
print('offset 2 :',p2['offset'])
print('a 1 :',p1['a'])
print('a 2 :',p2['a'])
print('b 1 :',p1['b'])
print('b 2 :',p2['b'])
    # Build an average slope from the one obtained for each branch.
    # Correct both branches by removing this slope.
pend =(p1['Xi']+p2['Xi'])/2.
salto=(p1['Ms']+p2['Ms'])/2.
desp =(p1['offset']+p2['offset'])/2.
M1 = (M1-H1*pend)
M2 = (M2-H2*pend)
if FIGS:
__newfig__()
pyp.plot(H1,M1,'b.-',label = 'dH/dt < 0')
pyp.plot(H2,M2,'r.-',label = 'dH/dt > 0')
pyp.axhline(salto,color = 'k', alpha =0.5)
pyp.axhline(-salto,color= 'k', alpha =0.5)
pyp.legend(loc=0)
if output == 1:
out = ReturnClass()
out.H1 = H1
out.H2 = H2
out.M1 = M1
out.M2 = M2
out.pend = pend
out.desp = desp
out.salto = salto
out.o1 = o1
out.o2 = o2
return out
else:
return H1,M1,H2,M2,[pend,salto,desp] | 1d70c60f60b3ab7b976a0ec12a3541e5a7e53426 | 10,707 |
def flush():
"""
Remove all mine contents of minion.
:rtype: bool
:return: True on success
CLI Example:
.. code-block:: bash
salt '*' mine.flush
"""
if __opts__["file_client"] == "local":
return __salt__["data.update"]("mine_cache", {})
load = {
"cmd": "_mine_flush",
"id": __opts__["id"],
}
return _mine_send(load, __opts__) | fe7d120362393fcb4380473cdaf76e153646644a | 10,708 |
def polygon_to_shapely_polygon_wkt_compat(polygon):
"""
Convert a Polygon to its Shapely Polygon representation but with WKT
compatible coordinates.
"""
shapely_points = []
for location in polygon.locations():
shapely_points.append(location_to_shapely_point_wkt_compat(location))
return shapely.geometry.Polygon(shapely.geometry.LineString(shapely_points)) | 54c889d2071cc8408c2bb4b739a30c3458c80f4c | 10,709 |
import six
def ccd_process(ccd, oscan=None, trim=None, error=False, masterbias=None,
bad_pixel_mask=None, gain=None, rdnoise=None,
oscan_median=True, oscan_model=None):
"""Perform basic processing on ccd data.
The following steps can be included:
* overscan correction
* trimming of the image
    * create deviation frame
* gain correction
* add a mask to the data
* subtraction of master bias
The task returns a processed `ccdproc.CCDData` object.
Parameters
----------
ccd: `ccdproc.CCDData`
Frame to be reduced
oscan: None, str, or, `~ccdproc.ccddata.CCDData`
        For no overscan correction, set to None. Otherwise provide a region
of `ccd` from which the overscan is extracted, using the FITS
conventions for index order and index start, or a
slice from `ccd` that contains the overscan.
trim: None or str
        For no trim correction, set to None. Otherwise provide a region
of `ccd` from which the image should be trimmed, using the FITS
conventions for index order and index start.
error: boolean
If True, create an uncertainty array for ccd
masterbias: None, `~numpy.ndarray`, or `~ccdproc.CCDData`
        A masterbias frame to be subtracted from ccd.
bad_pixel_mask: None or `~numpy.ndarray`
        A bad pixel mask for the data. The bad pixel mask should be given
        such that bad pixels have a value of 1 and good pixels a value of 0.
gain: None or `~astropy.Quantity`
        Gain value to multiply the image by to convert to electrons
rdnoise: None or `~astropy.Quantity`
Read noise for the observations. The read noise should be in
`~astropy.units.electron`
oscan_median : bool, optional
If true, takes the median of each line. Otherwise, uses the mean
oscan_model : `~astropy.modeling.Model`, optional
Model to fit to the data. If None, returns the values calculated
by the median or the mean.
Returns
-------
ccd: `ccdproc.CCDData`
        Reduced ccd
"""
# make a copy of the object
nccd = ccd.copy()
# apply the overscan correction
if isinstance(oscan, ccdproc.CCDData):
nccd = ccdproc.subtract_overscan(nccd, overscan=oscan,
median=oscan_median,
model=oscan_model)
elif isinstance(oscan, six.string_types):
nccd = ccdproc.subtract_overscan(nccd, fits_section=oscan,
median=oscan_median,
model=oscan_model)
elif oscan is None:
pass
else:
raise TypeError('oscan is not None, a string, or CCDData object')
# apply the trim correction
if isinstance(trim, six.string_types):
nccd = ccdproc.trim_image(nccd, fits_section=trim)
elif trim is None:
pass
else:
raise TypeError('trim is not None or a string')
# create the error frame
if error and gain is not None and rdnoise is not None:
nccd = ccdproc.create_deviation(nccd, gain=gain, rdnoise=rdnoise)
elif error and (gain is None or rdnoise is None):
raise ValueError(
'gain and rdnoise must be specified to create error frame')
# apply the bad pixel mask
if isinstance(bad_pixel_mask, np.ndarray):
nccd.mask = bad_pixel_mask
elif bad_pixel_mask is None:
pass
else:
raise TypeError('bad_pixel_mask is not None or numpy.ndarray')
# apply the gain correction
if isinstance(gain, u.quantity.Quantity):
nccd = ccdproc.gain_correct(nccd, gain)
elif gain is None:
pass
else:
raise TypeError('gain is not None or astropy.Quantity')
# test subtracting the master bias
if isinstance(masterbias, ccdproc.CCDData):
nccd = nccd.subtract(masterbias)
elif isinstance(masterbias, np.ndarray):
nccd.data = nccd.data - masterbias
elif masterbias is None:
pass
else:
raise TypeError(
'masterbias is not None, numpy.ndarray, or a CCDData object')
return nccd | 610a53693ff84ba2e1a68662dd0a19e55228c129 | 10,710 |
def get_role_keyids(rolename):
"""
<Purpose>
Return a list of the keyids associated with 'rolename'.
Keyids are used as identifiers for keys (e.g., rsa key).
A list of keyids are associated with each rolename.
Signing a metadata file, such as 'root.json' (Root role),
involves signing or verifying the file with a list of
keys identified by keyid.
<Arguments>
rolename:
An object representing the role's name, conformant to 'ROLENAME_SCHEMA'
(e.g., 'root', 'snapshot', 'timestamp').
<Exceptions>
tuf.FormatError, if 'rolename' does not have the correct object format.
tuf.UnknownRoleError, if 'rolename' cannot be found in the role database.
tuf.InvalidNameError, if 'rolename' is incorrectly formatted.
<Side Effects>
None.
<Returns>
A list of keyids.
"""
# Raises tuf.FormatError, tuf.UnknownRoleError, or tuf.InvalidNameError.
_check_rolename(rolename)
roleinfo = _roledb_dict[rolename]
return roleinfo['keyids'] | 4888a09740560d760bfffe9eecd50bfa67ff0613 | 10,711 |
def _DX(X):
    """Computes the X finite derivative along y and x.
Arguments
---------
X: (m, n, l) numpy array
The data to derivate.
Returns
-------
tuple
Tuple of length 2 (Dy(X), Dx(X)).
Note
----
    DX[0], the derivative along y, has shape (m-1, n, l).
    DX[1], the derivative along x, has shape (m, n-1, l).
"""
return (X[1:, :, :] - X[:-1, :, :], # D along y
X[:, 1:, :] - X[:, 0:-1, :]) # D along x | 4aff05c2c25089c9f93b762a18dad42b0142db09 | 10,712 |
def load_spectra_from_dataframe(df):
"""
:param df:pandas dataframe
:return:
"""
total_flux = df.total_flux.values[0]
spectrum_file = df.spectrum_filename.values[0]
pink_stride = df.spectrum_stride.values[0]
spec = load_spectra_file(spectrum_file, total_flux=total_flux,
pinkstride=pink_stride, as_spectrum=True)
return spec | 31d1cbbee8d999dac5ee0d7f8d4c71f7f58afc3b | 10,713 |
def included_element(include_predicates, exclude_predicates, element):
"""Return whether an index element should be included."""
return (not any(evaluate_predicate(element, ep)
for ep in exclude_predicates) and
(include_predicates == [] or
any(evaluate_predicate(element, ip)
for ip in include_predicates))) | 00e0d66db26e8bca7e3cb8505596247065422cb6 | 10,714 |
def _insertstatushints(x):
"""Insert hint nodes where status should be calculated (first path)
This works in bottom-up way, summing up status names and inserting hint
nodes at 'and' and 'or' as needed. Thus redundant hint nodes may be left.
Returns (status-names, new-tree) at the given subtree, where status-names
is a sum of status names referenced in the given subtree.
"""
if x is None:
return (), x
op = x[0]
if op in {'string', 'symbol', 'kindpat'}:
return (), x
if op == 'not':
h, t = _insertstatushints(x[1])
return h, (op, t)
if op == 'and':
ha, ta = _insertstatushints(x[1])
hb, tb = _insertstatushints(x[2])
hr = ha + hb
if ha and hb:
return hr, ('withstatus', (op, ta, tb), ('string', ' '.join(hr)))
return hr, (op, ta, tb)
if op == 'or':
hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
hr = sum(hs, ())
if sum(bool(h) for h in hs) > 1:
return hr, ('withstatus', (op,) + ts, ('string', ' '.join(hr)))
return hr, (op,) + ts
if op == 'list':
hs, ts = zip(*(_insertstatushints(y) for y in x[1:]))
return sum(hs, ()), (op,) + ts
if op == 'func':
f = getsymbol(x[1])
# don't propagate 'ha' crossing a function boundary
ha, ta = _insertstatushints(x[2])
if getattr(symbols.get(f), '_callstatus', False):
return (f,), ('withstatus', (op, x[1], ta), ('string', f))
return (), (op, x[1], ta)
raise error.ProgrammingError('invalid operator %r' % op) | 956fe03a7f5747f93034501e63cc31ff2956c2d6 | 10,715 |
def make_sine(freq: float, duration: float, sr=SAMPLE_RATE):
"""Return sine wave based on freq in Hz and duration in seconds"""
N = int(duration * sr) # Number of samples
return np.sin(np.pi*2.*freq*np.arange(N)/sr) | 622b03395da5d9f8a22ac0ac30282e23d6596055 | 10,716 |
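# A usage sketch for make_sine above, assuming numpy is imported as np and
# SAMPLE_RATE is defined in the surrounding module: one second of a 440 Hz
# tone at a 44100 Hz sample rate.
tone = make_sine(440.0, 1.0, sr=44100)
print(tone.shape)              # (44100,)
print(tone.min(), tone.max())  # close to -1.0 and 1.0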
def _widget_abbrev(o):
"""Make widgets from abbreviations: single values, lists or tuples."""
float_or_int = (float, int)
if isinstance(o, (list, tuple)):
if o and all(isinstance(x, string_types) for x in o):
return DropdownWidget(values=[unicode_type(k) for k in o])
elif _matches(o, (float_or_int, float_or_int)):
min, max, value = _get_min_max_value(o[0], o[1])
if all(isinstance(_, int) for _ in o):
cls = IntSliderWidget
else:
cls = FloatSliderWidget
return cls(value=value, min=min, max=max)
elif _matches(o, (float_or_int, float_or_int, float_or_int)):
step = o[2]
if step <= 0:
raise ValueError("step must be >= 0, not %r" % step)
min, max, value = _get_min_max_value(o[0], o[1], step=step)
if all(isinstance(_, int) for _ in o):
cls = IntSliderWidget
else:
cls = FloatSliderWidget
return cls(value=value, min=min, max=max, step=step)
else:
return _widget_abbrev_single_value(o) | f5a57f2d74811ff21ea56631fd9fb22fea4ae91f | 10,717 |
def get_conditions():
"""
List of conditions
"""
return [
'blinded',
'charmed',
'deafened',
'fatigued',
'frightened',
'grappled',
'incapacitated',
'invisible',
'paralyzed',
'petrified',
'poisoned',
'prone',
'restrained',
'stunned',
'unconscious',
'exhaustion'
] | 816ccb50581cafa20bdefed2a075a3370704cef4 | 10,718 |
def negative_predictive_value(y_true: np.array, y_score: np.array) -> float:
"""
    Calculate the negative predictive value (duplicated in :func:`precision_score`).
Args:
y_true (array-like): An N x 1 array of ground truth values.
y_score (array-like): An N x 1 array of predicted values.
Returns:
npv (float): The negative predictive value.
"""
tn = true_negative(y_true, y_score)
fn = false_negative(y_true, y_score)
npv = tn / (tn + fn)
return npv | 28f1d4fce76b6201c6dbeb99ad19337ca84b74c5 | 10,719 |
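# A hand check of the formula used by negative_predictive_value above: with
# 2 true negatives and 1 false negative, NPV = TN / (TN + FN) = 2 / 3.
# The helpers true_negative and false_negative are assumed to count those
# entries from y_true / y_score in the surrounding module.
tn, fn = 2, 1
print(tn / (tn + fn))  # 0.666...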
def flat_list(*alist):
"""
Flat a tuple, list, single value or list of list to flat list
e.g.
>>> flat_list(1,2,3)
[1, 2, 3]
>>> flat_list(1)
[1]
>>> flat_list([1,2,3])
[1, 2, 3]
>>> flat_list([None])
[]
"""
a = []
for x in alist:
if x is None:
continue
if isinstance(x, (tuple, list)):
a.extend([i for i in x if i is not None])
else:
a.append(x)
return a | 5a68495e507e9a08a9f6520b83a912cf579c6688 | 10,720 |
from typing import List
def do_regression(X_cols: List[str], y_col: str, df: pd.DataFrame, solver='liblinear', penalty='l1',
C=0.2) -> LogisticRegression:
"""
Performs regression.
:param X_cols: Independent variables.
:param y_col: Dependent variable.
:param df: Data frame.
:param solver: Solver. Default is liblinear.
:param penalty: Penalty. Default is ``l1``.
:param C: Strength of regularlization. Default is ``0.2``.
:return: Logistic regression model.
"""
X = df[X_cols]
y = df[y_col]
model = LogisticRegression(penalty=penalty, solver=solver, C=C)
model.fit(X, y)
return model | 8a65d49e64e96b3fc5271545afe1761382ec1396 | 10,721 |
def gaussian_smooth(var, sigma):
"""Apply a filter, along the time dimension.
Applies a gaussian filter to the data along the time dimension. if the
time dimension is missing, raises an exception. The DataArray that is
returned is shortened along the time dimension by sigma, half of sigma on
each end.
The width of the window is 2xsigma + 1.
"""
if type(var) is not xr.DataArray:
raise TypeError("First argument must be an Xarray DataArray.")
if 'time' not in var.dims:
raise IndexError("Time coordinate not found.")
# The convolution window must have the same number of dimensions as the
# variable. The length of every dimension is one, except time, which is
# 2xsigma + 1.
var_dimensions = np.ones( len(var.coords), dtype=np.int )
timepos = var.dims.index('time')
var_dimensions[timepos] = 2*sigma + 1
# Use a normalized gaussian so the average of the variable does not change.
gausswin = gaussian(2*sigma + 1, sigma)
gausswin = gausswin/np.sum(gausswin)
# The window series used in the convolve operation is the gaussion for the
# time dimension and a singleton zero for the other dimensions. This way
# the multidimension covolve is:
#
# g(m,n,...) = \sum_k \sum_l ... f[k,l,...]h[k-m]\delta_l0...
#
timeslice_specification = [0 for x in range(len(var.coords))]
timeslice_specification[timepos] = slice(None)
win = np.zeros(var_dimensions)
win[timeslice_specification] = gausswin
# The third parameter 'same' specifies a return array of the same shape as
# var.
out = convolve(var, win, 'same')
outda = xr.DataArray(out,
name=var.name,
coords=var.coords,
dims=var.dims)
outda.attrs = var.attrs
# # Append "(Gaussian filtered: sigma = ###" to the end of th variable name.
# newname = "{0} (Gaussian filtered: sigma = {1})".format(var.name, sigma)
# outda.name = newname
return outda | 809ec7b135ab7d915dd62ad10baea71bfd146e34 | 10,722 |
import logging
def make_ood_dataset(ood_dataset_cls: _BaseDatasetClass) -> _BaseDatasetClass:
"""Generate a BaseDataset with in/out distribution labels."""
class _OodBaseDataset(ood_dataset_cls):
"""Combine two datasets to form one with in/out of distribution labels."""
def __init__(
self,
in_distribution_dataset: BaseDataset,
shuffle_datasets: bool = False,
**kwargs):
super().__init__(**kwargs)
# This should be the builder for whatever split will be considered
# in-distribution (usually the test split).
self._in_distribution_dataset = in_distribution_dataset
self._shuffle_datasets = shuffle_datasets
def load(self,
*,
preprocess_fn=None,
batch_size: int = -1) -> tf.data.Dataset:
# Set up the in-distribution dataset using the provided dataset builder.
if preprocess_fn:
dataset_preprocess_fn = preprocess_fn
else:
dataset_preprocess_fn = (
self._in_distribution_dataset._create_process_example_fn()) # pylint: disable=protected-access
dataset_preprocess_fn = ops.compose(
dataset_preprocess_fn,
_create_ood_label_fn(True))
dataset = self._in_distribution_dataset.load(
preprocess_fn=dataset_preprocess_fn,
batch_size=batch_size)
# Set up the OOD dataset using this class.
if preprocess_fn:
ood_dataset_preprocess_fn = preprocess_fn
else:
ood_dataset_preprocess_fn = super()._create_process_example_fn()
ood_dataset_preprocess_fn = ops.compose(
ood_dataset_preprocess_fn,
_create_ood_label_fn(False))
ood_dataset = super().load(
preprocess_fn=ood_dataset_preprocess_fn,
batch_size=batch_size)
# We keep the fingerprint id in both dataset and ood_dataset
# Combine the two datasets.
try:
combined_dataset = dataset.concatenate(ood_dataset)
except TypeError:
logging.info(
'Two datasets have different types, concat feature and label only')
def clean_keys(example):
# only keep features and labels, remove the rest
return {
'features': example['features'],
'labels': example['labels'],
'is_in_distribution': example['is_in_distribution']
}
combined_dataset = dataset.map(clean_keys).concatenate(
ood_dataset.map(clean_keys))
if self._shuffle_datasets:
combined_dataset = combined_dataset.shuffle(self._shuffle_buffer_size)
return combined_dataset
@property
def num_examples(self):
return (
self._in_distribution_dataset.num_examples +
super().num_examples)
return _OodBaseDataset | c1c26206e352932d3a5397f047365c8c5c8b7fa7 | 10,723 |
def _title_case(value):
"""
    Return the string with its first letter upper-cased; only the
    first letter is affected, the rest is left unchanged.
"""
return value[0].upper() + value[1:] | 037bce973580f69d87c2e3b4e016b626a2b76abb | 10,724 |
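# A quick check of _title_case above: only the first character is upper-cased,
# unlike str.title(), which capitalizes every word.
print(_title_case("hello world"))  # 'Hello world'
print("hello world".title())       # 'Hello World'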
import requests
def zoom_api_call(user, verb, url, *args, **kwargs):
"""
Perform an API call to Zoom with various checks.
If the call returns a token expired event,
refresh the token and try the call one more time.
"""
if not settings.SOCIAL_AUTH_ZOOM_OAUTH2_KEY:
raise DRFValidationError(
"Server is not configured with Zoom OAuth2 credentials."
)
if not user.is_authenticated:
raise DRFValidationError("You are not authenticated.")
social = user.social_auth.filter(provider="zoom-oauth2").first()
if social is None:
raise DRFValidationError("You have not linked your Zoom account yet.")
is_retry = "retry" in kwargs
if is_retry:
del kwargs["retry"]
out = requests.request(
verb,
url.format(uid=social.uid),
*args,
headers={"Authorization": f"Bearer {social.get_access_token(load_strategy())}"},
**kwargs,
)
if out.status_code == 204:
return out
# check for token expired event
data = out.json()
if data.get("code") == 124 and not is_retry:
social.refresh_token(load_strategy())
kwargs["retry"] = True
return zoom_api_call(user, verb, url, *args, **kwargs)
return out | 5c359a4a7acd69a942aedcb78fc156b8218ab239 | 10,725 |
import os
def copy_javascript(name):
"""Return the contents of javascript resource file."""
# TODO use importlib_resources to access javascript file content
folder = os.path.join(os.path.dirname(os.path.realpath(__file__)), "js")
with open(os.path.join(folder, name + ".js")) as fobj:
content = fobj.read()
return content | 927a9a87c3590f76b00f3a1085de4c6e103eeeff | 10,726 |
def addHtmlImgTagExtension(notionPyRendererCls):
"""A decorator that add the image tag extension to the argument list. The
decorator pattern allows us to chain multiple extensions. For example, we
can create a renderer with extension A, B, C by writing:
addAExtension(addBExtension(addCExtension(notionPyRendererCls)))
"""
def newNotionPyRendererCls(*extraExtensions):
new_extension = [HTMLBlock, HTMLSpan]
return notionPyRendererCls(*chain(new_extension, extraExtensions))
return newNotionPyRendererCls | 914d2395cdf9c5f52f94eef80e3f7469a70eb0ae | 10,727 |
def mechaber(mechaber_name):
"""Route function for visualizing and exploring Mechabrim."""
mechaber = Mechaber.query.filter_by(mechaber_name=mechaber_name).first_or_404()
# page = request.args.get("page", 1, type=int)
# mekorot = sefer.mekorot.order_by(Makor.ref).paginate(
# page, current_app.config["ELEMENTS_PER_PAGE"], False
# )
# next_url = (
# url_for("main.sefer", sefername=sefer.name(), page=mekorot.next_num)
# if mekorot.has_next
# else None
# )
# prev_url = (
# url_for("main.sefer", sefername=sefer.name(), page=mekorot.prev_num)
# if mekorot.has_prev
# else None
# )
# return render_template('elements/mechaber.html', mechaber=mechaber)
return render_template("todo.html", mechaber=mechaber) | 56e1b0130ec3f14c389c8d1e4e31fe500cd3a5d3 | 10,728 |
def get_symmetry_projectors(character_table, conjugacy_classes, print_results=False):
"""
:param character_table: each row gives the characters of a different irreducible rep. Each column
    corresponds to a different conjugacy class
:param conjugacy_classes: List of lists of conjugacy class elements
:param print_results:
:return projs:
"""
if not validate_char_table(character_table, conjugacy_classes):
raise Exception("invalid character table/conjugacy class combination")
# columns (or rows, since orthogonal mat) represent basis states that can be transformed into one another by symmetries
states_related_by_symm = sum([sum([np.abs(g) for g in cc]) for cc in conjugacy_classes])
# only need sums over conjugacy classes to build projectors
class_sums = [sum(cc) for cc in conjugacy_classes]
projs = [reduce_symm_projector(
sum([np.conj(ch) * cs for ch, cs in zip(chars, class_sums)]), chars[0], states_related_by_symm, print_results=print_results)
for chars in character_table]
# test projector size
proj_to_dims = np.asarray([p.shape[0] for p in projs]).sum()
proj_from_dims = projs[0].shape[1]
if proj_to_dims != proj_from_dims:
raise Exception("total span of all projectors was %d, but expected %d." % (proj_to_dims, proj_from_dims))
return projs | 8780ef1a9ebb3f6e6960d04d07677e323e7565b9 | 10,729 |
from typing import List
def is_permutation_matrix(matrix: List[List[bool]]) -> bool:
"""Returns whether the given boolean matrix is a permutation matrix."""
    # every row and every column must contain exactly one True entry
    return (all(sum(row) == 1 for row in matrix) and
            all(sum(col) == 1 for col in zip(*matrix))) | b53d6f4ba6e8e1ba445783350de831b614aa187e | 10,730 |
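# Small checks for is_permutation_matrix above: exactly one True per row and
# per column qualifies; a repeated column does not.
perm = [[False, True, False],
        [True, False, False],
        [False, False, True]]
not_perm = [[True, False, False],
            [True, False, False],
            [False, False, True]]
print(is_permutation_matrix(perm))      # True
print(is_permutation_matrix(not_perm))  # False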
import torch
def DPT_Hybrid(pretrained=True, **kwargs):
""" # This docstring shows up in hub.help()
MiDaS DPT-Hybrid model for monocular depth estimation
pretrained (bool): load pretrained weights into model
"""
model = DPTDepthModel(
path=None,
backbone="vitb_rn50_384",
non_negative=True,
)
if pretrained:
checkpoint = (
"https://github.com/intel-isl/MiDaS/releases/download/v3/dpt_hybrid-midas-501f0c75.pt"
)
state_dict = torch.hub.load_state_dict_from_url(
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
)
model.load_state_dict(state_dict)
return model | 0a5cb661e9e0f08daae73b8c49ba8324e0cfb3e9 | 10,731 |
def show_counts(input_dict):
"""Format dictionary count information into a string
Args:
input_dict (dictionary): input keys and their counts
Return:
string: formatted output string
"""
out_s = ''
in_dict_sorted = {k: v for k, v in sorted(input_dict.items(), key=lambda item: item[1], reverse=True)}
for idx, (k, v) in enumerate(in_dict_sorted.items()):
out_s += '\t{}:\t{} ({})\n'.format(idx, k, v)
out_s += '\n'
return out_s | 078d1f7599b22741f474c0e6d1b02f44edfc1f9b | 10,732 |
from itertools import cycle
def encipher_railfence(message,rails):
"""
Performs Railfence Encryption on plaintext and returns ciphertext
Examples
========
>>> from sympy.crypto.crypto import encipher_railfence
>>> message = "hello world"
>>> encipher_railfence(message,3)
'horel ollwd'
Parameters
==========
message : string, the message to encrypt.
rails : int, the number of rails.
Returns
=======
The Encrypted string message.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
return ''.join(sorted(message, key=lambda i: next(p))) | b1a56cdb255065b18caa4ba6da1fa11759f87152 | 10,733 |
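# The zig-zag key used by encipher_railfence above, made explicit: with 3 rails
# the rail index cycles 0,1,2,1,0,1,2,1,... and sorting the characters by that
# key groups them rail by rail.
from itertools import cycle, islice
print(list(islice(cycle([0, 1, 2] + [1]), 11)))  # [0, 1, 2, 1, 0, 1, 2, 1, 0, 1, 2]
print(encipher_railfence("hello world", 3))      # 'horel ollwd'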
import inspect
def format_signature(name: str, signature: inspect.Signature) -> str:
"""Formats a function signature as if it were source code.
Does not yet handle / and * markers.
"""
params = ', '.join(
format_parameter(arg) for arg in signature.parameters.values())
if signature.return_annotation is signature.empty:
return_annotation = ''
else:
return_annotation = ' -> ' + _annotation_name(
signature.return_annotation)
return f'{name}({params}){return_annotation}' | a14fde11850d420d15d2f9d7f3ac4cbe9aee03cc | 10,734 |
def extract_ratios_from_ddf(ddf):
"""The same as the df version, but works with
dask dataframes instead."""
# we basicaly abuse map_partition's ability to expand indexes for lack of a working
# groupby(level) in dask
return ddf.map_partitions(extract_ratios_from_df, meta={'path': str, 'ratio': str, 'url': str}).clear_divisions() | fcb816677d3d0816b2327d458a3fdd1b820bac9e | 10,735 |
def check_if_prime(number):
"""checks if number is prime
Args:
number (int):
Raises:
TypeError: if number of type float
Returns:
[bool]: if number prime returns ,True else returns False
"""
if type(number) == float:
raise TypeError("TypeError: entered float type")
if number > 1 :
for i in range( 2, int(number / 2) + 1 ):
if number % i == 0:
return False
return True
else:
return False | 0a15a4f133b12898b32b1f52a317939cf5e30d34 | 10,736 |
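# Quick checks for check_if_prime above: 2 and 13 are prime, 1 and 15 are not,
# and float input raises TypeError.
print([check_if_prime(n) for n in (1, 2, 13, 15)])  # [False, True, True, False]
try:
    check_if_prime(3.0)
except TypeError as err:
    print(err)  # TypeError: entered float type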
import inspect
def get_signatures() -> {}:
"""
Helper method used to identify the valid arguments that can be passed
to any of the pandas IO functions used by the program
:return: Returns a dictionary containing the available arguments for each pandas IO method
"""
# Creates an empty dictionary to collect the function names and signatures
sigreturn = {}
# Loops over the functions that are used for IO operations
for io in PANDAS_IO:
# Gets the name of the function in question
funcname = io.__name__
# Gets the list of arguments that the function can take
args = list(inspect.signature(io).parameters.keys())
# Adds the arguments to the dictionary with the function name as the key
sigreturn[funcname] = args
# Returns the dictionary object
return sigreturn | 243b798e1c4c57a89749fff1d33be660ab4e973b | 10,737 |
import os
import json
def _load_flags():
"""Load flag definitions.
It will first attempt to load the file at TINYFLAGS environment variable.
If that does not exist, it will then load the default flags file bundled
with this library.
:returns list: Flag definitions to use.
"""
path = os.getenv('TINYFLAGS')
if path and os.path.exists(path) and not os.path.isdir(path):
try:
with open(path, 'r') as f:
return json.load(f)
except:
pass
return []
# with open(resource_filename('tinyenv', 'config/flags.json'), 'r') as f:
# return json.load(f) | ebf3e78296c2fd8e4590f87f87bd27b9252539f8 | 10,738 |
from typing import Optional
from typing import Union
import os
def _get_indentation_option(explicit: Optional[Union[str, int]] = None) -> Optional[str]:
"""Get the value for the ``indentation`` option.
Args:
explicit (Optional[Union[str, int]]): the value explicitly specified by user,
:data:`None` if not specified
Returns:
Optional[str]: the value for the ``indentation`` option;
:data:`None` means *auto detection* at runtime
:Environment Variables:
:envvar:`F2FORMAT_INDENTATION` -- the value in environment variable
See Also:
:data:`_default_indentation`
"""
return parse_indentation(explicit or os.getenv('F2FORMAT_INDENTATION') or _default_indentation) | ea1cfff674d620d879efec5490bbb13563e47bf4 | 10,739 |
from typing import List
def batch_answer_same_context(questions: List[str], context: str) -> List[str]:
"""Answers the questions with the given context.
:param questions: The questions to answer.
:type questions: List[str]
:param context: The context to answer the questions with.
:type context: str
:return: The answers.
:rtype: List[str]
"""
return _batch_answer_same_context[get_mode()](questions, context) | b58df72f1252427ea3e58e2f8379b8e77ea55273 | 10,740 |
import torch
def complex_multiplication(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
"""
Multiplies two complex-valued tensors. Assumes the tensor has a named dimension "complex".
Parameters
----------
x : torch.Tensor
Input data
y : torch.Tensor
Input data
Returns
-------
torch.Tensor
"""
assert_complex(x, enforce_named=True, complex_last=True)
assert_complex(y, enforce_named=True, complex_last=True)
# multiplication = torch.view_as_complex(x.rename(None)) * torch.view_as_complex(
# y.rename(None)
# )
# return torch.view_as_real(multiplication).refine_names(*x.names)
# TODO: Unsqueezing is not yet supported for named tensors, fix when it is.
complex_index = x.names.index("complex")
real_part = x.select("complex", 0) * y.select("complex", 0) - x.select("complex", 1) * y.select("complex", 1)
imaginary_part = x.select("complex", 0) * y.select("complex", 1) + x.select("complex", 1) * y.select("complex", 0)
real_part = real_part.rename(None)
imaginary_part = imaginary_part.rename(None)
multiplication = torch.cat(
[
real_part.unsqueeze(dim=complex_index),
imaginary_part.unsqueeze(dim=complex_index),
],
dim=complex_index,
)
return multiplication.refine_names(*x.names) | ed427e8f79c5bcf782da4e1c21b02528a2ccb96d | 10,741 |
import typing
def dynamic_embedding_lookup(keys: tf.Tensor,
config: de_config_pb2.DynamicEmbeddingConfig,
var_name: typing.Text,
service_address: typing.Text = "",
skip_gradient_update: bool = False,
timeout_ms: int = -1) -> tf.Tensor:
"""Returns the embeddings of from given keys.
Args:
keys: A string `Tensor` of shape [batch_size] or [batch_size,
max_sequence_length] where an empty string would be mapped to an all zero
embedding.
config: A DynamicEmbeddingConfig proto that configures the embedding.
var_name: A unique name for the given embedding.
service_address: The address of a knowledge bank service. If empty, the
value passed from --kbs_address flag will be used instead.
skip_gradient_update: A boolean indicating if gradient update is needed.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      time out.
Returns:
A `Tensor` of shape with one of below:
- [batch_size, config.embedding_dimension] if the input Tensor is 1D, or
- [batch_size, max_sequence_length, config.embedding_dimension] if the
input is 2D.
Raises:
ValueError: If name is not specified.
"""
if not var_name:
raise ValueError("Must specify a valid var_name.")
  # If skip_gradient_update is true, create a dummy variable so that the
  # gradients can be passed in.
if skip_gradient_update:
grad_placeholder = tf.constant(0.0)
else:
grad_placeholder = tf.Variable(0.0)
context.add_to_collection(var_name, config)
resource = gen_carls_ops.dynamic_embedding_manager_resource(
config.SerializeToString(), var_name, service_address, timeout_ms)
return gen_carls_ops.dynamic_embedding_lookup(keys, grad_placeholder,
resource,
config.embedding_dimension) | c1d69548e60ff00e55ab04fe83607cae31b6558c | 10,742 |
def register_unary_op(registered_name, operation):
"""Creates a `Transform` that wraps a unary tensorflow operation.
If `registered_name` is specified, the `Transform` is registered as a member
function of `Series`.
Args:
registered_name: the name of the member function of `Series` corresponding
to the returned `Transform`.
operation: a unary TensorFlow operation.
"""
doc = DOC_FORMAT_STRING.format(operation.__name__, operation.__doc__)
@property
def name(self):
return operation.__name__
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return "output"
def _apply_transform(self, input_tensors):
input_tensor = input_tensors[0]
if isinstance(input_tensor, ops.SparseTensor):
result = ops.SparseTensor(input_tensor.indices,
operation(input_tensor.values),
input_tensor.shape)
else:
result = operation(input_tensor)
# pylint: disable=not-callable
return self.return_type(result)
cls = type(operation.__name__,
(transform.Transform,),
{"name": name,
"__doc__": doc,
"input_valency": input_valency,
"_output_names": _output_names,
"_apply_transform": _apply_transform})
series.Series.register_unary_op(registered_name)(cls) | c0fb56a8e93936a4c45e199e28889ccef67d19de | 10,743 |
def add_climatology(data, clim):
"""Add 12-month climatology to a data array with more times.
Suppose you have anomalies data and you want to add back its
climatology to it. In this sense, this function does the opposite
of `get_anomalies`. Though in this case there is no way to obtain
the climatology so it has to be provided.
Parameters
----------
data: xarray.DataArray
Input must have a named `time` coordinate.
clim: xarray.DataArray
The climatology must have the same spatial dimensions as
`data`. Naturally, the time dimension can differ. The values
of this array will be replicated as many times as `data` has.
Returns
-------
xarray.DataArray with both fields added.
""" # noqa
# make sure shapes are correct
ddims = len(data.dims)
cdims = len(clim.dims)
if ddims != cdims:
msg = 'both data arrays must have same dimensions'
raise ValueError(msg)
# get number of years in dataarray
years = np.unique(data.time.dt.year)
nyear = years.size
# get tiled shape
tshape = np.ones(ddims, dtype=int)
tshape[0] = nyear
# create tiled climatology
tclim = np.tile(clim.values, tshape)
# add climatology to data array
new = data.copy()
new.values = np.array(data.values) + tclim
return new | 28845fc1455bc317d158b503ed07a7d0c1af5655 | 10,744 |
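# A sketch of the tiling step used inside add_climatology above, assuming
# numpy is imported as np: a (12, 2, 2) monthly climatology is repeated along
# the time axis to match 3 years (36 time steps) of data before being added back.
clim_values = np.zeros((12, 2, 2))
tshape = np.ones(3, dtype=int)
tshape[0] = 3                              # number of years in the data array
print(np.tile(clim_values, tshape).shape)  # (36, 2, 2)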
from typing import List
def already_exists(statement: str, lines: List[str]) -> bool:
"""
Check if statement is in lines
"""
return any(statement in line.strip() for line in lines) | 194d8c6c48609f5a2accacdb2ed0857815d48d1d | 10,745 |
import random
def uniform(lower_list, upper_list, dimensions):
    """Draw uniform random values between the given bounds: element-wise when
    the bounds are iterables, otherwise `dimensions` draws between the scalar bounds."""
if hasattr(lower_list, '__iter__'):
return [random.uniform(lower, upper)
for lower, upper in zip(lower_list, upper_list)]
else:
return [random.uniform(lower_list, upper_list)
for _ in range(dimensions)] | 59bcb124f0d71fd6e5890cd1d6c200319ab5910e | 10,746 |
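# A usage sketch for uniform above: per-dimension bounds when lists are given,
# shared scalar bounds otherwise.
print(uniform([0, 10], [1, 20], 2))  # e.g. [0.42..., 13.7...]
print(uniform(0, 1, 3))              # three independent draws from [0, 1)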
import torch
def prepare_data(files, voxel_size, device='cuda'):
"""
Loads the data and prepares the input for the pairwise registration demo.
Args:
files (list): paths to the point cloud files
"""
feats = []
xyz = []
coords = []
n_pts = []
for pc_file in files:
pcd0 = o3d.io.read_point_cloud(pc_file)
xyz0 = np.array(pcd0.points)
# Voxelization
sel0 = ME.utils.sparse_quantize(xyz0 / voxel_size, return_index=True)
# Make point clouds using voxelized points
xyz0 = xyz0[sel0[1],:]
# Get features
npts0 = xyz0.shape[0]
xyz.append(to_tensor(xyz0))
n_pts.append(npts0)
feats.append(np.ones((npts0, 1)))
coords.append(np.floor(xyz0 / voxel_size))
coords_batch0, feats_batch0 = ME.utils.sparse_collate(coords, feats)
data = {'pcd0': torch.cat(xyz, 0).float(), 'sinput0_C': coords_batch0,
'sinput0_F': feats_batch0.float(), 'pts_list': torch.tensor(n_pts)}
return data | 1c11444d4f6ca66396651bb49b8c655bedf6b8fa | 10,747 |
def reshape(box, new_size):
"""
box: (N, 4) in y1x1y2x2 format
new_size: (N, 2) stack of (h, w)
"""
box[:, :2] = new_size * box[:, :2]
box[:, 2:] = new_size * box[:, 2:]
return box | 56fbeac7c785bd81c7964d7585686e11864ff034 | 10,748 |
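# A small check of reshape above, assuming numpy is imported as np: normalized
# y1x1y2x2 boxes scaled to an image of height 100 and width 200. Note the
# function modifies `box` in place, hence the copy.
box = np.array([[0.1, 0.2, 0.5, 0.8]])
new_size = np.array([[100, 200]])
print(reshape(box.copy(), new_size))  # [[ 10.  40.  50. 160.]]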
import json
def sort_actions(request):
"""Sorts actions after drag 'n drop.
"""
action_list = request.POST.get("objs", "").split('&')
if len(action_list) > 0:
pos = 10
for action_str in action_list:
action_id = action_str.split('=')[1]
action_obj = Action.objects.get(pk=action_id)
action_obj.position = pos
action_obj.save()
pos = pos + 10
result = json.dumps({
"message": _(u"The actions have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | 80f2042858f7a0ecad3663ae4bf50ad73935be3b | 10,749 |
def fetch_file(parsed_url, config):
"""
Fetch a file from Github.
"""
if parsed_url.scheme != 'github':
        raise ValueError(f'URL scheme must be "github" but is "{parsed_url.scheme}"')
ghcfg = config.get('github')
if not ghcfg:
raise BuildRunnerConfigurationError('Missing configuration for github in buildrunner.yaml')
nlcfg = ghcfg.get(parsed_url.netloc)
if not nlcfg:
gh_cfgs = ', '.join(ghcfg.keys())
raise BuildRunnerConfigurationError(
f'Missing github configuration for {parsed_url.netloc} in buildrunner.yaml'
f' - known github configurations: {gh_cfgs}'
)
ver = nlcfg.get('version')
# NOTE: potentially the v3_fetch_file() works for other github API versions.
if ver == 'v3':
contents = v3_fetch_file(parsed_url, nlcfg)
else:
raise NotImplementedError(f'No version support for github API version {ver}')
return contents | c688a68aeaa4efa0cda21f3b58a94075e4555004 | 10,750 |
import calendar
def number_of_days(year: int, month: int) -> int:
"""
Gets the number of days in a given year and month
:param year:
:type year:
:param month:
:type month:
:return:
:rtype:
"""
assert isinstance(year, int) and 0 <= year
assert isinstance(month, int) and 0 < month <= 12
c = calendar.Calendar()
days = c.itermonthdays(year, month)
days = set(days)
    days.discard(0)  # padding days (0) are absent when the month aligns with full weeks
return len(days) | d585f037292eef36ecc753fbf702035577513a15 | 10,751 |
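# Quick checks for number_of_days above: February in a leap and a non-leap year.
print(number_of_days(2020, 2))   # 29
print(number_of_days(2019, 2))   # 28
print(number_of_days(2021, 12))  # 31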
import six
import sys
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
    :raises TypeError: If text is not an instance of str
"""
if not isinstance(text, six.string_types):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, six.text_type):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors) | 8bd5a4ef516f925f7967ab50dffff0d7273f547c | 10,752 |
from functools import reduce
def medstddev(data, mask=None, medi=False, axis=0):
"""
This function computes the stddev of an n-dimensional ndarray with
respect to the median along a given axis.
Parameters:
-----------
data: ndarray
        An n-dimensional array from which to calculate the median standard
        deviation.
mask: ndarray
Mask indicating the good and bad values of data.
medi: boolean
If True return a tuple with (stddev, median) of data.
axis: int
        The axis along which the median std deviation is calculated.
Examples:
--------
>>> import medstddev as m
>>> b = np.array([[1, 3, 4, 5, 6, 7, 7],
[4, 3, 4, 15, 6, 17, 7],
[9, 8, 7, 6, 5, 4, 3]])
>>> c = np.array([b, 1-b, 2+b])
>>> std, med = m.medstddev(c, medi=True, axis=2)
>>> print(median(c, axis=2))
[[ 5. 6. 6.]
[-4. -5. -5.]
[ 7. 8. 8.]]
>>> print(med)
[[ 5. 6. 6.]
[-4. -5. -5.]
[ 7. 8. 8.]]
>>> print(std)
[[ 2.23606798 6.05530071 2.1602469 ]
[ 2.23606798 6.05530071 2.1602469 ]
[ 2.23606798 6.05530071 2.1602469 ]]
>>> # take a look at the first element of std
>>> d = c[0,0,:]
>>> print(d)
[1, 3, 4, 5, 6, 7, 7]
>>> print(m.medstddev1d(d))
2.2360679775
>>> # See medstddev1d for masked examples
Modification history:
---------------------
2010-11-05 patricio Written by Patricio Cubillos
[email protected]
"""
# flag to return median value
retmed = medi
# get shape
shape = np.shape(data)
# default mask, all good.
if mask is None:
mask = np.ones(shape)
# base case: 1D
if len(shape) == 1:
return medstddev1d(data, mask, retmed)
newshape = np.delete(shape, axis)
# results
std = np.zeros(newshape)
medi = np.zeros(newshape)
# reduce dimensions until 1D case
reduce(medstddev1d, data, mask, std, medi, axis)
# return statement:
if retmed:
return (std, medi)
return std | bbab9eede714d7c64344af271f8b6e817723d837 | 10,753 |
def load_npz(filename: FileLike) -> JaggedArray:
""" Load a jagged array in numpy's `npz` format from disk.
Args:
filename: The file to read.
See Also:
save_npz
"""
with np.load(filename) as f:
try:
data = f["data"]
shape = f["shape"]
return JaggedArray(data, shape)
except KeyError:
msg = "The file {!r} does not contain a valid jagged array".format(filename)
raise RuntimeError(msg) | 640add32dab0b7bd12784a7a29331b59521a0f8a | 10,754 |
import re
def _egg_link_name(raw_name: str) -> str:
"""
Convert a Name metadata value to a .egg-link name, by applying
the same substitution as pkg_resources's safe_name function.
Note: we cannot use canonicalize_name because it has a different logic.
"""
return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link" | 923ff815b600b95ccb5750a8c1772ee9156e53b2 | 10,755 |
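# A quick check of _egg_link_name above: runs of characters outside
# [A-Za-z0-9.] collapse to a single hyphen.
print(_egg_link_name("My Package++name"))  # 'My-Package-name.egg-link'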
def my_view(request):
"""Displays info details from nabuco user"""
owner, c = User.objects.get_or_create(username='nabuco')
# Owner of the object has full permissions, otherwise check RBAC
if request.user != owner:
# Get roles
roles = get_user_roles(request.user, owner)
# Get operation
op, c = RBACOperation.objects.get_or_create(name='display')
# Per-model permission:
# Has user permission to display groups that nabuco belongs to?
if not RBACGenericPermission.objects.get_permission(owner, Group, op, roles):
return HttpResponseForbidden("Sorry, you are not allowed to see nabuco groups")
# Per-object permission:
# Has user permission to see this group which nabuco belong to?
group_inst = get_object_or_404(Group, name='punks')
if not RBACPermission.objects.get_permission(owner, owner, op, roles):
return HttpResponseForbidden("Sorry, you are not allowed to see this group details")
return render_to_response("base.html",
{'owner': owner,
'model': Group,
'model_inst': owner,
'operation': op,
'roles': roles},
context_instance=RequestContext(request)) | 55c3443f24d56b6ea22e02c9685d6057dfc79c7e | 10,756 |
def handler500(request):
"""
Custom 500 view
:param request:
:return:
"""
return server_error(request, template_name='base/500.html') | 91db9daeaac6f7f6b2207a3c8be7fa09f932b50f | 10,757 |
def get_badpixel_mask(shape, bins):
"""Get the mask of bad pixels and columns.
Args:
shape (tuple): Shape of image.
bins (tuple): CCD bins.
Returns:
:class:`numpy.ndarray`: 2D binary mask, where bad pixels are marked with
*True*, others *False*.
The bad pixels are found *empirically*.
"""
    mask = np.zeros(shape, dtype=bool)
if bins == (1, 1) and shape == (4136, 4096):
ny, nx = shape
mask[349:352, 627:630] = True
mask[349:ny//2, 628] = True
mask[1604:ny//2, 2452] = True
mask[280:284,3701] = True
mask[274:ny//2, 3702] = True
mask[272:ny//2, 3703] = True
mask[274:282, 3704] = True
mask[1720:1722, 3532:3535] = True
mask[1720, 3535] = True
mask[1722, 3532] = True
mask[1720:ny//2,3533] = True
mask[347:349, 4082:4084] = True
mask[347:ny//2,4083] = True
mask[ny//2:2631, 1909] = True
else:
print('No bad pixel information for this CCD size.')
raise ValueError
return mask | 2e636aef86d2462815683a975ef99fbcdeeaee19 | 10,758 |
def model_fn(nn_last_layer, correct_label, learning_rate, num_classes):
"""
Build the TensorFLow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
# create logits
logits = tf.reshape(nn_last_layer, (-1, num_classes))
correct_label = tf.reshape(correct_label, (-1, num_classes))
# create loss function.
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
# Define optimizer. Adam in this case to have variable learning rate.
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Apply optimizer to the loss function.
train_op = optimizer.minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss | d4883d451b749f06c718d37f1a49e4f4709d6695 | 10,759 |
import six
import yaml
def maybe_load_yaml(item):
"""Parses `item` only if it is a string. If `item` is a dictionary
it is returned as-is.
Args:
item:
Returns: A dictionary.
Raises:
ValueError: if unknown type of `item`.
"""
if isinstance(item, six.string_types):
return yaml.load(item)
elif isinstance(item, dict):
return item
else:
raise ValueError("Got {}, expected string or dict", type(item)) | 9288012f0368e2b087c9ef9cd9ffaca483b4f11b | 10,760 |
def histeq(im,nbr_bins=256):
"""histogram equalize an image"""
#get image histogram
im = np.abs(im)
    imhist,bins = np.histogram(im.flatten(),nbr_bins,density=True)
cdf = imhist.cumsum() #cumulative distribution function
cdf = 255 * cdf / cdf[-1] #normalize
#use linear interpolation of cdf to find new pixel values
im2 = np.interp(im.flatten(),bins[:-1],cdf)
return im2.reshape(im.shape) | bbb0e758e519a7cfcc866e3193cd1ff26bf5efbc | 10,761 |
def txgamma(v, t, gamma, H0):
"""
Takes in:
v = values at z=0;
t = list of redshifts to integrate over;
gamma = interaction term.
Returns a function f = [dt/dz, d(a)/dz,
d(e'_m)/dz, d(e'_de)/dz,
d(z)/dz,
d(dl)/dz]
"""
(t, a, ombar_m, ombar_de, z, dl) = v #omegam, omegade, z, dl) = v
Hz = H0 * (ombar_m + ombar_de)**(1/2)
if np.isnan(Hz):
print('txgamma')
print('z = %s, Hz = %s, gamma = %s, ombar_m = %s, ombar_de = %s'
%(z, Hz, gamma, ombar_m, ombar_de))
irate = (gamma/(-t+0.0001))*(1-ombar_de/(ombar_de+ombar_m)) /(1+z)/Hz
# first derivatives of functions I want to find:
f = [# dt/dz (= f.d wrt z of time)
-1/((1+z) * Hz),
# d(a)/dz (= f.d wrt z of scale factor)
-(1+z)**(-2),
# d(ombar_m)/dz (= f.d wrt z of density_m(t) / crit density(t0))
3*ombar_m /(1+z) - irate,
         # d(ombar_de)/dz (= f.d wrt z of density_de(t) / crit density(t0))
irate,
# d(z)/dz (= f.d wrt z of redshift)
1,
# d(dl)/dz (= f.d wrt z of luminosty distance)
1/Hz] # H + Hdz*(1+z)
return f | a1506ea0b54f468fd63cd2a8bd96e8a9c46a92f3 | 10,762 |
def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
tensor = tensor_util.make_tensor_proto(data, dtype=np.object)
except TypeError as e:
raise TypeError("tensor must be of type string", e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description
)
summary = summary_pb2.Summary()
summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
return summary | 43d652ebb9ab1d52c0514407a3c47d56816cbb65 | 10,763 |
def sanitize_input(args: dict) -> dict:
"""
Gets a dictionary for url params and makes sure it doesn't contain any illegal keywords.
:param args:
:return:
"""
if "mode" in args:
del args["mode"] # the mode should always be detailed
trans = str.maketrans(ILLEGAL_CHARS, ' ' * len(ILLEGAL_CHARS))
for k, v in args.copy().items():
if isinstance(v, str): # we only need to verify v because k will never be entered by a user
args[k] = v.translate(trans)
return args | 063d314cb3800d24606b56480ce63b7dda3e8e51 | 10,764 |
def sum_to(containers, goal, values_in_goal=0):
"""
Find all sets of containers which sum to goal, store the number of
containers used to reach the goal in the sizes variable.
"""
if len(containers) == 0:
return 0
first = containers[0]
remain = containers[1:]
if first > goal:
with_first = 0
elif first == goal:
sizes.append(values_in_goal + 1)
with_first = 1
else:
with_first = sum_to(remain, goal-first, values_in_goal + 1)
return with_first + sum_to(remain, goal, values_in_goal) | db5297929332a05606dec033318ca0d7c9661b1d | 10,765 |
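# A usage sketch for sum_to above, assuming `sizes` is a module-level list that
# collects the container counts of each matching combination (the containers
# and goal below are a hypothetical example).
sizes = []
print(sum_to([20, 15, 10, 5, 5], 25))  # 4 combinations reach the goal
print(sizes)                           # [2, 2, 2, 3]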
def rt2add_enc_v1(rt, grid):
"""
:param rt: n, k, 2 | log[d, tau] for each ped (n,) to each vic (k,)
modifies rt during clipping to grid
:param grid: (lx, ly, dx, dy, nx, ny)
lx, ly | lower bounds for x and y coordinates of the n*k (2,) in rt
dx, dy | step sizes of the regular grid
nx, ny | number of grid points in each coordinate (so nx*ny total)
:return: n, m | m = nx*ny, encoding for each ped
uses row-major indexing for the flattened (2d) indices
for nx 'rows' and ny 'columns'
"""
n, k = rt.shape[:2]
nx, ny = np.array(grid[-2:]).astype(np.int32)
m = nx * ny
Z = np.zeros((n, m), dtype=np.float32)
clip2grid(rt, grid)
# n, k
a_x = np.empty((n, k), dtype=np.int32)
r_x = np.empty((n, k), dtype=np.float32)
np.divmod(rt[..., 0] - grid[0], grid[2], a_x, r_x, casting='unsafe')
th_x = 1 - r_x / grid[2]
a_y = np.empty((n, k), dtype=np.int32)
r_y = np.empty((n, k), dtype=np.float32)
np.divmod(rt[..., 1] - grid[1], grid[3], a_y, r_y, casting='unsafe')
th_y = 1 - r_y / grid[3]
# 1d inds for m, | n, k
c_x = ny * a_x + a_y
offsets = np.array([0, ny, 1, ny+1], dtype=np.int32)
# n, k, 4
inds = c_x[..., np.newaxis] + offsets[np.newaxis, :]
vals = np.dstack((th_x*th_y, (1-th_x)*th_y, th_x*(1-th_y), (1-th_x)*(1-th_y)))
row_inds = np.repeat(np.arange(n, dtype=np.int32), 4*k)
np.add.at(Z, (row_inds, inds.ravel()), vals.ravel())
return Z | 3af0b8e15fdcc4d9bbeb604faffbd45cf013e86b | 10,766 |
import heapq
def draw_with_replacement(heap):
"""Return ticket drawn with replacement from given heap of tickets.
Args:
heap (list): an array of Tickets, arranged into a heap using heapq.
Such a heap is also known as a 'priority queue'.
Returns:
the Ticket with the least ticket number in the heap.
Side-effects:
the heap maintains its size, as the drawn ticket is replaced
by the next ticket for that id.
Example:
>>> x = Ticket('0.234', 'x', 2)
>>> y = Ticket('0.354', 'y', 1)
>>> z = Ticket('0.666', 'z', 2)
>>> heap = []
>>> heapq.heappush(heap, x)
>>> heapq.heappush(heap, y)
>>> heapq.heappush(heap, z)
>>> heap
[Ticket(ticket_number='0.234', id='x', generation=2),
Ticket(ticket_number='0.354', id='y', generation=1),
Ticket(ticket_number='0.666', id='z', generation=2)]
>>> draw_with_replacement(heap)
Ticket(ticket_number='0.234', id='x', generation=2)
>>> heap
[Ticket(ticket_number='0.354', id='y', generation=1),
Ticket(ticket_number='0.666', id='z', generation=2),
Ticket(ticket_number='0.54783080274940261636464668679572\
2512609112766306951592422621788875312684400211',
id='x', generation=3)]
"""
ticket = heapq.heappop(heap)
heapq.heappush(heap, next_ticket(ticket))
return ticket | 06eb982ecf32090da51f02356a6996429773e233 | 10,767 |
import requests
import platform
def is_docker_reachable(docker_client):
"""
Checks if Docker daemon is running.
:param docker_client : docker.from_env() - docker client object
:returns True, if Docker is available, False otherwise.
"""
errors = (
docker.errors.APIError,
requests.exceptions.ConnectionError,
)
if platform.system() == "Windows":
import pywintypes # pylint: disable=import-error
errors += (pywintypes.error,) # pylint: disable=no-member
try:
docker_client.ping()
return True
# When Docker is not installed, a request.exceptions.ConnectionError is thrown.
# and also windows-specific errors
except errors:
LOG.debug("Docker is not reachable", exc_info=True)
return False | 9bea3a564d9357c5a700c9abbfaa36564f4b9adf | 10,768 |
def get_string(entry):
"""
This function ...
:param entry:
:return:
"""
value = entry.split(" / ")[0].rstrip()
return value | 38a1dc41fd06b49aa8724cc783466b485c9017fb | 10,769 |
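# A quick check of get_string above: the value part of a "value / comment"
# style entry is returned with trailing whitespace stripped.
print(get_string("1.25e4  / total exposure time"))  # '1.25e4'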
from typing import Any
import string
import os
import yaml
def read_yaml_env(fname: str) -> Any:
"""Parse YAML file with environment variable substitution.
Parameters
----------
fname : str
yaml file name.
Returns
-------
table : Any
the object returned by YAML.
"""
content = read_file(fname)
# substitute environment variables
content = string.Template(content).substitute(os.environ)
return yaml.load(content) | 5c3b929bb4b76d2c041b2db92649a62a5d91e61a | 10,770 |
import collections
def get_top_words(words):
"""
    Get a list of the most frequently occurring words, together with their counts
    :param words: list of words to analyse
    :return: [(word1, number of occurrences of word1), ..]
"""
return collections.Counter(words).most_common() | 632317f57e734a93b6f3f20dfef001028b40c6b3 | 10,771 |
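A short usage sketch for the entry above (added for illustration, not part of the dataset):
# Example: word frequencies from a tokenized sentence.
words = "the cat sat on the mat the end".split()
print(get_top_words(words))
# [('the', 3), ('cat', 1), ('sat', 1), ('on', 1), ('mat', 1), ('end', 1)]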
import numpy as np
from sklearn.linear_model import LinearRegression

def get_slope(x, y, L):
    """
    Return the slope of the time series, fitted on its first L points.
    """
    try:
        x = np.array(x).reshape(-1, 1)
        y = np.array(y).reshape(-1, 1)
        lr = LinearRegression()
        lr.fit(x[:L], y[:L])
        return lr.coef_[0][0]
    except Exception:
return 0 | 23f3419049ee1372d5963823e2f52b895bc766e8 | 10,772 |
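Illustrative call for the entry above, assuming numpy and scikit-learn are installed; the values are made up:
# Example: slope of the first 5 points of a clean ramp y = 2x + 1.
import numpy as np
x = np.arange(10)
y = 2.0 * x + 1.0
print(get_slope(x, y, L=5))  # ~2.0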
from typing import Dict
from typing import Any

# Assumption: Price is a dict-like alias in the original module; defined here so the snippet runs.
Price = Dict[str, Any]

def _minimize_price(price: Dict[str, Any]) -> Price:
"""
Return only the keys and values of a price the end user would be interested in.
"""
keys = ['id', 'recurring', 'type', 'currency', 'unit_amount', 'unit_amount_decimal', 'nickname',
'product', 'metadata']
return {k: price[k] for k in keys} | 7414e0f3e5ae11f55b5781a679e593294122aed2 | 10,773 |
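Illustrative call for the entry above with a made-up, Stripe-style price dict (all field values are hypothetical):
# Example: only the whitelisted keys survive the trimming.
price = {
    "id": "price_123", "recurring": {"interval": "month"}, "type": "recurring",
    "currency": "usd", "unit_amount": 500, "unit_amount_decimal": "500",
    "nickname": "basic", "product": "prod_123", "metadata": {},
    "object": "price", "livemode": False,  # these two are dropped
}
print(sorted(_minimize_price(price)))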
import numpy as np

def project(signals, q_matrix):
"""
Project the given signals on the given space.
Parameters
----------
signals : array_like
Matrix with the signals in its rows
q_matrix : array_like
Matrix with an orthonormal basis of the space in its rows
Returns
-------
proj_signals : ndarray
Matrix with the projected signals in its rows
"""
signals = np.asarray(signals)
q_matrix = np.asarray(q_matrix)
return q_matrix.T.dot(q_matrix.dot(signals.T)).T | 0d6aa780d0d424260df5f8391821c806e12c81e5 | 10,774 |
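A small illustrative check for the entry above: projecting onto a 1-D orthonormal basis keeps only the component along that vector.
# Example: project two signals onto the subspace spanned by one orthonormal row.
import numpy as np
q = np.array([[1.0, 0.0, 0.0]])            # orthonormal basis, one row
signals = np.array([[2.0, 3.0, 4.0],
                    [5.0, 6.0, 7.0]])
print(project(signals, q))
# [[2. 0. 0.]
#  [5. 0. 0.]]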
def all_movies():
"""
Returns all movie in the database for Movies
service
"""
movies = ut.get_movies()
if len(movies) == 0:
abort(404)
return make_response(jsonify({"movies":movies}),200) | d8b2e3a66adf52830d7027953c22071449d2b29a | 10,775 |
def format_map(mapping, st):
"""
Format string st with given map.
"""
return st.format_map(mapping) | 462e0a744177d125db50739eac1f2e7a62128010 | 10,776 |
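Trivial usage sketch for the entry above:
# Example: fill a template from a dict.
print(format_map({"name": "world"}, "hello {name}"))  # hello world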
import networkx as nx
import pandas as pd

def communities_greedy_modularity(G, f):
    """
    Add a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
    G: a networkx graph.
    f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
    """
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the datadrame should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f | cfca6ef66730f3a6ef467f1c51c66c5d46296351 | 10,777 |
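Illustrative call for the entry above (not from the source), assuming networkx and pandas are installed; it uses the built-in karate-club graph so that f.name matches G.nodes():
# Example: annotate karate-club nodes with their greedy-modularity community.
import networkx as nx
import pandas as pd

G = nx.karate_club_graph()
f = pd.DataFrame({"name": list(G.nodes())})
f = communities_greedy_modularity(G, f)
print(f.head())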
import json
def load_loglin_stats(infile_path):
"""read in data in json format"""
# convert all 'stats' to pandas data frames
with open(infile_path) as infile:
data = json.load(infile)
new_data = {}
for position_set in data:
try:
new_key = eval(position_set)
except NameError:
new_key = position_set
new_data[new_key] = {}
for key, value in list(data[position_set].items()):
if key == "stats":
value = read_json(value)
new_data[new_key][key] = value
return new_data | c307ff2cf4e07bbb7843971cceaf74744422276c | 10,778 |
import numpy as np

def _simple_logistic_regression(x, y, beta_start=None, verbose=False,
                                CONV_THRESH=1.e-3, MAXIT=500):
    """
    Faster than logistic_regression when there is only one predictor.
    """
    if len(x) != len(y):
        raise ValueError("x and y should be the same length!")
    if beta_start is None:
        beta_start = np.zeros(2, x.dtype.char)
    iter = 0; diff = 1.; beta = beta_start  # initial values
    if verbose:
        print('iteration  beta log-likelihood |beta-beta_old|')
    while iter < MAXIT:
        beta_old = beta
        p = np.exp(beta[0]+beta[1]*x)/(1.+np.exp(beta[0]+beta[1]*x))
        l = np.sum(y*np.log(p) + (1.-y)*np.log(1.-p))  # log-likelihood
        s = np.array([np.sum(y-p), np.sum((y-p)*x)])   # scoring function
        # information matrix
        J_bar = np.array([[np.sum(p*(1-p)), np.sum(p*(1-p)*x)],
                          [np.sum(p*(1-p)*x), np.sum(p*(1-p)*x*x)]])
        beta = beta_old + np.dot(np.linalg.inv(J_bar), s)  # new value of beta
        diff = np.sum(np.fabs(beta-beta_old))  # sum of absolute differences
        if verbose:
            print(iter+1, beta, l, diff)
        if diff <= CONV_THRESH:
            break
        iter = iter + 1
return beta, J_bar, l | c37190b167e634df31127f79163aaeb56bac217e | 10,779 |
import numpy as np

def preemphasis(signal, coeff=0.95):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return np.append(signal[0],signal[1:]-coeff*signal[:-1]) | c5173708e7b349decd34ac886493103eaadb023d | 10,780 |
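Quick numeric sketch for the entry above (illustrative values):
# Example: pre-emphasis of a short ramp signal.
import numpy as np
signal = np.array([1.0, 2.0, 3.0, 4.0])
print(preemphasis(signal, coeff=0.95))
# approximately [1.   1.05 1.1  1.15]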
from typing import Callable, List

try:
    from sacremoses import MosesTokenizer
    from sacremoses import MosesPunctNormalizer
except ImportError:
    raise ImportError(
        "Please install sacremoses before using the Moses tokenizer, otherwise you can use segtok_tokenizer as advanced tokenizer."
    )

def build_moses_tokenizer(tokenizer: MosesTokenizerSpans,
                          normalizer: MosesPunctNormalizer = None) -> Callable[[str], List[Token]]:
    """
    Wrap a Moses tokenizer to build a tokenizer for the Sentence class.
    :param tokenizer: a Moses tokenizer instance
    :return: a tokenizer function to provide to the Sentence class constructor
    """
moses_tokenizer: MosesTokenizerSpans = tokenizer
if normalizer:
normalizer: MosesPunctNormalizer = normalizer
def tokenizer(text: str) -> List[Token]:
if normalizer:
text = normalizer.normalize(text=text)
doc = moses_tokenizer.span_tokenize(text=text, escape=False)
previous_token = None
tokens: List[Token] = []
for word, (start_pos, end_pos) in doc:
word: str = word
token = Token(
text=word, start_position=start_pos, whitespace_after=True
)
tokens.append(token)
if (previous_token is not None) and (
token.start_pos - 1
== previous_token.start_pos + len(previous_token.text)
):
previous_token.whitespace_after = False
previous_token = token
return tokens
return tokenizer | 0dee31ab9030e387dd6907efad60c188eb0241b2 | 10,781 |
from typing import Callable
from typing import Hashable
from typing import Union
def horizontal_block_reduce(
obj: T_DataArray_or_Dataset,
coarsening_factor: int,
reduction_function: Callable,
x_dim: Hashable = "xaxis_1",
y_dim: Hashable = "yaxis_1",
coord_func: Union[str, CoordFunc] = coarsen_coords_coord_func,
) -> T_DataArray_or_Dataset:
"""A generic horizontal block reduce function for xarray data structures.
This is a convenience wrapper around block_reduce for applying coarsening
over n x n patches of array elements. It should only be used if a dask
implementation of the reduction method has not been implemented (e.g. for
median) or if a custom reduction method is used that is not implemented in
xarray. Otherwise, block_coarsen should be used.
Args:
obj: Input Dataset or DataArray.
coarsening_factor: Integer coarsening factor to use.
reduction_function: Array reduction function which accepts a tuple of
axes to reduce along.
x_dim: x dimension name (default 'xaxis_1').
y_dim: y dimension name (default 'yaxis_1').
coord_func: function that is applied to the coordinates, or a
mapping from coordinate name to function. See `xarray's coarsen
method for details
<http://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html>`_.
Returns:
xr.Dataset or xr.DataArray.
"""
block_sizes = {x_dim: coarsening_factor, y_dim: coarsening_factor}
return xarray_block_reduce(
obj, block_sizes, reduction_function, coord_func=coord_func,
) | 07fc497ae8c5cd90699bc73bfbeab705c13ed0c6 | 10,782 |
def statements_api(context, request):
"""List all the statements for a period."""
dbsession = request.dbsession
owner = request.owner
owner_id = owner.id
period = context.period
inc_case = case([(AccountEntry.delta > 0, AccountEntry.delta)], else_=None)
dec_case = case([(AccountEntry.delta < 0, AccountEntry.delta)], else_=None)
statement_rows = (
dbsession.query(
Statement.id,
Statement.source,
Statement.filename,
func.count(inc_case).label('inc_count'),
func.count(dec_case).label('dec_count'),
func.sum(inc_case).label('inc_total'),
func.sum(dec_case).label('dec_total'),
)
.outerjoin(AccountEntry, AccountEntry.statement_id == Statement.id)
.filter(
Statement.owner_id == owner_id,
Statement.file_id == period.file_id,
Statement.period_id == period.id,
)
.group_by(Statement.id)
.order_by(Statement.id)
.all()
)
statements = [{
'id': str(row.id),
'source': row.source,
'filename': row.filename,
'inc_count': row.inc_count,
'dec_count': row.dec_count,
'inc_total': row.inc_total,
'dec_total': row.dec_total,
} for row in statement_rows]
now = dbsession.query(now_func).scalar()
return {
'now': now,
'statements': statements,
} | 87a1ec3e5fc5730eda30367a5f9f34aef6cf7339 | 10,783 |
import numpy as np
import scipy.special as sp

def fp(x):
    """Function used in **v(a, b, th, nu, dimh, k)** for **analytic_solution_slope()**
    :param x: real number or array of real numbers
    :type x: float or numpy.ndarray
    :return: fp value
    :rtype: complex or numpy.ndarray
    """
rx = np.sqrt(x * 2 / np.pi)
s_fresnel, c_fresnel = sp.fresnel(rx)
return - 2 * 1j * np.sqrt(x) * np.exp(-1j * x) * np.sqrt(np.pi / 2.) \
* (.5 - c_fresnel + 1j * (.5 - s_fresnel)) | 202000557fb239e589ffd4d7b9709b60678ab784 | 10,784 |
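Illustrative evaluation of the entry above; sp is taken to be scipy.special, as the fresnel call suggests:
# Example: evaluate fp on a few positive arguments.
import numpy as np
print(fp(np.array([0.1, 1.0, 10.0])))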
def get_truck_locations(given_address):
"""
Get the location of the food trucks in Boston TODAY within 1 mile
of a given_address
:param given_address: a pair of coordinates
:return: a list of features with unique food truck locations
"""
formatted_address = '{x_coordinate}, {y_coordinate}'.format(
x_coordinate=given_address['x'],
y_coordinate=given_address['y']
)
QUERY["geometry"] = formatted_address
trucks = gis_utils.get_features_from_feature_server(BASE_URL, QUERY)
truck_unique_locations = []
for t in trucks:
if t['attributes']['Day'] == DAY:
truck_unique_locations.append(t)
return truck_unique_locations | f1d5e290c5c46e1587a2f98c2e82edee3890fc05 | 10,785 |
import inspect
import warnings
def _getRelevantKwds(method, kwds):
"""return kwd args for the given method, and remove them from the given kwds"""
    argspec = inspect.getfullargspec(method)
d = dict()
for a in kwds:
if a not in argspec.args:
warnings.warn("Unrecognized kwd: {!r}".format(a))
for a in argspec.args:
if a in kwds:
d[a] = kwds[a]
del kwds[a]
return d | bca410b99e750f233a5e4476413e6bacfa52dcb9 | 10,786 |
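Usage sketch for the entry above with a hypothetical helper function, showing how recognized keyword arguments are extracted and removed:
# Example: split kwargs between a plotting helper and the rest.
def plot(title=None, color="k"):
    return title, color

kwds = {"title": "demo", "linewidth": 2}
plot_kwds = _getRelevantKwds(plot, kwds)
print(plot_kwds)  # {'title': 'demo'} (a warning is emitted for 'linewidth')
print(kwds)       # {'linewidth': 2}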
import requests
def find_overview_details(park_code):
""" Find overview details from park code """
global API_KEY
fields = "&fields=images,entranceFees,entrancePasses,operatingHours,exceptions"
url = "https://developer.nps.gov/api/v1/parks?parkCode=" + park_code + "&api_key=" + API_KEY + fields
response = requests.get(url)
json_object = response.json()
overview = json_object['data']
return {'overview': overview} | 95cf281828154c45eae1e239f33d2de8bcf9e7fa | 10,787 |
import torch
def embed_nomenclature(
D,
embedding_dimension,
loss="rank",
n_steps=1000,
lr=10,
momentum=0.9,
weight_decay=1e-4,
ignore_index=None,
):
"""
Embed a finite metric into a target embedding space
Args:
D (tensor): 2D-cost matrix of the finite metric
embedding_dimension (int): dimension of the target embedding space
        loss (str): embedding loss to use, distortion-based (loss='disto') or rank-based (loss='rank')
n_steps (int): number of gradient iterations
lr (float): learning rate
momentum (float): momentum
weight_decay (float): weight decay
Returns:
embedding (tensor): embedding of each vertex of the finite metric space, shape n_vertex x embedding_dimension
"""
n_vertex = D.shape[0]
mapping = torch.rand(
(n_vertex, embedding_dimension), requires_grad=True, device=D.device
)
if loss == "rank":
crit = RankLoss(D, n_triplets=1000)
elif loss == "disto":
crit = DistortionLoss(D, scale_free=False)
else:
raise ValueError
optimizer = torch.optim.SGD(
[mapping], lr=lr, momentum=momentum, weight_decay=weight_decay
)
print("Embedding nomenclature . . .")
for i in range(n_steps):
loss = crit(mapping)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        print(
            "Step {}: loss {:.4f}".format(i + 1, loss.cpu().detach().numpy()),
            end="\r",
        )
print("Final loss {:.4f}".format(crit(mapping).cpu().detach().numpy()))
return mapping.detach() | 1e9ca98dec0c3e42af0af483b6e9ef9efa11b225 | 10,788 |
def raw_env():
"""
    To support the AEC API, the raw_env() function just uses the parallel_to_aec
    function to convert from a ParallelEnv to an AEC env
"""
env = parallel_env()
env = parallel_to_aec(env)
return env | dcb491c2beb50f73ba0fdab96bcd069916ce9b6d | 10,789 |
def cmd_line(preprocessor: Preprocessor, args: str) -> str:
"""the line command - prints the current line number"""
if args.strip() != "":
preprocessor.send_warning("extra-arguments", "the line command takes no arguments")
context = preprocessor.context.top
pos = context.true_position(preprocessor.current_position.begin)
return str(context.file.line_number(pos)[0]) | 061bcf2ced6c22c77d81bb30ec00a5c1964c3624 | 10,790 |
import tempfile
import zipfile
import click
import os
def install_from_zip(pkgpath, install_path, register_func, delete_after_install=False):
"""Install plugin from zipfile."""
logger.debug("%s is a file, attempting to load zip", pkgpath)
pkgtempdir = tempfile.mkdtemp(prefix="honeycomb_")
try:
with zipfile.ZipFile(pkgpath) as pkgzip:
pkgzip.extractall(pkgtempdir)
except zipfile.BadZipfile as exc:
logger.debug(str(exc))
raise click.ClickException(str(exc))
if delete_after_install:
logger.debug("deleting %s", pkgpath)
os.remove(pkgpath)
logger.debug("installing from unzipped folder %s", pkgtempdir)
return install_dir(pkgtempdir, install_path, register_func, delete_after_install=True) | 16c430ed97e1e3ee29589bec42a103f7374bf60b | 10,791 |
from xml.dom import expatbuilder
from xml.dom import pulldom
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
return expatbuilder.parse(file)
else:
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize}) | 0d4bc592143ecb7c093eceaf4f5fe0d18869ea9c | 10,792 |
import hashlib

from django.conf import settings
from django.utils.crypto import get_random_string
from django.utils.encoding import force_bytes

def create_hash256(max_length=None):
    """
    Generate a hash that can be used as an application secret.
    Warning: this is not sufficiently secure for tasks like encryption.
    Currently, this is just meant to create sufficiently random tokens.
    """
hash_object = hashlib.sha256(force_bytes(get_random_string(32)))
hash_object.update(force_bytes(settings.SECRET_KEY))
output_hash = hash_object.hexdigest()
if max_length is not None and len(output_hash) > max_length:
return output_hash[:max_length]
return output_hash | 4856be59c475bcfc07137b62511de4d5c7531eb3 | 10,793 |
from io import StringIO
def assert_content_in_file(file_name, expected_content):
"""
Fabric assertion: Check if some text is in the specified file (result of installing a test product)
Provision dir: PROVISION_ROOT_PATH
:param file_name: File name
:param expected_content: String to be found in file
:return: True if given content is in file (dir: PROVISION_ROOT_PATH).
"""
file_path = PROVISION_ROOT_PATH.format(file_name)
fd = StringIO()
get(file_path, fd)
file_content = fd.getvalue()
return expected_content in file_content | eba68222d39c55902da1c4c4ae7055b7edc170e0 | 10,794 |
import requests
import re
import os
import pandas as pd
def generate_substrate_fasta(df):
""" gemerates fasta sequence files containing sequences of
all proteins that contain phosphosites that do not have kinase
annotations in PSP or Networkin. The outputs of the function
will be used as input to run Networkin locally and predict kinases
Parameters
----------
df : pandas dataframe
subset of phoproteomics data (metadata) that do
not have kinase annotations
Returns
-------
substrate_fasta : list of strings
each pair of elements in the list is a uniprot id (eg: '>P01345')
followed by the sequence
df2 : pandas dataframe
dataframe with uniprot id, amino acid and site of each phosphosite
"""
substrate_fasta = []
ids, aa, pos = [], [], []
obsolete_entries = []
for ind, substrate in enumerate(df.Uniprot_Id.tolist()):
r = requests.get('http://www.uniprot.org/uniprot/%s.fasta' %
substrate)
# substrate_fasta.append(r.text)
seq_lines = r.text.split('\n')
sequence = ''.join(seq_lines[1:])
id_line = seq_lines[0]
try:
# id = re.search('>(.*)HUMAN', id_line).group(1) + 'HUMAN'
id = re.search('>(?:sp|tr)\|(.*)\|', id_line).group(1)
ids.append(id)
# seq_lines[0] = id
substrate_fasta.append(">%s\n%s\n" % (id, sequence))
site = df.Site.iloc[ind]
aa.append(site[0])
pos.append(site[1:])
except AttributeError:
obsolete_entries.append(substrate)
df2 = pd.DataFrame(list(zip(ids, pos, aa)))
if obsolete_entries:
with open(os.path.join(resource_path,
'obsolete_entries.txt'), 'a') as f:
for s in list(set(obsolete_entries)):
f.write("%s\n" % s)
return substrate_fasta, df2 | 7e1350444fba35331977976c19607bb34915e2f0 | 10,795 |
import math
import numpy
from osgeo import gdal, ogr
import pygeoprocessing.geoprocessing
def _calculate_hwp_storage_fut(
hwp_shapes, base_dataset_uri, c_hwp_uri, bio_hwp_uri, vol_hwp_uri,
yr_cur, yr_fut, process_pool=None):
"""Calculates carbon storage, hwp biomassPerPixel and volumePerPixel due to
harvested wood products in parcels on current landscape.
hwp_shapes - a dictionary containing the current and/or future harvest
maps (or nothing)
hwp_shapes['cur'] - oal shapefile indicating harvest map from the
current landscape
hwp_shapes['fut'] - oal shapefile indicating harvest map from the
future landscape
c_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for current calculation
bio_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
vol_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
yr_cur - year of the current landcover map
    yr_fut - year of the future landcover map
process_pool - a process pool for parallel processing (can be None)
No return value"""
############### Start
pixel_area = pygeoprocessing.geoprocessing.get_cell_size_from_uri(base_dataset_uri) ** 2 / 10000.0 #convert to Ha
nodata = -5.0
c_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
bio_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
vol_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, c_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, bio_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, vol_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
#Create a temporary shapefile to hold values of per feature carbon pools
#HWP biomassPerPixel and volumePerPixel, will be used later to rasterize
#those values to output rasters
calculatedAttributeNames = ['c_hwp_pool', 'bio_hwp', 'vol_hwp']
if 'cur' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['cur'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['start_date'] != 0 and field_args['cut_cur'] != 0:
time_span = (yr_fut + yr_cur) / 2.0 - field_args['start_date']
start_years = yr_fut - field_args['start_date']
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = (
pixel_area * _carbon_pool_in_hwp_from_parcel(
field_args['cut_cur'], time_span, start_years,
field_args['freq_cur'], field_args['decay_cur']))
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_cur']))
#The measure of biomass is in terms of Mg/ha
biomassInFeaturePerArea = field_args['cut_cur'] * \
numberOfHarvests / float(field_args['c_den_cur'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_cur']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, raster_uri in zip(calculatedAttributeNames,
[c_hwp_cur_uri, bio_hwp_cur_uri, vol_hwp_cur_uri]):
nodata = -1.0
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, raster_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
raster = gdal.Open(raster_uri, gdal.GA_Update)
gdal.RasterizeLayer(raster, [1], hwp_shape_layer_copy, options=['ATTRIBUTE=' + attributeName])
raster.FlushCache()
raster = None
#handle the future term
if 'fut' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['fut'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['cut_fut'] != 0:
time_span = yr_fut - (yr_fut + yr_cur) / 2.0
start_years = time_span
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = pixel_area * \
_carbon_pool_in_hwp_from_parcel(
field_args['cut_fut'], time_span, start_years,
field_args['freq_fut'], field_args['decay_fut'])
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_fut']))
biomassInFeaturePerArea = field_args['cut_fut'] * \
numberOfHarvests / float(field_args['c_den_fut'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_fut']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, (raster_uri, cur_raster_uri) in zip(
calculatedAttributeNames, [(c_hwp_uri, c_hwp_cur_uri), (bio_hwp_uri, bio_hwp_cur_uri), (vol_hwp_uri, vol_hwp_cur_uri)]):
temp_filename = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(
base_dataset_uri, temp_filename, 'GTiff',
nodata, gdal.GDT_Float32, fill_value=nodata)
temp_raster = gdal.Open(temp_filename, gdal.GA_Update)
gdal.RasterizeLayer(temp_raster, [1], hwp_shape_layer_copy,
options=['ATTRIBUTE=' + attributeName])
temp_raster.FlushCache()
temp_raster = None
#add temp_raster and raster cur raster into the output raster
nodata = -1.0
base_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
raster_uri)
cur_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
cur_raster_uri)
def add_op(base, current):
"""add two rasters"""
nodata_mask = (base == base_nodata) | (current == cur_nodata)
return numpy.where(nodata_mask, nodata, base+current)
pixel_size_out = (
pygeoprocessing.geoprocessing.get_cell_size_from_uri(
raster_uri))
pygeoprocessing.geoprocessing.vectorize_datasets(
[cur_raster_uri, temp_filename], add_op, raster_uri,
gdal.GDT_Float32, nodata,
pixel_size_out, "intersection", dataset_to_align_index=0,
vectorize_op=False) | 71b597c62014c120a3deb99ceea14d84612e3b19 | 10,796 |
from datetime import datetime
def test_function(client: MsGraphClient, args):
"""
Performs basic GET request to check if the API is reachable and authentication is successful.
Returns ok if successful.
"""
response = client.ms_client.http_request(
method='GET', url_suffix='security/alerts', params={'$top': 1}, resp_type='response')
try:
data = response.json() if response.text else {}
if not response.ok:
return_error(f'API call to MS Graph Security failed. Please check authentication related parameters.'
f' [{response.status_code}] - {demisto.get(data, "error.message")}')
params: dict = demisto.params()
if params.get('isFetch'):
fetch_time = params.get('fetch_time', '1 day')
fetch_providers = params.get('fetch_providers', '')
fetch_filter = params.get('fetch_filter', '')
filter_query = create_filter_query(fetch_filter, fetch_providers)
timestamp_format = '%Y-%m-%dT%H:%M:%S.%fZ'
time_from = parse_date_range(fetch_time, date_format=timestamp_format)[0]
time_to = datetime.now().strftime(timestamp_format)
try:
client.search_alerts(last_modified=None, severity=None, category=None, vendor=None, time_from=time_from,
time_to=time_to, filter_query=filter_query)['value']
except Exception as e:
if 'Invalid ODATA query filter' in e.args[0]:
raise DemistoException("Wrong filter format, correct usage: {property} eq '{property-value}'"
"\n\n" + e.args[0])
raise e
return 'ok', None, None
except TypeError as ex:
demisto.debug(str(ex))
return_error(f'API call to MS Graph Security failed, could not parse result. '
f'Please check authentication related parameters. [{response.status_code}]') | 24a66cca04c9493f7c0bbe13b54e8793188e0924 | 10,797 |
import os
def load(path='db'):
"""Recursivly load a db directory"""
if not os.path.isabs(path):
path = os.path.abspath(path)
env["datastore"].update({
"type": "yamldir",
"path": path,
})
return loaddir(path) | a51ece0de411618de0bef955adb596f8ea80efe5 | 10,798 |
def wcxf2arrays_symmetrized(d):
"""Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values.
In contrast to `wcxf2arrays`, here the numpy arrays fulfill the same
symmetry relations as the operators (i.e. they contain redundant entries)
and they do not contain undefined indices.
Zero arrays are added for missing coefficients."""
C = wcxf2arrays(d)
C = symmetrize_nonred(C)
C = add_missing(C)
return C | 6cca03761b9799a3af7b933877ff70d6d68f7644 | 10,799 |