content | sha1 | id
---|---|---|
def build_format(name: str, pattern: str, label: bool) -> str:
"""Create snippet format.
:param name: Instruction name
:param pattern: Instruction regex pattern
"""
snip: str = f"{name:7s}" + pattern.format(**SNIPPET_REPLACEMENTS)
snip = snip.replace("(", "")
snip = snip.replace(")", "")
snip = snip.replace("number?\\\\$reg\\", "number(\\$reg)")
snip = snip.replace("\\$", "")
replace_ct = 1
reg_ct = snip.count("reg")
for i in range(0, reg_ct):
f = f"${REG_ARGS[i]}"
snip = snip.replace("reg", f, 1)
replace_ct += 1
if not label:
snip = snip.replace("number", "100")
replace_ct += 1
return snip | ec25ecf4f2d46db398c389b479620e0cbcf30ee2 | 17,790 |
import torch
def make_observation_mapper(claims):
"""Make a dictionary of observation.
Parameters
----------
claims: pd.DataFrame
Returns
-------
observation_mapper: dict
        a dictionary mapping each random variable to its observed value
"""
observation_mapper = dict()
for c in claims.index:
s = claims.iloc[c]['source_id']
observation_mapper[f'b_{s}_{c}'] = torch.tensor(
claims.iloc[c]['value'])
return observation_mapper | 43052bd9ce5e1121f3ed144ec48acf20ad117313 | 17,791 |
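A minimal usage sketch; the table below is hypothetical but mirrors the two columns the function reads:

import pandas as pd
claims = pd.DataFrame({'source_id': [0, 1], 'value': [1.0, 0.0]})
obs = make_observation_mapper(claims)
# obs == {'b_0_0': tensor(1.), 'b_1_1': tensor(0.)}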
def toCSV(
dataset, # type: BasicDataset
showHeaders=True, # type: Optional[bool]
forExport=False, # type: Optional[bool]
localized=False, # type: Optional[bool]
):
# type: (...) -> String
"""Formats the contents of a dataset as CSV (comma separated
values), returning the resulting CSV as a string.
If the "forExport" flag is set, then the format will be appropriate
for parsing using the `fromCSV` function.
Args:
dataset: The dataset to export to CSV.
showHeaders: If set to True, a header row will be present in
the CSV. Default is True. Optional.
forExport: If set to True, extra header information will be
present in the CSV data which is necessary for the CSV to be
compatible with the fromCSV method. Overrides showHeaders.
Default is False. Optional.
localized: If set to True, the string representations of the
values in the CSV data will be localized. Default is
False. Optional.
Returns:
The CSV data as a string.
"""
print(dataset, showHeaders, forExport, localized)
return "" | 9d998891a9712f42af8744513c1f61540eee0e2e | 17,792 |
def add_record(session, data):
"""
session -
data - dictionary {"site":"Warsaw"}
"""
skeleton = Skeleton()
skeleton.site = data["site"]
skeleton.location = data["location"]
skeleton.skeleton = data["skeleton"]
skeleton.observer = data["observer"]
skeleton.obs_date = data["obs_date"]
session.add(skeleton)
session.commit()
return skeleton.skeleton_id | f91df4459b37b7df4d313fd01323451bf897a754 | 17,793 |
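A hedged usage sketch; the session, field values, and Skeleton model wiring below are hypothetical:

data = {"site": "Warsaw", "location": "trench 3", "skeleton": "SK-01",
        "observer": "J. Kowalski", "obs_date": "2021-05-10"}
new_id = add_record(session, data)  # commits and returns the new skeleton_id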
import numpy as np
def hue_angle(C):
"""
Returns the *hue* angle :math:`h` in degrees from given colour difference
signals :math:`C`.
Parameters
----------
C : array_like
Colour difference signals :math:`C`.
Returns
-------
numeric or ndarray
*Hue* angle :math:`h` in degrees.
Examples
--------
>>> C = np.array([
... -5.365865581996587e-05,
... -0.000571699383647,
... 0.000625358039467
... ])
>>> hue_angle(C) # doctest: +ELLIPSIS
269.2737594...
"""
C_1, C_2, C_3 = tsplit(C)
hue = (180 * np.arctan2(0.5 * (C_2 - C_3) / 4.5, C_1 -
(C_2 / 11)) / np.pi) % 360
return hue | 599f594eff92280df06a4c6ef88ccf286f146475 | 17,794 |
def get_submodel_list_copasi(model_name: str,
model_info: pd.DataFrame):
"""
    This function loads a list of Copasi model files that all belong to the
    same benchmark model, given the short id of that benchmark model.
    It also extracts the respective SBML files from the list and returns them
    with the models, in case any postprocessing of the Copasi results is
    necessary
"""
# get information about the model from the tsv table
model_rows = model_info.loc[model_info['short_id'] == model_name]
# only take accepted models
model_rows = model_rows[model_rows['accepted']]
submodel_paths = [path for path in model_rows['copasi_path_final']]
# collect the submodels
copasi_file_list = []
sbml_model_list = []
for submodel_path in submodel_paths:
copasi_file, sbml_model = get_submodel_copasi(submodel_path, model_info)
if copasi_file is not None:
copasi_file_list.append(copasi_file)
sbml_model_list.append(sbml_model)
return copasi_file_list, sbml_model_list | ea889e5ea836131d8febc94dd69806b2acf47559 | 17,795 |
def GetNextBmask(enum_id, value):
"""
Get next bitmask in the enum (bitfield)
@param enum_id: id of enum
@param value: value of the current bitmask
@return: value of a bitmask with value higher than the specified
value. -1 if no such bitmasks exist.
All bitmasks are sorted by their values
as unsigned longs.
"""
return idaapi.get_next_bmask(enum_id, value) | d2c415e1a3ad63c651dc2df771dbe43a082613d9 | 17,796 |
def annotate_link(domain):
"""This function is called by the url tag. Override to disable or change behaviour.
domain -- Domain parsed from url
"""
return u" [%s]"%_escape(domain) | 26b5c8979cc8cd7f581a7ff889a907cf71844c72 | 17,797 |
import numpy as np
import numpy.linalg as nla
def kmeans(data, k, num_iterations, num_inits=10, verbose=False):
"""Execute the k-means algorithm for
determining the best k clusters of data
points in a dataset.
Parameters
----------
data : ndarray, (n,d)
n data points in R^d.
k : int
The number of clusters to separate
the data into.
num_iterations : int
The number of iterations of the k-means
algorithm to execute.
num_inits : int, optional
Number of random initializations to try.
Returns the best result.
verbose : bool, optional
Specifies whether to print info about
the execution of the algorithm.
    Returns
    -------
    (clusters, data_point_assignment, centroids)
        The results of the k-means algorithm. clusters
        is a list of the clusters (which are lists of ints).
        data_point_assignment is a (n,) numpy array of ints
        that indicates which cluster each data point has been
        assigned to, and centroids is a (d,k) numpy array
        whose columns are the cluster centers.
"""
# Number of data points
num_data_points = int(data.shape[0])
# Spatial dimension d
d = int(data.shape[1])
best_results = None
best_total_distance = np.inf
for init in range(num_inits):
# Map from data point index to cluster index.
data_point_assignment = np.zeros(num_data_points, dtype=int)
        # list of data points in clusters
        clusters = [[] for _ in range(k)]  # independent lists, not k aliases of one list
        # Initialize each centroid as the
        # mean of k randomly sampled points.
centroids = np.zeros((d,k))
for ind_cluster in range(k):
inds_data = np.random.choice(num_data_points, k)
centroid = np.mean(data[inds_data, :], axis=0)
centroids[:, ind_cluster] = centroid
for iteration in range(num_iterations):
if verbose:
print('==== Iteration {}/{} ===='.format(iteration+1, num_iterations))
print('centroids = {}'.format(centroids))
clusters = []
for ind_c in range(k):
clusters.append([])
# Assignment step:
# Assign each data point to the
# cluster with nearest centroid.
total_distance = 0.0
for ind_point in range(num_data_points):
distances = np.array([nla.norm(data[ind_point, :] - centroids[:, ind_c]) for ind_c in range(k)])
ind_cluster = np.argmin(distances)
total_distance += distances[ind_cluster]
data_point_assignment[ind_point] = ind_cluster
clusters[ind_cluster].append(ind_point)
# Update step:
# Update the centroids of the
# new clusters.
for ind_cluster in range(k):
cluster = clusters[ind_cluster]
cluster_data = np.array([data[ind_point, :] for ind_point in cluster])
centroid = np.mean(cluster_data, axis=0)
centroids[:, ind_cluster] = centroid
if total_distance < best_total_distance:
best_total_distance = total_distance
best_results = (clusters, data_point_assignment, centroids)
return best_results | 3cc3681ac0d0306fc7dce2da5757e6c162f7c457 | 17,798 |
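A minimal usage sketch on synthetic data (the values are hypothetical):

import numpy as np
points = np.random.randn(100, 2)            # 100 points in R^2
clusters, assignment, centroids = kmeans(points, k=3, num_iterations=10)
print(assignment.shape, centroids.shape)    # (100,) (2, 3)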
from numpy import zeros, float64
def point_on_bezier_curve(cpw, n, u):
"""
Compute point on Bezier curve.
:param ndarray cpw: Control points.
:param int n: Degree.
:param u: Parametric point (0 <= u <= 1).
:return: Point on Bezier curve.
:rtype: ndarray
*Reference:* Algorithm A1.4 from "The NURBS Book".
"""
bernstein = all_bernstein(n, u)
pnt = zeros(4, dtype=float64)
for k in range(0, n + 1):
pnt += bernstein[k] * cpw[k]
return pnt | 3e4a494ff9ffabf6ad0d2711beba0e55647e7071 | 17,799 |
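The snippet relies on an all_bernstein helper that is not shown; a minimal sketch of it, following Algorithm A1.3 from "The NURBS Book":

from numpy import zeros

def all_bernstein(n, u):
    """Compute all degree-n Bernstein polynomials at parameter u."""
    b = zeros(n + 1)
    b[0] = 1.0
    u1 = 1.0 - u
    for j in range(1, n + 1):
        saved = 0.0
        for k in range(j):
            temp = b[k]
            b[k] = saved + u1 * temp
            saved = u * temp
        b[j] = saved
    return b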
import requests
def list_with_one_dict(sort_type, url_param=None):
"""
Search by parameter that returns a list with one dictionary.
Used for full country name and capital city.
"""
extra_param = ""
if sort_type == 2:
url_endpoint = "/name/"
user_msg = "full country name"
extra_param = "?fullText=true"
desc = "\nSearch by full country name. Example: United States of America"
elif sort_type == 6:
url_endpoint = "/capital/"
user_msg = "capital city"
desc = "\nSearch by capital city. Example: Washington"
if url_param is None:
print(desc)
url_param = input("\nEnter " + user_msg + ": ")
res = requests.get(URL + url_endpoint +
url_param.strip().lower() + extra_param)
    try:
        res.raise_for_status()
    except requests.exceptions.HTTPError:
        return "\nError! Could not find information for the given input."
res_json = res.json()
country_name = res_json[0]["name"]
info = get_info(res_json, country_name)
print(info)
save_to_file(info, country_name) | 79f96ab76c123bfa3c3faee57e9af6c1900c3cd7 | 17,800 |
def get_form(case, action_filter=lambda a: True, form_filter=lambda f: True, reverse=False):
"""
returns the first form that passes through both filter functions
"""
gf = get_forms(case, action_filter=action_filter, form_filter=form_filter, reverse=reverse)
try:
        return next(gf)
except StopIteration:
return None | b0e8caeb6fae1f56407aaf14475c5f06c9b4a3d0 | 17,801 |
from datetime import datetime
def csv_to_json_generator(df, field_map: dict, id_column: str, category_column: str):
"""
Creates a dictionary/json structure for a `single id dataframe` extracting content using the
`extract_features_by_category` function.
"""
id_list = find_ids(df=df, id_column=id_column)
logger.info('Found {} units on \'{}\' to process'.format(len(id_list), id_column))
out = []
for f_id in id_list:
        f_info = {'id': str(f_id), '@timestamp': datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")}  # "Z" suffix implies UTC
f_df = single_id_df(df=df, id_column=id_column, id_value=f_id)
for key in field_map.keys():
try:
data = extract_features_by_category(single_id_df=f_df, category=key, category_column=category_column,
related_features=field_map[key])[key.lower()]
f_info[key.lower()] = data
            except Exception:
logger.error('id: {} key: \'{}\''.format(f_id, key))
out.append(f_info)
logger.info('Generated: {}. Delta: {}'.format(len(out), len(out)-len(id_list)))
return out | 40bf0657d8ca7d141af919f03b9b6e7cc6887749 | 17,802 |
import numpy as np
def sunset_hour_angle(sinLat, cosLat, sinDec, cosDec):
"""
Calculate local sunset hour angle (radians) given sines and cosines
of latitude and declination.
"""
return np.arccos(np.clip(-sinDec / cosDec * sinLat / cosLat, -1, 1)) | 43e9e6d5026ea16f348f6704a8226763f0d08786 | 17,804 |
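A usage sketch with standard solar-geometry inputs (latitude 52°N at the June solstice):

import numpy as np
lat = np.deg2rad(52.0)
dec = np.deg2rad(23.44)                       # solar declination
ws = sunset_hour_angle(np.sin(lat), np.cos(lat), np.sin(dec), np.cos(dec))
day_length = 2 * np.rad2deg(ws) / 15.0        # hours; roughly 16.5 here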
def handle_enable(options):
"""Enable a Sopel plugin.
:param options: parsed arguments
:type options: :class:`argparse.Namespace`
:return: 0 if everything went fine;
1 if the plugin doesn't exist
"""
plugin_names = options.names
allow_only = options.allow_only
settings = utils.load_settings(options)
usable_plugins = plugins.get_usable_plugins(settings)
# plugin does not exist
unknown_plugins = [
name
for name in plugin_names
if name not in usable_plugins
]
if unknown_plugins:
display_unknown_plugins(unknown_plugins)
return 1 # do nothing and return an error code
actually_enabled = tuple(
name
for name in plugin_names
if _handle_enable_plugin(settings, usable_plugins, name, allow_only)
)
# save if required
if actually_enabled:
settings.save()
else:
        return 0  # nothing to enable or save, but not an error case
    # display plugins actually enabled by the command
print(utils.get_many_text(
actually_enabled,
one='Plugin {item} enabled.',
two='Plugins {first} and {second} enabled.',
many='Plugins {left}, and {last} enabled.'
))
return 0 | e3b931fcd8f90e7570f80604d7690cf8c8485cd9 | 17,805 |
import cv2
import numpy as np
def compare_img_hist(img_path_1, img_path_2):
"""
Get the comparison result of the similarity by the histogram of the
two images. This is suitable for checking whether the image is close
in color. Conversely, it is not suitable for checking whether shapes
are similar.
Parameters
----------
img_path_1 : str
The path of the first image for comparison.
img_path_2 : str
The path of the second image for comparison.
Returns
-------
similarity : float
Similarity between two images. The maximum is set to 1.0, and the
closer to 1.0, the higher the similarity. It is set by the mean
value of the histogram of RGB channels.
"""
assert_img_exists(img_path=img_path_1)
assert_img_exists(img_path=img_path_2)
img_1 = cv2.imread(img_path_1)
img_2 = cv2.imread(img_path_2)
channels_list = [[0], [1], [2]]
similarity_list = []
for channels in channels_list:
img_1_hist = cv2.calcHist(
images=[img_1],
channels=channels,
mask=None,
histSize=[256],
ranges=[0, 256]
)
img_2_hist = cv2.calcHist(
images=[img_2],
channels=channels,
mask=None,
histSize=[256],
ranges=[0, 256]
)
similarity_unit = cv2.compareHist(
H1=img_1_hist, H2=img_2_hist, method=cv2.HISTCMP_CORREL)
similarity_list.append(similarity_unit)
similarity = np.mean(similarity_list)
return similarity | 4fa34b3186b69464be15052a9427bb274f95d28f | 17,806 |
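A usage sketch with hypothetical file paths:

similarity = compare_img_hist('photo_a.png', 'photo_b.png')
if similarity > 0.9:
    print('the two images have very similar color distributions')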
import json
def recombinant_example(resource_name, doc_type, indent=2, lang='json'):
"""
Return example data formatted for use in API documentation
"""
chromo = recombinant_get_chromo(resource_name)
if chromo and doc_type in chromo.get('examples', {}):
data = chromo['examples'][doc_type]
elif doc_type == 'sort':
data = "request_date desc, file_number asc"
elif doc_type == 'filters':
data = {"resource": "doc", "priority": "high"}
elif doc_type == 'filter_one':
data = {"file_number": "86086"}
else:
data = {
"request_date": "2016-01-01",
"file_number": "42042",
"resource": "doc",
"prioroty": "low",
}
if not isinstance(data, (list, dict)):
return json.dumps(data)
left = ' ' * indent
if lang == 'pythonargs':
return ',\n'.join(
"%s%s=%s" % (left, k, json.dumps(data[k]))
for k in sorted(data))
out = json.dumps(data, indent=2, sort_keys=True, ensure_ascii=False)
return left[2:] + ('\n' + left[2:]).join(out.split('\n')[1:-1]) | 1a6cfe474425ba4d62472f571ca0eae0d3cfbff0 | 17,807 |
from scipy import integrate
def _integral_diff(x, pdf, a, q):
"""Return difference between q and the integral of the function `pdf`
between a and x. This is used for solving for the ppf."""
return integrate.quad(pdf, a, x)[0] - q | 616cfba7361c92f7cbdf8bac55f9a65a60c2c32f | 17,808 |
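A sketch of how such a helper is typically paired with a root finder to invert a CDF (the pdf and bounds below are hypothetical):

import numpy as np
from scipy import optimize

pdf = lambda t: np.exp(-t ** 2 / 2) / np.sqrt(2 * np.pi)   # standard normal
a, q = -10.0, 0.975
ppf = optimize.brentq(_integral_diff, a, 10.0, args=(pdf, a, q))
# ppf is approximately 1.96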
import numpy as np
def Fcomplete(t,y,k0m,k1,k2m,k2p,k3,k4,k5m,k6m,k7,Kr0,Kr1,Kr2,Kr2p,Km5,Km6,Km7,Gt,Rt,Mt,k_Gp,Gpt,n):
"""
Right hand side of ODE y'(t) = f(t,y,...)
    It receives parameters as f_args, as given by param_array (see param.py)
    3 components: G, R, M (plus Gp and Gpvis when the GEF perturbation is included)
"""
k0=k0m*Kr0 # kmi =ki/Kri or ki/Kmi
k2=k2m*Kr2
k5=k5m*Km5
k6=k6m*Km6
G=y[0]
R=y[1]
M=y[2]
if len(y) > 3:
Gp=y[3] # GEF perturbation (what's given in the data)
Gpvis=y[4] # GEF perturbation (what's given in the data)
else:
Gp = 0.
Gpvis = 0
return np.array( [ k3*R*(Gt-G) - k4*M*G, (k0*G+Gpt*Gp)*(Rt-R)/(Kr0+(Rt-R)) + k1*(Rt-R)/(Kr1+(Rt-R)) - k2*R/(Kr2+R), k5*R*(Mt-M)**n/(Km5**n+(Mt-M)**n) - k6*M/(Km6+M) + k7*(Mt-M)/(Km7+(Mt-M)),k_Gp-k_Gp*Gp-k4*Gp*M, k_Gp-k_Gp*Gpvis] ) | a6b614a34fb0c2dcaf29107c05eb397312825568 | 17,809 |
import numpy as np
def lu_solve(l: np.ndarray, u: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Solve a linear system that has been factored by LU decomposition.
    The right-hand-side vector must first be multiplied by the permutation matrix.
    :param l: lower triangular matrix
    :param u: upper triangular matrix
    :param b: right-hand-side vector of the system
    :return: solution vector of the system
    """
n = l.shape[0]
z = np.zeros_like(b)
z[0] = b[0]
for i in range(1, n):
s = 0
for j in range(i):
s += l[i, j] * z[j]
z[i] = b[i] - s
x = np.zeros_like(b)
x[-1] = z[-1] / u[-1, -1]
for i in range(n - 2, -1, -1):
s = 0
for j in range(i + 1, n):
s += u[i, j] * x[j]
x[i] = (z[i] - s) / u[i, i]
return x | 5429fe91dfb15a5cf306871d07f0204f8f23a405 | 17,810 |
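A usage sketch built on scipy.linalg.lu, which supplies the permutation matrix needed to pre-multiply b:

import numpy as np
from scipy.linalg import lu

A = np.array([[4., 3.], [6., 3.]])
b = np.array([10., 12.])
p, l, u = lu(A)                 # A = p @ l @ u
x = lu_solve(l, u, p.T @ b)     # permute the right-hand side first
assert np.allclose(A @ x, b)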
def zr_bfr_tj():
"""
Real Name: b'Zr bfr Tj'
Original Eqn: b'Zr aftr Dam-Wr sup aftr Zr Dam+(Wr sup aftr Zr Dam*0.2)'
Units: b''
Limits: (None, None)
Type: component
b''
"""
return zr_aftr_dam() - wr_sup_aftr_zr_dam() + (wr_sup_aftr_zr_dam() * 0.2) | aae58ac349a7039afd0f3f3f12166a39de8afe31 | 17,812 |
def simplify(n):
"""Remove decimal places."""
return int(round(n)) | 9856c8f5c0448634956d1d05e44027da2f4ebe6a | 17,813 |
def resnet_qc_18(**kwargs):
"""Constructs a ResNet-18 model."""
model = ResNetQC(BasicBlock, [2, 2, 2, 2], **kwargs)
return model | a1554e044dd69e96474602c88e8e73d4d697c861 | 17,814 |
import scipy.sparse
def sparse_from_npz(file, **_kw):
"""
Possible dispatch function for ``from_path_impl``'s ``from_npz``.
Reads a scipy sparse matrix.
"""
return scipy.sparse.load_npz(file) | 9909975ff0309cde98117e64718fe292de574987 | 17,815 |
import copy
def get_config():
"""
Read the configuration
:returns: current configuration
"""
global config
return copy.deepcopy(config) | 3e9b064123ed9165c04cbc7e1c2b0d646703cb7a | 17,816 |
def resnext20_2x64d_cifar100(classes=100, **kwargs):
"""
ResNeXt-20 (2x64d) model for CIFAR-100 from 'Aggregated Residual Transformations for Deep Neural Networks,'
http://arxiv.org/abs/1611.05431.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_resnext_cifar(classes=classes, blocks=20, cardinality=2, bottleneck_width=64,
model_name="resnext20_2x64d_cifar100", **kwargs) | 147b9ff5565fdbbcaab68d66fbfa3e8a5a4b6062 | 17,817 |
def test_target(target):
"""Returns the label for the corresponding target in the test tree."""
label = to_label(target)
test_package = label.package.replace("src/main/", "src/test/", 1)
return Label("@{workspace}//{package}:{target_name}".format(
workspace = label.workspace_name,
package = test_package,
target_name = label.name,
)) | 8b4391a5acdea7b851ef6606ad3dfa0b21132ae1 | 17,818 |
def pred_fwd_rc(model, input_npy, output_fwd, output_rc, replicates=1, batch_size=512):
"""Predict pathogenic potentials from a preprocessed numpy array and its reverse-complement."""
y_fwd, _ = predict_npy(model, input_npy, output_fwd, rc=False, replicates=replicates, batch_size=batch_size)
y_rc, _ = predict_npy(model, input_npy, output_rc, rc=True, replicates=replicates, batch_size=batch_size)
return y_fwd, y_rc | 90ed6c3a29dd3e24cef45415ec51381bf32a8b37 | 17,819 |
def get_api_version(version_string):
"""Returns checked APIVersion object"""
version_string = str(version_string)
api_version = APIVersion(version_string)
check_major_version(api_version)
return api_version | 9bbc88c3aee2139dc39367d4788d6c382f711903 | 17,821 |
import numpy as np
def evaluate_g9( tau7, tau8, tau9, tau10, tau11, s9 ):
"""
Evaluate the ninth constraint equation and also return the Jacobian
:param float tau7: The seventh tau parameter
:param float tau8: The eighth tau parameter
:param float tau9: The ninth tau parameter
:param float tau10: The tenth tau parameter
:param float tau11: The eleventh tau parameter
:param float s9: The value of the constraint
"""
return tau7 + 2 * tau8 - abs( tau9 + tau10 + tau11 ) - s9**2,\
{ 'tau7':1., 'tau8':2., 'tau9':float( -np.sign( tau9 ) ),\
'tau10':float( -np.sign( tau10 ) ),\
           'tau11':float( -np.sign( tau11 ) ), 's9':-2*s9 }
def iterable(value,
allow_empty = False,
forbid_literals = (str, bytes),
minimum_length = None,
maximum_length = None,
**kwargs):
"""Validate that ``value`` is a valid iterable.
.. hint::
This validator checks to ensure that ``value`` supports iteration using
any of Python's three iteration protocols: the ``__getitem__`` protocol,
the ``__iter__`` / ``next()`` protocol, or the inheritance from Python's
`Iterable` abstract base class.
If ``value`` supports any of these three iteration protocols, it will be
validated. However, if iteration across ``value`` raises an unsupported
exception, this function will raise an
:exc:`IterationFailedError <validator_collection.errors.IterationFailedError>`
:param value: The value to validate.
:param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value``
is empty. If ``False``, raises a
:class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if
``value`` is empty. Defaults to ``False``.
:type allow_empty: :class:`bool <python:bool>`
:param forbid_literals: A collection of literals that will be considered invalid
even if they are (actually) iterable. Defaults to :class:`str <python:str>` and
:class:`bytes <python:bytes>`.
:type forbid_literals: iterable
:param minimum_length: If supplied, indicates the minimum number of members
needed to be valid.
:type minimum_length: :class:`int <python:int>`
    :param maximum_length: If supplied, indicates the maximum number of members
      allowed to be valid.
:type maximum_length: :class:`int <python:int>`
:returns: ``value`` / :obj:`None <python:None>`
:rtype: iterable / :obj:`None <python:None>`
:raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
:raises NotAnIterableError: if ``value`` is not a valid iterable or
:obj:`None <python:None>`
:raises IterationFailedError: if ``value`` is a valid iterable, but iteration
fails for some unexpected exception
    :raises MinimumLengthError: if ``minimum_length`` is supplied and the length of
      ``value`` is less than ``minimum_length``
:raises MaximumLengthError: if ``maximum_length`` is supplied and the length of
``value`` is more than the ``maximum_length``
"""
if not value and not allow_empty:
raise errors.EmptyValueError('value (%s) was empty' % value)
elif value is None:
return None
minimum_length = integer(minimum_length, allow_empty = True, force_run = True) # pylint: disable=E1123
maximum_length = integer(maximum_length, allow_empty = True, force_run = True) # pylint: disable=E1123
if isinstance(value, forbid_literals):
raise errors.NotAnIterableError('value type (%s) not iterable' % type(value))
try:
iter(value)
except TypeError:
raise errors.NotAnIterableError('value type (%s) not iterable' % type(value))
except Exception as error:
raise errors.IterationFailedError('iterating across value raised an unexpected Exception: "%s"' % error)
if value and minimum_length is not None and len(value) < minimum_length:
raise errors.MinimumLengthError(
'value has fewer items than the minimum length %s' % minimum_length
)
if value and maximum_length is not None and len(value) > maximum_length:
raise errors.MaximumLengthError(
'value has more items than the maximum length %s' % maximum_length
)
return value | 57de6a43243d9c611725f1e9987e881cf1485b63 | 17,824 |
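A usage sketch, assuming the validator_collection-style errors module referenced above:

result = iterable([1, 2, 3], minimum_length=2)   # returns [1, 2, 3]
empty = iterable(None, allow_empty=True)         # returns None
# iterable('abc') would raise NotAnIterableError: str is a forbidden literal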
from typing import List
def settings_notification(color: bool, messages: List[ExitMessage]) -> Form:
"""Generate a warning notification for settings errors.
:param messages: List of messages to display
:param color: Bool to reflect if color is transferred or not
:returns: The form to display
"""
# Take the initial warning if there is one
if messages[0].prefix is ExitPrefix.WARNING:
title = messages.pop(0).to_lines(color=False, width=console_width(), with_prefix=True)[0]
else:
title = "Warning"
formatted = ExitMessages(messages).to_strings(color=color, width=console_width())
formatted_curses = CursesLines(
tuple(ansi_to_curses(line) for line in formatted),
)
form = Form(
FormType.NOTIFICATION,
title=title,
title_color=Color.YELLOW,
fields=[
FieldCursesInformation(
name="settings_warning",
information=formatted_curses,
),
],
)
return form | a1ff085c76e84e01ba293f17b662626a39fda26f | 17,825 |
def message_type(ctx: 'Context', *types):
"""Filters massage_type with one of selected types.
Assumes update_type one of message, edited_message, channel_post, edited_channel_post.
:param ctx:
:param types:
:return: True or False
"""
m = None
if ctx.update.update_type is UpdateType.message:
m = ctx.update.message
elif ctx.update.update_type is UpdateType.edited_message:
m = ctx.update.edited_message
elif ctx.update.update_type is UpdateType.channel_post:
m = ctx.update.channel_post
elif ctx.update.update_type is UpdateType.edited_channel_post:
m = ctx.update.edited_channel_post
else:
return False
if m.message_type not in types:
return False
return True | 231bbb3b802d6f6dcf4a22af7704fae3ce24e783 | 17,826 |
import math
def VSphere(R):
"""
    Volume of a sphere of radius R.
"""
return 4. * math.pi * R * R * R / 3. | 9e99d19683d9e86c2db79189809d24badccc197b | 17,827 |
from typing import Optional
from typing import Any
def resolve_variable(
var_name: str,
var_def: BlueprintVariableTypeDef,
provided_variable: Optional[Variable],
blueprint_name: str,
) -> Any:
"""Resolve a provided variable value against the variable definition.
Args:
var_name: The name of the defined variable on a blueprint.
var_def: A dictionary representing the defined variables attributes.
provided_variable: The variable value provided to the blueprint.
blueprint_name: The name of the blueprint that the variable is being applied to.
Returns:
The resolved variable value, could be any python object.
Raises:
MissingVariable: Raised when a variable with no default is not
provided a value.
UnresolvedBlueprintVariable: Raised when the provided variable is
not already resolved.
ValueError: Raised when the value is not the right type and cannot be
cast as the correct type. Raised by
:func:`runway.cfngin.blueprints.base.validate_variable_type`
ValidatorError: Raised when a validator raises an exception. Wraps the
original exception.
"""
try:
var_type = var_def["type"]
except KeyError:
raise VariableTypeRequired(blueprint_name, var_name) from None
if provided_variable:
if not provided_variable.resolved:
raise UnresolvedBlueprintVariable(blueprint_name, provided_variable)
value = provided_variable.value
else:
# Variable value not provided, try using the default, if it exists
# in the definition
try:
value = var_def["default"]
except KeyError:
raise MissingVariable(blueprint_name, var_name) from None
# If no validator, return the value as is, otherwise apply validator
validator = var_def.get("validator", lambda v: v)
try:
value = validator(value)
except Exception as exc:
raise ValidatorError(var_name, validator.__name__, value, exc) from exc
# Ensure that the resulting value is the correct type
value = validate_variable_type(var_name, var_type, value)
allowed_values = var_def.get("allowed_values")
if not validate_allowed_values(allowed_values, value):
raise ValueError(
f"Invalid value passed to {var_name} in Blueprint {blueprint_name}. "
f"Got '{value}', expected one of {allowed_values}."
)
return value | 1df7d4804f104c8f746999aaaad5f91ca96b5f78 | 17,828 |
def additional_args(**kwargs):
"""
Additional command-line arguments.
Provides additional command-line arguments that are unique to the extraction process.
Returns
-------
additional_args : dict
Dictionary of tuples in the form (fixed,keyword) that can be passed to an argument
parser to create a new command-line option
"""
module_name = kwargs.get('module_name', __name__)
base_defaults = get_defaults(module_name)
additional_args = {}
table_help = "The input metadata table to use."
table_args = ['table']
table_kwargs = {'help': table_help}
additional_args['table'] = (table_args, table_kwargs)
bkg_help = "Whether to subtract background before or after applying "
bkg_help += "flatfield. Default is 'flat_first'. Available options are "
bkg_help += "'flat_first', 'bkg_first' and 'bkg_only'."
bkg_args = ['-b', '--bkg_flat_order']
bkg_kwargs = {'dest': 'bkg_flat_order', 'default': base_defaults['bkg_flat_order'],
'help': bkg_help}
additional_args['bkg_flat_order'] = (bkg_args, bkg_kwargs)
plots_help = "Include result plots while running (default False)."
plots_args = ["-p", "--plots"]
    plots_kwargs = {'dest': 'plots', 'action': 'store_true',
                    'default': base_defaults['plots'], 'help': plots_help}
additional_args['plots'] = (plots_args, plots_kwargs)
return additional_args | 1b0f10f7f9c60077de9c580163b5e3893da63a83 | 17,830 |
from lxml import html
def extract_images_url(url, source):
"""
Extract image url for a chapter
"""
    r = s.get(url)  # assumes a module-level requests.Session named `s`
tree = html.fromstring(r.text)
if source == 'blogtruyen':
return tree.xpath('//*[@id="content"]/img/@src')
elif source == 'nettruyen':
return tree.xpath('//*[@class="reading-detail box_doc"]/div/img/@src')
elif source == 'image-container-manga':
return tree.xpath('//*[@class="image-container-manga"]/div/img/@src') | f2299d3e1dde38fc7ac2d3789e8145f5a71a1299 | 17,831 |
from typing import Dict
from typing import Any
from typing import Optional
from typing import Literal
from typing import List
import pathlib
def _ntuple_paths(
general_path: str,
region: Dict[str, Any],
sample: Dict[str, Any],
systematic: Dict[str, Any],
template: Optional[Literal["Up", "Down"]],
) -> List[pathlib.Path]:
"""Returns the paths to ntuples for a region-sample-systematic-template.
A path is built starting from the path specified in the general options in the
configuration file. This path can contain placeholders for region- and sample-
specific overrides, via ``{Region}`` and ``{Sample}``. For non-nominal templates, it
is possible to override the sample path if the ``SamplePaths`` option is specified
for the template. If ``SamplePaths`` is a list, return a list of paths (one per
entry in the list).
Args:
general_path (str): path specified in general settings, with sections that can
be overridden by region / sample settings
region (Dict[str, Any]): containing all region information
sample (Dict[str, Any]): containing all sample information
systematic (Dict[str, Any]): containing all systematic information
template (Optional[Literal["Up", "Down"]]): template considered: "Up", "Down",
or None for nominal
Returns:
List[pathlib.Path]: list of paths to ntuples
"""
# obtain region and sample paths, if they are defined
region_path = region.get("RegionPath", None)
sample_paths = sample.get("SamplePaths", None)
# check whether a systematic is being processed, and whether overrides exist
if template is not None:
# determine whether the template has an override for RegionPath specified
region_override = _check_for_override(systematic, template, "RegionPath")
if region_override is not None:
region_path = region_override
# check for SamplePaths override
sample_override = _check_for_override(systematic, template, "SamplePaths")
if sample_override is not None:
sample_paths = sample_override
region_template_exists = "{RegionPath}" in general_path
if region_path is not None:
if not region_template_exists:
log.warning(
"region override specified, but {RegionPath} not found in default path"
)
general_path = general_path.replace("{RegionPath}", region_path)
elif region_template_exists:
raise ValueError(f"no path setting found for region {region['Name']}")
sample_template_exists = "{SamplePaths}" in general_path
if sample_paths is not None:
if not sample_template_exists:
log.warning(
"sample override specified, but {SamplePaths} not found in default path"
)
# SamplePaths can be a list, so need to construct all possible paths
sample_paths = configuration._setting_to_list(sample_paths)
path_list = []
for sample_path in sample_paths:
path_list.append(general_path.replace("{SamplePaths}", sample_path))
elif sample_template_exists:
raise ValueError(f"no path setting found for sample {sample['Name']}")
else:
# no need for multiple paths, and no SamplePaths are present, so turn
# the existing path into a list
path_list = [general_path]
# convert the contents of path_lists to paths and return them
paths = [pathlib.Path(path) for path in path_list]
return paths | efb96f1b977c30c83890c2030f312c0066eed4d8 | 17,832 |
def svn_ra_do_diff2(*args):
"""
svn_ra_do_diff2(svn_ra_session_t session, svn_revnum_t revision, char diff_target,
svn_boolean_t recurse, svn_boolean_t ignore_ancestry,
svn_boolean_t text_deltas,
char versus_url, svn_delta_editor_t diff_editor,
void diff_baton, apr_pool_t pool) -> svn_error_t
"""
return _ra.svn_ra_do_diff2(*args) | 8964a6304582daf5631e8e26c8cf7ff9167837dd | 17,833 |
def ntuple_dict_length(ntuple_dict):
"""Returns a dictionary from track types to the number of tracks of
that type. Raises an exception of any value lists within one of its
track properties dicts are different lengths."""
return dict(map(lambda track_type, track_prop_dict:
(track_type, track_prop_dict_length(track_prop_dict)),
ntuple_dict.keys(), ntuple_dict.values())) | e5f7805dfb4a641268792e4e8982e21a05298f9e | 17,834 |
import numpy as np
def range_ngram_distrib(text, n, top_most=-1):
    """
    List n-grams with their counts, from the most popular to the least popular
    :param text: text
    :param n: n of n-gram
    :param top_most: count of most popular n-grams to be returned, or -1 to return all
    :return: list of ngrams, list of counts
    """
ngram_counts = count_ngrams(text, n)
if top_most >= 0:
ngrams = np.asarray(ngram_counts.most_common(top_most))[:, 0]
counts = np.asarray(np.asarray(ngram_counts.most_common(top_most))[:, 1], dtype=int)
else:
ngrams = np.asarray(ngram_counts.most_common())[:, 0]
counts = np.asarray(np.asarray(ngram_counts.most_common())[:, 1], dtype=int)
return ngrams, counts | 0822b2e7824aee6cc28e2a734dea5d9aff0df1ac | 17,835 |
import functools
def require_methods(*methods):
"""Returns a decorator which produces an error unless request.method is one
of |methods|.
"""
def decorator(func):
@functools.wraps(func)
def wrapped(request, *args, **kwds):
if request.method not in methods:
allowed = ', '.join(methods)
                rsp = HttpTextResponse('This requires a specific method: %s' % allowed,
                                       status=405)
                rsp['Allow'] = allowed
                return rsp
            return func(request, *args, **kwds)
return wrapped
return decorator | 6c02675836c95f9ee7bab124cc1287bc6d3dfb95 | 17,837 |
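A usage sketch of the decorator on a hypothetical view:

@require_methods('GET', 'POST')
def my_view(request):
    ...  # only reached for GET or POST; other methods get a 405 response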
def getUnit3d(prompt, default=None):
"""
    Read a Unit3d from the terminal with checking. This will accept a
    direction in any format accepted by Unit3d().parseAngle()
Allowed formats
* x,y,z or [x,y,z] three floats
* theta,psi or [theta,psi], in radians (quoted in "" for degrees)
* theta in radians (or quotes in "" for degrees)
    :param prompt: the prompt to be displayed
:type prompt: str
:param default: the default Unit3d
:type: Unit3d
"""
while True:
val = __getInput(prompt, default)
try:
if isinstance(val, str): # Its a string
val = eval(val) # Eval list
u = Unit3d().parseAngle(val)
return u
except (ValueError, NameError, ZeroDivisionError, SyntaxError):
logger.error("Conversion of '{0:s}' to Unit3d failed.".format(str(val))) | dd39a706114c72ce9059686e342e8a1db1e3464b | 17,838 |
def checkOverlap(ra, rb):
"""
check the overlap of two anchors,ra=[chr,left_start,left_end,chr,right_start,right_end]
"""
if checkOneEndOverlap(ra[1], ra[2], rb[1], rb[2]) and checkOneEndOverlap(
ra[4], ra[5], rb[4], rb[5]):
return True
return False | 9644ed7e2de9d9091e21a64c7c4cd43a0e0e1210 | 17,839 |
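The helper checkOneEndOverlap is not shown; a minimal sketch of the assumed interval-overlap test:

def checkOneEndOverlap(start_a, end_a, start_b, end_b):
    """True if intervals [start_a, end_a] and [start_b, end_b] overlap."""
    return start_a <= end_b and start_b <= end_a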
import typing
def saveable(item: praw.models.reddit.base.RedditBase) -> dict[str, typing.Any]:
"""Generate a saveable dict from an instance"""
result = {k: legalize(v) for k, v in item.__dict__.items() if not k.startswith("_")}
return _parent_ids_interpreted(result) | 6e9e7092045e321de2beeba7debbd6dc2a2b2e61 | 17,840 |
import re
def decode_Tex_accents(in_str):
"""Converts a string containing LaTex accents (i.e. "{\\`{O}}") to ASCII
(i.e. "O"). Useful for correcting author names when bib entries were
queried from web via doi
:param in_str: input str to decode
:type in_str: str
:return: corrected string
:rtype: str
"""
# replaces latex accents with ascii letter (no accent)
pat = "\{\\\\'\{(\w)\}\}"
out = in_str
for x in re.finditer(pat, in_str):
out = out.replace(x.group(), x.groups()[0])
# replace latex {\textsinglequote} with underscore
out = out.replace('{\\textquotesingle}', "_")
# replace actual single quotes with underscore for bibtex compatibility
out = out.replace("'", '_')
return out | 2a4bd71b53cdab047a1ddd1e0e6fd6e9c81b0e0a | 17,841 |
from typing import Mapping
def tensor_dict_eq(dict1: Mapping, dict2: Mapping) -> bool:
"""Checks the equivalence between 2 dictionaries, that can contain torch Tensors as value. The dictionary can be
nested with other dictionaries or lists, they will be checked recursively.
:param dict1: Dictionary to compare.
:param dict2: Dictionary to compare.
:return: True, if dict1 and dict2 are equal, false otherwise.
"""
if len(dict1) != len(dict2):
return False
    # note: comparison is order-sensitive, since items are zipped pairwise
    for (key1, value1), (key2, value2) in zip(dict1.items(), dict2.items()):
key_equal = key1 == key2
value_equal = tensor_container_element_eq(value1, value2)
if (not key_equal) or (not value_equal):
return False
return True | db3d7d23e633f5a240d0d0d13f6836494dc44e20 | 17,842 |
import numpy as np
def calculate_stability(derivatives):
"""
Calculate the stability-axis derivatives with the body-axis derivatives.
"""
d = derivatives
if 'stability' not in d:
d['stability'] = {}
slat = calculate_stability_lateral(d['body'], np.deg2rad(d['alpha0']))
slong = calculate_stability_longitudinal(d['body'], np.deg2rad(d['alpha0']))
d['stability'].update(slat)
d['stability'].update(slong)
return d | 888f63454265b3811751ae91654a9de083f25fec | 17,843 |
import datetime
import pandas as pd
def read_malmipsdetect(file_detect):
"""
This function is used to read the MALMI detection file which contains detection
information, that is for each detected event how many stations are triggered,
how many phases are triggered. Those information can be used for quality control.
Parameters
----------
file_detect : str
The filename including path of the input file.
Raises
------
ValueError
datetime format is not consistent with defined one.
Returns
-------
    detect_info : dict
detect_info['starttime'] : list of datetime
starttime and folder name of the detected event;
detect_info['endtime'] : list of datetime
endtime of the detected event;
detect_info['station'] : list of float
number of stations triggered of the detected event;
detect_info['phase'] : list of float
number of phase triggered of the detected event;
"""
format_f = ['starttime', 'endtime', 'station', 'phase']
datetime_format_26 = '%Y-%m-%dT%H:%M:%S.%f' # datetime format in the input file
datetime_format_19 = '%Y-%m-%dT%H:%M:%S' # datetime format in the input file
# read file
df = pd.read_csv(file_detect, delimiter=' ', header=None, names=format_f,
skipinitialspace=True, encoding='utf-8', comment='#')
# format output data
detect_info = {}
detect_info['starttime'] = []
detect_info['endtime'] = []
for ii in range(len(df)):
if len(df.loc[ii,'starttime']) == 19:
detect_info['starttime'].append(datetime.datetime.strptime(df.loc[ii,'starttime'], datetime_format_19)) # origin time
elif len(df.loc[ii,'starttime']) == 26:
detect_info['starttime'].append(datetime.datetime.strptime(df.loc[ii,'starttime'], datetime_format_26)) # origin time
else:
raise ValueError('Error! Input datetime format not recoginzed!')
if len(df.loc[ii,'endtime']) == 19:
detect_info['endtime'].append(datetime.datetime.strptime(df.loc[ii,'endtime'], datetime_format_19)) # origin time
elif len(df.loc[ii,'endtime']) == 26:
detect_info['endtime'].append(datetime.datetime.strptime(df.loc[ii,'endtime'], datetime_format_26)) # origin time
else:
raise ValueError('Error! Input datetime format not recoginzed!')
detect_info['station'] = list(df['station'])
detect_info['phase'] = list(df['phase'])
return detect_info | cf42172e9f286254f31b2e57361c25360ed73d10 | 17,846 |
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
"""Generates the URL for authorizing access.
This uses the "OAuth2 for Installed Applications" flow described at
https://developers.google.com/accounts/docs/OAuth2InstalledApp
Args:
client_id: Client ID obtained by registering your app.
scope: scope for access token, e.g. 'https://mail.google.com'
Returns:
A URL that the user should visit in their browser.
"""
params = {}
params['client_id'] = client_id
params['redirect_uri'] = REDIRECT_URI
params['scope'] = scope
params['response_type'] = 'code'
return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
FormatUrlParams(params)) | b4471e78eab772a57be8d3073451050fd78d904c | 17,847 |
def get_scorekeeper_details():
"""Retrieve a list of scorekeeper and their corresponding
appearances"""
return scorekeepers.get_scorekeeper_details(database_connection) | 78092d0ae6bcb21ee86a1fddfc678472c81eb55f | 17,849 |
def layer_norm(input_tensor, axis):
"""Run layer normalization on the axis dimension of the tensor."""
layer_norma = tf.keras.layers.LayerNormalization(axis = axis)
return layer_norma(input_tensor) | 9687dedf8c3a624013c5188401e86d7ef6d73969 | 17,850 |
import numpy as np
def compute_depth(disparity, focal_length, distance_between_cameras):
"""
Computes depth in meters
Input:
-Disparity in pixels
-Focal Length in pixels
-Distance between cameras in meters
Output:
-Depth in meters
"""
with np.errstate(divide='ignore'): #ignore division by 0
# standard depth and disparity formula
depth = (focal_length * distance_between_cameras) / disparity
return depth | 5ea475dae1d4aa0c429a7f6766293a39d403904d | 17,851 |
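A worked usage sketch (the camera parameters are hypothetical):

import numpy as np
disparity = np.array([32.0, 16.0, 0.0])                    # pixels
depth = compute_depth(disparity, focal_length=700.0,
                      distance_between_cameras=0.12)
# -> [2.625, 5.25, inf]; zero disparity maps to infinite depth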
from random import seed, shuffle
def lecture(source=None,target=None,fseed=100,fpercent=100):
"""
    Shuffle the source and target lists (seeded via fseed, default 100) and
    return copies containing the first fpercent% of each (at least one item)
"""
seed(fseed)
try:
copysource = []
copytarget = []
if(source!=None and target!=None):
source = create_inter_without(source)
target = create_inter_without(target)
shuffle(source)
shuffle(target)
for i in range(0,(int(len(source)*fpercent/100))):
copysource.append(source[i])
if(len(copysource)==0):
copysource.append(source[0])
for i in range(0,(int(len(target)*fpercent/100))):
copytarget.append(target[i])
if(len(copytarget)==0):
copytarget.append(target[0])
return copysource,copytarget
except Exception as e:
print(e) | b0878ab2b3d888db984aa0644080578a85e9e554 | 17,852 |
def getScale(im, scale, max_scale=None):
"""
获得图片的放缩比例
:param im:
:param scale:
:param max_scale:
:return:
"""
f = float(scale) / min(im.shape[0], im.shape[1])
    if max_scale is not None and f * max(im.shape[0], im.shape[1]) > max_scale:
f = float(max_scale) / max(im.shape[0], im.shape[1])
return f | 52ae195714c1d39bccec553797bf6dbf2c6c2795 | 17,853 |
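A usage sketch resizing an image by the computed factor (the file path is hypothetical):

import cv2
im = cv2.imread('page.jpg')
f = getScale(im, scale=600, max_scale=1200)
resized = cv2.resize(im, None, fx=f, fy=f)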
import numpy as np
def load_swc(path):
"""Load swc morphology from file
Used for sKCSD
Parameters
----------
path : str
Returns
-------
morphology : np.array
"""
morphology = np.loadtxt(path)
return morphology | 0b0a4f82344b6c16180b4a52b1077a0f28966fde | 17,854 |
import numpy
def quintic_extrap(ys, xs):
    """
    Quintic extrapolate from six x,y pairs to x = 0.
    ys: y values from the x,y pairs. Note that these can be arrays of values.
    xs: x values from the x,y pairs. These should be scalars.
    Returns extrapolated y at x=0.
    """
    # Unpack here instead of in the signature: tuple parameters are Python 2 only.
    y1, y2, y3, y4, y5, y6 = ys
    x1, x2, x3, x4, x5, x6 = xs
    # This horrid implementation came from using CForm in Mathematica.
    Power = numpy.power
return (-(x1*(x1 - x3)*x3*(x1 - x4)*(x3 - x4)*x4*(x1 - x5)* (x3 - x5)*(x4 - x5)*x5*(x1 - x6)*(x3 - x6)* (x4 - x6)*(x5 - x6)*x6*y2) + Power(x2,5)*(-(x1*(x1 - x4)*x4*(x1 - x5)* (x4 - x5)*x5*(x1 - x6)*(x4 - x6)*(x5 - x6)* x6*y3) + Power(x3,4)* (-(x1*(x1 - x5)*x5*(x1 - x6)*(x5 - x6)*x6* y4) + Power(x4,3)* (x1*x6*(-x1 + x6)*y5 + Power(x5,2)*(x6*y1 - x1*y6) + x5*(-(Power(x6,2)*y1) + Power(x1,2)*y6)) + Power(x4,2)* (x1*x6*(Power(x1,2) - Power(x6,2))*y5 + Power(x5,3)*(-(x6*y1) + x1*y6) + x5*(Power(x6,3)*y1 - Power(x1,3)*y6)) + x4*(Power(x1,2)*Power(x6,2)*(-x1 + x6)* y5 + Power(x5,3)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,3)*y1) + Power(x1,3)*y6))) + Power(x3,3)* (x1*x5*x6*(Power(x1,3)*(x5 - x6) + x5*x6*(Power(x5,2) - Power(x6,2)) + x1*(-Power(x5,3) + Power(x6,3)))*y4 + Power(x4,4)* (x1*(x1 - x6)*x6*y5 + Power(x5,2)*(-(x6*y1) + x1*y6) + x5*(Power(x6,2)*y1 - Power(x1,2)*y6)) + x4*(Power(x1,2)*Power(x6,2)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,4)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,2)* (x1*x6*(-Power(x1,3) + Power(x6,3))*y5 + Power(x5,4)*(x6*y1 - x1*y6) + x5*(-(Power(x6,4)*y1) + Power(x1,4)*y6))) + x3*(Power(x1,2)*(x1 - x5)*Power(x5,2)* (x1 - x6)*(x5 - x6)*Power(x6,2)*y4 + Power(x4,4)* (Power(x1,2)*(x1 - x6)*Power(x6,2)*y5 + Power(x5,3)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,3)*y1 - Power(x1,3)*y6)) + Power(x4,2)* (Power(x1,3)*(x1 - x6)*Power(x6,3)*y5 + Power(x5,4)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,3)* (Power(x1,2)*Power(x6,2)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,4)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,4)*y1) + Power(x1,4)*y6))) + Power(x3,2)* (x1*x5*x6*(Power(x5,2)*Power(x6,2)* (-x5 + x6) + Power(x1,3)* (-Power(x5,2) + Power(x6,2)) + Power(x1,2)*(Power(x5,3) - Power(x6,3))) *y4 + Power(x4,4)* (x1*x6*(-Power(x1,2) + Power(x6,2))*y5 + Power(x5,3)*(x6*y1 - x1*y6) + x5*(-(Power(x6,3)*y1) + Power(x1,3)*y6)) + Power(x4,3)* (x1*x6*(Power(x1,3) - Power(x6,3))*y5 + Power(x5,4)*(-(x6*y1) + x1*y6) + x5*(Power(x6,4)*y1 - Power(x1,4)*y6)) + x4*(Power(x1,3)*Power(x6,3)*(-x1 + x6)* y5 + Power(x5,4)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,4)*y1) + Power(x1,4)*y6)))) + Power(x2,4)*(x1*(x1 - x4)*x4*(x1 - x5)* (x4 - x5)*x5*(x1 - x6)*(x4 - x6)*(x5 - x6)* x6*(x1 + x4 + x5 + x6)*y3 + Power(x3,5)*(x1*(x1 - x5)*x5*(x1 - x6)* (x5 - x6)*x6*y4 + Power(x4,3)* (x1*(x1 - x6)*x6*y5 + Power(x5,2)*(-(x6*y1) + x1*y6) + x5*(Power(x6,2)*y1 - Power(x1,2)*y6)) + x4*(Power(x1,2)*(x1 - x6)*Power(x6,2)*y5 + Power(x5,3)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,3)*y1 - Power(x1,3)*y6)) + Power(x4,2)* (x1*x6*(-Power(x1,2) + Power(x6,2))*y5 + Power(x5,3)*(x6*y1 - x1*y6) + x5*(-(Power(x6,3)*y1) + Power(x1,3)*y6))) + Power(x3,2)* (x1*x5*(Power(x1,2) - Power(x5,2))*x6* (Power(x1,2) - Power(x6,2))* (Power(x5,2) - Power(x6,2))*y4 + Power(x4,5)* (x1*x6*(Power(x1,2) - Power(x6,2))*y5 + Power(x5,3)*(-(x6*y1) + x1*y6) + x5*(Power(x6,3)*y1 - Power(x1,3)*y6)) + x4*(Power(x1,3)*Power(x6,3)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,5)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,3)* (x1*x6*(-Power(x1,4) + Power(x6,4))*y5 + Power(x5,5)*(x6*y1 - x1*y6) + x5*(-(Power(x6,5)*y1) + Power(x1,5)*y6))) + Power(x3,3)* (x1*x5*x6*(-(Power(x5,4)*x6) + x5*Power(x6,4) + Power(x1,4)*(-x5 + x6) + x1*(Power(x5,4) - Power(x6,4)))*y4 + Power(x4,5)* (x1*x6*(-x1 + x6)*y5 + 
Power(x5,2)*(x6*y1 - x1*y6) + x5*(-(Power(x6,2)*y1) + Power(x1,2)*y6)) + Power(x4,2)* (x1*x6*(Power(x1,4) - Power(x6,4))*y5 + Power(x5,5)*(-(x6*y1) + x1*y6) + x5*(Power(x6,5)*y1 - Power(x1,5)*y6)) + x4*(Power(x1,2)*Power(x6,2)* (-Power(x1,3) + Power(x6,3))*y5 + Power(x5,5)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,5)*y1) + Power(x1,5)*y6))) + x3*(Power(x1,2)*Power(x5,2)*Power(x6,2)* (-(Power(x5,3)*x6) + x5*Power(x6,3) + Power(x1,3)*(-x5 + x6) + x1*(Power(x5,3) - Power(x6,3)))*y4 + Power(x4,5)* (Power(x1,2)*Power(x6,2)*(-x1 + x6)*y5 + Power(x5,3)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,3)*y1) + Power(x1,3)*y6)) + Power(x4,3)* (Power(x1,2)*Power(x6,2)* (Power(x1,3) - Power(x6,3))*y5 + Power(x5,5)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,2)* (Power(x1,3)*Power(x6,3)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,5)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,5)*y1) + Power(x1,5)*y6)))) + Power(x2,3)*(-(x1*(x1 - x4)*x4*(x1 - x5)* (x4 - x5)*x5*(x1 - x6)*(x4 - x6)*(x5 - x6)* x6*(x5*x6 + x4*(x5 + x6) + x1*(x4 + x5 + x6))*y3) + Power(x3,5)*(x1*x5*x6* (-(Power(x5,3)*x6) + x5*Power(x6,3) + Power(x1,3)*(-x5 + x6) + x1*(Power(x5,3) - Power(x6,3)))*y4 + Power(x4,4)* (x1*x6*(-x1 + x6)*y5 + Power(x5,2)*(x6*y1 - x1*y6) + x5*(-(Power(x6,2)*y1) + Power(x1,2)*y6)) + Power(x4,2)* (x1*x6*(Power(x1,3) - Power(x6,3))*y5 + Power(x5,4)*(-(x6*y1) + x1*y6) + x5*(Power(x6,4)*y1 - Power(x1,4)*y6)) + x4*(Power(x1,2)*Power(x6,2)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,4)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,4)*y1) + Power(x1,4)*y6))) + Power(x3,4)* (x1*x5*x6*(Power(x1,4)*(x5 - x6) + x5*x6*(Power(x5,3) - Power(x6,3)) + x1*(-Power(x5,4) + Power(x6,4)))*y4 + Power(x4,5)* (x1*(x1 - x6)*x6*y5 + Power(x5,2)*(-(x6*y1) + x1*y6) + x5*(Power(x6,2)*y1 - Power(x1,2)*y6)) + x4*(Power(x1,2)*Power(x6,2)* (Power(x1,3) - Power(x6,3))*y5 + Power(x5,5)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,2)* (x1*x6*(-Power(x1,4) + Power(x6,4))*y5 + Power(x5,5)*(x6*y1 - x1*y6) + x5*(-(Power(x6,5)*y1) + Power(x1,5)*y6))) + x3*(Power(x1,2)*Power(x5,2)* Power(x6,2)* (Power(x5,2)*(x5 - x6)*Power(x6,2) + Power(x1,3)* (Power(x5,2) - Power(x6,2)) + Power(x1,2)*(-Power(x5,3) + Power(x6,3)))*y4 + Power(x4,5)* (Power(x1,2)*Power(x6,2)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,4)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,2)* (Power(x1,4)*(x1 - x6)*Power(x6,4)*y5 + Power(x5,5)* (-(Power(x6,4)*y1) + Power(x1,4)*y6) + Power(x5,4)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,4)* (Power(x1,2)*Power(x6,2)* (-Power(x1,3) + Power(x6,3))*y5 + Power(x5,5)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,5)*y1) + Power(x1,5)*y6))) + Power(x3,2)* (x1*x5*x6*(Power(x5,3)*Power(x6,3)* (-x5 + x6) + Power(x1,4)* (-Power(x5,3) + Power(x6,3)) + Power(x1,3)*(Power(x5,4) - Power(x6,4))) *y4 + Power(x4,5)* (x1*x6*(-Power(x1,3) + Power(x6,3))*y5 + Power(x5,4)*(x6*y1 - x1*y6) + x5*(-(Power(x6,4)*y1) + Power(x1,4)*y6)) + Power(x4,4)* (x1*x6*(Power(x1,4) - Power(x6,4))*y5 + Power(x5,5)*(-(x6*y1) + x1*y6) + x5*(Power(x6,5)*y1 - Power(x1,5)*y6)) + x4*(Power(x1,4)*Power(x6,4)*(-x1 + x6)* y5 + Power(x5,5)* (Power(x6,4)*y1 - Power(x1,4)*y6) + Power(x5,4)* (-(Power(x6,5)*y1) + Power(x1,5)*y6)))) + x2*(-(Power(x1,2)*(x1 - x4)*Power(x4,2)* (x1 - x5)*(x4 - x5)*Power(x5,2)*(x1 - x6)* (x4 - x6)*(x5 - x6)*Power(x6,2)*y3) + 
Power(x3,5)*(-(Power(x1,2)*(x1 - x5)* Power(x5,2)*(x1 - x6)*(x5 - x6)* Power(x6,2)*y4) + Power(x4,4)* (Power(x1,2)*Power(x6,2)*(-x1 + x6)*y5 + Power(x5,3)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,3)*y1) + Power(x1,3)*y6)) + Power(x4,3)* (Power(x1,2)*Power(x6,2)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,4)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,2)* (Power(x1,3)*Power(x6,3)*(-x1 + x6)*y5 + Power(x5,4)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,4)*y1) + Power(x1,4)*y6))) + Power(x3,4)* (Power(x1,2)*Power(x5,2)*Power(x6,2)* (Power(x1,3)*(x5 - x6) + x5*x6*(Power(x5,2) - Power(x6,2)) + x1*(-Power(x5,3) + Power(x6,3)))*y4 + Power(x4,5)* (Power(x1,2)*(x1 - x6)*Power(x6,2)*y5 + Power(x5,3)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,3)*y1 - Power(x1,3)*y6)) + Power(x4,2)* (Power(x1,3)*Power(x6,3)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,5)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,3)* (Power(x1,2)*Power(x6,2)* (-Power(x1,3) + Power(x6,3))*y5 + Power(x5,5)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,5)*y1) + Power(x1,5)*y6))) + Power(x3,2)* (Power(x1,3)*(x1 - x5)*Power(x5,3)*(x1 - x6)* (x5 - x6)*Power(x6,3)*y4 + Power(x4,5)* (Power(x1,3)*(x1 - x6)*Power(x6,3)*y5 + Power(x5,4)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,3)* (Power(x1,4)*(x1 - x6)*Power(x6,4)*y5 + Power(x5,5)* (-(Power(x6,4)*y1) + Power(x1,4)*y6) + Power(x5,4)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,4)* (Power(x1,3)*Power(x6,3)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,5)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,5)*y1) + Power(x1,5)*y6))) + Power(x3,3)* (Power(x1,2)*Power(x5,2)*Power(x6,2)* (Power(x5,2)*Power(x6,2)*(-x5 + x6) + Power(x1,3)* (-Power(x5,2) + Power(x6,2)) + Power(x1,2)*(Power(x5,3) - Power(x6,3))) *y4 + Power(x4,5)* (Power(x1,2)*Power(x6,2)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,4)* (Power(x6,2)*y1 - Power(x1,2)*y6) + Power(x5,2)* (-(Power(x6,4)*y1) + Power(x1,4)*y6)) + Power(x4,4)* (Power(x1,2)*Power(x6,2)* (Power(x1,3) - Power(x6,3))*y5 + Power(x5,5)* (-(Power(x6,2)*y1) + Power(x1,2)*y6) + Power(x5,2)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,2)* (Power(x1,4)*Power(x6,4)*(-x1 + x6)*y5 + Power(x5,5)* (Power(x6,4)*y1 - Power(x1,4)*y6) + Power(x5,4)* (-(Power(x6,5)*y1) + Power(x1,5)*y6)))) + Power(x2,2)*(x1*(x1 - x4)*x4*(x1 - x5)* (x4 - x5)*x5*(x1 - x6)*(x4 - x6)*(x5 - x6)* x6*(x4*x5*x6 + x1*(x5*x6 + x4*(x5 + x6)))*y3 + Power(x3,5)* (x1*x5*x6*(Power(x5,2)*(x5 - x6)* Power(x6,2) + Power(x1,3)* (Power(x5,2) - Power(x6,2)) + Power(x1,2)*(-Power(x5,3) + Power(x6,3)))*y4 + Power(x4,4)* (x1*x6*(Power(x1,2) - Power(x6,2))*y5 + Power(x5,3)*(-(x6*y1) + x1*y6) + x5*(Power(x6,3)*y1 - Power(x1,3)*y6)) + x4*(Power(x1,3)*(x1 - x6)*Power(x6,3)*y5 + Power(x5,4)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,4)*y1 - Power(x1,4)*y6)) + Power(x4,3)* (x1*x6*(-Power(x1,3) + Power(x6,3))*y5 + Power(x5,4)*(x6*y1 - x1*y6) + x5*(-(Power(x6,4)*y1) + Power(x1,4)*y6))) + Power(x3,3)* (x1*x5*x6*(Power(x5,3)*(x5 - x6)* Power(x6,3) + Power(x1,4)* (Power(x5,3) - Power(x6,3)) + Power(x1,3)*(-Power(x5,4) + Power(x6,4)))*y4 + Power(x4,5)* (x1*x6*(Power(x1,3) - Power(x6,3))*y5 + Power(x5,4)*(-(x6*y1) + x1*y6) + x5*(Power(x6,4)*y1 - Power(x1,4)*y6)) + x4*(Power(x1,4)*(x1 - x6)*Power(x6,4)*y5 + Power(x5,5)* (-(Power(x6,4)*y1) + Power(x1,4)*y6) + 
Power(x5,4)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,4)* (x1*x6*(-Power(x1,4) + Power(x6,4))*y5 + Power(x5,5)*(x6*y1 - x1*y6) + x5*(-(Power(x6,5)*y1) + Power(x1,5)*y6))) + Power(x3,4)* (-(x1*x5*(Power(x1,2) - Power(x5,2))*x6* (Power(x1,2) - Power(x6,2))* (Power(x5,2) - Power(x6,2))*y4) + Power(x4,5)* (x1*x6*(-Power(x1,2) + Power(x6,2))*y5 + Power(x5,3)*(x6*y1 - x1*y6) + x5*(-(Power(x6,3)*y1) + Power(x1,3)*y6)) + Power(x4,3)* (x1*x6*(Power(x1,4) - Power(x6,4))*y5 + Power(x5,5)*(-(x6*y1) + x1*y6) + x5*(Power(x6,5)*y1 - Power(x1,5)*y6)) + x4*(Power(x1,3)*Power(x6,3)* (-Power(x1,2) + Power(x6,2))*y5 + Power(x5,5)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,5)*y1) + Power(x1,5)*y6))) + x3*(-(Power(x1,3)*(x1 - x5)*Power(x5,3)* (x1 - x6)*(x5 - x6)*Power(x6,3)*y4) + Power(x4,5)* (Power(x1,3)*Power(x6,3)*(-x1 + x6)*y5 + Power(x5,4)* (Power(x6,3)*y1 - Power(x1,3)*y6) + Power(x5,3)* (-(Power(x6,4)*y1) + Power(x1,4)*y6)) + Power(x4,4)* (Power(x1,3)*Power(x6,3)* (Power(x1,2) - Power(x6,2))*y5 + Power(x5,5)* (-(Power(x6,3)*y1) + Power(x1,3)*y6) + Power(x5,3)* (Power(x6,5)*y1 - Power(x1,5)*y6)) + Power(x4,3)* (Power(x1,4)*Power(x6,4)*(-x1 + x6)*y5 + Power(x5,5)* (Power(x6,4)*y1 - Power(x1,4)*y6) + Power(x5,4)* (-(Power(x6,5)*y1) + Power(x1,5)*y6)))))/((x1 - x2)*(x1 - x3)*(-x2 + x3)*(x1 - x4)* (-x2 + x4)*(-x3 + x4)*(x1 - x5)*(x2 - x5)* (x3 - x5)*(x4 - x5)*(x1 - x6)*(x2 - x6)* (x3 - x6)*(x4 - x6)*(x5 - x6)) | 41090e09f2b7f58e98fa9fc91ea6000d70594046 | 17,856 |
def answer_question_interactively(question):
"""Returns True or False for t yes/no question to the user"""
while True:
answer = input(question + '? [Y or N]: ')
if answer.lower() == 'y':
return True
elif answer.lower() == 'n':
return False | 52a123cc2237441de3b0243da268e53b7cc0d807 | 17,857 |
def connect(
instance_id,
database_id,
project=None,
credentials=None,
pool=None,
user_agent=None,
):
"""Creates a connection to a Google Cloud Spanner database.
:type instance_id: str
:param instance_id: The ID of the instance to connect to.
:type database_id: str
:param database_id: The ID of the database to connect to.
:type project: str
:param project: (Optional) The ID of the project which owns the
instances, tables and data. If not provided, will
attempt to determine from the environment.
:type credentials: Union[:class:`~google.auth.credentials.Credentials`, str]
:param credentials: (Optional) The authorization credentials to attach to
requests. These credentials identify this application
to the service. These credentials may be specified as
a file path indicating where to retrieve the service
account JSON for the credentials to connect to
Cloud Spanner. If none are specified, the client will
attempt to ascertain the credentials from the
environment.
:type pool: Concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional). Session pool to be used by database.
:type user_agent: str
:param user_agent: (Optional) User agent to be used with this connection's
requests.
:rtype: :class:`google.cloud.spanner_dbapi.connection.Connection`
:returns: Connection object associated with the given Google Cloud Spanner
resource.
:raises: :class:`ValueError` in case of given instance/database
doesn't exist.
"""
client_info = ClientInfo(
user_agent=user_agent or DEFAULT_USER_AGENT, python_version=PY_VERSION
)
if isinstance(credentials, str):
client = spanner.Client.from_service_account_json(
credentials, project=project, client_info=client_info
)
else:
client = spanner.Client(
project=project, credentials=credentials, client_info=client_info
)
instance = client.instance(instance_id)
if not instance.exists():
raise ValueError("instance '%s' does not exist." % instance_id)
database = instance.database(database_id, pool=pool)
if not database.exists():
raise ValueError("database '%s' does not exist." % database_id)
conn = Connection(instance, database)
if pool is not None:
conn._own_pool = False
return conn | 7cff910615df346a5a503dec5e1938476cb701e6 | 17,858 |
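# Hypothetical usage sketch for connect(); the instance, database and project
# names below are placeholders, not values from the original source, and a
# real run requires Cloud Spanner credentials:
#
#     conn = connect("test-instance", "example-db", project="my-project")
#     cursor = conn.cursor()
#     cursor.execute("SELECT 1")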
from typing import List
def intersect(linked_list_1: List, linked_list_2: List):
"""Intersection point of two linked list."""
length_diff = len(linked_list_1) - len(linked_list_2)
enum1 = list(enumerate(linked_list_1))
enum2 = list(enumerate(linked_list_2))
if length_diff < 0:
enum2 = _helper(length_diff=length_diff, linked_list=enum2)
else:
enum1 = _helper(length_diff=length_diff, linked_list=enum1)
    for i, j in zip(enum1, enum2):
        if i[1] == j[1]:
            return (i[0], j[0])
return None | dea64a6618ab3bda421250036e4eea8fa316a6ec | 17,859 |
import torch
import torch.nn as nn
from torchvision import transforms
def content_loss(sharp_images, deblur_images, cont_net):
"""
Computes the Content Loss to compare the
reconstructed (deblurred) and the original(sharp) images
Takes the output feature maps of the relu4_3 layer of pretrained VGG19 to compare the content between
    images, as proposed in:
    Johnson et al., "Perceptual losses for real-time style transfer and super-resolution" (ECCV 2016).
"""
# Torchvision models documentation:
# All pre-trained models expect input images normalized in the same way, The images have to be loaded in
# to a range of [0, 1] and then normalized using mean = [0.485, 0.456, 0.406] and std = [0.229, 0.224, 0.225].
deblur_images = (deblur_images + 1) * 0.5
sharp_images = (sharp_images + 1) * 0.5
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
deblur_images = normalize(deblur_images)
    sharp_images = normalize(sharp_images)
content_deblur = cont_net(deblur_images)
content_sharp = cont_net(sharp_images)
content_sharp = content_sharp.detach()
loss = nn.MSELoss()
lossC = torch.mean(loss(content_deblur,content_sharp))
return lossC | f55b4c22391d6e562afb927251fb6af267f9da08 | 17,861 |
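# A minimal sketch of building the `cont_net` argument for content_loss().
# The relu4_3 slice index (features[:25] for torchvision's VGG19) is an
# assumption about torchvision's layer numbering, not taken from the source.
import torch
import torch.nn as nn
from torchvision import models

def make_content_net():
    vgg = models.vgg19(pretrained=True)
    cont_net = nn.Sequential(*list(vgg.features)[:25]).eval()
    for p in cont_net.parameters():
        p.requires_grad = False  # the feature extractor stays fixed
    return cont_net

# Images are expected in [-1, 1], matching the rescaling inside content_loss:
#     sharp = torch.rand(1, 3, 256, 256) * 2 - 1
#     deblur = torch.rand(1, 3, 256, 256) * 2 - 1
#     lossC = content_loss(sharp, deblur, make_content_net())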
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.colors import ListedColormap

# Cashflow, Scales, parse_d, get_final_period and default_colormap come from
# the surrounding package and are not shown in this snippet.
def generate_cashflow_diagram(
cashflows, d=None, net=False, scale=None, color=None, title=None, **kwargs):
""" Generates a barplot showing cashflows over time
Given a set of cashflows, produces a stacked barplot with bars at each
period. The height of each bar is set by the amount of cash produced
by a cashflow at the specified period.
Note that this function does not display the produced plot; call
matplotlib.pyplot.show() to view the plot.
Args:
cashflows: A sequence of cashflows to plot
d: Optional; A two-integer list whose elements represent the first
and final periods to be plotted
net: Optional; When true, only the net cashflows are shown, and the
individual cashflow information is omitted.
scale: Optional; The y-axis scale; must be a member or key of Scales
kwargs: A list of keyword arguments to be passed to Dataframe.plot()
Returns:
A Figure and Axis for the plot
"""
# Parse Args
cashflows = (cashflows,) if isinstance(cashflows, Cashflow) else cashflows
d = parse_d(d or get_final_period(cashflows, finite=True) or 5)
net = bool(net)
if color:
color = color.colors if isinstance(color, ListedColormap) else color
else:
color = default_colormap.colors
if scale:
scale = (
scale if isinstance(scale, Scales)
else Scales[scale.upper()])
# Extract information
periods = list(range(d[0], d[1] + 1))
titles = [cashflow.get_title() for cashflow in cashflows]
cashflows = [
[cashflow[n].amount for cashflow in cashflows]
for n in periods
]
# Format information
if net:
cashflows = [[sum(cashflows[n])] for n in periods]
if scale:
cashflows = [
[cashflow * scale.value for cashflow in cashflows[n]]
for n in periods
]
# Plot the Cashflow Diagram with matplotlib
plotdata = pd.DataFrame(cashflows, index=periods, columns=titles)
fig, ax = plt.subplots()
plotdata.plot(kind="bar", stacked="true", ax=ax, color=color, **kwargs)
ax.set_title(title)
ax.set_ylabel("Cashflows" + (f" [{scale.name.title()}]" if scale else ""))
ax.set_xlabel("Period")
ax.axhline()
return fig, ax | 88afbd975a20041dccd24b8dc25899347a0b44ae | 17,862 |
import collections.abc
def is_iterable(obj):
# type: (Any) -> bool
"""
Returns True if obj is a non-string iterable
"""
    # collections.Iterable was removed in Python 3.10; use collections.abc
    if is_str(obj) or not isinstance(obj, collections.abc.Iterable):
        return False
    else:
        return True | 92db9be57250a53cf27118c9a4c91344a9d14fcb | 17,863
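# Quick checks, assuming is_str (not defined in this snippet) is a simple
# str/bytes test:
def is_str(obj):
    return isinstance(obj, (str, bytes))

assert is_iterable([1, 2, 3])
assert not is_iterable("abc")   # strings are deliberately excluded
assert not is_iterable(42)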
def other_players(me, r):
"""Return a list of all players but me, in turn order starting after me"""
return list(range(me+1, r.nPlayers)) + list(range(0, me)) | 5c2d2b03bfb3b99eb4c347319ccaaa3fc495b6c4 | 17,864 |
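# Example: with 5 players, the players other than player 2, in turn order.
from types import SimpleNamespace

r = SimpleNamespace(nPlayers=5)
assert other_players(2, r) == [3, 4, 0, 1]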
def mock_dd_slo_history(*args, **kwargs):
"""Mock Datadog response for datadog.api.ServiceLevelObjective.history."""
return load_fixture('dd_slo_history.json') | 963fbbe20373e4e207852be82b88e33ca2c24e9a | 17,865 |
import torch
def check_joints2d_visibility_torch(joints2d, img_wh):
"""
Checks if 2D joints are within the image dimensions.
"""
vis = torch.ones(joints2d.shape[:2], device=joints2d.device, dtype=torch.bool)
vis[joints2d[:, :, 0] > img_wh] = 0
vis[joints2d[:, :, 1] > img_wh] = 0
vis[joints2d[:, :, 0] < 0] = 0
vis[joints2d[:, :, 1] < 0] = 0
return vis | a276d93a66dfd5bca15a684f652a0eede3094868 | 17,866 |
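# Usage sketch: one joint inside and one outside a 224x224 image.
joints = torch.tensor([[[100.0, 50.0], [300.0, -5.0]]])  # (batch, joints, xy)
vis = check_joints2d_visibility_torch(joints, img_wh=224)
# vis -> tensor([[ True, False]])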
def getIntervalIntersectionLength(aa, bb, wrapAt=360):
"""Returns the length of the intersection between two intervals."""
intersection = getIntervalIntersection(aa, bb, wrapAt=wrapAt)
if intersection is False:
return 0.0
else:
if wrapAt is None:
return (intersection[1] - intersection[0])
else:
return (intersection[1] - intersection[0]) % wrapAt | 46924c149e8b5b802fc83e489417850f3a8dbd18 | 17,867 |
def test_jvp_construct_single_input_single_output_default_v_graph():
"""
Features: Function jvp
Description: Test jvp with Cell construct, single input, single output and default v in graph mode.
Expectation: No exception.
"""
x = Tensor(np.array([[1, 2], [3, 4]]).astype(np.float32))
v = Tensor(np.array([[1, 1], [1, 1]]).astype(np.float32))
class Net(nn.Cell):
def __init__(self, network):
super(Net, self).__init__()
self.net = network
def construct(self, inputs, vectors):
net_out, jvp_out = jvp(self.net, inputs, vectors)
return net_out, jvp_out
test_net = Net(SingleInputSingleOutputNet())
primal, grad = test_net(x, v)
expect_primal = Tensor(np.array([[1, 8], [27, 64]]).astype(np.float32))
expect_grad = Tensor(np.array([[3, 12], [27, 48]]).astype(np.float32))
assert np.allclose(primal.asnumpy(), expect_primal.asnumpy())
assert np.allclose(grad.asnumpy(), expect_grad.asnumpy()) | edd6b1c93dc880310fc0af2012d3fbfca328b64c | 17,868 |
def get_environment() -> Environment:
"""
Parses environment variables and sets their defaults if they do not exist.
"""
return Environment(
permission_url=get_endpoint("PERMISSION"),
media_url=get_endpoint("MEDIA"),
datastore_reader_url=get_endpoint("DATASTORE_READER"),
datastore_writer_url=get_endpoint("DATASTORE_WRITER"),
) | 2ce9b56c76fadcd19a861f00e8d880a181b676ed | 17,869 |
from typing import Optional

import pulumi
def get_kubernetes_cluster(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKubernetesClusterResult:
"""
Use this data source to access information about an existing Managed Kubernetes Cluster (AKS).
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.containerservice.get_kubernetes_cluster(name="myakscluster",
resource_group_name="my-example-resource-group")
```
:param str name: The name of the managed Kubernetes Cluster.
:param str resource_group_name: The name of the Resource Group in which the managed Kubernetes Cluster exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:containerservice/getKubernetesCluster:getKubernetesCluster', __args__, opts=opts, typ=GetKubernetesClusterResult).value
return AwaitableGetKubernetesClusterResult(
addon_profiles=__ret__.addon_profiles,
agent_pool_profiles=__ret__.agent_pool_profiles,
api_server_authorized_ip_ranges=__ret__.api_server_authorized_ip_ranges,
disk_encryption_set_id=__ret__.disk_encryption_set_id,
dns_prefix=__ret__.dns_prefix,
fqdn=__ret__.fqdn,
id=__ret__.id,
identities=__ret__.identities,
kube_admin_config_raw=__ret__.kube_admin_config_raw,
kube_admin_configs=__ret__.kube_admin_configs,
kube_config_raw=__ret__.kube_config_raw,
kube_configs=__ret__.kube_configs,
kubelet_identities=__ret__.kubelet_identities,
kubernetes_version=__ret__.kubernetes_version,
linux_profiles=__ret__.linux_profiles,
location=__ret__.location,
name=__ret__.name,
network_profiles=__ret__.network_profiles,
node_resource_group=__ret__.node_resource_group,
private_cluster_enabled=__ret__.private_cluster_enabled,
private_fqdn=__ret__.private_fqdn,
private_link_enabled=__ret__.private_link_enabled,
resource_group_name=__ret__.resource_group_name,
role_based_access_controls=__ret__.role_based_access_controls,
service_principals=__ret__.service_principals,
tags=__ret__.tags,
windows_profiles=__ret__.windows_profiles) | 38beb9a7a96e51364f35c658d85428991e0686a8 | 17,870 |
from tensor2tensor.layers import common_hparams

def gene_expression_conv_base():
"""Hparams for GeneExpressionConv model."""
hparams = common_hparams.basic_params1()
batch_size = 10
output_length = 2048
inputs_per_output = 128
chunk_size = 4
input_length = output_length * inputs_per_output // chunk_size
hparams.batch_size = input_length * batch_size
hparams.dropout = 0.1
hparams.add_hparam("num_conv_layers", 4)
hparams.add_hparam("num_dconv_layers", 7)
# The product of these pooling windows should match
# input_length/target_length.
hparams.add_hparam("pooling_windows", [2, 2, 2, 4])
hparams.hidden_size = 256
hparams.kernel_width = 20
hparams.add_hparam("stride", 1)
return hparams | 7bd239fb511a7f72837a139f236557278f0c1dab | 17,871 |
import typing
from typing import _GenericAlias
import collections
import types
def is_callable(type_def, allow_callable_class: bool = False) -> bool:
"""
Checks whether the ``type_def`` is a callable according to the following rules:
1. Functions are callable.
2. ``typing.Callable`` types are callable.
3. Generic aliases of types which are ``is_callable`` are callable.
4. If ``allow_callable_class`` is set to ``True``, then classes which have a ``__call__`` method are callable.
:param type_def: the type to check.
:param allow_callable_class: set to ``True`` to consider classes which have a ``__call__`` method callable.
:return: ``True`` if ``type_def`` is a callable type, ``False`` otherwise.
"""
    # the original compared type_def against a module-level helper's type;
    # a plain function-type check is assumed equivalent here
    if isinstance(type_def, types.FunctionType):
        return True
if isinstance(type_def, typing._SpecialForm):
return False
if isinstance(type_def, _GenericAlias):
if type_def.__origin__ == typing.Callable or type_def.__origin__ == collections.abc.Callable:
return True
if type_def._special:
return False
return is_callable(type_def.__origin__, allow_callable_class=allow_callable_class)
if allow_callable_class and hasattr(type_def, "__call__"):
return True
return False | 68c51cfc4da4891c90c376e8ff1b26dc630a96de | 17,872 |
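# Illustrative checks (typing internals such as _GenericAlias._special vary
# across Python versions, so this sketch avoids the version-specific paths):
from typing import Callable

assert is_callable(lambda x: x)             # plain functions are callable
assert is_callable(Callable[[int], int])    # typing.Callable aliases too

class Greeter:
    def __call__(self):
        return "hi"

assert not is_callable(Greeter)
assert is_callable(Greeter, allow_callable_class=True)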
def vgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_pretrained_model(model_zoo.load_url(model_urls['vgg19']))
return model | 7c7e43eb46ffeb20fd5901d5761e11f701650727 | 17,873 |
def get_formats(input_f, input_case="cased", is_default=True):
"""
Adds various abbreviation format options to the list of acceptable input forms
"""
multiple_formats = load_labels(input_f)
additional_options = []
for x, y in multiple_formats:
if input_case == "lower_cased":
x = x.lower()
additional_options.append((f"{x}.", y)) # default "dr" -> doctor, this includes period "dr." -> doctor
additional_options.append((f"{x[0].upper() + x[1:]}", f"{y[0].upper() + y[1:]}")) # "Dr" -> Doctor
additional_options.append((f"{x[0].upper() + x[1:]}.", f"{y[0].upper() + y[1:]}")) # "Dr." -> Doctor
multiple_formats.extend(additional_options)
if not is_default:
multiple_formats = [(x, f"|raw_start|{x}|raw_end||norm_start|{y}|norm_end|") for (x, y) in multiple_formats]
multiple_formats = pynini.string_map(multiple_formats)
return multiple_formats | a20a02a7e85c1711d8b9d779e5804ed8e1dec83a | 17,874 |
def db_select_entry(c, bibkey):
""" Select entry from database
:argument
c: sqlite3 cursor
:returns
entry_dict: dict
"""
fields = ['bibkey', 'author', 'genre', 'thesis', 'hypothesis',
'method', 'finding', 'comment', 'img_linkstr']
sql = "SELECT {:s} FROM note WHERE bibkey = (?)".format(','.join(fields))
c.execute(sql, (bibkey,))
result = c.fetchall()[0]
a_dict = {}
for field, value in zip(fields, result):
a_dict[field] = value
c.execute("SELECT tag FROM tags WHERE bibkey = (?) ORDER BY tag ASC",
(a_dict['bibkey'],))
tags = tuple(r[0] for r in c.fetchall())
return a_dict, tags | a93425aa2487f28bc3215a65ad1f963ea1173fcd | 17,875 |
def to_bytes(obj, encoding='utf-8', errors='strict'):
"""Makes sure that a string is a byte string.
Args:
obj: An object to make sure is a byte string.
encoding: The encoding to use to transform from a text string to
a byte string. Defaults to using 'utf-8'.
errors: The error handler to use if the text string is not
encodable using the specified encoding. Any valid codecs error
handler may be specified.
Returns: Typically this returns a byte string.
"""
if isinstance(obj, bytes):
return obj
return bytes(obj, encoding=encoding, errors=errors) | 4f8a0dcfdcfd3e2a77b5cbeedea4cb2a11acd4c1 | 17,876 |
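# Round-trip examples:
assert to_bytes(b"raw") == b"raw"
assert to_bytes("héllo") == "héllo".encode("utf-8")
assert to_bytes("héllo", encoding="ascii", errors="replace") == b"h?llo"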
def compact(number, strip_check_digit=True):
"""Convert the MEID number to the minimal (hexadecimal) representation.
This strips grouping information, removes surrounding whitespace and
converts to hexadecimal if needed. If the check digit is to be preserved
and conversion is done a new check digit is recalculated."""
# first parse the number
number, cd = _parse(number)
# strip check digit if needed
if strip_check_digit:
cd = ''
# convert to hex if needed
if len(number) == 18:
number = '%08X%06X' % (int(number[0:10]), int(number[10:18]))
if cd:
cd = calc_check_digit(number)
# put parts back together again
return number + cd | ced106ed8c97d432a8059d8654cfb437620deb64 | 17,878 |
from typing import List

from spacy.tokens import Doc
def hashtag_getter(doc: Doc) -> List[str]:
"""
Extract hashtags from text
Args:
doc (Doc): A SpaCy document
Returns:
List[str]: A list of hashtags
Example:
>>> from spacy.tokens import Doc
>>> Doc.set_extension("hashtag", getter=dacy.utilities.twitter.hashtags)
>>> doc = nlp("Fuck hvor fedt! #yolo #life")
        >>> doc._.hashtag  # extract the hashtags from your document
["#yolo", "#life"]
"""
def find_hashtags(
text,
valid_tags={"#", "#"},
valid_chars={"_", "-"},
invalid_tag_suffix={b"\xe2\x83\xa3", b"\xef\xb8\x8f"},
):
def is_letter(t):
if (
t.isalnum()
or t in valid_chars
or str.encode(t).startswith(b"\xcc")
or str.encode(t).startswith(b"\xe0")
):
return True
return False
start = None
for i, char in enumerate(text):
if (
char in valid_tags
and not (
i + 1 != len(text) and str.encode(text[i + 1]) in invalid_tag_suffix
)
and (i == 0 or not (is_letter(text[i - 1]) or text[i - 1] == "&"))
):
start = i
continue
if start is not None and not is_letter(char):
if char in valid_tags:
start = None
continue
if not text[start + 1 : i].isnumeric():
yield "#" + text[start + 1 : i]
start = None
        if start is not None and not text[start + 1 : i + 1].isnumeric():
            yield "#" + text[start + 1 : i + 1]
return list(find_hashtags(doc.text)) | a85c5cf9bd2fec2ec74e70bf2dadfb1df688c128 | 17,879 |
def options():
"""Stub version of the parsed command line options."""
class StubOptions(object):
profile = None
return StubOptions() | dea85d2956eb6cbf97f870a74b122896915c8c19 | 17,880 |
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import gridspec

def plot_bootstrap_delta_grp(dfboot, df, grp, force_xlim=None, title_add=''):
    """Plot delta between bootstrap results, grouped."""
count_txt_h_kws, mean_txt_kws, pest_mean_point_kws, mean_point_kws = _get_kws_styling()
if dfboot[grp].dtypes != 'object':
dfboot = dfboot.copy()
dfboot[grp] = dfboot[grp].map(lambda x: f's{x}')
mn = dfboot.groupby(grp).size()
f = plt.figure(figsize=(14, 2+(len(mn)*.2))) #, constrained_layout=True)
gs = gridspec.GridSpec(1, 2, width_ratios=[11, 1], figure=f)
ax0 = f.add_subplot(gs[0])
ax1 = f.add_subplot(gs[1], sharey=ax0)
_ = sns.boxplot(x='lr_delta', y=grp, data=dfboot, palette='cubehelix_r',
sym='', whis=[3, 97], showmeans=True, notch=True, ax=ax0)
_ = ax0.axvline(0, ls='--', lw=2, c='#555555', zorder=-1)
if force_xlim is not None:
_ = ax0.set(xlim=force_xlim)
_ = sns.countplot(y=grp, data=df, ax=ax1, palette='cubehelix_r')
ct = df.groupby(grp).size().tolist()
_ = [ax1.annotate(f'{v}', xy=(v, i%len(ct)), **count_txt_h_kws) for i, v in enumerate(ct)]
ypos = 1.02
if title_add != '':
ypos = 1.05
title_add = f'\n{title_add}'
title = (f'2-sample bootstrap test - grouped by {grp}')
_ = f.suptitle(f'{title}{title_add}', y=ypos)
f.tight_layout() # prefer over constrained_layout
return gs | 1d9d25604392433bc9cc10d645f0a8f2122a2a38 | 17,882 |
from typing import Any, Dict, Optional, Tuple

import numpy as np
import tensorflow as tf
def inception_inspired_reservoir_model(
input_shape: Tuple[int, int, int],
reservoir_weight: np.ndarray,
num_output_channels: int,
seed: Optional[int] = None,
num_filters: int = 32,
reservoir_base: str = 'DenseReservoir',
reservoir_params: Optional[Dict[str, Any]] = None,
final_activation: Optional[str] = 'sigmoid',
task: str = 'segmentation',
) -> tf.keras.Model:
"""Builds a simple recurrent reservoir model with inception-style head.
The model is an SRN in the sense that a copy of the output of a first
reservoir is passed through a set of trainable weights and then through
a second identical reservoir.
Args:
input_shape: (image_height, image_width, num_channels) of the input image.
reservoir_weight: Weight matrix to be assigned to the fixed layers.
num_output_channels: how many output channels to use.
seed: int seed to use to get a deterministic set of "random" weights.
num_filters: how many filters to include in each layer of the inception
block.
reservoir_base: the reservoir base to use. Default is 'DenseReservoir'.
reservoir_params: the parameters to initialize the reservoir_base. (Any
field provided MUST be a Correct argument for the reservoir base,
e.g. common options include {
'recurrence_degree': 3,
'keep_memory': True,
'trainable_reservoir': True,
'use_bias': True,
'activation_within_recurrence': True,
'kernel_local_learning': 'hebbian',
'kernel_local_learning_params': {'eta': 0.1},
'recurrent_kernel_local_learning': 'hebbian',
'recurrent_kernel_local_learning_params': {'eta': 0.1},
'state_discount': 1.0,
}. If variable not included in the params, the default values are used.)
final_activation: 'sigmoid', 'softmax', 'tanh', or None.
task: which task this model is used for (options includes: 'segmentation',
'classification')
Returns:
A simple recurrent reservoir model with convolutional head
Raises:
ValueError: if task not in accepted tasks (segmentation, classification).
"""
if task not in ['segmentation', 'classification']:
raise ValueError(
f'Task not defined in accepted tasks (segmentation, classification). Got {task}'
)
# Create a sequential keras model
if reservoir_params is None:
reservoir_params = {}
reservoir_params['weight'] = reservoir_weight
inputs = tf.keras.layers.Input(input_shape)
if seed:
kernel_initializer = initializers.FixedRandomInitializer(seed=seed)
else:
kernel_initializer = tf.keras.initializers.RandomNormal()
# Inception 'stem'
x = tf.keras.layers.Conv2D(
num_filters, 8, padding='same', input_shape=input_shape,
activation='elu')(
inputs)
x = tf.keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(1, 1), padding='same')(
x)
x = tf.keras.layers.Conv2D(
num_filters, 1, activation='elu', padding='same')(
x)
x = tf.keras.layers.Conv2D(
num_filters, 3, activation='elu', padding='same')(
x)
x = tf.keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(1, 1), padding='same')(
x)
x = tf.keras.layers.Conv2D(
num_filters, 1, activation='elu', padding='same')(
x)
x = tf.keras.layers.Conv2D(
num_filters, 3, activation='elu', padding='same')(
x)
# Inception block
incepta = tf.keras.layers.Conv2D(
num_filters, [1, 1], strides=(1, 1), activation='elu', padding='same')(
x)
incepta = tf.keras.layers.Conv2D(
num_filters, [5, 5], strides=(1, 1), activation='elu', padding='same')(
incepta)
inceptb = tf.keras.layers.Conv2D(
num_filters, [1, 1], strides=(1, 1), activation='elu', padding='same')(
x)
inceptb = tf.keras.layers.Conv2D(
num_filters, [3, 3], strides=(1, 1), activation='elu', padding='same')(
inceptb)
inceptc = tf.keras.layers.MaxPooling2D(
pool_size=(3, 3), strides=(1, 1), padding='same')(
x)
inceptc = tf.keras.layers.Conv2D(
num_filters, [1, 1], strides=(1, 1), activation='elu', padding='same')(
inceptc)
inceptd = tf.keras.layers.Conv2D(
num_filters, [1, 1], strides=(1, 1), activation='elu', padding='same')(
x)
y = tf.concat([incepta, inceptb, inceptc, inceptd], -1)
# Dense layer
y = tf.keras.layers.Dense(reservoir_weight.shape[0], activation='elu')(y)
# The first reservoir layer
y = reservoir_registry.get_reservoir(reservoir_base)(**reservoir_params)(y)
# Trainable layer in between reservoirs
y = tf.keras.layers.Dense(reservoir_weight.shape[0], activation='elu')(y)
# The second fixed reservoir layer
y = reservoir_registry.get_reservoir(reservoir_base)(**reservoir_params)(y)
# Create outputs.
if task == 'classification':
y = tf.keras.layers.Flatten()(y)
outputs = tf.keras.layers.Dense(
units=num_output_channels,
activation=final_activation,
kernel_initializer=kernel_initializer)(
y)
model = tf.keras.models.Model(inputs, outputs)
return model | 221c0c17035d44178ca460c011c92d37f26b4ed4 | 17,883 |
def midnight(date):
"""Returns a copy of a date with the hour, minute, second, and
millisecond fields set to zero.
Args:
date (Date): The starting date.
Returns:
Date: A new date, set to midnight of the day provided.
"""
return date.replace(hour=0, minute=0, second=0, microsecond=0) | b92086dd9d99a4cea6657d37f40e68696ad41f7c | 17,884 |
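# Example: snap an arbitrary timestamp back to the start of its day.
from datetime import datetime

stamp = datetime(2021, 6, 1, 14, 30, 59, 123456)
assert midnight(stamp) == datetime(2021, 6, 1)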
from typing import Callable
from typing import Any
from typing import Sequence
def foldr(fun: Callable[[Any, Any], Any], acc: Any, seq: Sequence[Any]) -> Any:
"""Implementation of foldr in Python3.
This is an implementation of the right-handed
fold function from functional programming.
If the list is empty, we return the accumulator
value. Otherwise, we recurse by applying the
function which was passed to the foldr to
the head of the iterable collection
and the foldr called with fun, acc, and
the tail of the iterable collection.
Below are the implementations of the len
and sum functions using foldr to
demonstrate how foldr function works.
>>> foldr((lambda _, y: y + 1), 0, [0, 1, 2, 3, 4])
5
>>> foldr((lambda x, y: x + y), 0, [0, 1, 2, 3, 4])
10
foldr takes the second argument and the
last item of the list and applies the function,
then it takes the penultimate item from the end
and the result, and so on.
"""
return acc if not seq else fun(seq[0], foldr(fun, acc, seq[1:])) | 5648d8ce8a2807270163ebcddad3f523f527986e | 17,886 |
def tj_dom_dem(x):
"""
Real Name: b'Tj Dom Dem'
Original Eqn: b'( [(1,0.08)-(365,0.09)],(1,0.08333),(2,0.08333),(3,0.08333),(4,0.08333),(5,0.08333),(6\\\\ ,0.08333),(7,0.08333),(8,0.08333),(9,0.08333),(10,0.08333),(11,0.08333),(12,0.08333\\\\ ),(13,0.08333),(14,0.08333),(15,0.08333),(16,0.08333),(17,0.08333),(18,0.08333),(19\\\\ ,0.08333),(20,0.08333),(21,0.08333),(22,0.08333),(23,0.08333),(24,0.08333),(25,0.08333\\\\ ),(26,0.08333),(27,0.08333),(28,0.08333),(29,0.08333),(30,0.08333),(31,0.08333),(32\\\\ ,0.08333),(33,0.08333),(34,0.08333),(35,0.08333),(36,0.08333),(37,0.08333),(38,0.08333\\\\ ),(39,0.08333),(40,0.08333),(41,0.08333),(42,0.08333),(43,0.08333),(44,0.08333),(45\\\\ ,0.08333),(46,0.08333),(47,0.08333),(48,0.08333),(49,0.08333),(50,0.08333),(51,0.08333\\\\ ),(52,0.08333),(53,0.08333),(54,0.08333),(55,0.08333),(56,0.08333),(57,0.08333),(58\\\\ ,0.08333),(59,0.08333),(60,0.08333),(61,0.08333),(62,0.08333),(63,0.08333),(64,0.08333\\\\ ),(65,0.08333),(66,0.08333),(67,0.08333),(68,0.08333),(69,0.08333),(70,0.08333),(71\\\\ ,0.08333),(72,0.08333),(73,0.08333),(74,0.08333),(75,0.08333),(76,0.08333),(77,0.08333\\\\ ),(78,0.08333),(79,0.08333),(80,0.08333),(81,0.08333),(82,0.08333),(83,0.08333),(84\\\\ ,0.08333),(85,0.08333),(86,0.08333),(87,0.08333),(88,0.08333),(89,0.08333),(90,0.08333\\\\ ),(91,0.08333),(92,0.08333),(93,0.08333),(94,0.08333),(95,0.08333),(96,0.08333),(97\\\\ ,0.08333),(98,0.08333),(99,0.08333),(100,0.08333),(101,0.08333),(102,0.08333),(103,\\\\ 0.08333),(104,0.08333),(105,0.08333),(106,0.08333),(107,0.08333),(108,0.08333),(109\\\\ ,0.08333),(110,0.08333),(111,0.08333),(112,0.08333),(113,0.08333),(114,0.08333),(115\\\\ ,0.08333),(116,0.08333),(117,0.08333),(118,0.08333),(119,0.08333),(120,0.08333),(121\\\\ ,0.08333),(122,0.08333),(123,0.08333),(124,0.08333),(125,0.08333),(126,0.08333),(127\\\\ ,0.08333),(128,0.08333),(129,0.08333),(130,0.08333),(131,0.08333),(132,0.08333),(133\\\\ ,0.08333),(134,0.08333),(135,0.08333),(136,0.08333),(137,0.08333),(138,0.08333),(139\\\\ ,0.08333),(140,0.08333),(141,0.08333),(142,0.08333),(143,0.08333),(144,0.08333),(145\\\\ ,0.08333),(146,0.08333),(147,0.08333),(148,0.08333),(149,0.08333),(150,0.08333),(151\\\\ ,0.08333),(152,0.08333),(153,0.08333),(154,0.08333),(155,0.08333),(156,0.08333),(157\\\\ ,0.08333),(158,0.08333),(159,0.08333),(160,0.08333),(161,0.08333),(162,0.08333),(163\\\\ ,0.08333),(164,0.08333),(165,0.08333),(166,0.08333),(167,0.08333),(168,0.08333),(169\\\\ ,0.08333),(170,0.08333),(171,0.08333),(172,0.08333),(173,0.08333),(174,0.08333),(175\\\\ ,0.08333),(176,0.08333),(177,0.08333),(178,0.08333),(179,0.08333),(180,0.08333),(181\\\\ ,0.08333),(182,0.08333),(183,0.08333),(184,0.08333),(185,0.08333),(186,0.08333),(187\\\\ ,0.08333),(188,0.08333),(189,0.08333),(190,0.08333),(191,0.08333),(192,0.08333),(193\\\\ ,0.08333),(194,0.08333),(195,0.08333),(196,0.08333),(197,0.08333),(198,0.08333),(199\\\\ ,0.08333),(200,0.08333),(201,0.08333),(202,0.08333),(203,0.08333),(204,0.08333),(205\\\\ ,0.08333),(206,0.08333),(207,0.08333),(208,0.08333),(209,0.08333),(210,0.08333),(211\\\\ ,0.08333),(212,0.08333),(213,0.08333),(214,0.08333),(215,0.08333),(216,0.08333),(217\\\\ ,0.08333),(218,0.08333),(219,0.08333),(220,0.08333),(221,0.08333),(222,0.08333),(223\\\\ ,0.08333),(224,0.08333),(225,0.08333),(226,0.08333),(227,0.08333),(228,0.08333),(229\\\\ ,0.08333),(230,0.08333),(231,0.08333),(232,0.08333),(233,0.08333),(234,0.08333),(235\\\\ ,0.08333),(236,0.08333),(237,0.08333),(238,0.08333),(239,0.08333),(240,0.08333),(241\\\\ 
,0.08333),(242,0.08333),(243,0.08333),(244,0.08333),(245,0.08333),(246,0.08333),(247\\\\ ,0.08333),(248,0.08333),(249,0.08333),(250,0.08333),(251,0.08333),(252,0.08333),(253\\\\ ,0.08333),(254,0.08333),(255,0.08333),(256,0.08333),(257,0.08333),(258,0.08333),(259\\\\ ,0.08333),(260,0.08333),(261,0.08333),(262,0.08333),(263,0.08333),(264,0.08333),(265\\\\ ,0.08333),(266,0.08333),(267,0.08333),(268,0.08333),(269,0.08333),(270,0.08333),(271\\\\ ,0.08333),(272,0.08333),(273,0.08333),(274,0.08333),(275,0.08333),(276,0.08333),(277\\\\ ,0.08333),(278,0.08333),(279,0.08333),(280,0.08333),(281,0.08333),(282,0.08333),(283\\\\ ,0.08333),(284,0.08333),(285,0.08333),(286,0.08333),(287,0.08333),(288,0.08333),(289\\\\ ,0.08333),(290,0.08333),(291,0.08333),(292,0.08333),(293,0.08333),(294,0.08333),(295\\\\ ,0.08333),(296,0.08333),(297,0.08333),(298,0.08333),(299,0.08333),(300,0.08333),(301\\\\ ,0.08333),(302,0.08333),(303,0.08333),(304,0.08333),(305,0.08333),(306,0.08333),(307\\\\ ,0.08333),(308,0.08333),(309,0.08333),(310,0.08333),(311,0.08333),(312,0.08333),(313\\\\ ,0.08333),(314,0.08333),(315,0.08333),(316,0.08333),(317,0.08333),(318,0.08333),(319\\\\ ,0.08333),(320,0.08333),(321,0.08333),(322,0.08333),(323,0.08333),(324,0.08333),(325\\\\ ,0.08333),(326,0.08333),(327,0.08333),(328,0.08333),(329,0.08333),(330,0.08333),(331\\\\ ,0.08333),(332,0.08333),(333,0.08333),(334,0.08333),(335,0.08333),(336,0.08333),(337\\\\ ,0.08333),(338,0.08333),(339,0.08333),(340,0.08333),(341,0.08333),(342,0.08333),(343\\\\ ,0.08333),(344,0.08333),(345,0.08333),(346,0.08333),(347,0.08333),(348,0.08333),(349\\\\ ,0.08333),(350,0.08333),(351,0.08333),(352,0.08333),(353,0.08333),(354,0.08333),(355\\\\ ,0.08333),(356,0.08333),(357,0.08333),(358,0.08333),(359,0.08333),(360,0.08333),(361\\\\ ,0.08333),(362,0.08333),(363,0.08333),(364,0.08333),(365,0.08333))'
Units: b'Dmnl'
Limits: (None, None)
Type: lookup
b''
"""
return functions.lookup(x, [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113,
114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167,
168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185,
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203,
204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221,
222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239,
240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257,
258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, 275,
276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 287, 288, 289, 290, 291, 292, 293,
294, 295, 296, 297, 298, 299, 300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311,
312, 313, 314, 315, 316, 317, 318, 319, 320, 321, 322, 323, 324, 325, 326, 327, 328, 329,
330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347,
348, 349, 350, 351, 352, 353, 354, 355, 356, 357, 358, 359, 360, 361, 362, 363, 364, 365
], [
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333, 0.08333,
0.08333, 0.08333, 0.08333, 0.08333, 0.08333
]) | df670af98362fd67bece677aa85bf37d31a75389 | 17,887 |
def filter_chants_without_volpiano(chants, logger=None):
"""Exclude all chants with an empty volpiano field"""
has_volpiano = chants.volpiano.isnull() == False
return chants[has_volpiano] | 3f03bbf3f247afd3a115442e8121a773aa90fb56 | 17,888 |
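# Minimal usage sketch with a toy DataFrame; the column name follows the
# function's expectation, not a real chant corpus:
import pandas as pd

chants = pd.DataFrame({"id": [1, 2, 3],
                       "volpiano": ["1---f--g", None, "1---a"]})
assert list(filter_chants_without_volpiano(chants).id) == [1, 3]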
def sample_input():
"""Return the puzzle input and expected result for the part 1
example problem.
"""
lines = split_nonblank_lines("""
position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>""")
sky_lines = split_nonblank_lines("""
......................
......................
......................
......................
......#...#..###......
......#...#...#.......
......#...#...#.......
......#####...#.......
......#...#...#.......
......#...#...#.......
......#...#...#.......
......#...#..###......
......................
......................
......................
......................""")
expected = trim_sky(sky_lines)
return lines, expected | e027d2831ef5d16776b5c5f7bb8e759042056e2f | 17,891 |
import numpy as np

def vector_angle(v):
"""Angle between v and the positive x axis.
Only works with 2-D vectors.
returns: angle in radians
"""
assert len(v) == 2
x, y = v
return np.arctan2(y, x) | 4402795a27ca20269dbfa5a5823c4e2768681ed0 | 17,892 |
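# Quick checks: the angle is measured counter-clockwise from the +x axis.
assert np.isclose(vector_angle((1, 0)), 0.0)
assert np.isclose(vector_angle((0, 1)), np.pi / 2)
assert np.isclose(vector_angle((-1, 0)), np.pi)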
import mariadb

def get_user_record_tuple(param) -> ():
    """
    Internal method for retrieving the user registration record from the DB.

    :param param: a Discord ID (int) or a registration token (str)
    :return: the matching registration row as a tuple, or None
    """
conn = mariadb.connect(host=DB_URI, user=DB_USERNAME, password=DB_PASSWORD, database=DB_NAME)
db = conn.cursor()
# discord_id provided
if isinstance(param, int):
cmd = '''SELECT last_updated, token, discord_id, discord_name, is_verified, callsign
FROM registration WHERE discord_id=%s'''
# token provided
elif isinstance(param, str):
cmd = '''SELECT last_updated, token, discord_id, discord_name, is_verified, callsign
FROM registration WHERE token=%s'''
else:
return None
db.execute(cmd, (param,))
result = db.fetchone()
return result | d35468b7b2141f6c19f0c5669f80c103a7499221 | 17,893 |
import numpy as np
from scipy.signal import bilinear, lfilter

def A_weighting(x, Fs):
    """Apply an A-weighting filter, designed as a polynomial transfer function.

    :returns: the A-weighted version of the input signal `x`.

    See equation E.6 of the standard.
    """
f1 = _POLE_FREQUENCIES[1]
f2 = _POLE_FREQUENCIES[2]
f3 = _POLE_FREQUENCIES[3]
f4 = _POLE_FREQUENCIES[4]
offset = _NORMALIZATION_CONSTANTS['A']
numerator = np.array([(2.0 * np.pi * f4)**2.0 * (10**(-offset / 20.0)), 0.0, 0.0, 0.0, 0.0])
part1 = [1.0, 4.0 * np.pi * f4, (2.0 * np.pi * f4)**2.0]
part2 = [1.0, 4.0 * np.pi * f1, (2.0 * np.pi * f1)**2.0]
part3 = [1.0, 2.0 * np.pi * f3]
part4 = [1.0, 2.0 * np.pi * f2]
denomenator = np.convolve(np.convolve(np.convolve(part1, part2), part3), part4)
B, A = bilinear(numerator, denomenator, Fs)
return lfilter(B, A, x) | a4592939d3809da292c4f05f47ca69e41ad9d27a | 17,894 |
import inspect
def register(class_=None, **kwargs):
"""Registers a dataset with segment specific hyperparameters.
When passing keyword arguments to `register`, they are checked to be valid
keyword arguments for the registered Dataset class constructor and are
saved in the registry. Registered keyword arguments can be retrieved with
the `list_datasets` function.
All arguments that result in creation of separate datasets should be
registered. Examples are datasets divided in different segments or
categories, or datasets containing multiple languages.
Once registered, an instance can be created by calling
:func:`~gluonnlp.data.create` with the class name.
Parameters
----------
**kwargs : list or tuple of allowed argument values
        For each keyword argument, its value must be a list or tuple of the
allowed argument values.
Examples
--------
>>> @gluonnlp.data.register(segment=['train', 'test', 'dev'])
... class MyDataset(gluon.data.Dataset):
... def __init__(self, segment='train'):
... pass
>>> my_dataset = gluonnlp.data.create('MyDataset')
>>> print(type(my_dataset))
<class 'MyDataset'>
"""
def _real_register(class_):
# Assert that the passed kwargs are meaningful
for kwarg_name, values in kwargs.items():
try:
real_args = inspect.getfullargspec(class_).args
except AttributeError:
# pylint: disable=deprecated-method
real_args = inspect.getargspec(class_.__init__).args
if not kwarg_name in real_args:
raise RuntimeError(
('{} is not a valid argument for {}. '
'Only valid arguments can be registered.').format(
kwarg_name, class_.__name__))
if not isinstance(values, (list, tuple)):
raise RuntimeError(('{} should be a list of '
'valid arguments for {}. ').format(
values, kwarg_name))
# Save the kwargs associated with this class_
_REGSITRY_NAME_KWARGS[class_] = kwargs
register_ = registry.get_register_func(Dataset, 'dataset')
return register_(class_)
if class_ is not None:
# Decorator was called without arguments
return _real_register(class_)
return _real_register | f1ce9a7abc8224fcd4bd80cddbf0d46e11ee997a | 17,895 |
import random

import numpy as np
def random_population(pop_size, tune_params, tuning_options, max_threads):
"""create a random population of pop_size unique members"""
population = []
option_space = np.prod([len(v) for v in tune_params.values()])
assert pop_size < option_space
while len(population) < pop_size:
dna = [random.choice(v) for v in tune_params.values()]
if not dna in population and util.config_valid(dna, tuning_options, max_threads):
population.append(dna)
return population | b5f2fa518e29bdd6ccb5d10213080f9c594f01b0 | 17,896 |
from genomepy import Genome

def check_genome(genome):
    """Check if genome is a valid FASTA file or genomepy genome name.
Parameters
----------
genome : str
Genome name or file to check.
Returns
-------
is_genome : bool
"""
try:
Genome(genome)
return True
except Exception:
pass
return False | 5f51d74203b77f39b0c8054d36fa6b68399540dd | 17,897 |
def extract_row_loaded():
"""extract_row as it should appear in memory"""
result = {}
result['classification_id'] = '91178981'
result['user_name'] = 'MikeWalmsley'
result['user_id'] = '290475'
result['user_ip'] = '2c61707e96c97a759840'
result['workflow_id'] = '6122'
result['workflow_name'] = 'DECaLS DR5'
result['workflow_version'] = '28.30'
result['created_at'] = '2018-02-20 10:44:42 UTC'
result['gold_standard'] = ''
result['expert'] = ''
result['metadata'] = {
'session': 'e69d40c94873e2e4e2868226d5567e0e997bf58e8800eef4def679ff3e69f97f',
'viewport': {
'width': 1081,
'height': 1049
},
'started_at':'2018-02-20T10:41:13.381Z',
'user_agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:58.0) Gecko/20100101 Firefox/58.0',
'utc_offset': '0',
'finished_at': '2018-02-20T10:44:42.480Z',
'live_project': True,
'user_language': 'en',
'user_group_ids':[],
'subject_dimensions': [{
'clientWidth': 424,
'clientHeight': 424,
'naturalWidth': 424,
'naturalHeight': 424
}]}
result['annotations'] = [
{
'task': 'T0',
'task_label': 'Is the galaxy simply smooth and rounded, with no sign of a disk?',
'value':' Features or Disk'
},
{
'task': 'T2',
'task_label': 'Could this be a disk viewed edge-on?',
'value':' No'
},
{
'task': 'T4',
'task_label': 'Is there a bar feature through the centre of the galaxy?',
'value': 'No Bar'
},
{
'task': 'T5',
'task_label': 'Is there any sign of a spiral arm pattern?',
'value': 'Yes'
},
{
'task': 'T6',
'task_label': 'How tightly wound do the spiral arms appear?',
'value': ' Tight'
},
{
'task': 'T7',
'task_label':'How many spiral arms are there?',
'value':' Cant tell'
},
{
'task':'T8',
'task_label':'How prominent is the central bulge, compared with the rest of the galaxy?',
'value':' No bulge'
},
{
'task':'T11',
'task_label':'Is the galaxy currently merging, or is there any sign of tidal debris?',
'value':'Neither'
},
{
'task':'T10',
'task_label':'Do you see any of these rare features in the image?',
'value':[]
}
]
result['subject_data'] = {
'15715879': {
'retired': None,
'ra': 319.11521779916546,
'dec': -0.826509379829966,
'mag.g': 13.674222230911255,
'mag.i': 12.560198307037354,
'mag.r': 12.938228249549866,
'mag.u': 15.10558009147644,
'mag.z':12.32387661933899,
'nsa_id':189862.0,
'redshift':0.019291512668132782,
'mag.abs_r':-20.916738510131836,
'mag.faruv':16.92647397518158,
'petroflux':5388.59814453125,
'petroth50':13.936717987060547,
'mag.nearuv':16.298240423202515,
'petrotheta':28.682878494262695,
'absolute_size':11.334824080956198
}
}
result['subject_ids'] = '15715879'
return result | c5fd87149ab7dff6e9980a898013053ce309c259 | 17,898 |
import _ida_hexrays

def getb_reginsn(*args):
"""
getb_reginsn(ins) -> minsn_t
Skip assertions backward.
@param ins (C++: const minsn_t *)
"""
return _ida_hexrays.getb_reginsn(*args) | 04f0141c0053e74981264b90b3bb0be1fbea8c08 | 17,899 |
import numpy as np

from solith.li_nofk.int_nofk import calc_jp1d, calc_nk1d
def convolve_nk(myk, nkm, gfunc, klim, nk, kmin=0.02, kmax=1.5):
"""Convolve n(k) by going to J(p); apply resolution; and back.
Args:
myk (np.array): k
nkm (np.array): n(k)
gfunc (callable): resolution function
klim (float): maxmimum kvalue to include in convolution
nk (nk): number of points on linear grid
kmin (float, optional): minimum n(k) to keep after conv., default 0.02
kmax (float, optional): maximum n(k) to keep after conv., default 1.50
Return:
(np.array, np.array): (myk1, mynk1), convolved n(k)
Example:
>>> gfunc = lambda x:lorentz(x, 0.026)
>>> klim = 40. # need large number for lorentzian tail
>>> nk = 1024**2
>>> myk1, nkm1 = convolve_nk(myk, nkm, gfunc, klim, nk)
"""
# first integrate n(k) to J(p)
jpm = calc_jp1d(myk, nkm)
# second convolve J(p)
fjp = flip_and_clamp(myk, jpm)
finep = np.linspace(-klim, klim, nk)
jp1 = fft_convolve(fjp, gfunc, finep)
# third differentiate J(p) to n(k)
sel = (finep > kmin) & (finep < kmax)
myk1, nk1 = calc_nk1d(finep[sel], jp1[sel])
return myk1, nk1 | 7b7c782b35e1a58c104d4e5fef38f867f85ba014 | 17,900 |
import numpy as np
from scipy.ndimage import label

def label_connected_components(label_images, start_label=1, is_3d=False):
"""Label connected components in a label image.
    Create new label images where labels are changed so that each
    connected component has a different label.
    An 8-neighborhood is used to find the connected components.

    Parameters
    ----------
    label_images : ndarray
        Label images with size :math:`(N, H, W)`.
    start_label : int
        First label, by default 1.
    is_3d : bool
        If True, the input is treated as 3-D volumes and a 3-D neighborhood
        is used.
Returns
-------
ndarray
Label images with new labels.
"""
if label_images.ndim == 2:
label_images = np.expand_dims(label_images, 0)
if is_3d and label_images.ndim == 3:
label_images = np.expand_dims(label_images, 0)
    # np.int was removed in NumPy 1.24; use the builtin int dtype
    new_label_images = np.zeros_like(label_images).astype(int)
_c = start_label
for label_image, new_label_image in zip(label_images, new_label_images):
num_labels = label_image.astype(np.int32).max()
if is_3d:
structure = np.ones((3, 3, 3), dtype=np.uint8)
else:
structure = np.ones((3, 3), dtype=np.uint8)
for _l in range(1, num_labels + 1):
_label_image = label(label_image == _l, structure=structure)[0]
for new_l in range(1, _label_image.max() + 1):
mask = _label_image == new_l
new_label_image[mask] = _c
_c += 1
return new_label_images | 670a90421237718e263de23987c4d646e0669ae1 | 17,901 |
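# Example: a single input label split into two 8-connected components.
img = np.array([[1, 1, 0],
                [0, 0, 0],
                [0, 1, 1]])
out = label_connected_components(img)
# out[0] relabels the two blobs consecutively:
# [[1 1 0]
#  [0 0 0]
#  [0 2 2]]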
from xml.etree import ElementTree

def metadataAbstractElementEmptyValuesTest1():
"""
No empty values.
>>> doctestMetadataAbstractElementFunction(
... testMetadataAbstractElementEmptyValue,
... metadataAbstractElementEmptyValuesTest1(),
... requiredAttributes=["required1"],
... optionalAttributes=["optional1"])
[]
"""
metadata = """<?xml version="1.0" encoding="UTF-8"?>
<test required1="foo" optional1="foo" />
"""
return ElementTree.fromstring(metadata) | 6504b570a49876dfdbb84be4b56bd87978aef22c | 17,904 |
from tensorflow.python.ops import gen_array_ops

def broadcast_dynamic_shape(shape_x, shape_y):
"""Computes the shape of a broadcast given symbolic shapes.
When shape_x and shape_y are Tensors representing shapes (i.e. the result of
calling tf.shape on another Tensor) this computes a Tensor which is the shape
of the result of a broadcasting op applied in tensors of shapes shape_x and
shape_y.
For example, if shape_x is [1, 2, 3] and shape_y is [5, 1, 3], the result is a
Tensor whose value is [5, 2, 3].
This is useful when validating the result of a broadcasting operation when the
tensors do not have statically known shapes.
Args:
shape_x: A rank 1 integer `Tensor`, representing the shape of x.
shape_y: A rank 1 integer `Tensor`, representing the shape of y.
Returns:
A rank 1 integer `Tensor` representing the broadcasted shape.
"""
return gen_array_ops.broadcast_args(shape_x, shape_y) | 3716b11f62712d4fedd4f1c997e2ad022178848b | 17,905 |
def extract_args_for_httpie_main(context, method=None):
"""Transform a Context object to a list of arguments that can be passed to
HTTPie main function.
"""
args = _extract_httpie_options(context)
if method:
args.append(method.upper())
args.append(context.url)
args += _extract_httpie_request_items(context)
return args | 7e6b4f9ff3bc5bc9d44d3a7537063f4dc0965ab3 | 17,906 |
def apply_configuration(dist: "Distribution", filepath: _Path) -> "Distribution":
"""Apply the configuration from a ``setup.cfg`` file into an existing
distribution object.
"""
_apply(dist, filepath)
dist._finalize_requires()
return dist | 0c5f8c3a8d41faacaa1c8a5c13fafbb67e561f64 | 17,907 |