content | sha1 | id
---|---|---|
import ast
def apply_bin_op(left, right, op):
"""
Finds the binary expression class suited to the combination of the left and
right expressions, depending on whether their outputs are scalar or vector,
and creates an instance of that expression with the specified operation.
"""
exr_class = BIN_EXPR_CLASSES.get(
(left.output_size > 1, right.output_size > 1))
if exr_class is None:
# change the positions of left and right
left, right = right, left
exr_class = ast.BinVectorNumExpr
return exr_class(left, right, op)
|
fb5e6c0434dfa209ab34c4253e71bba6cb74e901
| 27,740 |
import cv2
import numpy as np
def pre_filter(image):
    """Apply a morphological opening filter with a 3x3 kernel."""
    return cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((3, 3)))
|
a1af13d831d8462bc9c23443d2416efe66d3082b
| 27,742 |
def build_corpus(docs: DocumentSet, *, remove_words=None, min_word_length=3,
min_docs=5, max_docs_ratio=0.75, max_tokens=5000,
replace_words=None, custom_bigrams=None, ngram_threshold=None
) -> Corpus:
""" Build a `Corpus` object.
This function takes the words from the title/abstract of the given
documents, preprocesses the tokens, and returns a corpus consisting of a
word frequency vector for each document. This preprocessing stage is
highly customizable, so it is advisable to experiment with the many
parameters.
:param docs: the documents whose titles/abstracts are used to build the corpus.
:param remove_words: list of words that should be ignored while building
the word frequency vectors.
:param min_word_length: Words shorter than this are ignored.
:param min_docs: Words that occur in fewer than this many documents are
ignored.
:param max_docs_ratio: Words that occur in more than this fraction of the
documents are ignored. Should be a ratio between 0 and 1.
:param max_tokens: Only the top most common tokens are preserved.
:param replace_words: Replace words by other words. Must be a `dict`
containing *original word* to *replacement word*
pairs.
:param custom_bigrams: Add custom bigrams. Must be a `dict` where keys
are `(first, second)` tuples and values are
replacements. For example, the key can be
`("Big", "Data")` and the value `"BigData"`.
:param ngram_threshold: Threshold used for n-gram detection. Is passed
to `gensim.models.phrases.Phrases` to detect
common n-grams.
:returns: a `Corpus` object.
"""
filters = []
if custom_bigrams:
filters.append(lambda w: preprocess_merge_bigrams(w, custom_bigrams))
if remove_words:
filters.append(lambda w: preprocess_remove_words(w, remove_words))
if replace_words:
filters.append(lambda w: preprocess_replace_words(w, replace_words))
if min_word_length:
filters.append(lambda w: preprocess_remove_short(w,
min_length=min_word_length))
filters.append(preprocess_stopwords)
if ngram_threshold is not None:
filters.append(lambda w: preprocess_merge_ngrams(w, ngram_threshold))
filters.append(preprocess_smart_stemming)
if min_docs > 1 or max_docs_ratio < 1.0:
max_docs = int(len(docs) * max_docs_ratio)
filters.append(lambda w: preprocess_outliers(w, min_docs, max_docs))
return Corpus(docs, filters, max_tokens)
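# A minimal usage sketch (illustrative only; `docs` and the parameter values below
# are assumptions, not taken from the original source):
#     corpus = build_corpus(docs, remove_words=["copyright"], min_docs=3,
#                           custom_bigrams={("Big", "Data"): "BigData"})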
|
746eaa78fa498cd32950b3a006bfa669431037d0
| 27,743 |
def _Data_to_bytes(data: Data) -> bytes:
"""
Cast websockets.typing.Data to bytes.
Parameters
----------
data : str | bytes
Returns
-------
bytes
Either the encoded string or the original bytes.
"""
return data.encode() if isinstance(data, str) else data
|
0391dd9b9de0c8a978b16b6c89f9f3515f1a49de
| 27,744 |
import time
import numpy as np
import pandas as pd
import torch
def run_pairs(genotype_df, variant_df, phenotype1_df, phenotype2_df, phenotype_pos_df,
covariates1_df=None, covariates2_df=None, p1=1e-4, p2=1e-4, p12=1e-5, mode='beta',
maf_threshold=0, window=1000000, batch_size=10000, logger=None, verbose=True):
"""Compute COLOC for all phenotype pairs"""
assert np.all(phenotype1_df.index == phenotype2_df.index)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if logger is None:
logger = SimpleLogger()
logger.write('Computing COLOC for all pairs of phenotypes')
logger.write(f' * {phenotype1_df.shape[0]} phenotypes')
logger.write(f' * phenotype group 1: {phenotype1_df.shape[1]} samples')
logger.write(f' * phenotype group 2: {phenotype2_df.shape[1]} samples')
if covariates1_df is not None:
assert np.all(phenotype1_df.columns == covariates1_df.index)
logger.write(f' * phenotype group 1: {covariates1_df.shape[1]} covariates')
residualizer1 = Residualizer(torch.tensor(covariates1_df.values, dtype=torch.float32).to(device))
else:
residualizer1 = None
if covariates2_df is not None:
assert np.all(phenotype2_df.columns == covariates2_df.index)
logger.write(f' * phenotype group 2: {covariates2_df.shape[1]} covariates')
residualizer2 = Residualizer(torch.tensor(covariates2_df.values, dtype=torch.float32).to(device))
else:
residualizer2 = None
if maf_threshold > 0:
logger.write(f' * applying in-sample {maf_threshold} MAF filter (in at least one cohort)')
genotype1_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype1_df.columns])
genotype1_ix_t = torch.from_numpy(genotype1_ix).to(device)
genotype2_ix = np.array([genotype_df.columns.tolist().index(i) for i in phenotype2_df.columns])
genotype2_ix_t = torch.from_numpy(genotype2_ix).to(device)
igc = genotypeio.InputGeneratorCis(genotype_df, variant_df, phenotype1_df, phenotype_pos_df, window=window)
coloc_df = []
start_time = time.time()
logger.write(' * Computing pairwise colocalization')
for phenotype1, genotypes, genotype_range, phenotype_id in igc.generate_data(verbose=verbose):
phenotype2 = phenotype2_df.loc[phenotype_id]
# copy to GPU
phenotype1_t = torch.tensor(phenotype1, dtype=torch.float).to(device)
phenotype2_t = torch.tensor(phenotype2, dtype=torch.float).to(device)
genotypes_t = torch.tensor(genotypes, dtype=torch.float).to(device)
genotypes1_t = genotypes_t[:,genotype1_ix_t]
genotypes2_t = genotypes_t[:,genotype2_ix_t]
del genotypes_t
# filter monomorphic sites
m = ((genotypes1_t==0).all(1) | (genotypes1_t==1).all(1) | (genotypes1_t==2).all(1) |
(genotypes2_t==0).all(1) | (genotypes2_t==1).all(1) | (genotypes2_t==2).all(1))
genotypes1_t = genotypes1_t[~m]
genotypes2_t = genotypes2_t[~m]
impute_mean(genotypes1_t)
impute_mean(genotypes2_t)
if maf_threshold > 0:
maf1_t = calculate_maf(genotypes1_t)
maf2_t = calculate_maf(genotypes2_t)
mask_t = (maf1_t >= maf_threshold) | (maf2_t >= maf_threshold)
genotypes1_t = genotypes1_t[mask_t]
genotypes2_t = genotypes2_t[mask_t]
coloc_t = coloc(genotypes1_t, genotypes2_t, phenotype1_t, phenotype2_t,
residualizer1=residualizer1, residualizer2=residualizer2,
p1=p1, p2=p2, p12=p12, mode=mode)
coloc_df.append(coloc_t.cpu().numpy())
logger.write(' time elapsed: {:.2f} min'.format((time.time()-start_time)/60))
coloc_df = pd.DataFrame(coloc_df, columns=[f'pp_h{i}_abf' for i in range(5)], index=phenotype1_df.index)
logger.write('done.')
return coloc_df
|
3a38f2f38bd7351b93d3ede0f6c6d2e07427263d
| 27,745 |
def main(local_argv):
"""
local_argv is the argument list; the program name is the first argument.
This function prints the list of the first n primes (generated by the sieve
of Eratosthenes), where n is given by the command line argument.
"""
if len(local_argv) != 2:
print("must add one and only one command argument, , exit ")
return
argument_n = int(local_argv[1]) #remember, this is the 2nd argument in command line
if argument_n <= 0:
print("please input an positive interger number, exit")
return
retList = []
h = gen_eratosthenes()
[retList.append(next(h)) for _ in range (0,argument_n)] #generates 1 new prime per iteration
#retList =eratosthenes(argument_n)
print(retList)
return retList
|
932c2b7a4f82ed03c62739e6e48350db057957da
| 27,746 |
def alternate(*iterables):
"""
[a[0], b[0], ... , a[1], b[1], ..., a[n], b[n] ...]
>>> alternate([1,4], [2,5], [3,6])
[1, 2, 3, 4, 5, 6]
"""
items = []
    for tup in zip(*iterables):
        items.extend(tup)
return items
|
ed3b0c8a32de8d88fc24b8bb08012a0900b37823
| 27,747 |
import numpy as np
def softmax(x, axis=-1, t=-100.):
    """
    Softmax operation
    Args:
        x (numpy.array): input X
        axis (int): axis for the sum
        t (float): lower bound; very negative inputs are rescaled so the
            minimum equals t, for numerical stability
    Return:
        **softmax** (numpy.array) - softmax(X)
    """
x = x - np.max(x)
if np.min(x) < t:
x = x/np.min(x)*t
e_x = np.exp(x)
return e_x / e_x.sum(axis, keepdims=True)
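# Quick sanity check (assumes numpy is imported as np): softmax(np.array([1., 2., 3.]))
# returns approximately [0.0900, 0.2447, 0.6652], which sums to 1 along the last axis.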
|
7979c7eeaf4be319f06532abc2a2cc3a23af134d
| 27,748 |
def format_params_in_str_format(format_string):
"""
Get the "parameter" indices/names of the format_string
Args:
format_string: A format string (i.e. a string with {...} to mark parameter placement and formatting)
Returns:
A list of parameter indices used in the format string, in the order they appear, with repetition.
Parameter indices can be integers, strings, or None (to denote "automatic field numbering").
>>> format_string = '{0} (no 1) {2}, and {0} is a duplicate, {} is unnamed and {name} is string-named'
>>> list(format_params_in_str_format(format_string))
[0, 2, 0, None, 'name']
"""
return map(lambda x: int(x) if str.isnumeric(x) else x if x != '' else None,
filter(_is_not_none, (x[1] for x in dflt_formatter.parse(format_string))))
|
a8b79cb6ee7a544b60c193dfbc2dbdc22d5d1f92
| 27,749 |
def AMO(df, M1=5, M2=10):
"""
Trading amount (turnover).
:param df: DataFrame with an 'amount' column
:param M1: period of the first moving average
:param M2: period of the second moving average
:return: DataFrame with columns AMOW, AMO1 and AMO2
"""
AMOUNT = df['amount']
AMOW = AMOUNT / 10000.0
AMO1 = MA(AMOW, M1)
AMO2 = MA(AMOW, M2)
return pd.DataFrame({
'AMOW': AMOW, 'AMO1': AMO1, 'AMO2': AMO2
})
|
ef45e245e1abb5705760e55b99e2e0757c66667c
| 27,750 |
def tex_initial_states(data):
"""Initial states are texed."""
initial_state = []
initial_state = [''.join(["\lstick{\ket{", str(data['init'][row]),"}}"]) for row in range(len(data['init']))]
return data, initial_state
|
cd1758b594ee854cfb7854ec742dc177a43b54b7
| 27,751 |
import csv
from packaging.version import parse  # assumed import; the original source of parse() is not shown
def get_upgraded_dependencies_count(repo_path, django_dependency_sheet) -> tuple:
"""
Entry point to read, parse and calculate django dependencies
@param repo_path: path for repo which we are calculating django deps
@param django_dependency_sheet: csv which contains latest status of django deps
@return: tuple of (all django deps in repo, deps supporting Django 3.2, deps already upgraded in repo)
"""
reader_instance = DjangoDependencyReader(repo_path)
deps = reader_instance.read()
django_deps = []
deps_support_django32 = []
upgraded_in_repo = []
csv_path = django_dependency_sheet
with open(csv_path, encoding="utf8") as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',', quotechar='"')
for line in csv_reader:
package_name = line["Django Package Name"]
if package_name in deps.keys(): # pylint: disable=consider-iterating-dictionary
django_deps.append(package_name)
if line["Django 3.2"] and line["Django 3.2"] != '-':
deps_support_django32.append(package_name)
if parse(deps[package_name]) >= parse(line["Django 3.2"]):
upgraded_in_repo.append(package_name)
django_deps = list(set(django_deps))
deps_support_django32 = list(set(deps_support_django32))
upgraded_in_repo = list(set(upgraded_in_repo))
return django_deps, deps_support_django32, upgraded_in_repo
|
1a793c57b69966c45f680fd39fec31a94d4d5616
| 27,752 |
def FanOut(num):
"""Layer construction function for a fan-out layer."""
init_fun = lambda rng, input_shape: ([input_shape] * num, ())
apply_fun = lambda params, inputs, **kwargs: [inputs] * num
return init_fun, apply_fun
|
7e6d07319be600dabf650a4b87f661bf20832455
| 27,754 |
import logging
import urllib.error
import urllib.request
def is_online(url="http://detectportal.firefox.com", expected=b"success\n"):
    """
    Checks whether the user is able to reach a selected URL.
    :param url: The URL to test against.
                Default is http://detectportal.firefox.com.
    :param expected: The response body that indicates connectivity.
    :returns: True if able to connect or False otherwise.
"""
try:
with urllib.request.urlopen(url) as response:
response_data = response.read()
if response_data == expected:
return True
logging.error(
"Response from %s was not %s as expected. Received: %s",
url, expected, response_data
)
return False
except urllib.error.URLError as url_err:
logging.error(
"Unable to connect to %s", url, exc_info=url_err
)
return False
|
00081e05fbb1cfaa81a03a487201af4c07b23fd2
| 27,755 |
def get_Data_temblor():
"""Shows basic usage of the Sheets API.
Creates a Sheets API service object and prints the names and majors of
students in a sample spreadsheet:
https://docs.google.com/spreadsheets/d/1BxiMVs0XRA5nFMdKvBdBZjgmUUqptlbs74OgvE2upms/edit
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?'
'version=v4')
service = discovery.build('sheets',
'v4',
http=http,
discoveryServiceUrl=discoveryUrl)
# DAÑOS Y DERRUMBES VERIFICADOS (verified damage and collapses sheet)
# To download other sheets, change the name in the `range` field
result = service.spreadsheets().values().get(
spreadsheetId='1i__c44wIg760LmxZcM8oTjDR0cGFVdL9YrjbCcb9Op0',
range='Form Responses 1!A1:AH10000').execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
return values
|
1dca23ae977dc4400b31e323d1c2aac5e6e685a3
| 27,756 |
import tensorflow as tf
def triplet_loss(y_true, y_pred, alpha=0.2):
"""
Implementation of the triplet loss as defined by formula (3)
Arguments:
y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
y_pred -- python list containing three objects:
anchor -- the encodings for the anchor images, of shape (None, 128)
positive -- the encodings for the positive images, of shape (None, 128)
negative -- the encodings for the negative images, of shape (None, 128)
Returns:
loss -- real number, value of the loss
"""
anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]
# Compute the (encoding) distance between the anchor and the positive
pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)
# Compute the (encoding) distance between the anchor and the negative
neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)
# subtract the two previous distances and add alpha.
basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)
# Take the maximum of basic_loss and 0.0. Sum over the training examples.
loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0), axis=None)
return loss
|
41257b626fdb4773769bf20d2aa11caaa8896259
| 27,757 |
import inspect
def node_from_dict(node_dict):
"""Create a new node from its ``node_dict``.
This is effectively a shorthand for choosing the correct node class and
then calling its ``from_dict`` method.
Args:
node_dict (dict): dict-representation of the node.
Returns:
New schema node with the specified type and configuration.
"""
node_types = {name: cls for name, cls in globals().items()
if inspect.isclass(cls) and issubclass(cls, SchemaNode)}
if node_dict['node_type'] not in node_types:
raise ValueError('Invalid node type specified.')
node_type = node_types[node_dict['node_type']]
return node_type.from_dict(node_dict)
|
299cc2e1f194338ff3bbc13f729284b61b884b1b
| 27,758 |
def _get_activation(upsample_activation):
"""get activation"""
nonlinear = getattr(nn, upsample_activation)
return nonlinear
|
4423edf977ccd6db6fa6fdb5de426ecbaaed7e55
| 27,759 |
import time
def computeSensitivity(P, W):
"""
The function at hand computes the sensitivity of each point using a reduction from L_\infty to L1.
:return: numpy array with the sensitivity of each point
"""
P = np.hstack((P, np.arange(P.shape[0])[:, np.newaxis]))
B, idxs = applyBiCriterea(P[:, :-1], W) # attain set of flats which gives 2^j approximation to the optimal solution
sensitivity_additive_term = initializeSens(P, B, idxs) # initialize the sensitivities
unique_cetner_idxs = np.unique(idxs) # get unique indices of clusters
sensitivity = np.empty((P.shape[0], ))
clusters = [np.where(idxs == idx)[0] for idx in unique_cetner_idxs]
Qs = [[] for idx in range(len(clusters))]
for idx in range(len(clusters)): # apply L_\infty conversion to L_1 on each cluster of points
# Qs[idx] = np.hstack(((P[clusters[idx], :-1] - B[idx][1]).dot(B[idx][0].T.dot(B[idx][0])), P[clusters[idx], -1][:, np.newaxis]))
Qs[idx] = np.hstack(((P[clusters[idx], :-1] - B[idx][1]).dot(B[idx][0].T), P[clusters[idx], -1][:, np.newaxis]))
ts = time.time()
# s = computeSensitivityPerCluster(Qs[0])
# print('max = {}, min = {}'.format(np.max(s[0,:]), np.min(s[0,:])))
# print('Time for one cluster took {} secs'.format(time.time() - ts))
# input()
# pool = multiprocessing.Pool(3)
# list_of_sensitivities = pool.map(computeSensitivityPerCluster, Qs)
# print('Time for parallel took {} secs'.format(time.time() - ts))
for i in range(len(Qs)):
s = computeSensitivityPerCluster(Qs[i])
sensitivity[s[:, -1].astype(int)] = s[:, 0]
# print('Number of unique values = {}, max = {}, min = {}'.format(np.unique(sensitivity).shape[0],
# np.max(sensitivity), np.min(sensitivity)))
sensitivity += 2 ** Utils.J * sensitivity_additive_term # add the additive term for the sensitivity
return sensitivity
|
089442a4c136ef473582db93b04575562ebecd96
| 27,760 |
def num_bytes_needed_for_data(rom_dict):
"""
Get the number of bytes to store the largest data in the rom.
Args:
rom_dict (dict(int:int)): Dictionary of address and data values
Returns:
(int): Number of bytes needed for largest piece of data
"""
largest_data = max(rom_dict.values())
num_bytes = num_bytes_for_value(largest_data)
return num_bytes
|
af039a39c86c17d0349d53b2e5020d8cf3560f5d
| 27,761 |
def run_simulation(model, **kwargs):
"""Runs the given model using KaSim and returns the parsed results.
Parameters
----------
**kwargs : List of keyword arguments
All keyword arguments specifying conditions for the simulation are
passed to the function :py:func:`run_kasim` (see documentation
associated with that function for more information).
Returns
-------
numpy.ndarray
Returns the kasim simulation data as a Numpy ndarray. Data is accessed
using the syntax::
results[index_name]
The index 'time' gives the data for the time coordinates of the
simulation. Data for the observables can be accessed by indexing the
array with the names of the observables.
"""
outs = run_kasim(model, **kwargs)
return parse_kasim_outfile(outs['out'])
|
26aba6818a567faf8a08df8d5a47c213bb717183
| 27,762 |
import importlib
def _bot_exists(botname):
"""
Utility method to import a bot.
"""
module = None
try:
module = importlib.import_module('%s.%s' % (botname, botname))
except ImportError as e:
quit('Unable to import bot "%s.%s": %s' % (botname, botname, str(e)))
return module
|
c091be6d586faa8aacd48b30f4ce2f4fcc665e0b
| 27,763 |
def bootstrap_alert_message(msg, alert_type):
"""
Wrap Ajax error message for display
:param msg: Message text
:param alert_type: must be alert-danger, alert-success, alert-info, alert-warning
:return: html formatted message
"""
if not msg:
msg = _('An unknown error has occurred')
if alert_type == 'error':
alert_type = 'danger'
if not alert_type or alert_type not in ('danger', 'success', 'info', 'warning'):
alert_type = 'warning'
alert_label = alert_type
if alert_label == 'danger':
alert_label = 'error'
f_msg = """
<div class="alert alert-{0} alert-dismissable fade show">
<button type="button" class="close" data-dismiss="alert">×</button>
<strong>{1}:</strong> {2}.
</div>
""".format(alert_type, alert_label.title(), msg)
return f_msg
|
cec9fa851272274a734dfbf6599ee67ed2c7c9f8
| 27,765 |
def get_message_dispatched(correlation_id, steps, primary=True):
"""
Gets the flag from cache indicating whether a message has been dispatched.
:param correlation_id: a str guid for the fsm
:param steps: an integer corresponding to the step in the fsm execution
:param primary: if True, use the primary cache source; otherwise the secondary
:return: True if cached and False otherwise
"""
if primary:
source_arn = get_primary_cache_source()
else:
source_arn = get_secondary_cache_source()
service = get_arn_from_arn_string(source_arn).service
if not service: # pragma: no cover
log_once(logger.warning, "No cache source for primary=%s" % primary)
elif service == AWS.ELASTICACHE:
engine, _ = _get_elasticache_engine_and_connection(source_arn)
if engine == AWS_ELASTICACHE.ENGINE.MEMCACHED:
return _get_message_dispatched_memcache(source_arn, correlation_id, steps)
elif engine == AWS_ELASTICACHE.ENGINE.REDIS:
return _get_message_dispatched_redis(source_arn, correlation_id, steps)
elif service == AWS.DYNAMODB:
return _get_message_dispatched_dynamodb(source_arn, correlation_id, steps)
|
cd9a373f6b104617dfae2d488fc2348ffae61c5b
| 27,766 |
def _format_exponent_notation(input_number, precision, num_exponent_digits):
"""
Format the exponent notation. Python's exponent notation doesn't allow
for a user-defined number of exponent digits.
Based on [Anurag Uniyal's answer][answer] to the StackOverflow
question ['Python - number of digits in exponent'][question]
[question]: http://stackoverflow.com/q/9910972/95592
[answer]: http://stackoverflow.com/a/9911741/95592
"""
python_exponent_notation = '{number:.{precision}e}'.format(
number=input_number,
precision=precision)
mantissa, exponent = python_exponent_notation.split('e')
# Add 1 to the desired number of exponent digits to account for the sign
return '{mantissa}e{exponent:+0{exp_num}d}'.format(
mantissa=mantissa,
exponent=int(exponent),
exp_num=num_exponent_digits+1)
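# For example, _format_exponent_notation(123.456, precision=3, num_exponent_digits=3)
# yields '1.235e+002', whereas Python's plain '{:.3e}'.format(123.456) gives '1.235e+02'.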
|
59f61897c70ca1d9f95412b2892d5c9592e51561
| 27,767 |
import typing
def unicode_blackboard_activity_stream(
activity_stream: typing.List[blackboard.ActivityItem]=None,
indent: int=0,
show_title: bool=True
):
"""
Pretty print the blackboard stream to console.
Args:
activity_stream: the log of activity, if None, get the entire activity stream
indent: the number of characters to indent the blackboard
show_title: include the title in the output
"""
return _generate_text_activity(
activity_stream=activity_stream,
show_title=show_title,
indent=indent,
symbols=unicode_symbols if console.has_unicode() else ascii_symbols
)
|
435c52b630d467aea2835644297e5099d1c69490
| 27,768 |
def normalize(vector):
"""Normalize a vector to unit length.
Args:
vector (list/tuple/numpy.ndarray): Array to be normalized
Return:
vector (numpy.ndarray): Normalized vector.
"""
length = np.sqrt(np.sum(np.asarray(vector) ** 2))
if length == 0:
return np.asarray(vector)
else:
return np.asarray(vector) / length
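# Example: normalize([3, 4]) returns array([0.6, 0.8]), and normalize([0, 0, 0])
# returns the zero vector unchanged rather than dividing by zero.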
|
5f51933a93fd6b3df0955024585105a8887f3fd2
| 27,770 |
def get_struc_qty(*args):
"""get_struc_qty() -> size_t"""
return _idaapi.get_struc_qty(*args)
|
f370db3d60fdc0b5870baf8a9b616163d55860c6
| 27,771 |
from typing import Callable
def evaluate0(gametree_: Callable[[Board],
Node], static_eval_: Callable[[Board], int],
prune_: Callable[[Node], Node]) -> Callable[[Board], int]:
"""Return a tree evaluation function"""
def evaluate_(board: Board) -> int:
return maximize0(maptree(static_eval_, prune_(gametree_(board))))
return evaluate_
|
eda97c4a6be5bac08ca0a73e0653b9c58321b539
| 27,772 |
import http
def fortune(inp):
"""
.fortune -- returns one random card and it's fortune
"""
try:
cards = http.get_json("https://tarot-api.com/draw/1")
except HTTPError:
return "The spirits are displeased."
card = cards[0]
return card["name"] + ": " + ", ".join(card["keywords"])
|
d167238880a63afdd0aee11c2d02354a672d7fb8
| 27,773 |
def declin_0(x, obl):
"""
declination of a point of ecliptic
:param x: longitude of the point in degree
:param obl: obliquity of the ecliptic in degree
:return: declination in degree
"""
return DEG * m.asin(m.sin(x * RAD) * m.sin(obl * RAD))
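# Worked example, assuming DEG = 180/pi and RAD = pi/180 (these constants and the
# math alias `m` are not shown in the snippet): declin_0(90.0, 23.44) returns
# approximately 23.44, since a point at ecliptic longitude 90 degrees reaches the full obliquity.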
|
5853ceb9d0424618a1b1406ee53ea87779f3b535
| 27,774 |
def network_data_structures(stream_names_tuple, agent_descriptor_dict):
"""Builds data structures for the network. These data
structures are helpful for animating the network and
for building networks of processes.
Parameters
----------
Same as for make_network.
Return Values
-------------
(stream_to_agent_list_dict,
agent_to_stream_dict,
agent_to_agent_list_dict)
stream_to_agent_list_dict
key: stream_name
value: list of agent_name.
The stream with name stream_name (the key)
is an input stream of each agent
whose name is in the list (the value).
For example if key is 's' and value is
['u', 'v', 'w'] then the stream with name 's'
is an input stream of the agents with names
'u', 'v', and 'w'.
agent_to_stream_dict
key: stream_name
value: str. A single agent_name.
The stream with name stream_name (the key)
is the unique output stream of the agent
with name agent_name (the value). For example,
if a key is 's' and the corresponding value
is 'a', then the stream with name 's' is
generated by the agent with name 'a'.
agent_to_agent_list_dict
key: agent_name
value: list of agent names
The agent with name agent_name (the key) has an
output stream to each agent whose name is in value.
agent_from_agent_list_dict
key: agent_name
value: list of agent names
The agent with name agent_name (the key) has an
input stream from each agent whose name is in value.
"""
stream_to_agent_list_dict = dict()
for stream_name in stream_names_tuple:
stream_to_agent_list_dict[stream_name] = list()
agent_to_stream_dict = dict()
# Construct stream_to_agent_list_dict and agent_to_stream_dict
# from agent_descriptor_dict
for agent_name, descriptor in agent_descriptor_dict.items():
input_stream_list = descriptor[0]
output_stream_list = descriptor[1]
for stream_name in input_stream_list:
stream_to_agent_list_dict[stream_name].append(agent_name)
for stream_name in output_stream_list:
if stream_name in agent_to_stream_dict:
raise Exception(
stream_name + ' output by ' + agent_to_stream_dict[stream_name] + ' and ' + agent_name)
agent_to_stream_dict[stream_name] = agent_name
# Construct agent_to_agent_list_dict from
# agent_descriptor_dict, stream_to_agent_list_dict, and
# agent_to_stream_dict.
agent_to_agent_list_dict = dict()
# Initialize agent_to_agent_list_dict
for agent_name in agent_descriptor_dict.keys():
agent_to_agent_list_dict[agent_name] = list()
# Compute agent_to_agent_list_dict
# If a stream is output of agent x and input to agents y, z
# then agent x outputs to [y,z]
for stream_name, agent_name in agent_to_stream_dict.items():
agent_to_agent_list_dict[agent_name].extend(
stream_to_agent_list_dict[stream_name])
# Construct agent_from_agent_list_dict from
# agent_descriptor_dict, stream_to_agent_list_dict, and
# agent_to_stream_dict.
agent_from_agent_list_dict = dict()
# Initialize agent_from_agent_list_dict
for agent_name in agent_descriptor_dict.keys():
agent_from_agent_list_dict[agent_name] = list()
# Compute agent_from_agent_list_dict
# If a stream is an input of agent x and is an output of agents y, z
# then agents[y,z] output to agent x.
for stream_name, agent_name_list in stream_to_agent_list_dict.items():
for receiving_agent_name in agent_name_list:
agent_from_agent_list_dict[receiving_agent_name].append(
agent_to_stream_dict[stream_name])
return (stream_to_agent_list_dict, agent_to_stream_dict,
agent_to_agent_list_dict, agent_from_agent_list_dict)
|
158484dd882eb14da1824408c35d7124282d47e1
| 27,775 |
import numpy
from typing import OrderedDict
def to_onnx(model, X=None, name=None, initial_types=None,
target_opset=None, options=None, rewrite_ops=False,
white_op=None, black_op=None, final_types=None):
"""
Converts a model using on :epkg:`sklearn-onnx`.
@param model model to convert or a function
wrapped into :epkg:`_PredictScorer` with
function :epkg:`make_scorer`
@param X training set (at least one row),
can be None, it is used to infer the
input types (*initial_types*)
@param initial_types if *X* is None, then *initial_types* must be
defined
@param name name of the produced model
@param target_opset to do it with a different target opset
@param options additional parameters for the conversion
@param rewrite_ops rewrites some existing converters,
the changes are permanent
@param white_op white list of ONNX nodes allowed
while converting a pipeline, if empty,
all are allowed
@param black_op black list of ONNX nodes allowed
while converting a pipeline, if empty,
none are blacklisted
@param final_types a python list. Works the same way as
initial_types but not mandatory, it is used
to overwrite the type (if type is not None)
and the name of every output.
@return converted model
The function rewrites function *to_onnx* from :epkg:`sklearn-onnx`
but may change a few converters if *rewrite_ops* is True.
For example, :epkg:`ONNX` only supports *TreeEnsembleRegressor*
for float but not for double. It becomes available
if ``rewrite_ops=True``.
.. faqref::
:title: How to deal with a dataframe as input?
Each column of the dataframe is considered as a named input.
The first step is to make sure that every column type is correct.
:epkg:`pandas` tends to select the least generic type to
hold the content of one column. :epkg:`ONNX` does not automatically
cast the data it receives. The data must have the same type when
the model is converted and when the converted model receives
the data to predict.
.. runpython::
:showcode:
from io import StringIO
from textwrap import dedent
import numpy
import pandas
from pyquickhelper.pycode import ExtTestCase
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from mlprodict.onnx_conv import to_onnx
from mlprodict.onnxrt import OnnxInference
text = dedent('''
__SCHEMA__
7.4,0.7,0.0,1.9,0.076,11.0,34.0,0.9978,3.51,0.56,9.4,5,red
7.8,0.88,0.0,2.6,0.098,25.0,67.0,0.9968,3.2,0.68,9.8,5,red
7.8,0.76,0.04,2.3,0.092,15.0,54.0,0.997,3.26,0.65,9.8,5,red
11.2,0.28,0.56,1.9,0.075,17.0,60.0,0.998,3.16,0.58,9.8,6,red
''')
text = text.replace(
"__SCHEMA__",
"fixed_acidity,volatile_acidity,citric_acid,residual_sugar,chlorides,"
"free_sulfur_dioxide,total_sulfur_dioxide,density,pH,sulphates,"
"alcohol,quality,color")
X_train = pandas.read_csv(StringIO(text))
for c in X_train.columns:
if c != 'color':
X_train[c] = X_train[c].astype(numpy.float32)
numeric_features = [c for c in X_train if c != 'color']
pipe = Pipeline([
("prep", ColumnTransformer([
("color", Pipeline([
('one', OneHotEncoder()),
('select', ColumnTransformer(
[('sel1', 'passthrough', [0])]))
]), ['color']),
("others", "passthrough", numeric_features)
])),
])
pipe.fit(X_train)
pred = pipe.transform(X_train)
print(pred)
model_onnx = to_onnx(pipe, X_train, target_opset=12)
oinf = OnnxInference(model_onnx)
# The dataframe is converted into a dictionary,
# each key is a column name, each value is a numpy array.
inputs = {c: X_train[c].values for c in X_train.columns}
inputs = {c: v.reshape((v.shape[0], 1)) for c, v in inputs.items()}
onxp = oinf.run(inputs)
print(onxp)
"""
if isinstance(model, OnnxOperatorMixin):
if not hasattr(model, 'op_version'):
raise RuntimeError( # pragma: no cover
"Missing attribute 'op_version' for type '{}'.".format(
type(model)))
return model.to_onnx(
X=X, name=name, options=options, black_op=black_op,
white_op=white_op, final_types=final_types)
if rewrite_ops:
old_values = register_rewritten_operators()
register_converters()
else:
old_values = None
def _guess_type_(X, itype, dtype):
initial_types = guess_initial_types(X, itype)
if dtype is None:
if hasattr(X, 'dtypes'): # DataFrame
dtype = numpy.float32
elif hasattr(X, 'dtype'):
dtype = X.dtype
elif hasattr(X, 'type'):
dtype = guess_numpy_type(X.type)
elif initial_types is not None:
dtype = guess_numpy_type(initial_types[0][1])
else:
raise RuntimeError( # pragma: no cover
"dtype cannot be guessed: {}".format(
type(X)))
if dtype != numpy.float64:
dtype = numpy.float32
if dtype is None:
raise RuntimeError("dtype cannot be None") # pragma: no cover
if isinstance(dtype, FloatTensorType):
dtype = numpy.float32
elif isinstance(dtype, DoubleTensorType):
dtype = numpy.float64
new_dtype = dtype
if isinstance(dtype, numpy.ndarray):
new_dtype = dtype.dtype
elif isinstance(dtype, DataType):
new_dtype = numpy.float32
if new_dtype not in (numpy.float32, numpy.float64, numpy.int64,
numpy.int32):
raise NotImplementedError( # pragma: no cover
"dtype should be real not {} ({})".format(new_dtype, dtype))
return initial_types, dtype, new_dtype
if isinstance(model, _PredictScorer):
if X is not None and not isinstance(X, OrderedDict):
raise ValueError("For a scorer, parameter X should be a OrderedDict not {}."
"".format(type(X)))
if initial_types is None:
dts = []
initial_types = []
for k, v in X.items():
if hasattr(v, 'dtype'):
dtype = guess_numpy_type(v.dtype)
else:
dtype = v
if dtype != numpy.float64:
dtype = numpy.float32
it, _, ndt = _guess_type_(v, None, dtype)
for i in range(len(it)): # pylint: disable=C0200
it[i] = (k, it[i][1]) # pylint: disable=C0200
initial_types.extend(it)
dts.append(ndt)
ndt = set(dts)
if len(ndt) != 1:
raise RuntimeError( # pragma: no cover
"Multiple dtype is not efficient {}.".format(ndt))
res = convert_scorer(model, initial_types, name=name,
target_opset=target_opset, options=options,
black_op=black_op, white_op=white_op,
final_types=final_types)
else:
if name is None:
name = "mlprodict_ONNX(%s)" % model.__class__.__name__
initial_types, dtype, _ = _guess_type_(X, initial_types, None)
res = convert_sklearn(model, initial_types=initial_types, name=name,
target_opset=target_opset, options=options,
black_op=black_op, white_op=white_op,
final_types=final_types)
if old_values is not None:
register_rewritten_operators(old_values)
return res
|
27a21d66d78e2432362718b89d8df830471d521c
| 27,776 |
import numpy as np
def setRx(phi):
    """Rotation matrix around the x axis."""
    # standard rotation about x: [[1, 0, 0], [0, cos, -sin], [0, sin, cos]]
    Rx = np.zeros((phi.size, 3, 3))
    Rx[:, 0, 0] = 1.
    Rx[:, 1, 1] = np.cos(phi)
    Rx[:, 2, 2] = Rx[:, 1, 1]
    Rx[:, 1, 2] = -np.sin(phi)
    Rx[:, 2, 1] = -Rx[:, 1, 2]
    return Rx
|
a70699ccc61b30d820fd9172b459680a1287e8e0
| 27,777 |
def discrete_resample(df, freq_code, agg_fun, remove_inter=False, **kwargs):
"""
Function to properly set up a resampling class for discrete data. This assumes a linear interpolation between data points.
Parameters
----------
df: DataFrame or Series
DataFrame or Series with a time index.
freq_code: str
Pandas frequency code. e.g. 'D'.
agg_fun : str
    The aggregation function to be applied on the resampling object.
remove_inter : bool
    If True, drop resampled periods that contain no original observations
    (i.e. values derived purely from interpolation).
**kwargs
    Any keyword args passed to Pandas resample.
Returns
-------
Pandas DataFrame or Series
"""
if isinstance(df, (pd.Series, pd.DataFrame)):
if isinstance(df.index, pd.DatetimeIndex):
reg1 = pd.date_range(df.index[0].ceil(freq_code), df.index[-1].floor(freq_code), freq=freq_code)
reg2 = reg1[~reg1.isin(df.index)]
if isinstance(df, pd.Series):
s1 = pd.Series(np.nan, index=reg2)
else:
s1 = pd.DataFrame(np.nan, index=reg2, columns=df.columns)
s2 = pd.concat([df, s1]).sort_index()
s3 = s2.interpolate('time')
s4 = (s3 + s3.shift(-1))/2
s5 = s4.resample(freq_code, **kwargs).agg(agg_fun).dropna()
if remove_inter:
index1 = df.index.floor(freq_code).unique()
s6 = s5[s5.index.isin(index1)].copy()
else:
s6 = s5
else:
raise ValueError('The index must be a datetimeindex')
else:
raise TypeError('The object must be either a DataFrame or a Series')
return s6
|
502a63b81c8cf027853e45f77e2c9b18c1beddb4
| 27,778 |
def reflect(data, width):
"""Ceflect a data word, means revert the bit order."""
reflected = data & 0x01
for _ in range(width - 1):
data >>= 1
reflected = (reflected << 1) | (data & 0x01)
return reflected
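# Example: reflect(0b1101, 4) returns 0b1011 (11), the 4-bit reversal of 13.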
|
bd5a0b804419c52ebdc6777fa0256c6a2dd4475c
| 27,779 |
def snell_angle_2(angle_1, n_1, n_2):
"""Calculate the angle of refraction of a ray travelling between two mediums
according to Snell's law.
Args: angle_1 (array_like[float]): angle of incidence with respect to surface
normal in radians. n_1 (float): index of refraction in first medium.
n_2 (float): index of refraction in second medium. Returns: float: angle of
refraction in radians.
"""
angle_2 = np.arcsin(n_1 / n_2 * np.sin(angle_1))
return angle_2
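# Worked example (air to glass): snell_angle_2(np.radians(30.0), 1.0, 1.5) gives about
# 0.3398 rad (~19.47 degrees), since sin(angle_2) = (1.0 / 1.5) * sin(30 deg) = 1/3.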
|
5917dfc412e002bdfe494b3648dfdb7ba63a3cd7
| 27,780 |
def npv(ico, nci, r, n):
""" This capital budgeting function computes the net present
value on a cash flow generating investment.
ico = Initial Capital Outlay
nci = net cash inflows per period
r = discounted rate
n = number of periods
Example: npv(100000, 15000, .03, 10)
"""
pv_nci = 0
for x in range(n):
pv_nci = pv_nci + (nci/((1 + r) ** (x + 1)))
return pv_nci - ico
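# Worked example for the docstring's call (a sanity check, not from the source):
# npv(100000, 15000, .03, 10) discounts ten inflows of 15,000 at 3%, giving a
# present value of about 127,953, so the function returns roughly 27,953.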
|
fa3128de0fe8a2f7b8bbe754f0e1b1e1a0eb222d
| 27,781 |
import time
import functools
import threading
def instrument_endpoint(time_fn=time.time):
"""Decorator to instrument Cloud Endpoint methods."""
def decorator(fn):
method_name = fn.__name__
assert method_name
@functools.wraps(fn)
def decorated(service, *args, **kwargs):
service_name = service.__class__.__name__
endpoint_name = '/_ah/spi/%s.%s' % (service_name, method_name)
start_time = time_fn()
response_status = 0
interface.state.store.initialize_context()
flush_thread = None
time_now = time_fn()
if need_to_flush_metrics(time_now):
flush_thread = threading.Thread(target=_flush_metrics, args=(time_now,))
flush_thread.start()
try:
ret = fn(service, *args, **kwargs)
response_status = 200
return ret
except endpoints.ServiceException as e:
response_status = e.http_status
raise
except Exception:
response_status = 500
raise
finally:
if flush_thread:
flush_thread.join()
elapsed_ms = int((time_fn() - start_time) * 1000)
http_metrics.update_http_server_metrics(
endpoint_name, response_status, elapsed_ms)
return decorated
return decorator
|
cf2fdfff5aa8854cc6c02850d6a9787ca45b7c7e
| 27,782 |
def manhattan_distances(X, Y):
"""Compute pairwise Manhattan distance between the rows of two matrices X (shape MxK)
and Y (shape NxK). The output of this function is a matrix of shape MxN containing
the Manhattan distance between two rows.
Arguments:
X {np.ndarray} -- First matrix, containing M examples with K features each.
Y {np.ndarray} -- Second matrix, containing N examples with K features each.
Returns:
D {np.ndarray}: MxN matrix with Manhattan distances between rows of X and rows of Y.
"""
all_distances = []
for exampleX in X:
one_row = []
for exampleY in Y:
one_row.append(manhattan_distance(exampleX, exampleY))
all_distances.append(one_row)
return np.array(all_distances)
# raise NotImplementedError()
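# A vectorized equivalent, assuming `manhattan_distance` sums absolute coordinate
# differences, would be: np.abs(X[:, None, :] - Y[None, :, :]).sum(axis=-1)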
|
0cbec7eae4cb33d0ed13947bbc7df7b4b6171807
| 27,783 |
def remove_access_group(request):
"""
Remove an access group from the image.
"""
if request.method != "POST":
messages.error(request, "Invalid request method.")
return redirect('images')
if 'id' not in request.POST or not request.POST.get('id').isdigit() or 'access_group' not in request.POST:
messages.error(request, "Invalid POST request.")
return redirect('images')
group_id = request.POST.get('access_group')
img_id = request.POST.get('id')
client = get_httpclient_instance(request)
params = {"access_groups": [group_id]}
try:
client.containers.images(img_id).remove_access_groups.post(params)
messages.success(request, "Group successfully removed from image.")
except Exception as e:
messages.error(request, api_error_message(e, params))
return(redirect('image_manage', img_id))
return redirect('image_manage', img_id)
|
1e61b0a403a06154dce5b7f745a401e7ddf635d6
| 27,784 |
def replace_strings_in_file(i):
"""
Input: {
file
(file_out) - if !='', use this file for output, otherwise overwrite original one!
replacement_json_file - replacement file with multiple strings to substitute
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
(updated) - if 'yes', file was updated
}
"""
o=i.get('out','')
fin=i['file']
rjf=i['replacement_json_file']
fout=i.get('file_out','')
if fout=='': fout=fin
rx=ck.load_text_file({'text_file':fin})
if rx['return']>0: return rx
s=rx['string']
rx=ck.load_json_file({'json_file':rjf})
if rx['return']>0: return rx
rep=rx['dict']
sx=s
for k in rep:
v=rep[k]
sx=sx.replace(k,v)
r={'return':0, 'updated':'no'}
if s!=sx or fin!=fout:
r=ck.save_text_file({'text_file':fout, 'string':sx})
r['updated']='yes'
return r
|
b15d3058e1486d6f6171438bd62da05cb8d5732b
| 27,786 |
import cv2 as cv
import numpy as np
def fill_polygon(points, im_shape):
"""Fill the polygon defined by convex or contour points
Parameters
----------
points: array
Coordinates of the points that define the convex or contour of the mask
im_shape: array
Array shape of the mask
Returns
-------
im_cnt: array
Filled contour or convex
"""
im_cnt = np.zeros((im_shape[0],im_shape[1],1), np.uint8)
cv.fillPoly(im_cnt, [points], (255,255))
return im_cnt
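# Usage sketch (the triangle coordinates below are illustrative, not from the source):
#     triangle = np.array([[10, 10], [50, 10], [30, 40]], dtype=np.int32)
#     mask = fill_polygon(triangle, (64, 64))  # 64x64x1 uint8 image, triangle filled with 255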
|
abbe69180582c12233c8632ca2ca60485ce4d717
| 27,787 |
def load(section, option, archive=_ConfigFile):
"""
Load variable
"""
cfg = ConfigParser()
try:
        with open(archive) as config_file:
            cfg.read_file(config_file)
    except Exception as e:
sys.stderr.write("%s, %s\n" % (archive, e.strerror))
return
try:
return cfg.get(section, option)
except:
sys.stderr.write("Incorrect value for %s or %s parameter\n" % \
(section, option))
return
|
3cf8bebb9ffdaf4a25ece950560680fabdf8c459
| 27,788 |
def merge_duplicates(model_name, keep_descriptors=False):
"""
Identifies repeated experimental values and returns mean values for those
data along with their standard deviation. Only aggregates experimental
values that have been acquired at the same temperature and pressure.
Parameters
----------
model_name: dev_model
the dev_model object to be interrogated
keep_descriptors: boolean, default False
if True descriptors will be included in the output DataFrame
Returns
-----------
out: dataframe
pandas DataFrame of the original data where repeated measurements
have been averaged and their variance stored in a separate column
"""
model_outputs = -6 + model_name.Data_summary.shape[0]
devmodel = model_name
cols = devmodel.Data.columns
if (devmodel.Data.iloc[:, -(4 + model_outputs):-4].max() < 700).all():
for output_index in range(model_outputs):
devmodel.Data.iloc[:, -(5 + output_index)] = \
devmodel.Data.iloc[:, -(5 + output_index)].apply(
lambda x: exp(float(x)))
output_val = pd.DataFrame()
output_xtd = pd.DataFrame()
for output_index in range(model_outputs):
val = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].mean().\
reset_index()
xtd = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].std().\
reset_index()
if output_index == 0:
output_val = val
output_xtd = xtd
else:
output_val = pd.merge(output_val, val)
output_xtd = pd.merge(output_xtd, xtd)
size = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)[cols[-(5 + output_index)]].count().\
reset_index()
cations = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)['name-cation'].first().reset_index()
anions = devmodel.Data.groupby(['smiles-cation', 'smiles-anion']
)['name-anion'].first().reset_index()
size.columns.values[2] = "count"
salts = (devmodel.Data["smiles-cation"] + "." + devmodel.
Data["smiles-anion"]).unique()
print("Identified {} unique salts in {} datapoints".
format(len(salts), devmodel.Data.shape[0]))
out = pd.merge(output_val, output_xtd,
on=['smiles-cation', 'smiles-anion'],
suffixes=['_mean', '_std'])
out = pd.merge(out, size)
out = pd.merge(out, cations)
out = pd.merge(out, anions)
if keep_descriptors:
cationDescriptors = load_data("cationDescriptors.csv")
cationDescriptors.columns = [str(col) + '-cation' for
col in cationDescriptors.columns]
anionDescriptors = load_data("anionDescriptors.csv")
anionDescriptors.columns = [str(col) + '-anion' for
col in anionDescriptors.columns]
new_df = pd.merge(cationDescriptors, out,
on=["name-cation", "smiles-cation"], how="right")
new_df = pd.merge(anionDescriptors, new_df,
on=["name-anion", "smiles-anion"], how="right")
out = new_df
return out
|
a3baa232535f9c3bfd9496259f0cf3fd6142665b
| 27,789 |
def autoencoder_cost_and_grad_sparse(theta, visible_size, hidden_size, lambda_, rho_, beta_, data):
"""
Version of cost and grad that incorporates the hidden layer sparsity constraint
rho_ : the target sparsity limit for each hidden node activation
beta_ : controls the weight of the sparsity penalty term relative
to other loss components
The input theta is a 1-dimensional array because scipy.optimize.minimize expects
the parameters being optimized to be a 1d array.
First convert theta from a 1d array to the (W1, W2, b1, b2)
matrix/vector format, so that this follows the notation convention of the
lecture notes and tutorial.
You must compute the:
cost : scalar representing the overall cost J(theta)
grad : array representing the corresponding gradient of each element of theta
"""
m = data.shape[1]
len = visible_size * hidden_size
w1 = theta[0 : len].reshape((hidden_size, visible_size)) # (h,v)
w2 = theta[len : 2*len].reshape((visible_size, hidden_size)) # (v,h)
b1 = theta[2*len : 2*len + hidden_size].flatten() # (h)
b2 = theta[2*len + hidden_size: ].flatten() # (v)
# FORWARD PROPAGATION (Lecture 24, Slides 11-13)
# HW5 #4: Vectorized Implementation of Forward Propagation
# Code moved to autoencoder_feedforward(theta, visible_size, hidden_size, data)
tau = autoencoder_feedforward(theta, visible_size, hidden_size, data)
z2 = tau[0 : hidden_size] # (h,m)
a2 = tau[hidden_size : 2*hidden_size] # (h,m)
z3 = tau[2*hidden_size : 2*hidden_size + visible_size] # (v,m)
h = tau[2* hidden_size + visible_size:] # (v,m)
# COST FUNCTION (Equation on Lecture 24, Slide 15)
#
# J(W,b) = Squared Error Term + Weight Decay Term (for regularization)
#
# = Sum{m=1...m} ||h_{W,b}(x^(i)) - y^(i)||^2 / (2m) + lambda/2 * \
# Sum{l=1...n_l-1} Sum{i=1...s_l} Sum{j=1...s_l+1} (W_{j,i}^(i))^2
#
# where
# m = # training pairs {(x^(1), y^(1)), ... , (x^(m), y^(,))}
cost = np.linalg.norm(h - data)**2/2./m + lambda_/2. * (np.linalg.norm(w1)**2 + np.linalg.norm(w2)**2)
#---------------------------#
# SPARSITY PARAMETER
# http://ufldl.stanford.edu/wiki/index.php/Autoencoders_and_Sparsity
#
# rho-hat = Sum{i=1...m} [a_j^(2) * x^(i)] / m
rhat = np.sum(a2, axis=1) / m
# Kullback-Leibler (KL) Divergence
# KL(rho || rho-hat) = rho * log(rho/rho_hat) + (1-rho) * log((1-rho)/(1-rho_hat))
# Penalty = Sum{j=1...s_2} KL(rho || rho-hat)
kl = np.sum(rho_ * np.log(rho_/rhat) + (1-rho_) * np.log((1-rho_)/(1-rhat)))
cost += beta_ * kl
#---------------------------#
# BACKPROPAGATION (Lecture 24, Slides 15-16)
# Step 1: Perform feedforward pass, computing activations for layers L_{2...n}.
# Completed above.
# Step 2: Compute "error terms." (Slide 16)
# Use original equation for delta3.
# Use modified version for delta2.
sparsity_der = beta_ * (-rho_/rhat + (1-rho_)/(1-rhat))
delta3 = -(data - h) * derivative(z3)
delta2 = (w2.T.dot(delta3) + np.repeat(sparsity_der,m).reshape((sparsity_der.shape[0],m))) * derivative(z2)
#---------------------------#
# Step 3: Compute partial derivatives. (Slide 15)
# partial J / partial W^(l) = a_j^(l) * delta_i^(l+1)
# partial J / partial b_i^(l) = delta_i^(l+1)
w1_grad = delta2.dot(data.T) / m + lambda_ * w1
w2_grad = delta3.dot(a2.T) / m + lambda_ * w2
b1_grad = np.sum(delta2, axis=1) / m
b2_grad = np.sum(delta3, axis=1) / m
grad = np.concatenate((w1_grad.flatten(), w2_grad.flatten(), b1_grad, b2_grad))
#print("\tgrad shape:", grad.shape)
return cost, grad
|
9810517cab7adc5ca71d46d602e347b301a392d4
| 27,790 |
def isnpixok(npix):
"""Return :const:`True` if npix is a valid value for healpix map size, :const:`False` otherwise.
Parameters
----------
npix : int, scalar or array-like
integer value to be tested
Returns
-------
ok : bool, scalar or array-like
:const:`True` if given value is a valid number of pixel, :const:`False` otherwise
Examples
--------
>>> import healpy as hp
>>> hp.isnpixok(12)
True
>>> hp.isnpixok(768)
True
>>> hp.isnpixok([12, 768, 1002])
array([ True, True, False], dtype=bool)
"""
nside = np.sqrt(np.asarray(npix) / 12.0)
return nside == np.floor(nside)
|
9501ab6e3172761cba0ec2e3693e5442924162af
| 27,791 |
def get_team_metadata(convert_users_role_to_string=False):
"""Returns team metadata
:param convert_users_role_to_string: convert integer team users' roles to human comprehensible strings
:type convert_users_role_to_string: bool
:return: team metadata
:rtype: dict
"""
response = _api.send_request(req_type='GET', path=f'/team/{_api.team_id}')
if not response.ok:
raise SABaseException(
response.status_code, "Couldn't get team metadata. " + response.text
)
res = response.json()
if convert_users_role_to_string:
for user in res["users"]:
user["user_role"] = user_role_int_to_str(user["user_role"])
return res
|
6b48cc6f30bd2304f54c1ff695be6faaab187a9f
| 27,794 |
def get_request_timestamp(req_type=None, timestamp=getCurrentMillis()):
"""
:param req_type: YTD, QTD, MTD, WTD, TODAY or None
if None return first unix timestamp
:return: unix timestamp
"""
@dataclass
class YTD(object):
pass
bench_date = pd.to_datetime(getUTCBeginDay(timestamp), unit='ms')
if req_type not in (YTD, QTD, MTD, WTD, TODAY):
return DAY_OF_MILLISECONDS
if req_type == YTD:
date = bench_date + pd.tseries.offsets.DateOffset(months=1 - bench_date.month, days=1 - bench_date.day)
if req_type == QTD:
date = bench_date + pd.tseries.offsets.DateOffset(months=-((bench_date.month - 1) % 3),
days=1 - bench_date.day)
if req_type == MTD:
date = bench_date + pd.tseries.offsets.DateOffset(days=1 - bench_date.day)
if req_type == WTD:
date = bench_date + pd.tseries.offsets.DateOffset(days=-bench_date.weekday())
if req_type == TODAY:
date = bench_date
return getMillisSeconds(date) - DAY_OF_MILLISECONDS
|
2e18c23167bca388b8f2ad4febfe6f3a39df1151
| 27,795 |
def to_nest_placeholder(nested_tensor_specs,
default=None,
name_scope="",
outer_dims=()):
"""Converts a nest of TensorSpecs to a nest of matching placeholders.
Args:
nested_tensor_specs: A nest of tensor specs.
default: Optional constant value to set as a default for the placeholder.
name_scope: String name for the scope to create the placeholders in.
outer_dims: Optional leading dimensions for the placeholder.
Returns:
A nest of placeholders matching the given tensor spec.
Raises:
ValueError: If a default is provided outside of the allowed types, or if
default is a np.array that does not match the spec shape.
"""
if default is None:
to_ph = lambda spec: to_placeholder(spec, outer_dims=outer_dims)
else:
if not isinstance(default, (int, float, np.ndarray)):
raise ValueError("to_nest_placeholder default value must be an int, "
"float, or np.ndarray")
def to_ph(spec):
shape = list(outer_dims) + spec.shape.as_list()
if isinstance(default, np.ndarray) and list(default.shape) != shape:
raise ValueError("Shape mismatch between default value and spec. "
"Got {}, expected {}".format(default.shape, shape))
const = tf.constant(default, shape=shape, dtype=spec.dtype)
return to_placeholder_with_default(const, spec, outer_dims=outer_dims)
with tf.name_scope(name_scope):
return tf.nest.map_structure(to_ph, nested_tensor_specs)
|
329bf5e76f5753a07f78394a8a065e3e44334929
| 27,796 |
import asyncio
async def discover():
"""Discover and return devices on local network."""
discovery = TuyaDiscovery()
try:
await discovery.start()
await asyncio.sleep(DEFAULT_TIMEOUT)
finally:
discovery.close()
return discovery.devices
|
fb3fed149011983520d884f139f9296973686459
| 27,797 |
def update_firewall_policy(module, oneandone_conn):
"""
Updates a firewall policy based on input arguments.
Firewall rules and server ips can be added/removed to/from
firewall policy. Firewall policy name and description can be
updated as well.
module : AnsibleModule object
oneandone_conn: authenticated oneandone object
"""
try:
firewall_policy_id = module.params.get('firewall_policy')
name = module.params.get('name')
description = module.params.get('description')
add_server_ips = module.params.get('add_server_ips')
remove_server_ips = module.params.get('remove_server_ips')
add_rules = module.params.get('add_rules')
remove_rules = module.params.get('remove_rules')
changed = False
firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
if firewall_policy is None:
_check_mode(module, False)
if name or description:
_check_mode(module, True)
firewall_policy = oneandone_conn.modify_firewall(
firewall_id=firewall_policy['id'],
name=name,
description=description)
changed = True
if add_server_ips:
if module.check_mode:
_check_mode(module, _add_server_ips(module,
oneandone_conn,
firewall_policy['id'],
add_server_ips))
firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
changed = True
if remove_server_ips:
chk_changed = False
for server_ip_id in remove_server_ips:
if module.check_mode:
chk_changed |= _remove_firewall_server(module,
oneandone_conn,
firewall_policy['id'],
server_ip_id)
_remove_firewall_server(module,
oneandone_conn,
firewall_policy['id'],
server_ip_id)
_check_mode(module, chk_changed)
firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
changed = True
if add_rules:
firewall_policy = _add_firewall_rules(module,
oneandone_conn,
firewall_policy['id'],
add_rules)
_check_mode(module, firewall_policy)
changed = True
if remove_rules:
chk_changed = False
for rule_id in remove_rules:
if module.check_mode:
chk_changed |= _remove_firewall_rule(module,
oneandone_conn,
firewall_policy['id'],
rule_id)
_remove_firewall_rule(module,
oneandone_conn,
firewall_policy['id'],
rule_id)
_check_mode(module, chk_changed)
firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
changed = True
return (changed, firewall_policy)
except Exception as e:
module.fail_json(msg=str(e))
|
0d99fbee3a67e2c571bd9d9ad5ce698588ac0df4
| 27,798 |
import math
def expanded_velocities_from_line_vortices(
points,
origins,
terminations,
strengths,
ages=None,
nu=0.0,
):
"""This function takes in a group of points, and the attributes of a group of
line vortices. At every point, it finds the induced velocity due to each line
vortex.
Citation: The equations in this function are from "Extended Unsteady
Vortex-Lattice Method for Insect Flapping Wings" (Nguyen et al., 2016)
Note: This function uses a modified version of the Biot-Savart law to create a
smooth induced velocity decay based on a vortex's core radius. The radius is
determined based on a vortex's age and kinematic viscosity. If the age of the
vortex is 0.0 seconds, the radius is set to 0.0 meters. The age of a vortex is
only relevant for vortices that have been shed into the wake.
Note: This function's performance has been highly optimized for unsteady
simulations via Numba. While using Numba dramatically increases unsteady
simulation performance, it does cause a performance drop for the less intense
steady simulations.
:param points: 2D array of floats
This variable is an array of shape (N x 3), where N is the number of points.
Each row contains the x, y, and z float coordinates of that point's position
in meters.
:param origins: 2D array of floats
This variable is an array of shape (M x 3), where M is the number of line
vortices. Each row contains the x, y, and z float coordinates of that line
vortex's origin's position in meters.
:param terminations: 2D array of floats
This variable is an array of shape (M x 3), where M is the number of line
vortices. Each row contains the x, y, and z float coordinates of that line
vortex's termination's position in meters.
:param strengths: 1D array of floats
This variable is an array of shape (, M), where M is the number of line
vortices. Each position contains the strength of that line vortex in meters
squared per second.
:param ages: 1D array of floats, optional
This variable is an array of shape (, M), where M is the number of line
vortices. Each position contains the age of that line vortex in seconds. This
is only relevant for vortices that have been shed into the wake. The default
value is None. If the age of a specific vortex is 0.0 seconds, then the
vortex core radius is set to 0.0 meters.
:param nu: float, optional
This variable is a float that represents the kinematic viscosity of the fluid
in meters squared per second. The default value is 0.0 meters squared per
second.
:return velocities: 3D array of floats
This is an array of shape (N x M x 3), where each row/column pair identifies
the velocity induced at one point by one of the line vortices. The units are
meters per second.
"""
num_vortices = origins.shape[0]
num_points = points.shape[0]
# Initialize an empty array, which we will fill with the induced velocities.
velocities = np.empty((num_points, num_vortices, 3))
# If the user didn't specify any ages, set the age of each vortex to 0.0 seconds.
if ages is None:
ages = np.zeros(num_vortices)
for vortex_id in range(num_vortices):
origin = origins[vortex_id]
termination = terminations[vortex_id]
strength = strengths[vortex_id]
age = ages[vortex_id]
# Calculate the radius of the vortex's core. If the age is 0.0 seconds,
# this will evaluate to be 0.0 meters.
r_c = 2 * math.sqrt(lamb * (nu + squire * abs(strength)) * age)
# The r_0 vector goes from the line vortex's origin to its termination.
r_0_x = termination[0] - origin[0]
r_0_y = termination[1] - origin[1]
r_0_z = termination[2] - origin[2]
# Find the r_0 vector's length.
r_0 = math.sqrt(r_0_x ** 2 + r_0_y ** 2 + r_0_z ** 2)
c_1 = strength / (4 * math.pi)
c_2 = r_0 ** 2 * r_c ** 2
for point_id in range(num_points):
point = points[point_id]
# The r_1 vector goes from the point to the line vortex's origin.
r_1_x = origin[0] - point[0]
r_1_y = origin[1] - point[1]
r_1_z = origin[2] - point[2]
# The r_2 vector goes from the point to the line vortex's termination.
r_2_x = termination[0] - point[0]
r_2_y = termination[1] - point[1]
r_2_z = termination[2] - point[2]
# The r_3 vector is the cross product of the r_1 and r_2 vectors.
r_3_x = r_1_y * r_2_z - r_1_z * r_2_y
r_3_y = r_1_z * r_2_x - r_1_x * r_2_z
r_3_z = r_1_x * r_2_y - r_1_y * r_2_x
# Find the r_1, r_2, and r_3 vectors' lengths.
r_1 = math.sqrt(r_1_x ** 2 + r_1_y ** 2 + r_1_z ** 2)
r_2 = math.sqrt(r_2_x ** 2 + r_2_y ** 2 + r_2_z ** 2)
r_3 = math.sqrt(r_3_x ** 2 + r_3_y ** 2 + r_3_z ** 2)
c_3 = r_1_x * r_2_x + r_1_y * r_2_y + r_1_z * r_2_z
# If part of the vortex is so close to the point that they are touching (
# within machine epsilon), there is a removable discontinuity. In this
# case, set the velocity components to their true values, which are 0.0
# meters per second.
if r_1 < eps or r_2 < eps or r_3 ** 2 < eps:
velocities[point_id, vortex_id, 0] = 0.0
velocities[point_id, vortex_id, 1] = 0.0
velocities[point_id, vortex_id, 2] = 0.0
else:
c_4 = (
c_1
* (r_1 + r_2)
* (r_1 * r_2 - c_3)
/ (r_1 * r_2 * (r_3 ** 2 + c_2))
)
velocities[point_id, vortex_id, 0] = c_4 * r_3_x
velocities[point_id, vortex_id, 1] = c_4 * r_3_y
velocities[point_id, vortex_id, 2] = c_4 * r_3_z
return velocities
|
228f7aa527dd1e9386bdc76da7c6e3c58698e58c
| 27,800 |
def normcase(path):
"""Normalize the case of a pathname. On Unix and Mac OS X, this returns the
path unchanged; on case-insensitive filesystems, it converts the path to
lowercase. On Windows, it also converts forward slashes to backward slashes."""
    # Case folding and slash conversion are platform-specific; this stub simply
    # returns the path unchanged, matching the documented POSIX behaviour.
    return path
|
d52dca00cc9db607d4ba22c12ba38f512a05107b
| 27,801 |
def typeof(obj, t):
"""Check if a specific type instance is a subclass of the type.
Args:
obj: Concrete type instance
t: Base type class
"""
try:
return issubclass(obj, t)
except TypeError:
return False
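# A quick usage sketch: typeof mirrors issubclass, but returns False instead of
# raising TypeError when the first argument is not a class object.
print(typeof(bool, int))  # True  (bool is a subclass of int)
print(typeof(str, int))   # False (unrelated classes)
print(typeof(3, int))     # False (issubclass raises TypeError, which is caught)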
|
67fbcf8b1506f44dba8360a4d23705a2e8a69b47
| 27,802 |
def get_all_clusters(cluster_type, client_id):
"""Get a list of (cluster_name, cluster_config)
for the available kafka clusters in the ecosystem at Yelp.
:param cluster_type: kafka cluster type
(ex.'scribe' or 'standard').
:type cluster_type: string
:param client_id: name of the client making the request. Usually
the same client id used to create the Kafka connection.
:type client_id: string
    :returns: list of :py:class:`yelp_kafka.config.ClusterConfig`
"""
client = get_kafka_discovery_client(client_id)
try:
cluster_names = client.v1.getClustersAll(cluster_type).result()
except HTTPError as e:
log.exception(
"Failure while fetching clusters for cluster type:{clustertype}"
.format(clustertype=cluster_type),
)
raise InvalidClusterType(e.response.text)
return [
get_kafka_cluster(cluster_type, client_id, cluster_name)
for cluster_name in cluster_names
]
|
8860435edfde332fd78e3bf01789a2831d884165
| 27,803 |
from .. import getPlottingEngine
def plot(x, y, show=True, **kwargs):
""" Create a 2D scatter plot.
:param x: A numpy array describing the X datapoints. Should have the same number of rows as y.
:param y: A numpy array describing the Y datapoints. Should have the same number of rows as x.
:param color: The color to use.
:param tag: A tag so that all traces of the same type are plotted using same color/label (for e.g. multiple stochastic traces).
:param tags: Like tag, but for multiple traces.
:param name: The name of the trace.
    :param names: Like name, but for multiple traces.
:param alpha: Floating point representing the opacity ranging from 0 (transparent) to 1 (opaque).
:param mode: Either 'lines' or 'markers' (defaults to 'lines').
"""
# global _plot_index
return getPlottingEngine().plot(x, y, show=show, **kwargs)
|
0b43a2b1e442ae19feaf0fb3a64550cad01b3602
| 27,804 |
def get_transform_ids(workprogress_id, request_id=None, workload_id=None, transform_id=None, session=None):
"""
Get transform ids or raise a NoObject exception.
:param workprogress_id: Workprogress id.
:param session: The database session in use.
:raises NoObject: If no transform is founded.
:returns: list of transform ids.
"""
return orm_transforms.get_transform_ids(workprogress_id=workprogress_id, request_id=request_id,
workload_id=workload_id, transform_id=transform_id, session=session)
|
08fbd67eb932c7c48b04528dd0863e67669e526e
| 27,805 |
from typing import Optional
def get_link(hub_name: Optional[str] = None,
link_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLinkResult:
"""
The link resource format.
Latest API Version: 2017-04-26.
:param str hub_name: The name of the hub.
:param str link_name: The name of the link.
:param str resource_group_name: The name of the resource group.
"""
pulumi.log.warn("""get_link is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:customerinsights:getLink'.""")
__args__ = dict()
__args__['hubName'] = hub_name
__args__['linkName'] = link_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:customerinsights/latest:getLink', __args__, opts=opts, typ=GetLinkResult).value
return AwaitableGetLinkResult(
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
link_name=__ret__.link_name,
mappings=__ret__.mappings,
name=__ret__.name,
operation_type=__ret__.operation_type,
participant_property_references=__ret__.participant_property_references,
provisioning_state=__ret__.provisioning_state,
reference_only=__ret__.reference_only,
source_entity_type=__ret__.source_entity_type,
source_entity_type_name=__ret__.source_entity_type_name,
target_entity_type=__ret__.target_entity_type,
target_entity_type_name=__ret__.target_entity_type_name,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
|
b94a7d2ed3977afbff0699e861da67e1267581b4
| 27,806 |
import random
def selection_elites_random(individuals : list, n : int = 4, island=None) -> list:
"""
Completely random selection.
Args:
individuals (list): A list of Individuals.
n (int): Number to select (default = 4).
island (Island): The Island calling the method (default = None).
Returns:
list: Random n Individuals.
"""
    # random.choices draws n individuals uniformly at random (with replacement)
    # and already returns a list.
    return random.choices(individuals, k=n)
|
2f7df4e8a347bcd9a770d0d15d91b52956dbd26c
| 27,808 |
import ctypes
def ekssum(handle, segno):
"""
Return summary information for a specified segment in a specified EK.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekssum_c.html
:param handle: Handle of EK.
:type handle: int
:param segno: Number of segment to be summarized.
:type segno: int
:return: EK segment summary.
    :rtype: spiceypy.utils.support_types.SpiceEKSegSum
"""
handle = ctypes.c_int(handle)
segno = ctypes.c_int(segno)
segsum = stypes.SpiceEKSegSum()
libspice.ekssum_c(handle, segno, ctypes.byref(segsum))
return segsum
|
9c9920d29ed1c1c85524a511119f37322143888c
| 27,809 |
def compare_prices_for_same_urls(
source_df: pd.DataFrame, target_df: pd.DataFrame, tagged_fields: TaggedFields
):
"""For each pair of items that have the same `product_url_field` tagged field,
compare `product_price_field` field
Returns:
A result containing pairs of items with same `product_url_field`
from `source_df` and `target_df` which `product_price_field` differ,
missing and new `product_url_field` tagged fields.
"""
result = Result("Compare Prices For Same Urls")
url_field = tagged_fields.get("product_url_field")
if not url_field:
result.add_info(Outcome.SKIPPED)
return result
url_field = url_field[0]
source_df = source_df.dropna(subset=[url_field])
target_df = target_df.dropna(subset=[url_field])
same_urls = source_df[(source_df[url_field].isin(target_df[url_field].values))][
url_field
]
new_urls = source_df[~(source_df[url_field].isin(target_df[url_field].values))][
url_field
]
missing_urls = target_df[(~target_df[url_field].isin(source_df[url_field].values))][
url_field
]
errors = {}
for url, group in missing_urls.groupby(missing_urls):
errors[f"Missing {url}"] = set(group.index)
if not missing_urls.empty:
result.add_info(
f"{len(missing_urls)} urls missing from the tested job", errors=errors
)
if not new_urls.empty:
result.add_info(f"{len(new_urls)} new urls in the tested job")
result.add_info(f"{len(same_urls)} same urls in both jobs")
diff_prices_count = 0
price_field = tagged_fields.get("product_price_field")
if not price_field:
result.add_info("product_price_field tag is not set")
else:
price_field = price_field[0]
detailed_messages = []
for url in same_urls:
if url.strip() != "nan":
source_price = source_df[source_df[url_field] == url][price_field].iloc[
0
]
target_price = target_df[target_df[url_field] == url][price_field].iloc[
0
]
if (
is_number(source_price)
and is_number(target_price)
and ratio_diff(source_price, target_price) > 0.1
):
diff_prices_count += 1
source_key = source_df[source_df[url_field] == url].index[0]
target_key = target_df[target_df[url_field] == url].index[0]
msg = (
f"different prices for url: {url}\nsource price is {source_price} "
f"for {source_key}\ntarget price is {target_price} for {target_key}"
)
detailed_messages.append(msg)
res = f"{len(same_urls)} checked, {diff_prices_count} errors"
if detailed_messages:
result.add_error(res, detailed="\n".join(detailed_messages))
else:
result.add_info(res)
return result
|
dc23577169bf6788ecd17eb2fc6c959e367b5000
| 27,810 |
def _check_param(dict_):
"""
check dictionary elements and reformat if need be
:return: dictionary reformat
"""
# default empty dictionary
_ = {}
if "google_users" in dict_:
_["google_users"] = _check_param_google_users(dict_["google_users"])
else:
_logger.exception(f"No key 'google_users' in yaml file {setupcfg.extraParam}")
raise KeyError
if "dataset_ids" in dict_:
_["dataset_ids"] = _check_param_dataset_ids(dict_["dataset_ids"])
else:
_logger.exception(f"No key 'dataset_ids' in yaml file {setupcfg.extraParam}")
raise KeyError
return _
|
17ccc017fb4a34f3d68ef7150165185e62c7a8dc
| 27,811 |
def fetch_dataset_insistently(url: str, link_text_prefix: str, user_agent: str) -> dict:
"""Fetch the approved routes dataset."""
proxies = get_proxies_geonode() + get_proxies()
print(f'{len(proxies)} proxies found.')
for i, proxy in enumerate(proxies):
print(f'Fetching dataset, try with proxy [{i + 1}] {proxy}.')
req_proxy = {
'http': f'http://{proxy["ip"]}:{proxy["port"]}'
}
try:
download_info = fetch_dataset(url, link_text_prefix, user_agent, req_proxy)
return download_info
        except FetchException:
            raise
        except Exception as e:
            print(f'Fetching dataset try {proxy} failed with error: {e}')
|
c98985becc3989980782c4886e18fe7fa56f8d06
| 27,812 |
import numpy
def _dense_to_one_hot(labels_dense):
"""Convert class labels from scalars to one-hot vectors."""
num_classes = len(set(labels_dense))
num_labels = labels_dense.shape[0]
labels_to_numbers = {label: i for i, label in enumerate(list(set(labels_dense)))}
labels_as_numbers = numpy.asarray([labels_to_numbers[label] for label in labels_dense])
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_as_numbers.ravel()] = 1
return labels_one_hot, labels_to_numbers
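# A minimal usage sketch: string labels are mapped to arbitrary-but-consistent
# column indices, and the returned dict records which label got which column.
labels = numpy.asarray(["cat", "dog", "cat", "bird"])
one_hot, mapping = _dense_to_one_hot(labels)
print(one_hot.shape)  # (4, 3) -- one row per label, one column per class
print(mapping)        # e.g. {'cat': 0, 'dog': 1, 'bird': 2} (order depends on set())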
|
49cc3ab6bc5f4ec81323321a9fe13d4da030fb4a
| 27,813 |
def swish(x):
"""Swish activation function. For more info: https://arxiv.org/abs/1710.05941"""
return tf.multiply(x, tf.nn.sigmoid(x))
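# A small usage sketch (assumes TensorFlow 2.x eager execution and that tf refers
# to the tensorflow module, as in the function body above):
import tensorflow as tf
print(swish(tf.constant([-1.0, 0.0, 1.0])).numpy())  # approx. [-0.2689, 0.0, 0.7311]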
|
40766f934d2e691dc28d5dcd3a44c37cef601896
| 27,814 |
def fatorial(n=1):
"""
-> Calcúla o fatorial de um número e o retorna
:param n: número
"""
f = 1
for i in range(1, n + 1):
f *= i
return f
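# Usage sketch:
print(fatorial(5))  # 120
print(fatorial())   # 1 (default n=1)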
|
5c64b8ccf4a62a1b4294e576b49fbf69e85972ec
| 27,815 |
def retrieve_context_nw_topology_service_name_name_by_id(value_name): # noqa: E501
"""Retrieve name by ID
Retrieve operation of resource: name # noqa: E501
:param value_name: ID of value_name
:type value_name: str
:rtype: NameAndValue
"""
return 'do some magic!'
|
4dcc0b25c6fd76bf94e14d63cb9731946b97b06a
| 27,816 |
def conv1x1(in_planes, out_planes, wib, stride=1):
"""1x1 convolution"""
# resnet_wib = False
resnet_wib = True
resnet_alpha = 1E-3
if not wib:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
else:
return WibConv2d(alpha=resnet_alpha,
in_channels=in_planes, out_channels=out_planes, kernel_size=1, stride=stride, bias=False)
|
0ce1d9d47ff98dc7ce607afafb4ea506c850afc3
| 27,817 |
def get_rules(fault_block, zone):
"""Get rules for fault block and zone names.
In this model the rules depend only on the zone; they do NOT
vary from fault block to fault block for a given zone.
Args:
fault_block (str)
Name of fault block.
zone (str)
Zone name.
Returns:
Function (rule) that computes elastic properties (density, Vp, Vs, Qp, and Qs) from x, y, depth.
"""
RULES = {
"Mantle": {
"default": rules_aagaard_etal_2010.upper_mantle,
},
"Lower Crust": {
"default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
},
"Great_Valley_Ophiolite": {
"default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
},
"San Leandro G": {
"default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
},
"Logan G": {
"default": rules_aagaard_etal_2010.mafic_great_valley_ophiolite,
},
"Kjf_Berkeley": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kjf_Chabot": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kjf_Foothills": {
"default": rules_aagaard_etal_2010.franciscan_foothills,
},
"Kjf_Merced": {
"default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
},
"Kjf_Sur": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kjf_Napa_Somoma": {
"default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
"Napa": rules_hirakawa_aagaard_2021.franciscan_napa,
"Sonoma": rules_aagaard_etal_2010.franciscan_napa_sonoma,
},
"Kjf_Bay Block": {
"default": rules_aagaard_etal_2010.franciscan_napa_sonoma,
},
"Kjf_Evergreen": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kjf_E_Diablo": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kjf_W_Diablo": {
"default": rules_aagaard_etal_2010.franciscan_berkeley,
},
"Kgr_Gab": {
"default": rules_aagaard_etal_2010.salinian_granitic,
},
"Kgr_Halfmoon": {
"default": rules_aagaard_etal_2010.salinian_granitic,
},
"Kgr_SCoast": {
"default": rules_aagaard_etal_2010.salinian_granitic,
},
"Kgr_NShelf": {
"default": rules_aagaard_etal_2010.salinian_granitic,
},
"Valley Sequence": {
"default": rules_aagaard_etal_2010.great_valley_sequence_sedimentary,
"San Leandro": rules_hirakawa_aagaard_2021.valley_sequence_sanleandro,
"Sunol": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
},
"Great Valley Seq": {
"default": rules_aagaard_etal_2010.great_valley_sequence_sedimentary,
"Berkeley": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
"Chabot": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
"W Diablo Range": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
},
"Cenozoic": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
"Napa": rules_hirakawa_aagaard_2021.cenozoic_napa,
"Sonoma": rules_hirakawa_aagaard_2021.cenozoic_sonoma,
"Alexander": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
"W Diablo Range": rules_hirakawa_aagaard_2021.quaternary_livermore,
},
"Cenozoic_Great Valley": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
},
"Cenozoic_Halfmoon": {
"default": rules_aagaard_etal_2010.cenozoic_sedimentary_halfmoonbay,
},
"Cenozoic_Ever": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
},
"T_SouthBay": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
},
"T_La Honda": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_lahondabasin,
},
"T_Pilarcitos": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
},
"T_Bay_Block_Santa_Rosa": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
"Santa Rosa": rules_hirakawa_aagaard_2021.brocher2005_older_cenozoic_sedimentary,
},
"T_Berkeley": {
"default": rules_aagaard_etal_2010.tertiary_sedimentary_southbay,
"Berkeley": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
"Chabot": rules_hirakawa_aagaard_2021.brocher2008_great_valley_sequence,
},
"QT_Bay_Block_Santa Rosa": {
"default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
},
"QT_Merced_Pilarcitos": {
"default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
},
"QT_South_Bay": {
"default": rules_aagaard_etal_2010.quaternary_tertiary_sedimentary,
},
"water": {
"default": rules_aagaard_etal_2010.seawater,
},
"<null>": {
"default": rules_aagaard_etal_2010.outside_model,
},
"": {
"default": rules_aagaard_etal_2010.outside_model,
},
}
return RULES[zone].get(fault_block, RULES[zone]["default"])
|
12b19cd795ecf618995a2f67f3844792b372d09d
| 27,818 |
import tensorflow as tf
def conv_cond_concat(x, y):
    """Concatenate conditioning vector on feature map axis."""
    x_shapes = tf.shape(x)
    y_shapes = tf.shape(y)
    return tf.concat(
        [x, y * tf.ones([x_shapes[0], x_shapes[1], x_shapes[2], y_shapes[3]])], 3)
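# A minimal usage sketch (assumes TensorFlow 2.x eager execution): the conditioning
# tensor y is tiled across the spatial dimensions and appended along the channel axis.
x = tf.zeros([2, 4, 4, 3])  # batch of feature maps
y = tf.ones([2, 1, 1, 5])   # per-sample conditioning vector
print(conv_cond_concat(x, y).shape)  # (2, 4, 4, 8)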
|
583ed5df67245b483531f8e3129ba88b9ec811ef
| 27,819 |
def get_clusters(cloud_filtered):
"""
Get clusters from the cloud.
Parameters:
-----------
cloud: pcl.PointCloud()
Returns:
-----------
clusters: pcl.PointCloud() array N
"""
clusters = []
tree = cloud_filtered.make_kdtree()
ec = cloud_filtered.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance (0.02)
ec.set_MinClusterSize (5)
ec.set_MaxClusterSize (100)
ec.set_SearchMethod (tree)
cluster_indices = ec.Extract()
cloud_cluster = pcl.PointCloud()
for j, indices in enumerate(cluster_indices):
#print('indices = ' + str(len(indices)))
points = np.zeros((len(indices), 3), dtype=np.float32)
for i, indice in enumerate(indices):
points[i][0] = cloud_filtered[indice][0]
points[i][1] = cloud_filtered[indice][1]
points[i][2] = cloud_filtered[indice][2]
cloud_cluster.from_array(points)
clusters.append(cloud_cluster)
return clusters
|
b4b7fa0ff8b7f362783bc94727253a2eb41f6f7e
| 27,820 |
import json
def errorResult(request, response, error, errorMsg, httpStatus = 500, result = None, controller = None):
""" set and return the error result
@param controller: pylon controller handling the request, where cal context is injected and later retrieved by trackable
"""
response.status_int = httpStatus
response.content_type = 'application/json'
res = {'error':error, 'errorMsg':errorMsg}
if (result != None):
res['result'] = result
title = __getreqstr(request)
msg = 'Error Result - (%s, %s)' % (str(error), errorMsg)
__injectcontext(controller, title, CRITICAL, msg)
LOG.warning(msg)
return json.dumps(res)
|
df386f939751ea268907c016120db7219c3e93bc
| 27,821 |
from typing import Dict
import logging
def parse_main(text: str = "") -> Dict:
"""
A loop for processing each parsing recipe. Returns a dict of parsed values.
"""
if text == "":
logging.warning("Empty string provided for parsing")
parsed_data = {}
for recipe in parser_recipe:
field = recipe["field"]
parsed_data[field] = parser(text, **recipe)
return parsed_data
|
4574b3e9dce321f169f31031e46e572fac14fce3
| 27,822 |
def main():
""" Returns the answer. """
return 42
|
f6800af5efb0b65f7c7afdd5ea0ede896fd740f8
| 27,823 |
def collect_accuracy(path):
""" Collects accuracy values in log file. """
r1 = None
r5 = None
mAP = None
r1_content = 'Rank-1 '
r5_content = 'Rank-5 '
map_content = 'mAP:'
with open(path) as input_stream:
for line in input_stream:
candidate = line.strip()
if r1_content in candidate:
r1 = float(candidate.split(':')[-1].replace('%', ''))
elif r5_content in candidate:
r5 = float(candidate.split(':')[-1].replace('%', ''))
elif map_content in candidate:
mAP = float(candidate.split(':')[-1].replace('%', ''))
return r1, r5, mAP
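# A minimal usage sketch: write a small fake log file and parse the metric lines.
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".log", delete=False) as tmp:
    tmp.write("Rank-1 :85.3%\nRank-5 :95.1%\nmAP: 70.2%\n")
print(collect_accuracy(tmp.name))  # (85.3, 95.1, 70.2)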
|
fa94724f16a332fe18d13df3cc0fbcdd060fe897
| 27,825 |
from typing import Type
from typing import Mapping
def recursively_get_annotations(ty: Type) -> Mapping[str, Type]:
"""Given a type, recursively gather annotations for its subclasses as well. We only
gather annotations if its subclasses are themselves subclasses of Deserializable,
and not Deserializable itself.
This is bad evil code that uses internal Python details that may break in
3.8 or later."""
# Get initial annotations
annotations: Mapping[str, Type] = getattr(ty, "__annotations__", {})
# Recursively gather annotations for base classes
for base in getattr(ty, "__bases__", {}):
if issubclass(base, Deserializable) and (base != Deserializable):
annotations = dict(annotations, **recursively_get_annotations(base))
return annotations
|
ba531eeba8006aa9393fa2487c056d6309df43d4
| 27,826 |
def sector_model():
"""SectorModel requiring precipitation and cost, providing water
"""
model = EmptySectorModel('water_supply')
model.add_input(
Spec.from_dict({
'name': 'precipitation',
'dims': ['LSOA'],
'coords': {'LSOA': [1, 2, 3]},
'dtype': 'float',
'unit': 'ml'
})
)
model.add_input(
Spec.from_dict({
'name': 'reservoir_level',
'dims': ['LSOA'],
'coords': {'LSOA': [1, 2, 3]},
'dtype': 'float',
'unit': 'ml'
})
)
model.add_input(
Spec.from_dict({
'name': 'rGVA',
'dims': ['LSOA'],
'coords': {'LSOA': [1, 2, 3]},
'dtype': 'float',
'unit': 'million GBP'
})
)
model.add_output(
Spec.from_dict({
'name': 'water',
'dims': ['LSOA'],
'coords': {'LSOA': [1, 2, 3]},
'dtype': 'float',
'unit': 'Ml'
})
)
model.add_output(
Spec.from_dict({
'name': 'reservoir_level',
'dims': ['LSOA'],
'coords': {'LSOA': [1, 2, 3]},
'dtype': 'float',
'unit': 'ml'
})
)
model.add_parameter(
Spec.from_dict({
'name': 'test_parameter',
'description': 'a dummy parameter to test narratives',
'dims': ['national'],
'coords': {'national': ['GB']},
'abs_range': (0.5, 2),
'exp_range': (0.5, 2),
'dtype': 'float',
'default': [[1.]],
'unit': '%'
})
)
return model
|
528548e24052913a315804a782cb74cef53b0f08
| 27,827 |
def maybe_flip_x_across_antimeridian(x: float) -> float:
"""Flips a longitude across the antimeridian if needed."""
if x > 90:
return (-180 * 2) + x
else:
return x
|
50fac7a92d0ebfcd003fb478183b05668b9c909c
| 27,828 |
def contour_check(check_points, random_walk):
"""check_points have dim (n, ndim)
random_walk has 3 elements.
[0] is boundary unit vectors (can be in any space),
[1] is boundary ls (relative to origin)
[2] is origin
returns: indexer of [True,..... etc.] of which points are in or not
operates by finding which direction we are closest too and then comparing our l to that l
"""
boundary_unit_vectors = random_walk[0]
boundary_ls = random_walk[1]
origin = random_walk[2]
points = check_points - origin
#holds the projections of the points onto each of the unitvectors of the boundary
projections = np.dot(points, boundary_unit_vectors) #npoints x nunitvecs
maxprojs = np.argmax(projections, axis = 1) #argmax (of the unitvec) projection for each point
#this tells us which len to compare against
compare_distance_to = np.array([boundary_ls[i] for i in maxprojs])
distances = np.sqrt(points[:,0]**2 + points[:,1]**2)
whichinside = distances<=compare_distance_to
return whichinside
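# A minimal 2-D usage sketch. The boundary unit vectors are stored as the *columns*
# of an (ndim x k) array, so that np.dot(points, boundary_unit_vectors) gives the
# (npoints x k) projection matrix used above.
import numpy as np
unit_vectors = np.array([[1.0, 0.0, -1.0, 0.0],
                         [0.0, 1.0, 0.0, -1.0]])  # +x, +y, -x, -y directions
boundary_ls = np.array([1.0, 1.0, 1.0, 1.0])      # unit distance in every direction
origin = np.array([0.0, 0.0])
pts = np.array([[0.5, 0.0], [2.0, 0.0]])
print(contour_check(pts, (unit_vectors, boundary_ls, origin)))  # [ True False]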
|
fa973c943c4827bd180eb95a3bbc3e0a7d2beb2a
| 27,829 |
def _get_erroneous_call(report_text: str) -> str:
"""."""
erroneous_line = [
line for line in report_text.splitlines() if line.startswith('> ') and RAISES_OUTPUT_SIGNAL_IN_CONTEXT in line
][0]
erroneous_assertion = erroneous_line.lstrip('> ')
erroneous_assertion = string_remove_from_start(erroneous_assertion, 'assert ')
erroneous_assertion = string_remove_after(erroneous_assertion, ' ==')
erroneous_assertion = erroneous_assertion.rstrip('= ')
return erroneous_assertion
|
125267db8fb978285fc44ec078364b214c022ca9
| 27,830 |
def write_output(features, forecast_hours, poly, line, point):
"""
writes output to OUTDATA dict depending on query type
:param features: output from clipping function
:param forecast_hours: list of all queried forecast hours
:param poly: boolean to identify a polygon query
:param line: boolean to identify a line query
:param point: boolean to identify a point query
:returns: dict with all queried forecast hours and clipping results
"""
i = 0
if line and not poly and not point:
OUTDATA = {"type": "FeatureCollection",
"features": {
"type": "Feature",
"geometry": {
"type": "LineString",
"coordinates": features[0][0][0][4],
},
"properties": {
"Forecast Hours": []
}
}
}
temp_line = []
dir_line = []
speed_line = []
for hour in forecast_hours:
OUTDATA["features"]['properties']["Forecast Hours"].append({
"Forecast Hour": hour,
})
for i in features[0]:
if 'Temperature Data' in features[0][i][3]:
for x in features[0][0]:
temp_line.append([[x[0], x[1]], x[2]])
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Temperature"]) = {
"Observations Along Line": temp_line
}
if 'Wind Direction Data' in features[0][i][3]:
for x in features[0][1]:
dir_line.append([[x[0], x[1]], x[2]])
(OUTDATA["features"]['properties']["Forecast Hours"][
int(i/3)]["Wind Direction"]) = {
"Observations Along Line": dir_line
}
if 'Wind Speed Data' in features[0][i][3]:
for x in features[0][2]:
speed_line.append([[x[0], x[1]], x[2]])
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Wind Speed"]) = {
"Observations Along Line": speed_line
}
return OUTDATA
if poly:
OUTDATA = {"type": "FeatureCollection",
"features": {
"type": "Feature",
"geometry": {
"type": "Polygon",
"coordinates": features[0][0][4],
},
"properties": {
"Forecast Hours": []
}
}
}
for hour in forecast_hours:
OUTDATA["features"]['properties']["Forecast Hours"].append({
"Forecast Hour": hour,
})
for i in features[0]:
if 'Temperature Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Temperature"]) = {
"Min Temperature": features[0][i][0],
"Max Temperature": features[0][i][1],
"Mean Temperature": features[0][i][2]
}
if 'Wind Direction Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Wind Direction"]) = {
"Min Wind Direction": features[0][i][0],
"Max Wind Direction": features[0][i][1],
"Mean Wind Direction": features[0][i][2]
}
if 'Wind Speed Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Wind Speed"]) = {
"Min Wind Speed": features[0][i][0],
"Max Wind Speed": features[0][i][1],
"Mean Wind Speed": features[0][i][2]
}
return OUTDATA
if point:
OUTDATA = {"type": "FeatureCollection",
"features": {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [features[0][0][0],
features[0][0][1]],
},
"properties": {
"Forecast Hours": []
}
}
}
for hour in forecast_hours:
OUTDATA["features"]['properties']["Forecast Hours"].append({
"Forecast Hour": hour,
})
for i in features[0]:
if 'Temperature Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Temperature"]) = {
"Temperature": features[0][i][2],
}
if 'Wind Direction Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Wind Direction"]) = {
"Wind Direction": features[0][i][2],
}
if 'Wind Speed Data' in features[0][i][3]:
(OUTDATA["features"]['properties']["Forecast Hours"]
[int(i/3)]["Wind Speed"]) = {
"Wind Speed": features[0][i][2],
}
return OUTDATA
|
abc92e597e4d8a409f7c4d0e0b224a76b4a6cd63
| 27,831 |
def index():
"""
Index set as main route
"""
return render_template('index.html', title='Home')
|
bacc433a4523a9b390bdde636ead91d72303cf01
| 27,832 |
def plus_one(digits):
"""
Given a non-empty array of digits representing a non-negative integer,
plus one to the integer.
:param digits: list of digits of a non-negative integer,
:type digits: list[int]
:return: digits of operated integer
:rtype: list[int]
"""
result = []
carry = 1
for i in range(len(digits) - 1, -1, -1):
result.append((digits[i] + carry) % 10)
carry = (digits[i] + carry) // 10
if carry:
result.append(1)
return list(reversed(result))
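# Usage sketch, including the carry-all-the-way case:
print(plus_one([1, 2, 9]))  # [1, 3, 0]
print(plus_one([9, 9]))     # [1, 0, 0]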
|
a11668a1b2b9adb9165152f25bd1528d0cc2bd71
| 27,833 |
def run_experiment(input_frame, n_samples=1, temperature=1, npartitions=1):
"""
Runs experiment given inputs.
Takes `n_samples` samples from the VAE
Returns a list of size `n_samples` of results for each input
"""
encoder_data = a.get_encoder()
decoder_data = a.get_decoder()
vae = a.get_vae(
encoder_data["model"],
decoder_data["model"],
encoder_data["tokenizer"],
decoder_data["tokenizer"],
beta=0,
)
# partially apply function for evaluator
# series here represents a row (we call with axis=1)
def evaluator(series):
return a.eval_analogy(
vae,
encoder_data["tokenizer"],
decoder_data["tokenizer"],
series[0],
series[1],
series[2],
temperature=temperature,
)[0]
new_columns = ["pred_{}".format(i) for i in range(n_samples)]
output_frame = pd.DataFrame()
parallelize = npartitions > 1
for col in new_columns:
if parallelize:
output_frame[col] = input_frame.map_partitions(
lambda df: df.apply(evaluator, axis=1)
).compute()
else:
output_frame[col] = input_frame.apply(evaluator, axis=1)
return output_frame
|
2729a977b14235bf5a0e020fbe8a528a5906f212
| 27,834 |
def to_angle(s, sexagesimal_unit=u.deg):
"""Construct an `Angle` with default units.
This creates an :class:`~astropy.coordinates.Angle` with the following
default units:
- A number is in radians.
- A decimal string ('123.4') is in degrees.
- A sexagesimal string ('12:34:56.7') or tuple has `sexagesimal_unit`.
In addition, bytes are decoded to ASCII strings to normalize user inputs.
Parameters
----------
s : :class:`~astropy.coordinates.Angle` or equivalent, string, float, tuple
Anything accepted by `Angle` and also unitless strings, numbers, tuples
sexagesimal_unit : :class:`~astropy.units.UnitBase` or str, optional
The unit applied to sexagesimal strings and tuples
Returns
-------
angle : :class:`~astropy.coordinates.Angle`
Astropy `Angle`
"""
try:
return Angle(s)
except u.UnitsError:
# Bytes is a sequence of ints that will inadvertently end up as radians, so crash instead
if isinstance(s, bytes):
raise TypeError(f'Raw bytes {s} not supported: '
'first decode to string (or add unit)') from None
# We now have a number, string or tuple without a unit
if isinstance(s, str) and ':' in s or isinstance(s, tuple):
return Angle(s, unit=sexagesimal_unit)
elif isinstance(s, str):
return Angle(s, unit=u.deg)
else:
# XXX Maybe deprecate this in future and only deal with strings here
return Angle(s, unit=u.rad)
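# A few usage sketches (assumes astropy is installed and that Angle / u refer to
# astropy.coordinates.Angle and astropy.units, as in the function body above):
a1 = to_angle(1.57)          # bare number -> radians
a2 = to_angle('123.4')       # decimal string -> degrees
a3 = to_angle('12:34:56.7')  # sexagesimal string -> degrees by default
print(a1.unit, a2.unit, a3.unit)  # rad deg deg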
|
106a5be01c3f9150862c1e02f5cd77292b029cf6
| 27,835 |
def simpleBlocking(rec_dict, blk_attr_list):
"""Build the blocking index data structure (dictionary) to store blocking
key values (BKV) as keys and the corresponding list of record identifiers.
A blocking is implemented that simply concatenates attribute values.
Parameter Description:
rec_dict : Dictionary that holds the record identifiers as keys
and corresponding list of record values
blk_attr_list : List of blocking key attributes to use
This method returns a dictionary with blocking key values as its keys and
list of record identifiers as its values (one list for each block).
Examples:
If the blocking is based on 'postcode' then:
block_dict = {'2000': [rec1_id, rec2_id, rec3_id, ...],
'2600': [rec4_id, rec5_id, ...],
...
}
while if the blocking is based on 'postcode' and 'gender' then:
block_dict = {'2000f': [rec1_id, rec3_id, ...],
'2000m': [rec2_id, ...],
'2600f': [rec5_id, ...],
'2600m': [rec4_id, ...],
...
}
"""
block_dict = {} # The dictionary with blocks to be generated and returned
print('Run simple blocking:')
print(' List of blocking key attributes: '+str(blk_attr_list))
print(' Number of records to be blocked: '+str(len(rec_dict)))
print('')
for (rec_id, rec_values) in rec_dict.items():
rec_bkv = '' # Initialise the blocking key value for this record
# Process selected blocking attributes
#
for attr in blk_attr_list:
attr_val = rec_values[attr]
rec_bkv += attr_val
# Insert the blocking key value and record into blocking dictionary
#
if (rec_bkv in block_dict): # Block key value in block index
# Only need to add the record
#
rec_id_list = block_dict[rec_bkv]
rec_id_list.append(rec_id)
else: # Block key value not in block index
# Create a new block and add the record identifier
#
rec_id_list = [rec_id]
block_dict[rec_bkv] = rec_id_list # Store the new block
return block_dict
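# A minimal usage sketch: record values are lists, so the blocking key attributes
# are given as positional indices into each record (here: postcode=1, gender=2).
recs = {
    'r1': ['peter', '2000', 'm'],
    'r2': ['paula', '2000', 'f'],
    'r3': ['pedro', '2600', 'm'],
}
print(simpleBlocking(recs, [1, 2]))
# {'2000m': ['r1'], '2000f': ['r2'], '2600m': ['r3']}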
|
5bf9b85ad84ffa3dc11a39a876cbcfefe09a5b2c
| 27,836 |
def get_marker_obj(plugin, context, resource, limit, marker):
"""Retrieve a resource marker object.
This function is used to invoke
plugin._get_<resource>(context, marker) and is used for pagination.
:param plugin: The plugin processing the request.
:param context: The request context.
:param resource: The resource name.
:param limit: Indicates if pagination is in effect.
:param marker: The id of the marker object.
:returns: The marker object associated with the plugin if limit and marker
are given.
"""
if limit and marker:
return getattr(plugin, '_get_%s' % resource)(context, marker)
|
5e66ca50382c6e8a611983252ce44bf50019177b
| 27,837 |
def generative(max_value: int = FIBONACCI_MAX) -> int:
"""
This is the fully generative method for the Fibonacci sequence. The full sequence list is generated, the even ones
are sought out, and summed
--> benchmark: 8588 ns/run
:param max_value: The ceiling value of Fibonacci numbers to be added
:return: The summation in question
"""
i = 2
fibonacci_list = [*FIBONACCI_SEED]
    # generate the Fibonacci sequence up to (but not including) max_value
while (fibonacci_list[i-1] + fibonacci_list[i-2]) < max_value:
fibonacci_list.append(fibonacci_list[i-1] + fibonacci_list[i-2])
i += 1
summation = 0
for number in fibonacci_list:
if not number % 2: # if not odd
summation += number
return summation
|
086076d2599297fd23eaa5577985bd64df10cc81
| 27,839 |
def enc_net(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = Net(num_classes, **kwargs)
# if pretrained:
# model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
|
ff8429e6b0e5ef522f6f2a3b0b10a76951a95db8
| 27,841 |
def get_bytes(data):
"""
Helper method to get the no. of bytes in the hex"""
data = str(data)
return int(len(sanatize_hex(data)) / 2)
|
014715aa301370ba2d3598c02b66c5b88741b218
| 27,842 |
import collections
def _group_by(input_list, key_fn):
"""Group a list according to a key function (with a hashable range)."""
result = collections.defaultdict(list)
for x in input_list:
result[key_fn(x)].append(x)
return result
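# Usage sketch: group integers by parity.
print(_group_by([1, 2, 3, 4, 5], lambda x: x % 2))
# defaultdict(<class 'list'>, {1: [1, 3, 5], 0: [2, 4]})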
|
288c108588f9e4ea60c4dac6ff656c8c8ffde580
| 27,843 |
import json
def set_parameters_in_cookie(response: Response) -> Response:
"""Set request parameters in the cookie, to use as future defaults."""
if response.status_code == HTTPStatus.OK:
data = {
param: request.args[param]
for param in PARAMS_TO_PERSIST
if param in request.args
}
response.set_cookie(PARAMS_COOKIE_NAME, json.dumps(data))
return response
|
bc5f4e225bdef907ee794e3f8f95ac3d78677046
| 27,844 |
from _pytest.logging import LogCaptureFixture
import json
from typing import Any
import logging
from unittest.mock import MagicMock
from uuid import uuid4
def test_wild_dlq_error(mock_handler: MagicMock, mock_rsmq: MagicMock, caplog: LogCaptureFixture) -> None:
"""test error level logs when message fails to successfully reach DLQ"""
mock_handler.return_value = False, False
mock_rsmq.return_value.sendMessage.return_value.execute = lambda: False
m = {"uuid": str(uuid4())}
rsmq_msg = {
"id": "rsmq_id",
"message": json.dumps(m),
}
mock_rsmq.return_value.receiveMessage.return_value.exceptions.return_value.execute = lambda: rsmq_msg
cfg = {
"comp_1": {"value": "random_val", "profile": "rsmq"},
"dlq": {"profile": "rsmq-dlq", "value": "my_dlq"},
}
eng = Engine(input_queue="comp_1", dead_letter_queue="dlq", queue_config=cfg, metrics_port=None)
@eng.stream_app
def fun(msg: Any) -> bool: # pylint: disable=W0613
return True
with caplog.at_level(logging.ERROR):
fun()
assert "failed producing message to dlq" in caplog.text
eng.shutdown()
|
d00c32e27833975d819b333c723734d24b3ead65
| 27,845 |
from typing import Optional
def teleport_reduce(g: BaseGraph[VT,ET], quiet:bool=True, stats:Optional[Stats]=None) -> BaseGraph[VT,ET]:
"""This simplification procedure runs :func:`full_reduce` in a way
that does not change the graph structure of the resulting diagram.
The only thing that is different in the output graph are the location and value of the phases."""
s = Simplifier(g)
s.full_reduce(quiet=quiet, stats=stats)
return s.mastergraph
|
38474e11094a21e581a18591b7649fdbdd977d72
| 27,846 |
def map_coords_to_scaled(coords, orig_size, new_size):
"""
maps coordinate indices relative to the original 3-D image to indices corresponding to the
re-scaled 3-D image, given the coordinate indices and the shapes of the original
and "new" scaled images. Returns integer indices of the voxel that contains the center of
the transformed coordinate location.
"""
return tuple(
[int(i) for i in map_coords_to_scaled_float(coords, orig_size, new_size)]
)
|
74236074a0c6c5afbb56bd5ec75caaf517730040
| 27,847 |
def _PromptToUpdate(path_update, completion_update):
"""Prompt the user to update path or command completion if unspecified.
Args:
path_update: bool, Value of the --update-path arg.
completion_update: bool, Value of the --command-completion arg.
Returns:
(path_update, completion_update) (bool, bool) Whether to update path and
enable completion, respectively, after prompting the user.
"""
# If both were specified, no need to prompt.
if path_update is not None and completion_update is not None:
return path_update, completion_update
# Ask the user only one question to see if they want to do any unspecified
# updates.
actions = []
if path_update is None:
actions.append(_PATH_PROMPT)
if completion_update is None:
actions.append(_COMPLETION_PROMPT)
prompt = '\nModify profile to {}?'.format(' and '.join(actions))
response = console_io.PromptContinue(prompt)
# Update unspecified values to equal user response.
path_update = response if path_update is None else path_update
completion_update = (response if completion_update is None
else completion_update)
return path_update, completion_update
|
2759e9c42c702a69fa617fc3302cb3c850afc54a
| 27,848 |
def _get_static_covariate_df(trajectories):
"""The (static) covariate matrix."""
raw_v_df = (
trajectories.static_covariates.reset_coords(drop=True).transpose(
'location', 'static_covariate').to_pandas())
# This can then be used with, e.g. patsy.
# expanded_v_df = patsy(raw_v_df, ...patsy details...)
# Optionally it can be converted back to xa using.
# expanded_v_xa = xarray.DataArray(expanded_v_df)
# for now...
v_df = raw_v_df
return v_df
|
15c8f367452fc5007ad93fd86e04cfea07e96982
| 27,850 |
import json
def answer_cells_of_nb(a_ipynb):
"""
get the contents of all answer cells (having grade_id)
in an a_ipynb file
"""
cells = {}
with open(a_ipynb) as ipynb_fp:
content = json.load(ipynb_fp)
for cell in content["cells"]:
meta = cell["metadata"]
nbg = meta.get("nbgrader")
if nbg is None or not nbg["solution"]:
continue
assert("grade_id" in nbg), (a_ipynb, cell)
prob_name = nbg["grade_id"] # like a1-1-1
source = cell["source"]
outputs = cell.get("outputs", [])
assert(prob_name not in cells), prob_name
cells[prob_name] = source, outputs
return cells
|
3b011d48a8ccfa13d462cccf1b0a58440231a1ce
| 27,851 |
import numpy as np
from PIL import Image
def _transpose_augment(img_arr):
    """Mirror (left-right flip) augmentation."""
    img = Image.fromarray(img_arr, "L")
    return [np.asarray(img.transpose(Image.FLIP_LEFT_RIGHT))]
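# Usage sketch on a tiny grayscale image: the left-right flip reverses each row.
img = np.array([[0, 1], [2, 3]], dtype=np.uint8)
print(_transpose_augment(img)[0])
# [[1 0]
#  [3 2]]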
|
096c8db4c008c78a5f22bffeea8bb64fb0a4de09
| 27,852 |