content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M)
---|---|---|
import numpy as np
def norm(x):
"""Calculate the Euclidean norm of a vector x."""
return np.sqrt(np.dot(x, x))
|
c09ff946ed6248e4bf57b87f96ec4ad54ce4cdc8
| 34,923 |
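A minimal usage sketch for `norm` above (my own example; it assumes NumPy, which the function requires):
import numpy as np
v = np.array([3.0, 4.0])
print(norm(v))  # 5.0, since sqrt(3^2 + 4^2) = 5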
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_palo_alto_wildfire package"""
reload_params = {"package": u"fn_palo_alto_wildfire",
"incident_fields": [],
"action_fields": [],
"function_params": [u"artifact_id", u"artifact_type", u"artifact_value", u"attachment_id", u"incident_id"],
"datatables": [u"palo_alto_wildfire_results"],
"message_destinations": [u"palo_alto_wildfire"],
"functions": [u"palo_alto_wildfire_file_submission_artifact", u"palo_alto_wildfire_file_submission_attachment", u"palo_alto_wildfire_url_submission"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_palo_alto_wildfire_file_submission_artifact", u"example_palo_alto_wildfire_file_submission_attachment", u"example_palo_alto_wildfire_url_submission"],
"actions": [u"Example: Submit File (Artifact) to WildFire", u"Example: Submit File (Attachment) to WildFire", u"Example: Submit URL to WildFire"],
"incident_artifact_types": []
}
return reload_params
|
554f93deb50664db00ef59a67f76a979779cb9c9
| 34,924 |
def min_number_in_rotated_array(r_nums):
"""
    :param r_nums: rotated array
:return: min number
"""
if not r_nums:
return None
left = 0
right = len(r_nums)-1
while left < right:
mid = (left + right) // 2
if r_nums[mid] == r_nums[right] == r_nums[left]:
right -= 1
elif r_nums[mid] <= r_nums[right]:
right = mid
else:
left = mid + 1
return r_nums[left]
|
97cd37fb040a38b6c52cf816d29b97aa36c3c338
| 34,925 |
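A quick usage sketch for `min_number_in_rotated_array` above (my own example):
print(min_number_in_rotated_array([3, 4, 5, 1, 2]))  # -> 1
print(min_number_in_rotated_array([1, 0, 1, 1, 1]))  # duplicate-heavy case -> 0
print(min_number_in_rotated_array([]))               # empty input -> None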
def scatter_columns(A):
"""
Performs the reverse operation as gather_columns. Thus, each prow receives
the prow'th row-slice of A.
    If A has local shape (grid[0] * m_l, n_l), the result has local shape
    (m_l, n_l). If the number of local rows in A is not an even multiple
    of grid[0], an error is raised.
"""
m_l, n_l = A.shape
if m_l % NROWS != 0:
        raise TypeError(f"Rows of A: {A.shape} can't be scattered over "
                        f"{NROWS} prows.")
panel_size = m_l // NROWS
start = my_prow() * panel_size
return jax.lax.dynamic_slice(A, (start, jnp.zeros_like(start)),
(panel_size, n_l))
|
7619491aa08ee172aa77c15b44caa889b690cc54
| 34,927 |
def digitos(valor):
    """Return a string containing only the digits of the original string."""
return ''.join([d for d in valor if d.isdigit()])
|
dc742d871efefa8067f33c95cc277963e3cfa201
| 34,928 |
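A small usage sketch for `digitos` above (my own example):
print(digitos("CEP: 01310-100"))  # -> '01310100'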
def from_Point(ros_pt):
"""From ROS Point to Klamp't point"""
return [ros_pt.x,ros_pt.y,ros_pt.z]
|
34d83ea0266883679c7e2f51c4eb555e189940d4
| 34,929 |
def greedy():
"""A greedy distribution."""
def sample_fn(key: ArrayLike, preferences: ArrayLike):
probs = _argmax_with_random_tie_breaking(preferences)
return _categorical_sample(key, probs)
def probs_fn(preferences: ArrayLike):
return _argmax_with_random_tie_breaking(preferences)
def log_prob_fn(sample: ArrayLike, preferences: ArrayLike):
probs = _argmax_with_random_tie_breaking(preferences)
return base.batched_index(jnp.log(probs), sample)
def entropy_fn(preferences: ArrayLike):
probs = _argmax_with_random_tie_breaking(preferences)
return -jnp.nansum(probs * jnp.log(probs), axis=-1)
return DiscreteDistribution(sample_fn, probs_fn, log_prob_fn, entropy_fn)
|
b99c4c2d410be42be33c38633b4b998e5db37be7
| 34,930 |
def GibbsSampler(dna_list, k, t, N, mode = 'v1', repeat=20):
    """Input a list of DNA sequences, output the best set of motifs.
    The motifs are generated by Gibbs sampling and optimized by comparing
    hamming or entropy scores. Mode 'v1' returns a kmer by weighted probability;
    mode 'v2' returns the most probable kmer.
"""
start = True
for i in range(repeat):
ran_list = [rd.randint(0, len(dna_list[0]) - k) for i in range(t)]
iter_seqs = [seq[ran_list[i]: ran_list[i] + k]
for i, seq in enumerate(dna_list)]
if start:
best_seqs = iter_seqs[:]
best_score = w4.ProfileMatrix(best_seqs, k, t, mode='ham')
start = False
# Inner loop.
for j in range(N):
index = rd.randint(0, t-1)
            # Generate the profile matrix with the rest of iter_seqs.
iter_seqs.pop(index)
profile = w4.ProfileMatrix(iter_seqs, k, t-1, mode='profile')
if mode == 'v1':
iter_seqs.insert(index, w4.GibbsKmer(dna_list[index],
k, profile))
if mode == 'v2':
iter_seqs.insert(index, w4.ProfileMostPKmer(dna_list[index],
k, profile))
# iter_seqs[index] = w4.ProfileMostPKmer(dna_list[index],
# k, profile)
iter_score = w4.ProfileMatrix(iter_seqs, k, t, mode='ham')
if iter_score < best_score:
best_seqs = iter_seqs[:]
best_score = iter_score
print(w4.ProfileMatrix(best_seqs, k, t, mode='ham'))
return best_seqs
|
767597df8b23baf09666009a90452d9d51a814fb
| 34,931 |
def read_input_h5(h5):
"""
    Reads astra input from h5
See: write_input_h5
"""
d = {}
for g in h5:
d[g] = dict(h5[g].attrs)
# Convert to native types
for k, v in d[g].items():
d[g][k] = native_type(v)
return d
|
1bcf29418df9fcd4d49bb86e32c47febf2bb2403
| 34,932 |
import math
def to_half_life(days):
"""
Return the constant [1/s] from the half life length [day]
"""
s= 24 * 3600 * days
return -math.log(1/2)/s
|
7224be1e3e460336493d49c3f2b3d8932341f575
| 34,933 |
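A worked example for `to_half_life` above (my own numbers): a one-day half-life gives a decay constant of ln(2) / 86400 s.
import math
k = to_half_life(1)
print(k)                                     # ~8.02e-06 [1/s]
print(math.isclose(k, math.log(2) / 86400))  # True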
def fit_on_batch(model, x, y, loss_fn, optimizer, metrics=["loss", "acc"]):
"""Trains the model on a single batch of examples.
This is a training function for a basic classifier. For more complex models,
you should write your own training function.
NOTE: Before you call this, make sure to do `model.train(True)`.
Parameters
----------
model: nn.Module
The model to train.
x: Tensor or numpy array
Image tensors should have size (batch_size, in_channels, height, width).
y: Tensor or numpy array
Contains the label indices (not one-hot encoded).
loss_fn:
The loss function to use.
optimizer:
The SGD optimizer to use.
metrics: list
Which metrics to compute over the batch.
Returns
-------
dict
The computed metrics for this batch.
"""
optimizer.zero_grad()
# Forward pass
outputs = model(make_var(x))
# Compute loss
y_true = make_var(y, dtype=np.int)
loss = loss_fn(outputs, y_true)
# Backward pass
loss.backward()
optimizer.step()
# Additional metrics
results = {}
if "loss" in metrics:
results["loss"] = loss.data[0]
if "acc" in metrics:
results["acc"] = accuracy_metric(outputs, y_true)
if "mse" in metrics:
results["mse"] = F.mse_loss(outputs, y_true).data[0]
return results
|
87fa7342bdf84a382fdcce5edde9458bec7bb4c8
| 34,934 |
from difflib import SequenceMatcher
def get_longest_common_substring(text_a, text_b):
    """Find the longest common substring."""
# isjunk=None, a='', b='', autojunk=True
seqMatch = SequenceMatcher(None, text_a, text_b, autojunk=False)
#Also:
# autojunk = True (default)
    # isjunk = None (default), same as: lambda x: False;
# or return True for junk (ignored) using: isjunk = lambda x: x in " \t"
# find_longest_match(alo=0, ahi=None, blo=0, bhi=None)
# Find longest matching block in a[alo:ahi] and b[blo:bhi].
match = seqMatch.find_longest_match(0, len(text_a), 0, len(text_b))
if (match.size):
return text_a[match.a: match.a + match.size]
|
9b4c75525aa071892aba11919aa59dae2e4a43de
| 34,935 |
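A minimal usage sketch for `get_longest_common_substring` above (my own example):
print(get_longest_common_substring("interstellar", "stellar mass"))  # -> 'stellar'
print(get_longest_common_substring("abc", "xyz"))                    # no match -> None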
from typing import OrderedDict
def from_group(group, time_index=False, absolute_time=False, scaled_data=True):
"""
Converts a TDMS group object to a DataFrame. DataFrame columns are named using the channel names.
:param group: Group object to convert.
:param time_index: Whether to include a time index for the dataframe.
:param absolute_time: If time_index is true, whether the time index
values are absolute times or relative to the start time.
:param scaled_data: By default the scaled data will be used.
Set to False to use raw unscaled data.
:return: The TDMS object data.
:rtype: pandas.DataFrame
"""
channels_to_export = OrderedDict((ch.name, ch) for ch in group.channels())
return _channels_to_dataframe(channels_to_export, time_index, absolute_time, scaled_data)
|
ba2d049a8c3076e02c7a9bbb8019ef901957aa7a
| 34,936 |
import time
def _bq_harness_with_result(sql, do_batch):
"""
Handles all the boilerplate for running a BQ job
"""
client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
if do_batch:
job_config.priority = bigquery.QueryPriority.BATCH
location = 'US'
# API request - starts the query
query_job = client.query(sql, location=location, job_config=job_config)
# Query
job_state = 'NOT_STARTED'
while job_state != 'DONE':
query_job = client.get_job(query_job.job_id, location=location)
#print('Job {} is currently in state {}'.format(query_job.job_id, query_job.state))
job_state = query_job.state
if job_state != 'DONE':
time.sleep(5)
#print('Job {} is done'.format(query_job.job_id))
query_job = client.get_job(query_job.job_id, location=location)
if query_job.error_result is not None:
print('Error result!! {}'.format(query_job.error_result))
return None
results = query_job.result()
return results
|
8546498d7e4553e5c0c74f7515db5c75ef4338ad
| 34,938 |
def create_workflow(name=None, namespace=None, bucket=None, **kwargs):
"""
Args:
name: Name to give to the workflow. This can also be used to name
things associated with the workflow.
"""
builder = Builder(name=name, namespace=namespace, bucket=bucket, **kwargs)
return builder.build()
|
58980a10d3ebe858547c71efc65c369a78373016
| 34,939 |
def validadeInequalityFilter(inequality_filters, entity):
    """Check whether an entity satisfies all of the given inequality filters"""
check = True
    # Iterate over every inequality filter provided
    for f in inequality_filters:
        if hasattr(entity, f["field"]):  # Check if the entity has the attribute
            # Check if the entity satisfies the filter
if not VALIDATORS[f["operator"]](getattr(entity, f["field"]), f['value']):
check = False
else:
check = False
return check
|
374174dc4b9aac720c74fdf83b55321ed6525a7b
| 34,940 |
def flops_metric_map(end_points, mean_metric, total_name='Total Flops'):
"""Assembles flops-count metrics into a map for use in tf.contrib.metrics."""
metric_map = {}
total_flops = tf.to_float(end_points['flops'])
flops_map = moments_metric_map(total_flops, total_name, mean_metric,
delimiter='/', do_shift=True)
metric_map.update(flops_map)
for block_scope in end_points['block_scopes']:
name = '{}/flops'.format(block_scope)
flops = tf.to_float(end_points[name])
flops_map = moments_metric_map(flops, name, mean_metric, do_shift=True)
metric_map.update(flops_map)
return metric_map
|
fd18df2d4f442e8afc9902760a864a479eefe7c5
| 34,942 |
def _is_false2(x):
"""Non-vectorized helper function"""
return (x in ("False", "false", "0") or not bool(x)) and not _is_na2(x)
|
972983f25474017065128023de1011f40c319866
| 34,943 |
import re
def process_document(document, context_size, dictionary, fixed_dictionary=False):
"""
Given a dictionary, extract the tuples of words of length equal to
context_size. Each word is represented by a unique integer number.
If fixed_dictionary is True, only take consecutive tuples of words
being (all of them) in the dictionary.
Example:
document = "This is a new document"
context_size = 4
dictionary = {
0: "this",
1: "is",
2: "a",
3: "new",
4: "document"
}
return
[(0, 1, 2, 3), (1, 2, 3, 4)]
"""
text = document.lower()
p = re.compile("[a-z]+")
tokens = p.findall(text)
list_of_points = []
for i in range(len(tokens) - context_size + 1):
data_point = [0 for l in range(context_size)]
add_new_data_point = True
for j in range(context_size):
k = i+j
if tokens[k] not in dictionary.index:
if fixed_dictionary:
# only takes series of words in the dictionary
add_new_data_point = False
break
else:
new_Ix = dictionary.size
dictionary[new_Ix] = tokens[k]
data_point[j] = dictionary.index[tokens[k]]
if add_new_data_point:
list_of_points.append(tuple(data_point))
return list_of_points
|
3f3531faa8c9aad63ac798e9c3e3a06230d5ecf7
| 34,944 |
def list_buckets():
"""
Lists available buckets.
:return: list of available buckets
Amazon (2019) s3-python-example-list-buckets.py
Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
"""
# create s3 client
s3 = boto3.client('s3')
# call client and get list of buckets
response = s3.list_buckets()
# get a list of all bucket names from the response
buckets = [bucket['Name'] for bucket in response['Buckets']]
# print list of bucket names
return buckets
|
e669d1137d43ffc62d474a36be6e64c5b02eca2a
| 34,945 |
def site_time_zone(request, registry, settings):
    """Expose the site time zone from the ``tm.site_timezone`` config variable to templates.
By best practices, all dates and times should be stored in the database using :term:`UTC` time. This setting
allows quickly convert dates and times to your local time.
Example:
.. code-block:: html+jinja
<p>
<strong>Bar opens</strong>:
{{ opening_at|friendly_time(timezone=site_time_zone) }}
</p>
Default value is ``UTC``.
See `timezone abbreviation list <https://en.wikipedia.org/wiki/List_of_time_zone_abbreviations>`_.
"""
return settings.get("tm.site_timezone", "UTC")
|
40d3314d35fde3d77a4e2f5fe441eee499f8de03
| 34,946 |
import numpy as np
def qubic_spline_coeff(x_nodes, y_nodes):
"""Here underscored variables are related to the matrix equation,
whereas normal ones stand for the spline coefficients
"""
polynomials_num = len(x_nodes) - 1
coeffs = np.zeros((polynomials_num, 3))
hs = (x_nodes - np.roll(x_nodes, 1))[1:]
ys = (y_nodes - np.roll(y_nodes, 1))[1:]
# Build A
upper_diag = hs.copy()
upper_diag[0] = 0
lower_diag = hs.copy()
lower_diag[-1] = 0
diag = np.r_[[1], 2 * (upper_diag[1:] + lower_diag[:-1]), [1]]
A_ = np.diag(upper_diag, 1) + np.diag(diag) + np.diag(lower_diag, -1)
# Build b
b_ = np.r_[[0], 3 / hs[1:] * ys[1:] - 3 / hs[:-1] * ys[:-1], [0]]
c = np.dot(np.linalg.inv(A_), b_)
a = y_nodes[:-1]
b = 1 / hs * ys - hs / 3 * (2 * c[:-1] + c[1:])
d = 1 / (3 * hs) * (c[1:] - c[:-1])
return np.c_[a, b, c[:-1], d]
|
95d1f50d638919355acbe21d2564ab7a37a4b920
| 34,947 |
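A shape-level sanity check for `qubic_spline_coeff` above (my own example; it only verifies the output layout, one (a, b, c, d) row per spline segment):
import numpy as np
x_nodes = np.array([0.0, 1.0, 2.0, 3.0])
y_nodes = np.array([0.0, 1.0, 4.0, 9.0])
coeffs = qubic_spline_coeff(x_nodes, y_nodes)
print(coeffs.shape)  # (3, 4): three segments, coefficients a, b, c, d for each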
from bs4 import BeautifulSoup
from collections import defaultdict
import itertools
def load_C2O(xml_file):
"""Load a C2O decision model.
See http://www.jku.at/isse/content/e139529/e126342/e126343 for
information about the C2O (Configurator 2.0) tool.
Arguments:
xml_file: Path to a C2O XML file.
Returns:
A tuple with two elements `var_domains` and `constraints`
giving the variable domains and constraints, respectively.
"""
with open(xml_file) as fd:
soup = BeautifulSoup(fd, "xml")
# Fail early if the file contains cnfRelation tags.
if soup.find("cnfRelation"):
raise ValueError("CNF relations not yet supported")
# Parse the questions.
# Keys: a question id.
# Values: set of possible answers.
questions = {}
for question in soup.model.questions.find_all("question"):
choices = set()
for choice in question.find_all("choice"):
choices.add(choice["name"])
questions[question["identifier"]] = choices
# Parse the constraint relations.
# Keys: (source question, target question) tuples.
# Values: dict mapping a source choice to a set of allowed target choices.
restrictions = defaultdict(dict)
for constraint in soup.model.relations.find_all("constraintRelation"):
source_id = constraint.source["questionIdentifier"]
for rule in [r for r in constraint.find_all("rule") if r.contents]:
source_choice = rule["choiceName"]
for target in constraint.targets.find_all("target"):
target_id = target["questionIdentifier"]
target_choices = set()
if rule.contents[0].name == "allowed":
for allowed in rule.find_all("allowed"):
target_choices.add(allowed["choiceName"])
else:
target_choices.update(questions[target_id])
for disallowed in rule.find_all("disallowed"):
target_choices.remove(disallowed["choiceName"])
# Populate the constraints dict.
k = (source_id, target_id)
restrictions[k][source_choice] = target_choices
# Parse the relevancy relations.
# Keys: (source question, target question) tuples.
# Values: a set of choices of the source question that makes
# the target question irrelevant.
relevancies = {}
for relevancy in soup.model.relations.find_all("relevancyRelation"):
source_id = relevancy.source["questionIdentifier"]
for target in relevancy.targets.find_all("target"):
target_id = target["questionIdentifier"]
irrelevant_choices = set()
if any(t.name == "irrelevantIf" for t in relevancy.find_all(True)):
for irrelevant in relevancy.find_all("irrelevantIf"):
irrelevant_choices.add(irrelevant["choiceName"])
else:
irrelevant_choices.update(questions[source_id])
for relevant in relevancy.find_all("relevantIf"):
irrelevant_choices.remove(relevant["choiceName"])
# Populate the relevancies dict.
k = (source_id, target_id)
relevancies[k] = irrelevant_choices
# Transform the problem into the representation used in the library.
irrelevant_value = "Irrelevant"
question_ids = {i: question for i, question in
enumerate(sorted(questions.keys()))}
question_indices = {q: i for i, q in question_ids.items()}
var_domains = [list(sorted(questions[question]))
for question in sorted(questions.keys())]
for question, i in question_indices.items():
assert questions[question] == set(var_domains[i])
assert irrelevant_value not in questions[question]
# Add an additional value for variables that can become irrelevant.
for source, target in relevancies.keys():
var_domain = var_domains[question_indices[target]]
if irrelevant_value not in var_domain:
var_domain.append(irrelevant_value)
def constraint_fun(var_indices, var_values):
source = question_ids[var_indices[0]]
target = question_ids[var_indices[1]]
source_value, target_value = var_values
# Check for restrictions.
if (source, target) in restrictions:
allowed_target_choices = restrictions[(source, target)]
if (source_value in allowed_target_choices and
target_value not in allowed_target_choices[source_value]):
return False
# Check for relevancy relations.
if (source, target) in relevancies:
irrelevant_choices = relevancies[(source, target)]
if (source_value in irrelevant_choices and
target_value != irrelevant_value):
return False
# Passed all the constraints.
return True
constraints = {}
for source, target in \
itertools.chain(restrictions.keys(), relevancies.keys()):
if (source, target) not in constraints:
var_indices = (question_indices[source],
question_indices[target])
constraints[var_indices] = constraint_fun
constraints = list(constraints.items())
return var_domains, constraints
|
b708e94d6a29829d0df775f784ad7070aa663edc
| 34,948 |
def contact_infectivity_asymptomatic_40x50():
"""
Real Name: b'contact infectivity asymptomatic 40x50'
Original Eqn: b'contacts per person normal 40x50*infectivity per contact'
Units: b'1/Day'
Limits: (None, None)
Type: component
b''
"""
return contacts_per_person_normal_40x50() * infectivity_per_contact()
|
63d1d87a35bc2eafa64b3b22993f388ce617c3b4
| 34,949 |
from functools import wraps
def freezeclass(cls):
""" Decorator to freeze a class."""
cls.__frozen = False
def frozensetattr(self, key, value):
if self.__frozen and not hasattr(self, key):
print("Class {} is frozen. Cannot set {} = {}"
.format(cls.__name__, key, value))
else:
object.__setattr__(self, key, value)
def init_decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self.__frozen = True
return wrapper
cls.__setattr__ = frozensetattr
cls.__init__ = init_decorator(cls.__init__)
return cls
|
58fd3754c93dfcfa3aeb8b7ed722cd7d4bfd307e
| 34,950 |
def gsc_url_keyword(prop, start, end, query, url):
"""Return position, clicks & impressions from GSC for keyword for URL."""
#API Not adapted to Pipulate yet
request = {
"startDate": start,
"endDate": end,
"dimensions": [
"query",
"page"
],
"dimensionFilterGroups": [
{
"filters": [
{
"operator": "equals",
"expression": url,
"dimension": "page"
},
{
"operator": "equals",
"expression": query,
"dimension": "query"
}
]
}
]
}
data = gs.gsc().searchanalytics().query(siteUrl=prop, body=request).execute()
val = []
if 'rows' in data:
r = data['rows'][0]
val = [start] + [r['keys'][0]] + [r['keys'][1]] + [
r['position']] + [r['clicks']] + [r['impressions']] + [r['ctr']]
return val
|
3fa1993e73cf8672eef232a7de12c6db1914ca51
| 34,951 |
def zerofill_net(input_size=(640, None, 1), **dummy_kwargs):
"""A net that performs a simple zero-filled reconstruction
Parameters:
input_size (tuple): the size of your input kspace
Returns:
keras.models.Model: the zerofill net model, compiled
"""
# shapes
mask_shape = input_size[:-1]
# inputs and buffers
kspace_input = Input(input_size, dtype='complex64', name='kspace_input_simple')
mask = Input(mask_shape, dtype='complex64', name='mask_input_simple')
# # simple inverse
image_res = Lambda(tf_adj_op, output_shape=input_size, name='ifft_simple')([kspace_input, mask])
image_res = tf_fastmri_format(image_res)
model = Model(inputs=[kspace_input, mask], outputs=image_res)
default_model_compile(model, lr=1e-3)
return model
|
bde693f37097c797c8a2344ef8f196241e619147
| 34,952 |
def __useless_contour(shape, detected, last_cont,center,angle):
"""
Erase the useless contours, center, and angle.
Contours, center, and angle are erased if the shape type is ALL, PARTIAL or UNKNOWN.
Parameters
----------
shape : Shape
The shape we want to detect
detected : Shape
The detected shape
last_cont : numpy.ndarray
Contours of the shape.
center : tuple of float
Center of the detected shape.
angle : float
Angle of rotation of the detected shape.
Returns
-------
last_cont : numpy.ndarray
Contours of the shape.
center : tuple of float
Center of the detected shape.
angle : float
Angle of rotation of the detected shape.
"""
if shape == Shape.Shape.ALL.value or \
shape == Shape.Shape.PARTIAL.value or \
shape == Shape.Shape.UNKNOW.value:
last_cont = np.array([])
center = None
angle = None
    return last_cont, center, angle
|
6daa7add5ae79222853600d3e7e7f406a2d4c37e
| 34,953 |
def add_b(_rb):
""" Add for Baselines
"""
def add(e):
for i in range(e["obs"].shape[0]):
_rb.add(obs_t=e["obs"][i],
action=e["act"][i],
reward=e["rew"][i],
obs_tp1=e["next_obs"][i],
done=e["done"][i])
return add
|
1b2d6bb94958a00a5c43e0b601f4999b0271f932
| 34,954 |
def get_ncfile(fname='cmip5.CSIRO-Mk3-6-0.nc'):
""" Return one netCDF file
"""
return join(get_datadir(),fname)
|
1638eaa0519ac4415ea114dc6abdb760f951f3d5
| 34,955 |
def generate_complete_path(filename:str, main_folder="./temp/", subfolders='', file_extension = ".png", save_files=True):
"""
    Function to create the full path of a plot based on `filename`. It creates all the subfolders required to save the final file.
If `save_files=False` returns `None`, useful to control from a global variable whether files should be updated or not.
:param filename: Name of the file (without extension)
:type filename: str
:param main_folder: Root folder for the files
:type main_folder: str
:param subfolders: Subfolders attached after the root.
:type subfolders: str
:param file_extension: Extension of the image
:type file_extension: str
    :param save_files: Whether to create the path or return None.
:type save_files: boolean
:return: Complete path to create a file
:rtype: str
Example: generate_complete_path("histogram", main_folder="./plots/", subfolders='dataset1/') will return
"./plots/dataset1/histogram.png"
"""
if (save_files):
path = main_folder + subfolders + filename + file_extension
check_or_create_folder(path)
return path
else:
return None
|
b97e7531088688d72ef3a248abedcfef3a054394
| 34,956 |
def realm_from_principal(principal):
"""
Attempt to retrieve a realm name from a principal, if the principal is fully qualified.
:param principal: A principal name: [email protected]
:type: principal: str
:return: realm if present, else None
:rtype: str
"""
if '@' not in principal:
return
else:
parts = principal.split('@')
if len(parts) < 2:
return
return parts[-1]
|
1880fef7b4383edc6f2ccd94958200686d500e0c
| 34,957 |
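A quick usage sketch for `realm_from_principal` above (my own example with a made-up principal):
print(realm_from_principal("alice@EXAMPLE.COM"))  # -> 'EXAMPLE.COM'
print(realm_from_principal("alice"))              # no realm present -> None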
from typing import List
import torch
def patch_batchnorm(module: nn.Module) -> List:
"""Patch all batchnorm instances (1d, 2d, 3d, sync_bn, etc.) of a module
so that they don't track running stats when torch.no_grad() is enabled.
This is important in activation checkpointing to ensure stats are tracked
correctly as if there were no activation checkpointing. The reason is
that activation checkpointing runs the forward function twice, first
with torch.no_grad(), then with torch.grad().
Args:
module (nn.Module):
The module to be patched in-place.
Returns:
(list):
            A list of hook handles that can later be freed.
"""
def pre_forward(module: _BatchNorm, input: Tensor) -> None:
if torch.is_grad_enabled():
return
module._track_running_stats_backup = module.track_running_stats
module.track_running_stats = False
def post_forward(module: _BatchNorm, input: Tensor, result: Tensor) -> None:
if torch.is_grad_enabled():
return
module.track_running_stats = module._track_running_stats_backup
hooks = []
for name, child in module.named_modules():
# _BatchNorm is base for bn1d, bn2d, bn3d and sync_bn, apex_sync_bn, etc.
if isinstance(child, _BatchNorm):
# Register the pre/post hooks.
pre_handle = child.register_forward_pre_hook(pre_forward)
post_handle = child.register_forward_hook(post_forward)
hooks += [pre_handle, post_handle]
return hooks
|
d1456d48db5f2016716aaaa4f87f2eb77e4dcd43
| 34,958 |
import six
import numpy as np
def _joined_names_column(df):
"""
Join data from all name columns into a single column.
"""
return df.apply(
lambda row: ','.join(set([
six.text_type(n)
for n in [row['main_name'], row['asciiname'], row['alternatenames']]
if n and n is not np.nan
])),
axis=1
)
|
d563971403758035bf9c57442a3d99c246f2fb92
| 34,960 |
def skip(
num_input_channels=2, num_output_channels=3, num_channels_down=[16, 32, 64, 128, 128],
num_channels_up=[16, 32, 64, 128, 128],
num_channels_skip=[4, 4, 4, 4, 4], filter_size_down=3,
filter_size_up=3, filter_skip_size=1, need_sigmoid=True, need_bias=True,
pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU', need1x1_up=True):
"""
Assembles encoder-decoder with skip connections.
Arguments:
act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
pad (string): zero|reflection (default: 'zero')
upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')
"""
assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
n_scales = len(num_channels_down)
if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)):
upsample_mode = [upsample_mode] * n_scales
if not (isinstance(downsample_mode, list) or isinstance(downsample_mode, tuple)):
downsample_mode = [downsample_mode] * n_scales
if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)):
filter_size_down = [filter_size_down] * n_scales
if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)):
filter_size_up = [filter_size_up] * n_scales
last_scale = n_scales - 1
cur_depth = None
model = nn.Sequential()
model_tmp = model
input_depth = num_input_channels
for i in range(len(num_channels_down)):
deeper = nn.Sequential()
skip = nn.Sequential()
if num_channels_skip[i] != 0:
model_tmp.add(Concat(1, skip, deeper))
else:
model_tmp.add(deeper)
model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
if num_channels_skip[i] != 0:
skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
skip.add(bn(num_channels_skip[i]))
skip.add(act(act_fun))
deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad,
downsample_mode=downsample_mode[i]))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
deeper_main = nn.Sequential()
if i == len(num_channels_down) - 1:
# The deepest
k = num_channels_down[i]
else:
deeper.add(deeper_main)
k = num_channels_up[i + 1]
deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i], align_corners=True))
model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
# model_tmp.add(layer_norm(num_channels_up[i]))
model_tmp.add(act(act_fun))
if need1x1_up:
model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
# model_tmp.add(layer_norm(num_channels_up[i]))
model_tmp.add(act(act_fun))
input_depth = num_channels_down[i]
model_tmp = deeper_main
model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
if need_sigmoid:
model.add(nn.Sigmoid())
return model
|
4c26a931d701f1cfb439a835c1fe628569bbc24f
| 34,961 |
def validate_kml(possible_files):
"""Validate uploaded KML file and a possible image companion file
    KML files that specify vector data types are uploaded standalone.
However, if the KML specifies a GroundOverlay type (raster) they are
uploaded together with a raster file.
"""
kml_file = [
f for f in possible_files if f.name.lower().endswith(".kml")][0]
others = [
f.name for f in possible_files if not f.name.lower().endswith(".kml")]
kml_file.seek(0)
kml_bytes = kml_file.read()
result = _validate_kml_bytes(kml_bytes, others)
if not result:
kml_doc, namespaces = get_kml_doc(kml_bytes)
if kml_doc and namespaces:
return ("kml", "sld", )
return result
|
e5389c7b3b9972757fb44a5c2e29af4aaede1618
| 34,962 |
import base64
def mailform():
"""Sample form for sending email via Microsoft Graph."""
# read user profile data
user_profile = MSGRAPH.get('me/', headers=request_headers()).data
user_name = user_profile['displayName']
# get profile photo
photo_data, _, profile_pic = profile_photo(client=MSGRAPH, save_as='me')
# save photo data as config.photo for use in mailform.html/mailsent.html
if profile_pic:
config.photo = base64.b64encode(photo_data).decode()
else:
profile_pic = 'static/images/no-profile-photo.png'
with open(profile_pic, 'rb') as fhandle:
config.photo = base64.b64encode(fhandle.read()).decode()
# upload profile photo to OneDrive
upload_response = upload_file(client=MSGRAPH, filename=profile_pic)
if str(upload_response.status).startswith('2'):
# create a sharing link for the uploaded photo
link_url = sharing_link(client=MSGRAPH, item_id=upload_response.data['id'])
else:
link_url = ''
body = flask.render_template('email.html', name=user_name, link_url=link_url)
return flask.render_template('mailform.html',
name=user_name,
email=user_profile['userPrincipalName'],
profile_pic=profile_pic,
photo_data=config.photo,
link_url=link_url,
body=body)
|
e9180515b84e7f012aef576e123ea08fba3aefda
| 34,963 |
def _global_query_(included_interviews=None, included_globals=None, client_as_numeric=True, exclude_reliability=True):
"""
Constructs the globals query for session-level datasets
:param included_interviews: iterable of str specifying names of interviews to include
    :param included_globals: iterable of str specifying names of global properties to include
:param client_as_numeric: Whether to cast client_id as a numeric type. Default True
:param exclude_reliability: Whether to exclude (True, default) or include (False) interviews of type 'reliability'
:return: ModelSelect, Cte - The full query for global ratings and the CTE associated object
"""
client_column = Cast(Interview.client_id, "INT").alias('client_id') if client_as_numeric else Interview.client_id
# May want only certain interviews included or certain properties included,
# so construct some predicates for where clauses, if necessary
types = ['general'] if exclude_reliability else ['general', 'reliability']
predicate = Interview.interview_type.in_(types)
if included_interviews is not None:
predicate = predicate & Interview.interview_name.in_(included_interviews)
if included_globals is not None:
predicate = predicate & GlobalProperty.gp_name.in_(included_globals)
"""
Logic above replaces this
global_predicate = ((p1) & (p2) & (p3)) if included_interviews is not None and included_globals is not None else \
((p1) & (p3)) if included_interviews is not None else \
((p2) & (p3)) if included_globals is not None else \
p3
"""
# For any session-level/decile dataset, we want scores for all session-level globals.
# Thus, there will need to be either a UNION ALL of counts and global ratings
# or a separate query for globals.
# Below constructs the global ratings part of the UNION ALL
global_query = (GlobalRating.select(GlobalRating.interview_id, GlobalProperty.gp_name,
Cast(GlobalValue.gv_value, "INT"), GlobalValue.global_property_id)
.join(GlobalValue).join(GlobalProperty, JOIN.LEFT_OUTER))
global_cte = global_query.cte("global_cte", columns=['interview_id', 'gp_name', 'gv_value', 'global_property_id'])
outer_global_query = (Interview
.select(Interview.interview_name, Interview.interview_type, client_column, Interview.rater_id,
Interview.session_number, GlobalProperty.gp_name, global_cte.c.gv_value)
.join(CodingSystem)
.join(GlobalProperty))
full_global_query = outer_global_query.join(
global_cte, JOIN.LEFT_OUTER, on=((Interview.interview_id == global_cte.c.interview_id) &
(GlobalProperty.global_property_id == global_cte.c.global_property_id))
)
# Append the predicate
full_global_query = full_global_query.where(predicate)
return full_global_query, global_cte
|
dca8599fc7625f4f8a2db5321c32666b6b384c02
| 34,964 |
def basic_detokenizer(tokens):
"""Reverse the process of the basic tokenizer below."""
result = []
previous_nospace = True
for t in tokens:
if is_char(t):
result.append(t[_CHAR_MARKER_LEN:])
previous_nospace = True
elif t == _SPACE:
result.append(" ")
previous_nospace = True
elif previous_nospace:
result.append(t)
previous_nospace = False
else:
result.extend([" ", t])
previous_nospace = False
return "".join(result)
|
073a388a3a7f2133457e9c7a61a499b830d59760
| 34,965 |
def get_task_state(exit_code):
"""Interprets the exit_code and return the corresponding task status string
Parameters
----------
exit_code: int
An integer that represents the return code of the task.
Returns
-------
A task status string corresponding to the exit code.
"""
if exit_code > 0:
return cook.TASK_FAILED
elif exit_code < 0:
return cook.TASK_KILLED
else:
return cook.TASK_FINISHED
|
99837d42586ffacc2d2b1e8dc938add185bc0e04
| 34,967 |
def filtertime(timestamp, interval):
    """Check if timestamp lies within the interval (T0, T1).
Args:
timestamp --> UNIX timestamp value.
interval --> `Tuple` of 2 UNIX timestamp values.
Returns:
`bool` --> True/False
"""
T0, T1 = interval
if (timestamp <= T1) and (timestamp >= T0):
return True
else:
return False
|
72fe1aa9ed01e59ad7bbe5299b4c21272fab7354
| 34,968 |
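A minimal usage sketch for `filtertime` above (my own example with arbitrary timestamps):
print(filtertime(1500, (1000, 2000)))  # True, 1500 lies inside [1000, 2000]
print(filtertime(2500, (1000, 2000)))  # False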
def is_dataframe(value):
"""
Check if an object is a Spark DataFrame
:param value:
:return:
"""
return isinstance(value, DataFrame)
|
954276a168586d7cd19846575d239a82083d3f9f
| 34,969 |
from typing import Type
from typing import Dict
def reverse_enum(enum_to_reverse: Type[SMOOTHIE_GCODE]) -> Dict:
"""
Returns dictionary with keys and values switched from passed Enum
:param enum_to_reverse: The Enum that you want to reverse
:return: Reversed dictionary
"""
# I don't know what is going on with mypy, it is complaining
# about keys not existing as an attribute. I am not calling it
# as an attribute. I am calling it as a function.
members = enum_to_reverse._member_map_.keys() # type: ignore[attr-defined]
values = [
enum_to_reverse[member].value
for member in members
]
return dict(zip(values, members))
|
4dbb65905d2441089eb989b28733b6f4c4e0b529
| 34,970 |
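A usage sketch for `reverse_enum` above (my own example; it substitutes a stand-in Enum for the SMOOTHIE_GCODE type named in the hint, since any Enum works at runtime):
from enum import Enum
class GCode(Enum):
    HOME = "G28"
    MOVE = "G0"
print(reverse_enum(GCode))  # -> {'G28': 'HOME', 'G0': 'MOVE'}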
def install_npm(path=None, build_dir=None, source_dir=None, build_cmd='build', force=False):
"""Return a Command for managing an npm installation.
Note: The command is skipped if the `--skip-npm` flag is used.
Parameters
----------
path: str, optional
The base path of the node package. Defaults to the repo root.
build_dir: str, optional
The target build directory. If this and source_dir are given,
the JavaScript will only be build if necessary.
source_dir: str, optional
The source code directory.
build_cmd: str, optional
The npm command to build assets to the build_dir.
"""
class NPM(BaseCommand):
description = 'install package.json dependencies using npm'
def run(self):
if skip_npm:
log.info('Skipping npm-installation')
return
node_package = path or here
node_modules = pjoin(node_package, 'node_modules')
if not which("npm"):
                log.error("`npm` unavailable. If you're running this command "
                          "using sudo, make sure `npm` is available to sudo")
return
if force or is_stale(node_modules, pjoin(node_package, 'package.json')):
log.info('Installing build dependencies with npm. This may '
'take a while...')
run(['npm', 'install'], cwd=node_package)
if build_dir and source_dir and not force:
should_build = is_stale(build_dir, source_dir)
else:
should_build = True
if should_build:
run(['npm', 'run', build_cmd], cwd=node_package)
return NPM
|
dd99c1a80f3fe3228d08c4ffad0efaf450c35aed
| 34,971 |
from typing import Iterable
from typing import List
from typing import Tuple
from typing import Counter
def group_Counter(trip: Iterable[Leg]) -> List[Tuple[int, int]]:
"""Group legs into bins with distances 5 nm or less.
>>> trip = [ ('s1', 'e1', 1), ('s4', 'e4', 4.9), ('s5', 'e5', 5), ('s6', 'e6', 6)]
>>> group_Counter(trip)
[(0, 2), (5, 2)]
"""
quantized = (int(5*(dist//5)) for start, stop, dist in trip)
return Counter(quantized).most_common()
|
51cf9dac6e25a4c5c3666a5ad60169304759d7c7
| 34,972 |
import time
def run_deduper(deduper, data_frame, settings_file, training_file, recall_weight = 1):
"""
Given a deduper object and a dataset, this function trains the model and
predicts which records are duplicates.
depends:
dedupe as dd
pandas as pd
time
params:
deduper: a Dedupe or StaticDedupe object
data_frame: a pandas data frame where each row is a record to be deduped
settings_file: a string giving the path where the settings should be written
training_file: a string giving the path where the labelled training examples should be written
recall_weight: a number indicating how much to privilege recall over precision.
returns:
deduper: the trained Dedupe object
matches: a list of tuples giving record ids of duplicates and confidence scores
"""
# Convert data frame into dict as required by Dedupe
df_nones = data_frame.where(pd.notnull(data_frame), None) # NaN must be converted to 'None'
data = df_nones.to_dict("index") # convert to list of record dicts
# If the model is untrained (i.e. if it has not been loaded from a saved 'settings file'),
# then train it:
if type(deduper) != dd.StaticDedupe:
# Train the model
print("Active Dedupe object found. Now training model...")
start = time.perf_counter()
deduper.train()
end = time.perf_counter()
print(f"Training complete. It took {end - start:.3f} seconds.")
print("Saving training data and trained parameters...")
# Save the training examples
with open(training_file, 'w') as tf:
deduper.writeTraining(tf)
print(f'Training data written to {training_file}.')
# Save the model parameters
with open(settings_file, 'wb') as sf:
deduper.writeSettings(sf)
print(f'Trained parameters written to {settings_file}.')
# Calculate threshold for matches
print(f"Computing threshold based on a recall weighting of {recall_weight}.")
start = time.perf_counter()
threshold = deduper.threshold(data, recall_weight = recall_weight)
end = time.perf_counter()
print(f"Computation complete. Threshold = {threshold}. It took {end - start:.3f} seconds.")
# Compute the matches
print("Clustering...")
start = time.perf_counter()
matches = deduper.match(data, threshold)
end = time.perf_counter()
print(f"Clustering complete. {len(matches)} clusters found. It took {end - start:.3f} seconds.")
return deduper, matches
|
918969cf37dc948ab88e19e7239873da384bafa2
| 34,973 |
def _get_file_preferred_suffix() -> tuple:
    """Build the preferred file suffixes based on ontologia/core.lkg.yml plus
    the LANGUAGE env variable (the languages the user knows).
    Returns:
        tuple: the resulting file suffixes
"""
userpref_suffix = []
core_suffix = CORE_LKG['fs']['hdp']['base']
userlangs_upper = get_language_user_know()
if len(userlangs_upper) > 0:
userlangs = map(lambda x: x.lower(), userlangs_upper)
for lang_ in userlangs:
userpref_suffix.append(lang_ + '.hdp.json')
userpref_suffix.append(lang_ + '.hdp.yml')
combined_suffixes = userpref_suffix + core_suffix
return tuple(combined_suffixes)
|
2bb2c3a7d2543d284203467d93ea4d3c0cfedbd3
| 34,974 |
def fatorial(n, show=False):
    """
    -> Calculate the factorial of n, optionally printing the multiplication.
    :param n: the number whose factorial is computed
    :param show: whether to show the calculation step by step
    :return: the factorial of n
    """
f = 1
for c in range (n, 0, -1):
if show:
print(c, end='')
if c>1:
print(' x ', end='')
else:
#print(f'{c} X ')
print(' = ', end='')
f *= c
return f
|
edc42b1269799716d90896cdada721c33d156503
| 34,975 |
import uuid
def integrate_whole(payload, org, out_uuid, group):
"""integrates payload into whole of profile, returns dict"""
if group:
in_uuid = str(uuid.uuid4())
nested = {"PayloadContent": payload,
"PayloadEnabled": True,
"PayloadIdentifier": 'SparkleDisabler',
"PayloadType": "com.apple.ManagedClient.preferences",
"PayloadUUID": in_uuid,
"PayloadVersion": 1,
}
payload = [nested]
else:
payload = payload
finished_profile = {"PayloadContent": payload,
"PayloadOrganization": org,
"PayloadRemovalDisallowed": True,
"PayloadScope": "System",
"PayloadType": "Configuration",
"PayloadUUID": out_uuid,
"PayloadVersion": 1,
}
return finished_profile
|
b08cab03f0a1e3a2b74110a7829f6fc6d736d0f4
| 34,976 |
from typing import List
def search_hospitals(request_input: RequestInput, db_session) -> List[dict]:
"""
Search hospitals based on requested items. Sort them by nearest location
:param request_input: RequestInput
:param db_session: DB session
:return: List of Hospital dictionaries
"""
# Search hospitals which has requested resources
hospitals_list = search_items_on_quantity(request_input, db_session)
if len(hospitals_list) < 1:
return []
# Get source hospital
source_hospital = db_session.query(HospitalModel).filter(HospitalModel.id == request_input.hospital_id).one()
# Sort hospitals by nearest location
return sort_by_location(source_hospital, hospitals_list)
|
22dde6acb46fcc03e2cf6f2ef56fca05a2b4090b
| 34,977 |
def get_inverse_metric():
"""Computes and returns the inverse metric
Sets the inverse metric variable, so that DendroSym knows how to compute
various derived variables. This should be done early on in the
generating script. It requires the metric to already be defined.
Returns
-------
sympy.Matrix
The 3x3 matrix returned is the inverse metric
Example
-------
>>> gt = dendrosym.dtypes.sym_3x3("gt")
>>> dendrosym.nr.set_metric(gt)
>>> igt = dendrosym.nr.get_inverse_metric()
"""
global metric, inv_metric, undef
if metric == undef:
raise ValueError('Dendro: Metric not defined.')
if inv_metric == undef:
# method : ('GE', 'LU', or 'ADJ')
inv_metric = sym.simplify(metric.inv('ADJ'))
return inv_metric
|
da6399366b0b4096065dcb8ea8e2dd5f95555432
| 34,978 |
def add_towers_4G_km2_sheet(ws, cols, lnth):
    """
    Populate the 4G towers-per-km2 sheet from the Towers and Area sheets.
    """
for col in cols:
cell = "{}1".format(col)
ws[cell] = "=Towers!{}".format(cell)
for col in cols[:2]:
for i in range(1, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "=Towers!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "=IFERROR(Towers!{}/Area!{},0)".format(cell,cell)
set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
return ws
|
1fe86a6ddd210c13d3e92f68a1deb9b41375d14b
| 34,979 |
from typing import List
from typing import Dict
def get_all_netting_channel_events(
chain: BlockChainService,
token_network_address: Address,
netting_channel_identifier: ChannelID,
contract_manager: ContractManager,
from_block: BlockSpecification = GENESIS_BLOCK_NUMBER,
to_block: BlockSpecification = 'latest',
) -> List[Dict]:
""" Helper to get all events of a NettingChannelContract. """
filter_args = get_filter_args_for_all_events_from_channel(
token_network_address=token_network_address,
channel_identifier=netting_channel_identifier,
contract_manager=contract_manager,
from_block=from_block,
to_block=to_block,
)
return get_contract_events(
chain,
contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
token_network_address,
filter_args['topics'],
from_block,
to_block,
)
|
bcaf2a96db7a95c3660abcc7daac4a448baad50d
| 34,980 |
from typing import List
def custom_extractors(
eval_shared_model: tfma.MaybeMultipleEvalSharedModels,
eval_config: tfma.EvalConfig,
tensor_adapter_config: tensor_adapter.TensorAdapterConfig,
) -> List[tfma.extractors.Extractor]:
"""Returns default extractors plus a custom prediction extractor."""
predict_extractor = make_xgboost_predict_extractor(eval_shared_model,
eval_config)
return tfma.default_extractors(eval_shared_model=eval_shared_model,
eval_config=eval_config,
tensor_adapter_config=tensor_adapter_config,
custom_predict_extractor=predict_extractor)
|
a6efc2c15bbb6b437710d79a3b22174c19eb1083
| 34,981 |
def custom_detrending(flc):
"""Wrapper"""
f = flc.flux[np.isfinite(flc.flux)]
if np.abs(f[0]-f[-1])/np.median(f) > .2:
print("Do a coarse spline interpolation to remove trends.")
flc = fit_spline(flc, spline_coarseness=12)
flc.flux[:] = flc.detrended_flux[:]
    # Iteratively remove fast sine components with periods of 0.1 to 2 days (the very fast rotators)
flc = iteratively_remove_sines(flc)
flc.flux[:] = flc.detrended_flux[:]
# remove some rolling medians on a 10 hours time scale
flc.flux[:] = flc.flux - pd.Series(flc.flux).rolling(300, center=True).median() + np.nanmedian(flc.flux)#15h
# Determine the window length for the SavGol filter for each continuous observation gap
flc = find_iterative_median(flc)
w = search_gaps_for_window_length(flc)
flc = flc[np.isfinite(flc.flux)]
#Use lightkurve's SavGol filter while padding outliers with 25 data points around the outliers/flare candidates
# print(w)
# flc = flc.detrend("savgol", window_length=w, pad=7)
# flc.flux[:] = flc.detrended_flux[:]
#After filtering, always use a 2.5 hour window to remove the remaining
# flcd = flc.detrend("savgol", window_length=25, pad=7)
flcd=flc
# Determine the noise properties with a rolling std, padding masked outliers/candidates
flcd = refine_detrended_flux_err(flcd, mask_pos_outliers_sigma=1.5,
std_rolling_window_length=15, pad=25)
return flcd
|
534e0881ccab91811128f1e7f109460321f98937
| 34,982 |
import random
def summon_blocks(board):
"""Place 1-8 circles in random places on the speed board"""
for _ in range(random.randint(1, 8)):
x = random.randint(0, 4)
y = random.randint(0, 4)
while board[x][y] != 'g':
x = random.randint(0, 4)
y = random.randint(0, 4)
board[x][y] = 'b'
return board
|
0cfa703b6451e44ea8688561bc857ac70f560c90
| 34,983 |
from typing import List
from typing import Tuple
def three_sum_brute_force(array: List[int], target: int) -> Tuple[int, int, int]:
"""
args:
array:
target:
returns:
idxs
>>> s = [-1, 0, 1, 2, -1, -4]
>>> three_sum_brute_force(s, 0)
(0, 1, 2)
O(N^3)
"""
for i, u in enumerate(array):
for j, v in enumerate(array):
for k, w in enumerate(array):
if j > i and k > j and u + v + w == target:
return i, j, k
return None
|
4a47fae2c4f81ef654bcf21c73462cc4965b6b78
| 34,984 |
def on_cooldown(user):
"""Shortcut: Get remaining cooldown of a user."""
return Parent.GetUserCooldownDuration(ScriptName, settings["command"], user)
|
afadc17c45f09303d1fdcddc997c6b5d98c5e903
| 34,986 |
def get_ieconstraints(unknowns, segment):
    """ Runs the mission if the inequality constraint values are needed; these are specific to a climb
Assumptions:
Time only goes forward
CL is less than a specified limit
CL is greater than zero
All altitudes are greater than zero
The vehicle accelerates not decelerates
Inputs:
state.unknowns [Data]
Outputs:
constraints [array]
Properties Used:
N/A
"""
if isinstance(unknowns,array_type):
segment.state.unknowns.unpack_array(unknowns)
else:
segment.state.unknowns = unknowns
if not np.all(segment.state.inputs_last == segment.state.unknowns):
segment.process.iterate(segment)
# Time goes forward, not backward
t_final = segment.state.conditions.frames.inertial.time[-1,0]
time_con = (segment.state.conditions.frames.inertial.time[1:,0] - segment.state.conditions.frames.inertial.time[0:-1,0])/t_final
# Less than a specified CL limit
lift_coefficient_limit = segment.lift_coefficient_limit
CL_con = (lift_coefficient_limit - segment.state.conditions.aerodynamics.lift_coefficient[:,0])/lift_coefficient_limit
CL_con2 = segment.state.conditions.aerodynamics.lift_coefficient[:,0]
# Altitudes are greater than 0
alt_con = segment.state.conditions.freestream.altitude[:,0]/segment.altitude_end
# Acceleration constraint, go faster not slower
acc_con = segment.state.conditions.frames.inertial.acceleration_vector[:,0]
constraints = np.concatenate((time_con,CL_con,CL_con2,alt_con,acc_con))
return constraints
|
6cdc5ce218ecdc40b792f0bf2fce8789e8577aa9
| 34,987 |
def check_file_name(file_name, file_type="", extension=""):
"""
check_file_name(file_name, file_type="", extension="")
Checks file_name for file_type or extension
"""
file_name = check_string(file_name, -1, '.', extension)
file_name = check_string(file_name, -1, '_', file_type)
return file_name
|
250937094bc90e67ccf5a3d2615105b4e448dfff
| 34,988 |
def render_experiments(
driver: Driver = None,
collab_id: str = "",
project_id: str = "",
form_type: str = "display",
show_details: bool = True
):
""" Renders out retrieved experiment metadata in a custom form
Args:
driver (Driver): A connected Synergos driver to communicate with the
selected orchestrator.
collab_id (str): ID of selected collaboration to be rendered
project_id (str): ID of selected project to be rendered
form_type (str): What type of form to render (i.e. 'display' or 'modify'
mode). This is due to the way at which experiments are declared.
        show_details (bool): Toggles whether experiment details should be shown
Returns:
Selected experiment ID (str)
Updated experiment record (dict)
"""
expt_data = driver.experiments.read_all(
collab_id=collab_id,
project_id=project_id
).get('data', [])
expt_ids = [expt['key']['expt_id'] for expt in expt_data]
with st.beta_container():
selected_expt_id = st.selectbox(
label="Experiment ID:",
options=expt_ids,
help="""Select an experiment to peruse."""
)
if not show_details:
return selected_expt_id, None
selected_expt_data = driver.experiments.read(
collab_id=collab_id,
project_id=project_id,
expt_id=selected_expt_id
).get('data', {})
if selected_expt_data:
selected_expt_data.pop('relations') # no relations rendering
with st.beta_expander("Experiment Details"):
updated_experiment = (
expt_renderer.display(selected_expt_data)
if form_type == "display"
else expt_renderer.modify(selected_expt_data)
)
return selected_expt_id, updated_experiment
|
8f210251052081184b0e3792ed7381263a9961b2
| 34,989 |
import json
def tag_lookup(request):
"""JSON endpoint that returns a list of potential tags.
Used for upload template autocomplete.
"""
tag = request.GET['tag']
tagSlug = slugify(tag.strip())
tagCandidates = Tag.objects.values('word').filter(slug__startswith=tagSlug)
tags = json.dumps([candidate['word'] for candidate in tagCandidates])
return HttpResponse(tags, content_type='application/json')
|
c6cca931538a30bfa2e6f356f87eaa17158f5145
| 34,990 |
def scope_to_list(scope):
"""Convert a space separated string to a list of scopes."""
if isinstance(scope, list) or scope is None:
return scope
else:
return scope.split(" ")
|
c806f91192f86dbc42719787d9ddfe0d79690f0c
| 34,991 |
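A quick usage sketch for `scope_to_list` above (my own example):
print(scope_to_list("read write admin"))  # -> ['read', 'write', 'admin']
print(scope_to_list(["read", "write"]))   # already a list, returned unchanged
print(scope_to_list(None))                # -> None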
from pathlib import Path
import importlib
def import_migration_script(filepath: Path) -> ModuleType:
"""
Import migration script as if it were a module.
"""
spec = importlib.util.spec_from_file_location(filepath.stem, filepath)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return module
|
15c8a5532d0a38d0741c3a82c299ad13e6885792
| 34,992 |
from typing import Callable
import inspect
def numargs(func: Callable) -> int:
"""Get number of arguments."""
return len(inspect.signature(func).parameters)
|
2b4e068798add68323db6bd43253fbca34ea71ba
| 34,993 |
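A small usage sketch for `numargs` above (my own example):
def add(a, b, c=0):
    return a + b + c
print(numargs(add))  # -> 3 (parameters with defaults still count)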
def _embed(x, order=3, delay=1):
"""Time-delay embedding.
Parameters
----------
x : 1d-array, shape (n_times)
Time series
order : int
Embedding dimension (order)
delay : int
Delay.
Returns
-------
embedded : ndarray, shape (n_times - (order - 1) * delay, order)
Embedded time-series.
"""
N = len(x)
Y = np.empty((order, N - (order - 1) * delay))
for i in range(order):
Y[i] = x[i * delay:i * delay + Y.shape[1]]
return Y.T
|
e2834c835521e57132f67a19f4a405af752e497f
| 34,994 |
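A shape check for `_embed` above (my own example), showing the time-delay embedding of a short series:
import numpy as np
x = np.arange(10)
emb = _embed(x, order=3, delay=2)
print(emb.shape)  # (6, 3): n_times - (order - 1) * delay = 10 - 4 = 6 rows
print(emb[0])     # [0 2 4], the first delay vector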
def form_baseline_intro(current_law):
"""
    Form the final sentence of the introduction paragraph
"""
if not current_law:
return f"{date()}"
else:
return (
f"{date()}, along with some modifications. A summary of these "
"modifications can be found in the \"Summary of Baseline Policy\" "
"section"
)
|
8e93002290188605eb5df6992f5f41ae2352634d
| 34,996 |
from typing import Any
def serialise(entry: Directive) -> Any:
"""Serialise an entry."""
if not entry:
return None
ret = entry._asdict()
ret["type"] = entry.__class__.__name__
if isinstance(entry, Transaction):
ret["payee"] = entry.payee or ""
if entry.tags:
ret["narration"] += " " + " ".join(["#" + t for t in entry.tags])
if entry.links:
ret["narration"] += " " + " ".join(
["^" + link for link in entry.links]
)
del ret["links"]
del ret["tags"]
ret["postings"] = [serialise(pos) for pos in entry.postings]
elif ret["type"] == "Balance":
amt = ret["amount"]
ret["amount"] = {"number": str(amt.number), "currency": amt.currency}
return ret
|
9ec9e1ea77011a79f8d1aac7ce4c804ef288f9f6
| 34,997 |
import numpy as np
def MeanValueCoordinateMapping(dpoint, uv, physical_points):
"""MVC mapping from parametric uv to physical 3D
inputs:
dpoint: [list, tuple or 1D array of floats] desired uv point
uv: [2D array] of parametric uv points of polygon vertices
physical_points: [2D array] of physical points of polygon vertices
"""
norm = np.linalg.norm
counter = 0
cp = False
for i, p in enumerate(uv):
if np.abs(norm(p-dpoint)) < 1e-9:
counter = i
cp = True
break
if cp:
return physical_points[counter,:]
dpoint_tile = np.tile(dpoint,uv.shape[0]).reshape(uv.shape[0],uv.shape[1])
segments = dpoint_tile - uv
seg_lengths = norm(segments, axis=1)
num_vertices = uv.shape[0]
alphas = []
for i in range(num_vertices):
if i<num_vertices-1:
n0 = norm(segments[i,:])
n1 = norm(segments[i+1,:])
s0 = norm(segments[i,:]/n0 + segments[i+1,:]/n1)
s1 = norm(segments[i,:]/n0 - segments[i+1,:]/n1)
a = 2.*np.arctan2(s1,s0)
else:
n0 = norm(segments[i,:])
n1 = norm(segments[0,:])
s0 = norm(segments[i,:]/n0 + segments[0,:]/n1)
s1 = norm(segments[i,:]/n0 - segments[0,:]/n1)
a = 2.*np.arctan2(s1,s0)
alphas.append(a)
ws = []
for i in range(num_vertices):
if i==0:
a0 = alphas[-1]
a1 = alphas[i]
n1 = seg_lengths[i]
w = (np.tan(a0/2.) + np.tan(a1/2.))/n1
else:
a0 = alphas[i-1]
a1 = alphas[i]
n1 = seg_lengths[i]
w = (np.tan(a0/2.) + np.tan(a1/2.))/n1
ws.append(w)
ws = np.array(ws)
lmbs = ws / np.sum(ws)
candidate_point = np.zeros((physical_points.shape[1]))
for i in range(num_vertices):
candidate_point += physical_points[i,:]*lmbs[i]
candidate_point = candidate_point.reshape(candidate_point.shape[0],1)
candidate_point = candidate_point.ravel()
return candidate_point
|
e5212fc953a67bc3e9827aa0d3bb4c088a6c848d
| 34,998 |
import re
def _handle_discogs(url):
"""https://*discogs.com/*"""
apiurl = 'https://api.discogs.com/'
headers = {'user-agent': 'pyfibot-urltitle'}
title_formats = {
'release': '{0[artists][0][name]} - {0[title]} - ({0[year]}) - {0[labels][0][catno]}',
'artist': '{0[name]}',
'label': '{0[name]}',
'master': '{0[artists][0][name]} - {0[title]} - ({0[year]})',
}
m = re.match(
r'https:\/\/(?:www\.)?discogs\.com\/(?:([A-Za-z0-9-]+)\/)?(release|master|artist|label|item|seller|user)\/(\d+|[A-Za-z0-9_.-]+)', url)
if m:
m = m.groups()
if m[1] in title_formats:
endpoint = '%ss/%s' % (m[1], m[2])
data = bot.get_url('%s%s' % (apiurl, endpoint),
headers=headers).json()
title = title_formats[m[1]].format(data)
elif m[1] in ['seller', 'user']:
endpoint = 'users/%s' % m[2]
data = bot.get_url('%s%s' % (apiurl, endpoint),
headers=headers).json()
title = ['{0[name]}']
if data['num_for_sale'] > 0:
plural = 's' if data['num_for_sale'] > 1 else ''
title.append('{0[num_for_sale]} item%s for sale' % plural)
if data['releases_rated'] > 10:
title.append(
'Rating avg: {0[rating_avg]} (total {0[releases_rated]})')
title = ' - '.join(title).format(data)
elif m[0:2] == ['sell', 'item']:
endpoint = 'marketplace/listings/%s' % m[2]
data = bot.get_url('%s%s' % (apiurl, endpoint)).json()
for field in ('condition', 'sleeve_condition'):
if field in ['Generic', 'Not Graded', 'No Cover']:
data[field] = field
else:
m = re.match(r'(?:\w+ )+\(([A-Z]{1,2}[+-]?)( or M-)?\)',
data[field])
data[field] = m.group(1)
fmt = ('{0[release][description]} [{0[price][value]}'
'{0[price][currency]} - ships from {0[ships_from]} - '
'Condition: {0[condition]}/{0[sleeve_condition]}]')
title = fmt.format(data)
if title:
return title
|
37be95755e77782ad4b6beee000a6b096a681aa9
| 34,999 |
def get_api_all_item():
"""Get map of class name to class for all ApiItem subclasses.
Returns:
:obj:`dict`
"""
return {c.__name__: c for c in ApiItem.__subclasses__()}
|
621a867b0f4b28eb3d1b5721e22ea4914a5f577d
| 35,000 |
def get_image_url(soup_object: object) -> str:
"""Return book image url."""
image_url = soup_object.find("img")["src"]
return image_url.replace("../..", "https://books.toscrape.com/")
|
5a52145eacc2489762d3f32b63e092e26d4fc4c8
| 35,001 |
from functools import partial
import pandas as pd
def concat_train_val(train_df, val_df):
"""Utility function to concatenate the train and validation set, while maintaining globally unique ids."""
# increment id tags
max_article_id = train_df['Article_ID'].max() + 1
max_ner_tag_id = train_df['NER_Tag_ID'].max() + 1
increment_article_id = partial(increment_id, increment=max_article_id)
increment_tag_id = partial(increment_id, increment=max_ner_tag_id)
val_df['Article_ID'] = val_df['Article_ID'].apply(increment_article_id)
val_df['NER_Tag_ID'] = val_df['NER_Tag_ID'].apply(increment_tag_id)
# combine train/validation set
train_val_df = pd.concat((train_df, val_df), axis=0)
return train_val_df
|
b4c949e08061a724441caf0abf8a2a14f1e494c3
| 35,003 |
import math
def frequencyToMidi(frequency):
"""
Convert a given frequency in Hertz to its corresponding MIDI pitch number (60 = Middle C)
"""
return int(round(69 + 12 * math.log(frequency / 440.0, 2)))
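# Two quick checks of the formula above.
print(frequencyToMidi(440.0))    # 69 (A4, the reference pitch)
print(frequencyToMidi(261.63))   # 60 (Middle C)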
|
29d4b92b9deacb81f768b554200c4b63b632bf23
| 35,005 |
def lower(value): # Only one argument.
"""Converts a string into all lowercase
How to Use
    {{ value|lower }}
"""
return value.lower()
|
a6c290276aa777cee5e948d0c89202c67a97e6d1
| 35,006 |
def stdp2rbp_non_linear_calcium(*args, **kwargs):
"""Rate-based plasticity from STDP using the non linear calcium model.
Same arguments as ~cbsp.population_1.non_linear_calcium(u, v, w0, seed)
Returns:
float: the population average change of synapse strength at time point 0
"""
w_rec, t, _ = non_linear_calcium(*args, **kwargs)
return utils.derivative(w_rec.mean(axis=0), t)
|
454990dc4783641c2da2da17297dd77abc95e3cd
| 35,007 |
def create_RevPAR(dataframe):
"""Calculate revpar from answer_num_rooms converted to supply of
room nights divided by answer_ann_revenue"""
dataframe['CREATED_revpar'] = dataframe['ANSWER_ann_revenue'].astype(float)/(dataframe['ANSWER_num_rooms'].astype(float)*365)
return dataframe
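# A tiny worked example with hypothetical survey answers.
import pandas as pd

df = pd.DataFrame({'ANSWER_ann_revenue': ['730000'], 'ANSWER_num_rooms': ['20']})
print(create_RevPAR(df)['CREATED_revpar'].iloc[0])   # 730000 / (20 * 365) = 100.0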
|
0d3c91ff3909ea693fdde4b0ea8baae43a31a9a0
| 35,008 |
def get_find_response_normal_field_dict(model):
"""
:param model:
:return:
"""
normal_fields = get_normal_field_list_by_model(model) # 普通字段
find_response_allow_fields = {}
for normal_field in normal_fields:
response_key = newbee_model.get_attr_by_model_field_key(model, normal_field, "response_key")
can_found = newbee_model.get_attr_by_model_field_key(model, normal_field, "can_found")
is_found_as_foreign_return = newbee_model.get_attr_by_model_field_key(model, normal_field,
"is_found_as_foreign_return")
if can_found:
find_response_allow_fields[normal_field] = {
"response_key": response_key or normal_field,
"is_found_as_foreign_return": is_found_as_foreign_return,
}
return find_response_allow_fields
|
c55da5f2a573857dc7f349bac79b467cd2345450
| 35,009 |
def predict():
"""Return Cell with Summary"""
global CLEAN_SUMMARY
global model
summary = CLEAN_SUMMARY
in_count = None
out_count = None
logger.info('Received Text Input')
if request.method == 'POST':
out = request.form['rawtext']
# preprocess text
text = preprocess(out)
logger.info('text preprocessed')
# get summary
summary = get_summary(model, text)
logger.info('obtained summary')
in_count = len(request.form['rawtext'].split())
out_count = len(summary.split())
    input_count_words = f"{in_count} words." if in_count is not None else ""
    output_count_words = f"{out_count} words." if out_count is not None else ""
CLEAN_SUMMARY = summary
return render_template('index.html', input_count=input_count_words, output=summary, output_count=output_count_words)
|
94fbc9d75b9d488e371185912c54741596a23506
| 35,011 |
import random
import string
def id_generator(N=10):
"""
Generator a random string of characters.
"""
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
|
3e454a583f00f9218a8305dc5f7dca5d0bd27243
| 35,012 |
import numpy as np
from geometry_msgs.msg import PoseWithCovarianceStamped
def transform_covariance(cov_in, transform):
"""
Apply a given transform to a covariance matrix.
:param cov_in: Covariance matrix
:param transform: The transform that will be applies
:returns: The transformed covariance matrix
"""
# Converting the Quaternion to a Rotation Matrix first
# Taken from: https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/
q0 = transform.transform.rotation.w
q1 = transform.transform.rotation.x
q2 = transform.transform.rotation.y
q3 = transform.transform.rotation.z
# First row of the rotation matrix
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
# Second row of the rotation matrix
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
# Third row of the rotation matrix
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
# Code reference: https://github.com/ros2/geometry2/pull/430
# Mathematical Reference:
# A. L. Garcia, “Linear Transformations of Random Vectors,” in Probability,
# Statistics, and Random Processes For Electrical Engineering, 3rd ed.,
# Pearson Prentice Hall, 2008, pp. 320–322.
R = np.array([[r00, r01, r02],
[r10, r11, r12],
[r20, r21, r22]])
R_transpose = np.transpose(R)
cov_11 = np.array([cov_in[:3], cov_in[6:9], cov_in[12:15]])
cov_12 = np.array([cov_in[3:6], cov_in[9:12], cov_in[15:18]])
cov_21 = np.array([cov_in[18:21], cov_in[24:27], cov_in[30:33]])
cov_22 = np.array([cov_in[21:24], cov_in[27:30], cov_in[33:]])
# And we perform the transform
result_11 = R @ cov_11 @ R_transpose
result_12 = R @ cov_12 @ R_transpose
result_21 = R @ cov_21 @ R_transpose
result_22 = R @ cov_22 @ R_transpose
cov_out = PoseWithCovarianceStamped()
cov_out.pose.covariance[0] = result_11[0][0]
cov_out.pose.covariance[1] = result_11[0][1]
cov_out.pose.covariance[2] = result_11[0][2]
cov_out.pose.covariance[6] = result_11[1][0]
cov_out.pose.covariance[7] = result_11[1][1]
cov_out.pose.covariance[8] = result_11[1][2]
cov_out.pose.covariance[12] = result_11[2][0]
cov_out.pose.covariance[13] = result_11[2][1]
cov_out.pose.covariance[14] = result_11[2][2]
cov_out.pose.covariance[3] = result_12[0][0]
cov_out.pose.covariance[4] = result_12[0][1]
cov_out.pose.covariance[5] = result_12[0][2]
cov_out.pose.covariance[9] = result_12[1][0]
cov_out.pose.covariance[10] = result_12[1][1]
cov_out.pose.covariance[11] = result_12[1][2]
cov_out.pose.covariance[15] = result_12[2][0]
cov_out.pose.covariance[16] = result_12[2][1]
cov_out.pose.covariance[17] = result_12[2][2]
cov_out.pose.covariance[18] = result_21[0][0]
cov_out.pose.covariance[19] = result_21[0][1]
cov_out.pose.covariance[20] = result_21[0][2]
cov_out.pose.covariance[24] = result_21[1][0]
cov_out.pose.covariance[25] = result_21[1][1]
cov_out.pose.covariance[26] = result_21[1][2]
cov_out.pose.covariance[30] = result_21[2][0]
cov_out.pose.covariance[31] = result_21[2][1]
cov_out.pose.covariance[32] = result_21[2][2]
cov_out.pose.covariance[21] = result_22[0][0]
cov_out.pose.covariance[22] = result_22[0][1]
cov_out.pose.covariance[23] = result_22[0][2]
cov_out.pose.covariance[27] = result_22[1][0]
cov_out.pose.covariance[28] = result_22[1][1]
cov_out.pose.covariance[29] = result_22[1][2]
cov_out.pose.covariance[33] = result_22[2][0]
cov_out.pose.covariance[34] = result_22[2][1]
cov_out.pose.covariance[35] = result_22[2][2]
return cov_out.pose.covariance
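# Minimal sanity check, assuming ROS geometry_msgs is available: with an identity
# rotation the covariance should come back unchanged (up to float round-off).
from geometry_msgs.msg import TransformStamped

identity_tf = TransformStamped()
identity_tf.transform.rotation.w = 1.0   # x = y = z = 0.0 -> identity rotation
cov_in = [float(i) for i in range(36)]
cov_out = transform_covariance(cov_in, identity_tf)
# cov_out now equals cov_in element-wise.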
|
0926bf95e58702f6594de572c7eeb1be2053b0c5
| 35,013 |
def delete_vocabulary(vid):
"""
Delete an existing vocabulary
:return:
"""
try:
vocs.delete_vocabulary(vid)
except IndexError:
raise APIError('Ontology URI not found')
    except vocs.UnknownVocabulary as e:
raise NotFound(e.message)
__analyse_vocabularies([vid])
__check_seeds()
response = make_response()
response.status_code = 200
return response
|
49e1be4b8132d3db2533ea6e5155d9891a3c221c
| 35,014 |
import numpy as np
def available_kurucz_models():
"""These hard-code the available Kurucz models, as present on Nov 22, 2019 on
'http://kurucz.harvard.edu/grids/grid'"""
    T_a = np.concatenate((np.arange(3500, 13250, 250), np.arange(13000, 51000, 1000)))
logg_a = np.arange(0,5.5,0.5)
Z_a = np.round(np.concatenate((np.arange(-5,0,0.5),np.arange(-0.3,0.4,0.1),np.array([0.5,1.0]))),decimals=1)#I need to do a rounding here because np.concatenate() makes a numerical error on the middle array... Weird!
return(T_a,logg_a,Z_a)
|
2779e39e55127783986a71b65e0b7f09373551f7
| 35,015 |
import numpy as np
def draw_skill_prices(
T,
J,
pi_fun='pi_fixed',
low=-0.2,
high=0.2,
const=[0.0, 0.05]
):
""" Draws initial skill prices and simulates random prices changes.
With the normalization of wages in task 1 to zero, some parts
    of this function are redundant. However, the way this function is currently
set up allows for a simulation without this normalization, too.
Arguments:
T (int) Number of periods
J (int) Number of tasks
        seed    (int)   Seed for the random draws (currently unused; the seeding call below is commented out)
pi_fun (str) defines the process of wage changes. Currently
implemented options:
- pi_normal: Draws from standard normal distribution.
- pi_uniform: Draws uniform distribution. Borders
are defined in "low" and "high" arguments.
- pi_fixed: Non-random, constant price changes.
Changes can be provided in "const" argument.
low (int) Lower border of uniform distributed price changes.
high (int) Upper border of uniform distributed price changes.
const (list) Changes for pi_fixed option.
Returns:
pi1, pi2 JxT array of prices for tasks 1 and 2.
Assumptions:
        (1) Initial log skill price for task 2 exceeds task 1 by 0.1
(2) No price changes in a base period (t=0 to t=1)
"""
# import packages
# # set seed
# np.random.seed(seed)
# define functions that return price changes for different specifications
    # (1) Draw standard normal distributed changes in log prices.
def pi_normal(J=J, T=T, **kwargs):
pi_normal = np.around(np.random.normal(size=(J, T-1)), 4)
return pi_normal
# (2) Draw changes in log prices that are uniformly distributed over
# some interval.
def pi_uniform(J=2, T=T, **kwargs):
low, high = kwargs['low'], kwargs['high']
pi_uniform = np.around(
np.random.uniform(low, high, size=(J, T-1)),
4
)
return pi_uniform
# (3) Fix changes in log prices.
def pi_fixed(J=J, T=T, **kwargs):
const = kwargs['const']
pi_fixed = np.array([const, ]*T).transpose()
return pi_fixed
# Set initial task prices
# Assume task 1 has a lower price than task 2
pi1_0 = 0
pi2_0 = 0.1
# Define price array
pi = np.empty([J, T])
    # Set initial prices
pi[:, 0] = pi1_0, pi2_0
pi[:, 1] = pi1_0, pi2_0
# Get price changes.
# Find price changes function of choice:
price_changes = eval(pi_fun)
d_pi = price_changes(T=T, J=J, low=low, high=high, const=const)
# Calculate prices in each period, while there is no price change in a base
# period (from t=0 to t=1)
for t in range(2, T):
pi[:, t] = pi[:, t-1] + d_pi[:, t-1]
return pi
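# Example usage with the default fixed price changes (pi_fun='pi_fixed').
pi = draw_skill_prices(T=5, J=2)
print(pi.shape)    # (2, 5)
print(pi[:, :3])   # flat from t=0 to t=1, then each task drifts by `const` per period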
|
a14af64fae651d3728fe4be2eae3f3e51eeecfe5
| 35,016 |
import numpy as np
def deal_with_categorical(feasible_values, one_hot_values):
""" function to do the one hot encoding of the categorical values """
index = np.argmax(one_hot_values)
#index = one_hot_values.argmax()
return feasible_values[int(index)]
|
f57d4c8ae3dbe7183849eced8c42a4561ab910f0
| 35,017 |
from math import log as m_log
def pretty_size(n, pow=0, b=1024, u='B', pre=[''] + [p + 'i' for p in 'KMGTPEZY']):
""" origin: https://stackoverflow.com/a/31178618 """
pow, n = min(int(m_log(max(n * b ** pow, 1), b)), len(pre) - 1), n * b ** pow
return "%%.%if %%s%%s" % abs(pow % (-pow - 1)) % (n / b ** float(pow), pre[pow], u)
|
dbef4901c93f240847f37755712a57a225a98900
| 35,018 |
def launch_container(container, **kwargs):
"""
Launch a specific container
:param container:
:return: the container ID
"""
# Build the container if it doesn't exist
logger.info("Building %s container..." % container)
client.images.build(path='../%s' % container,
tag="%s/%s:%s" % (NAMESPACE, IMAGE_NAME_MAP[container], VERSION),
rm=True)
logger.info("Done building %s" % container)
# Run the container
logger.info("Running %s container..." % container)
container_id = client.containers.run("%s/%s:%s" % (NAMESPACE, IMAGE_NAME_MAP[container], VERSION),
detach=True,
**kwargs).short_id
logger.info("%s up and running" % container)
return container_id
|
fdeda303a6bc36a3d9a0a828a69cd0bc2a31275d
| 35,019 |
def collatz_function(n):
"""
This function, collatz function, takes a number n and the entire part on the division
with 2 if n is even or 3*n+1 is n is odd.
"""
if n % 2 == 0:
return n//2
else:
return 3*n+1
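# The iterates starting from 6, as a quick check.
n, seq = 6, [6]
while n != 1:
    n = collatz_function(n)
    seq.append(n)
print(seq)   # [6, 3, 10, 5, 16, 8, 4, 2, 1]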
|
bd9b061e9651e46e4c6efd3f6e45524d824040ff
| 35,020 |
def has_enabled_clear_method(store):
"""Returns True iff obj has a clear method that is enabled (i.e. not disabled)"""
return hasattr(store, 'clear') and ( # has a clear method...
not hasattr(store.clear, 'disabled') # that doesn't have a disabled attribute
or not store.clear.disabled
)
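# Illustration with two stand-in stores: one plain, one with its clear method disabled.
class PlainStore:
    def clear(self):
        pass

class GuardedStore:
    def clear(self):
        pass
    clear.disabled = True

print(has_enabled_clear_method(PlainStore()))    # True
print(has_enabled_clear_method(GuardedStore()))  # False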
|
28ee30f92d44d14300e30fec0de37a2a241c8e92
| 35,021 |
def get_module_for_handler(handler_name):
"""
    Gets the module for a handler using a naming convention. First the name of the handler is capitalized and suffixed with the
    string "Handler". Then it is converted from camel case to snake case to get the name of the module that will be loaded. Raises an
    ImportError exception if no module is found for the constructed module name.
:param handler_name:
:return:
"""
module_name = HANDLERS_MODULE_NAME.format(pascal_to_snake_case(handler_name))
# noinspection PyPep8
try:
return _get_module(module_name)
except:
raise ImportError(ERR_NO_MODULE_FOR_HANDLER.format(module_name, handler_name, ", ".join(all_handlers())))
|
1aace2db3658c4309501bb13deeaa0cbd3647654
| 35,023 |
import tensorflow as tf
from tensorflow import keras
def create_model(activations_outfeed_queue, gradient_accumulation_steps_per_replica):
""" Create the model using the Keras Model class.
Outfeed the activations for a single layer.
"""
input_layer = keras.layers.Input(shape=(28, 28, 1), dtype=tf.float32, batch_size=32)
x = keras.layers.Flatten()(input_layer)
x = keras.layers.Dense(128, activation='relu', name="Dense_128")(x)
# Outfeed the activations for a single layer:
x = outfeed_layers.Outfeed(activations_outfeed_queue, name="Dense_128_acts")(x)
x = keras.layers.Dense(10, activation='softmax', name="Dense_10")(x)
model = keras.Model(input_layer, x)
model.set_gradient_accumulation_options(
gradient_accumulation_steps_per_replica=gradient_accumulation_steps_per_replica)
return model
|
d7f536becc97a0cb7a4d6465b784a6ce828faeda
| 35,024 |
def project_config(opts):
"""Template of project_config.yaml
Args:
opts: mapping parameters as dictionary
Returns:
str: file content as string
"""
template = get_template("project_config")
return template.safe_substitute(opts)
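# Sketch of the safe_substitute behaviour with a stand-in template; the real one
# comes from get_template("project_config") and its placeholder names may differ.
from string import Template

template = Template("name: ${project}\nrelease: ${release}\n")
print(template.safe_substitute({"project": "demo"}))
# name: demo
# release: ${release}   <- unknown placeholders are left as-is instead of raising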
|
12ce2cfbf967912586dd6c7a9a9dfe8e7eb00717
| 35,025 |
def _make_dense_split(quantile_accumulator_handle, stats_accumulator_handle,
stamp_token, next_stamp_token, multiclass_strategy,
class_id, feature_column_id, l1_regularization,
l2_regularization, tree_complexity_regularization,
min_node_weight, is_multi_dimentional,
loss_uses_sum_reduction, weak_learner_type):
"""Function that builds splits for a dense feature column."""
# Get the bucket boundaries
are_splits_ready, buckets = (
gen_quantile_ops.quantile_accumulator_get_buckets(
quantile_accumulator_handles=[quantile_accumulator_handle],
stamp_token=stamp_token))
# quantile_accumulator_get_buckets returns a list of results per handle that
# we pass to it. In this case we're getting results just for one resource.
are_splits_ready = are_splits_ready[0]
buckets = buckets[0]
# After we receive the boundaries from previous iteration we can flush
# the quantile accumulator.
with ops.control_dependencies([buckets]):
flush_quantiles = gen_quantile_ops.quantile_accumulator_flush(
quantile_accumulator_handle=quantile_accumulator_handle,
stamp_token=stamp_token,
next_stamp_token=next_stamp_token)
if is_multi_dimentional:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_tensor_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
else:
num_minibatches, partition_ids, bucket_ids, gradients, hessians = (
gen_stats_accumulator_ops.stats_accumulator_scalar_flush(
stats_accumulator_handle, stamp_token, next_stamp_token))
# For sum_reduction, we don't need to divide by number of minibatches.
num_minibatches = control_flow_ops.cond(loss_uses_sum_reduction,
lambda: math_ops.to_int64(1),
lambda: num_minibatches)
# Put quantile and stats accumulator flushing in the dependency path.
with ops.control_dependencies([flush_quantiles, partition_ids]):
are_splits_ready = array_ops.identity(are_splits_ready)
partition_ids, gains, split_infos = (
split_handler_ops.build_dense_inequality_splits(
num_minibatches=num_minibatches,
bucket_boundaries=buckets,
partition_ids=partition_ids,
bucket_ids=bucket_ids,
gradients=gradients,
hessians=hessians,
class_id=class_id,
feature_column_group_id=feature_column_id,
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
multiclass_strategy=multiclass_strategy,
weak_learner_type=weak_learner_type))
return are_splits_ready, partition_ids, gains, split_infos
|
0d688a7290ebe05ab13488fc2ab0a04044beb29b
| 35,026 |
import torchvision
import torchvision.transforms as transforms
import torch
def load_data(dataset='cifar10', batch_size=128, num_workers=4):
"""
Loads the required dataset
:param dataset: Can be either 'cifar10' or 'cifar100'
:param batch_size: The desired batch size
:return: Tuple (train_loader, test_loader, num_classes)
"""
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if dataset == 'cifar10':
# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
num_classes = 10
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
elif dataset == 'cifar100':
num_classes = 100
trainset = torchvision.datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)
testset = torchvision.datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)
else:
raise ValueError('Only cifar 10 and cifar 100 are supported')
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return trainloader, testloader, num_classes
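# Example usage; CIFAR-10 is downloaded to ./data on first run.
trainloader, testloader, num_classes = load_data('cifar10', batch_size=128)
images, labels = next(iter(trainloader))
print(images.shape, labels.shape, num_classes)
# torch.Size([128, 3, 32, 32]) torch.Size([128]) 10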
|
2983bb2d350c5c6b8ad613186490815f954313e7
| 35,028 |
from netrc import netrc
import errno
def get_auth_from_netrc(hostname):
"""Try to find login auth in ``~/.netrc``. Return ``(user, pwd)`` tuple. """
try:
auth = netrc(file=NETRC)
except IOError as cause:
if cause.errno != errno.ENOENT:
raise
return None, None
username, _, password = auth.hosts.get(hostname, None) or (None,) * 3
return username, password
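# For reference, a ~/.netrc entry this helper would pick up (hypothetical host/credentials):
#   machine api.example.com
#       login alice
#       password s3cret
# get_auth_from_netrc('api.example.com')  ->  ('alice', 's3cret')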
|
da35fec0c9981166b14cce8870319eb9f1fb8e87
| 35,029 |
import pandas as pd
def _add_rows_by_count(df, amount, count, alloc_id, constraint, stuff=False):
"""
Add rows to a table so that the sum of values in the `count` column
is increased by `amount`.
Parameters
----------
df : pandas.DataFrame
amount : float
Amount by which to increase sum of `count` column.
count : str
Name of the column in `df` to use for accounting.
alloc_id : str
Name of column in `df` that specifies container ID.
constraint : pandas.Series
The constraint property that limits where new rows can be placed.
Index must correspond to values in `alloc_id` column of `df`.
stuff : bool, optional
Whether it's okay for allocation to go over constraints.
If False rows are still added to meet targets, but some will
not be placed.
Returns
-------
new_df : pandas.DataFrame
"""
if amount == 0:
return df.copy()
sort_count = df[count].sort_values(ascending=False, inplace=False)
sort_count = sort_count[sort_count != 0]
orig_sort_count = sort_count.copy()
to_add = []
while amount >= 1:
sort_count = sort_count[sort_count <= amount]
if len(sort_count) == 0:
# see if we can pop the most recent thing off to_add
# and try again with a smaller number.
k = to_add.pop()
v = orig_sort_count[k]
amount += v
sort_count = orig_sort_count[
(orig_sort_count < v) & (orig_sort_count <= amount)]
if len(sort_count) == 0:
break
for k, v in sort_count.iteritems():
if v <= amount:
to_add.append(k)
amount -= v
if amount == 0:
break
rows_to_add = df.loc[to_add]
# update the new rows' index
max_idx = df.index.max()
rows_to_add.index = range(max_idx + 1, max_idx + len(rows_to_add) + 1)
# allocate rows to containers
_allocate_rows(rows_to_add, alloc_id, constraint, stuff)
return pd.concat([df, rows_to_add])
|
1bb28f8e58116c469f100c65e9ec2f703f62cc6f
| 35,030 |
import numpy as _np
def rot2(theta, deg=True):
"""returns 2D rotation matrix :math:`R \in SO(2)` to rotate a vector/point in a
plane in counter-clockwise direction
Parameters
----------
theta : float
the angle of rotation
deg : bool
``True`` = degree (default), ``False`` = radians
Returns
-------
r : ndarray
the rotation matrix
Notes
-----
    Same as the function ``rotMat2D()`` except for a slight change in the input parameter
specification, plus ``rot2()`` returns ``ndarray`` instead of numpy matrix.
See the function's docstring for details.
See Also
--------
rotMat2D()
"""
atype = 'd' if deg else 'r'
return _np.asarray(rotMat2D(angle=theta, atype=atype))
|
557ff9b7135c62043ff767f4a33c8ca0484832bc
| 35,031 |
import torch
def coin_flip(prob):
"""
Return the outcome of a biased coin flip.
Args:
prob: the probability of True.
Returns: bool
"""
return prob > 0 and torch.rand(1).item() < prob
|
672929fb49a0e65101a4bdfdd13e981ae5eae31c
| 35,032 |
import warnings
def _pair_exp_cov(X, Y, span=180):
"""
Calculate the exponential covariance between two timeseries of returns.
:param X: first time series of returns
:type X: pd.Series
:param Y: second time series of returns
:type Y: pd.Series
:param span: the span of the exponential weighting function, defaults to 180
:type span: int, optional
:return: the exponential covariance between X and Y
:rtype: float
"""
covariation = (X - X.mean()) * (Y - Y.mean())
# Exponentially weight the covariation and take the mean
if span < 10:
warnings.warn("it is recommended to use a higher span, e.g 30 days")
return covariation.ewm(span=span).mean().iloc[-1]
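# Usage sketch with synthetic daily returns (hypothetical data).
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = pd.Series(rng.normal(0.0, 0.01, size=250))
Y = pd.Series(0.5 * X.values + rng.normal(0.0, 0.01, size=250))
print(_pair_exp_cov(X, Y, span=180))   # positive, roughly 0.5 * var(X)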
|
ffd64bb660d54444b64f11fc1a46c0a7c26169b4
| 35,033 |
import numpy as np
def _protected_division(x1, x2):
"""Closure of division (x1/x2) for zero denominator."""
with np.errstate(divide='ignore', invalid='ignore'):
return np.where(np.abs(x2) > 0.001, np.divide(x1, x2), x1)
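# Quick check of the zero-denominator guard.
import numpy as np
print(_protected_division(np.array([1.0, 2.0]), np.array([0.0, 4.0])))
# [1.  0.5]  -> first element falls back to the numerator because |x2| <= 0.001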
|
676e8a2f72f076773d33501d2888871674ab6346
| 35,034 |