content (stringlengths 35–762k) | sha1 (stringlengths 40–40) | id (int64 0–3.66M)
---|---|---|
import numpy as np

def template(m1, m2):
    """Build a simple inspiral chirp template.
    :param m1: mass of the first body (units as expected by ``chirp_mass``)
    :param m2: mass of the second body
    :return: (t, gw_frequency, t_h, h, c_mass)
    """
    c_mass = chirp_mass(m1, m2)
    B = 16.6  # in seconds**(-5/8), so that f comes out in Hz
    t = np.linspace(0, 0.45, 10000)
    tc = 0.48
    gw_frequency = B * c_mass ** (-5 / 8) * (tc - t) ** (-3 / 8)
    t_h = np.linspace(-450, 0, 10000)
    t_merge_h = 10
    # The phase is the integral of 2*pi*f; integrating (t_merge - t)**(-3/8)
    # over t gives a -(8/5) * (t_merge - t)**(5/8) factor.
    phase = 2 * np.pi * B * c_mass ** (-5 / 8) * (-8 / 5) * (t_merge_h - t_h) ** (5 / 8)
    f = B * c_mass ** (-5 / 8) * (t_merge_h - t_h) ** (-3 / 8)
    h = f ** (2 / 3) * np.cos(phase)
return t, gw_frequency, t_h, h, c_mass
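# Usage sketch: `chirp_mass` is not part of the snippet above, so a standard
# chirp-mass helper is assumed here as a hypothetical stand-in.
def chirp_mass(m1, m2):
    # Standard chirp mass M_c = (m1*m2)**(3/5) / (m1+m2)**(1/5) (assumption).
    return (m1 * m2) ** (3 / 5) / (m1 + m2) ** (1 / 5)

t, gw_frequency, t_h, h, c_mass = template(30.0, 25.0)
print(c_mass)           # ~23.8 for a 30+25 system
print(gw_frequency[0])  # frequency at t = 0, well before coalescence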
|
64e81538e8b37472c7142e9c21d047bf10a19bc7
| 27,158 |
def _como_hasheable(matriz):
"""Retorna una copia hasheable (y por tanto inmutable) de `matriz`."""
return tuple(tuple(fila) for fila in matriz)
|
a6a1c4371536636d45cfabaf0e2d6938b26a8e08
| 27,159 |
def diff(*args, **kwargs):
"""
    Return a diff between two hex lists
:param args:
:param kwargs:
:return:
"""
    skip_if_same = kwargs.get("skip_if_same", True)
    if len(args) != 2:
        raise NotImplementedError("Only comparison of exactly two lists is supported")
result_list = ([], [])
diff_indexes = []
diff_list = []
for a in args:
diff_list.append(_prepare(a))
_fill_empty_elements(*diff_list)
for e, _ in enumerate(diff_list[0]):
if diff_list[0][e].lower() != diff_list[1][e].lower():
for i in range(2):
result_list[i].append(diff_list[i][e])
diff_indexes.append(e)
continue
if skip_if_same:
for i in range(2):
result_list[i].append("__")
else:
for i in range(2):
result_list[i].append(diff_list[i][e])
return result_list[0], result_list[1], diff_indexes
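# The helpers `_prepare` and `_fill_empty_elements` are not included in the
# snippet, so this sketch substitutes minimal hypothetical stand-ins to make
# the behavior concrete.
def _prepare(data):
    # Hypothetical stand-in: accept either a list of byte strings or a
    # whitespace-separated hex string such as "de ad be ef".
    return data.split() if isinstance(data, str) else list(data)

def _fill_empty_elements(*lists):
    # Hypothetical stand-in: pad the shorter list with "" entries.
    longest = max(len(lst) for lst in lists)
    for lst in lists:
        lst.extend([""] * (longest - len(lst)))

left, right, indexes = diff(["de", "ad", "be", "ef"], ["de", "ad", "be", "00"])
print(left, right, indexes)
# ['__', '__', '__', 'ef'] ['__', '__', '__', '00'] [3]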
|
c7ec1cc92ef3143798e675576dcc2924e24159bb
| 27,160 |
def gravity_effect(position, other_position):
"""Return effect other_position has on position."""
if position == other_position:
return 0
elif position > other_position:
return -1
return 1
|
25130c253cb888057e9b52817cac9cf3778a4c69
| 27,161 |
from operator import mul
def rots(n_phi_pairs):
"""
From the provided list of (axis,angle) pairs, construct the
product of rotations roti(axis0,angle0) *** roti(axis1,angle1) ...
Because rotation of q by A is achieved through A***q***conj(A),
rotate( A *** B, q )
is the same as
rotate( A, rotate(B, q) )
"""
if len(n_phi_pairs) == 0:
return unit()
out = roti(*n_phi_pairs[0])
for n_phi in n_phi_pairs[1:]:
out = mul(out, roti(*n_phi))
return out
|
743ebb3a7a8a68f1178ef4f9116607f33dcdb9cf
| 27,163 |
import numpy as np

def n_cr_shell(
thickness,
radius,
length
):
"""
Critical compressive load for cylindrical shell.
Calculates the critical load for a cylindrical shell under pure
compression and assumes uniform stress distribution. Calculation
according to EN1993-1-6 [1], Annex D.
Parameters
----------
thickness : float
[mm] Shell thickness
radius : float
[mm] Cylinder radius
length : float
        [mm] Cylinder length
Returns
-------
float
[N] Critical load
References
----------
.. [1] Eurocode 3: Design of steel structures - Part 1-6: Strength and stability of shell structures.
Brussels: CEN, 2006.
"""
# Convert inputs to floats
thickness, radius, length = float(thickness), float(radius), float(length)
# Elastic critical load acc to EN3-1-6 Annex D
nn_cr_shell = 2 * np.pi * radius * thickness * sigma_x_rcr(thickness, radius, length)
# Return value
return nn_cr_shell
|
1210c3f19a801a7ddf3bae7b478c47732c701433
| 27,164 |
def unique_slug(s, model, num_chars=50):
"""
Return slug of num_chars length unique to model
`s` is the string to turn into a slug
`model` is the model we need to use to check for uniqueness
"""
slug = slugify(s)
slug = slug[:num_chars].strip('-')
while True:
dup = model.objects.filter(slug=slug)
if not dup:
return slug
        # truncate to leave room for '-' plus 10 random characters
        slug = slug[:num_chars - 11] + '-' + random_string(10)
|
ef34215722cca23417c9e944f6320dba79188c8c
| 27,165 |
def _to_vertexes(data):
"""create points at every vertex, incl holes"""
# create new file
outfile = GeoTable()
outfile.fields = list(data.fields)
# loop points
if "LineString" in data.type:
for feat in data:
if "Multi" in feat.geometry["type"]:
for linestring in feat.geometry["coordinates"]:
                    for point in linestring:
geoj = {"type": "Point",
"coordinates": point}
outfile.add_feature(feat.row, geoj)
else:
for point in feat.geometry["coordinates"]:
geoj = {"type": "Point",
"coordinates": point}
outfile.add_feature(feat.row, geoj)
return outfile
elif "Polygon" in data.type:
for feat in data:
if "Multi" in feat.geometry["type"]:
for polygon in feat.geometry["coordinates"]:
for ext_or_hole in polygon:
for point in ext_or_hole:
geoj = {"type": "Point",
"coordinates": point}
outfile.add_feature(feat.row, geoj)
else:
for ext_or_hole in feat.geometry["coordinates"]:
for point in ext_or_hole:
geoj = {"type": "Point",
"coordinates": point}
outfile.add_feature(feat.row, geoj)
return outfile
else:
return data.copy()
|
8c82eac68399e10b1cf87155f6c2b9e8318a8205
| 27,166 |
import pickle
def _load(fname) :
"""
Load a cached file and return the resulting object
@param fname: file name
"""
    f = open(fname, 'rb')
    try:
        return pickle.load(f)
    finally:
        f.close()
|
5fd5496d226c2ff8265b3dafa0b038bb8015ec5d
| 27,167 |
import warnings
def reale(x, com="error", tol=None, msg=None, xp=None):
"""Return real part of complex data (with error checking).
Parameters
----------
x : array-like
The data to check.
com : {'warn', 'error', 'display', 'report'}
        Control whether to raise a warning, an error, or to just display to the
console. If ``com == 'report'``, the relative magnitude of the
imaginary component is printed to the console.
tol : float or None
Allow complex values below ``tol`` in magnitude. If None, ``tol`` will
be ``1000*eps``.
msg : str or None
Additional message to print upon encountering complex values.
Notes
-----
Based on Jeff Fessler's Matlab function of the same name.
Python port by Gregory Lee.
"""
xp, on_gpu = get_array_module(x, xp)
if not xp.iscomplexobj(x):
return x
if tol is None:
tol = 1000 * xp.finfo(x.dtype).eps
if com not in ["warn", "error", "display", "report"]:
raise ValueError(
(
"Bad com: {}. It must be one of {'warn', 'error', 'display', "
"'report'}"
).format(com)
)
max_abs_x = xp.max(xp.abs(x))
if max_abs_x == 0:
if xp.any(xp.abs(xp.imag(x)) > 0):
raise RuntimeError("max real 0, but imaginary!")
else:
return xp.real(x)
frac = xp.max(xp.abs(x.imag)) / max_abs_x
if com == "report":
print("imaginary part %g%%" % frac * 100)
if frac > tol:
t = "imaginary fraction of x is %g (tol=%g)" % (frac, tol)
if msg is not None:
t += "\n" + msg
if com == "display":
print(t)
elif com == "warn":
warnings.warn(t)
else:
raise RuntimeError(t)
return xp.real(x)
|
0823cdc3cb989f7b29dc70e010ad0ae0d2132fbd
| 27,168 |
def find_less_than_or_equal(series_key, value):
"""Find the largest value less-than or equal-to the given value.
Args:
series_key: An E-Series key such as E24.
value: The query value.
Returns:
The largest value from the specified series which is less-than
or equal-to the query value.
Raises:
ValueError: If series_key is not known.
ValueError: If value is not finite.
ValueError: If value is out of range.
"""
candidates = find_nearest_few(series_key, value, num=3)
for candidate in reversed(candidates):
if candidate <= value:
return candidate
|
4bc6f00910c8d5453d7db82869fe37cd3244cd45
| 27,169 |
def GetCommentsByMigration(migration):
"""Get the comments for a migration"""
q = db.Query(models.Comment).filter('migration =', migration)
return list(q.fetch(1000))
|
555f5c8d2df5b05c579b8e20d30e54a035056081
| 27,170 |
def get_proxy_list(html_response):
"""
Returns list of proxies scraped from html_response.
:param html_response: Raw HTML text
:type html_response: unicode
:rtype: list[unicode]
"""
try:
tmp = IPS_REGEXP.findall(html_response.replace("\n", ","))[0]
proxies = tmp.split("</textarea>")[0].split(",")
except Exception:
raise ParserError()
return proxies
|
38978d1c65022f2575fd9ce94cddb02782fb82fd
| 27,171 |
import fnmatch
def ignore_paths(path_list, ignore_patterns, process=str):
"""
Go through the `path_list` and ignore any paths that match the patterns in `ignore_patterns`
:param path_list: List of file/directory paths.
:param ignore_patterns: List of nukeignore patterns.
:param process: Function to apply to every element in the path list before performing match.
:return: The updated path list
"""
for pattern in ignore_patterns:
path_list = [
n for n in path_list if not fnmatch.fnmatch(process(n), pattern)
]
return path_list
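# A self-contained example; note that fnmatch patterns match across path
# separators here, so "build/*" also drops nested files.
paths = ["src/main.py", "build/out.bin", "docs/readme.md", "build/tmp/x.o"]
print(ignore_paths(paths, ["build/*", "*.md"]))
# ['src/main.py']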
|
63196e54eb4505cbe12ebf77d2a42fede68c1d0b
| 27,172 |
import requests
def check_static(url):
"""
Check viability of static links on cf.gov home page and sub-pages.
Example call to check static assets in production:
./cfgov/scripts/static_asset_smoke_test.py -v /ask-cfpb/ /owning-a-home/
Example of local check of home page:
./cfgov/scripts/static_asset_smoke_test.py -v --base http://localhost:8000
"""
count = 0
failures = []
response = requests.get(url)
if not response.ok:
return "\x1B[91mFAIL! Request to {} failed ({})".format(
url, response.reason
)
static_links = extract_static_links(response.content)
for link in static_links:
count += 1
if link.startswith("/"):
final_url = "{}{}".format(CFPB_BASE, link)
else:
final_url = "{}{}".format(url, link)
code = requests.get(
final_url, headers={"referer": CFPB_BASE}
).status_code
if code == 200:
logger.info("checked {}".format(final_url))
else:
failures.append((link, code))
if failures:
if len(failures) > 2: # allow for font failures when testing locally
return (
"\x1B[91mFAIL! {} static links out of {} failed "
"for {}: {}\x1B[0m\n".format(
len(failures), count, url, failures
)
)
else:
return (
"\x1B[91mPartial failure: {} static links out of {} failed"
" for {}: {}\x1B[0m\n".format(
len(failures), count, url, failures
)
)
else:
return "\x1B[32m{} static links passed " "for {}\x1B[0m\n".format(
count, url
)
|
bccc85254c21471447c15c6fd6a9f2aaaa6ce10d
| 27,173 |
def validation_error_handler(err):
"""
Used to parse use_kwargs validation errors
"""
headers = err.data.get("headers", None)
messages = err.data.get("messages", ["Invalid request."])
schema = ResponseWrapper()
data = messages.get("json", None)
error_msg = "Sorry validation errors occurred"
if headers:
return schema.dump({"data": data, "message": error_msg}), 400, headers
else:
return schema.dump({"data": data, "message": error_msg}), 400
|
3b7ef977b0cf4ec892314923e988509f17e7f49c
| 27,174 |
def is_element(a, symbol="C"):
""" Is the atom of a given element """
return element(a) == symbol
|
a04068346d8872f2f3d6228c0f862bcc11d0ff1b
| 27,175 |
def create_call_status(job, internal_storage):
""" Creates a call status class based on the monitoring backend"""
monitoring_backend = job.config['lithops']['monitoring']
Status = getattr(lithops.worker.status, '{}CallStatus'
.format(monitoring_backend.capitalize()))
return Status(job, internal_storage)
|
63c9903d5facff8512c40b2838b0796869cdb9ff
| 27,176 |
from pathlib import Path
import logging
def file_handler() -> RotatingFileHandler:
"""Create a file-based error handler."""
handler = RotatingFileHandler(
Path("log") / "error.log",
maxBytes=50_000,
backupCount=5,
delay=True,
)
handler.setLevel(logging.ERROR)
handler.setFormatter(
logging.Formatter("[%(asctime)s] %(levelname)s in %(module)s: %(message)s")
)
return handler
|
62de46cf48e99dabc04a0f3c5ed7083f9261ed6a
| 27,177 |
def get_additional_node_groups(node_name, deployment_id):
"""This enables users to reuse hosts in multiple groups."""
groups = []
try:
client = get_rest_client()
except KeyError:
return groups
deployment = client.deployments.get(deployment_id)
for group_name, group in deployment.get('groups', {}).items():
if node_name in group.get('members', []) and group_name:
groups.append(group_name)
return groups
|
76cc8f8b98adde91a75c6ab6f39fa6ca17ceabe0
| 27,178 |
def is_requirement(line):
"""
Return True if the requirement line is a package requirement.
Returns:
bool: True if the line is not blank, a comment, a URL, or
an included file
"""
return line and not line.startswith(('-r', '#', '-e', 'git+', '-c'))
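# For example, filtering the lines of a requirements file:
lines = ["requests>=2.0", "# a comment", "-r base.txt", "", "git+https://github.com/x/y"]
print([line for line in lines if is_requirement(line)])
# ['requests>=2.0']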
|
2b89ced1920ac136e9437fda2fd2f8841debf847
| 27,179 |
import numpy as np

def pden(s, t, p, pr=0):
    r"""
Calculates potential density of water mass relative to the specified
reference pressure by pden = dens(S, ptmp, PR).
Parameters
----------
s(p) : array_like
salinity [psu (PSS-78)]
t(p) : array_like
temperature [℃ (ITS-90)]
p : array_like
pressure [db].
pr : number
reference pressure [db], default = 0
Returns
-------
pden : array_like
potential density relative to the ref. pressure [kg m :sup:3]
Examples
--------
>>> # Data from Unesco Tech. Paper in Marine Sci. No. 44, p22.
>>> import seawater as sw
>>> from seawater.library import T90conv
>>> s = [0, 0, 0, 0, 35, 35, 35, 35]
>>> t = T90conv([0, 0, 30, 30, 0, 0, 30, 30])
>>> p = [0, 10000, 0, 10000, 0, 10000, 0, 10000]
>>> sw.pden(s, t, p)
array([ 999.842594 , 999.79523994, 995.65113374, 996.36115932,
1028.10633141, 1028.15738545, 1021.72863949, 1022.59634627])
:math:`\sigma_{4}` (at 4000 db)
>>> sw.pden(s, t, p, 4000) - 1000
array([ 19.2895493 , 19.33422519, 12.43271053, 13.27563816,
46.30976432, 46.48818851, 37.76150878, 38.74500757])
References
----------
.. [1] A.E. Gill 1982. p.54 Eqn. 3.7.15 "Atmosphere-Ocean Dynamics"
Academic Press: New York. ISBN: 0-12-283522-0
"""
s, t, p, pr = map(np.asanyarray, (s, t, p, pr))
pt = ptmp(s, t, p, pr)
return dens(s, pt, pr)
|
c2b64dbfc3ed554a8929f420792ea56df3858cc0
| 27,180 |
import typing
def SWAP(first: int, second: int, control: typing.Union[int, list] = None, power: float = None) -> QCircuit:
"""
Notes
----------
SWAP gate, order of targets does not matter
Parameters
----------
first: int
target qubit
second: int
target qubit
control
int or list of ints
power
numeric type (fixed exponent) or hashable type (parametrized exponent)
Returns
-------
QCircuit
"""
return _initialize_power_gate(name="SWAP", target=[first, second], control=control, power=power)
|
f5a7af3cc4c9618a17465c3890be81d3251750da
| 27,181 |
import numpy as np

def stable_normalize(x, etha=1.0e-8):
    """Numerically stable vector normalization."""
    n = np.linalg.norm(x, axis=-1, keepdims=True)
    # Clamp elementwise so near-zero vectors do not divide by ~0;
    # np.maximum also works when x (and hence n) is a batch of vectors.
    n = np.maximum(n, etha)
    return x / n
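# Quick usage check, including the degenerate zero vector:
v = np.array([[3.0, 4.0], [0.0, 0.0]])
print(stable_normalize(v))
# [[0.6 0.8]
#  [0.  0. ]]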
|
36428ecde3993c225b19ccc19278d34c4d9bac36
| 27,182 |
from time import time
from functools import reduce
import numpy as np
def Word2VecFeatureGenerator(df):
"""
Finds and returns word embedding for the head and body
and computes cosine similarity.
Input: DataFrame
Returns list(headlineVec, bodyVec, simVec)"""
t0 = time()
print("\n---Generating Word2Vector Features:---")
df["Headline_unigram_vec"] = df["Headline"].map(lambda x: preprocess_data(x, exclude_stopword=False, stem=False))
df["articleBody_unigram_vec"] = df["articleBody"].map(lambda x: preprocess_data(x, exclude_stopword=False, stem=False))
# Document vector built by multiplying together all the word vectors
# using Google's pre-trained word vectors
Headline_unigram_array = df['Headline_unigram_vec'].values
# word vectors weighted by normalized tf-idf coefficient?
headlineVec = list(map(lambda x: reduce(np.add, [model[y] for y in x if y in model], [0.]*50), Headline_unigram_array))
headlineVec = np.array(headlineVec)
#headlineVec = np.exp(headlineVec)
headlineVec = normalize(headlineVec)
print ('headlineVec.shape: ', headlineVec.shape)
Body_unigram_array = df['articleBody_unigram_vec'].values
bodyVec = list(map(lambda x: reduce(np.add, [model[y] for y in x if y in model], [0.]*50), Body_unigram_array))
bodyVec = np.array(bodyVec)
bodyVec = normalize(bodyVec)
print ('bodyVec.shape: ', bodyVec.shape)
# compute cosine similarity between headline/body word2vec features
simVec = np.asarray(list(map(cosine_sim, headlineVec, bodyVec)))[:, np.newaxis]
print ('simVec.shape: ', simVec.shape)
print("---Word2Vector Features is complete---")
print("Time taken {} seconds\n".format(time() - t0))
return [headlineVec, bodyVec, simVec]
|
0ff2c339ed953592173c0d5afafc72daeeef86a2
| 27,183 |
def reachable(Adj, s, t):
"""
Adj is adjacency list rep of graph
Return True if edges in Adj have directed path from s to t.
Note that this routine is one of the most-used and most time-consuming
of this whole procedure, which is why it is passed an adjacency list
rep rather than a list of vertices and edges, since the adjacency list
rep is easy to update when a new edge is committed to in RP.
"""
# search for path
Q = [ s ] # vertices to expand
R = set([s]) # reachable
while Q:
i = Q.pop()
for j in Adj[i]:
if j == t:
return True
if j not in R:
R.add(j)
Q.append(j)
return False
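# Usage with a small adjacency-list graph:
Adj = {'a': ['b'], 'b': ['c'], 'c': [], 'd': ['a']}
print(reachable(Adj, 'a', 'c'))  # True  (a -> b -> c)
print(reachable(Adj, 'c', 'a'))  # False (no edges leave c)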
|
dc0ea0c6d2314fa1c40c3f3aa257a1c77892141f
| 27,184 |
def remove_links(
actor: Actor, company: Company, *, facebook=False, linkedin=False, twitter=False
) -> Response:
"""Remove links to all existing Online Profiles."""
response, _ = update_profiles(
actor,
company,
facebook=facebook,
linkedin=linkedin,
twitter=twitter,
specific_facebook="",
specific_linkedin="",
specific_twitter="",
)
return response
|
bdfeefa8366031022a3a108c385f244e6fd740bf
| 27,185 |
def get_basis_psd(psd_array, notes):
"""Get avg psd from the training set (will serve as a basis)"""
psd_dict = {}
psd_basis_list = []
syl_basis_list = []
unique_note = unique(notes) # convert note string into a list of unique syllables
# Remove unidentifiable note (e.g., '0' or 'x')
if '0' in unique_note:
unique_note.remove('0')
if 'x' in unique_note:
unique_note.remove('x')
for note in unique_note:
ind = find_str(notes, note)
if len(ind) >= num_note_crit_basis: # number should exceed the criteria
syl_pow_array = psd_array[ind, :]
syl_pow_avg = syl_pow_array.mean(axis=0)
temp_dict = {note: syl_pow_avg}
psd_basis_list.append(syl_pow_avg)
syl_basis_list.append(note)
psd_dict.update(temp_dict) # basis
# plt.plot(psd_dict[note])
# plt.show()
return psd_basis_list, syl_basis_list
|
f8b1e596fdda1125159a963d3362e87abe7b4bfe
| 27,186 |
def _get_columns(statement):
"""Get the available columns in the query `statement`.
:param statement: A SQL SELECT statement.
:returns: A list of columns that are being selected.
"""
expecting_columns = False
for token in statement.tokens:
if token.is_whitespace():
pass
elif token.ttype is sqlparse.tokens.DML and token.value.upper() == "SELECT":
expecting_columns = True
elif expecting_columns:
return [x.value for x in token.flatten() if x.ttype is sqlparse.tokens.Name]
|
49db28bd92d05f4d6e35c32f87e0be4a04e80a92
| 27,187 |
def create_conv(in_channels, out_channels, kernel_size, order, num_groups, padding=1):
"""
    Create a list of modules which together constitute a single conv layer with non-linearity
and optional batchnorm/groupnorm.
Args:
in_channels (int): number of input channels
out_channels (int): number of output channels
order (string): order of things, e.g.
'cr' -> conv + ReLU
'crg' -> conv + ReLU + groupnorm
'cl' -> conv + LeakyReLU
'ce' -> conv + ELU
num_groups (int): number of groups for the GroupNorm
padding (int): add zero-padding to the input
Return:
list of tuple (name, module)
"""
assert 'c' in order, "Conv layer MUST be present"
assert order[0] not in 'rle', 'Non-linearity cannot be the first operation in the layer'
modules = []
for i, char in enumerate(order):
if char == 'r':
modules.append(('ReLU', nn.ReLU(inplace=True)))
elif char == 'l':
modules.append(('LeakyReLU', nn.LeakyReLU(negative_slope=0.1, inplace=True)))
elif char == 'e':
modules.append(('ELU', nn.ELU(inplace=True)))
elif char == 'c':
            # add learnable bias only in the absence of batchnorm/groupnorm
bias = not ('g' in order or 'b' in order)
modules.append(('conv', conv3d(in_channels, out_channels, kernel_size, bias, padding=padding)))
elif char == 'g':
is_before_conv = i < order.index('c')
assert not is_before_conv, 'GroupNorm MUST go after the Conv3d'
# number of groups must be less or equal the number of channels
if out_channels < num_groups:
num_groups = out_channels
modules.append(('groupnorm', nn.GroupNorm(num_groups=num_groups, num_channels=out_channels)))
elif char == 'b':
is_before_conv = i < order.index('c')
if is_before_conv:
modules.append(('batchnorm', nn.BatchNorm3d(in_channels)))
else:
modules.append(('batchnorm', nn.BatchNorm3d(out_channels)))
else:
raise ValueError(f"Unsupported layer type '{char}'. MUST be one of ['b', 'g', 'r', 'l', 'e', 'c']")
return modules
|
43ccd6342b0598ab0715960cbae8ad9efb7e29ce
| 27,189 |
def expand_json(metadata, context=DEFAULT_CONTEXT):
"""
Expand json, but be sure to use our documentLoader.
By default this expands with DEFAULT_CONTEXT, but if you do not need this,
you can safely set this to None.
# @@: Is the above a good idea? Maybe it should be set to None by
# default.
"""
options = {
"documentLoader": load_context}
if context is not None:
options["expandContext"] = context
return jsonld.expand(metadata, options=options)
|
6fa1f5c4f93f75e45c9a535fe56190ea869dfaa0
| 27,190 |
def parsemsg(s, encoding="utf-8"):
"""Parse an IRC Message from s
:param s bytes: bytes to parse
:param encoding str: encoding to use (Default: utf-8)
:returns tuple: parsed message in the form of (prefix, command, args)
"""
s = s.decode(encoding, 'replace')
prefix = u("")
trailing = []
if s and s[0] == u(":"):
prefix, s = s[1:].split(u(" "), 1)
prefix = parseprefix(prefix)
if s.find(u(" :")) != -1:
s, trailing = s.split(u(" :"), 1)
args = s.split()
args.append(trailing)
else:
args = s.split()
args = iter(args)
command = next(args, None)
command = command and str(command)
return prefix, command, list(args)
|
b0205609724eb91d6b53fe37cc9be508a640a95a
| 27,195 |
def concatenate_unique(la, lb):
"""Add all the elements of `lb` to `la` if they are not there already.
The elements added to `la` maintain ordering with respect to `lb`.
Args:
la: List of Python objects.
lb: List of Python objects.
Returns:
`la`: The list `la` with missing elements from `lb`.
"""
la_set = set(la)
for l in lb:
if l not in la_set:
la.append(l)
la_set.add(l)
return la
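# Example; note that `la` is mutated in place and also returned:
la = [1, 2, 3]
print(concatenate_unique(la, [2, 4, 3, 5]))  # [1, 2, 3, 4, 5]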
|
307fde291233727c59e2211afc3e0eed7c8ea092
| 27,196 |
from datetime import datetime
def email_last_send_for_sub(sub_id):
"""Return when an email was last sent for a subscription, or None."""
last_sent = db.get('email_sub_last_sent:{}'.format(sub_id))
if last_sent is not None:
        return datetime.strptime(last_sent, '%Y-%m-%dT%H:%M:%SZ')
|
e44740a38a2af35c92474aa8da92481d439cc94c
| 27,197 |
def is_output_op(node):
"""Return true when the node is the output of the graph."""
return node.WhichOneof("op_type") == "output_conf"
|
9a20a471a397a480cc2b295cc96961e030f71e43
| 27,198 |
import torchvision
def plot_samples_close_to_score(ood_dict: dict, dataset_name: str, min_score: float, max_score: float, n: int = 32,
do_lesional: bool = True, show_ground_truth: bool = False,
print_score: bool = False) -> None:
"""Arrange slices in a grid such that each slice displayed is closest to the interpolation OOD score from
linspace which goes from min_score to max_score with n samples."""
ood_dict = ood_dict[dataset_name]
ref_scores = np.linspace(min_score, max_score, n)
def create_ood_grids(healthy_leasional: str):
scores = ood_dict[healthy_leasional]
slices = ood_dict[f'{healthy_leasional}_scans']
masks = ood_dict['masks']
segmentations = ood_dict[f'{healthy_leasional}_segmentations']
final_scores = []
final_slices = []
final_masks = []
final_segmentations = []
for ref_score in ref_scores:
scores_idx = get_idx_of_closest_value(scores, ref_score)
final_scores.append(scores[scores_idx])
final_slices.append(slices[scores_idx])
final_masks.append(masks[scores_idx])
if show_ground_truth:
final_segmentations.append(segmentations[scores_idx])
final_slices = [normalize_to_0_1(s) for s in final_slices]
final_slices = [mask_background_to_zero(s, m) for s, m in zip(final_slices, final_masks)]
slices_grid = torchvision.utils.make_grid(final_slices, padding=0, normalize=False)
segmentations_grid = None
if show_ground_truth:
segmentations_grid = torchvision.utils.make_grid(final_segmentations, padding=0, normalize=False)
if print_score:
formatted_scores = [f'{val:.2f}' for val in final_scores]
LOG.info(f'Scores: {formatted_scores}')
return slices_grid, segmentations_grid
healthy_slices_grid, healthy_segmentations_grid = create_ood_grids('healthy')
imshow_grid(healthy_slices_grid, one_channel=True, figsize=(12, 8),
title=f'Healthy {dataset_name} {min_score}-{max_score}', axis='off')
if show_ground_truth:
imshow_grid(healthy_segmentations_grid, one_channel=True, figsize=(12, 8),
title=f'Healthy Ground Truth {dataset_name} {min_score}-{max_score}', axis='off')
if do_lesional:
lesional_slices_grid, lesional_segmentations_grid = create_ood_grids('lesional')
imshow_grid(lesional_slices_grid, one_channel=True, figsize=(12, 8),
title=f'Lesional {dataset_name} {min_score}-{max_score}', axis='off')
if show_ground_truth:
imshow_grid(lesional_segmentations_grid, one_channel=True, figsize=(12, 8),
title=f'Lesional Ground Truth {dataset_name} {min_score}-{max_score}', axis='off')
|
33149237d3d36cbae04a1902994487107447a5e5
| 27,199 |
from salt.cloud.exceptions import SaltCloudException
def avail_images(call=None):
"""
REturns available upcloud templates
"""
if call == 'action':
raise SaltCloudException(
            'The avail_images function must be called with -f or --function.'
)
manager = _get_manager()
templates = manager.get_storages(storage_type='template')
ret = {}
for storage in templates:
ret[storage.uuid] = {
attr: getattr( storage, attr )
for attr in storage.ATTRIBUTES if hasattr(storage, attr)
}
return ret
|
8adf233a1c1bfeef23b94689c0ccb4230fc2b5b5
| 27,200 |
import requests
def _verify_email_upload(transfer_id: str, session: requests.Session) -> str:
"""Given a transfer_id, read the code from standard input.
Return the parsed JSON response.
"""
code = input('Code:')
j = {
"code": code,
"expire_in": WETRANSFER_EXPIRE_IN,
}
r = session.post(WETRANSFER_VERIFY_URL.format(transfer_id=transfer_id),
json=j)
return r.json()
|
d46610f4dc7582df68fc82654de167a43729d8af
| 27,202 |
def envelope_generic(pshape, *args, **kwargs):
"""
Envelope for a given pulse shape at a given time or times.
Parameters
----------
pshape : str or function object
Pulse shape type or user-provided function.
Allowed string values are 'square', 'gauss', 'cos',
'flattop_gauss', 'flattop_cos', 'user'.
*args and **kwargs
Positional and keyword arguments to pass on to a pulse shaping
function.
See Also
--------
derenv_generic
"""
if callable(pshape):
return pshape(*args, **kwargs)
elif pshape == 'square':
return envelope_square(*args, **kwargs)
elif pshape == 'gauss':
return envelope_gauss(*args, **kwargs)
elif pshape == 'cos':
return envelope_cos(*args, **kwargs)
elif pshape == 'flattop_gauss':
return envelope_flattop_gauss(*args, **kwargs)
elif pshape == 'flattop_cos':
return envelope_flattop_cos(*args, **kwargs)
else:
        raise ValueError(
            '`pshape` must be a function object or one of '
            '"square", "gauss", "cos", "flattop_gauss", "flattop_cos"')
|
6e6d007a6602c90c1d39b4749442b910d90f9cf1
| 27,204 |
def sdb_longitude(longitude):
"""Return an 8 character, zero padded string version of the
longitude parameter.
**Arguments:**
* *longitude* -- Longitude.
"""
adjusted = (180 + float(longitude)) * 100000
return str(int(adjusted)).zfill(8)
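# The offset-and-scale encoding keeps lexicographic and numeric order
# aligned, which is what SimpleDB-style string comparisons need:
print(sdb_longitude(-122.0))  # '05800000'
print(sdb_longitude(0))       # '18000000'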
|
b7b82819952d30ea58dc9dc08e0c4ab63d92743e
| 27,205 |
def is_allowed_location(location, allowed_location):
""""
Returns true if the location is allowed_location
Args:
location: location id
allowed_location: allowed_location
Returns:
is_allowed(bool): Is location allowed.
"""
if allowed_location == 1:
return True
global allowed_locations_locs
if allowed_locations_locs is None:
allowed_locations_locs = get_locations(db.session)
if is_child(allowed_location, int(location), allowed_locations_locs):
return True
return False
|
e16b8bb15827b34c65b3ee6aa36f710ad2aaeea5
| 27,207 |
def gsl_isinf(*args, **kwargs):
"""gsl_isinf(double x) -> int"""
return _gslwrap.gsl_isinf(*args, **kwargs)
|
71b1a114cdb581721eaafc442a038b8183a056d1
| 27,208 |
def abstract_clone(__call__, self, x, *args):
"""Clone an abstract value."""
def proceed():
if isinstance(x, AbstractValue) and x in cache:
return cache[x]
result = __call__(self, x, *args)
if not isinstance(result, GeneratorType):
return result
cls = result.send(None)
if cls is not None:
inst = cls.empty()
else:
inst = None
constructor = _make_constructor(inst)
cache[x] = inst
try:
result.send(constructor)
except StopIteration as e:
if inst is not None:
assert e.value is inst
return e.value
else:
raise AssertionError(
'Generators in abstract_clone must yield once, then return.'
)
cache = self.state.cache
prop = self.state.prop
if prop:
if hasattr(x, prop):
return getattr(x, prop)
elif isinstance(x, AbstractValue):
if self.state.check(x, *args):
res = x
else:
res = proceed()
setattr(x, prop, res)
return res
else:
return proceed()
elif self.state.check and self.state.check(x, *args):
return x
else:
return proceed()
|
4c5f85a710d29f751adb7b2a5cd37ca33136e227
| 27,209 |
from typing import Union
from typing import Dict
from typing import Any
import typing
def Layout(
align_baseline: bool = None,
align_center: bool = None,
align_content_center: bool = None,
align_content_end: bool = None,
align_content_space_around: bool = None,
align_content_space_between: bool = None,
align_content_start: bool = None,
align_end: bool = None,
align_start: bool = None,
attributes: dict = {},
children: list = [],
class_: str = None,
column: bool = None,
d_block: bool = None,
d_contents: bool = None,
d_flex: bool = None,
d_grid: bool = None,
d_inherit: bool = None,
d_initial: bool = None,
d_inline: bool = None,
d_inline_block: bool = None,
d_inline_flex: bool = None,
d_inline_grid: bool = None,
d_inline_table: bool = None,
d_list_item: bool = None,
d_none: bool = None,
d_run_in: bool = None,
d_table: bool = None,
d_table_caption: bool = None,
d_table_cell: bool = None,
d_table_column: bool = None,
d_table_column_group: bool = None,
d_table_footer_group: bool = None,
d_table_header_group: bool = None,
d_table_row: bool = None,
d_table_row_group: bool = None,
fill_height: bool = None,
id: str = None,
justify_center: bool = None,
justify_end: bool = None,
justify_space_around: bool = None,
justify_space_between: bool = None,
justify_start: bool = None,
layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
ma_0: bool = None,
ma_1: bool = None,
ma_2: bool = None,
ma_3: bool = None,
ma_4: bool = None,
ma_5: bool = None,
ma_auto: bool = None,
mb_0: bool = None,
mb_1: bool = None,
mb_2: bool = None,
mb_3: bool = None,
mb_4: bool = None,
mb_5: bool = None,
mb_auto: bool = None,
ml_0: bool = None,
ml_1: bool = None,
ml_2: bool = None,
ml_3: bool = None,
ml_4: bool = None,
ml_5: bool = None,
ml_auto: bool = None,
mr_0: bool = None,
mr_1: bool = None,
mr_2: bool = None,
mr_3: bool = None,
mr_4: bool = None,
mr_5: bool = None,
mr_auto: bool = None,
mt_0: bool = None,
mt_1: bool = None,
mt_2: bool = None,
mt_3: bool = None,
mt_4: bool = None,
mt_5: bool = None,
mt_auto: bool = None,
mx_0: bool = None,
mx_1: bool = None,
mx_2: bool = None,
mx_3: bool = None,
mx_4: bool = None,
mx_5: bool = None,
mx_auto: bool = None,
my_0: bool = None,
my_1: bool = None,
my_2: bool = None,
my_3: bool = None,
my_4: bool = None,
my_5: bool = None,
my_auto: bool = None,
pa_0: bool = None,
pa_1: bool = None,
pa_2: bool = None,
pa_3: bool = None,
pa_4: bool = None,
pa_5: bool = None,
pa_auto: bool = None,
pb_0: bool = None,
pb_1: bool = None,
pb_2: bool = None,
pb_3: bool = None,
pb_4: bool = None,
pb_5: bool = None,
pb_auto: bool = None,
pl_0: bool = None,
pl_1: bool = None,
pl_2: bool = None,
pl_3: bool = None,
pl_4: bool = None,
pl_5: bool = None,
pl_auto: bool = None,
pr_0: bool = None,
pr_1: bool = None,
pr_2: bool = None,
pr_3: bool = None,
pr_4: bool = None,
pr_5: bool = None,
pr_auto: bool = None,
pt_0: bool = None,
pt_1: bool = None,
pt_2: bool = None,
pt_3: bool = None,
pt_4: bool = None,
pt_5: bool = None,
pt_auto: bool = None,
px_0: bool = None,
px_1: bool = None,
px_2: bool = None,
px_3: bool = None,
px_4: bool = None,
px_5: bool = None,
px_auto: bool = None,
py_0: bool = None,
py_1: bool = None,
py_2: bool = None,
py_3: bool = None,
py_4: bool = None,
py_5: bool = None,
py_auto: bool = None,
reverse: bool = None,
row: bool = None,
slot: str = None,
style_: str = None,
tag: str = None,
v_model: Any = "!!disabled!!",
v_on: str = None,
v_slots: list = [],
wrap: bool = None,
on_align_baseline: typing.Callable[[bool], Any] = None,
on_align_center: typing.Callable[[bool], Any] = None,
on_align_content_center: typing.Callable[[bool], Any] = None,
on_align_content_end: typing.Callable[[bool], Any] = None,
on_align_content_space_around: typing.Callable[[bool], Any] = None,
on_align_content_space_between: typing.Callable[[bool], Any] = None,
on_align_content_start: typing.Callable[[bool], Any] = None,
on_align_end: typing.Callable[[bool], Any] = None,
on_align_start: typing.Callable[[bool], Any] = None,
on_attributes: typing.Callable[[dict], Any] = None,
on_children: typing.Callable[[list], Any] = None,
on_class_: typing.Callable[[str], Any] = None,
on_column: typing.Callable[[bool], Any] = None,
on_d_block: typing.Callable[[bool], Any] = None,
on_d_contents: typing.Callable[[bool], Any] = None,
on_d_flex: typing.Callable[[bool], Any] = None,
on_d_grid: typing.Callable[[bool], Any] = None,
on_d_inherit: typing.Callable[[bool], Any] = None,
on_d_initial: typing.Callable[[bool], Any] = None,
on_d_inline: typing.Callable[[bool], Any] = None,
on_d_inline_block: typing.Callable[[bool], Any] = None,
on_d_inline_flex: typing.Callable[[bool], Any] = None,
on_d_inline_grid: typing.Callable[[bool], Any] = None,
on_d_inline_table: typing.Callable[[bool], Any] = None,
on_d_list_item: typing.Callable[[bool], Any] = None,
on_d_none: typing.Callable[[bool], Any] = None,
on_d_run_in: typing.Callable[[bool], Any] = None,
on_d_table: typing.Callable[[bool], Any] = None,
on_d_table_caption: typing.Callable[[bool], Any] = None,
on_d_table_cell: typing.Callable[[bool], Any] = None,
on_d_table_column: typing.Callable[[bool], Any] = None,
on_d_table_column_group: typing.Callable[[bool], Any] = None,
on_d_table_footer_group: typing.Callable[[bool], Any] = None,
on_d_table_header_group: typing.Callable[[bool], Any] = None,
on_d_table_row: typing.Callable[[bool], Any] = None,
on_d_table_row_group: typing.Callable[[bool], Any] = None,
on_fill_height: typing.Callable[[bool], Any] = None,
on_id: typing.Callable[[str], Any] = None,
on_justify_center: typing.Callable[[bool], Any] = None,
on_justify_end: typing.Callable[[bool], Any] = None,
on_justify_space_around: typing.Callable[[bool], Any] = None,
on_justify_space_between: typing.Callable[[bool], Any] = None,
on_justify_start: typing.Callable[[bool], Any] = None,
on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
on_ma_0: typing.Callable[[bool], Any] = None,
on_ma_1: typing.Callable[[bool], Any] = None,
on_ma_2: typing.Callable[[bool], Any] = None,
on_ma_3: typing.Callable[[bool], Any] = None,
on_ma_4: typing.Callable[[bool], Any] = None,
on_ma_5: typing.Callable[[bool], Any] = None,
on_ma_auto: typing.Callable[[bool], Any] = None,
on_mb_0: typing.Callable[[bool], Any] = None,
on_mb_1: typing.Callable[[bool], Any] = None,
on_mb_2: typing.Callable[[bool], Any] = None,
on_mb_3: typing.Callable[[bool], Any] = None,
on_mb_4: typing.Callable[[bool], Any] = None,
on_mb_5: typing.Callable[[bool], Any] = None,
on_mb_auto: typing.Callable[[bool], Any] = None,
on_ml_0: typing.Callable[[bool], Any] = None,
on_ml_1: typing.Callable[[bool], Any] = None,
on_ml_2: typing.Callable[[bool], Any] = None,
on_ml_3: typing.Callable[[bool], Any] = None,
on_ml_4: typing.Callable[[bool], Any] = None,
on_ml_5: typing.Callable[[bool], Any] = None,
on_ml_auto: typing.Callable[[bool], Any] = None,
on_mr_0: typing.Callable[[bool], Any] = None,
on_mr_1: typing.Callable[[bool], Any] = None,
on_mr_2: typing.Callable[[bool], Any] = None,
on_mr_3: typing.Callable[[bool], Any] = None,
on_mr_4: typing.Callable[[bool], Any] = None,
on_mr_5: typing.Callable[[bool], Any] = None,
on_mr_auto: typing.Callable[[bool], Any] = None,
on_mt_0: typing.Callable[[bool], Any] = None,
on_mt_1: typing.Callable[[bool], Any] = None,
on_mt_2: typing.Callable[[bool], Any] = None,
on_mt_3: typing.Callable[[bool], Any] = None,
on_mt_4: typing.Callable[[bool], Any] = None,
on_mt_5: typing.Callable[[bool], Any] = None,
on_mt_auto: typing.Callable[[bool], Any] = None,
on_mx_0: typing.Callable[[bool], Any] = None,
on_mx_1: typing.Callable[[bool], Any] = None,
on_mx_2: typing.Callable[[bool], Any] = None,
on_mx_3: typing.Callable[[bool], Any] = None,
on_mx_4: typing.Callable[[bool], Any] = None,
on_mx_5: typing.Callable[[bool], Any] = None,
on_mx_auto: typing.Callable[[bool], Any] = None,
on_my_0: typing.Callable[[bool], Any] = None,
on_my_1: typing.Callable[[bool], Any] = None,
on_my_2: typing.Callable[[bool], Any] = None,
on_my_3: typing.Callable[[bool], Any] = None,
on_my_4: typing.Callable[[bool], Any] = None,
on_my_5: typing.Callable[[bool], Any] = None,
on_my_auto: typing.Callable[[bool], Any] = None,
on_pa_0: typing.Callable[[bool], Any] = None,
on_pa_1: typing.Callable[[bool], Any] = None,
on_pa_2: typing.Callable[[bool], Any] = None,
on_pa_3: typing.Callable[[bool], Any] = None,
on_pa_4: typing.Callable[[bool], Any] = None,
on_pa_5: typing.Callable[[bool], Any] = None,
on_pa_auto: typing.Callable[[bool], Any] = None,
on_pb_0: typing.Callable[[bool], Any] = None,
on_pb_1: typing.Callable[[bool], Any] = None,
on_pb_2: typing.Callable[[bool], Any] = None,
on_pb_3: typing.Callable[[bool], Any] = None,
on_pb_4: typing.Callable[[bool], Any] = None,
on_pb_5: typing.Callable[[bool], Any] = None,
on_pb_auto: typing.Callable[[bool], Any] = None,
on_pl_0: typing.Callable[[bool], Any] = None,
on_pl_1: typing.Callable[[bool], Any] = None,
on_pl_2: typing.Callable[[bool], Any] = None,
on_pl_3: typing.Callable[[bool], Any] = None,
on_pl_4: typing.Callable[[bool], Any] = None,
on_pl_5: typing.Callable[[bool], Any] = None,
on_pl_auto: typing.Callable[[bool], Any] = None,
on_pr_0: typing.Callable[[bool], Any] = None,
on_pr_1: typing.Callable[[bool], Any] = None,
on_pr_2: typing.Callable[[bool], Any] = None,
on_pr_3: typing.Callable[[bool], Any] = None,
on_pr_4: typing.Callable[[bool], Any] = None,
on_pr_5: typing.Callable[[bool], Any] = None,
on_pr_auto: typing.Callable[[bool], Any] = None,
on_pt_0: typing.Callable[[bool], Any] = None,
on_pt_1: typing.Callable[[bool], Any] = None,
on_pt_2: typing.Callable[[bool], Any] = None,
on_pt_3: typing.Callable[[bool], Any] = None,
on_pt_4: typing.Callable[[bool], Any] = None,
on_pt_5: typing.Callable[[bool], Any] = None,
on_pt_auto: typing.Callable[[bool], Any] = None,
on_px_0: typing.Callable[[bool], Any] = None,
on_px_1: typing.Callable[[bool], Any] = None,
on_px_2: typing.Callable[[bool], Any] = None,
on_px_3: typing.Callable[[bool], Any] = None,
on_px_4: typing.Callable[[bool], Any] = None,
on_px_5: typing.Callable[[bool], Any] = None,
on_px_auto: typing.Callable[[bool], Any] = None,
on_py_0: typing.Callable[[bool], Any] = None,
on_py_1: typing.Callable[[bool], Any] = None,
on_py_2: typing.Callable[[bool], Any] = None,
on_py_3: typing.Callable[[bool], Any] = None,
on_py_4: typing.Callable[[bool], Any] = None,
on_py_5: typing.Callable[[bool], Any] = None,
on_py_auto: typing.Callable[[bool], Any] = None,
on_reverse: typing.Callable[[bool], Any] = None,
on_row: typing.Callable[[bool], Any] = None,
on_slot: typing.Callable[[str], Any] = None,
on_style_: typing.Callable[[str], Any] = None,
on_tag: typing.Callable[[str], Any] = None,
on_v_model: typing.Callable[[Any], Any] = None,
on_v_on: typing.Callable[[str], Any] = None,
on_v_slots: typing.Callable[[list], Any] = None,
on_wrap: typing.Callable[[bool], Any] = None,
) -> Element[ipyvuetify.generated.Layout]:
""" """
kwargs: Dict[Any, Any] = without_default(Layout, locals())
if isinstance(kwargs.get("layout"), dict):
kwargs["layout"] = w.Layout(**kwargs["layout"])
widget_cls = ipyvuetify.generated.Layout
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs)
|
b20157e22d69e32fc89d5abda243b5e967d1eefe
| 27,210 |
def paint_hull(inputfile, hull=None):
"""Launches the emergency hull painting robot with the specified Intcode source file
Parameters
----------
inputfile: str
Path/Filename of Intcode source code
hull : dict<(int,int): int>
Initial state of the hull
"""
robot_pos = (0, 0)
robot_dir = (0, -1)
machine = IntcodeVM(inputfile, silent=True)
machine.run()
while machine.waiting:
color, turn = machine.resume([hull.get(robot_pos, BLACK)])
hull[robot_pos] = color
robot_dir = TURNS[robot_dir][turn]
robot_pos = (robot_pos[0] + robot_dir[0], robot_pos[1] + robot_dir[1])
return hull
|
a781aca8f946cc6931b310848064df93ce888012
| 27,211 |
from typing import Tuple
from datetime import datetime
def get_token_expiration(parser: ConfigParser, profile: str) -> Tuple[str, str]:
"""Return token expiration date and whether it is expired.
Parameters
----------
parser : ConfigParser
Parser with all configuration files.
profile : str
Profile name.
Returns
-------
Tuple[str, str]
Tuple with expiration date and whether it is expired.
"""
expiration = parser.get(profile, 'aws_session_token_expiration', fallback=None)
if expiration is None:
return "", ""
dt = datetime.strptime(expiration, "%Y-%m-%dT%H:%M:%S%z")
expired = "N" if dt > datetime.now(dt.tzinfo) else "Y"
return f"{dt:%Y-%m-%d %H:%M:%S}", expired
|
0ec467b5c1455784f28b1529e82116e1dbc0dde6
| 27,212 |
def ensure_format(s: str, n_chars: int = None) -> str:
"""
Removes spaces within a string and ensures proper format
------
PARAMS
------
1. 's' -> input string
2. 'n_chars' -> Num characters the string should consist of. Defaults to None.
"""
assert isinstance(s, str), "Input must be a string."
s = s.replace(" ", "") #clean spaces
if n_chars:
assert len(s) == n_chars, f"Input must be a payload of {n_chars} characters."
return s
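# Example:
print(ensure_format("AB CD EF", n_chars=6))  # 'ABCDEF'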
|
646faf1f155fdd2023ea711adfe62d6e13dd0954
| 27,213 |
def gamma_boundary_condition(gamma=-3):
"""
Defines boundary condition parameterized by either a scalar or list/iterable.
    In the latter case, the values are piecewise-interpolated on an equispaced
    grid over the interior of (0, 1). In the former, the scalar defines the
    minimum displacement value of the boundary condition.
"""
if isinstance(gamma, int) or isinstance(gamma, float): # 1-D case
# the function below will have a min at (2/7, gamma) by design
# (scaling factor chosen via calculus)
lam = gamma * 823543 / 12500
expr = fin.Expression(f"pow(x[1], 2) * pow(1 - x[1], 5) * {lam}", degree=3)
else: # Higher-D case
expr = fin.Expression(piecewise_eval_from_vector(gamma, d=1), degree=1)
return expr
|
acdfe4e311361c27ef0cec4dbebb2fb51d180683
| 27,214 |
def toggleDateSyncButton(click):
"""Change the color of on/off date syncing button - for css."""
if not click:
click = 0
if click % 2 == 0:
children = "Date Syncing: On"
style = {**on_button_style, **{"margin-right": "15px"}}
else:
children = "Date Syncing: Off"
style = {**off_button_style, **{"margin-right": "15px"}}
return style, children
|
f8d74feb690044c96d41d83802f791f54f3f6ab8
| 27,215 |
def _opt(spc_info, mod_thy_info, geo, run_fs,
script_str, opt_cart=True, **kwargs):
""" Run an optimization
"""
# Optimize displaced geometry
geom = geo if opt_cart else automol.geom.zmatrix(geo)
success, ret = es_runner.execute_job(
job=elstruct.Job.OPTIMIZATION,
script_str=script_str,
run_fs=run_fs,
geo=geom,
spc_info=spc_info,
thy_info=mod_thy_info,
zrxn=None,
overwrite=True,
**kwargs,
)
if success:
inf_obj, _, out_str = ret
prog = inf_obj.prog
ret_geo = elstruct.reader.opt_geometry(prog, out_str)
else:
ret_geo = None
return ret_geo, ret
|
b06a97dd33d66a3bc6dc755f48e010f03eb96832
| 27,218 |
def fi(x, y, z, i):
"""The f1, f2, f3, f4, and f5 functions from the specification."""
if i == 0:
return x ^ y ^ z
elif i == 1:
return (x & y) | (~x & z)
elif i == 2:
return (x | ~y) ^ z
elif i == 3:
return (x & z) | (y & ~z)
elif i == 4:
return x ^ (y | ~z)
else:
assert False
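# These are the five boolean round functions of RIPEMD-160; a quick check
# on small ints:
print(fi(0b1100, 0b1010, 0b0110, 0))  # 0  (plain XOR)
print(fi(0b1100, 0b1010, 0b0110, 1))  # 10 (0b1010, the x-selects-y-else-z mux)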
|
af30fff4cfc2036eb2b7d3e6b33d365b8d64404a
| 27,219 |
import torch
def th_accuracy(pad_outputs, pad_targets, ignore_label):
"""Calculate accuracy.
Args:
pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
        pad_targets (LongTensor): Target label tensors (B, Lmax).
ignore_label (int): Ignore label id.
Returns:
float: Accuracy value (0.0 - 1.0).
"""
pad_pred = pad_outputs.view(
pad_targets.size(0),
pad_targets.size(1),
pad_outputs.size(1)).argmax(2)
mask = pad_targets != ignore_label
numerator = torch.sum(pad_pred.masked_select(mask) == pad_targets.masked_select(mask))
denominator = torch.sum(mask)
return float(numerator) / float(denominator)
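# A minimal sanity check (a sketch; shapes follow the docstring, with one-hot
# rows standing in for logits):
pad_targets = torch.tensor([[1, 2, -1], [0, 3, -1]])    # (B=2, Lmax=3); -1 is ignored
pad_outputs = torch.eye(4)[[1, 2, 0, 0, 3, 0]].float()  # (B*Lmax=6, D=4)
print(th_accuracy(pad_outputs, pad_targets, ignore_label=-1))  # 1.0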
|
31b89a949a6c2cfa7e9dd2dddc8e0f25d148d5e9
| 27,220 |
def staff_level():
""" Staff Levels Controller """
mode = session.s3.hrm.mode
def prep(r):
if mode is not None:
auth.permission.fail()
return True
s3.prep = prep
output = s3_rest_controller()
return output
|
f665c822c002a9b27e2f8132213c7c2b8841611d
| 27,221 |
def aa_status_string (status):
"""usage: str return = aa_status_string(int status)"""
if not AA_LIBRARY_LOADED: return AA_INCOMPATIBLE_LIBRARY
# Call API function
return api.py_aa_status_string(status)
|
eabe0b6016b269a749e86e88e3eb5aab8e844215
| 27,222 |
def echo(context, args):
""" echo text
Echo back the following text."""
info(args)
return context
|
887e49ce9ff95c7eabf0499756e1cfce418ccd59
| 27,223 |
import time
def wait_for_visibility(element, wait_time=1):
"""Wait until an element is visible before scrolling.
Args:
element (ElementAPI): The splinter element to be waited on.
wait_time (int): The time in seconds to wait.
"""
end_time = time.time() + wait_time
while time.time() < end_time:
if element and element.visible:
return True
return False
|
b3e4ed391098131bc62bad4277f8ef163e129d20
| 27,224 |
import asyncio
async def meta(request):
"""Return ffprobe metadata"""
async def stream_fn(response):
async with sem:
cmd = ['ffprobe',
'-v',
'quiet',
'-i',
request.args.get('url'),
'-print_format',
'json',
'-show_format',
'-show_streams'
]
proc = await asyncio.create_subprocess_exec(*cmd,
stdout=asyncio.subprocess.PIPE
)
while True:
chunk = await proc.stdout.read(PYTHUMBIO_CHUNKSIZE)
if not chunk:
break
response.write(chunk)
return stream(stream_fn, content_type='application/json')
|
65bbedf428196ac8317716287550ee868bb6b99e
| 27,225 |
from typing import Tuple
import ctypes
def tpictr(
sample: str, lenout: int = _default_len_out, lenerr: int = _default_len_out
) -> Tuple[str, int, str]:
"""
Given a sample time string, create a time format picture
suitable for use by the routine timout.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html
:param sample: A sample time string.
:param lenout: The length for the output picture string.
:param lenerr: The length for the output error string.
:return:
A format picture that describes sample,
Flag indicating whether sample parsed successfully,
Diagnostic returned if sample cannot be parsed
"""
sample = stypes.string_to_char_p(sample)
pictur = stypes.string_to_char_p(lenout)
errmsg = stypes.string_to_char_p(lenerr)
lenout = ctypes.c_int(lenout)
lenerr = ctypes.c_int(lenerr)
ok = ctypes.c_int()
libspice.tpictr_c(sample, lenout, lenerr, pictur, ctypes.byref(ok), errmsg)
return stypes.to_python_string(pictur), ok.value, stypes.to_python_string(errmsg)
|
73f098aa71b796d1a586f9e8666e4277339b7c0d
| 27,226 |
def survey_page(request):
"""View extracts the data that a volunteer fills out from the monthly survey
and updates the data on app side accordingly"""
if request.method == 'GET':
request.session['vol_id'] = request.GET.get('id')
request.session['vol_email'] = request.GET.get('email')
request.session['vol_token'] = request.GET.get('token')
request.session['survey_month'] = request.GET.get('month')
request.session['survey_year'] = request.GET.get('year')
days = monthrange(int(request.session['survey_year']), int(request.session['survey_month']))[1]
vol_id = request.session['vol_id']
vol_token = request.session['vol_token']
    if not (vol_id and vol_token):  # reject the link if either value is missing
return render(request, "scheduling_application/bad_link.html", {})
volunteer = Volunteer.objects.get(id=vol_id)
month = request.session['survey_month']
context = {
'month': month,
'num_days': range(days)
}
# Validate the token inside of the URL
if vol_token != volunteer.survey_token:
return render(request, "scheduling_application/bad_link.html", {})
else:
return render(request, "scheduling_application/survey_sending/survey_page.html", context=context)
if request.method == 'POST' and 'unsubscribe' in request.POST:
vol_id = request.session['vol_id']
return render(request, "scheduling_application/unsubscribe.html", context={})
elif request.method == 'POST' and 'confirm_unsubscribe' in request.POST:
comms = request.POST.get('comms')
everything = request.POST.get('everything')
vol_id = request.session['vol_id']
# Unsubscribe SMS/Email
if comms:
volunteer = Volunteer.objects.get(id=vol_id)
unsub_comms(volunteer)
# Unsubscribe from entire service
if everything:
volunteer = Volunteer.objects.get(id=vol_id)
unsub_all(volunteer)
return render(request, "scheduling_application/survey_sending/survey_complete.html", {})
elif request.method == 'POST':
vol_id = request.session['vol_id']
option_list = request.POST.getlist('survey-value')
volunteer = Volunteer.objects.get(id=vol_id)
if int(request.session['survey_month']) < 10:
month_string = "0" + request.session['survey_month']
else:
month_string = request.session['survey_month']
regex = r'((' + month_string + r')[/]\d\d[/](' + request.session['survey_year'] + r'))'
volunteer.Days.all().filter(date__regex=regex).delete()
read_survey_data(option_list, volunteer, request.session['survey_month'], request.session['survey_year'])
return render(request, "scheduling_application/survey_sending/survey_complete.html", {})
|
fa9ac1a31782c0a5639d0d20d98cc41772b1ce09
| 27,227 |
def chebyu(n, monic=0):
"""Return nth order Chebyshev polynomial of second kind, Un(x). Orthogonal
over [-1,1] with weight function (1-x**2)**(1/2).
"""
base = jacobi(n,0.5,0.5,monic=monic)
if monic:
return base
factor = sqrt(pi)/2.0*_gam(n+2) / _gam(n+1.5)
base._scale(factor)
return base
|
b71c947e8f988fe3500339a10bd783ed8561da77
| 27,228 |
def spm(name, path, size, bos= -1, eos= -1, unk= 0, coverage= 0.9995):
"""-> SentencePieceProcessor
trains a sentence piece model of `size` from text file on `path`
and saves with `name`.
"""
SentencePieceTrainer.train(
"--model_prefix={name} \
--input={path} \
--vocab_size={size} \
--bos_id={bos} \
--eos_id={eos} \
--unk_id={unk} \
--unk_surface=☹ \
--character_coverage={coverage}".format(
coverage= coverage
, unk= unk
, eos= eos
, bos= bos
, size= size
, path= path
, name= name))
return load_spm(name + ".model")
|
df22367462839192bcd55093ddf8a2c5b15085f6
| 27,229 |
def list_reshape_bywindow(longlist, windowlen, step=1):
"""
    A function that uses a sliding window to split a long list into components.
    For example, the list
    [a, b, c, d, e]
    yields
    [[a, b], [c, d]]
    with windowlen = 2 and step = 2.
Parameters:
------------
longlist: original long list
windowlen: window length
    step: by default 1; use step=1 for overlapping windows and
          step=windowlen for non-overlapping windows
Returns:
--------
ic_list: intercept list
Example:
--------
>>> ic_list = list_reshape_bywindow(longlist, windowlen = 3)
"""
ic_list = []
i = 0
while len(longlist)>=(windowlen+step*i):
ic_list.append(longlist[(step*i):(windowlen+step*i)])
i+=1
return ic_list
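# Overlapping vs. non-overlapping windows:
print(list_reshape_bywindow([1, 2, 3, 4, 5], windowlen=2, step=2))
# [[1, 2], [3, 4]]
print(list_reshape_bywindow([1, 2, 3, 4, 5], windowlen=3, step=1))
# [[1, 2, 3], [2, 3, 4], [3, 4, 5]]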
|
ee501b49c34656f4c0a1353d36f542b231b3a925
| 27,230 |
def generate_test_repo() -> Repository:
""" gets you a test repo """
test_requester = Requester(
login_or_token="",
retry=False,
password=None,
jwt=None,
base_url="https://github.com/yaleman/github_linter/",
timeout=30,
pool_size=10,
per_page=100,
user_agent="",
verify=False,
) # type: ignore
testrepo = Repository(
test_requester,
{},
attributes={"full_name" : "testuser/test1", "name" : "test1"},
completed=True,
)
return testrepo
|
56ed5de055e0437bb00630f4cdb71e4547868e64
| 27,231 |
from ucsmsdk.mometa.comm.CommSyslogConsole import CommSyslogConsoleConsts
def syslog_local_console_exists(handle, **kwargs):
"""
Checks if the syslog local console already exists
Args:
handle (UcsHandle)
**kwargs: key-value pair of managed object(MO) property and value, Use
'print(ucscoreutils.get_meta_info(<classid>).config_props)'
to get all configurable properties of class
Returns:
(True/False, MO/None)
Example:
syslog_local_console_exists(handle, severity="alerts")
"""
dn = _syslog_dn + "/console"
mo = handle.query_dn(dn)
if not mo:
return False, None
kwargs['admin_state'] = CommSyslogConsoleConsts.ADMIN_STATE_ENABLED
mo_exists = mo.check_prop_match(**kwargs)
return (mo_exists, mo if mo_exists else None)
|
f4f0b1c50dd29fdf8d574ff4bd5b3f4b33b522f5
| 27,232 |
from collections import defaultdict

import numpy
def filter_inputs(inputlist, minimum_members, number_of_families):
"""
Removes functions that have fewer than minimum_members different hashes,
and returns a subset (number_of_families) different ones.
"""
temp = defaultdict(list)
for i in inputlist:
temp[i[1]].append((i[0], i[2]))
result = []
# Remove all functions with insufficient number of variants.
keys = [k for k in temp.keys()]
for sym in keys:
if len(temp[sym]) < minimum_members:
del temp[sym]
# Now choose a random subset of functions that remain.
keys = [k for k in temp.keys()]
keys.sort() # Necessary because of Python key order being nondeterministic.
# Deterministic seed for reproducibility.
numpy.random.seed(0)
subset = numpy.random.choice(keys, number_of_families, replace=False)
for key in subset:
result.extend([(simhash[0], key, simhash[1]) for simhash in temp[key]])
return result
|
a92d37a20964b543bbd89dd86b0a93166ffe0130
| 27,233 |
def is_phone_in_call_video_tx_enabled(log, ad):
"""Return if phone in tx_enabled video call.
Args:
log: log object.
ad: android device object
Returns:
True if phone in tx_enabled video call.
"""
return is_phone_in_call_video_tx_enabled_for_subscription(
log, ad, get_outgoing_voice_sub_id(ad))
|
ec51a84c43a808b1e5f750eddf1eedcc4f050158
| 27,235 |
def find_mapping_net_assn(context, network_id, host_id):
"""
Takes in a network id and the host id to return an SEA that creates that
mapping. If there's no association found, a None is returned
:context: The context used to call the dom API
:network_id: The neutron network id.
:host_id: The Host id of the host, for which we want the
association.
:returns: The associated Network Association. May be None
"""
return dom_api.network_association_find(context, host_id, network_id)
|
543516722a3011d7d859f8f46ae62fd2823989e0
| 27,237 |
def bubble_sort(seq):
"""Inefficiently sort the mutable sequence (list) in place.
seq MUST BE A MUTABLE SEQUENCE.
As with list.sort() and random.shuffle this does NOT return
"""
    changed = True
    while changed:
        changed = False
        for i in range(len(seq) - 1):
            if seq[i] > seq[i+1]:
                seq[i], seq[i+1] = seq[i+1], seq[i]
                changed = True
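# The sequence is sorted in place, like list.sort():
data = [5, 2, 4, 1, 3]
bubble_sort(data)
print(data)  # [1, 2, 3, 4, 5]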
|
be8c8b4dea93fb91f0ed9397dcd3a9fb9c5d4703
| 27,239 |
def numba_cuda_DeviceNDArray(xd_arr):
"""Return cupy.ndarray view of a xnd.xnd in CUDA device.
"""
cbuf = pyarrow_cuda_buffer(xd_arr)
# DERIVED
return pyarrow_cuda_buffer_as.numba_cuda_DeviceNDArray(cbuf)
|
80f820ac589434f407a1e05954cb59c308882540
| 27,240 |
from math import pi

def deg2rad(dd):
    """Convert an angle in decimal degrees to radians."""
    return dd / 180 * pi
|
cba29769452ed971a9934cae4f072724caf9a8d8
| 27,241 |
def createView(database, view_name, map_func):
"""
Creates and returns a Cloudant view.
"""
my_design_document = design_document.DesignDocument(database, "_design/names")
my_design_document.add_view(view_name, map_func)
return view.View(my_design_document, view_name, map_func)
|
77b8fbcbe33c8ae08605f4dff8dc7379dd686329
| 27,242 |
def find_modifiable_states(state_data):
""" Find indices into the state_data array,
Args:
state_data (ndarray): States array, in the form returned by cmd_states.get_cmd_states.fetch_states
Returns:
(ndarray): Numeric index of states that represent dwells with modifiable chip counts
(list): List of all permutations, where each row represents a single case (combination), and each column
represents the number of chips to be added for each state indexed by the returned ndarray (above)
Note:
The first element in the returned list represents 0's for each modifiable state, representing the baseline case.
"""
modifiable = (state_data['pcad_mode'] == 'NPNT') & (state_data['clocking'] == 1) & (
state_data['fep_count'] == state_data['ccd_count']) & (state_data['fep_count'] < 4)
states_ind = np.where(modifiable)[0]
cases = list(product([0, 1], repeat=len(states_ind)))
return states_ind, cases
|
61dc59766deb0b6b2c238fe4ce20fe819ef8c7d3
| 27,243 |
from osgeo import ogr

def get_datasource_bounding_box(datasource_uri):
"""Get datasource bounding box where coordinates are in projected units.
Args:
dataset_uri (string): a uri to a GDAL dataset
Returns:
bounding_box (list):
[upper_left_x, upper_left_y, lower_right_x, lower_right_y] in
projected coordinates
"""
datasource = ogr.Open(datasource_uri)
layer = datasource.GetLayer(0)
extent = layer.GetExtent()
# Reindex datasource extents into the upper left/lower right coordinates
bounding_box = [extent[0],
extent[3],
extent[1],
extent[2]]
    return bounding_box
|
0d262eafb535807c9f6ce38ca3485f731cc95c97
| 27,246 |
def distributions_to_lower_upper_bounds(model, negative_allowed=[], ppf=(0.05,0.95), save_to_model=False):
"""
    Converts scipy distributions to [lower, upper] bounds by taking the specified ppf quantiles.
Args:
model: The model object
negative_allowed: list of params which are allowed to be negative
        ppf: Quantile range of the pdf to take. Default is the 5th and 95th percentiles.
save_to_model: Boolean. True will save lower and upper bounds in place of scipy distributions.
Returns:
List of lower and upper bounds.
"""
bounds = []
for name, distribution in model.parameter_distributions.items():
upper = distribution.ppf(ppf[1])
lower = None
lower_ppf = ppf[0]
while not check_not_neg(lower, name, negative_allowed):
lower = distribution.ppf(lower_ppf)
lower_ppf += 0.01
bounds.append([lower, upper])
if save_to_model == True:
model.parameter_distributions[name] = [lower,upper]
for name, distribution in model.species_distributions.items():
upper = distribution.ppf(ppf[1])
lower = None
lower_ppf = ppf[0]
while not check_not_neg(lower, name, negative_allowed):
lower = distribution.ppf(lower_ppf)
lower_ppf += 0.01
bounds.append([lower, upper])
if save_to_model == True:
model.species_distributions[name] = [lower,upper]
return bounds
|
c86b53802e34ec71a9a72d498278900a36420f37
| 27,248 |
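A minimal sketch of the ppf-based bounding idea using scipy.stats directly; the model object and check_not_neg helper from the function above are not needed for the core calculation:
from scipy.stats import norm

dist = norm(loc=1.0, scale=0.5)
lower, upper = dist.ppf(0.05), dist.ppf(0.95)
print(lower, upper)  # roughly 0.178 and 1.822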
def create_feature_indices(header):
"""
Function to return unique features along with respective column indices
for each feature in the final numpy array
Args:
header (list[str]): description of each feature's possible values
Returns:
        feature_indices (dict): unique feature names as keys, with value
        types (discrete or continuous) and data column indices where present
"""
feature_indices = {}
for i, head in enumerate(header):
current = head.split("->")
str_name = current[0].replace(" ", "_")
if current[0] == "mask":
feature_indices["presence_" +
current[1].replace(" ", "_")] = ["discrete", i]
elif feature_indices == {} or str_name not in feature_indices:
if len(current) > 1:
feature_indices[str_name] = ["discrete", i]
else:
feature_indices[str_name] = ["continuous", i]
elif str_name in feature_indices:
feature_indices[str_name].extend([i])
return feature_indices
|
a29d8c4c8f3a31ad516216756b7eba7eb4110946
| 27,250 |
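A worked example with a hypothetical header, traced against the code above:
header = ["age", "mask->glucose", "icd9->250", "icd9->401"]
print(create_feature_indices(header))
# {'age': ['continuous', 0],
#  'presence_glucose': ['discrete', 1],
#  'icd9': ['discrete', 2, 3]}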
def lower(word):
"""Sets all characters in a word to their lowercase value"""
return word.lower()
|
f96b1470b3ab1e31cd1875ad9cbf9ed017aa0158
| 27,251 |
def band_pass(data, scale_one, scale_two):
"""
    Band-pass filter: the difference of two Gaussians,
    G(data, scale_one) - G(data, scale_two)
"""
bp = gaussian(data, scale=scale_one) - gaussian(data, scale=scale_two)
return bp
|
140074b49dc589641380a830b1cce8edb6445a45
| 27,252 |
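A minimal sketch, assuming the gaussian helper used by band_pass wraps a Gaussian smoother such as scipy.ndimage.gaussian_filter1d (an assumption; the original helper is not shown):
import numpy as np
from scipy.ndimage import gaussian_filter1d

def gaussian(data, scale):
    # assumed implementation of the smoother called by band_pass
    return gaussian_filter1d(data, sigma=scale)

signal = np.random.default_rng(0).normal(size=256)
bp = band_pass(signal, scale_one=2.0, scale_two=8.0)  # keeps mid-band detail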
def _in_dir(obj, attr):
"""Simpler hasattr() function without side effects."""
return attr in dir(obj)
|
f95e265d278e3014e8e683a872cd3b70ef6133c9
| 27,253 |
def load_stat_features_others_windows(patient_list, data_path="statistic_features.csv",
statistics_list=["std_x", "std_y", "std_z"], n_others_windows=40):
"""
Returns:
        X_all_data - ndarray of shape (n_records, n_new_features); each feature
            vector combines the features of the current window with those of
            its neighbors (n_others_windows // 2 windows before and after)
        y_all_data - ndarray of shape (n_records,)
"""
statistics_df = pd.read_csv(data_path)
X_all_data = []
y_all_data = []
for patient in patient_list:
X = np.array(statistics_df.loc[statistics_df.id == patient, statistics_list])
y = np.array(statistics_df.loc[statistics_df.id == patient, "sleep_stage"])
X_new = np.zeros((X.shape[0]-n_others_windows, X.shape[1]*(n_others_windows+1)))
for i in range(0, X.shape[0]-n_others_windows):
X_buff = X[i]
for j in range(1, n_others_windows+1):
X_buff = np.concatenate((X_buff, X[i+j]))
X_new[i] = X_buff
y = y[(n_others_windows//2): -(n_others_windows//2)]
X_all_data.append(X_new)
y_all_data.append(y)
X_all_data = np.concatenate(X_all_data, axis=0)
y_all_data = np.concatenate(y_all_data, axis=0)
return X_all_data, y_all_data
|
71de81b25740479f6fc1bc270cff35f33a70b357
| 27,256 |
def map_format(value, pattern):
"""
    Apply python string formatting on an object (pattern % value):
    .. sourcecode:: jinja
        {{ "Hello?"|map_format("%s - Foo!") }}
        -> Hello? - Foo!
"""
return soft_unicode(pattern) % (value)
|
53273dd29d7d0a0e11981fc7de948e930e966fc4
| 27,257 |
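A usage sketch registering map_format as a Jinja2 filter; note that soft_unicode comes from older markupsafe releases (newer ones rename it soft_str), so these imports are assumptions:
from jinja2 import Environment
from markupsafe import soft_unicode  # dependency used by map_format above

env = Environment()
env.filters['map_format'] = map_format
print(env.from_string('{{ "Hello?"|map_format("%s - Foo!") }}').render())
# Hello? - Foo!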
def Cadzow(Xk, K, N, tol_ratio=10000, max_iter=10):
"""
Implement Cadzow denoising
Parameters
----------
Xk : signal to denoise
K : number of most significant members to take
N : number of samples in the signal
tol_ratio : min ratio of (K+1)th singular value / Kth singular value
to stop iterations
max_iter : maximum number of iterations to run
Returns
-------
X : denoised signal
"""
X = Xk.copy()
ratio = 0
iters = 0
while (ratio < tol_ratio and iters < max_iter):
iters += 1
# perform svd
U, s, Vh = svd(toeplitz(X[K:],X[np.arange(K,-1,-1)]))
# update ratio of singular values for cutoff
ratio = s[K-1] / s[K]
# build S' : first K diagonals of S
s_ = s[:K]
sz1 = U.shape[1]
sz2 = Vh.shape[0]
S_ = np.zeros(shape=(sz1, sz2))
for elem,(i,j) in enumerate(zip(np.arange(K),np.arange(K))):
S_[i,j] = s_[elem]
# least squares approx. for A
A_ = U @ S_ @ Vh
# denoised Xk is the average of the diagonals
for idx, off in enumerate(np.arange(K,K-N,-1)):
temp = np.mean(np.diagonal(A_,offset=off))
X[idx] = temp
return X
|
3dfffcf4eeb0b9765059f327b327375378007825
| 27,258 |
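A usage sketch for Cadzow on a noisy complex exponential; the numpy and scipy.linalg imports are assumed to match the svd and toeplitz calls the function makes:
import numpy as np
from scipy.linalg import svd, toeplitz

N = 64
t = np.arange(N)
clean = np.exp(2j * np.pi * 0.1 * t)                  # one complex exponential
noisy = clean + 0.1 * np.random.default_rng(1).normal(size=N)
denoised = Cadzow(noisy, K=1, N=N)                    # K = number of components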
def toposortGroup(candidateIrefs):
"""Given a set of IRefs, returns a list of irefs toposorted based on the include graph."""
graph = {}
for iRef in candidateIrefs:
graph[iRef] = set(includePaths(iRef))
candidateSet = set(candidateIrefs)
output = []
for group in toposort.toposort(graph):
group = list(group.intersection(candidateSet))
group.sort()
output += group
return output
|
48166f439a5a9c4ad6ef8abd5c9b9e8dd6559888
| 27,259 |
def make_template(template):
"""Given an OpenSearch template, return a Template instance for it.
>>> template = make_template('http://localhost/search?q={term}')
>>> template.substitute(term='opensearch syntax')
'http://localhost/search?q=opensearch+syntax'
>>>
"""
terms = decompose_template(template)
return Template(template, terms)
|
ad6729cf5c4c2d9cf2198781e4566809d2a8f2b9
| 27,260 |
def search(T, dist, w, i=0):
"""Searches for w[i:] in trie T with distance at most dist
"""
if i == len(w):
if T is not None and T.is_word and dist == 0:
return ""
else:
return None
if T is None:
return None
f = search(T.s[w[i]], dist, w, i + 1) # matching
if f is not None:
return w[i] + f
if dist == 0:
return None
for c in ascii_letters:
f = search(T.s[c], dist - 1, w, i) # insertion
if f is not None:
return c + f
f = search(T.s[c], dist - 1, w, i + 1) # substitution
if f is not None:
return c + f
return search(T, dist - 1, w, i + 1) # deletion
|
926a0c3e50d38ed1ad7b6e66e8e0a85e24716d89
| 27,261 |
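The search function above assumes a trie node with an is_word flag and a child map T.s that yields None for absent letters; a minimal compatible sketch (an assumption about the original node class):
from collections import defaultdict
from string import ascii_letters  # also used by search above

class TrieNode:
    def __init__(self):
        self.is_word = False
        self.s = defaultdict(lambda: None)  # child nodes, None when absent

def add_word(root, word):
    node = root
    for c in word:
        if node.s[c] is None:
            node.s[c] = TrieNode()
        node = node.s[c]
    node.is_word = True

root = TrieNode()
add_word(root, "hello")
print(search(root, 1, "helo"))  # "hello": one insertion away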
def get_weighted_embeddings(embeddings, weights):
"""Multiply a sequence of word embeddings with their weights
:param embeddings: a sequence of word embeddings got from
embedding_lookup, size of [batch_size, seq_len, embed_dim]
:param weights: a sequence of weights for each word, size of [batch_size,
seq_len]
:return: a sequence of weighted word embeddings, size of [batch_size,
seq_len, embed_dim]
"""
    # transpose to [batch_size, embed_dim, seq_len] so the per-word weights
    # broadcast across the embedding dimension
    embeddings = tf.transpose(embeddings, perm=[0, 2, 1])
    weights = tf.expand_dims(weights, 1)
    embeddings = tf.multiply(embeddings, weights)
    # transpose back to [batch_size, seq_len, embed_dim]
    embeddings = tf.transpose(embeddings, perm=[0, 2, 1])
return embeddings
|
741d68036cc7df4060403f3bf9f2acd08b16466c
| 27,262 |
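The transpose dance above can also be written with broadcasting; an equivalent sketch that produces the same result:
import tensorflow as tf

def get_weighted_embeddings_v2(embeddings, weights):
    # expand weights to [batch_size, seq_len, 1] and let broadcasting scale
    # every embedding dimension of each word
    return embeddings * tf.expand_dims(weights, -1)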
def _adjust_values(females, males):
"""
Adjusting the values as the man moves in with the woman
"""
females = females.copy()
males = males.copy()
males.loc[:,"hid"] = females["hid"].tolist()
males.loc[:,"east"] = females["east"].tolist()
males.loc[:,"hhweight"] = females["hhweight"].tolist()
males.loc[:,"in_couple"] = 1
females.loc[:,"in_couple"] = 1
females.loc[:, "hhv"] = 0 # Make women the head of household
males.loc[:, "hhv"] = 1 # Men are "only" partner
return females, males
|
d139869b73e06fb917f843e86d135d1d9db3f4e3
| 27,263 |
def create_other_features(data):
"""Create columns for each other feature extracted."""
# Features list
features_list = ['Fibra_ottica', 'Cancello_elettrico', 'Cantina',
'Impianto_di_allarme', 'Mansarda', 'Taverna',
'Cablato', 'Idromassaggio', 'Piscina']
# Create column for each extracted feature
for feature in features_list:
mask = data['Altre_caratteristiche'].apply(lambda x: feature in x)
data[feature] = np.where(mask, 'sì', 'no')
return data
|
20d2f7e71c06952f2604004224fa113ab9ec88bb
| 27,264 |
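A small demo on a hypothetical listings frame, where each cell of Altre_caratteristiche is a list of extracted feature strings:
import pandas as pd

df = pd.DataFrame({
    'Altre_caratteristiche': [['Cantina', 'Piscina'], ['Fibra_ottica'], []]
})
df = create_other_features(df)
print(df[['Cantina', 'Piscina', 'Fibra_ottica']])
#   Cantina Piscina Fibra_ottica
# 0      sì      sì           no
# 1      no      no           sì
# 2      no      no           no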
def handler_good():
"""Return True for a good event handler."""
return True
|
302ea021276cb9be2d5e98c2a09776f4ee53cc97
| 27,265 |
def form_gov():
"""
    Collects the data from the government form and redirects the user to
    the appropriate results page to report the final results.
"""
collected_data = []
form_gov = InputData_gov()
if request.method == "POST":
try:
collected_data.append("Government")
collected_data.append(request.form.get("state"))
collected_data.append(request.form.get("location"))
collected_data.append(request.form.get("land_ava"))
collected_data.append(request.form.get("goal_renewable"))
collected_data.append(request.form.get("api_key"))
# run "build_config.py" to build file for accessing NREL data
build_hscfg.config_file(collected_data[5])
# input data to wrapper function
wtk = h5pyd.File("/nrel/wtk-us.h5", "r")
if collected_data[4] == '':
results = wrapper.wrapper(
wtk, collected_data[2], collected_data[1],
float(collected_data[3]))
else:
results = wrapper.wrapper(
wtk, collected_data[2], collected_data[1],
float(collected_data[3]), goal=int(collected_data[4]))
return redirect(url_for("results_gov", gov_results=results))
except IndexError:
flash("ERROR: Check spelling of 'City/Town' or try a nearby city")
return render_template("form_gov.html", form_gov=form_gov)
except OSError:
flash("ERROR: API key not accepted")
return render_template("form_gov.html", form_gov=form_gov)
except ValueError:
flash("Error: Land available must be a number")
return render_template("form_gov.html", form_gov=form_gov)
return render_template("form_gov.html", form_gov=form_gov)
|
483a3ae4746a555bf1ab80252659606bca1b5447
| 27,266 |
from io import BytesIO
def convert_to_grayscale(buffer):
    """Converts the image in the given BytesIO object to grayscale.
    Args:
        buffer (BytesIO): The original image to convert. Must be in RGB mode.
    Returns:
        BytesIO: The grayscale version of the original image.
    Raises:
        ValueError: If the provided image is not in RGB mode.
    """
    original = Image.open(buffer)
    grayscale = BytesIO()  # image data is binary, so a bytes buffer is required
    _convert_to_grayscale(original).save(grayscale, format=original.format)
return grayscale
|
8aa632768da7c49e82923074c3057cffe3ed4d51
| 27,267 |
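The private helper _convert_to_grayscale is not shown; a plausible sketch using Pillow's mode conversion (an assumption, not the original implementation):
from PIL import Image

def _convert_to_grayscale(image):
    if image.mode != "RGB":
        raise ValueError("image must be in RGB mode")
    return image.convert("L")  # 8-bit grayscale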
def line_visible(plaza_geometry, line, delta_m):
""" check if the line is "visible", i.e. unobstructed through the plaza"""
intersection_line = plaza_geometry.intersection(line)
# a line is visible if the intersection has the same length as the line itself, within a given delta
delta = meters_to_degrees(delta_m)
return abs(line.length - intersection_line.length) <= delta
|
aadc340eddb5f4036af1e8131ced244aa081c75d
| 27,268 |
def bad_gateway(message="Bad gateway"):
"""
A shortcut for creating a :class:`~aiohttp.web.Response` object with a ``502`` status and the JSON body
``{"message": "Bad gateway"}``.
:param message: text to send instead of 'Bad gateway'
:type message: str
:return: the response
:rtype: :class:`aiohttp.Response`
"""
return json_response({
"id": "bad_gateway",
"message": message
}, status=502)
|
9f9592b53b4e08b5c089ed2ad32754b95b8dcdb9
| 27,269 |
def chromatic_induction_factors(n: FloatingOrArrayLike) -> NDArray:
"""
Return the chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.
Parameters
----------
n
Function of the luminance factor of the background :math:`n`.
Returns
-------
:class:`numpy.ndarray`
Chromatic induction factors :math:`N_{bb}` and :math:`N_{cb}`.
Examples
--------
>>> chromatic_induction_factors(0.2) # doctest: +ELLIPSIS
array([ 1.000304, 1.000304])
"""
n = as_float_array(n)
with sdiv_mode():
N_bb = N_cb = as_float(0.725) * spow(sdiv(1, n), 0.2)
N_bbcb = tstack([N_bb, N_cb])
return N_bbcb
|
14f43cbc64aa1904eb38fa2442b33c840aad7275
| 27,270 |
def get_correctly_labeled_entries(all_entries):
"""Get entries that are labeled and evaluated as correct."""
    return [
        # columns 9 and 10 hold the "is labeled" and "evaluated correct" flags
        entry for entry in all_entries if convert_to_bool(entry[9]) and convert_to_bool(entry[10])
    ]
|
a61251165629c9bfff3d412c8f4be10eb5b8a5ac
| 27,271 |
def data_context_connectivity_context_connectivity_serviceuuid_latency_characteristictraffic_property_name_delete(uuid, traffic_property_name): # noqa: E501
"""data_context_connectivity_context_connectivity_serviceuuid_latency_characteristictraffic_property_name_delete
removes tapi.topology.LatencyCharacteristic # noqa: E501
:param uuid: Id of connectivity-service
:type uuid: str
:param traffic_property_name: Id of latency-characteristic
:type traffic_property_name: str
:rtype: None
"""
return 'do some magic!'
|
f8409c09d84ac223f8b3081e4396bc9af86c31c0
| 27,272 |
def tag_state_quantities(blocks, attributes, labels, exception=False):
""" Take a stream states dictionary, and return a tag dictionary for stream
quantities. This takes a dictionary (blk) that has state block labels as
keys and state blocks as values. The attributes are a list of attributes to
tag. If an element of the attribute list is list-like, the fist element is
the attribute and the remaining elements are indexes. Lables provides a list
of attribute lables to be used to create the tag. Tags are blk_key + label
for the attribute.
Args:
blocks (dict): Dictionary of state blocks. The key is the block label to
be used in the tag, and the value is a state block.
attributes (list-like): A list of attriutes to tag. It is okay if a
particular attribute does not exist in a state bock. This allows
you to mix state blocks with differnt sets of attributes. If an
attribute is indexed, the attribute can be specified as a list or
tuple where the first element is the attribute and the remaining
elements are indexes.
labels (list-like): These are attribute lables. The order corresponds to the
attribute list. They are used to create the tags. Tags are in the
form blk.key + label.
exception (bool): If True, raise exceptions releated to invalid or
missing indexes. If false missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with differnt
indexing. (default is True)
Return:
(dict): Dictionary where the keys are tags and the values are model
attributes, usually Pyomo component data objects.
"""
tags={}
if labels is None:
lables = attributes
for a in attributes:
if isinstance(a, (tuple, list)):
if len(a) == 2:
# in case there are multiple indexes and user gives tuple
label = f"{a[0]}[{a[1]}]"
if len(a) > 2:
label = f"{a[0]}[{a[1:]}]"
else:
label = a[0]
for key, s in blocks.items():
for i, a in enumerate(attributes):
j = None
if isinstance(a, (list, tuple)):
# if a is list or tuple, the first element should be the
# attribute and the remaining elements should be indexes.
if len(a) == 2:
j = a[1] # catch user supplying list-like of indexes
if len(a) > 2:
j = a[1:]
#if len(a) == 1, we'll say that's fine here. Don't know why you
#would put the attribute in a list-like if not indexed, but I'll
#allow it.
a = a[0]
v = getattr(s, a, None)
if j is not None and v is not None:
try:
v = v[j]
except KeyError:
if not exception:
v = None
else:
_log.error(f"{j} is not a valid index of {a}")
raise KeyError(f"{j} is not a valid index of {a}")
try:
value(v, exception=False)
except TypeError:
if not exception:
v = None
else:
_log.error(
f"Cannot calculate value of {a} (may be subscriptable)")
raise TypeError(
f"Cannot calculate value of {a} (may be subscriptable)")
except ZeroDivisionError:
pass # this one is okay
if v is not None:
tags[f"{key}{labels[i]}"] = v
return tags
|
74df558808c1db1e59f27ebe320e8931c291eb28
| 27,273 |
def instancesUserLookup(query=None, query_type=None):
    """
    Return a list of sites to which the requested user belongs.
    Displayed on /search.
    """
    # The same role fields are searched whether the query is a username or an
    # email address; build the Mongo $or clause for the requested field.
    roles = ['site_owner', 'site_editor', 'form_manager', 'edit_my_content',
             'content_editor', 'configuration_manager', 'campaign_manager',
             'access_manager']
    if query_type in ('username', 'email_address'):
        # Case-insensitive regex handles mixed case in email addresses
        kwargs = {'$or': [{'users.{0}.{1}'.format(query_type, role):
                           {'$regex': query, '$options': 'i'}}
                          for role in roles]}
    results, totalItems = getAllResults(atlasType='statistics', **kwargs)
    # Get list of all instances
    instanceList = []
    for r in results:
        instance = get_internal(
            'sites', **{'_id': r['site']})[0]['_items'][0]
        for role, user in r['users'][query_type].items():
            # Handle mixed case in email addresses
            if query.lower() in lowerList(user):
                instanceList.append((instance['_id'], instance['path'], role))
    return instanceList
|
27b778fb1d199497c5529754bf0cbbe99357ddcb
| 27,276 |
from math import pi
def volume_kerucut_melingkar(radius: float, tinggi: float) -> float:
    """
    Volume of a right circular cone.
    Reference: https://en.wikipedia.org/wiki/Cone
    >>> volume_kerucut_melingkar(2, 3)
    12.566370614359172
    """
    return pi * pow(radius, 2) * tinggi / 3.0
|
2f0ddb7b1bd75ec1ee637135f4d5741fda8af328
| 27,277 |
def vgg19(pretrained=False, **kwargs):
"""VGG 19-layer model (configuration "E")
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfg['E']), **kwargs)
if pretrained:
model.load_state_dict(load_url(model_urls['vgg19']))
return model
|
2ff08d30dc82297d5d497f55894e62766dd35019
| 27,278 |
def xmprv_from_seed(seed: Octets, version: Octets, decode: bool = True) -> Union[bytes, str]:
    """Derive the master extended private key from the seed (str if decode, else bytes)."""
if isinstance(version, str): # hex string
version = bytes.fromhex(version)
if version not in PRV:
m = f"invalid private version ({version})"
raise ValueError(m)
# serialization data
xmprv = version # version
xmprv += b'\x00' # depth
xmprv += b'\x00\x00\x00\x00' # parent pubkey fingerprint
xmprv += b'\x00\x00\x00\x00' # child index
# actual extended key (key + chain code) derivation
if isinstance(seed, str): # hex string
seed = bytes.fromhex(seed)
hd = HMAC(b"Bitcoin seed", seed, sha512).digest()
mprv = int_from_octets(hd[:32])
xmprv += hd[32:] # chain code
xmprv += b'\x00' + mprv.to_bytes(32, 'big') # private key
xmprv = base58.encode_check(xmprv)
return xmprv.decode('utf-8') if decode else xmprv
|
f7fb2a06d3e812e24b18453304c9a10476bda31d
| 27,279 |
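A usage sketch with the BIP32 test vector 1 seed and the mainnet private version bytes 0x0488ADE4; this assumes that version constant is registered in the module's PRV set:
seed = "000102030405060708090a0b0c0d0e0f"  # BIP32 test vector 1
version = "0488ade4"                        # mainnet xprv prefix
xprv = xmprv_from_seed(seed, version)
print(xprv)  # expected to start with "xprv9s21ZrQH143K"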