content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def get_policy_list(cluster_name):
"""
    Get the list of storage policies.
:param cluster_name:
:return:
"""
data = []
status = ''
message = ''
resp = {"status": status, "data": data, "message": message}
pm = PolicyManager(cluster_name)
try:
sfo_policys = pm.policys()
for policy in sfo_policys:
policy_dict = {
"policy_num": policy,
"policy_name": sfo_policys[policy].get('name'),
"deprecated": sfo_policys[policy].get('deprecated', 'no'),
"policy_type": sfo_policys[policy].get('policy_type','replication')
}
data.append(policy_dict)
    except Exception as error:
raise ValueError(str(error))
if data:
status = 200
message = 'OK'
else:
status = 404
message = 'Not Found Record'
resp.update({"status": status, "data": data, "message": message})
return resp, status
|
5b53acebbd7eaf52975bf393a4b4f87c7fe7e550
| 33,404 |
def package_air_ticket(request):
""" view air ticket package for customer """
air_tickets = AirTicket.objects.all()[::-1]
package_air_tickets = PackageAirTicket.objects.all()[::-1]
context = {
'air_tickets': air_tickets,
'package_air_tickets': package_air_tickets,
'user_info': Employee.objects.get(employee_id=request.user.username),
'cart': Cart.objects.filter(created_by__employee_id=request.user.username).count,
}
return render(request, 'employee/package_air_ticket.html', context)
|
0e7f70d3f49d6a5ef25f669d3f58768e933050b2
| 33,405 |
def CheckNodeJSPackage(package):
""" Check whether a node.js package is installed """
cmd = "node -e \"var d3=require('%s');\"" % package
x = RunCommand(cmd)
return x == 0
|
ba6709f641548bab52aafdc589f4730268b3b778
| 33,406 |
import re
space_re = re.compile(r'\s+')  # assumed module-level pattern used below to collapse whitespace runs
def compilable(tex):
"""Return tex-math content wrapped so that it can be compiled using tex."""
# remove "\usepackage{pmc}". It's not clear what the contents
# of this package are (I have not been able to find it), but
# compilation more often succeeds without it than with it.
tex = tex.replace('\\usepackage{pmc}', '')
# replace "\documentclass{minimal}" with "\documentclass{slides}".
# It's not clear why, but some font commands (e.g. "\tt") appear
# to fail with the former.
tex = re.sub(r'(\\documentclass(?:\[[^\[\]]*\])?\{)minimal(\})',
r'\1slides\2', tex)
    # replace any amount of consecutive whitespace by a single plain space
tex = space_re.sub(' ', tex)
return tex
|
8381999e503194d7d9f2a1fbe751775a6bf48dd2
| 33,407 |
def filter_matches(matches, threshold=0.75):
"""Returns filterd copy of matches grater than given threshold
Arguments:
matches {list(tuple(cv2.DMatch))} -- List of tupe of cv2.DMatch objects
Keyword Arguments:
threshold {float} -- Filter Threshold (default: {0.75})
Returns:
list(cv2.DMatch) -- List of cv2.DMatch objects that satisfy ratio test
"""
filtered = []
for m, n in matches:
if m.distance < threshold * n.distance:
filtered.append(m)
return filtered
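A minimal usage sketch of the ratio test above; the stand-in match objects below are illustrative, since the filter only needs a `.distance` attribute (real pairs would come from cv2.BFMatcher.knnMatch with k=2).
from types import SimpleNamespace
matches = [(SimpleNamespace(distance=10.0), SimpleNamespace(distance=40.0)),  # passes: 10 < 0.75 * 40
           (SimpleNamespace(distance=30.0), SimpleNamespace(distance=35.0))]  # fails: 30 >= 0.75 * 35
print(len(filter_matches(matches)))  # 1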
|
c2cbec1da42d96575eb422bfdda6a1351e24508b
| 33,408 |
def V_PT(P, T, substance, eos = IDEAL_GAS):
"""Returns specific volume."""
mw = get_parameter(substance, 'molecular-weight')
Vmol = eos.Vmol_PT(P, T, substance)
return Vmol / mw
|
776164131c2e59295bcdf1100697c5ca84931074
| 33,409 |
def checks(poly_fit_left, poly_fit_right,
poly_fitx_left, poly_fitx_right,
poly_fity):
"""Check if found lanes make sense."""
# check curvature
y_eval = np.max(poly_fity)
curvature_left = (((1 + (2*poly_fit_left[0]*y_eval +
poly_fit_left[1])**2)**1.5) /
np.absolute(2*poly_fit_left[0]))
curvature_right = (((1 + (2*poly_fit_right[0]*y_eval +
poly_fit_right[1])**2)**1.5) /
np.absolute(2*poly_fit_right[0]))
if np.maximum(np.log10(curvature_left)/np.log10(curvature_right),
np.log10(curvature_right)/np.log10(curvature_left)) > 1.4:
return False
# check parallel
dist = poly_fitx_left-poly_fitx_right
if np.mean(np.abs(dist-np.mean(dist))) > 100:
return False
return True
|
a5d2a807a29dfacc4b184aa591577161c3c5933c
| 33,410 |
from pathlib import Path
def run(
dataset_file: str = Path.joinpath(DATA_DIR, "processed", "winequality_clean.csv"),
    params_file: str = Path.joinpath(MODELS_DIR, "params.json"),
):
"""
Run the training process
"""
dataset = load_dataset(dataset_file)
train_test = split_dataset(dataset)
params = load_params(params_file)
res = train(train_test, params)
return res
|
f2f82baebe910a8ada8dbfe77791e33d2a72e168
| 33,411 |
import string
import random
def createRandomStrings(l,n,chars=None,upper=False):
"""create list of l random strings, each of length n"""
names = []
if chars == None:
chars = string.ascii_lowercase
#for x in random.sample(alphabet,random.randint(min,max)):
if upper == True:
chars = [i.upper() for i in chars]
for i in range(l):
val = ''.join(random.choice(chars) for x in range(n))
names.append(val)
return names
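Illustrative call; the seed is only there to make the sketch reproducible.
random.seed(0)
print(createRandomStrings(3, 8))              # three random 8-character lowercase strings
print(createRandomStrings(3, 8, upper=True))  # three random 8-character uppercase strings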
|
678b5aeb3cc98ae2b47822500fcbaff05081058a
| 33,412 |
def create_model(reader_input, base_model=None, is_training=True, args=None):
"""
given the base model, reader_input
return the output tensors
"""
labels = reader_input[-1]
cls_feats = base_model.final_sentence_representation
cls_feats = fluid.layers.dropout(
x=cls_feats,
dropout_prob=0.1,
dropout_implementation="upscale_in_train")
logits = fluid.layers.fc(
input=cls_feats,
size=2,
param_attr=fluid.ParamAttr(
name="cls_out_w",
initializer=fluid.initializer.TruncatedNormal(scale=0.02)),
bias_attr=fluid.ParamAttr(
name="cls_out_b", initializer=fluid.initializer.Constant(0.)))
num_seqs = fluid.layers.fill_constant(shape=[1], value=512, dtype='int64')
output_tensors = {}
output_tensors['labels'] = labels
output_tensors['logits'] = logits
output_tensors['num_seqs'] = num_seqs
return output_tensors
|
9afcb97036c959ddf476e9844ab18cdf792ddbbd
| 33,413 |
def shot_mix(df1):
"""
Calculates the average of the poisson models for shots and shots on target
"""
df1["H_att_Poi_mix"] = (3*df1["H_att_PoiS"] + 3*df1["H_att_PoiST"] + 2*df1["H_att_PoiG"]) / 8
df1["A_att_Poi_mix"] = (3*df1["A_att_PoiS"] + 3*df1["A_att_PoiST"] + 2*df1["A_att_PoiG"]) / 8
df1["H_def_Poi_mix"] = (3*df1["H_def_PoiS"] + 3*df1["H_def_PoiST"] + 2*df1["H_def_PoiG"]) / 8
df1["A_def_Poi_mix"] = (3*df1["A_def_PoiS"] + 3*df1["A_def_PoiST"] + 2*df1["A_def_PoiG"]) / 8
# df1["H_att_Poi_mix"] = (df1["H_att_PoiS"] + df1["H_att_PoiST"]) / 2
# df1["A_att_Poi_mix"] = (df1["A_att_PoiS"] + df1["A_att_PoiST"]) / 2
# df1["H_def_Poi_mix"] = (df1["H_def_PoiS"] + df1["H_def_PoiST"]) / 2
# df1["A_def_Poi_mix"] = (df1["A_def_PoiS"] + df1["A_def_PoiST"]) / 2
df1["H_xG_Poi_mix"] = df1["H_att_Poi_mix"] * df1["A_def_Poi_mix"] * df1["Lg_HG"]
df1["A_xG_Poi_mix"] = df1["A_att_Poi_mix"] * df1["H_def_Poi_mix"] * df1["Lg_AG"]
df1["H_pred_Poi_mix"], df1["D_pred_Poi_mix"], df1["A_pred_Poi_mix"], df1["O_pred_Poi_mix"], df1["U_pred_Poi_mix"] = get_probs(df1["H_xG_Poi_mix"], df1["A_xG_Poi_mix"])
df1["rps_Poi_mix"] = rps(df1["H_pred_Poi_mix"], df1["D_pred_Poi_mix"], df1["A_pred_Poi_mix"], df1["Home"], df1["Draw"], df1["Away"])
return df1
|
6d7fe94ce4e90de26d0c5154ad2d0ae54d7409ca
| 33,414 |
def jobs():
"""
    List of jobs.
"""
rows = db(db.job).select(orderby=db.job.created_on)
return locals()
|
067d7f9840cdd0ee52c7c2fefe3d5942170203c3
| 33,416 |
import base64
async def reserve_subdomain(request: Request, req: t.Optional[ReservationRequest] = None):
"""
Reserve a subdomain for use with an application.
If no subdomain is given, a random subdomain is reserved and returned.
If a set of SSH public keys is given, they are associated with the subdomain and the
fingerprints are returned. No token is returned in this case.
If no SSH public keys are given, a single-use token is returned that can be used to
associate public keys with the subdomain.
"""
if not req:
req = ReservationRequest()
# Begin with the maximum number of attempts
remaining_attempts = settings.generate_domain_max_attempts
while remaining_attempts > 0:
# As soon as we enter the loop, the remaining attempts decrease
remaining_attempts = remaining_attempts - 1
# Work out the subdomain that we will attempt to use
subdomain = req.subdomain if req.subdomain is not None else generate_random_subdomain()
# Work out what Consul operations we want to perform
# We perform the operations within a Consul transaction to ensure atomicity
# We use a check-and-set (CAS) operation with an index of zero for the subdomain record,
# which means the operation will only succeed if it creates the key - this means a
# subdomain can only be reserved once
if req.public_keys:
# If public keys are given, create the subdomain record with a value of 1
# and create/update the public key associations at the same time
# No token is returned
operations = [
{
"KV": {
"Verb": "cas",
"Index": 0,
"Key": f"{settings.consul_key_prefix}/subdomains/{subdomain}",
"Value": base64.b64encode(b"1").decode(),
},
},
] + [
{
"KV": {
# Use regular set operations to update the public key records, as we
# don't care about splatting existing records (a well-behaved client
# should generate a new keypair for each subdomain anyway)
"Verb": "set",
# Use a URL-safe fingerprint as the key, otherwise the "/" characters form a
# nested structure that we don't want
"Key": f"{settings.consul_key_prefix}/pubkeys/{fingerprint_urlsafe(pubkey)}",
# The value is the subdomain, which can be looked up by key later
"Value": base64.b64encode(subdomain.encode()).decode(),
}
}
for pubkey in req.public_keys
]
else:
# If no public keys are given, create the subdomain record with a value of 0
# A token will be returned that contains the subdomain and the Consul modify index,
# signed with a secret to ensure data integrity
# The associate operation will then use the subdomain and modify index from the token
# it receives to perform another CAS operation which changes the value of the
# subdomain record from 0 to 1, registering the public keys at the same time
# This operation will only succeed on the first attempt, making the tokens single use
operations = [
{
"KV": {
"Verb": "cas",
"Index": 0,
"Key": f"{settings.consul_key_prefix}/subdomains/{subdomain}",
"Value": base64.b64encode(b"0").decode(),
},
},
]
async with AsyncClient(base_url = settings.consul_url) as client:
response = await client.put("/v1/txn", json = operations)
# If the subdomain already exists, the response will be a 409
# How we react to this depends on whether the request specified a subdomain or
# if we generated one
if response.status_code == 409:
if req.subdomain is not None:
raise HTTPException(
status_code = 409,
detail = "The requested subdomain has already been reserved."
)
else:
continue
response.raise_for_status()
# If we get to here, the domain was registered successfully and we should break out
# after extracting the modify index
# The response should be JSON with a single response
modify_index = response.json()["Results"][0]["KV"]["ModifyIndex"]
break
else:
# No subdomain allocated after maximum number of attempts
raise HTTPException(
status_code = 409,
detail = "Unable to allocate a subdomain after {} attempts.".format(
settings.generate_domain_max_attempts
)
)
    # The FQDN is the reserved subdomain combined with the configured base domain
fqdn = f"{subdomain}.{settings.base_domain}"
if req.public_keys:
# When the request contained public keys, return the fingerprints
return Reservation(
subdomain = subdomain,
fqdn = fqdn,
# Return non-URL-safe fingerprints so they can be compared with the output of OpenSSH
fingerprints = [fingerprint(pubkey) for pubkey in req.public_keys]
)
else:
# If no keys were given, return a signed token containing the subdomain and modify index
token_data = f"{subdomain}.{modify_index}"
signature = generate_signature(token_data)
token = base64.urlsafe_b64encode(f"{token_data}.{signature}".encode()).decode()
return Reservation(subdomain = subdomain, fqdn = fqdn, token = token)
|
11d56868f6a0f4439ada5ad0755333c1b0ad9f74
| 33,417 |
def get_previous_quarter(today):
"""There are four quarters, 01-03, 04-06, 07-09, 10-12.
If today is in the last month of a quarter, assume it's the current quarter
that is requested.
"""
end_year = today.year
end_month = today.month - (today.month % 3) + 1
if end_month <= 0:
end_year -= 1
end_month += 12
if end_month > 12:
end_year += 1
end_month -= 12
end = '%d-%02d-01' % (end_year, end_month)
begin_year = end_year
begin_month = end_month - 3
if begin_month <= 0:
begin_year -= 1
begin_month += 12
begin = '%d-%02d-01' % (begin_year, begin_month)
return begin, end
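A short worked example of the quarter boundaries returned above (the dates are illustrative).
import datetime
print(get_previous_quarter(datetime.date(2021, 5, 10)))  # ('2021-01-01', '2021-04-01'): previous quarter
print(get_previous_quarter(datetime.date(2021, 6, 10)))  # ('2021-04-01', '2021-07-01'): current quarter, since June ends Q2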
|
4175c80d2aa75c0e3e02cdffe8a766c4a63686d0
| 33,418 |
def extract_metadata(document):
"""Return the dict containing document metadata.
:param document:
:type document: :class:`docutils.nodes.document`
:returns: docinfo data from document
:rtype: dict
From: https://github.com/adieu/mezzanine-cli @ mezzanine_cli/parser.py
License: BSD (https://github.com/adieu/mezzanine-cli/blob/master/setup.py)
"""
output = {}
for docinfo in document.traverse(nodes.docinfo):
for element in docinfo.children:
if element.tagname == 'field': # custom fields (e.g. summary)
name_elem, body_elem = element.children
name = name_elem.astext()
value = body_elem.astext()
else: # standard fields (e.g. address)
name = element.tagname
value = element.astext()
name = name.lower()
output[name] = value
return output
|
8c097569d240cc32ffc4496eda0e1e14e5269052
| 33,420 |
import numpy as np
import scipy.special
def ibp_loglik(Z, alpha):
"""Probability of an assignment matrix drawn from an IBP prior, where each customer chooses new dishes
consecutively from the end, but otherwise the columns are unordered."""
N, K = Z.shape
total = K * np.log(alpha)
new_dish_counts = np.bincount(first_customer(Z))
total -= scipy.special.gammaln(new_dish_counts + 1).sum()
total -= alpha * (1. / np.arange(1, N+1)).sum()
m = Z.sum(0)
total += scipy.special.gammaln(N - m + 1).sum()
total += scipy.special.gammaln(m).sum()
total -= K * scipy.special.gammaln(N+1)
return total
|
89178a93d32aefc7e1557363e27db4c6f2e9ee43
| 33,422 |
def neighbours(geohash):
"""
Returns all 8 adjacent cells to specified geohash::
| nw | n | ne |
| w | * | e |
| sw | s | se |
:param geohash: string, geohash neighbours are required of
:returns: neighbours as namedtuple of geohashes with properties n,ne,e,se,s,sw,w,nw
    >>> neighbours = geohash.neighbours('gcpuyph')
    >>> neighbours
    ('gcpuypk', 'gcpuypm', 'gcpuypj', 'gcpuynv', 'gcpuynu', 'gcpuyng', 'gcpuyp5', 'gcpuyp7')
    >>> neighbours.ne
    'gcpuypm'
"""
n = adjacent(geohash, 'n')
ne = adjacent(n, 'e')
e = adjacent(geohash, 'e')
s = adjacent(geohash, 's')
se = adjacent(s, 'e')
w = adjacent(geohash, 'w')
sw = adjacent(s, 'w')
nw = adjacent(n, 'w')
Neighbours = namedtuple('Neighbours',
['n', 'ne', 'e', 'se', 's', 'sw', 'w', 'nw'])
return Neighbours(n, ne, e, se, s, sw, w, nw)
|
1fd5a515e9e72cc62b98d7cda57592ae1faa1595
| 33,423 |
def get_payload(address):
"""
According to an Address object, return a valid payload required by
create/update address api routes.
"""
return {
"address": address.address,
"city": address.city,
"country": str(address.country),
"first_name": address.first_name,
"last_name": address.last_name,
"title": address.title,
"postcode": address.postcode,
}
|
34fde5090aae774a24a254ea7dd7f03cc0f784be
| 33,424 |
def new_alphabet(shift):
"""
:param shift: int, The magic number that user input to produce shifted.
:return: str, a new_alphabet will be returned.
"""
first_half = ALPHABET[shift:]
second_half = ALPHABET[:shift]
# first_half = ALPHABET[(26 - shift):]
# second_half = ALPHABET[0:(26 - shift)]
ans = first_half + second_half
return ans
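A small sketch of how the shifted alphabet drives a Caesar-style substitution, assuming the module-level ALPHABET constant is the lowercase alphabet.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'  # assumed value of the module constant
shifted = new_alphabet(3)                # 'defghijklmnopqrstuvwxyzabc'
print(''.join(shifted[ALPHABET.index(ch)] for ch in 'hello'))  # 'khoor'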
|
71f3bf1c48647d8bc7641cb3070acebdb1d0170f
| 33,425 |
def _tensor_equal_tensor(x, y):
"""
Determine if two tensors are equal.
Args:
x (Tensor): first input tensor.
y (Tensor): second input tensor.
Returns:
bool, if x == y return true, x != y return false.
"""
return F.equal(x, y)
|
a1149d02b520a79584a28c57424a4c1dfbdd4021
| 33,426 |
def CleanUnicodeString(s, separator=None):
"""Return list of words after lowering case and may be removing punctuations.
Args:
s: a unicode string.
separator: default is None which implies separator of whitespaces and
punctuation is removed.
Returns:
    List of lowercased words after removing runs of separators.
Raises:
ValueError: when s is not a unicode string.
"""
if not isinstance(s, unicode):
raise ValueError('Expected unicode string type data but got: %s' % type(s))
words = s.lower().split(separator)
if separator is None:
# Remove punctuation
words = [w.translate(_GetPunctuationDict()) for w in words]
words = [w for w in words if w]
return words
|
c017cfd99d2153e3f10716f3664f665f315ecd89
| 33,427 |
def compute_cumulants(G, mus, R=None, return_R=False):
"""
Compute the cumulants of a Hawkes process given the integrated kernel
matrix `G` and the baseline rate vector `mus`
Arguments
---------
G : np.ndarray
        The integrated kernel matrix of shape dim x dim
mus : np.ndarray
The baseline rate vector of shape dim
R : np.ndarray (optional)
Precomputed matrix R
return_R : bool (optional)
Return the matrix R if set to `True`
Return
------
L : np.ndarray
Mean intensity matrix
C : np.ndarray
Covariance matrix
Kc : np.ndarray
Skewness matrix
R : np.ndarray (returned only if `return_R` is True)
Internal matrix to compute the cumulants
"""
if not len(G.shape) == 2:
raise ValueError("Matrix `G` should be 2-dimensional")
if not len(mus.shape) == 1:
raise ValueError("Vector `mus` should be 1-dimensional")
if not G.shape[0] == G.shape[1]:
raise ValueError("Matrix `G` should be a squared matrix")
if not G.shape[0] == mus.shape[0]:
raise ValueError("Vector `mus` should have the same dinension as `G`")
R = compute_R(G)
L = np.diag(R @ mus)
C = R @ L @ R.T
Kc = (R**2) @ C.T + 2 * R * (C - R @ L) @ R.T
if return_R:
return L, C, Kc, R
return L, C, Kc
|
1c1b48147af0b2d46f87d1f6050137d778db3e97
| 33,428 |
def interp_2d_to_3d(gs, grid, gt):
"""Interpolate 2D vector to a 3D grid using a georeferenced grid.
Parameters
----------
gs : geopandas.GeoSeries
Input geopandas GeoSeries
grid : array_like
2D array of values, e.g. DEM
gt : tuple
GDAL-style geotransform coefficients for grid
Returns
-------
geopandas.GeoSeries
With 3rd dimension values interpolated from grid.
"""
assert gt[1] > 0, gt[1]
assert gt[2] == 0, gt[2]
assert gt[4] == 0, gt[4]
assert gt[5] < 0, gt[5]
hx = gt[1] / 2.0
hy = gt[5] / 2.0
div = gt[1] * gt[5]
ny, nx = grid.shape
ar = np.pad(grid, 1, 'symmetric')
def geom2dto3d(geom):
x, y = geom.xy
x = np.array(x)
y = np.array(y)
# Determine outside points
outside = (
(x < gt[0]) | (x > (gt[0] + nx * gt[1])) |
(y > gt[3]) | (y < (gt[3] + ny * gt[5])))
if outside.any():
raise ValueError(f'{outside.sum()} coordinates are outside grid')
# Use half raster cell widths for cell center values
fx = (x - (gt[0] + hx)) / gt[1]
fy = (y - (gt[3] + hy)) / gt[5]
ix1 = np.floor(fx).astype(np.int32)
iy1 = np.floor(fy).astype(np.int32)
ix2 = ix1 + 1
iy2 = iy1 + 1
# Calculate differences from point to bounding raster midpoints
dx1 = x - (gt[0] + ix1 * gt[1] + hx)
dy1 = y - (gt[3] + iy1 * gt[5] + hy)
dx2 = (gt[0] + ix2 * gt[1] + hx) - x
dy2 = (gt[3] + iy2 * gt[5] + hy) - y
# Use a 1-padded array to interpolate edges nicely, so add 1 to index
ix1 += 1
ix2 += 1
iy1 += 1
iy2 += 1
# Use the differences to weigh the four raster values
z = (ar[iy1, ix1] * dx2 * dy2 / div +
ar[iy1, ix2] * dx1 * dy2 / div +
ar[iy2, ix1] * dx2 * dy1 / div +
ar[iy2, ix2] * dx1 * dy1 / div)
return type(geom)(zip(x, y, z))
return gs.apply(geom2dto3d)
|
fe0ae9ee3bb65a2b7ab255bb228a5d4411790d2d
| 33,429 |
import traceback
def norecurse(f):
"""Decorator that keeps a function from recursively calling itself.
Parameters
----------
f: function
"""
def func(*args, **kwargs):
        # If a function's name is on the stack twice (once for the current call
        # and a second time for the previous call), then return without
        # executing the function.
if len([1 for l in traceback.extract_stack() if l[2] == f.__name__]) > 1:
return None
# Otherwise, not a recursive call so execute the function and return result.
return f(*args, **kwargs)
return func
|
e7c7bebbbebcf53dad6772862f9ed4287c19a13d
| 33,430 |
def compare_model(models_list=None, x_train=None, y_train=None, calculate_accuracy=False, scoring_metrics=None, scoring_cv=3, silenced=False):
"""
Trains multiple user-defined model and pass out report
Parameters
----------------
models_list: list
a list of models to be trained
x_train: Array, DataFrame, Series
The feature set (x) to use in training an estimator to predict the outcome (y).
y_train: Series, 1-d array, list
The ground truth value for the train dataset
calculate_accuracy: Boolean
Specify if validation should be carried out on model. Default is False
scoring_metrics: list
        Metrics to use in scoring the model
scoring_metrics = ['f1_micro','f1_macro','f1_weighted','accuracy']
scoring_cv: int
default value is 3
Returns
---------------
a tuple of fitted_model and the model evaluation scores
"""
if models_list is None or len(models_list) < 1:
raise ValueError("model_list: model_list can't be 'None' or empty")
if x_train is None:
raise ValueError("x_train: features can't be 'None' or empty")
if y_train is None:
raise ValueError("y_train: features can't be 'None' or empty")
if type(scoring_cv) is not int:
raise ValueError("scoring_cv: integer required")
fitted_model = []
model_scores = []
for model in models_list:
if silenced is not True:
print(f"Fitting {type(model).__name__} ...")
model.fit(x_train, y_train)
# append fitted model into list
fitted_model.append(model)
if calculate_accuracy:
if scoring_metrics is not None and len(scoring_metrics) > 0:
model_score = cross_validate(model, x_train, y_train, scoring=scoring_metrics, cv=scoring_cv)
model_scores.append(model_score)
else:
print(
"""
                'calculate_accuracy' is set to True but scoring metrics is None or empty.
Model evaluation will not be done
"""
)
return fitted_model, model_scores
|
23f35fe1db665fe9b71262392a964d5562d09c69
| 33,431 |
def Material (colorVector):
"""
Material color.
"""
    if colorVector is None:
return None
else:
assert isinstance (colorVector, (list, tuple))
        assert len(colorVector) == 3
for cid in range (0, 3):
assert colorVector[cid] >= 0
assert colorVector[cid] <= 1
return colorVector
|
89cee73c485669786f1a7cc4855ad8460c9db023
| 33,432 |
from typing import Callable
def linear_objective(
weights: np.ndarray, samples: np.ndarray, labels: np.ndarray, loss_fn: Callable
) -> float:
"""Calculates the final loss objective for a linear model"""
# Compute scores for loss
z = (samples @ weights) * labels
# Mean loss over batch
loss = np.mean(loss_fn(z)).item()
return loss
|
feac29c0d516897ac3c30c70c50364ff32407669
| 33,433 |
import itertools
import pathlib
def jpg_walk(path: str, filter_types: list) -> list:
"""
    Get the paths of all images under the specified directory.
"""
with checkTimes("image walker"):
pools = list(
itertools.chain(
*[
list(pathlib.Path(path).glob(f"**/*.{types}"))
for types in filter_types
]
)
)
info(f"image find: {len(pools)}")
return pools
|
53524f3758601fbdc5f7664c234d30b995dd48ab
| 33,435 |
import math
def auto_border_start(min_corner_point, border_size):
"""Determine upper-right corner coords for auto border crop
:param min_corner_point: extreme corner component either 'min_x' or 'min_y'
:param border_size: min border_size determined by extreme_frame_corners in vidstab process
:return: adjusted extreme corner for cropping
"""
return math.floor(border_size - abs(min_corner_point))
|
e09d48a8c8c59053516357cbfd320cf92a080cc4
| 33,436 |
def val(model):
""" 计算模型在验证集上的分数 """
top_k = 3
    # put the model in evaluation mode
model.eval()
    # prepare the data
dataset = ZhiHuData(conf.dev_data)
data_loader = DataLoader(dataset, batch_size=conf.batch_size)
    # run predictions
predict_label_and_marked_label_list = []
for i, batch in enumerate(data_loader):
title, content, label = batch
with t.no_grad():
title, content = Variable(title.cuda()), Variable(content.cuda())
score = model(title, content)
pred_value = score.data.topk(top_k, dim=1)[0].cpu()
pred_index = score.data.topk(top_k, dim=1)[1].cpu()
        # compute the scores
true_value = label.data.float().topk(top_k, dim=1)[0]
true_index = label.data.float().topk(top_k, dim=1)[1]
tmp = []
for jj in range(label.size(0)):
true = true_index[jj][true_value[jj] > 0]
pred = pred_index[jj][pred_value[jj] > 0]
tmp.append((pred.tolist(), true.tolist()))
predict_label_and_marked_label_list.extend(tmp)
scores, prec_, recall_ = calc_score(predict_label_and_marked_label_list, topk=top_k)
print('calc_score score: {} - prec: {} - recall: {}'.format(scores, prec_, recall_))
scores, prec_, recall_ = calc_f1(predict_label_and_marked_label_list)
print('calc_f1 score: {} - prec: {} - recall: {}'.format(scores, prec_, recall_))
    # put the model back in training mode
model.train()
return scores, prec_, recall_
|
402bbcc829ec7e51e6ff70ab0c2b0a1b24b08c46
| 33,437 |
from numpy import array, sum as npsum
def regress_origin(x, y):
"""Returns coefficients to regression "y=ax+b" passing through origin.
Requires vectors x and y of same length.
See p. 351 of Zar (1999) Biostatistical Analysis.
returns slope, intercept as a tuple.
"""
x, y = array(x, "float64"), array(y, "float64")
return npsum(x * y) / npsum(x * x), 0
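Worked example: for y = 2x the through-origin slope is exactly 2 and the intercept is fixed at 0.
print(regress_origin([1, 2, 3], [2, 4, 6]))  # (2.0, 0)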
|
6466f5cb38e3be1070d25051161809018601f689
| 33,438 |
import configparser
def bootPropsConfig(artifact, resources, targetDir, scalaVersion = "2.13.1"):
"""Create the configuration to install an artifact and its dependencies"""
scala = {}
scala["version"] = scalaVersion
app = {}
app["org"] = artifact.org
app["name"] = artifact.name
app["version"] = artifact.version
app["class"] = "com.scleradb.pathgen.Main"
app["cross-versioned"] = "binary"
if resources:
app["resources"] = ", ".join(resources)
repositories = {}
repositories["local"] = None
repositories["typesafe-ivy-releases"] = "http://repo.typesafe.com/typesafe/ivy-releases/, [organization]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]"
repositories["maven-central"] = None
repositories["Sonatype OSS Snapshots"] = "https://oss.sonatype.org/content/repositories/snapshots"
boot = {}
boot["directory"] = targetDir
log = {}
log["level"] = "error"
config = configparser.ConfigParser(allow_no_value = True, delimiters = ":")
config["scala"] = scala
config["app"] = app
config["repositories"] = repositories
config["boot"] = boot
config["log"] = log
return config
|
66b8a4d641b3b1728e1d99c3f7bd7104806cdc50
| 33,439 |
import requests
def broadcast_transaction(hex_tx, blockchain_client=BlockchainInfoClient()):
""" Dispatch a raw transaction to the network.
"""
url = BLOCKCHAIN_API_BASE_URL + '/pushtx'
payload = {'tx': hex_tx}
r = requests.post(url, data=payload, auth=blockchain_client.auth)
if 'submitted' in r.text.lower():
return {'success': True}
else:
raise Exception('Invalid response from blockchain.info.')
|
34953d3cbdcd83b4fd9f7971214653f32466db63
| 33,440 |
def concat(inputs_list, axis=-1):
"""
Concatenate input tensors list
Parameters
----------
inputs_list: Input tensors list
axis: Axis along which to concatenate the tensors
"""
return tf.concat(inputs_list, axis=axis)
|
42bbe9ca6873ad9e66e13dd4fb29780a01c5e941
| 33,443 |
import torch
def string_to_tensor(string, char_list):
"""A helper function to create a target-tensor from a target-string
params:
string - the target-string
char_list - the ordered list of characters
output: a torch.tensor of shape (len(string)). The entries are the 1-shifted
indices of the characters in the char_list (+1, as 0 represents the blank-symbol)
"""
target = []
for char in string:
pos = char_list.index(char) + 1
target.append(pos)
result = torch.tensor(target, dtype=torch.int32)
return result
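Quick usage sketch with an assumed three-character list; index 0 stays reserved for the blank symbol.
char_list = ['a', 'b', 'c']  # illustrative ordered character list
print(string_to_tensor('cab', char_list))  # tensor([3, 1, 2], dtype=torch.int32)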
|
eb6c7fcccc9802462aedb80f3da49abec9edc465
| 33,444 |
def unique_nrpzs_active_subquery():
"""Returns unique NRPZS within active centers."""
return db.session.query(OckovaciMisto.nrpzs_kod) \
.filter(OckovaciMisto.status == True) \
.group_by(OckovaciMisto.nrpzs_kod) \
.having(func.count(OckovaciMisto.nrpzs_kod) == 1) \
.subquery()
|
f46f3424b460e12d7096f165055ec7cf291eef91
| 33,446 |
def retrieve_context_service_interface_point_total_potential_capacity_total_size_total_size(uuid): # noqa: E501
"""Retrieve total-size
Retrieve operation of resource: total-size # noqa: E501
:param uuid: ID of uuid
:type uuid: str
:rtype: CapacityValue
"""
return 'do some magic!'
|
5a018beed4c7352235b8f29425ac28f47717091b
| 33,447 |
def index():
""" Index page. """
return render_template('dashboard/index.html')
|
e12209bc31f0a7f118359e847a3c39bd1a79f557
| 33,448 |
def is_tax_id_obligatory(country):
"""
Returns True if country is in EU or is Poland. Returns False for others.
"""
if country.name in all_eu_countries:
return True
return False
|
f9ae6f1baff4f77b2c6255f85e57833a1b7cc830
| 33,449 |
def indexTupleToStr(idx):
"""
Generate a string contains all the lowercase letters corresponding to given index list.
Parameters
----------
    idx : tuple of int
        A tuple of indices, each of which should be in [0, 26).
Returns
-------
str
        A string corresponding to the indices in idx. E.g. if the input is (0, 2, 4), then return "ace"
"""
labelList = 'abcdefghijklmnopqrstuvwxyz'
if (not isinstance(idx, tuple)):
raise ValueError(errorMessage('indexTupleToStr requires a tuple as idx, {} gotten.'.format(idx)))
for x in idx:
if (x >= 26):
raise ValueError(errorMessage('indexTupleToStr cannot transfer index >= 26 for index {}.'.format(idx)))
return ''.join([labelList[x] for x in idx])
|
5f234cff34b530316b6a957de7604321a82a309b
| 33,450 |
def _LookupPermset(role, status, access):
"""Lookup the appropriate PermissionSet in _PERMISSIONS_TABLE.
Args:
role: a string indicating the user's role in the project.
status: a Project PB status value, or UNDEFINED_STATUS.
access: a Project PB access value, or UNDEFINED_ACCESS.
Returns:
A PermissionSet that is appropriate for that kind of user in that
project context.
"""
if (role, status, access) in _PERMISSIONS_TABLE:
return _PERMISSIONS_TABLE[(role, status, access)]
elif (role, status, WILDCARD_ACCESS) in _PERMISSIONS_TABLE:
return _PERMISSIONS_TABLE[(role, status, WILDCARD_ACCESS)]
else:
return EMPTY_PERMISSIONSET
|
f9188ca34476e29e743ea92f689097a41c8dcb35
| 33,451 |
def _add_sld_boilerplate(symbolizer):
"""
Wrap an XML snippet representing a single symbolizer in the appropriate
elements to make it a valid SLD which applies that symbolizer to all features,
including format strings to allow interpolating a "name" variable in.
"""
return """
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc"
xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(name)s</Name>
<UserStyle>
<Name>%(name)s</Name>
<Title>%(name)s</Title>
<FeatureTypeStyle>
<Rule>
""" + symbolizer + """
</Rule>
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
|
971199333570d5bc7baefec88f35b92922ef6176
| 33,452 |
def calculate_distance(location1, location2):
"""Calculate geodesic distance between two coordinates with ellipsoidal earth model.
Args:
location1: tuple of (latitude, longitude) as floats in Decimal Degrees
location2: tuple of (latitude, longitude) as floats in Decimal Degrees
Returns:
A float in meters of the distance between the two points
"""
return distance.geodesic(location1, location2).meters
|
569ae6af353d772234359ed70ac65e732ef2732a
| 33,453 |
def user():
"""
    Form field names passed in:
    email, call, signature, mail, pre_school, address, detail_address, nickname
    Returns:
    user_info: dict of the user's personal information
"""
if 'USR_ID' in session:
if request.method == 'GET':
user_info = query_db("SELECT * FROM student WHERE user_id = %s",
(session['USR_ID'],), one = True)
return render_template('user.html', user_info=user_info)
else:
if g.cursor.execute("""UPDATE SET nickname = %s, email = %s,
mail = %s, call = %s, signature = %s, pre_school = %s,
address = %s, detail_address = %s""", (
request.form['nickname'],
request.form['email'],
request.form['mail'],
request.form['call'],
request.form['signature'],
request.form['pre_school'],
request.form['address'],
request.form['detail_address']
)):
g.conn.commit()
return redirect(url_for('user'))
else:
return render_template('user.html', error=True)
else:
return redirect(url_for('login'))
|
40a960150cab9b88d848f742109c5120f4a846a4
| 33,454 |
def sequence_names_match(r1, r2):
"""
Check whether the sequences r1 and r2 have identical names, ignoring a
suffix of '1' or '2'. Some old paired-end reads have names that end in '/1'
and '/2'. Also, the fastq-dump tool (used for converting SRA files to FASTQ)
appends a .1 and .2 to paired-end reads if option -I is used.
"""
name1 = r1.name.split(None, 1)[0]
name2 = r2.name.split(None, 1)[0]
if name1[-1:] in '12' and name2[-1:] in '12':
name1 = name1[:-1]
name2 = name2[:-1]
return name1 == name2
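Hedged example with stand-in read objects; only a `.name` attribute is needed here.
from types import SimpleNamespace
r1 = SimpleNamespace(name='SRR001666.1 071112_SLXA-EAS1_s_7:5:1:817:345 length=36')
r2 = SimpleNamespace(name='SRR001666.2 071112_SLXA-EAS1_s_7:5:1:817:345 length=36')
print(sequence_names_match(r1, r2))  # True: the trailing .1/.2 suffixes are ignored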
|
645ac09011cc4b94c5b6d60bf691b6b1734d5b6b
| 33,455 |
import torch
def generate_mobile_module_lints(script_module: torch.jit.ScriptModule):
"""
Args:
script_module: An instance of torch script module with type of ScriptModule
Returns:
lint_map: A list of dictionary that contains modules lints
"""
if not isinstance(script_module, torch.jit.ScriptModule):
raise TypeError(
'Got {}, but ScriptModule is expected.'.format(type(script_module)))
lint_list = []
if not hasattr(script_module, "_generate_bundled_inputs_for_forward"):
lint_list.append({"name": LintCode.BUNDLED_INPUT.name, "message": "No bundled input for forward, please add bundled inputs "
"before saving the module using torch.utils.bundled_inputs.augment_model_with_bundled_inputs."})
for name, param in script_module.named_parameters():
if param.requires_grad:
lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": "Param {} requires grad, "
"please set torch.no_grad() to reduce memory usage and improve computation speed during "
"inference phase.".format(name)})
op_names = torch.jit.export_opnames(script_module)
for op_name in op_names:
if "dropout" in op_name:
lint_list.append({"name": LintCode.DROPOUT.name, "message": "Operator {} exists, remember to call eval() before "
"saving the module.and call torch.utils.mobile_optimizer.optimize_for_mobile to drop dropout "
"operator.".format(op_name)})
if "batch_norm" in op_name:
lint_list.append({"name": LintCode.BATCHNORM.name, "message": "Operator {} exists, remember to call eval() before "
"saving the module and call torch.utils.mobile_optimizer.optimize_for_mobile to drop batch_norm "
"operator.".format(op_name)})
return lint_list
|
4a6b25388fb549a273345f2f16f8e471c468a691
| 33,456 |
from typing import Set
def manipulation(macid: MACID, decision: str, effective_set: Set[str]) -> bool:
"""Check whether a decision is motivated by an incentive for manipulation.
Graphical Criterion:
1) There is a directed decision-free path from D_A to an effective decision node D_B.
2) There is a directed, effective path from D_B to U_A (an effective path is a path in which all
decision nodes, except possibly the initial node, and except fork nodes, are effective)
3) There is a directed, effective path from D_A to U_B that does not pass through D_B.
"""
if any(node not in macid.nodes for node in effective_set):
raise KeyError("One or many of the nodes in the effective_set are not present in the macid.")
agent = macid.decision_agent[decision]
agent_utils = macid.agent_utilities[agent]
reachable_decisions = [] # set of possible D_B
list_decs = list(macid.decisions)
list_decs.remove(decision)
for dec_reach in list_decs:
if dec_reach in effective_set:
if directed_decision_free_path(macid, decision, dec_reach):
reachable_decisions.append(dec_reach)
for decision_b in reachable_decisions:
agent_b = macid.decision_agent[decision_b]
agent_b_utils = macid.agent_utilities[agent_b]
for u in agent_utils:
if _effective_dir_path_exists(macid, decision_b, u, effective_set):
for u_b in agent_b_utils:
if _directed_effective_path_not_through_set_y(macid, decision, u_b, effective_set, {decision_b}):
return True
    return False
|
d84df003733c7f18c668ef08a7804773b6e501c8
| 33,457 |
def _get_client_ids_meeting_condition(train_tff_data, bad_accuracy_cutoff,
good_accuracy_cutoff,
invert_imagery_likelihood,
classifier_model):
"""Get clients that classify <bad_accuracy_cutoff or >good_accuracy_cutoff."""
bad_client_ids_inversion_map = {}
good_client_ids_inversion_map = {}
for client_id in train_tff_data.client_ids:
invert_imagery = (1 == np.random.binomial(n=1, p=invert_imagery_likelihood))
# TF Dataset for particular client.
raw_images_ds = train_tff_data.create_tf_dataset_for_client(client_id)
# Preprocess into format expected by classifier.
images_ds = emnist_data_utils.preprocess_img_dataset(
raw_images_ds,
invert_imagery=invert_imagery,
include_label=True,
batch_size=None,
shuffle=False,
repeat=False)
# Run classifier on all data on client, compute % classified correctly.
total_count, correct_count = _analyze_classifier(images_ds,
classifier_model)
accuracy = float(correct_count) / float(total_count)
if accuracy < bad_accuracy_cutoff:
bad_client_ids_inversion_map[client_id] = invert_imagery
if accuracy > good_accuracy_cutoff:
good_client_ids_inversion_map[client_id] = invert_imagery
return bad_client_ids_inversion_map, good_client_ids_inversion_map
|
c7d5878ee045eead44f50416aceac3005b0d4e2c
| 33,458 |
def get_wccp_service_group_settings(
self,
ne_id: str,
cached: bool,
) -> dict:
"""Get per-group WCCP configuration settings from appliance
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - wccp
- GET
- /wccp/config/group/{neId}?cached={cached}
:param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
:type ne_id: str
:param cached: ``True`` retrieves last known value to Orchestrator,
``False`` retrieves values directly from Appliance
:type cached: bool
:return: Returns nested dictionary of wccp settings per group-id \n
* keyword **<wccp_group_id>** (`dict`): WCCP group detail
object \n
* keyword **password** (`str, optional`): WCCP service group
password
* keyword **mask_src_port** (`int, optional`): WCCP service
group mask source port
* keyword **force_l2_return** (`bool, optional`): WCCP
service group force l2 return
* keyword **hash_dst_ip** (`bool, optional`): WCCP service
group hash destination ip
* keyword **self** (`int, optional`): Integer value of
service group Id
* keyword **weight** (`int, optional`): WCCP service group
weight
* keyword **hash_src_port** (`bool, optional`): WCCP service
group hash source port
* keyword **assign_method** (`str, optional`): Assignment
Method
* keyword **hash_dst_port** (`bool, optional`): WCCP service
group hash destination port
* keyword **hash_src_ip** (`bool, optional`): WCCP service
group hash source ip
        * keyword **encap** (`str, optional`): WCCP service group
forwarding method
* keyword **protocol** (`str, optional`): WCCP service group
protocol
* keyword **assign_detail** (`str, optional`): WCCP service
group assignment detail
* keyword **compatibility** (`str, optional`): WCCP service
group compatibility mode. Valid values: ``ios``, ``nexus``
* keyword **interface** (`str, optional`): WCCP service
group interface
* keyword **mask_dst_ip** (`int, optional`): WCCP service
group mask destination ip
* keyword **mask_dst_port** (`int, optional`): WCCP service
group mask destination port
* keyword **mask_src_ip** (`int, optional`): WCCP service
group mask source ip
* keyword **priority** (`int, optional`): WCCP service group
priority. Valid range: ``[0, 255]``
* keyword **router** (`dict, optional`): WCCP service group
router information
:rtype: dict
"""
return self._get("/wccp/config/group/{}?cached={}".format(ne_id, cached))
|
75e07296893eabafbbf0285134cf33cb3eb46480
| 33,459 |
def container_storage_opt() -> int:
"""Return the size for the 'storage_opt' options of the container.
Return -1 if no option (or the default 'host') was given."""
if "storage_opt" in settings.DOCKER_PARAMETERS:
storage = settings.DOCKER_PARAMETERS["storage_opt"].get("size", -1)
if storage == "host" or storage == "-1":
storage = - 1
else:
storage = -1
return humanfriendly.parse_size(storage) if storage != -1 else -1
|
5d345bffd37dc0599d7ef137c08798a226322368
| 33,460 |
import copy
def construct_insert_conversatie_query(graph_uri, conversatie, bericht, delivery_timestamp):
"""
Construct a SPARQL query for inserting a new conversatie with a first bericht attached.
:param graph_uri: string
:param conversatie: dict containing escaped properties for conversatie
:param bericht: dict containing escaped properties for bericht
:returns: string containing SPARQL query
"""
conversatie = copy.deepcopy(conversatie) # For not modifying the pass-by-name original
conversatie['referentieABB'] = escape_helpers.sparql_escape_string(conversatie['referentieABB'])
conversatie['betreft'] = escape_helpers.sparql_escape_string(conversatie['betreft'])
conversatie['current_type_communicatie'] =\
escape_helpers.sparql_escape_string(conversatie['current_type_communicatie'])
bericht = copy.deepcopy(bericht) # For not modifying the pass-by-name original
bericht['inhoud'] = escape_helpers.sparql_escape_string(bericht['inhoud'])
q = """
PREFIX schema: <http://schema.org/>
PREFIX ext: <http://mu.semte.ch/vocabularies/ext/>
PREFIX adms: <http://www.w3.org/ns/adms#>
INSERT DATA {{
GRAPH <{0}> {{
<{1[uri]}> a schema:Conversation;
<http://mu.semte.ch/vocabularies/core/uuid> "{1[uuid]}";
schema:identifier {1[referentieABB]};
"""
if conversatie["dossierUri"]:
q += """
ext:dossierUri "{1[dossierUri]}";
"""
q += """
schema:about {1[betreft]};
<http://mu.semte.ch/vocabularies/ext/currentType> {1[current_type_communicatie]};
schema:processingTime "{1[reactietermijn]}";
schema:hasPart <{2[uri]}>.
<{2[uri]}> a schema:Message;
<http://mu.semte.ch/vocabularies/core/uuid> "{2[uuid]}";
schema:dateSent "{2[verzonden]}"^^xsd:dateTime;
schema:dateReceived "{2[ontvangen]}"^^xsd:dateTime;
schema:text {2[inhoud]};
<http://purl.org/dc/terms/type> "{2[type_communicatie]}";
schema:sender <{2[van]}>;
schema:recipient <{2[naar]}>;
adms:status <{3}>;
ext:deliveredAt "{4}"^^xsd:dateTime.
}}
}}
"""
q = q.format(graph_uri, conversatie, bericht, STATUS_DELIVERED_UNCONFIRMED, delivery_timestamp)
return q
|
61710bc10985cbf91bba354cd06916b77da587d7
| 33,462 |
def _minimum_chunk_size_accuracy(
data: pd.DataFrame,
partition_column_name: str = NML_METADATA_PARTITION_COLUMN_NAME,
prediction_column_name: str = NML_METADATA_PREDICTION_COLUMN_NAME,
target_column_name: str = NML_METADATA_TARGET_COLUMN_NAME,
required_std: float = 0.02,
):
"""Estimation of minimum sample size to get required standard deviation of Accuracy.
Estimation takes advantage of Standard Error of the Mean formula.
"""
y_true = data.loc[data[partition_column_name] == NML_METADATA_REFERENCE_PARTITION_NAME, target_column_name]
y_pred = data.loc[data[partition_column_name] == NML_METADATA_REFERENCE_PARTITION_NAME, prediction_column_name]
y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
y_true = np.asarray(y_true).astype(int)
y_pred = np.asarray(y_pred).astype(int)
correct_table = (y_true == y_pred).astype(int)
sample_size = (np.std(correct_table) ** 2) / (required_std**2)
sample_size = np.minimum(sample_size, len(y_true))
sample_size = np.round(sample_size, -2)
return _floor_chunk_size(sample_size)
|
3401ccb564cd69e82effc8d7c949feffe725a7fa
| 33,463 |
def estimate_directions(correlations, pairs):
"""Estimate directions from a correlation matrix for specific pairs.
Parameters
----------
correlations : numpy.ndarray, shape=(n_samples, n_samples)
A correlation matrix. Can contain nan values.
pairs : numpy.ndarray, shape=(< n_samples, 2)
A sequence of pairs which contain the indices of samples that are strongly correlated.
Returns
-------
directions : numpy.ndarray, shape=(< n_samples)
A sequence of -1 or +1 which indicates the direction of the correlation (e.g. anti or normal).
"""
directions = np.sign(correlations[pairs[:, 0], pairs[:, 1]])
return directions
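Tiny worked example with one positively and one negatively correlated pair.
import numpy as np
corr = np.array([[1.0, 0.9, -0.8],
                 [0.9, 1.0, 0.1],
                 [-0.8, 0.1, 1.0]])
pairs = np.array([[0, 1], [0, 2]])
print(estimate_directions(corr, pairs))  # [ 1. -1.]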
|
be4282922bd8b052a60211ab61c4c678ef154e05
| 33,464 |
def get_est_input_size(out, height, p2pkh_pksize, p2sh_scriptsize, nonstd_scriptsize, p2wsh_scriptsize):
"""
Computes the estimated size an input created by a given output type (parsed from the chainstate) will have.
The size is computed in two parts, a fixed size that is non type dependant, and a variable size which
depends on the output type.
:param out: Output to be analyzed.
:type out: dict
:param height: Block height where the utxo was created. Used to set P2PKH min_size.
:type height: int
    :param p2pkh_pksize: Estimated public key size (indexed by block height) used for P2PKH inputs.
    :type p2pkh_pksize: dict
    :param p2sh_scriptsize: Estimated scriptSig size for P2SH inputs.
    :type p2sh_scriptsize: int
    :param nonstd_scriptsize: Estimated scriptSig size for non-standard inputs.
    :type nonstd_scriptsize: int
    :param p2wsh_scriptsize: Estimated witness script size for P2WSH inputs.
    :type p2wsh_scriptsize: int
:return: The minimum input size of the given output type.
:rtype: int
"""
out_type = out["out_type"]
script = out["data"]
# Fixed size
prev_tx_id = 32
prev_out_index = 4
nSequence = 4
fixed_size = prev_tx_id + prev_out_index + nSequence
# Variable size (depending on scripSig):
# Public key size can be either 33 or 65 bytes, depending on whether the key is compressed or uncompressed. We will
# use data from the blockchain to estimate it depending on block height.
#
# Signatures size is contained between 71-73 bytes depending on the size of the S and R components of the signature.
# Since the most common size is 72, we will consider all signatures to be 72-byte long.
    if out_type == 0:
# P2PKH
scriptSig = 74 + p2pkh_pksize[str(height)] # PUSH sig (1 byte) + sig (72 bytes) + PUSH pk (1 byte) + PK estimation
scriptSig_len = 1
    elif out_type == 1:
# P2SH
scriptSig = p2sh_scriptsize
scriptSig_len = int(ceil(scriptSig / float(256)))
elif out_type in [2, 3, 4, 5]:
# P2PK
# P2PK requires a signature and a push OP_CODE to push the signature into the stack. The format of the public
# key (compressed or uncompressed) does not affect the length of the signature.
scriptSig = 73 # PUSH sig (1 byte) + sig (72 bytes)
scriptSig_len = 1
else:
segwit = check_native_segwit(script)
# P2MS
if check_multisig(script):
# Multisig can be 15-15 at most.
        req_sigs = int(script[:2], 16) - 80  # OP_1 is 0x51 (81 decimal)
scriptSig = 1 + (req_sigs * 73) # OP_0 (1 byte) + 72 bytes per sig (PUSH sig (1 byte) + sig (72 bytes))
scriptSig_len = int(ceil(scriptSig / float(256)))
elif segwit[0] and segwit[1] == "P2WPKH":
scriptSig = 27 # PUSH sig (1 byte) + sig (72 bytes) + PUSH pk (1 byte) + pk (33 bytes) (107 / 4 = 27)
scriptSig_len = 1
elif segwit[0] and segwit[1] == "P2WSH":
scriptSig = ceil(p2wsh_scriptsize/4.0)
scriptSig_len = int(ceil(scriptSig / float(256)))
else:
# All other types (non-standard outs)
scriptSig = nonstd_scriptsize
scriptSig_len = int(ceil(scriptSig / float(256)))
var_size = scriptSig_len + scriptSig
return fixed_size + var_size
|
621cea04217ac7b1396d89ecdfa4e8a3812bd636
| 33,467 |
import win32com.client
def VisumInit(path=None,COMAddress='Visum.Visum.125'):
"""
###
Automatic Plate Number Recognition Support
(c) 2012 Rafal Kucharski [email protected]
####
VISUM INIT
"""
Visum = win32com.client.Dispatch(COMAddress)
if path != None: Visum.LoadVersion(path)
return Visum
|
dc9ebcb271ba86f3ab1180bed6b3da553e416f7d
| 33,468 |
def get(section, option, default=None):
"""
Simple accessor to hide/protect the multiple depth dict access: conf["SECTION"]["OPTION"]
"""
if section in conf.keys():
if option in conf[section].keys():
return conf[section][option]
return default
|
9ece13a6cf0df1f50f5d9db70c035aef88fe2412
| 33,469 |
def inceptionresnetv1(**kwargs):
"""
InceptionResNetV1 model from 'Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning,'
https://arxiv.org/abs/1602.07261.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_inceptionresnetv1(model_name="inceptionresnetv1", bn_eps=1e-3, **kwargs)
|
d424c883e03899cd37bc4f5421bd779ff85be991
| 33,470 |
from typing import Dict
def series_length(x: np.array, freq: int = 1) -> Dict[str, float]:
"""Series length.
Parameters
----------
x: numpy array
The time series.
freq: int
Frequency of the time series
Returns
-------
dict
'series_length': Wrapper of len(x).
"""
return {'series_length': len(x)}
|
a84f8e9f66bc862237ddff5ac651e961147aeee2
| 33,472 |
import pygrib
def _load_uvtp(date, time, latitude=-30, longitude=289.5):
"""Load U, V, T componenents of wind for GFS file given date and time."""
# do not want pygrib to be a dependency for the whole psfws package.
# load in dataset (specific date/time given by args)
try:
grbs = pygrib.open(f'gfsanl_4_{date}_{time}_000.grb2')
except FileNotFoundError:
print(f'Could not find: gfsanl_4_{date}_{time}_000.grb2')
return None
# select relevant variables
try:
uwind = grbs.select(name='U component of wind')
vwind = grbs.select(name='V component of wind')
temp = grbs.select(name='Temperature')
except ValueError:
return None
# check whether wind information exists in file
if len(uwind) < 32:
print(f'file {date} {time} had incomplete wind information.')
return None
else:
u_values = np.zeros(31)
v_values = np.zeros(31)
p_values = np.zeros(31)
# set location range
approx_lat = [latitude-0.5, latitude+0.5]
approx_long = [longitude-0.5, longitude+0.5]
# extract u and v values at specific lat/long for each altitude
for ins, outs in zip([uwind, vwind], [u_values, v_values]):
for i in range(1, 32):
d, lat, lon = ins[i].data(lat1=approx_lat[0],
lat2=approx_lat[1],
lon1=approx_long[0],
lon2=approx_long[1])
outs[i-1] = d[np.where((lat == latitude) &
(lon == longitude))][0]
p_values[i-1] = ins[i]['level']
# do temperature separately because inconsistent altitude info: use
# p_values from wind to get consistent levels:
t_values = []
for t in temp[:35]:
if t['level'] in p_values:
d, lat, lon = t.data(lat1=approx_lat[0],
lat2=approx_lat[1],
lon1=approx_long[0],
lon2=approx_long[1])
tmp = d[np.where((lat == latitude) & (lon == longitude))][0]
t_values.append(tmp)
return {'u': u_values, 'v': v_values, 't': np.array(t_values)}
|
7cb414b26df805b946154f78782373955f3e6422
| 33,474 |
from typing import OrderedDict
import six
def ds2_variables(input, output_vars=False, names=None):
"""Generate a collection of `DS2Variable` instances corresponding to the input
Parameters
----------
input : function or OrderedDict<string, tuple> or Pandas DataFrame or Numpy or OrderedDict<string, type>
a function or mapping parameter names to (type, is_output)
output_vars : bool
Whether or not to treat all variables from `input` as output variables
names : list of str
List of variable names to used. If a single string is specified it will
be used as a prefix and variable names in the format "prefixNN" will be
generated.
Returns
-------
list<DS2Variable>
Examples
--------
>>> ds2_variables(OrderedDict(a=int, c=float))
[DS2Variable(name='a', type='integer', out=False), DS2Variable(name='c', type='double', out=False)]
>>> ds2_variables({'x': (float, True)})
[DS2Variable(name='x', type='double', out=True)]
"""
if isinstance(input, dict):
types = input
elif hasattr(input, 'columns') and hasattr(input, 'dtypes'):
# Pandas DataFrame
types = OrderedDict()
for col in input.columns:
if input[col].dtype.name == 'object':
types[col] = ('char', False)
elif input[col].dtype.name == 'category':
types[col] = ('char', False)
else:
types[col] = (input[col].dtype.name, False)
elif hasattr(input, 'dtype'):
# Numpy array? No column names, but we can at least create dummy vars of the correct type
types = OrderedDict([('var{}'.format(i),
(input.dtype.name.replace('object', 'char'), False)) for i in range(1, input.size+1)])
elif six.callable(input):
types = parse_type_hints(input)
else:
raise RuntimeError("Unable to determine input/ouput types using "
"instance of type '%s'." % type(input))
if isinstance(names, six.string_types):
names = [names + str(i) for i in range(1, len(types)+1)]
elif names is None:
names = list(types.keys())
results = []
for v in six.itervalues(types):
name = names.pop(0)
if isinstance(v, six.string_types):
results.append(DS2Variable(name=name, type=v, out=output_vars))
elif isinstance(v, type):
results.append(DS2Variable(name=name, type=v.__name__, out=output_vars))
elif isinstance(v, tuple):
type_ = v[0].__name__ if isinstance(v[0], type) else str(v[0])
out = v[1] or output_vars
results.append(DS2Variable(name=name, type=type_, out=out))
else:
            raise RuntimeError('Unable to determine input/output types.')
return results
|
50bed5939530679dd36edd5a911b212687a085aa
| 33,475 |
import hashlib
def register():
"""Register User"""
# forget any user_id
session.clear()
# If the user has been redirected through an event url
event = request.args.get('event')
# if user reached route via POST (as by submitting a form via POST)
if request.method == 'POST':
# check if form fields are empty and if entered passwords match
if not request.form.get("email") or not request.form.get("password") or not request.form.get("email"):
return render_template('failure.html', msg='Username/ Password/ Email fields cannot be empty')
if request.form.get("password") != request.form.get("confirmation"):
return render_template('failure.html', msg='Password fields do not match')
# create connection
db = mysql.connection.cursor()
# query database to see if email already exists
rows = db.execute(
"SELECT * FROM users WHERE email = '{}' ".format(request.form.get("email")))
if rows:
return render_template('failure.html', msg='Username Already Exists')
# hash password with SHA-1 algorithm and store it as string
        password = encode(hashlib.sha1(encode(request.form.get("password"), 'utf-8')).digest(),
                          'hex_codec').decode('utf-8')
db.execute(
"INSERT IGNORE INTO users (first_name, last_name, email, password, email_verified, contact_verified) VALUES ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}')".format(
request.form.get("first_name"), request.form.get("last_name"), request.form.get("email"), password, False, False))
mysql.connection.commit()
# get id and auth level
db.execute("SELECT * FROM users WHERE email = '{}'".format(request.form.get("email")))
rv = db.fetchone()
# create session
session['user_id'] = rv['id']
session['verified'] = False
return redirect(url_for('index', event = event))
# else if user reached route via GET (as by clicking a link or via redirect)
else:
return render_template('register.html', event = event)
|
b6eb1c717a1c52dd206d6bb4f17e768a5a33bbc1
| 33,477 |
from typing import Sequence
def topological_sort(edges: Sequence[Sequence]) -> list:
"""Produce a topological sorting (if one exists) using a BFS approach.
Args:
edges (Sequence[Sequence]): A list of edges pairs, ex: [('A', 'B'), ('A', 'C')].
Returns:
list: An array with a non-unique topological order of nodes.
"""
adj_list = directed_adj_list(edges)
indegrees = inbound_degrees(adj_list)
sources = find_sources(indegrees)
result = []
while len(sources) > 0:
source = sources.popleft()
result.append(source)
for neighbor in adj_list[source]:
indegrees[neighbor] -= 1
if indegrees[neighbor] == 0:
sources.append(neighbor)
return result[::-1] if len(result) == len(adj_list) else []
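Usage sketch; `directed_adj_list`, `inbound_degrees` and `find_sources` are assumed to be the module's own helpers.
print(topological_sort([('A', 'B'), ('A', 'C'), ('B', 'D'), ('C', 'D')]))
# e.g. ['D', 'C', 'B', 'A'] -- the Kahn ordering reversed, as returned above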
|
48a48449e7cd9390abbe2b589994cd7476684f69
| 33,478 |
def map_currency(currency, currency_map):
"""
Returns the currency symbol as specified by the exchange API docs.
NOTE: Some exchanges (kraken) use different naming conventions. (e.g. BTC->XBT)
"""
if currency not in currency_map.keys():
return currency
return currency_map[currency]
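Illustrative Kraken-style mapping; the real map would come from the exchange configuration.
kraken_map = {'BTC': 'XBT'}             # assumed example content
print(map_currency('BTC', kraken_map))  # 'XBT'
print(map_currency('ETH', kraken_map))  # 'ETH': unmapped symbols pass through unchanged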
|
98b235952b042109a4e2083ce6c8fd85690c22e3
| 33,479 |
def FastResultRow(cols):
"""Create a ResultRow-like class that has all fields already preparsed.
Non-UTF-8-String columns must be suffixed with !."""
getters = {}
_keys = []
for i, col in enumerate(cols.split()):
if col[-1] == '!':
col = col[:-1]
getter = itemgetter(i)
else:
getter = utf8getter(i)
_keys.append(col)
getters[i] = getters[col] = getter
class _FastResultRow:
_getters = getters
cols = _keys
def __init__(self, cols, info):
self.info = info
def __getitem__(self, index):
try:
return self._getters[index](self.info)
except KeyError:
if isinstance(index, int):
raise IndexError, 'row index out of range'
raise
def __len__(self):
return len(self.info)
def __nonzero__(self):
return bool(self.info)
def as_dict(self):
res = {}
for key in self.cols:
res[key] = self[key]
return res
def keys(self):
return self.cols
def values(self):
res = [None] * len(self.info)
for i in xrange(len(self.info)):
res[i] = self[i]
return res
def items(self):
if not self.info:
return []
res = [None] * len(self.info)
for i, col in enumerate(self.cols):
res[i] = (col, self[col])
return res
return _FastResultRow
|
61483f6b3e6627eb5b2125c3a458e3baf6d263eb
| 33,480 |
def new_answer(notification):
"""
Obtains a new answer body for notifs where **source is
the post**
"""
answer = Answer.query.filter_by(id=notification.target_id).first()
post = Post.query.filter_by(id=notification.source_id).first()
if not isinstance(answer, Answer):
return "A new unavailable answer has been posted."
if not isinstance(post, Post):
post_name = "n/a"
else:
post_name = post.title
language = answer.get_language()
if not isinstance(language, Language):
language_name = "new"
else:
language_name = language.get_display_name()
byte_count = answer.byte_len
return f"A {language_name} answer has been posted to your challenge, \"{post_name}\", measuring at {byte_count} bytes!"
|
41755acbaa870f0fabb9295a75a47c7e652bbbc0
| 33,481 |
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions.
Args:
sequence_tensor: Sequence output of `BertModel` layer of shape
(`batch_size`, `seq_length`, num_hidden) where num_hidden is number of
hidden units of `BertModel` layer.
    positions: Position ids of tokens in the sequence to mask for pretraining,
with dimension (batch_size, max_predictions_per_seq) where
`max_predictions_per_seq` is maximum number of tokens to mask out and
predict per each sequence.
Returns:
Masked out sequence tensor of shape (batch_size * max_predictions_per_seq,
num_hidden).
"""
sequence_shape = modeling.get_shape_list(
sequence_tensor, name='sequence_output_tensor')
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.keras.backend.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.keras.backend.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.keras.backend.reshape(
sequence_tensor, [batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
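# A small numeric sketch (shapes made up for illustration) of the flattened-offset trick used
# above: batch_size=2, seq_length=3, width=2, gathering two positions per row.
import tensorflow as tf
sequence = tf.reshape(tf.range(12, dtype=tf.float32), [2, 3, 2])
positions = tf.constant([[0, 2], [1, 2]])
flat_offsets = tf.reshape(tf.range(0, 2) * 3, [-1, 1])        # row offsets into the flattened tensor
flat_positions = tf.reshape(positions + flat_offsets, [-1])   # -> [0, 2, 4, 5]
flat_sequence = tf.reshape(sequence, [6, 2])
print(tf.gather(flat_sequence, flat_positions))               # shape (4, 2)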
|
82e1b4520cbac66d45d56fc1f66286578acab0dd
| 33,482 |
def _check_type(value, expected_type):
"""Perform type checking on the provided value
This is a helper that will raise ``TypeError`` if the provided value is
not an instance of the provided type. This method should be used sparingly
but can be good for preventing problems earlier when you want to restrict
duck typing to make the types of fields more obvious.
If the value passed the type check it will be returned from the call.
"""
if not isinstance(value, expected_type):
raise TypeError("Value {value!r} has unexpected type {actual_type!r}, expected {expected_type!r}".format(
value=value,
expected_type=expected_type,
actual_type=type(value),
))
return value
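# A short usage sketch (hypothetical values): the value is returned on success,
# and a TypeError is raised on a mismatch.
port = _check_type(8080, int)       # -> 8080
label = _check_type(8080, str)      # raises TypeError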
|
a18ecfe9d63e6a88c56fc083da227a5c12ee18db
| 33,483 |
def find_rbm_procrustes(frompts, topts):
"""
Finds a rigid body transformation M that moves points in frompts to the points in topts
that is, it finds a rigid body motion [ R | t ] with R \in SO(3)
This algorithm first approximates the rotation by solving
the orthogonal procrustes problem.
"""
# center data
t0 = frompts.mean(0)
t1 = topts.mean(0)
frompts_local = frompts - t0
topts_local = topts - t1
# find best rotation - procrustes problem
M = np.dot(topts_local.T, frompts_local)
U, s, Vt = np.linalg.svd(M)
R = np.dot(U, Vt)
if np.linalg.det(R) < 0:
R *= -1
T0 = np.eye(4)
T0[:3,:3] = R
T0[:3, 3] = t1 - np.dot(R, t0)
return T0
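# A quick self-check sketch on synthetic data (not from the original code base): rotate and
# translate random points, then verify the recovered transform reproduces them.
import numpy as np
rng = np.random.default_rng(0)
pts = rng.standard_normal((10, 3))
theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0.0],
              [np.sin(theta),  np.cos(theta), 0.0],
              [0.0,            0.0,           1.0]])
moved = pts @ R.T + np.array([1.0, -2.0, 0.5])
T = find_rbm_procrustes(pts, moved)
assert np.allclose(moved, pts @ T[:3, :3].T + T[:3, 3])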
|
ae478e3c2139fe5bef1beb5c12ce75b07dee98e7
| 33,484 |
from typing import Callable
import functools
def adapt_type_to_browse_processor() -> Callable[[MediaProcessorType], BrowseGeneratorType]:
"""
Create generator for media objects from provided ID's
:return: Decorator
"""
def _decorate(func: MediaProcessorType):
"""
Decorate
:param func:
:return:
"""
@functools.wraps(func)
def wrapped_function(
browser: YandexMusicBrowser,
media_content_id: MediaContentIDType = None,
fetch_children: FetchChildrenType = True,
) -> BrowseGeneratorReturnType:
media_object = func(browser, media_content_id)
if media_object is not None:
return browser.generate_browse_from_media(
media_object, fetch_children=fetch_children
)
wrapped_function.__name__ = func.__name__
return wrapped_function
return _decorate
|
bbed3ec54ca966cc1886d02abbf5ccae8daca25c
| 33,485 |
import healpy as hp
def get_skyarea(output_mock, Nside):
"""
"""
# compute sky area from ra and dec ranges of galaxies
nominal_skyarea = np.rad2deg(np.rad2deg(4.0*np.pi/hp.nside2npix(Nside)))
if Nside > 8:
skyarea = nominal_skyarea
else:
pixels = set()
for k in output_mock.keys():
if output_mock[k].has_key('ra') and output_mock[k].has_key('dec'):
for ra, dec in zip(output_mock[k]['ra'], output_mock[k]['dec']):
pixels.add(hp.ang2pix(Nside, ra, dec, lonlat=True))
frac = len(pixels)/float(hp.nside2npix(Nside))
skyarea = frac*np.rad2deg(np.rad2deg(4.0*np.pi))
if np.isclose(skyarea, nominal_skyarea, rtol=.02): # agreement to about 1 sq. deg.
print(' Replacing calculated sky area {} with nominal_area'.format(skyarea))
skyarea = nominal_skyarea
if np.isclose(skyarea, nominal_skyarea/2., rtol=.01): # check for half-filled pixels
print(' Replacing calculated sky area {} with (nominal_area)/2'.format(skyarea))
skyarea = nominal_skyarea/2.
return skyarea
|
243e6d7a4b10673ed1d7fd03db48e7ac12c6ec0e
| 33,487 |
def get_patch_info(shape, p_size):
"""
shape: origin image size, (x, y)
p_size: patch size (square)
return: n_x, n_y, step_x, step_y
"""
x = shape[0]
y = shape[1]
n = m = 1
while x > n * p_size:
n += 1
while p_size - 1.0 * (x - p_size) / (n - 1) < 50:
n += 1
while y > m * p_size:
m += 1
while p_size - 1.0 * (y - p_size) / (m - 1) < 50:
m += 1
return n, m, (x - p_size) * 1.0 / (n - 1), (y - p_size) * 1.0 / (m - 1)
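# Worked example: a 1000 x 800 image split into 512-pixel patches needs a 3 x 2 grid, stepping
# 244 px in x and 288 px in y so that neighbouring patches overlap by at least 50 px.
print(get_patch_info((1000, 800), 512))   # -> (3, 2, 244.0, 288.0)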
|
b681f355ffbf3c7f5653996cd021950e2c9689d4
| 33,489 |
def validate_token(token=None):
"""Helps in confirming the Email Address with the help of the token, sent on the registered email address.\n
Keyword Arguments:
token -- Token passed in the user's email
"""
try:
res = URLSafeTimedSerializer(SECRET).loads( # noqa
token, salt=VERIFICATION_SALT, max_age=MAX_TIME
)
except SignatureExpired:
flash("Sorry, link has been expired.", "danger")
return False
# Token was successfully validated
return True
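# A hedged sketch of the matching token generation (SECRET and VERIFICATION_SALT are assumed to be
# the same module-level constants used by validate_token above).
from itsdangerous import URLSafeTimedSerializer
token = URLSafeTimedSerializer(SECRET).dumps("user@example.com", salt=VERIFICATION_SALT)
assert validate_token(token) is True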
|
36ae68623cb04c0d3964c23b3ebc36a82376bec8
| 33,490 |
def rate2amount(
rate: xr.DataArray, dim: str = "time", out_units: str = None
) -> xr.DataArray:
"""Convert a rate variable to an amount by multiplying by the sampling period length.
If the sampling period length cannot be inferred, the rate values
are multiplied by the duration between their time coordinate and the next one. The last period
is estimated with the duration of the one just before.
Parameters
----------
rate : xr.DataArray
"Rate" variable, with units of "amount" per time. Ex: Precipitation in "mm / d".
dim : str
The time dimension.
out_units : str, optional
Optional output units to convert to.
Examples
--------
The following converts a daily array of precipitation in mm/h to the daily amounts in mm.
>>> time = xr.cftime_range('2001-01-01', freq='D', periods=365)
>>> pr = xr.DataArray([1] * 365, dims=('time',), coords={'time': time}, attrs={'units': 'mm/h'})
>>> pram = rate2amount(pr)
>>> pram.units
'mm'
>>> float(pram[0])
24.0
Also works if the time axis is irregular : the rates are assumed constant for the whole period
starting on the values timestamp to the next timestamp.
>>> time = time[[0, 9, 30]] # The time axis is Jan 1st, Jan 10th, Jan 31st
>>> pr = xr.DataArray([1] * 3, dims=('time',), coords={'time': time}, attrs={'units': 'mm/h'})
>>> pram = rate2amount(pr)
>>> pram.values
array([216., 504., 504.])
Finally, we can force output units:
>>> pram = rate2amount(pr, out_units='pc') # Get rain amount in parsecs. Why not.
>>> pram.values
array([7.00008327e-18, 1.63335276e-17, 1.63335276e-17])
"""
try:
m, u = infer_sampling_units(rate.time, deffreq=None)
except AttributeError:
        # Incoherent time axis: xr.infer_freq returned None
        # Get sampling period lengths in nanoseconds. The last period has the same length as the one before.
dt = (
rate.time.diff(dim, label="lower")
.reindex({dim: rate[dim]}, method="ffill")
.astype(float)
)
dt = dt / 1e9 # Convert to seconds
tu = (str2pint(rate.units) * str2pint("s")).to_reduced_units()
amount = rate * dt * tu.m
amount.attrs["units"] = pint2cfunits(tu)
if out_units:
amount = convert_units_to(amount, out_units)
else:
q = units.Quantity(m, u)
amount = pint_multiply(rate, q, out_units=out_units)
return amount
|
1a660711d6e5d7bffc14608e65aeb6d0ae5d7740
| 33,491 |
def makechis(d, N, maxchi):
"""Create the vector of chis for the chain.
This is a length-N+1 list of exponents of d. The exponents are of
the form
[0, 1, 2, 1, 0] (if N+1 is odd)
[0, 1, 2, 2, 1, 0] (if N+1 is even)
Any numbers in the above exceeding ln(maxchi) / ln(d) are replaced
with maxchi.
"""
last = N
maxexp = int(np.log(maxchi) // np.log(d))
exp = list(range(0, (last+1)//2))
reverse = exp[::-1]
if last % 2 != 1:
exp = exp + [last//2]
exp = exp + reverse
for i in range(0, len(exp)):
if exp[i] > maxexp:
exp[i] = maxexp
chis = np.power(d, exp, dtype=int)
return chis
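# Worked example: d=2, N=4 with a generous maxchi follows the exponent pattern [0, 1, 2, 1, 0]
# from the docstring.
print(makechis(2, 4, 100))   # -> [1 2 4 2 1]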
|
45dea0c399b1a8d8a055c28015d17f12d3ce5112
| 33,492 |
import requests
def generate_input_f(reagent, MW, density):
""" A helper function to properly formate input for concentration-to-amount calculations.
Returns a dictionary where each key is a reagent component and each value is a sub-dictionary
containing concentration, phase, molecular weight, and density.
For reagents with multiple solvents, sub-dictionary also contains volume fraction for each solvent.
reagent - a list of reagent-instance-value dictionaries specifying component and its concentration
NOTE: this assumes that each component has a reagent-instance-value entry.
- for liquids, concentration is 0.0 M by default.
- for reagents with multiple solvents, an additional reagent-instance-value entry is required for
each liquid component that describes the volume fraction
MW - material definition URL for molecular weight
density - material definition URL for density
"""
input_data={} #instantiate dictionary to fill with data
for component in reagent: #component = one of the reagent instance values inside the reagent
if component['description']=='Concentration':
conc_val=component['nominal_value']['value'] #desired concentration
conc_unit=component['nominal_value']['unit'] #concentration unit
if units(conc_unit)!=units('molar'): #concentration must be in molarity. otherwise code breaks
print('Concentration must be a molarity. Please convert and re-enter.')
break
else:
conc=Q_(conc_val, conc_unit) #store in proper Pint format
phase=requests.get(component['material']).json()['phase'] #phase/state of matter
#extract associated material URL
r=requests.get(component['material']).json()['material']
mat=requests.get(r).json()
material=mat['description']
#loop through properties of the material to get MW and density
for prop in mat['identifier']:
r=requests.get(prop).json()
if r['material_identifier_def']==MW: #url must match that of MW material identifier def
mw= r['description']
mag=float(mw.split()[0])
unit= str(mw.split()[1])
mw=Q_(mag, unit).to(units.g/units.mol) #convert to g/mol and store in proper Pint format
if r['material_identifier_def']==density: #url must match that of density material identifier def
d= r['description']
mag=float(d.split()[0])
unit= str(d.split()[1])
d=Q_(mag, unit).to(units.g/units.ml) #convert to g/mL and store in proper Pint format
            #store this component's sub-dictionary; doing it inside the Concentration branch keeps a
            #later volume-fraction update for the same material from being overwritten
            input_data[material]={'concentration': conc, 'phase': phase, 'molecular weight': mw, 'density': d}
        if component['description']=='Volume Fraction': #for cases with more than one solvent...
            frac=component['nominal_value']['value'] #volume fraction
            #extract associated material URL
            r=requests.get(component['material']).json()['material']
            mat=requests.get(r).json()
            material=mat['description']
            input_data[material].update({'volume fraction': frac}) #update dictionary to include volume fraction
return input_data
|
2c8225509ba1512270a5ebfce130f79f2fba172c
| 33,493 |
def addCol(adjMatrix):
"""Adds a column to the end of adjMatrix and returns the index of the comlumn that was added"""
for j in range(len(adjMatrix)):
adjMatrix[j].append(0)
return len(adjMatrix[0])-1
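# Quick check on a 2 x 1 adjacency matrix: a zero column is appended to every row and the new
# column's index is returned.
m = [[0], [0]]
print(addCol(m))   # -> 1 ; m is now [[0, 0], [0, 0]]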
|
29ee11953cbdb757e8ea80e897751059e42f1d90
| 33,494 |
def get_table_m_1_a_5():
"""表M.1(a)-5 居住人数4人における基準給湯量および入浴人数(生活スケジュール:平日(小))
Args:
Returns:
list: 表M.1(a)-5 居住人数4人における基準給湯量および入浴人数(生活スケジュール:平日(小))
"""
# 表M.1(a)-5 居住人数4人における基準給湯量および入浴人数(生活スケジュール:平日(小))
table_m_1_a_5 = np.array([
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(4, 0, 0, 4, 0, 0, 0, 0),
(6, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0),
(16, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 180, 0, 0, 0),
(36, 40, 0, 0, 0, 0, 1, 0),
(0, 40, 0, 0, 0, 0, 1, 0),
(0, 20, 0, 2, 0, 0, 2, 0),
(0, 0, 0, 2, 0, 0, 0, 0),
])
return table_m_1_a_5
|
0ec4fbf123b503a31bf199f6a392147d07e8e714
| 33,495 |
import re
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
|
9e78fdba3a47c0f9faae6a938ab58498455555b2
| 33,498 |
def open_tsg_from_legos(filename):
"""
Open thermosalinograph (TSG) transect from the LEGOS dataset,
and homogenize the coordinates
Parameters
----------
filename : str
Name of the file to open
Returns
-------
ds : xarray.Dataset
The TSG transect under the form of a Dataset
"""
renamed_var = {'TIME': 'time', 'LON': 'lon', 'LAT': 'lat'}
ds = (xr.open_dataset(filename, autoclose=True)
.rename(renamed_var)
.set_coords(('lon', 'lat'))
)
ds = check_data(ds)
for var in ds.variables:
try:
del(ds[var].attrs['coordinates'])
except(KeyError):
pass
return ds
|
69c999c53fb8faff622b5f274e79ed6ef9415ddd
| 33,499 |
import datetime
def get_current_time():
"""Return timestamp for current system time in UTC time zone.
Returns
-------
datetime
Current system time
"""
return datetime.datetime.utcnow()
|
606761fa1aabe0b9d0d1682b978efbae1a524fa3
| 33,500 |
def calculate_average(result):
"""Calculates the average package size"""
vals = result.values()
if len(vals) == 0:
raise ValueError("Cannot calculate average on empty dictionary.")
return sum(vals)/float(len(vals))
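# Quick check (hypothetical package sizes): {'a': 10, 'b': 30} averages to 20.0.
print(calculate_average({'a': 10, 'b': 30}))   # -> 20.0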
|
ea4b66b41533b0e8984b5137c39c744eec9d3e1f
| 33,501 |
def score_vectore(vector):
"""
:type vector: list of float
:return:
"""
x = 0.
y = 0.
for i in range(0, len(vector)):
if i % 2 == 0:
if vector[i] > 12:
x += vector[i]
else:
y += vector[i] + min(vector[i-1], vector[i])
return x, y
|
8fc60cb3cec65d5a3b8f0fea62777739e17249a4
| 33,502 |
def loudness_contour_equivalence(mean_loudness_value, phon_ref_value):
""" Function that serves for the validation of the hearing model presented in Annex F of ECMA-74.
    Parameters
    ----------
    mean_loudness_value : float
        Mean loudness value, in sones.
    phon_ref_value : float
        Reference loudness level, in phons.
    Returns
    -------
    float
        Loudness level on the corresponding equal-loudness contour, in phons.
    """
# Conversion to phons of the loudness result that is in sones.
phon_loudness_value = sone2phone(mean_loudness_value)
phon_diff = abs(phon_ref_value - phon_loudness_value)
phon_contour_value = (
phon_ref_value - phon_diff
if (phon_ref_value <= phon_loudness_value)
else phon_ref_value + phon_diff
)
return phon_contour_value
|
410d6469caad75a2d9387ada9868a747a457dd86
| 33,503 |
def _compound_register(upper, lower):
"""Return a property that provides 16-bit access to two registers."""
def get(self):
return (upper.fget(None) << 8) | lower.fget(None)
def set(self, value):
upper.fset(None, value >> 8)
lower.fset(None, value)
return property(get, set)
|
00f315cc4c7f203755689adb5004f152a8b26823
| 33,505 |
def get_joining_type_property(value, is_bytes=False):
"""Get `JOINING TYPE` property."""
obj = unidata.ascii_joining_type if is_bytes else unidata.unicode_joining_type
if value.startswith('^'):
negated = value[1:]
value = '^' + unidata.unicode_alias['joiningtype'].get(negated, negated)
else:
value = unidata.unicode_alias['joiningtype'].get(value, value)
return obj[value]
|
ad2f590658c927a91ef5d90ac09d5b927e2091f3
| 33,507 |
from typing import List
def select_record_fields(
record: dict,
fields: List[str]) -> dict:
"""
Selects a subset of fields from a dictionary
"""
return {k: record.get(k, None) for k in fields}
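# Quick check: fields missing from the record come back as None.
print(select_record_fields({'a': 1, 'b': 2}, ['a', 'c']))   # -> {'a': 1, 'c': None}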
|
4b56ba4bc683eb8d49540ccb8de79c08b900e7c8
| 33,508 |
def to_TProfile2D(
fName,
fTitle,
data,
fEntries,
fTsumw,
fTsumw2,
fTsumwx,
fTsumwx2,
fTsumwy,
fTsumwy2,
fTsumwxy,
fTsumwz,
fTsumwz2,
fSumw2,
fBinEntries,
fBinSumw2,
fXaxis,
fYaxis,
fZaxis=None,
fScalefactor=1.0,
fZmin=0.0,
fZmax=0.0,
fErrorMode=0,
fNcells=None,
fBarOffset=0,
fBarWidth=1000,
fMaximum=-1111.0,
fMinimum=-1111.0,
fNormFactor=0.0,
fContour=None,
fOption="",
fFunctions=None,
fBufferSize=0,
fBuffer=None,
fBinStatErrOpt=0,
fStatOverflows=2,
fLineColor=602,
fLineStyle=1,
fLineWidth=1,
fFillColor=0,
fFillStyle=1001,
fMarkerColor=1,
fMarkerStyle=1,
fMarkerSize=1.0,
):
"""
Args:
fName (None or str): Temporary name, will be overwritten by the writing
process because Uproot's write syntax is ``file[name] = histogram``.
fTitle (str): Real title of the histogram.
data (numpy.ndarray or :doc:`uproot.models.TArray.Model_TArray`): Bin contents
with first bin as underflow, last bin as overflow. The dtype of this array
must be float64.
fEntries (float): Number of entries. (https://root.cern.ch/doc/master/classTH1.html)
fTsumw (float): Total Sum of weights.
fTsumw2 (float): Total Sum of squares of weights.
fTsumwx (float): Total Sum of weight*X.
fTsumwx2 (float): Total Sum of weight*X*X.
fTsumwy (float): Total Sum of weight*Y. (TH2 only: https://root.cern.ch/doc/master/classTH2.html)
fTsumwy2 (float): Total Sum of weight*Y*Y. (TH2 only.)
fTsumwxy (float): Total Sum of weight*X*Y. (TH2 only.)
fTsumwz (float): Total Sum of weight*Z. (TProfile2D only: https://root.cern.ch/doc/master/classTProfile2D.html)
fTsumwz2 (float): Total Sum of weight*Z*Z. (TProfile2D only.)
fSumw2 (numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array
of sum of squares of weights.
fBinEntries (numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Number
of entries per bin. (TProfile2D only.)
fBinSumw2 (numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array
of sum of squares of weights per bin. (TProfile2D only.)
fXaxis (:doc:`uproot.models.TH.Model_TAxis_v10`): Use :doc:`uproot.writing.to_TAxis`
with ``fName="xaxis"`` and ``fTitle=""``.
fYaxis (:doc:`uproot.models.TH.Model_TAxis_v10`): Use :doc:`uproot.writing.to_TAxis`
with ``fName="yaxis"`` and ``fTitle=""``.
fZaxis (None or :doc:`uproot.models.TH.Model_TAxis_v10`): None generates a
default for 1D and 2D histograms.
fScalefactor (float): Scale factor. (TH2 only.)
fZmin (float): Lower limit in Z (if set). (TProfile2D only.)
fZmax (float): Upper limit in Z (if set). (TProfile2D only.)
fErrorMode (int): Option to compute errors. (TProfile2D only.)
fNcells (None or int): Number of bins(1D), cells (2D) +U/Overflows. Computed
from ``data`` if None.
fBarOffset (int): (1000*offset) for bar charts or legos
fBarWidth (int): (1000*width) for bar charts or legos
fMaximum (float): Maximum value for plotting.
fMinimum (float): Minimum value for plotting.
fNormFactor (float): Normalization factor.
fContour (None or numpy.ndarray of numpy.float64 or :doc:`uproot.models.TArray.Model_TArrayD`): Array
to display contour levels. None generates an empty array.
fOption (str or :doc:`uproot.models.TString.Model_TString`): Histogram options.
fFunctions (None, list, or :doc:`uproot.models.TList.Model_TList`): ->Pointer to
list of functions (fits and user). None generates an empty list.
fBufferSize (None or int): fBuffer size. Computed from ``fBuffer`` if None.
fBuffer (None or numpy.ndarray of numpy.float64): Buffer of entries accumulated
before automatically choosing the binning. (Irrelevant for serialization?)
None generates an empty array.
fBinStatErrOpt (int): Option for bin statistical errors.
fStatOverflows (int): Per object flag to use under/overflows in statistics.
fLineColor (int): Line color. (https://root.cern.ch/doc/master/classTAttLine.html)
fLineStyle (int): Line style.
fLineWidth (int): Line width.
fFillColor (int): Fill area color. (https://root.cern.ch/doc/master/classTAttFill.html)
fFillStyle (int): Fill area style.
fMarkerColor (int): Marker color. (https://root.cern.ch/doc/master/classTAttMarker.html)
fMarkerStyle (int): Marker style.
fMarkerSize (float): Marker size.
This function is for developers to create TProfile2D objects that can be
written to ROOT files, to implement conversion routines.
"""
th2x = to_TH2x(
fName=fName,
fTitle=fTitle,
data=data,
fEntries=fEntries,
fTsumw=fTsumw,
fTsumw2=fTsumw2,
fTsumwx=fTsumwx,
fTsumwx2=fTsumwx2,
fTsumwy=fTsumwy,
fTsumwy2=fTsumwy2,
fTsumwxy=fTsumwxy,
fSumw2=fSumw2,
fXaxis=fXaxis,
fYaxis=fYaxis,
fZaxis=fZaxis,
fScalefactor=fScalefactor,
fNcells=fNcells,
fBarOffset=fBarOffset,
fBarWidth=fBarWidth,
fMaximum=fMaximum,
fMinimum=fMinimum,
fNormFactor=fNormFactor,
fContour=fContour,
fOption=fOption,
fFunctions=fFunctions,
fBufferSize=fBufferSize,
fBuffer=fBuffer,
fBinStatErrOpt=fBinStatErrOpt,
fStatOverflows=fStatOverflows,
fLineColor=fLineColor,
fLineStyle=fLineStyle,
fLineWidth=fLineWidth,
fFillColor=fFillColor,
fFillStyle=fFillStyle,
fMarkerColor=fMarkerColor,
fMarkerStyle=fMarkerStyle,
fMarkerSize=fMarkerSize,
)
if not isinstance(th2x, uproot.models.TH.Model_TH2D_v4):
raise TypeError("TProfile2D requires an array of float64 (TArrayD)")
if isinstance(fBinEntries, uproot.models.TArray.Model_TArray):
tarray_fBinEntries = fBinEntries
else:
tarray_fBinEntries = to_TArray(fBinEntries)
if not isinstance(tarray_fBinEntries, uproot.models.TArray.Model_TArrayD):
raise TypeError("fBinEntries must be an array of float64 (TArrayD)")
if isinstance(fBinSumw2, uproot.models.TArray.Model_TArray):
tarray_fBinSumw2 = fBinSumw2
else:
tarray_fBinSumw2 = to_TArray(fBinSumw2)
if not isinstance(tarray_fBinSumw2, uproot.models.TArray.Model_TArrayD):
raise TypeError("fBinSumw2 must be an array of float64 (TArrayD)")
tprofile2d = uproot.models.TH.Model_TProfile2D_v8.empty()
tprofile2d._bases.append(th2x)
tprofile2d._members["fBinEntries"] = tarray_fBinEntries
tprofile2d._members["fErrorMode"] = fErrorMode
tprofile2d._members["fZmin"] = fZmin
tprofile2d._members["fZmax"] = fZmax
tprofile2d._members["fTsumwz"] = fTsumwz
tprofile2d._members["fTsumwz2"] = fTsumwz2
tprofile2d._members["fBinSumw2"] = tarray_fBinSumw2
tprofile2d._deeply_writable = th2x._deeply_writable
return tprofile2d
|
6886a90bc4e134d03f255c2fcec211dbdbd435b0
| 33,509 |
def decode_check(string: str, digestfunc=sha256d_32) -> bytes:
"""
Convert base58 encoded string to bytes and verify checksum.
"""
result = decode(string)
return verify_checksum(result, digestfunc)
|
e3cdca3d66a2822cf6d47756959f136c84bcae65
| 33,510 |
import pandas
def label_by_wic(grasp_wic, exclude_C0=False):
"""Label each grasp by the whiskers it contains
grasp_wic : DataFrame
Index: grasp keys. Columns: whisker. Values: binarized contact.
exclude_C0 : bool
If False, group by all whiskers.
If True, ignore C0, and group only by C1, C2, and C3.
But label C0-only contacts as C0.
"""
# Set grouping_whiskers
if exclude_C0:
grouping_whiskers = ['C1', 'C2', 'C3']
else:
grouping_whiskers = ['C0', 'C1', 'C2', 'C3']
# Init return variable
res = pandas.Series(
['blank'] * len(grasp_wic), index=grasp_wic.index).rename('label')
# Group
gobj = grasp_wic[['C0', 'C1', 'C2', 'C3']].groupby(grouping_whiskers)
for included_mask, sub_grasp_wic in gobj:
# Generate label by joining all whiskers in this group
label = '-'.join(
[w for w, w_in in zip(grouping_whiskers, included_mask) if w_in])
if label == '':
# This should only happen if exclude_C0 and on the C0 group
assert exclude_C0
assert (sub_grasp_wic['C0'] == 1).all()
            assert (sub_grasp_wic.drop('C0', axis=1) == 0).all().all()
# So label it C0
label = 'C0'
# Assign
res.loc[sub_grasp_wic.index] = label
# Error check
assert 'blank' not in res.values
return res
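# A small synthetic example (grasp keys and contact patterns made up for illustration):
import pandas
example_wic = pandas.DataFrame(
    {'C0': [1, 0, 1], 'C1': [0, 1, 1], 'C2': [0, 1, 0], 'C3': [0, 0, 0]},
    index=['g0', 'g1', 'g2'])
print(label_by_wic(example_wic))
# g0 -> 'C0', g1 -> 'C1-C2', g2 -> 'C0-C1'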
|
0f1e552c68be0b77bad442b2432e071a74db4947
| 33,511 |
def selectBestFeature(dataSet: list):
"""
    Compute the information gain of each feature and pick the best one.
    :param dataSet: the data set
:return:
"""
    # Skip the last column (the label column)
best_feature_idx = 0
gain = float('-inf')
for feature_idx in range(len(dataSet[0])-1):
gain_tmp = calcOneFeatureGain(dataSet, feature_idx)
if gain_tmp > gain:
best_feature_idx = feature_idx
gain = gain_tmp
return best_feature_idx
|
ced59e8748eb12a17951fb511c7cc6b8d5f88555
| 33,512 |
from typing import Callable
import json
def __interact_instances(client, action: Callable, status: str) -> dict:
"""
Interacts with the instances. It executes the callable from action.
:param client: Sagemaker boto3 client.
:param action: a callable (e.g. client.start_notebook_instance).
:param status: A status that will be verified before starting. If the
status of the instance is not this one, it will not be executed.
:return: A dictionary containing the status code and a body. Body shows
the amount of affected instances.
:raises UnknownError: if a ClientError occurs.
"""
try:
instances = client.list_notebook_instances()
interaction = 0
for instance in instances["NotebookInstances"]:
tags = __get_tags(client, instance["NotebookInstanceArn"])
if instance[
"NotebookInstanceStatus"
] == status and not __is_ignorable(tags):
interaction += 1
action(NotebookInstanceName=instance["NotebookInstanceName"])
return {
"status_code": 200,
"body": json.dumps(f"{interaction} instances affected"),
}
except ClientError:
raise UnknownError
|
f067ce6d4d32718e6d25edb735534b3427eb9cc6
| 33,513 |
def _chordfinisher(*args, **kwargs):
"""
Needs to run at the end of a chord to delay the variant parsing step.
http://stackoverflow.com/questions/
15123772/celery-chaining-groups-and-subtasks-out-of-order-execution
"""
return "FINISHED VARIANT FINDING."
|
b68d09e755c2da468b98ab0466821770d2f7f4a7
| 33,514 |
def plot_reg_path(model, marker='o', highlight_c="orange", include_n_coef=False, figsize=None, fontsize=None):
"""Plots path of an L1/L2 regularized sklearn.linear_model.LogisticRegressionCV.
Produces two adjacent plots.
The first is a plot of mean coefficient values vs penalization strength.
The second is a plot of performance vs penalization strength.
The second plot may include number of nonzero coefs, if specified by parameters.
Parameters
----------
model: sklearn.linear_model.LogisticRegressionCV instance
A fit LogisticRegressionCV with L1/L2 penalty for which plots are made
marker: matplotlib.markers format, default='o'
Marker type used in plots
highlight_c: matplotlib color format or None, default="orange"
If not None, the best penalization strength is highlighted by a bar of
color highlight_c.
include_n_coef: bool, default=False
If true, the second plot also includes the number of nonzero
coefficients vs penalization strength on a second axis on the right.
figsize: tuple or list of floats or None, default=None
Specifies the figure size for both plots combined.
fontsize: int or None, default=None
Specifies the font size used in labels and titles.
Returns
-------
fig, (ax1, ax2): matplotlib.pyplot.figure and matplotlib.axes for plots.
"""
# Validate object
check_object(model)
# Create subplots
fig, (ax1, ax2) = plt.subplots(1,2, figsize=figsize)
# Create title
fig.suptitle('Mean Logistic Regression Path Over Crossval Folds', fontsize=fontsize)
# First plot
plot_reg_path_coef(model, marker=marker, highlight_c=highlight_c, fontsize=fontsize, ax=ax1)
# Second plot
plot_reg_path_perf(model, marker=marker, highlight_c=highlight_c, include_n_coef=include_n_coef, fontsize=fontsize, ax=ax2)
return fig, (ax1, ax2)
|
f868bf2ef5375598e29675b42234a5be470235f0
| 33,515 |
def create_object_count(app=None):
"""fetches all models of the passed in app and returns a
dict containg the name of each class and the number of instances"""
if app:
models = ContentType.objects.filter(app_label=app)
result = []
for x in models:
modelname = x.model
try:
fetched_model = ContentType.objects.get(
app_label=app, model=modelname).model_class()
item = {
'name': modelname.title(),
'count': fetched_model.objects.count()
}
except Exception as e:
item = {
'name': x,
'count': e
}
try:
item['link'] = fetched_model.get_listview_url()
except AttributeError:
item['link'] = None
result.append(item)
return result
else:
result = [
{
'name': 'no parameter passed in',
'count': '1'
}
]
return result
|
a0697f9847e3a200fd235cbb000c7d67e711f9fd
| 33,516 |
def unique_name(prefix: str, collection) -> str:
"""
Prepares a unique name that is not
in the collection (yet).
Parameters
----------
prefix
The prefix to use.
collection
Name collection.
Returns
-------
A unique name.
"""
if prefix not in collection:
return prefix
for i in range(len(collection) + 1):
candidate = f"{prefix}{i:d}"
if candidate not in collection:
return candidate
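# Quick check: the bare prefix is already taken, so the first free numbered name is returned.
print(unique_name("layer", {"layer", "layer0"}))   # -> 'layer1'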
|
8438588bd17e097ffc5bbe7b88f4c2aba2363250
| 33,517 |
async def get_pic(image_type: str, group_id: int, sender: int) -> list:
"""
Return random pics message
Args:
image_type: The type of picture to return
image type list:
setu: hPics(animate R-15)
setu18: hPics(animate R-18)
real: hPics(real person R-15)
bizhi: Wallpaper(animate All age)
group_id: Group id
sender: Sender
Examples:
        assist_process = (await get_pic("setu"))[0]
        message = (await get_pic("real"))[1]
Return:
[
str: Auxiliary treatment to be done(Such as add statement),
MessageChain: Message to be send(MessageChain)
]
"""
async def color() -> str:
base_path = await get_config("setuPath")
pic_path = await random_pic(base_path)
return pic_path
async def color18() -> str:
base_path = await get_config("setu18Path")
pic_path = await random_pic(base_path)
return pic_path
async def real() -> str:
base_path = await get_config("realPath")
pic_path = await random_pic(base_path)
return pic_path
async def wallpaper() -> str:
base_path = await get_config("wallpaperPath")
pic_path = await random_pic(base_path)
return pic_path
switch = {
"setu": await color(),
"setu18": await color18(),
"real": await real(),
"bizhi": await wallpaper()
}
target_pic_path = switch[image_type]
message = MessageChain.create([
Image.fromLocalFile(target_pic_path)
])
await write_log(image_type, target_pic_path, sender, group_id, True, "img")
if image_type == "setu18":
operation = await get_setting(group_id, "r18Process")
if operation == "revoke":
return ["revoke", message]
elif operation == "flashImage":
message = MessageChain.create([
Image.fromLocalFile(target_pic_path).asFlash()
])
return ["None", message]
return ["None", message]
|
37f06676b508503ba54fab37e1e7418df0d144bc
| 33,518 |
def dispatch(req):
"""run the command specified in req.args; returns an integer status code"""
err = None
try:
status = _rundispatch(req)
except error.StdioError as e:
err = e
status = -1
ret = _flushstdio(req.ui, err)
if ret:
status = ret
return status
|
1874580fff23796b025fc2cd2ecce9f21a5af692
| 33,519 |
def get_settings(from_db=False):
"""
Use this to get latest system settings
"""
if not from_db and 'custom_settings' in current_app.config:
return current_app.config['custom_settings']
s = Setting.query.order_by(desc(Setting.id)).first()
app_environment = current_app.config.get('ENV', 'production')
if s is None:
set_settings(secret='super secret key', app_name='Open Event', app_environment=app_environment)
else:
current_app.config['custom_settings'] = make_dict(s)
if not current_app.config['custom_settings'].get('secret'):
set_settings(secret='super secret key', app_name='Open Event', app_environment=app_environment)
return current_app.config['custom_settings']
|
2154122bcdde3e0bc023103ed825d76234d489c1
| 33,520 |
def get_currency_crosses_list(base=None, second=None):
"""
    This function retrieves all the available currency crosses from Investing.com and returns them as a
    :obj:`list` containing just the names of the currency crosses, as read from the `currency_crosses.csv` file. Note
    that the filtering params are both base and second, which are the base and the second currency of the currency
cross, for example, in the currency cross `EUR/USD` the base currency is EUR and the second currency is USD. These
are optional parameters, so specifying one of them means that all the currency crosses where the introduced
currency is either base or second will be returned; if both are specified, just the introduced currency cross will
be returned if it exists. All the available currency crosses can be found at: https://www.investing.com/currencies/
Args:
base (:obj:`str`, optional):
symbol of the base currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the base currency matches the introduced one.
        second (:obj:`str`, optional):
symbol of the second currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the second currency matches the introduced one.
Returns:
:obj:`list` - currency_crosses_list:
The resulting :obj:`list` contains the retrieved data from the `currency_crosses.csv` file, which is
a listing of the names of the currency crosses listed in Investing.com, which is the input for data
retrieval functions as the name of the currency cross to retrieve data from needs to be specified.
In case the listing was successfully retrieved, the :obj:`list` will look like::
currency_crosses_list = [
'USD/BRLT', 'CAD/CHF', 'CHF/CAD', 'CAD/PLN', 'PLN/CAD', ...
]
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if currency crosses file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return currency_crosses_as_list(base=base, second=second)
|
da6588be01510c96d6e1dc85c82d7beed877002b
| 33,521 |
def define_git_repo(*, name: 'name',
repo, treeish=None):
"""Define [NAME/]git_clone rule."""
(define_parameter.namedtuple_typed(GitRepoInfo, name + 'git_repo')
.with_default(GitRepoInfo(repo=repo, treeish=treeish)))
relpath = get_relpath()
@rule(name + 'git_clone')
@rule.depend('//base:build')
def git_clone(parameters):
"""Clone and checkout git repo."""
drydock_src = parameters['//base:drydock'] / relpath
if not drydock_src.exists():
LOG.info('clone into: %s', drydock_src)
git_repo = parameters[name + 'git_repo']
scripts.git_clone(
repo=git_repo.repo,
local_path=drydock_src,
checkout=git_repo.treeish,
)
return GitRepoRules(git_clone=git_clone)
|
78dcd536c306020d2ffd6fe4355992b10fb0d9ad
| 33,522 |