content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import urllib
def getEntries(person):
""" Fetch a Advogato member's diary and return a dictionary in the form
{ date : entry, ... }
"""
parser = DiaryParser()
f = urllib.urlopen("http://www.advogato.org/person/%s/diary.xml" % urllib.quote(person))
s = f.read(8192)
while s:
parser.feed(s)
s = f.read(8192)
parser.close()
result = {}
for d, e in map(None, parser.dates, parser.entries):
result[d] = e
return result
|
9ed0b46aa694201817fd4c341a992c81d809abf5
| 25,051 |
def sum_values(p, K):
"""
sum the values in ``p``
"""
nv = []
for v in itervalues(p):
nv = dup_add(nv, v, K)
nv.reverse()
return nv
|
c92ac3492f0aa750879f899dde145918d4a9616d
| 25,052 |
def define_permit_price_targeting_constraints(m):
"""Constraints used to get the absolute difference between the permit price and some target"""
# Constraints to minimise difference between permit price and target
m.C_PERMIT_PRICE_TARGET_CONSTRAINT_1 = pyo.Constraint(
expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_1 >= m.P_POLICY_PERMIT_PRICE_TARGET - m.V_DUAL_PERMIT_MARKET)
m.C_PERMIT_PRICE_TARGET_CONSTRAINT_2 = pyo.Constraint(
expr=m.V_DUMMY_PERMIT_PRICE_TARGET_X_2 >= m.V_DUAL_PERMIT_MARKET - m.P_POLICY_PERMIT_PRICE_TARGET)
return m
|
eb31f63963e0a66491e31d3f4f8f816e21c47de9
| 25,053 |
def predict4():
"""Use Xception to label image"""
path = 'static/Images/boxer.jpeg'
img = image.load_img(path,target_size=(299,299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
preds = model.predict(x)
pclass = decode_predictions(preds, top=5)
result = str(pclass[0][0][1])
bad_chars=[';',':','_','!','*']
for i in bad_chars:
result = result.replace(i, ' ')
result = result.title()
print(result)
return result
|
033fcde3cb670b8a66b430451f6b341ae2e7b980
| 25,054 |
def augment_data(image, label, seg_label, perform_random_flip_and_rotate,
num_channels, has_seg_labels):
"""
Image augmentation for training. Applies the following operations:
    - Horizontally flip the image with probability 0.5
- Vertically flip the image with probability 0.5
- Apply random rotation
"""
if perform_random_flip_and_rotate:
if has_seg_labels:
image = tf.concat([image, tf.expand_dims(seg_label, -1)], 2)
image = tf.image.random_flip_left_right(image)
image = tf.image.random_flip_up_down(image)
rotate_angle = tf.random_shuffle([0.0, 90.0, 180.0, 270.0])[0]
image = tf.contrib.image.rotate(
image, rotate_angle * np.pi / 180.0, interpolation='BILINEAR')
if has_seg_labels:
seg_label = image[:, :, -1]
image = image[:,:,:num_channels]
return image, label, seg_label
|
c243ae36a1d38cd36131bbd2f51347d2d29ca9ff
| 25,055 |
def protobuf_open_channel(channel_name, media_type):
"""func"""
open_channel_request = pb.OpenChannelRequest()
open_channel_request.channel_name = channel_name
open_channel_request.content_type = media_type
return open_channel_request.SerializeToString()
|
0d665788cbc37d8a15c276c41d2c28e5c12ee2ea
| 25,056 |
def action(update, context):
"""A fun command to send bot actions (typing, record audio, upload photo, etc). Action appears at top of main chat.
Done using the /action command."""
bot = context.bot
user_id = update.message.from_user.id
username = update.message.from_user.name
admin = _admin(user_id)
if not admin:
return _for_admin_only_message(bot, user_id, username)
available_actions = ['RECORD_AUDIO', 'RECORD_VIDEO_NOTE', 'TYPING', 'UPLOAD_AUDIO',
'UPLOAD_DOCUMENT', 'UPLOAD_PHOTO', 'UPLOAD_VIDEO', 'UPLOAD_VIDEO_NOTE']
send_action = choice(available_actions)
bot.send_chat_action(chat_id=config["GROUPS"]["crab_wiv_a_plan"], action=send_action)
|
5df8260a8787293187bba86712bf07505f915f39
| 25,057 |
def borehole_vec(x, theta):
"""Given x and theta, return vector of values."""
(Hu, Ld_Kw, Treff, powparam) = np.split(theta, theta.shape[1], axis=1)
(rw, Hl) = np.split(x[:, :-1], 2, axis=1)
numer = 2 * np.pi * (Hu - Hl)
denom1 = 2 * Ld_Kw / rw ** 2
denom2 = Treff
f = ((numer / ((denom1 + denom2))) * np.exp(powparam * rw)).reshape(-1)
return f
|
15f39f80d7ead4bb807dbb5c365acb900bbf405d
| 25,059 |
import requests
import json
import pandas as pd
def read_datastore(resource_id):
"""
Retrieves data when the resource is part of the CKAN DataStore.
Parameters
----------
resource_id: str
Id for resource
Returns
----------
pd.DataFrame:
Data records in table format
"""
r = requests.get(
DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": 1}
)
n_records = json.loads(r.content)["result"]["total"]
r = requests.get(
DATASTORE_SEARCH_URL, params={"resource_id": resource_id, "limit": n_records}
)
r.encoding = "utf-8"
data_json = json.loads(r.content)["result"]["records"]
data_df = pd.DataFrame.from_records(data_json).fillna("")
return data_df
|
80ff1b26960e7d33b0a68d286769736617353881
| 25,060 |
import hashlib
import binascii
def new_server_session(keys, pin):
"""Create SRP server session."""
context = SRPContext(
"Pair-Setup",
str(pin),
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512,
bits_salt=128,
bits_random=512,
)
username, verifier, salt = context.get_user_data_triplet()
context_server = SRPContext(
username,
prime=constants.PRIME_3072,
generator=constants.PRIME_3072_GEN,
hash_func=hashlib.sha512,
bits_salt=128,
bits_random=512,
)
session = SRPServerSession(
context_server, verifier, binascii.hexlify(keys.auth).decode()
)
return session, salt
|
5c3c20269dce31b4f7132d123845d7a46354373f
| 25,061 |
def lenzi(df):
"""Check if a pandas series is empty"""
return len(df.index) == 0
|
561705e6ff0da3bfb03407a721f2aff71a4d42a1
| 25,062 |
def m_step(counts, item_classes, psuedo_count):
"""
Get estimates for the prior class probabilities (p_j) and the error
rates (pi_jkl) using MLE with current estimates of true item classes
See equations 2.3 and 2.4 in Dawid-Skene (1979)
Input:
counts: Array of how many times each rating was given by each rater
for each item
item_classes: Matrix of current assignments of items to classes
        psuedo_count: A pseudo count used to smooth the error rates. For each
rater k
and for each class i and class j, we pretend rater k has rated
psuedo_count examples with class i when class j was the true class.
Returns:
p_j: class marginals [classes]
pi_kjl: error rates - the probability of rater k giving
response l for an item in class j [observers, classes, classes]
"""
[nItems, nRaters, nClasses] = np.shape(counts)
# compute class marginals
class_marginals = np.sum(item_classes, axis=0) / float(nItems)
# compute error rates for each rater, each predicted class
# and each true class
error_rates = np.matmul(counts.T, item_classes) + psuedo_count
    # reorder axes so it's of size [nRaters x nClasses x nClasses]
error_rates = np.einsum('abc->bca', error_rates)
# divide each row by the sum of the error rates over all observation classes
sum_over_responses = np.sum(error_rates, axis=2)[:, :, None]
# for cases where an annotator has never used a label, set their sum over
# responses for that label to 1 to avoid nan when we divide. The result will
# be error_rate[k, i, j] is 0 if annotator k never used label i.
sum_over_responses[sum_over_responses == 0] = 1
error_rates = np.divide(error_rates, sum_over_responses)
return (class_marginals, error_rates)
|
00d93803dd7f3f56af47f8fb455613d223fe0a89
| 25,063 |
from typing import Callable
def make_parser(fn: Callable[[], Parser]) -> Parser:
"""
Make typed parser (required for mypy).
"""
return generate(fn)
|
491888a666718d84447ff9de1b215a9e9c0f8ff0
| 25,064 |
def mfcc_htk(y, sr, hop_length=2**10, window_length=22050, nmfcc=13, n_mels=26, fmax=8000, lifterexp=22):
"""
Get MFCCs 'the HTK way' with the help of Essentia
https://github.com/MTG/essentia/blob/master/src/examples/tutorial/example_mfcc_the_htk_way.py
Using all of the default parameters from there except the hop length (which shouldn't matter), and a much longer window length (which has been found to work better for covers)
Parameters
----------
window_length: int
Length of the window to use for the STFT
nmfcc: int
Number of MFCC coefficients to compute
n_mels: int
Number of frequency bands to use
fmax: int
Maximum frequency
Returns
-------
ndarray(nmfcc, nframes)
An array of all of the MFCC frames
"""
fftlen = int(2**(np.ceil(np.log(window_length)/np.log(2))))
spectrumSize= fftlen//2+1
zeroPadding = fftlen - window_length
w = estd.Windowing(type = 'hamming', # corresponds to htk default USEHAMMING = T
size = window_length,
zeroPadding = zeroPadding,
normalized = False,
zeroPhase = False)
spectrum = estd.Spectrum(size=fftlen)
mfcc_htk = estd.MFCC(inputSize = spectrumSize,
                        type = 'magnitude', # htk uses mel filterbank magnitude
warpingFormula = 'htkMel', # htk's mel warping formula
weighting = 'linear', # computation of filter weights done in Hz domain
highFrequencyBound = fmax, # 8000 is htk default
lowFrequencyBound = 0, # corresponds to htk default
numberBands = n_mels, # corresponds to htk default NUMCHANS = 26
numberCoefficients = nmfcc,
                        normalize = 'unit_max', # htk filter normalization to have constant height = 1
dctType = 3, # htk uses DCT type III
logType = 'log',
liftering = lifterexp) # corresponds to htk default CEPLIFTER = 22
mfccs = []
# startFromZero = True, validFrameThresholdRatio = 1 : the way htk computes windows
    for frame in estd.FrameGenerator(y, frameSize = window_length, hopSize = hop_length, startFromZero = True, validFrameThresholdRatio = 1):
spect = spectrum(w(frame))
mel_bands, mfcc_coeffs = mfcc_htk(spect)
mfccs.append(mfcc_coeffs)
return np.array(mfccs, dtype=np.float32).T
|
e3beb95a027e963549df6a164b0d99345137540b
| 25,065 |
def num_to_int(num):
"""
Checks that a numerical value (e.g. returned by robot) is an integer and
not a float.
Parameters
----------
num : number to check
Returns
-------
integer : num cast to an integer
Raises
------
    ValueError : if num is not an integer
"""
if num % 1 == 0:
return int(num)
else:
raise ValueError('Expecting integer. Got: "{0}" ({1})'
.format(num, type(num)))
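A brief usage sketch of the behaviour described above (assumes num_to_int is in scope):

num_to_int(4.0)   # -> 4
num_to_int(7)     # -> 7
num_to_int(2.5)   # raises ValueError: Expecting integer. Got: "2.5" (<class 'float'>)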
|
af470940eb035fe8dd0160dfe9614c2b6d060194
| 25,066 |
def shuffle_blocks(wmx_orig, pop_size=800):
"""
    Shuffles pop_size*pop_size blocks within the matrix
:param wmx_orig: original weight matrix
:param pop_size: size of the blocks kept together
:return: wmx_modified: modified weight matrix
"""
assert nPCs % pop_size == 0
np.random.seed(12345)
# get blocks
    n_pops = nPCs // pop_size
blocks = {}
for i in range(n_pops):
for j in range(n_pops):
blocks[i, j] = wmx_orig[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size]
# generate shuffling idx
x = np.linspace(0, n_pops-1, n_pops)
y = np.linspace(0, n_pops-1, n_pops)
np.random.shuffle(x)
np.random.shuffle(y)
# create block shuffled weight matrix
wmx_modified = np.zeros((nPCs, nPCs))
for i, id_i in enumerate(x):
for j, id_j in enumerate(y):
wmx_modified[i*pop_size:(i+1)*pop_size, j*pop_size:(j+1)*pop_size] = blocks[id_i, id_j]
return wmx_modified
|
aec38fe296b877ab79932aa675c5d04820e391af
| 25,067 |
def change():
"""
Change language
"""
lang = request.args.get("lang", None)
my_id = None
if hasattr(g, 'my') and g.my:
my_id = g.my['_id']
data = core.languages.change(lang=lang, my_id=my_id)
return jsonify(data)
|
a5186669db31b533e1ca9bc146b11d577be4f845
| 25,068 |
import shutil
def make_pkg(pkgname, context):
"""Create a new extension package.
:param pkgname: Name of the package to create.
:param context: Mapping with keys that match the placeholders in the
templates.
:return: True if package creation succeeded or a tuple with False and an
error message in case the creation failed.
:rtype: Bool or Tuple
"""
try:
shutil.copytree(TEMPLATE_DIRNAME, pkgname)
except (OSError, IOError, shutil.Error) as e:
return False, e.strerror
for f in TEMPLATE_FILES:
try:
write_template(pkgname, f, context)
except (OSError, IOError) as e:
return False, e.strerror
return True
|
6e2be6e991e2061a7b07e5e44a3479dbf0c2f1b1
| 25,069 |
def view_explorer_node(node_hash: str):
"""Build and send an induction query around the given node."""
node = manager.get_node_by_hash_or_404(node_hash)
query = manager.build_query_from_node(node)
return redirect_to_view_explorer_query(query)
|
3534d546ba540dcfc6db1110c0a4a1086515dc3d
| 25,070 |
import math
def encode_into_any_base(number, base, encoded_num):
"""Encode number into any base 2-36. Can be fractional or whole.
Parameters:
number: float -- integer representation of number (in base 10)
base: int -- base to convert to
encoded_num: str -- representation (so far) of number in base
Return: str -- string representation of number in the new base
"""
    # encoding the number if it's not fractional
if number % 1 == 0:
return encode_whole_number(number, base)
# encoding numbers that are fractional
else:
# first encoding the part that comes before the radix point
if not str(number)[0] == '0':
int_part = math.floor(number)
encoded_num += encode_whole_number(int_part, base)
# now cut off the integer from number, so it's just a fraction
number = number - int_part
# then encoding the decimal part of the number
return encode_into_any_base(number, base, encoded_num)
else:
# add the radix point to the answer
if encoded_num == '':
encoded_num += '0'
encoded_num += '.'
# convert the fractional part (of the overall number being encoded)
encoded_num += encode_fractional_number(number, base)
return encoded_num
|
f6dd94e173a3844dc6d858d1c6b360354624d3f1
| 25,071 |
def handle_forbidden(error: Forbidden) -> Response:
"""Render the base 403 error page."""
return respond(error.description, status=HTTPStatus.FORBIDDEN)
|
89c59dd66ce63ceef9e60cf8beea0da7895a0394
| 25,072 |
def format_timedelta(tdelta):
"""Return the timedelta as a 'HH:mm:ss' string."""
total_seconds = int(tdelta.total_seconds())
hours, remainder = divmod(total_seconds, 60*60)
minutes, seconds = divmod(remainder, 60)
return "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
|
852902e7972bcd13df8b60864ebcb2d75b2b259d
| 25,074 |
def video_data_to_df(videos_entries, save_csv):
"""
Creating a dataframe from the video data stored as tuples
:param videos_entries: (list) list of tuples containing topics, subtopics, videos and durations
:param save_csv: (boolean) condition to specify if the df is saved locally as a csv file
:return dfx: (dataframe) df with all data arranged in dataframe
"""
## Generating dataframe from tuples
dfx = pd.DataFrame(videos_entries)
    ## Assigning the df's column names based on videos_df_colnames
dfx.columns = videos_df_colnames
## Rounding the length values (mins)
dfx["video_length_[mins]"] = round(dfx["video_length_[mins]"], 2)
## Adding column with the video length time in hours
dfx["video_length_[hrs]"] = round(dfx["video_length_[mins]"]/60, 2)
## Sorting values
dfx.sort_values(by=["topic", "subtopic", "video"], inplace=True)
## Restarting index
dfx.reset_index(inplace=True, drop=True)
## Saving a local copy of the df
if save_csv:
dfx.to_csv(usmle_videos_csv_copy_path + usmle_2020_videos_df_filename)
return dfx
|
11bb3d9293aa3689286cde76aea8bfff72594639
| 25,075 |
def create_mysql_entitySet(username, databaseName):
""" Create a new entity set in the databaseName """
password = get_password(username)
entitySetName = request.json['entitySetName']
attributes = request.json['attributes']
addToSchema(request.get_json(),"mysql")
pks = []
sql = "CREATE TABLE " + username + "_" + databaseName + "." + entitySetName + " ("
for attribute in attributes:
print(attribute, attributes[attribute])
print(attributes[attribute]['DataType'])
sql += " " + attribute + " " + attributes[attribute]['DataType']
if attributes[attribute]['NN'] == 1:
sql += " NOT NULL"
if attributes[attribute]['AI'] == 1:
sql += " AUTO_INCREMENT"
if attributes[attribute]['PK'] == 1:
pks.append(attribute)
sql += ","
sql += "PRIMARY KEY (" + pks[0]
for i in range(1,len(pks)):
sql += "," + pks[i]
sql += "));"
try:
cnx = connectSQLServerDB(username, password, username + "_" + databaseName)
mycursor = cnx.cursor()
mycursor.execute(sql)
cnx.close()
return jsonify(success=1, message="Entity Set '" + entitySetName + "' created successfully")
except mysql.connector.Error as err:
return jsonify(success=0, error_code=err.errno, message=err.msg)
|
00510e850d4c10f24defe7af070c652d3b390b5c
| 25,076 |
def m6(X, Y, Xp, Yp, alpha=1.0, prev='ident', post='ident', **kwargs):
"""Computes a matrix with the values of applying the kernel
    :math:`m_6` between each pair of elements in :math:`X` and :math:`Y`.
Args:
X: Numpy matrix.
Y: Numpy matrix.
Xp: Numpy matrix with the probabilities of each category in *X*.
Yp: Numpy matrix with the probabilities of each category in *Y*.
alpha (float): Argument for the inverting function *h*.
prev (string): Function to transform the data before composing.
Values: ``'ident'``, ``'f1'`` or a function.
post (string): Function to transform the data after composing.
Values: ``'ident'``, ``'f1'``, ``'f2'`` or a function.
kwargs (dict): Arguments required by *prev* or *post*.
Return:
Numpy matrix of size :math:`m_X \\times m_Y`.
Since the code is vectorised any function passed in *prev* or *post*
must work on numpy arrays.
"""
h = lambda x: (1.0 - x ** alpha) ** (1.0 / alpha)
prevf = get_vector_function(prev, kwargs)
postf = get_vector_function(post, kwargs)
xm, xn = X.shape
ym, yn = Y.shape
Xp = h(Xp)
Yp = h(Yp)
G = np.zeros((xm, ym))
for i in range(xm):
I = np.tile(X[i], (ym, 1))
Ip = np.tile(Xp[i], (ym, 1))
EQ = I == Y
NE = I != Y
a = 2.0 * np.sum(prevf(Ip * EQ), axis=1)
b = np.sum(prevf(Ip * NE), axis=1)
c = np.sum(prevf(Yp * NE), axis=1)
        dx = np.sum(prevf(1.0 - Ip * NE), axis=1)
        dy = np.sum(prevf(1.0 - Yp * NE), axis=1)
d = dx + dy
apd = a + d
G[i, :] = apd / (apd + 2.0 * (b + c))
return postf(G)
|
94d1651500ec9177a14a2c8ad80abc6ca7c3948b
| 25,077 |
import time
import json
def serve_communications_and_statuses(erpnext_support_user, erpnext_support_issues, bench_site):
"""
returns a dict of support issue communications and statuses
response = {
"issue_name_1": {
"communications": [],
"status": "status",
"last_sync_on": "last_sync_on"
},
"issue_name_2": {
"communications": [],
"status": "status",
"last_sync_on": "last_sync_on"
}
}
"""
authenticate_erpnext_support_user(erpnext_support_user)
sync_time = get_datetime_str(now_datetime())
res = {}
time.sleep(5)
for erpnext_support_issue in json.loads(erpnext_support_issues):
if not erpnext_support_issue.get("frappe_issue_id"):
continue
# Sync Communications for Issue
fields = ["name", "subject", "content", "recipients", "has_attachment", "creation"]
filters = [
["reference_doctype", "=", "Issue"],
["reference_name", "=", erpnext_support_issue.get("frappe_issue_id")],
["communication_medium", "=", "Email"],
["sent_or_received", "=", "Sent"],
["creation", ">", get_datetime(erpnext_support_issue.get("last_sync_on"))]
]
        communications = frappe.get_all(doctype="Communication", filters=filters, fields=fields, order_by="creation ASC")
# Sync Attachments for Communications
communications = get_attachments(communications)
# Sync Status for Issue
frappe_issue = frappe.get_doc("Issue", erpnext_support_issue.get("frappe_issue_id"))
res[erpnext_support_issue.get("name")] = {
"communications": communications,
"status": "Open" if frappe_issue.get("status") not in ["Open", "Closed"] else frappe_issue.get("status"),
"priority": frappe_issue.get("priority"),
"resolution_by": get_datetime_str(frappe_issue.resolution_by) if frappe_issue.resolution_by else None,
"last_sync_on": sync_time,
"release": frappe_issue.get("release")
}
return json.dumps(res)
|
ceaeeb5a1f5cbe956aeaef681b5e37c3d4ed58d2
| 25,078 |
def answer_view(answerid):
"""route to view a specific answer"""
return jsonify({"answer":"Your updated answer: {} ".format(user_answers[answerid])})
|
82c7697bfe601b54dcb1fd9c8667565886a09c34
| 25,079 |
def jwk_factory(acct_priv_key_path: str) -> _JWKBase:
"""generate jwk object according private key file"""
with open(acct_priv_key_path, 'rb') as f:
acct_priv = serialization.load_pem_private_key(
data=f.read(),
password=None,
backend=default_backend()
)
if isinstance(acct_priv, rsa.RSAPrivateKey):
jwk = JWKRSA(
priv_key=acct_priv,
n=acct_priv.public_key().public_numbers().n,
e=acct_priv.public_key().public_numbers().e
)
elif isinstance(acct_priv, ec.EllipticCurvePrivateKey):
if isinstance(acct_priv.curve, ec.SECP256R1):
jwk = JWKES256(acct_priv)
else:
raise NotImplementedError(
f'ecdsa curve {acct_priv.curve} not implemented'
)
else:
raise TypeError(f'key type {type(acct_priv)} not supported')
return jwk
|
fc08dd7294ddb067534c05a7e13b26e053ac3c42
| 25,080 |
from pathlib import Path
def execute(
scan_definition: str | Path,
df: DataFrame,
*,
soda_server_client: SodaServerClient | None = None,
) -> ScanResult:
"""
Execute a scan on a data frame.
Parameters
----------
scan_definition : Union[str, Path]
The path to a scan file or the content of a scan file.
df: DataFrame
The data frame to be scanned.
soda_server_client : Optional[SodaServerClient] (default : None)
A soda server client.
Returns
-------
out : ScanResult
The scan results.
"""
scan_yml = create_scan_yml(scan_definition)
df.createOrReplaceTempView(scan_yml.table_name)
scan = create_scan(scan_yml, soda_server_client=soda_server_client)
scan.execute()
return scan.scan_result
|
7bf0bedfb8865de117565110be4225b502e2fed2
| 25,081 |
def jaccard_similarity(emb1: np.ndarray, emb2: np.ndarray) -> float:
""" 计算特征向量的Jaccard系数
:param emb1: shape = [feature,]
:param emb2: shape = [feature,]
    :return: Jaccard coefficient
"""
up = np.double(np.bitwise_and((emb1 != emb2), np.bitwise_or(emb1 != 0, emb2 != 0)).sum())
down = np.double(np.bitwise_or(emb1 != 0, emb2 != 0).sum())
d1 = (up / down)
return d1
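A small numeric sketch (assumes numpy imported as np). For binary vectors the value is the fraction of disagreeing positions among the positions where either vector is non-zero:

emb1 = np.array([1, 0, 1, 0])
emb2 = np.array([1, 1, 0, 0])
# 3 positions have a non-zero entry in either vector, 2 of them disagree -> 2/3
jaccard_similarity(emb1, emb2)  # ~0.6667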
|
18e95d7f14ca093892770364fc5af75b95bebe2a
| 25,082 |
from typing import Tuple
from typing import Dict
from typing import List
def _share_secret_int_indices(s_i: int, n: int, t: int) -> Tuple[Dict[int, int], List[PointG1]]:
""" Computes n shares of a given secret such that at least t + 1 shares are required for recovery
    of the secret. Additionally returns the commitments to the coefficients of the polynomial
    used to verify the validity of the shares.
    Assumes nodes use the indices [1, 2, ..., n].
    See the share_secret function for a generalized variant with arbitrary indices.
"""
coefficients = [s_i] + [
random_scalar() for j in range(t)
] # coefficients c_i0, c_i1, ..., c_it
def f(x: int) -> int:
""" evaluation function for secret polynomial
"""
return (
sum(coef * pow(x, j, CURVE_ORDER) for j, coef in enumerate(coefficients)) % CURVE_ORDER
)
shares = {x: f(x) for x in range(1, n + 1)}
commitments = [multiply(G1, coef) for coef in coefficients]
return shares, commitments
|
b822bd79337be741bbd626751f9d745b4b9e23fc
| 25,083 |
def auto_type(key, redis=None, default=None, o=True):
"""Returns datatype instance"""
if redis is None:
redis = config.redis
key = compress_key(key)
if redis.exists(key):
datatype = redis.type(key)
if datatype == 'string':
test_string = RedisString(key, redis=redis).data
if isinstance(test_string, dict):
datatype = 'dict-string'
elif isinstance(test_string, list):
datatype = 'list-string'
elif isinstance(test_string, basestring):
datatype = 'string'
elif isinstance(test_string, int):
datatype = 'string'
elif isinstance(test_string, float):
datatype = 'string'
return TYPE_MAP.get(datatype)(key, redis=redis, o=o)
else:
if default:
try:
return TYPE_MAP.get(default)(key, redis=redis, o=o)
except KeyError:
raise ValueError('Provide a valid default redis type.')
return None
|
3d1751c14c4b0c04d11ab265395dce94822558d8
| 25,084 |
from pathlib import Path
def get_user_data_dir(app_name=DEFAULT_APP_NAME, auto_create=True) -> Path:
"""
Get platform specific data folder
"""
return _get_user_dir(
app_name=app_name,
xdg_env_var='XDG_DATA_HOME', win_env_var='APPDATA',
fallback='~/.local/share', win_fallback='~\\AppData\\Roaming', macos_fallback='~/Library',
auto_create=auto_create
)
|
321b885983affcc5cf4d4baf0410ae9ad6b6f443
| 25,085 |
import copy
import numpy
def calculateDominantFrequency(signal, fs, fMin = 0, fMax = None, applyWindow = True,
fftZeroPaddingFactor = 1
):
"""
calculates the dominant frequency of the given signal
@param signal input signal
@param fs sampling frequency
@param fMin the minimum frequency [Hz] that should be considered
@param fMax the maximum frequency [Hz] that should be considered. If None
(default), we'll take half the Nyquist frequency.
@param applyWindow if True, we'll apply a HANN window before
calculating the FFT
@param fftZeroPaddingFactor if greater than one, we'll append the
appropriate number of zeros to the signal before calculating the FFT
"""
n = len(signal)
signalTmp = copy.deepcopy(signal)
if applyWindow:
fftWindow = createLookupTable(len(signalTmp), LOOKUP_TABLE_HANN)
signalTmp *= fftWindow
if fftZeroPaddingFactor > 1:
m = int(round(n * fftZeroPaddingFactor))
signalTmp = numpy.append(signalTmp, numpy.zeros(m - n))
spectrumX, spectrumY = calculateFFT(signalTmp, fs, len(signalTmp),
applyWindow = False, convertToDb = True,
spectrumType = AMPLITUDE_SPECTRUM)
binWidth = spectrumX[1] - spectrumX[0]
idx1 = 0
if fMin > 0:
idx1 = int(round(fMin / float(binWidth)))
idx2 = -1
    if fMax is not None and fMax > 0:
idx2 = int(round(fMax / float(binWidth)))
domFreq = numpy.nan
try:
domFreq, dummy = generalUtility.findArrayMaximum(spectrumY, idx1, idx2, doInterpolate = True)
domFreq *= binWidth
except Exception as e:
pass
# domFreq = None
# eMax = None
# if fMax is None:
# fMax = fs / 2.0
# for i in range(len(spectrumY)):
# f = spectrumX[i]
# if f >= fMin and f <= fMax:
# if domFreq is None:
# domFreq = spectrumX[i]
# eMax = spectrumY[i]
# else:
# if spectrumY[i] > eMax:
# domFreq = spectrumX[i]
# eMax = spectrumY[i]
# print domFreq, domFreq2
return domFreq
|
ab5f2818d309202f57230197c87c54b67a0f849c
| 25,087 |
def check_title(file_path):
"""
    Return 'has title' if a title is found, 'no title' if not,
    and None if the file is not a UTF-8 text file.
    file_path is the full path including the file name and extension.
"""
#print('is text file: ', tool.is_utf8_text_file(file_path))
if tool.is_utf8_text_file(file_path):
with open(file_path, 'r') as f:
text = f.read()
head = text[:300]
if tool.has_title(head):
return 'has title'
else:
return 'no title'
return None
|
4559772c1e50e807935c6112cfa6001a857b9dc4
| 25,088 |
from typing import Tuple
import torch
def permute_adjacency_twin(t1,t2) -> Tuple[torch.Tensor,torch.Tensor]:
"""
Makes a permutation of two adjacency matrices together. Equivalent to a renaming of the nodes.
Supposes shape (n,n)
"""
n,_ = t1.shape
perm = torch.randperm(n)
return t1[perm,:][:,perm],t2[perm,:][:,perm]
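A short torch sketch: the same random relabelling is applied to both matrices, so their paired structure is preserved.

import torch

t1 = torch.arange(9.).reshape(3, 3)
t2 = torch.eye(3)
p1, p2 = permute_adjacency_twin(t1, t2)
# the identity matrix is invariant under any simultaneous row/column permutation
assert torch.equal(p2, torch.eye(3))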
|
df3dc6507b8eae9d148ec9b2e664a427813d93a7
| 25,089 |
from collections import defaultdict,deque
def rad_extract(eventfiles,center,radius_function,return_cols=['PULSE_PHASE'],cuts=None,apply_GTI=True,theta_cut=66.4,zenith_cut=105,return_indices=False):
""" Extract events with a radial cut.
Return specified columns and perform additional boolean cuts.
Return is in form of a dictionary whose keys are column names
(and 'DIFFERENCES') and values are numpy arrays with the column
values. These will have been concatenated if there are multiple
FT1 files.
========= =======================================================
Argument Description
========= =======================================================
eventfiles -- a list of FT1 filenames
center -- a SkyDir giving the center of the radial cut
    radius_function -- can be either a float specifying a cookie-cutter
radial cut, or a function taking as arguments the energy
    and event_class and specifying the radius in degrees, e.g.
def radius(energy,event_class):
return numpy.where(event_class,2,1)*(energy/1000)**-0.75
========= =======================================================
Keyword Description
========= =======================================================
return_cols ['RA','DEC','ENERGY','EVENT_CLASS','PULSE_PHASE'] -
a list of FT1 column names to return
cuts None - an optional list of boolean cuts to apply,
e.g., ['ENERGY > 100']
NB -- cuts not yet implemented!!
no_cuts [False] do not apply default zenith and incidence angle cuts
apply_GTI [True] accept or reject an event based on GTI if True;
else ignore GTI
return_indices [False] if True, return an array giving the index in the
original file of each event; obviously only useful in the
case of a single event file
========= =======================================================
"""
if not hasattr(radius_function,'__call__'):
simple_scalar = True
rval = radius_function
radius_function = lambda e,event_class: rval
else:
simple_scalar = False
eventfiles = __FITS_parse__(eventfiles)
coldict = defaultdict(deque)
cols = {}
cut_cols = ['ZENITH_ANGLE','THETA','TIME']
keys = list(set(['RA','DEC','ENERGY','CONVERSION_TYPE']+cut_cols+return_cols))
accepted = 0
total = 0
for eventfile in eventfiles:
#e = pf.open(eventfile,memmap=1)
#nrows = e[1].data.shape[0]
#e.close()
nrows = pyfits.getheader(eventfile,'EVENTS')['NAXIS2']
for key in keys:
cols[key] = np.empty(nrows,dtype=float)
PythonUtilities.get_float_col(cols[key],eventfile,'EVENTS',key)
rad = radius_function(cols['ENERGY'],cols['CONVERSION_TYPE'])
tmask = trap_mask(cols['RA'],cols['DEC'],center,rad)
tmask &= (cols['ZENITH_ANGLE'] < zenith_cut) & (cols['THETA'] < theta_cut)
if apply_GTI:
tmask &= get_gti_mask(eventfile,cols['TIME'])
print ('GTI will remove %d of %d photons.'%((~tmask).sum(),len(tmask)))
if simple_scalar:
rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad)
else:
rmask,diffs = rad_mask(cols['RA'][tmask],cols['DEC'][tmask],center,rad[tmask])
for key in keys:
coldict[key].append(cols[key][tmask][rmask])
if return_indices:
if 'EVENT_INDICES' not in return_cols:
return_cols.append('EVENT_INDICES')
coldict['EVENT_INDICES'].append(np.arange(len(tmask))[tmask][rmask])
coldict['DIFFERENCES'].append(diffs)
accepted += tmask.sum()
total += len(tmask)
for key in coldict.keys():
if (key in cut_cols) and not (key in return_cols):
cols.pop(key)
continue
cols[key] = np.concatenate([x for x in coldict[key]])
if key in INT_TYPES: cols[key] = cols[key].astype(int)
print ('Cuts removed %d of %d photons.'%(total-accepted,total))
return cols
|
bb0a5f96764c0a1edec1f408f283a2473ed630bf
| 25,090 |
import re
def list_to_exp(str_list, term_padding_exp=r'\b', compile=True):
"""
Returns a regular expression (compiled or not) that will catch any of the strings of the str_list.
Each string of the str_list will be surrounded by term_padding_exp (default r'\b' forces full word matches).
Note: Also orders the strings according to length so that no substring will overshadow a superstring.
"""
str_list = util_ulist.sort_as(str_list, list(map(len, str_list)), reverse=True)
exp = term_padding_exp + '(' + '|'.join(str_list) + ')' + term_padding_exp
if compile:
return re.compile(exp)
else:
return exp
|
f9a1d7002a36f0348179b9997c5dec672455f077
| 25,092 |
def prepare_ddp_loader(loader: DataLoader, num_processes: int, process_index: int) -> DataLoader:
"""
Transfers loader to distributed mode. Experimental feature.
Args:
        loader: pytorch dataloader
num_processes (:obj:`int`, `optional`, defaults to 1):
The number of processes running concurrently.
process_index (:obj:`int`, `optional`, defaults to 0):
The index of the current process.
Returns:
        DataLoader: pytorch dataloader with distributed batch sampler.
"""
ddp_dataset = loader.dataset
# Iterable dataset doesn't like batch_sampler, but DataLoader creates a default one for it
if isinstance(ddp_dataset, IterableDataset):
ddp_batch_sampler = None
else:
ddp_batch_sampler = BatchSamplerShard(
loader.batch_sampler,
num_processes=num_processes,
process_index=process_index,
)
# We ignore all of those since they are all dealt with by our new_batch_sampler
ignore_kwargs = [
"batch_size",
"shuffle",
"sampler",
"batch_sampler",
"drop_last",
"generator",
]
kwargs = {
k: getattr(loader, k, _PYTORCH_DATALOADER_KWARGS[k])
for k in _PYTORCH_DATALOADER_KWARGS
if k not in ignore_kwargs
}
# Need to provide batch_size as batch_sampler is None for Iterable dataset
if ddp_batch_sampler is None:
kwargs["drop_last"] = loader.drop_last
kwargs["batch_size"] = loader.batch_size
loader = DataLoader(dataset=ddp_dataset, batch_sampler=ddp_batch_sampler, **kwargs)
return loader
|
4f57b1888fdf43fcb910d802faee8ba997ee095f
| 25,093 |
import logging
def __validate_exchange(value: str) -> str:
"""
Check to see if passed string is in the list of possible Exchanges.
:param value: Exchange name.
:return: Passed value or No Return
"""
valid_values = EXCHANGE_VALUES
if value in valid_values:
return value
else:
logging.error(
f"Invalid exchange value: {value}. Valid options: {valid_values}"
)
|
001472e1485da0fc410dceafa67b78fe5dfe1058
| 25,094 |
def main(content, title="", classes=[]):
"""Generate a 'Material for MkDocs' admonition.
"""
md = markdown.markdown(content)
return '<div class="admonition {0}">\n'.format(" ".join(classes)) + \
' <p class="admonition-title">{0}</p>\n'.format(title) + \
' <p>{0}</p>\n'.format(md) + \
'</div>'
|
e29942de52b73d8652a54c64dd22c8bac6e8496c
| 25,095 |
def get_bprop_matrix_set_diag(self):
"""Generate bprop for MatrixSetDiag"""
get_dtype = P.DType()
def bprop(x, y, z, out, dout):
input_shape = F.shape(x)
batch_shape = input_shape[:-2]
matrix_shape = input_shape[-2:]
diag_shape = batch_shape + (_get_min(matrix_shape),)
grad_shape = F.shape(dout)
grad_dtype = get_dtype(dout)
assist = _get_matrix_diag_part_assist(grad_shape, grad_dtype)
dx = inner.MatrixSetDiag()(dout, P.Zeros()(diag_shape, grad_dtype), assist)
dy = inner.MatrixDiagPart()(dout, assist)
dz = zeros_like(z)
return dx, dy, dz
return bprop
|
c35f69a957b30bcefeba858e7e9bd4ee9e4591b8
| 25,096 |
from yt import load_particles
def fake_sph_grid_ds(hsml_factor=1.0):
"""Returns an in-memory SPH dataset useful for testing
This dataset should have 27 particles with the particles arranged uniformly
on a 3D grid. The bottom left corner is (0.5,0.5,0.5) and the top right
corner is (2.5,2.5,2.5). All particles will have non-overlapping smoothing
regions with a radius of 0.05, masses of 1, and densities of 1, and zero
velocity.
"""
npart = 27
x = np.empty(npart)
y = np.empty(npart)
z = np.empty(npart)
tot = 0
for i in range(0, 3):
for j in range(0, 3):
for k in range(0, 3):
x[tot] = i + 0.5
y[tot] = j + 0.5
z[tot] = k + 0.5
tot += 1
data = {
"particle_position_x": (x, "cm"),
"particle_position_y": (y, "cm"),
"particle_position_z": (z, "cm"),
"particle_mass": (np.ones(npart), "g"),
"particle_velocity_x": (np.zeros(npart), "cm/s"),
"particle_velocity_y": (np.zeros(npart), "cm/s"),
"particle_velocity_z": (np.zeros(npart), "cm/s"),
"smoothing_length": (0.05 * np.ones(npart) * hsml_factor, "cm"),
"density": (np.ones(npart), "g/cm**3"),
"temperature": (np.ones(npart), "K"),
}
bbox = np.array([[0, 3], [0, 3], [0, 3]])
return load_particles(data=data, length_unit=1.0, bbox=bbox)
|
9f32616d325fde7941cbcea814b3133fbcc988e5
| 25,097 |
async def _async_get_image_sessions(device: Device) -> dict[str, ImageSession]:
"""Return image events for the device."""
events = await device.event_media_manager.async_image_sessions()
return {e.event_token: e for e in events}
|
4406abc1ac08d39bb0127be1d02f5c664c167e04
| 25,098 |
def make_element_weight_parser(weight_column):
""" Parameterize with the column - this allows us
to generate data from different analysis result types.
"""
def parse_element_weight(csv_row):
name = csv_row[0]
weight = float(csv_row[weight_column]) # Assert not zero?
return name, weight
return parse_element_weight
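A usage sketch with a hypothetical CSV row layout where the weight sits in column 2:

parse_row = make_element_weight_parser(weight_column=2)
# row format assumed here: [element_name, count, weight]
parse_row(["Fe", "10", "55.8"])  # -> ('Fe', 55.8)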
|
ddc3a4f82ecd0fe4833683759b1a1c4296839a54
| 25,099 |
def jitterer(out, z):
"""This function jitters the x axis
1: matrix of layer activations of the form:
2. which layer number to do
outputs a transposed matrix of no of neurons rows and no of data columns"""
Jx=np.ones(out[z].T.shape)
for i in range(out[z].T.shape[0]):
'this is the number of neurons'
for j in range(out[z].T.shape[1]):
'this is the number of data'
Jx[i,j] = i + 1 + np.random.uniform(-0.25,0.25)
return Jx
|
65b1337e42dab802a0c91bc27d93249171569281
| 25,100 |
from pathlib import PurePosixPath
from typing import Tuple
def deconstruct_full_path(filename: str) -> Tuple[str, str]:
"""
Returns a tuple with the parent folder of the file and the file's name.
Parameters
----------
filename : str
The path (with filename) that will be deconstructed.
Returns
-------
Tuple[str, str]
A tuple where the first element is the path of the parent folder, and the second is the
file's name.
"""
posix_path = PurePosixPath("/") / filename
return str(posix_path.parent), posix_path.name
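A brief usage sketch:

deconstruct_full_path("folder/subfolder/data.csv")  # -> ('/folder/subfolder', 'data.csv')
deconstruct_full_path("data.csv")                   # -> ('/', 'data.csv')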
|
d33a8fc71beb39d56dc0aa9bf94264164e8bf1a9
| 25,101 |
def bbx_to_world(cords, vehicle):
"""
Convert bounding box coordinate at vehicle reference to world reference.
Parameters
----------
cords : np.ndarray
Bounding box coordinates with 8 vertices, shape (8, 4)
vehicle : opencda object
Opencda ObstacleVehicle.
Returns
-------
bb_world_cords : np.ndarray
Bounding box coordinates under world reference.
"""
bb_transform = Transform(vehicle.bounding_box.location)
# bounding box to vehicle transformation matrix
bb_vehicle_matrix = x_to_world_transformation(bb_transform)
# vehicle to world transformation matrix
vehicle_world_matrix = x_to_world_transformation(vehicle.get_transform())
# bounding box to world transformation matrix
bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)
# 8 vertices are relative to bbx center, thus multiply with bbx_2_world to
# get the world coords.
bb_world_cords = np.dot(bb_world_matrix, np.transpose(cords))
return bb_world_cords
|
3d7438beccca9635fc15b266d2e8ada6bbc053c7
| 25,102 |
def load_data():
""" Loading data and padding """
training_set, testing_set = imdb.load_data(num_words = 10000)
x_train, y_train = training_set
x_test, y_test = testing_set
x_train_padded = sequence.pad_sequences(x_train, maxlen = 100)
x_test_padded = sequence.pad_sequences(x_test, maxlen = 100)
return x_train_padded, y_train, x_test_padded, y_test
|
ca52118f7038a70386e9ca552faf24dac6be9faf
| 25,103 |
def rotate_to_calibrated_axis(
data: np.ndarray, ref_val_0: complex, ref_val_1: complex
) -> np.ndarray:
"""
Rotates, normalizes and offsets complex valued data based on calibration points.
Parameters
----------
data
An array of complex valued data points.
ref_val_0
The reference value corresponding to the 0 state.
ref_val_1
The reference value corresponding to the 1 state.
Returns
-------
:
Calibrated array of complex data points.
"""
    rotation_angle = np.angle(ref_val_1 - ref_val_0)
    norm = np.abs(ref_val_1 - ref_val_0)
    offset = ref_val_0 * np.exp(-1j * rotation_angle) / norm
    corrected_data = data * np.exp(-1j * rotation_angle) / norm - offset
return corrected_data
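A numeric sketch (assumes numpy as np): the two reference points map to 0 and 1 on the real axis, and intermediate points follow the same rotation and scaling.

ref0, ref1 = 1 + 1j, 3 + 3j
data = np.array([ref0, ref1, 2 + 2j])
rotate_to_calibrated_axis(data, ref0, ref1)  # -> approx. [0.+0.j, 1.+0.j, 0.5+0.j]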
|
82adf83c9565ec56ae6f13e1eec15c1be90f5dc4
| 25,104 |
from typing import Optional
from typing import Tuple
from typing import List
def filter_graph_data(df: pd.DataFrame, x_col: str, x_range: Optional[Tuple[int, int]], file_cols: List[str],
file_tuple: FileTuple) -> Optional[pd.DataFrame]:
"""
Filter data relevant for the graph from the dataframe.
:param df: The dataframe to filter
:param x_col: Name of the column that has the data for the x-axis, only used if x_range is given
:param x_range: (min, max) tuple for filtering the values for the x-axis, or None for no filter
:param file_cols: Column names that define values for which separate graphs are generated
:param file_tuple: The set of values for the file_cols that are used in this graph
:return:
"""
gdf_filter = True
if x_range is not None:
gdf_filter = (df[x_col] >= x_range[0]) & (df[x_col] < x_range[1])
for col_name, col_val in zip(file_cols, file_tuple):
gdf_filter &= df[col_name] == col_val
gdf = df.loc[gdf_filter]
return None if gdf.empty else gdf
|
200e19d73ae04c4ceabae6d0d65ccd034f368e15
| 25,105 |
def get_question_summary_from_model(question_summary_model):
"""Returns a domain object for an Oppia question summary given a
question summary model.
Args:
question_summary_model: QuestionSummaryModel. The QuestionSummary model
object to fetch corresponding QuestionSummary domain object.
Returns:
QuestionSummary. The domain object corresponding to the given question
summary model.
"""
return question_domain.QuestionSummary(
question_summary_model.id,
question_summary_model.question_content,
question_summary_model.misconception_ids,
question_summary_model.interaction_id,
question_summary_model.question_model_created_on,
question_summary_model.question_model_last_updated
)
|
65cce3d4440ebea81f5a777dcdec80c61b06e83b
| 25,106 |
def refraction(alt_degrees, temperature_C, pressure_mbar):
"""Given an observed altitude, return how much the image is refracted.
Zero refraction is returned both for objects very near the zenith,
as well as for objects more than one degree below the horizon.
"""
r = 0.016667 / tan((alt_degrees + 7.31 / (alt_degrees + 4.4)) * DEG2RAD)
d = r * (0.28 * pressure_mbar / (temperature_C + 273.0))
return where((-1.0 <= alt_degrees) & (alt_degrees <= 89.9), d, 0.0)
|
d413aba8e238b81c5a8076460cc35ae56617f148
| 25,108 |
def reduce_mem_usage(df):
"""
iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024**2
logger.info('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024**2
logger.info('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
logger.info('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
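A minimal sketch of a call on a toy frame, assuming pandas/numpy are imported and the module-level logger the function uses is configured:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "small_int": np.arange(100),        # downcast to int8
    "ratio": np.random.rand(100),       # downcast to float16
    "label": ["a", "b"] * 50,           # converted to category
})
df = reduce_mem_usage(df)
df.dtypes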
|
d78e0de83c85bc7495141be428bd54a0b86a2564
| 25,109 |
def retry_pattern():
"""Retry pattern decorator used when connecting to snowflake
"""
return backoff.on_exception(backoff.expo,
snowflake.connector.errors.OperationalError,
max_tries=5,
on_backoff=log_backoff_attempt,
factor=2)
|
78375f5f634f2826edd9c72fa20e2bb2d760b534
| 25,110 |
def get_vrf_route_targets(
device, address_family, rt_type, vrf=None, route_distinguisher=None
):
""" Get route target value from a device
Args:
address_family ('str'): address family value
rt_type ('str'): route target type
ex.) rt_type = 'import' OR
rt_type = 'export' OR
rt_type = 'both'
vrf('str'): vrf name
route_distinguisher ('str'): route distinguisher value
Returns:
Route target value
None
Raises:
None
"""
log.info(
"Getting route target of type {rt_type} for device {dev_name}".format(
rt_type=rt_type, dev_name=device.name
)
)
cli_command = ["show vrf detail {vrf}", "show vrf detail"]
if vrf:
cmd = cli_command[0].format(vrf=vrf)
else:
cmd = cli_command[1]
try:
raw_out = device.execute(cmd)
out = device.parse(cmd, output=raw_out)
except SchemaEmptyParserError:
return None
if not vrf:
vrf = "default"
try:
if not route_distinguisher:
route_distinguisher = out[vrf]["route_distinguisher"]
if "multicast" not in raw_out:
address_family = address_family.split()[0]
route_targets = out[vrf]["address_family"][address_family][
"route_targets"
][route_distinguisher]
if (
route_targets["rt_type"] == rt_type
or route_targets["rt_type"] == "both"
):
return route_targets["route_target"]
except KeyError as e:
return None
return None
|
d06b40220c8cc5c44c5ef4ab1e7a60057791dda5
| 25,111 |
def _format_field(value, parts, conv, spec, want_bytes=False):
"""Format a replacement field."""
for k, part, _ in parts:
if k:
if part.isdigit():
value = value[int(part)]
else:
value = value[part]
else:
value = getattr(value, part)
if conv:
value = ((conv == 'r') and '%r' or '%s') % (value,)
if hasattr(value, '__format__'):
value = value.__format__(spec)
elif hasattr(value, 'strftime') and spec:
value = value.strftime(str(spec))
else:
value = _strformat(value, spec)
if want_bytes and isinstance(value, unicode):
return str(value)
return value
|
d7c7bdf86b3b09800a4147d166584e81d7300c4f
| 25,113 |
def mixed_string_list_one_valid():
"""Return mixed strings."""
return _MIXED_STRING_LISTS_ONE_VALID_
|
c1f0ae91f761213a6d7674ec80e24befc0b959a4
| 25,114 |
def make_parser():
"""Create the argument parser, derived from the general scripts parser."""
parser = get_parser(
__doc__,
('A file containing a list of files/file paths to be read. These '
'should be nxml or txt files.')
)
parser.add_argument(
dest='output_name',
help=('Results will be pickled in files '
'<output_name>_stmts.pkl and <output_name>_readings.pkl.')
)
return parser
|
4bb2320708728bbf277bd9de380bdf6b1ead5a8b
| 25,115 |
import pathlib
def script_names():
"""Returns the sequence of example script names."""
result = [str(pathlib.Path(s).with_suffix('.py')) for s in _stem_names()]
return result
|
cdb4ab63718135fa98adbfbe8a1a237f6f5ad031
| 25,116 |
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / (union)
#print('jac: ', jaccard.max(), jaccard.min())
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
|
5226660a77d5753346bedaecf786644eda296b74
| 25,117 |
def expand_image(_img, block, stride, deform=True):
"""
Args:
_img: numpy array
block: size of the blocks required
stride: step size
Returns: array of blocks
"""
if deform:
_img=_img.astype('float32')
ims_Z=np.zeros([_img.shape[0],block[1],block[0]])
f_img=np.zeros([block[2],block[1],block[0]])
for z in range(0,_img.shape[0]):
ims_Z[z,:,:]=cv2.resize(_img[z,:,:], (block[0],block[1]))
for x in range(0,ims_Z.shape[2]):
f_img[:,:,x]=cv2.resize(ims_Z[:,:,x], (block[1],block[2]))
f_img=[f_img]
else:
to_pad = []
pad = False
for i in range(len(_img.shape)):
if _img.shape[i] < block[i]:
pad = True
to_pad.append(block[i])
else:
to_pad.append(_img.shape[i])
if pad:
print(f"Enttire image must be padded: {_img.shape}, must be padded")
_img = pad_nd_image(_img, new_shape=to_pad)
a_img = view_as_windows(_img, block, step=stride)
f_img = a_img.reshape(-1, *a_img.shape[-3:])
# Make sure blocks are padded
for s in f_img:
if s.shape != block:
print(f"Shape: {s.shape}, must be padded to match: {block}")
s = pad_nd_image(s, new_shape=block)
assert s.shape == block, "Padding failed"
return f_img
|
0ffbbfe2691be69980a334d1d32f4743dfd79de0
| 25,118 |
def disabled(reason='No reason given'):
"""Decorator that disables a command."""
# pylint:disable=missing-docstring,unused-argument
def actual_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
raise DisabledCommandException('This command is disabled: %s' % reason)
wrapper.tag = Tag.disabled
wrapper.original_func = func
return wrapper
return actual_decorator
|
32753d4d58ee11f12eb32acabdb692640b93bab7
| 25,119 |
def get_tensor_model_parallel_group():
"""Get the tensor model parallel group the caller rank belongs to."""
assert _TENSOR_MODEL_PARALLEL_GROUP is not None, \
'intra_layer_model parallel group is not initialized'
return _TENSOR_MODEL_PARALLEL_GROUP
|
ecf9e212995f09fe9d6a5482213dba3d1071ba80
| 25,120 |
import re
import string
def normalize(data):
"""Normalizes the values of incoming data
Args:
data (dict): Dictionary of response data
Returns:
dict
"""
normalized_data = {}
for key in data:
value = str(data[key])
key = key.lower()
# Strip all fields and reduce multiple spaces to a single whitespace
value = value.strip()
value = re.sub(r"\s+", " ", value)
if key == "name":
value = string.capwords(value)
elif key == "age":
if value is not None and len(value) > 0:
value = int(value)
else:
value = None
elif key in ("gender", "favorite_colors"):
value = value.lower()
if key in ("email", "favorite_colors", "finished"):
value = value.replace(" ", "")
if key == "finished":
            value = value.capitalize() == "True"
normalized_data[key] = value
return normalized_data
|
ed88050001e6ea65b77d8381b2fd247918ed8f37
| 25,121 |
def five_fold(data_set):
"""[summary]
Args:
data_set (List of Sample objects): The Samples to be partitioned
Returns:
fold: where fold is list of len n in n-fold of (train,test) where train and test are lists of Samples
"""
partition_index = int( len(data_set) / 5 )
s = 0
fold = []
for i in range(5): #0-4
tr = data_set.copy()
n = s + partition_index # was -1
te = tr[s:n]
del tr[s:s + partition_index]
fold.append( (tr,te) )
s += partition_index
return fold
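A quick sketch of the partitioning on a toy list:

folds = five_fold(list(range(10)))
len(folds)          # 5 (train, test) tuples
train, test = folds[0]
test                # [0, 1]  -- the first fifth is held out
train               # [2, 3, 4, 5, 6, 7, 8, 9]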
|
d4179c238da3e9ebe05ab3513b80bcce982c8728
| 25,122 |
import re
def get_english_info(content_section):
"""
The english source section can have multiple publishers and volume counts. The criteria is that
the publisher with the largest volume count is most likely the one we want so sort the lines in
the section and grab data from the first line.
"""
english_section = [m.strip("\n") for m in content_section[24] if type(m) is bs4.element.NavigableString and m != "\n"]
english_section.sort()
eng_status, eng_volumes = None, None
try:
eng_volumes = int(re.search(r'\d+', english_section[0]).group())
#obj.eng_status = "Complete" if "Complete" in english_section else "Ongoing"
if ("Complete" or "Completed") in english_section[0]:
eng_status = "Complete"
elif "Ongoing" in english_section[0]:
eng_status = "Ongoing"
elif ("Cancelled" or "Canceled") in english_section[0]:
eng_status = "Cancelled"
elif "Hiatus" in english_section[0]:
eng_status = "Hiatus"
elif "Dropped" in english_section[0]:
eng_status = "Dropped"
else:
eng_status = "Unknown"
except AttributeError:
print(f"\t---> Attribute error: No english volumes")
except IndexError:
print("\t---> Index Error: No english volumes")
return eng_status, eng_volumes
|
c71751d863a4407fa409b18d7cced44c6044cb10
| 25,124 |
import numpy
def load_factual_vec(fname, vocab, k):
"""
Loads 300x1 word vecs from FACTBANK compiled word embeddings
"""
word_vecs = {}
with open(fname, "rb") as f:
header = f.readline()
vocab_size, layer1_size = map(int, header.split())
binary_len = numpy.dtype('float32').itemsize * layer1_size
for line in xrange(vocab_size):
word = []
while True:
ch = f.read(1)
if ch == ' ':
word = ''.join(word)
break
if ch != '\n':
word.append(ch)
if word in vocab:
word_vecs[word] = numpy.fromstring(f.read(binary_len), dtype='float32')
else:
f.read(binary_len)
return word_vecs
|
f37e030b4b8412a96652e67a673204e13c3cb3dc
| 25,125 |
import datetime
def vaccine(date):
"""
Auxiliary function.
Download data about vaccination in Cantabria from the Ministry of Health, Consumer Affairs and Social Welfare.
https://www.mscbs.gob.es
Args:
date(str): Date in format %Y%m%d
Returns: DataFrame with vaccination data from first day (2021/02/04) to the present day.
"""
try:
prefix_url = 'https://www.mscbs.gob.es/profesionales/saludPublica/ccayes/alertasActual/nCov/documentos/' \
'Informe_Comunicacion_'
suffix_url = '.ods'
nfile = f'{prefix_url}{date}{suffix_url}'
file_vaccine = pd.read_excel(nfile, engine='odf')
file_vaccine.set_index('Unnamed: 0', inplace=True)
vcant = file_vaccine.loc['Cantabria']
vcant = pd.DataFrame(vcant).T
vcant.index = [datetime.datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")]
return vcant
except Exception as e:
date = datetime.datetime.strptime(date, "%Y%m%d").strftime("%Y/%m/%d")
print(f"Error downloading vaccination data for {date}")
# print(e)
|
5e3f9ffc3106b76ab637ab23fb6e8e6f487a48f1
| 25,126 |
def cache(f):
"""A decorator to cache results for a given function call.
Note: The caching is only done on the first argument, usually "self".
"""
ret = {}
def _Wrapper(*args, **kwargs):
self = args[0]
if self not in ret:
ret[self] = f(*args, **kwargs)
return ret[self]
return _Wrapper
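A usage sketch with a hypothetical class; since the key is only the first argument (self), a second call with different remaining arguments still returns the first cached result:

class Expensive:
    @cache
    def compute(self, x):
        print("computing...")
        return x * 2

obj = Expensive()
obj.compute(2)  # prints "computing...", returns 4
obj.compute(3)  # returns the cached 4 (only `self` is used as the key)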
|
786218b8c248bcb7c9d519a843dd4542a9b612b0
| 25,127 |
def home():
"""Return the home page."""
response = flask.render_template(
'index.html',
metrics=SUPPORTED_METRICS.keys())
return response, 200
|
aeb98484b580ceab6f45d7f52e05dda0b97ddb2b
| 25,128 |
def data_to_segments_uniform(x, n_segments, segment_ranges=True):
""" Split data into segments of equal size (number of observations)."""
return split_equal_bins(x, n_segments)
|
d787bdad8604f4dbf327576f655a9e408b314766
| 25,129 |
from pathlib import Path
def load_all_sheets(file_name):
"""
    Load all sheets from an xls(x) file into a dictionary mapping sheet names to pandas.DataFrames
Parameters
----------
file_name : str, Path
file_name to load from
Returns
-------
dict
dictionary containing the sheet_names as keys and pandas.DataFrame representing the xls(x) sheets
``{sheet_name: pandas.DataFrame}``
"""
file_name = Path(file_name)
excel_file = ExcelFile(file_name)
return load_these_sheets(file_name, list(excel_file.sheet_names))
|
84452af6d81c7b44c0669af637950e8b1c1dbda8
| 25,130 |
from datetime import datetime
def rate_limit(limit=1000, interval=60):
"""Rate limit for API endpoints.
If the user has exceeded the limit, then return the response 429.
"""
def rate_limit_decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
key: str = f"Limit::{request.remote_addr}:{datetime.datetime.now().minute}"
current_request_count = cache.get(key=key)
if current_request_count and int(current_request_count) >= limit:
return {
"message": f"Too many requests. Limit {limit} in {interval} seconds",
}, HTTPStatus.TOO_MANY_REQUESTS
else:
pipe = cache.pipeline()
pipe.incr(key, 1)
pipe.expire(key, interval + 1)
pipe.execute()
return func(*args, **kwargs)
return wrapper
return rate_limit_decorator
|
3f609d2bfe4a90fcf822df50e6c81032ad7d0d03
| 25,131 |
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def auth_filter(filteredapp):
return AuthProtocol(filteredapp, conf)
return auth_filter
|
b4b3b64093998865cf6a1846935c1a10db37b0ea
| 25,132 |
def parse(stylesheet):
"""Parse a stylesheet using tinycss2 and return a StyleSheet instance.
:param stylesheet: A string of an existing stylesheet.
"""
parsed_stylesheet = tinycss2.parse_stylesheet(
stylesheet, skip_comments=True, skip_whitespace=True
)
css = qstylizer.style.StyleSheet()
for node in parsed_stylesheet:
if node.type == "error":
raise ValueError("Cannot parse Stylesheet: " + node.message)
selector = tinycss2.serialize(node.prelude).strip()
declaration_list = tinycss2.parse_declaration_list(
node.content, skip_comments=True, skip_whitespace=True
)
for declaration in declaration_list:
if declaration.type == "declaration":
prop = declaration.name.strip()
css[selector][prop] = tinycss2.serialize(declaration.value).strip()
return css
|
3df3901c06e861b03746c21a056bae51bb93ebd6
| 25,133 |
import unittest
def test_suite():
"""Returns a test suite of all the tests in this module."""
test_classes = [TestNetCDFPointUtilsConstructor,
TestNetCDFPointUtilsFunctions1,
TestNetCDFPointUtilsGridFunctions
]
suite_list = map(unittest.defaultTestLoader.loadTestsFromTestCase,
test_classes)
suite = unittest.TestSuite(suite_list)
return suite
|
ebe3b28968def1131be19aedc963263f3277a5fb
| 25,134 |
from typing import List
def compute_max_cut(n: int, nodes: List[int]) -> int:
"""Compute (inefficiently) the max cut, exhaustively."""
max_cut = -1000
for bits in helper.bitprod(n):
# Collect in/out sets.
iset = []
oset = []
for idx, val in enumerate(bits):
iset.append(idx) if val == 0 else oset.append(idx)
# Compute costs for this cut, record maximum.
cut = 0
for node in nodes:
if node[0] in iset and node[1] in oset:
cut += node[2]
if node[1] in iset and node[0] in oset:
cut += node[2]
if cut > max_cut:
max_cut_in, max_cut_out = iset.copy(), oset.copy()
max_cut = cut
max_bits = bits
state = bin(helper.bits2val(max_bits))[2:].zfill(n)
print('Max Cut. N: {}, Max: {:.1f}, {}-{}, |{}>'
.format(n, np.real(max_cut), max_cut_in, max_cut_out,
state))
return helper.bits2val(max_bits)
|
30acd71267cd213e559bf43b8296333530736624
| 25,135 |
def get_capacity_potential_from_enspreso(tech: str) -> pd.Series:
"""
Return capacity potential (in GW) per NUTS2 region for a given technology, based on the ENSPRESO dataset.
Parameters
----------
tech : str
Technology name among 'wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility' and 'pv_residential'
Returns
-------
nuts2_capacity_potentials: pd.Series
Series storing technical potential per NUTS2 region.
"""
accepted_techs = ['wind_onshore', 'wind_offshore', 'wind_floating', 'pv_utility', 'pv_residential']
assert tech in accepted_techs, f"Error: tech {tech} is not in {accepted_techs}"
path_potential_data = f"{data_path}generation/vres/potentials/source/ENSPRESO"
    # For wind, summing over all wind conditions amounts to taking all available land
    # at a capacity per area of 5 MW/km2
if tech == 'wind_onshore':
cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
sheet_name='Raw data', index_col=1, skiprows=5)
onshore_wind = cap_potential_file[
(cap_potential_file['ONOFF'] == 'Onshore') &
(cap_potential_file['Scenario'] == 'EU-Wide high restrictions') &
(cap_potential_file['Subscenario - not cumulative'] == '2000m setback distance')]
nuts2_capacity_potentials_ds = onshore_wind['GW_Morethan25%_2030_100m_ALLTIMESLICESAVERAGE_V112'].copy()
elif tech == 'wind_offshore':
offshore_categories = ['12nm zone, water depth 0-30m', '12nm zone, water depth 30-60m',
'Water depth 0-30m', 'Water depth 30-60m']
cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
sheet_name='Wind Potential EU28 Full', index_col=1)
offshore_wind = cap_potential_file[
(cap_potential_file['Unit'] == 'GWe') &
(cap_potential_file['Onshore Offshore'] == 'Offshore') &
(cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
(cap_potential_file['Wind condition'] == 'CF > 25%') &
(cap_potential_file['Offshore categories'].isin(offshore_categories))]
nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
elif tech == 'wind_floating':
floating_categories = ['12nm zone, water depth 60-100m Floating',
'Water depth 60-100m Floating', 'Water depth 100-1000m Floating']
cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_WIND_ONSHORE_OFFSHORE.XLSX'),
sheet_name='Wind Potential EU28 Full', index_col=1)
offshore_wind = cap_potential_file[
(cap_potential_file['Unit'] == 'GWe') &
(cap_potential_file['Onshore Offshore'] == 'Offshore') &
(cap_potential_file['Scenario'] == 'EU-Wide low restrictions') &
(cap_potential_file['Wind condition'] == 'CF > 25%') &
(cap_potential_file['Offshore categories'].isin(floating_categories))]
nuts2_capacity_potentials_ds = offshore_wind.groupby(offshore_wind.index)['Value'].sum()
elif tech == 'pv_utility':
        # TODO: maybe parametrize this, if we decide to stick with it
land_use_high_irradiance_potential = 0.05
land_use_low_irradiance_potential = 0.00
cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP_85W.XLSX'),
sheet_name='Raw Data Available Areas', index_col=0,
skiprows=[0, 1, 2, 3], usecols=[1, 43, 44, 45, 46],
names=["NUTS2", "Agricultural HI", "Agricultural LI",
"Non-Agricultural HI", "Non-Agricultural LI"])
capacity_potential_high = cap_potential_file[["Agricultural HI", "Non-Agricultural HI"]].sum(axis=1)
capacity_potential_low = cap_potential_file[["Agricultural LI", "Non-Agricultural LI"]].sum(axis=1)
nuts2_capacity_potentials_ds = capacity_potential_high * land_use_high_irradiance_potential + \
capacity_potential_low * land_use_low_irradiance_potential
else: # 'pv_residential'
cap_potential_file = pd.read_excel(join(path_potential_data, 'ENSPRESO_SOLAR_PV_CSP.XLSX'),
sheet_name='NUTS2 170 W per m2 and 3%', skiprows=2, index_col=2)
nuts2_capacity_potentials_ds = cap_potential_file['PV - roof/facades']
updated_potential_per_tech = update_enspreso_capacity_potential(nuts2_capacity_potentials_ds, tech).round(6)
return updated_potential_per_tech
|
4daaf38ca9f54aa162b79682ed981d3ba3ab3167
| 25,136 |
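A hedged usage sketch; it assumes the ENSPRESO spreadsheets are present under data_path, which is a module-level setting of the original project.

pv_potential_gw = get_capacity_potential_from_enspreso('pv_utility')
print(pv_potential_gw.head())  # capacity potential in GW, indexed by NUTS2 code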
import time
import urllib.request
import json
def use_nearby_search(url, next_page=False, request_count=0):
"""Call nearby search API request.
Parameters
----------
url: str
URL to use to send a Nearby Search Request in Google Maps Place Search API
next_page: boolean, optional(default=False)
whether or not the URL is to request next page using next_page_token
request_count: int, optional(default=0)
the count of the previously-sent same requests; used only when next_page=True
Returns
-------
data: dict
returned API response
check https://developers.google.com/places/web-service/search#find-place-responses for its structure
status: str
status of the API response
check https://developers.google.com/places/web-service/search#PlaceSearchStatusCodes for details
"""
while True:
if next_page:
time.sleep(3)
try:
# get API response
print("API request made.")
response = urllib.request.urlopen(url)
except IOError:
pass # retry
else: # if no IOError occurs
data = json.loads(response.read().decode('utf-8'))
status = data['status']
if status == "OK":
break
elif (status == "INVALID_REQUEST") & next_page: # if next_page_token is not valid yet
if request_count >= 3:
print(f"Failed to receive a valid API response for 3 times for {url}.")
break # stop requesting after 3 trials
else:
print("...Key is not valid yet.")
request_count += 1
data, status = use_nearby_search(url + "&request_count=" + str(request_count), next_page,
request_count)
break
else:
break
return data, status
|
f579288356c4330a3af5ec2ac94cf31242669ba8
| 25,137 |
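A hedged sketch of building the request URL that use_nearby_search expects; the endpoint and parameter names follow the public Places Nearby Search API, while the coordinates and key are placeholders.

import urllib.parse

params = urllib.parse.urlencode({
    'location': '40.7128,-74.0060',  # placeholder coordinates
    'radius': 500,
    'type': 'restaurant',
    'key': 'YOUR_API_KEY',           # placeholder
})
url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?' + params
data, status = use_nearby_search(url)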
import urllib
import urllib2
def _GetGoogleAuthtoken(account_type, user, password, service, source):
"""This function authenticates the user in the specified service using
the provided authentication data.
Args:
account_type: Type of the account to login, could be GOOGLE or any other
string if the account is external.
user: Name of the user to be logged in.
password: Password of the user to be logged in.
service: Service where the user wants to log in, for example, 'ah'.
source: Name of the application requesting the user authentication.
Returns:
    The authentication token for the user if the supplied data is correct.
Raises:
lib.AuthenticationError: This exception is raised if the HTTP response is
403 - Forbidden, in this case the error is parsed and returned to the
user in the exception.
urllib2.HTTPError: This exception is raised for any other HTTP error.
"""
  # Create a request for Google's Client login, with the specified data.
auth_request_data_map = {
'accountType': account_type,
'Email': user,
'Passwd': password,
'service': service,
'source': source
}
auth_request_data = urllib.urlencode(auth_request_data_map)
auth_url = 'https://www.google.com/accounts/ClientLogin'
auth_request = urllib2.Request(auth_url, auth_request_data)
try:
# Create a custom opener, make the request and extract the body.
http_opener = _GetHTTPOpener()
auth_response = http_opener.open(auth_request)
auth_response_body = auth_response.read()
# Parse the response data as a dictionary and return the 'Auth' key.
auth_response_data = _ParseBodyAsDict(auth_response_body)
return auth_response_data['Auth']
except urllib2.HTTPError as e:
# Check if the error was a 403 - Forbidden. In that case, forward the
# exception as an authentication error. Otherwise, just forward the
# exception.
if e.code == 403:
# Parse the error body as a dictionary and forward the exception as an
# authentication error.
response_dict = _ParseBodyAsDict(e.read())
raise AuthenticationError(auth_request.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
|
9852968100d116e27cf50f4b047661ef3135074d
| 25,138 |
def trim_filters(response):
"""Trim the leading and trailing zeros from a 1-D array or sequence, leaving
one zero on each side. This is a modified version of numpy.trim_zeros.
Parameters
----------
response : 1-D array or sequence
Input array.
Returns
-------
first : int
Index of the last leading zero.
last : int
Index of the first trailing zero.
"""
first = 0
for i in response:
if i != 0.:
if first == 0:
first += 1 # to avoid filters with non-zero edges
break
else:
first = first + 1
last = len(response)
for i in response[::-1]:
if i != 0.:
if last == len(response):
last -= 1 # to avoid filters with non-zero edges
break
else:
last = last - 1
first -= 1
last += 1
return first, last
|
2582c5821bd5c8487c0f9d2f55d2d982767d2669
| 25,139 |
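A hedged usage sketch for trim_filters, showing that one zero is kept on each side of the non-zero part of the response.

import numpy as np

response = np.array([0., 0., 0.2, 0.8, 0.3, 0., 0.])
first, last = trim_filters(response)
trimmed = response[first:last]  # -> [0., 0.2, 0.8, 0.3, 0.]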
def is_spanning(graph, subgraph):
"""
Return True or False by passing graph and subgraph through function V
    to check if the subgraph uses all vertices of the original graph
Parameters
----------
graph = A networkx graph.
subgraph = A networkx subgraph of 'graph'.
Returns
-------
True if the subgraph is spanning.
False if the subgraph is not spanning.
"""
return V(graph) == V(subgraph)
|
371388bd6657165451216c4c65c5ea43ef19fed5
| 25,140 |
from bs4 import BeautifulSoup
from nltk.tokenize import WordPunctTokenizer
import re
def clean_text(text):
"""
A function to pre-process text
Parameters
----------
text : string
the string to be processed
Returns
-------
text : string
a clean string
"""
tok = WordPunctTokenizer()
pat1 = r'@[A-Za-z0-9]+'
pat2 = r'https?://[A-Za-z0-9./]+'
combined_pat = r'|'.join((pat1, pat2))
soup = BeautifulSoup(text, 'lxml')
souped = soup.get_text()
stripped = re.sub(combined_pat, '', souped)
try:
clean = stripped.decode("utf-8-sig").replace(u"\ufffd", "?")
    except (UnicodeDecodeError, AttributeError):  # str has no decode() on Python 3
clean = stripped
letters_only = re.sub("[^a-zA-Z]", " ", clean)
lower_case = letters_only.lower()
words = tok.tokenize(lower_case)
return (" ".join(words)).strip()
|
dc9d243d4c57ec1ea1af20325be57db536ec4286
| 25,144 |
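A hedged usage sketch for clean_text; the input string is arbitrary.

print(clean_text("@user Check https://example.com GREAT news!!!"))  # -> "check great news"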
def test_alias_function():
"""Test 4: Generate markup based on an element using an (alias w/function) parameter to explicitly correlate data and elements"""
template = get_template('contacts-alias')
def alias_name(p, e, k, v):
eq_(k, 'name')
return 'foo'
weld(template('.contact')[0], data, dict(alias=dict(name=alias_name,\
title='title')))
check_contacts(template)
|
69275c3e6676ac7b0473676fc7fe65d387edfecd
| 25,146 |
def baseurl(request):
"""
Return a BASE_URL template context for the current request.
"""
if request.is_secure():
scheme = 'https://'
else:
scheme = 'http://'
return {'BASE_URL': scheme + request.get_host(), }
|
76f3d75008eb996d1da226dbb4a7bd6e228fbcd1
| 25,147 |
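To take effect in Django, the baseurl context processor has to be registered in settings; a hedged sketch, with the dotted path being a placeholder for wherever the function actually lives.

# settings.py (sketch)
TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'APP_DIRS': True,
    'OPTIONS': {
        'context_processors': [
            'django.template.context_processors.request',
            'myproject.context_processors.baseurl',  # placeholder dotted path
        ],
    },
}]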
import requests
def geolocate(address, bounds=None, country=None, administrative_area=None, sensor=False):
"""
Resolves address using Google Maps API, and performs some massaging to the output result.
    Provided for convenience, since Uber relies on this heavily and we want to give a simple 'batteries included' experience.
See https://developers.google.com/maps/documentation/geocoding/ for more details
"""
params = {
'address': address,
'sensor': str(sensor).lower()
}
components = []
if country:
components.append('country:' + country)
if administrative_area:
components.append('administrative_area:' + administrative_area)
if bounds:
params['bounds'] = '|'.join(['{},{}'.format(x.latitude, x.longitude) for x in bounds])
if components:
params['components'] = '|'.join(components)
response = requests.get('http://maps.googleapis.com/maps/api/geocode/json', params=params)
if not response.ok:
raise GeolocationExcetion(response.text)
data = response.json()
if data['status'] not in ['OK', 'ZERO_RESULTS']:
raise GeolocationExcetion(data)
all_results = data.get('results', [])
for result in all_results:
coords = result.get('geometry', {}).get('location')
if coords:
result['latitude'] = coords['lat']
result['longitude'] = coords['lng']
return all_results
|
71edf37429aa10420e070d0cacf4e9ade1e6a75f
| 25,148 |
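A hedged usage sketch for geolocate; the address is arbitrary and the call hits the live Google Maps endpoint.

results = geolocate('1600 Amphitheatre Parkway, Mountain View, CA', country='US')
if results:
    print(results[0]['latitude'], results[0]['longitude'])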
from functools import wraps
def _cmdy_hook_class(cls):
"""Put hooks into the original class for extending"""
# store the functions with the same name
# that defined by different plugins
# Note that current (most recently added) is not in the stack
cls._plugin_stacks = {}
def _original(self, fname):
"""Get the original function of self, if it is overridden"""
        # callframe is originally -1
frame = self._plugin_callframe.setdefault(fname, -1)
frame += 1
self._plugin_callframe[fname] = frame
return cls._plugin_stacks[fname][frame]
cls._original = _original
orig_init = cls.__init__
def __init__(self, *args, **kwargs):
self._plugin_callframe = {}
orig_init(self, *args, **kwargs)
cls.__init__ = __init__
if cls.__name__ == "CmdyHolding":
orig_reset = cls.reset
@wraps(orig_reset)
def reset(self, *args, **kwargs):
# clear the callframes as well
self._plugin_callframe = {}
orig_reset(self, *args, **kwargs)
return self
cls.reset = reset
# self is not a decorator, we don't return cls
|
5653a71aeafc184751edcb8fddecd503c4aa2ee9
| 25,149 |
def load_data(
datapath=None,
minstorms=3,
minbmps=3,
combine_nox=True,
combine_WB_RP=True,
remove_grabs=True,
grab_ok_bmps="default",
balanced_only=True,
fix_PFCs=True,
excluded_bmps=None,
excluded_params=None,
as_dataframe=False,
**dc_kwargs
):
"""Prepare data for categorical summaries
    Parameters
    ----------
datapath : Path-like, optional
Path to the raw data CSV. If not provided, the latest data will be
downloaded.
minstorms : int (default = 3)
Minimum number of storms (monitoring events) for a BMP study to be included
minbmps : int (default = 3)
Minimum number of BMP studies for a parameter to be included
combine_nox : bool (default = True)
Toggles combining NO3 and NO2+NO3 into as new parameter NOx, giving
preference to NO2+NO3 when both parameters are observed for an event.
        The underlying assumption is that NO2 concentrations are typically much
smaller than NO3, thus NO2+NO3 ~ NO3.
combine_WB_RP : bool (default = True)
Toggles combining Retention Pond and Wetland Basin data into a new
BMP category: Retention Pond/Wetland Basin.
remove_grabs : bool (default = True)
Toggles removing grab samples from the dataset except for:
- biological parameters
- BMPs categories that are whitelisted via *grab_ok_bmps*
grab_ok_bmps : sequence of str, optional
BMP categories for which grab data should be included. By default, this
        includes Retention Ponds, Wetland Basins, and the combined
Retention Pond/Wetland Basin category created when *combine_WB_RP* is
True.
balanced_only : bool (default = True)
        Toggles removing BMP studies that have only influent or only effluent data.
fix_PFCs : bool (default = True)
Makes correction to the category of Permeable Friction Course BMPs
excluded_bmps, excluded_params : sequence of str, optional
List of BMPs studies and parameters to exclude from the data.
as_dataframe : bool (default = False)
When False, a wqio.DataCollection is returned
Additional Parameters
---------------------
    Any additional keyword arguments will be passed to wqio.DataCollection.
Returns
-------
bmp : pandas.DataFrame or wqio.DataCollection
"""
othergroups = dc_kwargs.pop("othergroups", ["category", "units"])
pairgroups = dc_kwargs.pop("pairgroups", ["category", "units", "bmp_id", "site_id", "storm"])
rescol = dc_kwargs.pop("rescol", "res")
qualcol = dc_kwargs.pop("qualcol", "qual")
ndval = dc_kwargs.pop("ndval", ["ND", "<"])
stationcol = dc_kwargs.pop("stationcol", "station")
paramcol = dc_kwargs.pop("paramcol", "parameter")
bmp = (
_load_raw_data(datapath)
.pipe(_clean_raw_data)
.pipe(
_prepare_for_summary,
minstorms=minstorms,
minbmps=minbmps,
combine_nox=combine_nox,
combine_WB_RP=combine_WB_RP,
remove_grabs=remove_grabs,
grab_ok_bmps=grab_ok_bmps,
balanced_only=balanced_only,
fix_PFCs=fix_PFCs,
excluded_bmps=excluded_bmps,
excluded_params=excluded_params,
)
)
if as_dataframe:
return bmp
return wqio.DataCollection(
bmp,
rescol=rescol,
qualcol=qualcol,
ndval=ndval,
stationcol=stationcol,
paramcol=paramcol,
othergroups=othergroups,
pairgroups=pairgroups,
**dc_kwargs
)
|
72c7e6be0eabddeba79c681c5da30e0b855b6496
| 25,152 |
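A hedged usage sketch for load_data, pulling the latest BMP dataset as a plain DataFrame rather than a wqio.DataCollection.

bmp_df = load_data(minstorms=3, minbmps=3, as_dataframe=True)
print(bmp_df.shape)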
def parser_IBP_Descriptor(data,i,length,end):
"""\
parser_IBP_Descriptor(data,i,length,end) -> dict(parsed descriptor elements).
This descriptor is not parsed at the moment. The dict returned is:
{ "type": "IBP", "contents" : unparsed_descriptor_contents }
(Defined in ISO 13818-1 specification)
"""
return { "type" : "IBP", "contents" : data[i+2:end] }
|
12379af2260dd461751c59e2023b7e5e9d68b979
| 25,155 |
import logging
import us
def build_county_list(state):
"""
Build the and return the fips list
"""
state_obj = us.states.lookup(state)
logging.info(f"Get fips list for state {state_obj.name}")
df_whitelist = load_data.load_whitelist()
df_whitelist = df_whitelist[df_whitelist["inference_ok"] == True]
all_fips = df_whitelist[
df_whitelist["state"].str.lower() == state_obj.name.lower()
].fips.tolist()
return all_fips
|
b11a0b831b7b89d04896ed1914acf993ef1d48ba
| 25,156 |
import bs4
def is_comment(obj):
"""Is comment."""
return isinstance(obj, bs4.Comment)
|
e56749b3d5f95754a031cc7286229d942333a22e
| 25,157 |
from typing import Tuple
import sqlite3
def insert_user(username: str) -> Tuple[int, str]:
"""
Inserts a new user. If the desired username is already taken,
appends integers incrementally until an open name is found.
:param username: The desired username for the new user.
:return A tuple containing the id and name of the new user.
"""
db = get_db()
new_user_id = None
count = 0
while new_user_id is None:
temp_name = username
if count != 0:
temp_name += str(count)
try:
cur = db.execute('INSERT INTO user (username) VALUES (?)', [temp_name])
new_user_id = cur.lastrowid
except sqlite3.IntegrityError:
count += 1
db.commit()
cur.close()
return new_user_id, temp_name
|
9eeeb6755251183de5ad775acfceef17c9f582a3
| 25,158 |
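A hedged usage sketch for insert_user; it has to run wherever get_db() can return an open connection (e.g. inside a request context in the original app).

user_id, final_name = insert_user('alice')
# If 'alice' is already taken, the function falls back to 'alice1', 'alice2', ...
print(user_id, final_name)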
def get_gateway(ctx, name):
"""Get the sdk's gateway resource.
It will restore sessions if expired. It will read the client and vdc
from context and make get_gateway call to VDC for gateway object.
"""
restore_session(ctx, vdc_required=True)
client = ctx.obj['client']
vdc_href = ctx.obj['profiles'].get('vdc_href')
vdc = VDC(client, href=vdc_href)
gateway = vdc.get_gateway(name)
gateway_resource = Gateway(client, href=gateway.get('href'))
return gateway_resource
|
998f1a6a600797164c3e053eed206d99d20b338c
| 25,159 |
def fn_getdatetime(fn):
"""Extract datetime from input filename
"""
dt_list = fn_getdatetime_list(fn)
if dt_list:
return dt_list[0]
else:
return None
|
efea54154d318e0e5ef71c6147057b461696c677
| 25,160 |
def greenplum_kill_process(process_id):
"""
:param process_id: int
:return: None
"""
query = """
select pg_cancel_backend({0});
select pg_terminate_backend({0});
""".format(process_id)
return greenplum_read(query)
|
1bcd362bf2ed3d4cb5773c5539d0e9bb43bc96ab
| 25,161 |
import base64
import re
def check_app_auth(headers):
"""Authenticate an application from Authorization HTTP header"""
try:
auth_header = headers["Authorization"]
except KeyError:
return False
# Only handle HTTP Basic authentication
m = re.match("Basic (\w+==)", auth_header)
if not m:
return False
encoded = m.groups()[0].encode('ascii')
    decoded = base64.b64decode(encoded).decode('ascii')
m = re.match("([^:]+):(.+)", decoded)
if not m:
# Invalid authorization format
return False
app_user, app_pass = m.groups()
global app_auth
try:
if app_auth[app_user] == app_pass:
return True
except KeyError:
# No such user, fall through
pass
return False
|
5378f70041294fdad591ffbb941991cb03a7fb3d
| 25,162 |
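A hedged sketch of exercising check_app_auth with a hand-built Basic header; the credentials are placeholders and the module-level app_auth dict must already map the user to its password.

import base64

token = base64.b64encode(b'myapp:s3cret').decode('ascii')  # placeholder credentials
headers = {'Authorization': 'Basic ' + token}
print(check_app_auth(headers))  # True only if app_auth['myapp'] == 's3cret'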
def setupConnection():
"""
Create connection to database, to be shared by table classes. The file
will be created if it does not exist.
"""
dbPath = conf.get('db', 'path')
conn = builder()(dbPath)
return conn
|
3437cf04622acf32b974b1ecc406daa594d30650
| 25,163 |
import numpy as np
def max_pool(ip):
"""does a 2x2 max pool, crops off ends if not divisible by 2
ip is DxHxW
op is DxH/2xW/2
"""
height = ip.shape[1] - ip.shape[1]%2
width = ip.shape[2] - ip.shape[2]%2
h_max = np.maximum(ip[:,:height:2,:], ip[:,1:height:2,:])
op = np.maximum(h_max[:,:,:width:2], h_max[:,:,1:width:2])
return op
|
c270c4128842e33e69e0861f0010b0903c9728d3
| 25,164 |
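A hedged usage sketch for max_pool on a 1x4x4 block.

import numpy as np

ip = np.arange(16, dtype=float).reshape(1, 4, 4)
print(max_pool(ip))  # max over each 2x2 block: [[5., 7.], [13., 15.]]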
import numpy as np
import torch
def get_bin_vals(global_config):
"""
Creates bin values for grasping widths according to bounds defined in config
Arguments:
global_config {dict} -- config
Returns:
        torch.Tensor -- bin value tensor
"""
bins_bounds = np.array(global_config['DATA']['labels']['offset_bins'])
if global_config['TEST']['bin_vals'] == 'max':
bin_vals = (bins_bounds[1:] + bins_bounds[:-1]) / 2
bin_vals[-1] = bins_bounds[-1]
elif global_config['TEST']['bin_vals'] == 'mean':
bin_vals = bins_bounds[1:]
else:
raise NotImplementedError
if not global_config['TEST']['allow_zero_margin']:
bin_vals = np.minimum(bin_vals, global_config['DATA']['gripper_width'] - global_config['TEST']['extra_opening'])
torch_bin_vals = torch.tensor(bin_vals, dtype=torch.float32)
return torch_bin_vals
|
07acdcb0329c1002983ca021fbc84c60f7474758
| 25,165 |