content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
---|---|---|
def Class_Property (getter) :
"""Return a descriptor for a property that is accessible via the class
and via the instance.
::
>>> from _TFL._Meta.Property import *
>>> from _TFL._Meta.Once_Property import Once_Property
>>> class Foo (object) :
... @Class_Property
... def bar (cls) :
... "Normal method bar"
... print ("Normal method bar called")
... return 42
... @Class_Property
... @classmethod
... def baz (cls) :
... "classmethod baz"
... print ("classmethod baz called")
... return "Frozz"
... @Class_Property
... @Class_Method
... def foo (cls) :
... "Class_Method foo"
... print ("Class_Method foo called")
... return "Hello world"
... @Class_Property
... @Once_Property
... def qux (cls) :
... "Once property qux"
... print ("Once property qux")
... return 42 * 42
...
>>> foo = Foo ()
>>> Foo.bar
Normal method bar called
42
>>> foo.bar
Normal method bar called
42
>>> foo.bar = 137
>>> Foo.bar
Normal method bar called
42
>>> foo.bar
137
>>> Foo.bar = 23
>>> Foo.bar
23
>>> print (Foo.baz)
classmethod baz called
Frozz
>>> print (foo.baz)
classmethod baz called
Frozz
>>>
>>> print (Foo.foo)
Class_Method foo called
Hello world
>>> print (foo.foo)
Class_Method foo called
Hello world
>>>
>>> Foo.qux
Once property qux
1764
>>> foo.qux
1764
>>> foo2 = Foo ()
>>> foo2.qux
1764
>>> Foo.qux
1764
"""
if hasattr (getter, "__func__") :
return _Class_Property_Descriptor_ (getter)
else :
return _Class_Property_Function_ (getter) | 845d62444f41b547b9922d10666f8a911c7e8de3 | 15,476 |
import numpy as np
def naive_act_norm_initialize(x, axis):
"""Compute the act_norm initial `scale` and `bias` for `x`."""
x = np.asarray(x)
axis = list(sorted(set([a + len(x.shape) if a < 0 else a for a in axis])))
min_axis = np.min(axis)
reduce_axis = tuple(a for a in range(len(x.shape)) if a not in axis)
var_shape = [x.shape[a] for a in axis]
var_shape_aligned = [x.shape[a] if a in axis else 1
for a in range(min_axis, len(x.shape))]
mean = np.reshape(np.mean(x, axis=reduce_axis), var_shape)
bias = -mean
scale = 1. / np.reshape(
np.sqrt(np.mean((x - np.reshape(mean, var_shape_aligned)) ** 2,
axis=reduce_axis)),
var_shape
)
return scale, bias, var_shape_aligned | 78034c16e38c27b146a8ee1be1be86d9fc4ffe6a | 15,477 |
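# Hedged usage sketch for naive_act_norm_initialize above (my own example, not part of the dataset row;
# it assumes numpy is available and the function is in scope).
import numpy as np
x = np.random.randn(8, 3) * 2.0 + 5.0  # fake batch of 8 samples with 3 features
scale, bias, var_shape_aligned = naive_act_norm_initialize(x, axis=[-1])
normalized = (x + bias) * scale  # act-norm style init: zero mean, unit variance per feature
print(scale.shape, bias.shape, var_shape_aligned)  # (3,) (3,) [3]
print(np.allclose(normalized.mean(axis=0), 0.0, atol=1e-6))  # True
print(np.allclose(normalized.std(axis=0), 1.0, atol=1e-6))  # True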
def cmpTensors(t1, t2, atol=1e-5, rtol=1e-5, useLayout=None):
"""Compare Tensor list data"""
assert (len(t1) == len(t2))
for i in range(len(t2)):
if (useLayout is None):
assert(t1[i].layout == t2[i].layout)
dt1 = t1[i].dataAs(useLayout)
dt2 = t2[i].dataAs(useLayout)
if not np.allclose(dt1, dt2, atol=atol, rtol=rtol):
logger.error("Tensor %d mismatch!" % i)
return False
return True | ce085c9998fddc86420ea4f5307e83e15d49372a | 15,478 |
def auth(body): # noqa: E501
"""Authenticate endpoint
Return a bearer token to authenticate and authorize subsequent calls for resources # noqa: E501
:param body: Request body to perform authentication
:type body: dict | bytes
:rtype: Auth
"""
db = get_db()
cust = db['Customer'].find_one({"email": body['username']})
try:
if cust is None:
user = db['User'].find_one({"email": body['username']})
if user is None:
return "Auth failed", 401
else:
if user['plain_password'] == body['password']:
return generate_response(generate_token(str(user['_id'])))
else:
if cust['plain_password'] == body['password']:
return generate_response(generate_token(str(cust['_id'])))
except Exception as e:
print (e)
return "Auth failed", 401 | 2992d119cf7fa3a5d797825c704cd837f647dbd7 | 15,479 |
def make_feature(func, *argfuncs):
"""Return a customized feature function that adapts to different input representations.
Args:
func: feature function (callable)
argfuncs: argument adaptor functions (callable, take `ctx` as input)
"""
assert callable(func)
for argfunc in argfuncs:
assert callable(argfunc)
def _feature(ctx):
return func(*[argfunc(ctx) for argfunc in argfuncs])
return _feature | 26064ee0873d63edc877afdcb03a39e40453a831 | 15,480 |
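# Hedged usage sketch for make_feature above (illustrative only; the ctx keys "ask"/"bid" are my own assumption):
# each adaptor pulls one argument out of a context dict, and the composed feature applies func to them.
price_diff = make_feature(lambda a, b: a - b, lambda ctx: ctx["ask"], lambda ctx: ctx["bid"])
print(price_diff({"ask": 101.5, "bid": 100.0}))  # 1.5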
import ctypes
def from_numpy(np_array: np.ndarray):
"""Convert a numpy array to another type of dlpack compatible array.
Parameters
----------
np_array : np.ndarray
The source numpy array that will be converted.
Returns
-------
pycapsule : PyCapsule
A pycapsule containing a DLManagedTensor that can be converted
to other array formats without copying the underlying memory.
"""
holder = _Holder(np_array)
size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor))
dl_managed_tensor = DLManagedTensor.from_address(
ctypes.pythonapi.PyMem_RawMalloc(size)
)
dl_managed_tensor.dl_tensor.data = holder.data
dl_managed_tensor.dl_tensor.device = DLDevice(1, 0)
dl_managed_tensor.dl_tensor.ndim = np_array.ndim
dl_managed_tensor.dl_tensor.dtype = DLDataType.TYPE_MAP[str(np_array.dtype)]
dl_managed_tensor.dl_tensor.shape = holder.shape
dl_managed_tensor.dl_tensor.strides = holder.strides
dl_managed_tensor.dl_tensor.byte_offset = 0
dl_managed_tensor.manager_ctx = holder._as_manager_ctx()
dl_managed_tensor.deleter = _numpy_array_deleter
pycapsule = ctypes.pythonapi.PyCapsule_New(
ctypes.byref(dl_managed_tensor),
_c_str_dltensor,
_numpy_pycapsule_deleter,
)
return pycapsule | 2663b831274f1fc1dd2e597212fa475f6d03e578 | 15,481 |
def lmsSubstringsAreEqual(string, typemap, offsetA, offsetB):
"""
Return True if LMS substrings at offsetA and offsetB are equal.
"""
# No other substring is equal to the empty suffix.
if offsetA == len(string) or offsetB == len(string):
return False
i = 0
while True:
aIsLMS = isLMSChar(i + offsetA, typemap)
bIsLMS = isLMSChar(i + offsetB, typemap)
# If we've found the start of the next LMS substrings
if (i > 0 and aIsLMS and bIsLMS):
# then we made it all the way through our original LMS
# substrings without finding a difference, so we can go
# home now.
return True
if aIsLMS != bIsLMS:
# We found the end of one LMS substring before we reached
# the end of the other.
return False
if string[i + offsetA] != string[i + offsetB]:
# We found a character difference, we're done.
return False
i += 1 | 5177b8cf5b2b80a519ef0d9fbb5f972c584a6b5b | 15,482 |
from .tools import nantrapz
def synthesize_photometry(lbda, flux, filter_lbda, filter_trans,
normed=True):
""" Get Photometry from the given spectral information through the given filter.
This function converts the flux into photons since the transmission provides the
fraction of photons that goes through.
Parameters
-----------
lbda, flux: [array]
Wavelength and flux of the spectrum from which you want to synthesize photometry
filter_lbda, filter_trans: [array]
Wavelength and transmission of the filter.
normed: [bool] -optional-
Shall the filter transmission be normalized?
Returns
-------
Float (photometric point)
"""
# ---------
# The Tool
def integrate_photons(lbda, flux, step, flbda, fthroughput):
""" """
filter_interp = np.interp(lbda, flbda, fthroughput)
dphotons = (filter_interp * flux) * lbda * 5.006909561e7
return nantrapz(dphotons,lbda) if step is None else np.sum(dphotons*step)
# ---------
# The Code
normband = 1. if not normed else \
integrate_photons(lbda, np.ones(len(lbda)),None,filter_lbda,filter_trans)
return integrate_photons(lbda,flux,None,filter_lbda,filter_trans)/normband | 6eb8b9806388b9b373e37a2c813e3a4ba9696bc2 | 15,483 |
def get_A_dash_floor_bath(house_insulation_type, floor_bath_insulation):
"""浴室の床の面積 (m2)
Args:
house_insulation_type(str): 床断熱住戸'または'基礎断熱住戸'
floor_bath_insulation(str): 床断熱住戸'または'基礎断熱住戸'または'浴室の床及び基礎が外気等に面していない'
Returns:
float: 浴室の床の面積 (m2)
"""
return get_table_3(15, house_insulation_type, floor_bath_insulation) | fbcd2c6dd6b5e2099351b445bf4b3e71aed4d508 | 15,484 |
def cancel_task_async(hostname, task_id):
"""Cancels a swarming task."""
return _call_api_async(
None, hostname, 'task/%s/cancel' % task_id, method='POST') | fb1b57dac80518e2cf3b375d8ecd393b34855b45 | 15,485 |
def generate_two_files_both_stress_strain():
"""Generates two files that have both stress and strain in each file"""
fname = {'stress': 'resources/double_stress.json',
'strain': 'resources/double_strain.json'}
expected = [ # makes an array of two pif systems
pif.System(
properties=[
pif.Property(name='stress',
scalars=list(np.linspace(0, 100)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100)))),
pif.Property(name='strain',
scalars=list(np.linspace(0, 1)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100))))]),
pif.System(
properties=[
pif.Property(name='stress',
scalars=list(np.linspace(0, 100)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100)))),
pif.Property(name='strain',
scalars=list(np.linspace(0, 1)),
conditions=pif.Value(
name='time',
scalars=list(np.linspace(0, 100))))
])]
# dump the pifs into two separate files
with open(fname['stress'], 'w') as stress_file:
pif.dump(expected[0], stress_file)
with open(fname['strain'], 'w') as strain_file:
pif.dump(expected[1], strain_file)
return fname | 6cfe410071085bc975f630e34e43c8b2b626f846 | 15,486 |
def recipe_edit(username, pk):
"""Page showing the possibility to edit the recipe."""
recipe_manager = RecipeManager(api_token=g.user_token)
response = recipe_manager.get_recipe_response(pk)
recipe = response.json()
# shows 404 if there is no recipe, response status code is 404 or user is not the author
if not recipe or response.status_code == 404 or username != g.username:
abort(404)
# checking form validation
form = RecipeAddForm(data=recipe)
if form.validate_on_submit():
try:
if form.image.data != DEFAULT_RECIPE_IMAGE_PATH: # if the user has uploaded a picture file
image = images.save(form.image.data)
image_path = f'app/media/recipe_images/{image}'
else:
image_path = None # set image_path to None so as not to alter the image
except UploadNotAllowed: # if the user uploaded a file that is not a picture
flash('Incorrect picture format', 'error')
else: # if there is no exception edit recipe data and image
recipe_data, recipe_files = recipe_manager.get_form_data(form, image_path)
recipe_manager.edit(recipe_data, recipe_files, pk, username)
return redirect('/recipes/')
return render_template('recipe_edit.html', form=form) | 73735cd5c279c8e62aebdacfb29c5d3d83c856fa | 15,488 |
import re
def load_data_file(filename):
"""loads a single file into a DataFrame"""
regexp = '^.*/results/([^/]+)/([^/]+)/([^/]+).csv$'
optimizer, blackbox, seed = re.match(regexp, filename).groups()
f = ROOT + '/results/{}/{}/{}.csv'.format(optimizer, blackbox, seed)
result = np.genfromtxt(f, delimiter=',')
return get_best(result) | a2c53adfc356809f7ec554d20203a5ad276ebc1e | 15,489 |
def get_hub_manager():
"""Generate Hub plugin structure"""
global _HUB_MANAGER
if not _HUB_MANAGER:
_HUB_MANAGER = _HubManager(_plugins)
return _HUB_MANAGER | 384039f45f59cec3db737536a08719770ecfb3ff | 15,490 |
def extract_stimtype(
data: pd.DataFrame, stimtype: str, columns: list
) -> pd.DataFrame:
"""
Get trials with matching label under stimType
"""
if stimtype not in accept_stimtype:
raise ValueError(f"invalid {stimtype}, only accept {accept_stimtype}")
get = columns.copy()
get += ["participant_id"]
get += [i for i in identity_entity if i in data.columns]
stimresp = data.query(f"stimType == '{stimtype}'")
return stimresp.loc[:, get] | 186cc066133d1d8d6c443b17a2d17cc70d366d98 | 15,491 |
def compute_rank_clf_loss(hparams, scores, labels, group_size, weight):
"""
Compute ranking/classification loss
Note that the tfr loss is slightly different from our implementation: the tfr loss is the sum over all losses
divided by the number of queries; our implementation is the sum over all losses divided by the number of
labels greater than 0.
"""
# Classification loss
if hparams.num_classes > 1:
labels = tf.cast(labels, tf.int32)
labels = tf.squeeze(labels, -1) # Last dimension is max_group_size, which should be 1
return tf.losses.sparse_softmax_cross_entropy(logits=scores, labels=labels, weights=weight)
# Expand weight to [batch size, 1] so that in inhouse ranking loss it can be multiplied with loss which is
# [batch_size, max_group_size]
expanded_weight = tf.expand_dims(weight, axis=-1)
# Ranking losses
# tf-ranking loss
if hparams.use_tfr_loss:
weight_name = "weight"
loss_fn = tfr.losses.make_loss_fn(hparams.tfr_loss_fn, lambda_weight=hparams.tfr_lambda_weights,
weights_feature_name=weight_name)
loss = loss_fn(labels, scores, {weight_name: expanded_weight})
return loss
# our own implementation
if hparams.ltr_loss_fn == 'pairwise':
lambdarank = LambdaRank()
pairwise_loss, pairwise_mask = lambdarank(scores, labels, group_size)
loss = tf.reduce_sum(tf.reduce_sum(pairwise_loss, axis=[1, 2]) * expanded_weight) / tf.reduce_sum(pairwise_mask)
elif hparams.ltr_loss_fn == 'softmax':
loss = compute_softmax_loss(scores, labels, group_size) * expanded_weight
is_positive_label = tf.cast(tf.greater(labels, 0), dtype=tf.float32)
loss = tf.div_no_nan(tf.reduce_sum(loss), tf.reduce_sum(is_positive_label))
elif hparams.ltr_loss_fn == 'pointwise':
loss = compute_sigmoid_cross_entropy_loss(scores, labels, group_size) * expanded_weight
loss = tf.reduce_mean(loss)
else:
raise ValueError('Currently only support pointwise/pairwise/softmax/softmax_cls.')
return loss | 12b45518d5bd11182dbf220ccfe90da2fe0d6c38 | 15,492 |
import string
def get_org_image_url(url, insert_own_log=False):
""" liefert gegebenenfalls die URL zum Logo der betreffenden Institution """
#n_pos = url[7:].find('/') # [7:] to skip over http://
#org_url = url[:n_pos+7+1] # including '/'
item_containers = get_image_items(ELIXIER_LOGOS_PATH)
image_url = image_url_url = ''
image_url_extern = True
for ic in item_containers:
arr = ic.item.sub_title.split('|')
for a in arr:
b = a.strip()
if b != '' and url.find(b) >= 0:
image_url = ELIXIER_LOGOS_URL + ic.item.name
image_url_url = ic.item.title
image_url_extern = True
break
if image_url != '':
break
if insert_own_log and image_url == '':
image_url = EDUFOLDER_INST_LOGO_URL
image_url_url = get_base_site_url()
image_url_extern = False
return image_url, image_url_url, image_url_extern | b80d29a3393820e6cfc58e36ae34361d4587bd73 | 15,493 |
import asyncio
import logging
from concurrent.futures import ThreadPoolExecutor
async def download_page(url, file_dir, file_name, is_binary=False):
"""
Fetch URL and save response to file
Args:
url (str): Page URL
file_dir (pathlib.Path): File directory
file_name (str): File name
is_binary (bool): True if should download binary content (e.g. images)
Returns:
HttpResponse: HTTP response content and extension
"""
response = await fetch(url, is_binary)
path = file_dir.joinpath('{}{}'.format(file_name, response.ext))
try:
with ThreadPoolExecutor() as pool:
await asyncio.get_running_loop().run_in_executor(
pool, write_file, str(path), is_binary, response.content
)
except OSError:
logging.error('Can\'t save file: {}'.format(path))
return response | 452285e7d47d7d7c227e356efc0e7dc1ad2ce7ee | 15,494 |
def normal_coffee():
"""
when the user decides to pick a normal or large cup of coffee
:return: template that explains how to make normal coffee
"""
return statement(render_template('explanation_large_cup', product='kaffee')) | ba9ed37cb85327d6541ad86071f047ce87297c95 | 15,495 |
def _transitive_closure_dense_numpy(A, kind='metric', verbose=False):
"""
Calculates Transitive Closure using numpy dense matrix traversing.
"""
C = A.copy()
n, m = A.shape
# Check if diagonal is all zero
if sum(np.diagonal(A)) > 0:
raise ValueError("Diagonal has to be zero for matrix computation to be correct")
# Compute Transitive Closure
for i in range(0, n):
if verbose:
print('calc row:', i + 1, 'of', m)
for j in range(0, n):
if kind == 'metric':
vec = C[i, :] + C[:, j]
C[i, j] = vec.min()
elif kind == 'ultrametric':
vec = np.maximum(C[i, :], C[:, j])
C[i, j] = vec.min()
return np.array(C) | cf02a380dbf28a6442cc999b3faea329d5041b17 | 15,496 |
def convert_date(raw_dates: pd.Series) -> pd.Series:
"""Automatically converts series containing raw dates
to specific format.
Parameters
----------
raw_dates:
Series to be converted.
Returns
-------
Converted pandas datetime series.
"""
raw_dates = pd.to_datetime(raw_dates, utc=True)
return raw_dates | 23a2310ec8fd30dd2b831805817fb3407c10c104 | 15,497 |
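# Hedged usage sketch for convert_date above (my own example; assumes pandas is imported as pd):
import pandas as pd
raw = pd.Series(["2021-01-01", "2021-06-15 12:30:00"])
print(convert_date(raw))  # timezone-aware datetimes with dtype datetime64[ns, UTC]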
async def get_scorekeeper_by_id(scorekeeper_id: conint(ge=0, lt=2**31)):
"""Retrieve a Scorekeeper object, based on Scorekeeper ID,
containing: Scorekeeper ID, name, slug string, and gender."""
try:
scorekeeper = Scorekeeper(database_connection=_database_connection)
scorekeeper_info = scorekeeper.retrieve_by_id(scorekeeper_id)
if not scorekeeper_info:
raise HTTPException(status_code=404,
detail=f"Scorekeeper ID {scorekeeper_id} not found")
else:
return scorekeeper_info
except ValueError:
raise HTTPException(status_code=404,
detail=f"Scorekeeper ID {scorekeeper_id} not found")
except ProgrammingError:
raise HTTPException(status_code=500,
detail="Unable to retrieve scorekeeper information")
except DatabaseError:
raise HTTPException(status_code=500,
detail="Database error occurred while trying to "
"retrieve scorekeeper information") | 044b3bacfdf47918c2ad15635958d69c17ccf5c8 | 15,498 |
from datetime import datetime
def get_time(sec_scale):
"""time since epoch in milisecond
"""
if sec_scale == 'sec':
scale = 0
elif sec_scale == 'msec':
scale = 3
else:
raise ValueError("sec_scale must be 'sec' or 'msec'")
secs = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
return int(secs * pow(10, scale)) | c233133d61c6347a27186ef3baf0ae2bc79cf8f2 | 15,500 |
import urllib.request
import json
def get_json(url):
"""
Function that retrieves a json from a given url.
:return: the json that was received
"""
with urllib.request.urlopen(url) as response:
data = response.read().decode('utf-8')
data = json.loads(data)
return data | 3164bb7d1adc40e3dcd07e82ec734807f3a17abc | 15,501 |
def _defaultChangeProvider(variables,wf):
""" by default we just forword the message to the change provider """
return variables | 5087dc06e0da1f3270b28e9ab1bd2241ed4b4de4 | 15,502 |
def GPPrediction(y_train, X_train, T_train, eqid_train, sid_train = None, lid_train = None,
X_new = None, T_new = None, eqid_new = None, sid_new = None, lid_new = None,
dc_0 = 0.,
Tid_list = None, Hyp_list = None, phi_0 = None, tau_0 = None,
sigma_s = None, sigma_e = None):
"""
Make ground motion predictions at new locations conditioned on the training data
Parameters
----------
y_train : np.array(n_train_pt)
Array with ground-motion observations associated with training data
X_train : np.array(n_train_pt, n_dim)
Design matrix for training data.
T_train : np.array(n_train_pt, 2x n_coor)
Coordinates matrix for training data.
eqid_train : np.array(n_train_pt)
Earthquake IDs for training data.
sid_train : np.array(n_train_pt), optional
Station IDs for training data. The default is None.
lid_train : np.array(n_train_pt), optional
Source IDs for training data. The default is None.
X_new : np.array(n_new_pt, n_dim), optional
Design matrix for predictions. The default is None.
T_new : np.array(n_new_pt, 2 x n_coor), optional
Coordinate matrix for predictions. The default is None.
eqid_new : np.array(n_new_pt), optional
Earthquake IDs for predictions. The default is None.
sid_new : np.array(n_new_pt), optional
Station IDs for predictions. The default is None.
lid_new : np.array(n_new_pt), optional
Source IDs for predictions. The default is None.
dc_0 : float, optional
Mean offset. The default is zero.
Tid_list : n_dim list
List to specify the coordinate pair or each dimension.
Hyp_list : list, optional
List of hyper-parameters for each dimension of the covariance function.
phi_0 : double
Within-event standard deviation.
tau_0 : double
Between-event standard deviation.
sigma_s : double, optional
Standard deviation for zero correlation site-to-site term. The default is None.
sigma_e : double, optional
Standard deviation for zero correlation source-to-source term. The default is None.
Returns
-------
np.array(n_new_pt)
median estimate of new predictions.
np.array(n_new_pt, n_new_pt)
epistemic uncertainty of new predictions.
"""
#import pdb; pdb.set_trace()
#remove mean offset from conditioning data
y_train = y_train - dc_0
#number of grid nodes
n_pt_train = X_train.shape[0]
n_pt_new = X_new.shape[0]
#initialize covariance matrices
cov_data = np.zeros([n_pt_train,n_pt_train])
cov_star = np.zeros([n_pt_new,n_pt_train])
cov_star2 = np.zeros([n_pt_new,n_pt_new])
#create covariance matrices
for k, (hyp, tid) in enumerate(zip(Hyp_list,Tid_list)):
#covariance between train data
cov_data += CreateCovMaternDimX(X_train[:,k], X_train[:,k],
T_train[tid], T_train[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 1e-6)
#covariance between train data and predictions
cov_star += CreateCovMaternDimX(X_new[:,k], X_train[:,k],
T_new[tid], T_train[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 0)
#covariance between prediction data
cov_star2 += CreateCovMaternDimX(X_new[:,k], X_new[:,k],
T_new[tid], T_new[tid],
hyp_ell = hyp[0], hyp_omega = hyp[1], hyp_pi = hyp[2],
delta = 1e-6)
#add site to site systematic effects if sigma_s is specified
if not (sigma_s is None):
assert(not(sid_train is None)), 'Error site id for training data not specified'
cov_data += CreateCovS2S(sid_train, sid_train, sigma_s, delta = 1e-6)
#add source to source systematic effects if phi_L2L is specified
if not (sigma_e is None):
assert(not(lid_train is None)), 'Error location id for training data not specified'
cov_data += CreateCovL2L(lid_train, lid_train, sigma_e, delta = 1e-6)
#add between and within event covariance matrices
cov_data += CreateCovWe(eqid_train, eqid_train, phi_0)
cov_data += CreateCovBe(eqid_train, eqid_train, tau_0, delta = 1e-6)
#consider site to site systematic effects in predictions if sigma_s is specified
if not ( (sigma_s is None) or (sid_new is None)):
cov_star2 += CreateCovS2S(sid_new, sid_new, sigma_s, delta = 1e-6)
cov_star += CreateCovS2S(sid_new, sid_train, sigma_s)
#consider site to site systematic effects in predictions if sigma_s is specified
if not ( (sigma_e is None) or (lid_new is None)):
cov_star2 += CreateCovL2L(lid_new, lid_new, sigma_e, delta = 1e-6)
cov_star += CreateCovL2L(lid_new, lid_train, sigma_e)
#consider earthquake aleatory terms if eqid_new is specified
if not (eqid_new is None):
cov_star2 += CreateCovBe(eqid_new, eqid_new, tau_0, delta = 1e-6)
cov_star += CreateCovBe(eqid_new, eqid_train, tau_0)
#posterior mean and variance at new locations
y_new_mu = cov_star.dot(linalg.solve(cov_data, y_train))
#add mean offset to new predictions
y_new_mu = y_new_mu + dc_0
y_new_cov = cov_star2 - cov_star.dot(linalg.solve(cov_data, cov_star.transpose()))
#posterior standard dev. at new locations
y_new_sig = np.sqrt(np.diag(y_new_cov))
return y_new_mu.flatten(), y_new_sig.flatten(), y_new_cov | 28bdf5575f16d1ee3a719aaadc59eefda642171d | 15,504 |
def zdotu(x, y):
"""
This function computes the complex scalar product \M{x^T y} for the
vectors x and y, returning the result.
"""
return _gslwrap.gsl_blas_zdotu(x, y, 1j) | 135b5196568454dc0c721ab42cdd13d4bed63c5c | 15,505 |
def music21_to_chord_duration(p, key):
"""
Takes in a Music21 score, and outputs three lists
List for chords (by primeFormString string name)
List for chord function (by romanNumeralFromChord .romanNumeral)
List for durations
"""
p_chords = p.chordify()
p_chords_o = p_chords.flat.getElementsByClass('Chord')
chord_list = []
chord_function_list = []
duration_list = []
for ch in p_chords_o:
duration_list.append(ch.duration.quarterLength)
ch.closedPosition(forceOctave=4, inPlace=True)
rn = roman.romanNumeralFromChord(ch, key)
rp = rn.pitches
rp_names = ",".join([pi.name + pi.unicodeNameWithOctave[-1] for pi in rp])
chord_list.append(rp_names)
chord_function_list.append(rn.figure)
return chord_list, chord_function_list, duration_list | 142a0ef06c5c9542097cc7db0631a1f19e2f8f72 | 15,506 |
def city_country(city, country, population=''):
"""Generate a neatly formatted city/country name."""
full_name = city + ', ' + country
if population:
return full_name.title() + ' - population ' + str(population)
else:
return full_name.title() | 23be8d5b39380fd177240e479cf77ac7eb6c7459 | 15,507 |
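# Hedged usage sketch for city_country above (the city/country values are my own example data):
print(city_country("santiago", "chile"))  # Santiago, Chile
print(city_country("lagos", "nigeria", population=15000000))  # Lagos, Nigeria - population 15000000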
def generate_headermap(line,startswith="Chr", sep="\t"):
"""
>>> line = "Chr\\tStart\\tEnd\\tRef\\tAlt\\tFunc.refGene\\tGene.refGene\\tGeneDetail.refGene\\tExonicFunc.refGene\\tAAChange.refGene\\tsnp138\\tsnp138NonFlagged\\tesp6500siv2_ea\\tcosmic70\\tclinvar_20150629\\tOtherinfo"
>>> generate_headermap(line)
{'Chr': 0, 'Start': 1, 'End': 2, 'Ref': 3, 'Alt': 4, 'Func.refGene': 5, 'Gene.refGene': 6, 'GeneDetail.refGene': 7, 'ExonicFunc.refGene': 8, 'AAChange.refGene': 9, 'snp138': 10, 'snp138NonFlagged': 11, 'esp6500siv2_ea': 12, 'cosmic70': 13, 'clinvar_20150629': 14, 'Otherinfo': 15}
"""
if not line.startswith(startswith):
raise Exception("Header line should start with \"{0}\"".format(startswith))
else:
if line.startswith("#"):
line = line[1:]
return dict([(v, i) for i,v in enumerate(line.rstrip().split(sep))]) | 16bbbc07fa13ff9bc8ec7af1aafc4ed65b20ec4c | 15,508 |
def max(q):
"""Return the maximum of an array or maximum along an axis.
Parameters
----------
q : array_like
Input data
Returns
-------
array_like
Maximum of an array or maximum along an axis
"""
if isphysicalquantity(q):
return q.__class__(np.max(q.value), q.unit)
else:
return np.max(q) | 0a3cfae6fb9d1d26913817fcc11765214baa8dff | 15,509 |
from collections import OrderedDict
def make_failure_log(conclusion_pred, premise_preds, conclusion, premises,
coq_output_lines=None):
"""
Produces a dictionary with the following structure:
{"unproved sub-goal" : "sub-goal_predicate",
"matching premises" : ["premise1", "premise2", ...],
"raw sub-goal" : "conclusion",
"raw premises" : ["raw premise1", "raw premise2", ...]}
Raw sub-goal and raw premises are the coq lines with the premise
internal name and its predicates. E.g.
H : premise (Acc x1)
Note that this function is not capable of returning all unproved
sub-goals in coq's stack. We only return the top unproved sub-goal.
"""
failure_log = OrderedDict()
conclusion_base = denormalize_token(conclusion_pred)
# failure_log["unproved sub-goal"] = conclusion_base
premises_base = [denormalize_token(p) for p in premise_preds]
# failure_log["matching premises"] = premises_base
# failure_log["raw sub-goal"] = conclusion
# failure_log["raw premises"] = premises
premise_preds = []
for p in premises:
try:
pred = p.split()[2]
except:
continue
if pred.startswith('_'):
premise_preds.append(denormalize_token(pred))
failure_log["all_premises"] = premise_preds
failure_log["other_sub-goals"] = get_subgoals_from_coq_output(
coq_output_lines, premises)
failure_log["other_sub-goals"].insert(0, {
'subgoal': conclusion_base,
'index': 1,
'raw_subgoal': conclusion,
'matching_premises' : premises_base,
'matching_raw_premises' : premises_base})
failure_log["type_error"] = has_type_error(coq_output_lines)
failure_log["open_formula"] = has_open_formula(coq_output_lines)
return failure_log | 17f7cb8b6867849e034d72f05e9e48622bd35b7d | 15,510 |
import requests
def request(url=None, json=None, parser=lambda x: x, encoding=None, **kwargs):
"""
:param url:
:param json:
:param parser: None 的时候返回r,否则返回 parser(r.json())
:param kwargs:
:return:
"""
method = 'post' if json is not None else 'get' # 特殊情况除外
logger.info(f"Request Method: {method}")
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
}
r = requests.request(method, url, json=json, headers=headers)
r.encoding = encoding if encoding else r.apparent_encoding
if parser is None:
return r
return parser(r.json()) | c222cc2a5c1e2acb457600d223c9ca6ab588aa5e | 15,511 |
import math
def log_density_igaussian(z, z_var):
"""Calculate log density of zero-mean isotropic gaussian distribution given z and z_var."""
assert z.ndimension() == 2
assert z_var > 0
z_dim = z.size(1)
return -(z_dim/2)*math.log(2*math.pi*z_var) + z.pow(2).sum(1).div(-2*z_var) | a412b9e25aecfc2baed2d783a2d7cd281fadc9fb | 15,512 |
def denom(r,E,J,model):
"""solve the denominator"""
ur = model.potcurve(r)#model.potcurve[ (abs(r-model.rcurve)).argmin()]
return 2.0*(E-ur)*r*r - J*J; | 19dc7c5cd283b66f834ba9a0d84fb396ca2c2c89 | 15,513 |
def approximate_gaussians(confidence_array, mean_array, variance_array):
""" Approximate gaussians with given parameters with one gaussian.
Approximation is performed via minimization of Kullback-Leibler
divergence KL(sum_{j} w_j N_{mu_j, sigma_j} || N_{mu, sigma}).
Parameters
----------
confidence_array : ndarray(num_gaussians)
confidence values for gaussians.
mean_array : ndarray(num_gaussians, 3)
(z,y,x) mean values for input gaussians.
variance_array : ndarray(num_gaussians)
(z,y,x) variances for input gaussians.
Returns
-------
tuple(ndarray(3), ndarray(3))
mean and sigma for covering gaussian.
"""
delimiter = np.sum(confidence_array)
mu = np.sum(mean_array.T * confidence_array, axis=1) / delimiter
sigma = np.sqrt(np.sum((variance_array + (mean_array - mu) ** 2).T
* confidence_array, axis=1) / delimiter)
return mu, sigma | 7c722f0153e46631b3c4731d8a307e0b219be02b | 15,514 |
from typing import Callable
import types
def return_loss(apply_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray],
steps: types.Transition):
"""Loss wrapper for ReturnMapper.
Args:
apply_fn: applies a transition model (o_t, a_t) -> (o_t+1, r), expects the
leading axis to index the batch and the second axis to index the
transition triplet (t-1, t, t+1).
steps: RLDS dictionary of transition triplets as prepared by
`rlds_loader.episode_to_timestep_batch`.
Returns:
A scalar loss value as jnp.ndarray.
"""
observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
steps.observation)
action_t = steps.action[:, dataset.CURRENT, ...]
n_step_return_t = steps.extras[dataset.N_STEP_RETURN][:, dataset.CURRENT, ...]
predicted_n_step_return_t = apply_fn(observation_t, action_t)
return mse(predicted_n_step_return_t, n_step_return_t) | 970cb6623436982ef1359b1328edcb828012f1f7 | 15,515 |
def part_two(data):
"""Part two"""
array = ['a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
commands = data.split(',')
for _ in range(1000000000 % 30):
dance(array, commands)
return ''.join(map(str, array)) | 75e847cd5a598aa67ca54133c15a0c2c3fc67433 | 15,516 |
def readCSVPremadeGroups(filename, studentProperties=None):
"""studentProperties is a list of student properties in the order they appear in the CSV.
For example, if a CSV row (each group is a row) is as follows: "Rowan Wilson, [email protected], 1579348, Bob Tilano, [email protected], 57387294"
Then the format is: fullname, email, huid, fullname, email, huid, ...
Thus, studentProperties = ['fullname', 'email', 'huid']
"""
csv = _readCSV(filename)
# Create studentProperties if needed
if studentProperties is None:
studentProperties = []
firstHeader = None
for header in csv['headers']:
header = _keepOnlyLetters(header).lower()
if firstHeader == header:
# Found beginning of repeating sequence
break
if firstHeader == None:
firstHeader = header
studentProperties.append(header)
# Pull groups from CSV data
groups = []
for row in csv['data']:
students = []
currStudent = None
for i in range(len(row)):
if len(row[i].strip()) == 0:
break
propIndex = i % len(studentProperties)
if propIndex == 0:
# Just starting a new student
currStudent = {}
# Store every property (including the first one) on the current student
currStudent[studentProperties[propIndex]] = row[i]
if propIndex == len(studentProperties) - 1:
# Just finished adding properties to a student
students.append(currStudent)
if len(students) > 0:
groups.append(students)
return groups | f12de287b4a9f19e2e29302338f7233e34d54f0c | 15,518 |
def random_contrast(video, lower, upper, seed=None):
"""Adjust the contrast of an image or images by a random factor.
Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
picked in the interval `[lower, upper)`.
For producing deterministic results given a `seed` value, use
`tf.image.stateless_random_contrast`. Unlike using the `seed` param
with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops guarantee the
same results given the same seed independent of how many times the function is
called, and independent of global seed settings (e.g. tf.random.set_seed).
Args:
video: An image or video tensor with 3 or more dimensions.
lower: float. Lower bound for the random contrast factor.
upper: float. Upper bound for the random contrast factor.
seed: A Python integer. Used to create a random seed. See
`tf.compat.v1.set_random_seed` for behavior.
Usage Example:
>>> x = [[[1.0, 2.0, 3.0],
... [4.0, 5.0, 6.0]],
... [[7.0, 8.0, 9.0],
... [10.0, 11.0, 12.0]]]
>>> tf.image.random_contrast(x, 0.2, 0.5)
<tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>
Returns:
The contrast-adjusted image(s).
Raises:
ValueError: if `upper <= lower` or if `lower < 0`.
"""
if upper <= lower:
raise ValueError("upper must be > lower.")
if lower < 0:
raise ValueError("lower must be non-negative.")
contrast_factor = tf.random.uniform([], lower, upper, seed=seed)
return adjust_contrast(video, contrast_factor) | 6ea6a72100ad468d7692c0bb8c2837cba5eaa3e0 | 15,519 |
from typing import Dict
def load(df: DataFrame, config: Dict, logger) -> bool:
"""Write data in final destination
:param df: DataFrame to save.
:type df: DataFrame
:param config: job configuration
:type config: Dict
:param logger: Py4j Logger
:type logger: Py4j.Logger
:return: True
:rtype: bool
"""
df.write.save(path=config['output_path'], mode='overwrite')
return True | 70f962cc24f23264f23dce458c233466dc06d276 | 15,520 |
def phase_correct_zero(spec, phi):
"""
Correct the phases of a spectrum by phi radians
Parameters
----------
spec : float array of complex dtype
The spectrum to be corrected.
phi : float
Returns
-------
spec : float array
The phase corrected spectrum
Notes
-----
[Keeler2005] Keeler, J (2005). Understanding NMR Spectroscopy, 2nd
edition. Wiley. Page 88.
"""
c_factor = np.exp(-1j * phi)
# If it's an array, we need to reshape it and broadcast across the
# frequency bands in the spectrum. Otherwise, we assume it's a scalar and
# apply it to all the dimensions of the spec array:
if hasattr(phi, 'shape'):
c_factor = c_factor.reshape(c_factor.shape + (1,))
return spec * c_factor | 1647e8f99e10ba5f715e4c268907cbf995e99335 | 15,521 |
import numpy as np
def upsample(s, n, phase=0):
"""Increase sampling rate by integer factor n with included offset phase.
"""
return np.roll(np.kron(s, np.r_[1, np.zeros(n-1)]), phase) | 997f48be57816efb11b77c258e945d3161b748be | 15,522 |
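# Hedged usage sketch for upsample above (my own example; assumes numpy is imported as np):
import numpy as np
s = np.array([1, 2, 3])
print(upsample(s, 3))  # [1. 0. 0. 2. 0. 0. 3. 0. 0.]
print(upsample(s, 3, phase=1))  # [0. 1. 0. 0. 2. 0. 0. 3. 0.]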
def parse_idx_inp(idx_str):
""" parse idx string
"""
idx_str = idx_str.strip()
if idx_str.isdigit():
idxs = [int(idx_str)]
if '-' in idx_str:
[idx_begin, idx_end] = idx_str.split('-')
idxs = list(range(int(idx_begin), int(idx_end)+1))
return idxs | 2dc1282169f7534455f2a0297af6c3079192cb66 | 15,523 |
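# Hedged usage sketch for parse_idx_inp above (illustrative calls only):
print(parse_idx_inp(" 7 "))  # [7]
print(parse_idx_inp("3-6"))  # [3, 4, 5, 6]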
import requests
def toggl_request_get(url: str, params: dict = None) -> requests.Response:
"""Send a GET request to specified url using toggl headers and configured auth"""
headers = {"Content-Type": "application/json"}
auth = (CONFIG["toggl"]["api_token"], "api_token")
response = requests.get(url, headers=headers, auth=auth, params=params)
return response | bd4714bb0d92dcfb1e2ef27bb3fa3c67179b8b40 | 15,524 |
def sample_point_cloud(source, target, sample_indices=[2]):
""" Resamples a source point cloud at the coordinates of a target points
Uses the nearest point in the target point cloud to the source point
Parameters
----------
source: array
Input point cloud
target: array
Target point cloud for sample locations
sample_indices: list
List of indices to sample from source. Defaults to 2 (z or height
dimension)
Returns
-------
An array of sampled points
"""
sample_indices = np.array(sample_indices)
tree = cKDTree(source[:, 0:2])
dist, idx = tree.query(target, n_jobs=-1)
output = np.hstack(
[
target,
source[idx[:, None], sample_indices].reshape(
(len(idx), len(sample_indices))
),
]
)
return output | b490e598e68ef175a5cf80c052f2f82fc70ac4ba | 15,525 |
def make_satellite_gsp_pv_map(batch: Batch, example_index: int, satellite_channel_index: int):
"""Make a animation of the satellite, gsp and the pv data"""
trace_times = []
times = batch.satellite.time[example_index]
pv = batch.pv
for time in times:
trace_times.append(
make_satellite_gsp_pv_map_one_time_value(
batch=batch,
example_index=example_index,
satellite_channel_index=satellite_channel_index,
time_value=time,
)
)
frames = []
for i, traces in enumerate(trace_times):
frames.append(go.Frame(data=traces, name=f"frame{i+1}"))
# make slider
labels = [pd.to_datetime(time.data) for time in times]
sliders = make_slider(labels=labels)
x = pv.x_coords[example_index][pv.x_coords[example_index] != 0].mean()
y = pv.y_coords[example_index][pv.y_coords[example_index] != 0].mean()
lat, lon = osgb_to_lat_lon(x=x, y=y)
fig = go.Figure(
data=trace_times[0],
layout=go.Layout(
title="Start Title",
),
frames=frames,
)
fig.update_layout(updatemenus=[make_buttons()])
fig.update_layout(
mapbox_style="carto-positron", mapbox_zoom=8, mapbox_center={"lat": lat, "lon": lon}
)
fig.update_layout(sliders=sliders)
return fig | fd0e0a7543da212f368849c3686277a7c8c42a95 | 15,526 |
def raw_to_engineering_product(product, idbm):
"""Apply parameter raw to engineering conversion for the entire product.
Parameters
----------
product : `BaseProduct`
The TM product as level 0
Returns
-------
`int`
How many columns where calibrated.
"""
col_n = 0
idb_ranges = QTable(rows=[(version, range.start.as_float(), range.end.as_float())
for version, range in product.idb_versions.items()],
names=["version", "obt_start", "obt_end"])
idb_ranges.sort("obt_start")
idb_ranges['obt_start'][0] = SCETime.min_time().as_float()
for i in range(0, len(idb_ranges)-1):
idb_ranges['obt_end'][i] = idb_ranges['obt_start'][i+1]
idb_ranges['obt_end'][-1] = SCETime.max_time().as_float()
for col in product.data.colnames:
if (not (hasattr(product.data[col], "meta")
and "PCF_CURTX" in product.data[col].meta
and product.data[col].meta["PCF_CURTX"] is not None
and product.data[col].meta["NIXS"] is not None
and hasattr(product, "idb")
)):
continue
col_n += 1
c = 0
# clone the current column into a new column as the content might be replaced chunk wise
product.data[CCN] = product.data[col]
for idbversion, starttime, endtime in idb_ranges.iterrows():
idb = idbm.get_idb(idbversion)
idb_time_period = np.where((starttime <= product.data['time']) &
(product.data['time'] < endtime))[0]
if len(idb_time_period) < 1:
continue
c += len(idb_time_period)
calib_param = idb.get_params_for_calibration(
product.service_type,
product.service_subtype,
(product.ssid if hasattr(product, "ssid") else None),
product.data[col].meta["NIXS"],
product.data[col].meta["PCF_CURTX"])[0]
raw = Parameter(product.data[col].meta["NIXS"],
product.data[idb_time_period][col], None)
eng = apply_raw_to_engineering(raw, (calib_param, idb))
# cast the type of the column if needed
if product.data[CCN].dtype != eng.engineering.dtype:
product.data[CCN] = product.data[CCN].astype(eng.engineering.dtype)
# set the unit if needed
if hasattr(eng.engineering, "unit") and \
product.data[CCN].unit != eng.engineering.unit:
meta = product.data[col].meta
product.data[CCN].unit = eng.engineering.unit
# restore the meta info
setattr(product.data[CCN], "meta", meta)
# override the data into the new column
product.data[CCN][idb_time_period] = eng.engineering
# replace the old column with the converted
product.data[col] = product.data[CCN]
product.data[col].meta = product.data[CCN].meta
# delete the generic column for conversion
del product.data[CCN]
# delete the calibration key from meta as it is now processed
del product.data[col].meta["PCF_CURTX"]
if c != len(product.data):
logger.warning("Not all time bins got converted to engineering" +
"values due to bad idb periods." +
f"\n Converted bins: {c}\ntotal bins {len(product.data)}")
return col_n | aaf5a92c53bfc41a5230593b96c0de7b8ad1ba4a | 15,527 |
def paginate(objects, page_num, per_page, max_paging_links):
"""
Return a paginated page for the given objects, giving it a custom
``visible_page_range`` attribute calculated from ``max_paging_links``.
"""
paginator = Paginator(objects, per_page)
try:
page_num = int(page_num)
except ValueError:
page_num = 1
try:
objects = paginator.page(page_num)
except (EmptyPage, InvalidPage):
objects = paginator.page(paginator.num_pages)
page_range = objects.paginator.page_range
if len(page_range) > max_paging_links:
start = min(objects.paginator.num_pages - max_paging_links,
max(0, objects.number - (max_paging_links // 2) - 1))
page_range = page_range[start:start + max_paging_links]
objects.visible_page_range = page_range
return objects | cd8a7ef046a48c580ad12cfa44f1862312bb1aba | 15,528 |
def _get_crop_frame(image, max_wiggle, tx, ty):
"""
Based on on the max_wiggle, determines a cropping frame.
"""
pic_width, pic_height = image.size
wiggle_room_x = max_wiggle * .5 * pic_width
wiggle_room_y = max_wiggle * .5 * pic_height
cropped_width = pic_width - wiggle_room_x
cropped_height = pic_height - wiggle_room_y
left = int(tx * wiggle_room_x)
top = int(ty * wiggle_room_y)
right = left + cropped_width
bottom = top + cropped_height
return left, top, right, bottom | 18442a97544d6c4bc4116dc43811c9fcd0d203c6 | 15,529 |
from string import ascii_uppercase as U
def __vigenere(s, key='virink', de=0):
"""Vigenère cipher"""
s = str(s).replace(" ", "").upper()
key = str(key).replace(" ", "").upper()
res = ''
i = 0
while i < len(s):
j = i % len(key)
k = U.index(key[j])
m = U.index(s[i])
if de:
if m < k:
m += 26
res += U[m - k]
else:
res += U[(m + k) % 26]
i += 1
return res | 4deadfc9fdd1cb002c2f31a1de7763b0c49dd757 | 15,530 |
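# Hedged usage sketch for __vigenere above (my own example; assumes U is the uppercase alphabet
# as in the corrected import, and that the function is called from module scope):
cipher = __vigenere("HELLO", key="KEY")
print(cipher)  # RIJVS
print(__vigenere(cipher, key="KEY", de=1))  # HELLO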
def mask_unit_group(unit_group: tf.Tensor, unit_group_length: tf.Tensor, mask_value=0) -> tf.Tensor:
""" Masks unit groups according to their length.
Args:
unit_group: A tensor of rank 3 with a sequence of unit feature vectors.
unit_group_length: The length of the unit group (assumes all unit feature vectors upfront).
mask_value: The mask value.
Returns:
A tensor of rank 3 where indices beyond unit_group_length are zero-masked.
"""
if unit_group_length is not None:
# get rid of last dimensions with size 1
if unit_group.shape.rank - unit_group_length.shape.rank < 2:
unit_group_length = tf.squeeze(unit_group_length, axis=-1) # B
# mask with mask_value
unit_group_mask = tf.sequence_mask(
tf.cast(unit_group_length, tf.int32), maxlen=unit_group.shape[1], dtype=unit_group.dtype) # B x T
unit_group_mask = tf.expand_dims(unit_group_mask, axis=-1)
unit_group *= unit_group_mask
if mask_value != 0:
mask_value = tf.convert_to_tensor(mask_value)
unit_group = tf.cast(unit_group, mask_value.dtype)
unit_group_mask = tf.cast(unit_group_mask, mask_value.dtype)
unit_group += (1 - unit_group_mask) * mask_value
return unit_group | 758028075f793bad1165d0ca8992c78cb4a1318e | 15,531 |
def fill_session_team(team_id, session_id, dbsession=DBSESSION):
"""
Use the FPL API to get list of players in an FPL squad with id=team_id,
then fill the session team with these players.
"""
# first reset the team
reset_session_team(session_id, dbsession)
# now query the API
players = fetcher.get_fpl_team_data(get_last_finished_gameweek(), team_id)
player_ids = [p["element"] for p in players]
for pid in player_ids:
add_session_player(pid, session_id, dbsession)
team_history = fetcher.get_fpl_team_history_data()["current"]
index = (
get_last_finished_gameweek() - 1
) # as gameweek starts counting from 1 but list index starts at 0
budget = team_history[index]["value"]
set_session_budget(budget, session_id)
return player_ids | 28118a527c009d90401b368d628725ee29e838ef | 15,532 |
def create_map(users_info):
"""
This function builds an HTML map with locations of user's friends on
Twitter.
"""
my_map = folium.Map(
location=[49.818396058511645, 24.02258071000576], zoom_start=10)
folium.TileLayer('cartodbdark_matter').add_to(my_map)
folium.TileLayer('stamentoner').add_to(my_map)
folium.TileLayer('openstreetmap').add_to(my_map)
fg_friends = folium.FeatureGroup(name='Twitter Friends')
for user in users_info:
nickname = user[0]
user_coord = user[1]
fg_friends.add_child(folium.Marker(location=user_coord,
popup=nickname,
icon=folium.Icon(color='darkred',
icon='heart')))
my_map.add_child(fg_friends)
my_map.add_child(folium.LayerControl())
return my_map.get_root().render() | cfe9649101906aa295ffc9984bbed15a99c7ed46 | 15,533 |
def l2sq(x):
"""Sum the matrix elements squared
"""
return (x**2).sum() | c02ea548128dde02e4c3e70f9280f1ded539cee9 | 15,534 |
import numpy as np
def normalize(arr, axis=None):
"""
Normalize a vector between 0 and 1.
Parameters
----------
arr : numpy.ndarray
Input array
axis : integer
Axis along which normalization is computed
Returns
-------
arr : numpy.ndarray
Normalized version of the input array
"""
if not isinstance(arr, np.ndarray):
arr = np.asarray(arr)
arr = arr - np.min(arr, axis)
M = np.max(arr, axis)
if np.sum(np.abs(M)) > 0:
arr = arr / M
return arr | 2c9689ee829e66bfd02db3c1c92c749ca068bd73 | 15,535 |
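# Hedged usage sketch for normalize above (my own example; assumes numpy is imported as np):
import numpy as np
arr = np.array([2.0, 4.0, 6.0, 10.0])
print(normalize(arr))  # 0., 0.25, 0.5, 1.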
def successive_substitution(m, T, P, max_iter, M, Pc, Tc, omega, delta, Aij,
Bij, delta_groups, calc_delta, K, steps=0):
"""
Find K-factors by successive substitution
Iterate to find a converged set of K-factors defining the gas/liquid
partitioning of a mixture using successive substitution. We follow the
algorithms in McCain (1990) and Michelsen and Mollerup (2007).
Parameters
----------
m : ndarray, size (nc)
masses of each component present in the whole mixture (gas plus
liquid, kg)
T : float
temperature (K)
P : float
pressure (Pa)
max_iter : int
maximum number of iterations to perform. Set max_iter to np.inf if
you want the algorithm to guarantee to iterate to convergence, but
beware that you may create an infinite loop.
M : ndarray, size (nc)
Molecular weights (kg/mol)
Pc : ndarray, size (nc)
Critical pressures (Pa)
Tc : ndarray, size (nc)
Critical temperatures (K)
omega : ndarray, size (nc)
Acentric factors (--)
delta : ndarray, size (nc, nc)
Binary interaction coefficients for the Peng-Robinson equation of
state.
Aij : ndarray, (15, 15)
Coefficients in matrix A_ij for the group contribution method for
delta_ij following Privat and Jaubert (2012)
Bij : ndarray, (15, 15)
Coefficients in matrix B_ij for the group contribution method for
delta_ij following Privat and Jaubert (2012)
delta_groups : ndarray, (nc, 15)
Specification of the fractional groups for each component of the
mixture for the group contribution method of Privat and Jaubert (2012)
for delta_ij
calc_delta : int
Flag specifying whether or not to compute delta_ij (1: True, -1:
False) using the group contribution method
K : ndarray, size (nc)
Initial guess for the partition coefficients. If K = None, this
function will use initial estimates from Wilson (see Michelsen and
Mollerup, 2007, page 259, equation 26)
steps : int (default = 0)
Number of previous iteration steps
Returns
-------
K : ndarray, size (nc)
Final value of the K-factors
beta : float
Fraction of gas or liquid (--)
xi : ndarray, size(2, nc)
Mole fraction of each component in the mixture. Row 1 gives the
values for the gas phase and Row 2 gives the values for the liquid
phase (--)
exit_flag : int
Flag indicating how the solution finished: 1: converged in the
allowable number of iterations, 0: did not converge and did not find
any indication that it might be single phase, and -1: did not
converge, but it looks like it might be single phase.
steps : int
Total number of interation steps so far
Notes
-----
The max_iter parameter controls how many steps of successive iteration
are performed. If set to None, the iteration will continue until the
tolerance criteria are reached.
"""
# Update the value of K using successive substitution
def update_K(K):
"""
Evaluate the update function for finding K-factor
Evaluates the new guess for K-factor following McCain (1990) p. 426,
equation (15-23) as explained on p. 430 in connection with equation
(15-31). This is the update equation for the successive substitution
method.
Parameters
----------
T, P, m_0, M, Pc, Tc, omega, delta = constant and inherited
from above
K : ndarray
The current guess for the K-factor (--)
Returns
-------
K_new : ndarray
New guess for K-factor
"""
# Get the mixture composition for the current K-factor
xi, beta = gas_liq_eq(m, M, K)
# Get the gas and liquid fugacities for the current composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega, delta,
Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega, delta,
Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
# If the mass of any component in the mixture is zero, make sure the
# K-factor is also zero.
K_new[np.isnan(K_new)] = 0.
# Follow what is said by Michelsen & Mollerup, at page 259, just
# above equation 27:
if steps==0.:
moles = m / M
zi = moles / np.sum(moles)
if np.sum(zi*K_new) - 1. <= 0.: # Condition 4 page 252
xi[0,:] = K_new * zi / np.sum(K_new*zi)
xi[1,:] = zi
# Recompute fugacities of gas and liquid:
# Get the gas and liquid fugacities for the current
# composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
K_new[np.isnan(K_new)] = 0.
elif (1.-np.sum(zi/K_new))>=0.: # % Condition 5 page 252
xi[0,:] = zi
xi[1,:] = (zi/K_new)/np.sum(zi/K_new)
# Recompute fugacities of gas and liquid:
# Get the gas and liquid fugacities for the current
# composition
f_gas = dbm_f.fugacity(T, P, xi[0,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[0,:]
f_liq = dbm_f.fugacity(T, P, xi[1,:]*M, M, Pc, Tc, omega,
delta, Aij, Bij, delta_groups, calc_delta)[1,:]
# Update K using K = (phi_liq / phi_gas)
K_new = (f_liq / (xi[1,:] * P)) / (f_gas / (xi[0,:] * P))
K_new[np.isnan(K_new)] = 0.
# Return an updated value for the K factors
return (K_new, beta)
# Set up the iteration parameters
tol = 1.49012e-8 # Suggested by McCain (1990)
err = 1.
# Iterate to find the final value of K factor using successive
# substitution
stop = False
while err > tol and steps < max_iter and not stop:
# Save the current value of K factor
K_old = K
# Update the estimate of K factor using the present fugacities
K, beta = update_K(K)
steps += 1
if steps > 3 and (beta == 0. or beta == 1.):
stop = True
# Compute the current error based on the squared relative error
# suggested by McCain (1990) and update the iteration counter
err = np.nansum((K - K_old)**2 / (K * K_old))
# Determine the exit condition
if stop:
# Successive substitution thinks this is single-phase
flag = -1
elif steps < max_iter:
# This solution is converged
flag = 1
else:
# No decision has been reached
flag = 0
# Update the equilibrium and return the last value of K-factor
xi, beta = gas_liq_eq(m, M, K)
return (K, beta, xi, flag, steps) | 1ad40642f940be0d1e967bf97a62e3b754312ae9 | 15,536 |
import re
def Match(context, pattern, arg=None):
"""Do a regular expression match against the argument"""
if not arg:
arg = context.node
arg = Conversions.StringValue(arg)
bool = re.match(pattern, arg) and boolean.true or boolean.false
return bool | 62007fcd4617b0dfebb1bc8857f89fa2e6075f41 | 15,537 |
def rr_rectangle(rbins, a, b):
""" RR_rect(r; a, b) """
return Frr_rectangle(rbins[1:], a, b) - Frr_rectangle(rbins[:-1], a, b) | 98e30791e114ce3e2f6529db92c0103d1477cd76 | 15,538 |
def update_type(title, title_new=None, description=None, col_titles_new={}):
"""Method creates data type
Args:
title (str): current type title
title_new (str): new type title
description (str): type description
col_titles_new (dict): new column values (key - col id, value - col value)
Returns:
bool
"""
try:
db = DBO(_get_dsn())._dbo_driver
cnt = db.execute(
'SELECT count(*) FROM data_type WHERE title = \'{0}\''.format(title)).fetchone()[0]
if (cnt == 0):
raise Error('Type {0} does not exist'.format(title))
query = 'UPDATE data_type SET '
if (title_new != None):
query += 'title = \'{0}\', '.format(title_new)
if (description != None):
query += 'description = \'{0}\', '.format(description)
for key, value in col_titles_new.items():
query += 'col{0}_title = \'{1}\', '.format(key, value)
query = query[:-2]
query += ' WHERE title = \'{0}\''.format(title)
db.execute(query)
db.commit()
return True
except Error as ex:
print(ex)
db.rollback()
return False | 92e663d3bfd798de0367a44c4909a330ac9e4254 | 15,539 |
def api(repos_path):
"""Glottolog instance from shared directory for read-only tests."""
return pyglottolog.Glottolog(str(repos_path)) | a941a907050300bc89f6db8b4bd33cf9725cf832 | 15,540 |
from presqt.targets.osf.utilities.utils.async_functions import run_urls_async
import requests
def get_all_paginated_data(url, token):
"""
Get all data for the requesting user.
Parameters
----------
url : str
URL to the current data to get
token: str
User's OSF token
Returns
-------
Data dictionary of the data points gathered up until now.
"""
headers = {'Authorization': 'Bearer {}'.format(token)}
# Get initial data
response = requests.get(url, headers=headers)
if response.status_code == 200:
response_json = response.json()
elif response.status_code == 410:
raise PresQTResponseException("The requested resource is no longer available.", status.HTTP_410_GONE)
elif response.status_code == 404:
raise OSFNotFoundError("Resource not found.", status.HTTP_404_NOT_FOUND)
elif response.status_code == 403:
raise OSFForbiddenError(
"User does not have access to this resource with the token provided.", status.HTTP_403_FORBIDDEN)
data = response_json['data']
meta = response_json['links']['meta']
# Calculate pagination pages
if '?filter' in url or '?page' in url:
# We already have all the data we need for this request
return data
else:
page_total = get_page_total(meta['total'], meta['per_page'])
url_list = ['{}?page={}'.format(url, number) for number in range(2, page_total + 1)]
# Call all pagination pages asynchronously
children_data = run_urls_async(url_list, headers)
[data.extend(child['data']) for child in children_data]
return data | cca997d479c63415b519de9cbd8ac2681abc42ed | 15,541 |
def alaw_decode(x_a, quantization_channels, input_int=True, A=87.6):
"""alaw_decode(x_a, quantization_channels, input_int=True)
input
-----
x_a: np.array, a-law waveform
quantization_channels: int, Number of channels
input_int: Bool
True: convert x_a (int) from int to float, before a-law decode
False: directly decode x_a (float)
A: float, parameter for a-law, default 87.6
output
------
x: np.array, waveform
"""
num = quantization_channels - 1.0
if input_int:
x = x_a / num * 2 - 1.0
else:
x = x_a
sign = np.sign(x)
x_a_abs = np.abs(x)
x = x_a_abs * (1 + np.log(A))
flag = x >= 1
x[flag] = np.exp(x[flag] - 1)
x = sign * x / A
return x | a2e10eb590d5b7731227b96233c6b615c11d4af6 | 15,542 |
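# Hedged usage sketch for alaw_decode above (my own example; assumes numpy is imported as np and that
# codes are integer a-law indices in [0, quantization_channels - 1]):
import numpy as np
codes = np.array([0, 64, 128, 192, 255])
waveform = alaw_decode(codes, quantization_channels=256, input_int=True)
print(waveform.shape)  # (5,) -- decoded samples lie roughly in [-1, 1]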
import dateutil
def swap_year_for_time(df, inplace):
"""Internal implementation to swap 'year' domain to 'time' (as datetime)"""
if not df.time_col == "year":
raise ValueError("Time domain must be 'year' to use this method")
ret = df.copy() if not inplace else df
index = ret._data.index
order = [v if v != "year" else "time" for v in index.names]
if "subannual" in df.extra_cols:
order.remove("subannual")
time_values = zip(*[index.get_level_values(c) for c in ["year", "subannual"]])
time = list(map(dateutil.parser.parse, [f"{y}-{s}" for y, s in time_values]))
index = index.droplevel(["year", "subannual"])
ret.extra_cols.remove("subannual")
else:
time = index.get_level_values("year")
index = index.droplevel(["year"])
# add new index column, assign data and other attributes
index = append_index_col(index, time, "time", order=order)
ret._data.index = index
ret.time_col = "time"
ret._set_attributes()
delattr(ret, "year")
if not inplace:
return ret | 790e8d24e6c4d87413dfc5d6205cbb04f7acefd6 | 15,545 |
from typing import Optional
from typing import List
def get_orders(
db: Session,
skip: int = 0,
limit: int = 50,
moderator: str = None,
owner: str = None,
desc: bool = True,
) -> Optional[List[entities.Order]]:
"""
Get the registered orders using filters.
Args:
- db: the database session.
- skip: the number of filtered entities to skip.
- limit: the number of entities to limit the query.
- moderator: the moderator name that created the order.
- owner: the owner name that receives the order.
- desc: order by request_at datetime.
Returns:
- the list of orders or `None` if there are no orders to return
using the filter specified.
"""
order_by = (
entities.Order.requested_at.desc()
if desc
else entities.Order.requested_at.asc()
)
query = db.query(entities.Order).order_by(order_by)
if moderator:
query = query.filter_by(mod_display_name=moderator)
if owner:
query = query.filter_by(owner_display_name=owner)
return query.offset(skip).limit(limit).all() | a5c86ccaad8573bc641f531751370c264baa60f8 | 15,546 |
def create_data_loader(img_dir, info_csv_path, batch_size):
"""Returns a data loader for the model."""
img_transform = transforms.Compose([transforms.Resize((120, 120), interpolation=Image.BICUBIC),
transforms.ToTensor()])
img_dataset = FashionDataset(img_dir, img_transform, info_csv_path)
data_loader = DataLoader(img_dataset, batch_size=batch_size, shuffle=True, num_workers=12, pin_memory=True)
return data_loader | 1a7df2b691c66ef5957113d5113353f34bf8c855 | 15,547 |
def comp_number_phase_eq(self):
"""Compute the equivalent number of phase
Parameters
----------
self : LamSquirrelCage
A LamSquirrelCage object
Returns
-------
qb: float
Zs/p
"""
return self.slot.Zs / float(self.winding.p) | f4679cf92dffff138a5a96787244a984a11896f9 | 15,548 |
import sympy
def exprOps(expr):
"""This operation estimation is not handling some simple optimizations that
should be done (i.e. y-x is treated as -1*x+y) and it is overestimating multiplications
in situations such as divisions. This is as a result of the simple method
of implementing this function given the rudimentary form of the expression
tree. It should only be overestimating the number of operations though,
so it is a viable way to see how much optimization is improving the
computational load of the generated Kalman filters"""
ops = OperationCounts()
if isinstance(expr, sympy.Symbol) or isinstance(expr, sympy.Number):
#print('--> {}'.format(expr))
ops.reads += 1
else:
func = expr.func
num = len(expr.args) - 1
#print('--> ({}, {})'.format(func, expr.args))
process = True
if func == sympy.Add:
ops.addsubs += num
elif func == sympy.Mul:
ops.mults += num
elif func == sympy.Pow:
if expr.args[1] == -1:
ops.divs += 1
process = False
elif expr.args[1] > 0:
ops.mults += int(expr.args[1].evalf()-1)
process = False
else:
print('Error: Unknown how to map expression {} to operation counts'.format(expr))
else:
print('Unknown function {}'.format(func))
if process:
for arg in expr.args:
o = exprOps(arg)
ops += o
return ops | b6255707ef7475c893d9325358e7666b95c0e7c8 | 15,549 |
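A usage sketch; `OperationCounts` (defined elsewhere in this module) is assumed to expose the `reads`, `addsubs`, `mults` and `divs` counters used above:
import sympy
a, b, c = sympy.symbols("a b c")
counts = exprOps(a * b + c / a)
print(counts.addsubs, counts.mults, counts.divs, counts.reads)  # e.g. 1 2 1 3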
def ellipsis_reformat(source: str) -> str:
"""
Move ellipses (``...``) for type stubs onto the end of the stub definition.
Before:
.. code-block:: python
def foo(value: str) -> int:
...
After:
.. code-block:: python
def foo(value: str) -> int: ...
:param source: The source to reformat.
:return: The reformatted source.
"""
if "..." not in source:
return source
return EllipsisRewriter(source).rewrite() | 162d9d863f7316bee87a04857366a7f78f68d75b | 15,550 |
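Expected behaviour, assuming the `EllipsisRewriter` helper from the same module:
src = "def foo(value: str) -> int:\n    ...\n"
print(ellipsis_reformat(src))
# def foo(value: str) -> int: ...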
def build_wtk_filepath(region, year, resolution=None):
"""
A utility for building WIND Toolkit filepaths.
Args:
region (str): region in which the lat/lon point is located (see
`get_regions`)
year (int): year to be accessed (see `get_regions`)
resolution (:obj:`str`, optional): data resolution (see `get_regions`)
Returns:
str: The filepath for the requested resource.
"""
wtk = _load_wtk()
base_url = '/nrel/wtk/'
assert region in wtk, 'region not found: %s' % region
year_range = wtk[region]['year_range']
year_range = range(year_range[0], year_range[1]+1)
assert isinstance(year, int), '"year" must be an integer'
msg = 'year %s not available for region: %s' % (year, region)
assert year in year_range, msg
if resolution:
msg = 'resolution "%s" not available for region: %s' % (
resolution, region)
assert resolution in wtk[region]['resolutions'], msg
base = wtk[region].get('base')
if resolution == '5min':
url_region = '%s-%s/' % (region, resolution)
else:
url_region = region + '/'
if base:
file = '%s_%s.h5' % (base, year)
else:
file = 'wtk_%s_%s.h5' % (region, year)
return base_url + url_region + file | 93da894523a6517faaf4fa4976ba986a3719494c | 15,551 |
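A hedged usage sketch; the valid region names, year ranges and resolutions come from the configuration loaded by `_load_wtk()`, so "conus" here is only illustrative:
path = build_wtk_filepath("conus", 2012, resolution="5min")
print(path)  # e.g. "/nrel/wtk/conus-5min/wtk_conus_2012.h5", depending on the config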
import logging
def init_doc(args: dict) -> dict:
""" Initialize documentation variable
:param args: A dictionary containing relevant documentation fields
:return:
"""
doc = {}
try:
doc[ENDPOINT_PORT_KEY] = args[ENDPOINT_PORT_KEY]
except KeyError:
logging.warning("No port for documentation specified, default one will be used: "+str(DEFAULT_REST_PORT))
doc[ENDPOINT_PORT_KEY] = DEFAULT_REST_PORT
try:
doc[ENDPOINT_URL_KEY] = args[ENDPOINT_URL_KEY]
except KeyError:
logging.warning("No URL for documentation specified, default one will be used: " + DEFAULT_URL)
doc[ENDPOINT_URL_KEY] = DEFAULT_URL
try:
doc[MODULE_NAME_KEY] = args[MODULE_NAME_KEY]
except KeyError:
logging.warning("No module name for documentation specified, default one will be used: " + DEFAULT_MODULE_NAME)
doc[MODULE_NAME_KEY] = DEFAULT_MODULE_NAME
return doc | afa20f89595eac45e924ecdb32f9ef169fc72726 | 15,552 |
import re
from html.entities import name2codepoint
# Assumed entity pattern; the original module defines RE_UNICODE elsewhere.
RE_UNICODE = re.compile(r"&(#?)(x|X?)(\w+);")
def decode_entities(string):
    """ Decodes HTML entities in the given string ("&lt;" => "<").
    """
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == "":
                return chr(int(name))              # "&#38;" => "&"
            if hex.lower() == "x":
                return chr(int("0x" + name, 16))   # "&#x0026;" => "&"
        else:
            cp = name2codepoint.get(name)          # "&amp;" => "&"
            return chr(cp) if cp else match.group()  # "&foo;" => "&foo;"
    if isinstance(string, str):
        return RE_UNICODE.subn(replace_entity, string)[0]
    return string | 480a7ed8a37b05bc65d10e513e021b00fcb718c4 | 15,553
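A quick sanity check, using the entity pattern assumed above:
print(decode_entities("5 &lt; 6 &amp; x &gt; 2"))  # 5 < 6 & x > 2
print(decode_entities("caf&#233;"))                # café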
import numpy as np
def convert2board(chrom, rows, cols):
"""
Converts the chromosome represented in a list into a 2D numpy array.
:param rows: number of rows associated with the board.
:param cols: number of columns associated with the board.
:param chrom: chromosome to be converted.
:return: 2D numpy array.
"""
# Initialise the variables to be used
idx = int(0) # Chromosome index
board = np.zeros((rows, cols), 'uint8')
board.fill(CELL_UNASSIGNED)
# Now loop through the board adding the shapes and checking validity.
# Start at top left corner, processing each row in turn.
for row in range(rows):
for col in range(cols):
# Retrieve the next shape
shape = chrom[idx]
# Skip the cell if it is already occupied.
if board[row][col] != CELL_UNASSIGNED:
continue
# Have we run out of shapes...
if shape == CELL_UNASSIGNED:
idx = idx + 1
if idx >= len(chrom):
return board
continue
# Attempt to place the shape on the board.
if shape == CELL_SPACE:
# Place the hole if valid.
if not ((col > 0 and board[row][col - 1] == CELL_SPACE) or
(row > 0 and board[row - 1][col] == CELL_SPACE)):
board[row][col] = CELL_SPACE
elif shape == CELL_HDOMINO:
# Are we ok to have a horizontal domino?
if col < cols - 1 and board[row][col + 1] == CELL_UNASSIGNED:
board[row][col] = CELL_HDOMINO
board[row][col + 1] = CELL_HDOMINO
else:
# shape == CELL_VDOMINO:
# Are we ok to have a vertical domino?
if row < rows - 1:
board[row][col] = CELL_VDOMINO
board[row + 1][col] = CELL_VDOMINO
# Move on to the next shape
idx = idx + 1
if idx >= len(chrom):
return board
return board | 9897965550793f54e55ce2c66c95a7584a987a4e | 15,554 |
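A usage sketch, assuming the CELL_* codes referenced above are module-level integer constants:
chrom = [CELL_HDOMINO, CELL_VDOMINO, CELL_SPACE,
         CELL_HDOMINO, CELL_VDOMINO, CELL_SPACE]
board = convert2board(chrom, rows=2, cols=3)
print(board)  # a 2x3 array of cell codes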
import numpy as np
def filter_halo_pnum(data, Ncut=1000):
""" Returns indicies of halos with more than Ncut particles"""
npart = np.array(data['np'][0])
    ind = np.where(npart > Ncut)[0]
print("# of halos:",len(ind))
return ind | 3c89eb263399ef022c1b5492190aff282e4410e8 | 15,555 |
def _preprocess_sgm(line, is_sgm):
"""Preprocessing to strip tags in SGM files."""
if not is_sgm:
return line
# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
if line.startswith("<srcset") or line.startswith("</srcset"):
return ""
if line.startswith("<refset") or line.startswith("</refset"):
return ""
if line.startswith("<doc") or line.startswith("</doc"):
return ""
if line.startswith("<p>") or line.startswith("</p>"):
return ""
# Strip <seg> tags.
line = line.strip()
if line.startswith("<seg") and line.endswith("</seg>"):
i = line.index(">")
return line[i + 1:-6] | 0a482c5ccf2c001dfd9b52458044a1feaf62e5b9 | 15,556 |
def discover(using, index="*"):
"""
:param using: Elasticsearch client
:param index: Comma-separated list or wildcard expression of index names used to limit the request.
"""
indices = Indices()
for index_name, index_detail in using.indices.get(index=index).items():
indices[index_name] = Index(
client=using,
name=index_name,
mappings=index_detail["mappings"],
settings=index_detail["settings"],
aliases=index_detail["aliases"],
)
return indices | 32a53f15b0db3ba2b2c092e8dbd4ffdf57f133c8 | 15,557 |
from datetime import datetime
from sqlite3 import Cursor
from typing import Union
# Assumed alias for the SQL-storable Python types handled below.
SQLType = Union[int, float, str, bytes, datetime, None]
def quote_sql_value(cursor: Cursor, value: SQLType) -> str:
"""
Use the SQL `quote()` function to return the quoted version of `value`.
:returns: the quoted value
"""
if isinstance(value, (int, float, datetime)):
return str(value)
if value is None:
return "NULL"
if isinstance(value, (str, bytes)):
cursor.execute("SELECT quote(?);", (value,))
result = cursor.fetchall()[0][0]
assert isinstance(result, str)
return result
raise ValueError(f"Do not know how to quote value of type {type(value)}") | 17887be2440563a1321708f797310eb8f1731687 | 15,558 |
def create_admin_nova_client(context):
"""
Creates client that uses trove admin credentials
:return: a client for nova for the trove admin
"""
client = create_nova_client(context, password=CONF.nova_proxy_admin_pass)
return client | 3fdd56ae419b5228b209a9e00fb8828c17a0d847 | 15,559 |
def page_cache(timeout=1800):
"""
page cache
param:
timeout:the deadline of cache default is 1800
"""
def _func(func):
def wrap(request, *a, **kw):
key = request.get_full_path()
#pass chinese
try:
key = mkey.encode("utf-8")
except Exception, e:
key = str(key)
data = None
try:
data = mclient.get(key)
if not data:
data = func(request, *a, **kw)
if data:
mclient.set(key, data, timeout)
return HttpResponse(data, content_type=request.META.get("CONTENT_TYPE", "text/plain"))
except Exception, e:
if data:
HttpResponse(data, content_type=request.META.get("CONTENT_TYPE", "text/plain"))
else:
return HttpResponse("<objects><error>%s</error></objects>" % e,
content_type=request.META.get("CONTENT_TYPE", "text/plain"))
return wrap
return _funcs | ca5be8d6ad1c1d0e627e2e22dbe44532d20af5cd | 15,560 |
def get_available_games():
"""Get a list of games that are available to join."""
games = Game.objects.filter(started=False) #pylint: disable=no-member
if len(games) == 0:
options = [('', '- None -')]
else:
options = [('', '- Select -')]
for game in games:
options.append((game.name, game.name))
return options | 245d85ce623ffe3ed9eb718aafaf7889c67dada6 | 15,561 |
def process_ps_stdout(stdout):
""" Process the stdout of the ps command """
return [i.split()[0] for i in filter(lambda x: x, stdout.decode("utf-8").split("\n")[1:])] | c086cc88c51484abe4308b3ac450faaba978656e | 15,563 |
import shlex
import pexpect
def chpasswd(path, oldpassword, newpassword):
"""Change password of a private key.
"""
if len(newpassword) != 0 and not len(newpassword) > 4: return False
cmd = shlex.split('ssh-keygen -p')
child = pexpect.spawn(cmd[0], cmd[1:])
i = child.expect(['Enter file in which the key is', pexpect.EOF])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(path)
i = child.expect(['Enter old passphrase', 'Enter new passphrase', pexpect.EOF])
if i == 0:
child.sendline(oldpassword)
i = child.expect(['Enter new passphrase', 'Bad passphrase', pexpect.EOF])
if i != 0:
if child.isalive(): child.wait()
return False
elif i == 2:
if child.isalive(): child.wait()
return False
child.sendline(newpassword)
i = child.expect(['Enter same passphrase again', pexpect.EOF])
if i == 1:
if child.isalive(): child.wait()
return False
child.sendline(newpassword)
child.expect(pexpect.EOF)
if child.isalive():
return child.wait() == 0
return True | ee84bdccee24ea591db6d9c82bfce8374d1a420d | 15,564 |
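A hypothetical call; the key path and passphrases below are placeholders:
ok = chpasswd("/home/user/.ssh/id_rsa", "old-secret", "new-secret-123")
print("passphrase changed" if ok else "failed")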
def get_display_limits(VarInst, data=None):
"""Get limits to resize the display of Variables.
Function takes as argument a `VariableInstance` from a `Section` or
`Planform` and an optional :obj:`data` argument, which specifies how to
determine the limits to return.
Parameters
----------
VarInst : :obj:`~deltametrics.section.BaseSectionVariable` subclass
The `Variable` instance to visualize. May be any subclass of
:obj:`~deltametrics.section.BaseSectionVariable` or
:obj:`~deltametrics.plan.BasePlanformVariable`.
data : :obj:`str`, optional
The type of data to compute limits for. Typically this will be the
same value used with either :obj:`get_display_arrays` or
:obj:`get_display_lines`. Supported options are `'spacetime'`,
`'preserved'`, and `'stratigraphy'`.
Returns
-------
xmin, xmax, ymin, ymax : :obj:`float`
Values to use as limits on a plot. Use with, for example,
``ax.set_xlim((xmin, xmax))``.
"""
# # # SectionVariables # # #
if issubclass(type(VarInst), section.BaseSectionVariable):
# # DataSection # #
if isinstance(VarInst, section.DataSectionVariable):
data = data or VarInst._default_data
if data in VarInst._spacetime_names:
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z)
elif data in VarInst._preserved_names:
VarInst._check_knows_stratigraphy() # need to check explicitly
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z)
elif data in VarInst._stratigraphy_names:
VarInst._check_knows_stratigraphy() # need to check explicitly
_strata = np.copy(VarInst.strat_attr['strata'])
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(_strata), np.max(_strata) * 1.5
else:
raise ValueError('Bad data argument: %s' % str(data))
# # StratigraphySection # #
elif isinstance(VarInst, section.StratigraphySectionVariable):
data = data or VarInst._default_data
if data in VarInst._spacetime_names:
VarInst._check_knows_spacetime() # always False
elif data in VarInst._preserved_names:
VarInst._check_knows_spacetime() # always False
elif data in VarInst._stratigraphy_names:
return np.min(VarInst._S), np.max(VarInst._S), \
np.min(VarInst._Z), np.max(VarInst._Z) * 1.5
else:
raise ValueError('Bad data argument: %s' % str(data))
else:
raise TypeError
# # # PlanformVariables # # #
elif False: # issubclass(type(VarInst), plan.BasePlanformVariable):
raise NotImplementedError
else:
        raise TypeError('Invalid "VarInst" type: %s' % type(VarInst)) | d4864fccd8c282033d99fdc817e077d3f6d5b434 | 15,565
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
def plot_layer_consistency_example(eigval_col, eigvec_col, layernames, layeridx=[0,1,-1], titstr="GAN", figdir="", savelabel="", use_cuda=False):
    """Plot the cross-layer consistency of the Hessian eigen-structure.
    Off-diagonal panels scatter the eigenvalues of one layer's Hessian against
    the vHv values of another layer's eigenvectors (aspect ratio fixed to one);
    diagonal panels show histograms of the log10 eigenvalue spectra.
    :param eigval_col: list of eigenvalue arrays, one per layer
    :param eigvec_col: list of eigenvector matrices, one per layer
    :param layeridx: indices of the layers to compare
    :param titstr: model name used in the figure title, e.g. "GAN"
    :param figdir: directory in which the figure is saved
    :return: the matplotlib figure
    """
nsamp = len(layeridx)
# Hnums = len(eigval_col)
# eiglist = sorted(np.random.choice(Hnums, nsamp, replace=False)) # range(5)
print("Plot hessian of layers : ", [layernames[idx] for idx in layeridx])
fig = plt.figure(figsize=[10, 10], constrained_layout=False)
spec = fig.add_gridspec(ncols=nsamp, nrows=nsamp, left=0.075, right=0.975, top=0.9, bottom=0.05)
for axi, Li in enumerate(layeridx):
eigval_i, eigvect_i = eigval_col[Li], eigvec_col[Li]
for axj, Lj in enumerate(layeridx):
eigval_j, eigvect_j = eigval_col[Lj], eigvec_col[Lj]
inpr = eigvect_i.T @ eigvect_j
vHv_ij = np.diag((inpr @ np.diag(eigval_j)) @ inpr.T)
ax = fig.add_subplot(spec[axi, axj])
if axi == axj:
ax.hist(np.log10(eigval_j), 20)
else:
ax.scatter(np.log10(eigval_j), np.log10(vHv_ij), s=15, alpha=0.6)
ax.set_aspect(1, adjustable='datalim')
if axi == nsamp-1:
ax.set_xlabel("eigvals @ %s" % layernames[Lj])
if axj == 0:
ax.set_ylabel("vHv eigvec @ %s" % layernames[Li])
ST = plt.suptitle("Consistency of %s Hessian Across Layers\n"
"Cross scatter of EigenValues and vHv values for Hessian at %d Layers"%(titstr, nsamp),
fontsize=18)
# plt.subplots_adjust(left=0.175, right=0.95 )
RND = np.random.randint(1000)
plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.jpg" % (savelabel, RND)),
bbox_extra_artists=[ST]) #
plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.pdf" % (savelabel, RND)),
bbox_extra_artists=[ST]) #
return fig | 38191663fcf1c9f05aa39127179a3cbf5f29b219 | 15,566 |
def min_vertex_cover(left_v, right_v):
"""
Use the Hopcroft-Karp algorithm to find a maximum
matching or maximum independent set of a bipartite graph.
Next, find a minimum vertex cover by finding the
complement of a maximum independent set.
The function takes as input two dictionaries, one for the
left vertices and one for the right vertices. Each key in
the left dictionary is a left vertex with a value equal to
a list of the right vertices that are connected to the key
by an edge. The right dictionary is structured similarly.
The output is a dictionary with keys equal to the vertices
in a minimum vertex cover and values equal to lists of the
vertices connected to the key by an edge.
For example, using the following simple bipartite graph:
1000 2000
1001 2000
where vertices 1000 and 1001 each have one edge and 2000 has
two edges, the input would be:
left = {1000: [2000], 1001: [2000]}
right = {2000: [1000, 1001]}
    and the output or minimum vertex cover would be:
{2000: [1000, 1001]}
with vertex 2000 being the minimum vertex cover.
"""
data_hk = bipartiteMatch(left_v)
left_mis = data_hk[1]
right_mis = data_hk[2]
mvc = left_v.copy()
    mvc.update(right_v)  # merge the two dictionaries into one
for v in left_mis:
try:
del (mvc[v])
except KeyError:
pass
for v in right_mis:
try:
del (mvc[v])
except KeyError:
pass
return mvc | a94aaf6dd07b98e7f5a77b01ab6548bc401e8b03 | 15,567 |
def neighbor_dist(x1, y1, x2, y2):
"""Return distance of nearest neighbor to x1, y1 in x2, y2"""
m1, m2, d12 = match_xy(x2, y2, x1, y1, neighbors=1)
return d12 | 91b67e571d2812a9bc2e05b25a74fbca292daec7 | 15,568 |
import requests
def add_artist_subscription(auth, userid, artist_mbid):
"""
Add an artist to the list of subscribed artists.
:param tuple auth: authentication data (username, password)
:param str userid: user ID (must match auth data)
:param str artist_mbid: musicbrainz ID of the artist to add
:return: True on success
:raises: HTTPError
"""
url = '%s/artists/%s/%s' % (API_BASE_URL, userid, artist_mbid)
response = requests.put(url, auth=auth)
response.raise_for_status()
return True | 770be84ec9edb272c8c3d8cb1959f419f8867e1d | 15,569 |
from pathlib import Path
import pickle
def get_built_vocab(dataset: str) -> Vocab:
"""load vocab file for `dataset` to get Vocab based on selected client and data in current directory
Args:
dataset (str): string of dataset name to get vocab
Returns:
if there is no built vocab file for `dataset`, return None, else return Vocab
"""
vocab_file_path = Path(__file__).parent.resolve() / f'{dataset}_vocab.pickle'
if not vocab_file_path.exists():
print('There is no built vocab file for {} dataset, please run `main` or `build_vocab.sh` to build it firstly.'
.format(dataset))
return None
    with open(vocab_file_path, 'rb') as vocab_file:  # load the vocab built from sample data
        vocab = pickle.load(vocab_file)
    return vocab | b03daba815ccddb7ff3aee2e2eac39de22ff6cff | 15,570
from typing import Optional
from typing import Iterable
def binidx(num: int, width: Optional[int] = None) -> Iterable[int]:
""" Returns the indices of bits with the value `1`.
Parameters
----------
num : int
The number representing the binary state.
width : int, optional
Minimum number of digits used. The default is the global value `BITS`.
Returns
-------
binidx : list
"""
fill = width or 0
return list(sorted(i for i, char in enumerate(f"{num:0{fill}b}"[::-1]) if char == "1")) | 70d1895cf0141950d8e2f5efe6bfbf7bd8dbc30b | 15,571 |
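For example, 10 is 0b1010, so bits 1 and 3 are set:
print(binidx(10))           # [1, 3]
print(binidx(10, width=8))  # [1, 3] (padding only adds zeros)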
import math
def distance_vinchey(f, a, start, end):
"""
Uses Vincenty formula for distance between two Latitude/Longitude points
(latitude,longitude) tuples, in numeric degrees. f,a are ellipsoidal parameters
Returns the distance (m) between two geographic points on the ellipsoid and the
forward and reverse azimuths between these points. Returns ( s, alpha12, alpha21 ) as a tuple
"""
# Convert into notation from the original paper
# http://www.anzlic.org.au/icsm/gdatum/chapter4.html
#
# Vincenty's Inverse formulae
# Given: latitude and longitude of two points (phi1, lembda1 and phi2, lembda2)
phi1 = math.radians(start[0]); lembda1 = math.radians(start[1]);
phi2 = math.radians(end[0]); lembda2 = math.radians(end[1]);
if (abs( phi2 - phi1 ) < 1e-8) and ( abs( lembda2 - lembda1) < 1e-8 ):
return 0.0, 0.0, 0.0
two_pi = 2.0*math.pi
b = a * (1.0 - f)
TanU1 = (1-f) * math.tan( phi1 )
TanU2 = (1-f) * math.tan( phi2 )
U1 = math.atan(TanU1)
U2 = math.atan(TanU2)
lembda = lembda2 - lembda1
    last_lembda = -4000000.0 # an impossible value
omega = lembda
# Iterate the following equations, until there is no significant change in lembda
while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :
sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \
pow( (math.cos(U1) * math.sin(U2) - \
math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )
Sin_sigma = math.sqrt( sqr_sin_sigma )
Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)
sigma = math.atan2( Sin_sigma, Cos_sigma )
Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
alpha = math.asin( Sin_alpha )
Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )
C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))
last_lembda = lembda
lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \
(Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))
u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)
A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))
B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))
delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \
(Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \
(B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \
(-3 + 4 * pow(Cos2sigma_m,2 ) )))
s = b * A * (sigma - delta_sigma)
alpha12 = math.atan2( (math.cos(U2) * math.sin(lembda)), \
(math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))
alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \
(-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))
if ( alpha12 < 0.0 ) :
alpha12 = alpha12 + two_pi
if ( alpha12 > two_pi ) :
alpha12 = alpha12 - two_pi
alpha21 = alpha21 + two_pi / 2.0
if ( alpha21 < 0.0 ) :
alpha21 = alpha21 + two_pi
if ( alpha21 > two_pi ) :
alpha21 = alpha21 - two_pi
return s, alpha12, alpha21 | df5ae92a12af6ab656af65a12145436089202cf2 | 15,573 |
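A usage sketch with the WGS84 ellipsoid (flattening f, semi-major axis a in metres), Warsaw to Rome:
f, a = 1 / 298.257223563, 6378137.0
s, az12, az21 = distance_vinchey(f, a, (52.2296756, 21.0122287), (41.8919300, 12.5113300))
print(round(s / 1000, 1), "km")  # roughly 1316 km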
import numpy as np
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    # Unpack x1, y1, x2, y2 and the confidence scores
    dets = np.array(dets)
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]
    # Area of each detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort box indices by confidence score in descending order
    order = scores.argsort()[::-1]
    keep = []
    # Indices of the boxes that survive suppression
    while order.size > 0:
        i = order[0]
        keep.append(i)  # keep the highest-scoring remaining box
        # Intersection rectangle: top-left and bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # Intersection area (zero when the boxes do not overlap)
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = overlap / (area1 + area2 - overlap)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only the boxes whose IoU with the current box is below the threshold
        inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order, so shift the indices by one
        order = order[inds + 1]
    return keep | d85822e8076bf1695c6f9e7b7271b21572ebf7d6 | 15,574
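For example, with three boxes in [x1, y1, x2, y2, score] format and an IoU threshold of 0.5:
boxes = [[100, 100, 210, 210, 0.9],
         [105, 105, 215, 215, 0.8],
         [300, 300, 400, 400, 0.7]]
print(py_cpu_nms(boxes, thresh=0.5))  # [0, 2] -- box 1 overlaps box 0 too much and is suppressed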
def _romanize(word: str) -> str:
"""
:param str word: Thai word to be romanized, should have already been tokenized.
:return: Spells out how the Thai word should be pronounced.
"""
if not isinstance(word, str) or not word:
return ""
word = _replace_vowels(_normalize(word))
res = _RE_CONSONANT.findall(word)
# 2-character word, all consonants
if len(word) == 2 and len(res) == 2:
word = list(word)
word.insert(1, "o")
word = "".join(word)
word = _replace_consonants(word, res)
return word | 5e464faa1011893eb63f1f9afedd42768a8527c8 | 15,575 |
def lookup_beatmap(beatmaps: list, **lookup):
""" Finds and returns the first beatmap with the lookup specified.
Beatmaps is a list of beatmap dicts and could be used with beatmap_lookup().
Lookup is any key stored in a beatmap from beatmap_lookup().
"""
if not beatmaps:
return None
for beatmap in beatmaps:
match = True
for key, value in lookup.items():
if key.lower() not in beatmap:
raise KeyError(f"The list of beatmaps does not have key: {key}")
if not beatmap[key].lower() == value.lower():
match = False
if match:
return beatmap
return None | fa5f126502b5398934882139f01af8f4f80e1ea5 | 15,576 |
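For example, matching on a key case-insensitively:
beatmaps = [{"artist": "Nightwish", "title": "Ghost Love Score"},
            {"artist": "Camellia", "title": "GHOST"}]
print(lookup_beatmap(beatmaps, artist="camellia"))  # returns the second dict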
def scott(
x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
"""Scott similarity
Scott, W. A. (1955).
Reliability of content analysis: The case of nominal scale coding.
Public opinion quarterly, 321-325.
Args:
x (BinaryFeatureVector): binary feature vector
y (BinaryFeatureVector): binary feature vector
Returns:
float: similarity of given vectors
"""
a, b, c, d = operational_taxonomic_units(x, y, mask)
return (4 * a * d - (b + c) ** 2) / ((2 * a + b + c) * (2 + d + b + c)) | 6b950cb2b716d2e93638b169682cdd99230cbb89 | 15,577 |
import xmlrunner
def get_test_runner():
"""
Returns a test runner instance for unittest.main. This object captures
the test output and saves it as an xml file.
"""
try:
path = get_test_dir()
runner = xmlrunner.XMLTestRunner(output=path)
return runner
    except Exception as e:
print("get_test_runner error: %s" % e)
return None | 6b2db3207c278a7f07ed6fd7922042beea1bfee7 | 15,578 |
def _construct_capsule(geom, pos, rot):
"""Converts a cylinder geometry to a collider."""
radius = float(geom.get('radius'))
length = float(geom.get('length'))
length = length + 2 * radius
return config_pb2.Collider(
capsule=config_pb2.Collider.Capsule(radius=radius, length=length),
rotation=_vec(euler.quat2euler(rot, 'rxyz'), scale=180 / np.pi),
position=_vec(pos)) | a4bddb7c64468515d3a36ebaac22402eeb4f16b0 | 15,579 |
def file_root_dir(tmpdir_factory):
"""Prepares the testing dirs for file tests"""
root_dir = tmpdir_factory.mktemp('complex_file_dir')
for file_path in ['file1.yml',
'arg/name/file2',
'defaults/arg/name/file.yml',
'defaults/arg/name/file2',
'vars/arg/name/file1.yml',
'vars/arg/name/file3.yml',
'vars/arg/name/nested/file4.yml']:
root_dir.join(file_path).ensure()
return root_dir | 834e0d850e7a7dd59d792e98ed25b909d5a20567 | 15,580 |
from typing import Iterable
def path_nucleotide_length(g: BifrostDiGraph, path: Iterable[Kmer]) -> int:
"""Compute the length of a path in nucleotides."""
if not path:
return 0
node_iter = iter(path)
start = next(node_iter)
k = g.graph['k']
length = g.nodes[start]['length'] + k - 1
prev = start
for n in node_iter:
if (prev, n) not in g.edges:
raise ValueError(f"Invalid path specified, ({prev}, {n}) is not an edge.")
length += g.nodes[n]['length']
prev = n
return length | 612cff39bcf859a995d90c22e2dacb54e9c0b4c9 | 15,581 |
from bs4 import BeautifulSoup as bs
def extract_static_links(page_content):
"""Deliver the static asset links from a page source."""
soup = bs(page_content, "html.parser")
static_js = [
link.get("src")
for link in soup.findAll("script")
if link.get("src") and "static" in link.get("src")
]
static_images = [
image.get("src")
for image in soup.findAll("img")
if image.get("src") and "static" in image.get("src")
]
static_css = [
link.get("href")
for link in soup.findAll("link")
if link.get("href") and "static" in link.get("href")
]
return static_js + static_images + static_css | 8ea99171d55db182fe4265042c84deca36176d84 | 15,582 |
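For example:
html = ('<html><head><link rel="stylesheet" href="/static/css/site.css"></head>'
        '<body><img src="/static/img/logo.png">'
        '<script src="/static/js/app.js"></script></body></html>')
print(extract_static_links(html))
# ['/static/js/app.js', '/static/img/logo.png', '/static/css/site.css']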
import numpy as np
def zero_inflated_nb(n, p, phi=0, size=None):
    """Draw samples from a zero-inflated negative binomial model.
    This wraps the numpy negative binomial generator, where the probability
    of drawing a zero is additionally inflated by some probability, phi.
Parameters
----------
n : int
Parameter, > 0.
p : float
Parameter, 0 <= p <= 1.
phi : float, optional
The probability of obtaining an excess zero in the model,
where 0 <= phi <= 1. When `phi = 0`, the distribution collapses
to a negative binomial model.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
Returns
-------
int or ndarray of ints
Drawn samples
Also See
--------
np.random.negative_binomial
References
----------
    ..[1] Kurtz, Z.D. et al. (2015) "Sparse and Compositionally Robust Inference
       of Microbial Ecological Networks." PLoS Computational Biology. 11: e10004226
http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004226
"""
zeros = (np.random.binomial(1, phi, size) == 1)
nb_ = np.random.negative_binomial(n, p, size=size)
nb_[zeros] = 0
return nb_ | c20f28b33e070e035979093e6ebb9ed10611c5dd | 15,583 |
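For example, with an extra 20% chance of a structural zero:
np.random.seed(0)
counts = zero_inflated_nb(n=5, p=0.3, phi=0.2, size=10000)
print((counts == 0).mean())  # the zero fraction is inflated above the plain NB rate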