content | sha1 | id |
---|---|---|
def feature_norm_ldc(df):
"""
Process the features to obtain the standard metrics in LDC mode.
"""
df['HNAP'] = df['HNAC']/df['ICC_abs']*100
df['TCC'] = (df['ICC_abs']+df['DCC_abs'])/df['VOL']
df['ICC'] = df['ICC_abs']/df['VOL']
df['DCC'] = df['DCC_abs']/df['VOL']
return df | 60e3ef31c0be07179854de3191c2c75f4ec2cb4d | 18,252 |
import numpy as np
def dice_jaccard(y_true, y_pred, y_scores, shape, smooth=1, thr=None):
"""
Computes Dice and Jaccard coefficients.
Args:
y_true (ndarray): (N,4)-shaped array of groundtruth bounding boxes coordinates in xyxy format
y_pred (ndarray): (N,4)-shaped array of predicted bounding boxes coordinates in xyxy format
y_scores (ndarray): (N,)-shaped array of prediction scores
shape (tuple): shape of the map, i.e. (h, w)
smooth (int, optional): Smoothing factor to avoid ZeroDivisionError. Defaults to 1.
        thr (float, optional): Threshold to binarize predictions; if None, the soft versions of
            the coefficients are computed. Defaults to None.
Returns:
tuple: The Dice and Jaccard coefficients.
"""
m_true = np.zeros(shape, dtype=np.float32)
for x0, y0, x1, y1 in y_true.astype(int):
m_true[y0:y1 + 1, x0: x1 + 1] = 1.
if thr is not None:
keep = y_scores >= thr
y_pred = y_pred[keep]
y_scores = y_scores[keep]
m_pred = np.zeros_like(m_true)
for (x0, y0, x1, y1), score in zip(y_pred.astype(int), y_scores):
m_pred[y0:y1 + 1, x0: x1 + 1] = np.maximum(m_pred[y0:y1 + 1, x0: x1 + 1], score)
intersection = np.sum(m_true * m_pred)
sum_ = np.sum(m_true) + np.sum(m_pred)
union = sum_ - intersection
jaccard = (intersection + smooth) / (union + smooth)
dice = 2. * (intersection + smooth) / (sum_ + smooth)
return dice.mean(), jaccard.mean() | ed3a043b53d843e05ff3e32954eb9dbc2939b6ca | 18,253 |
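A minimal usage sketch for dice_jaccard with made-up toy boxes (not part of the original dataset): two ground-truth boxes and two scored predictions, compared soft and thresholded.

import numpy as np

# Hypothetical toy data: boxes in xyxy format on a 10x10 map.
y_true = np.array([[1, 1, 4, 4], [6, 6, 9, 9]])
y_pred = np.array([[1, 1, 4, 4], [5, 5, 8, 8]])
y_scores = np.array([0.9, 0.4])

# Soft coefficients use the scores directly; thr=0.5 first drops the low-score box.
soft_dice, soft_jaccard = dice_jaccard(y_true, y_pred, y_scores, shape=(10, 10))
hard_dice, hard_jaccard = dice_jaccard(y_true, y_pred, y_scores, shape=(10, 10), thr=0.5)
print(soft_dice, soft_jaccard)
print(hard_dice, hard_jaccard)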
def forward_pass(output_node, sorted_nodes):
"""
Performs a forward pass through a list of sorted nodes.
Arguments:
`output_node`: A node in the graph, should be the output node (have no outgoing edges).
`sorted_nodes`: A topologically sorted list of nodes.
Returns the output Node's value
"""
for n in sorted_nodes:
n.forward()
return output_node.value | a91c5b7ebef98815a47b26d58a680b36098969d5 | 18,254 |
import numpy as np
def qr_decomposition(q, r, iter, n):
    """
    Apply the Householder reflection for column `iter` and return the updated Q and R matrices.
    """
v = column_convertor(r[iter:, iter])
Hbar = hh_reflection(v)
H = np.identity(n)
H[iter:, iter:] = Hbar
r = np.matmul(H, r)
q = np.matmul(q, H)
return q, r | 94aa433e31e93dc36f67f579cb03f67930cfabc4 | 18,255 |
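The snippet depends on external helpers column_convertor and hh_reflection. A self-contained sketch, with assumed implementations of those helpers (a column reshape and a standard Householder reflector) and the driver loop, might look like this:

import numpy as np

def column_convertor(x):
    # Assumed helper: reshape a 1-D slice into a column vector.
    return x.reshape(-1, 1)

def hh_reflection(v):
    # Assumed helper: Householder matrix that maps v onto a multiple of e1.
    e1 = np.zeros_like(v)
    e1[0] = 1.0
    u = v + np.sign(v[0, 0]) * np.linalg.norm(v) * e1
    u = u / np.linalg.norm(u)
    return np.identity(v.shape[0]) - 2.0 * (u @ u.T)

A = np.array([[4., 1., 2.],
              [2., 3., 0.],
              [1., 1., 5.]])
n = A.shape[0]
q, r = np.identity(n), A.copy()
for it in range(n - 1):          # one Householder step per column
    q, r = qr_decomposition(q, r, it, n)
print(np.allclose(q @ r, A))     # True: A == Q @ R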
import logging
import torch
import operator
def build_detection_train_loader(cfg, mapper=None):
"""
A data loader is created by the following steps:
1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
2. Coordinate a random shuffle order shared among all processes (all GPUs)
    3. Each process spawns another few workers to process the dicts. Each worker will:
* Map each metadata dict into another format to be consumed by the model.
* Batch them by simply putting dicts into a list.
The batched ``list[mapped_dict]`` is what this dataloader will yield.
Args:
cfg (CfgNode): the config
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, True)`.
Returns:
an infinite iterator of training data
"""
num_workers = get_world_size()
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
images_per_worker = images_per_batch // num_workers
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = PlusDatasetMapper(cfg, dataset, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = samplers.TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
sampler = samplers.RepeatFactorTrainingSampler(
dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_worker, drop_last=True
)
# drop_last so the batch always have the same size
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
worker_init_fn=worker_init_reset_seed,
)
return data_loader | 007b09ce00814264b3264798d4a0afd05c23d6eb | 18,257 |
import gdspy
def discRect(radius,w,l,pos,gap,layerRect,layerCircle,layer):
"""
This function creates a disc that is recessed inside of a rectangle. The
amount that the disc is recessed is determined by a gap that surrounds the
perimeter of the disc. This much hangs out past the rectangle to couple to
    a bus waveguide. Calls subCircle(...) in order to accomplish the subtraction.
This function returns the disc and the surrounding rectangle
radius: radius of circle
w: width of rectangle (vertical)
l: length of rectangle (horizontal)
pos: tuple giving a relative offset of the circle. The offset is determined
by the gap specified, but it can also be added to this other offset. The
default is no additional recession into the rectangle and just a shift
along the length of the rectangle.
gap: the gap surrounding the disc
layerRect: the layer on which the rectangle is written
layerCircle: the layer on which the disc subtracted from the rectangle is
written. This layer is temporarily used for the boolean operation since
ultimately the disc is returned on the same layer on which the rectangle is
drawn.
"""
newRad=radius+gap
# the circle is offset by the gap width away from the rect
posx,posy=pos
pos=(posx,w/2-radius+posy+gap)
print('pos: '+str(pos))
sub=subCircle(newRad,w,l,pos,layerRect,layerCircle,layer)
# add the disc
disc=gdspy.Round(pos,radius,number_of_points=199,**layerRect)
return sub,disc | 1cb5f505fb868f31771fe6e48faa6399d8b051ad | 18,258 |
def sub_factory():
"""Subscript text: <pre>H[sub]2[/sub]O</pre><br />
Example:<br />
H[sub]2[/sub]O
"""
return make_simple_formatter("sub", "<sub>%(value)s</sub>"), {} | 4f721d0713c1a2be496a45c1bf7abe8766572135 | 18,259 |
from math import floor
from typing import Tuple
def train_test_split(
structures: list, targets: list, train_frac: float = 0.8
) -> Tuple[Tuple[list, list], Tuple[list, list]]:
"""Split structures and targets into training and testing subsets."""
num_train = floor(len(structures) * train_frac)
return (
(structures[:num_train], targets[:num_train]),
(structures[num_train:], targets[num_train:]),
) | 279fbe353bf07aa9b9654f4be4c21cf248f2c8bb | 18,260 |
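A quick usage sketch with placeholder lists (floor comes from the math import added above):

structures = ["structure_{}".format(i) for i in range(10)]
targets = list(range(10))

(train_X, train_y), (test_X, test_y) = train_test_split(structures, targets, train_frac=0.8)
print(len(train_X), len(test_X))  # 8 2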
def reset_password(token):
"""
Handles the reset password process.
"""
if not current_user.is_anonymous():
return redirect(url_for("forum.index"))
form = ResetPasswordForm()
if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            flash(("Your password token is invalid."), "danger")
            return redirect(url_for("auth.forgot_password"))
        expired, invalid, data = user.verify_reset_token(form.token.data)
        if invalid:
            flash(("Your password token is invalid."), "danger")
            return redirect(url_for("auth.forgot_password"))
        if expired:
            flash(("Your password token has expired."), "danger")
            return redirect(url_for("auth.forgot_password"))
        if data:
user.password = form.password.data
user.save()
flash(("Your password has been updated."), "success")
return redirect(url_for("auth.login"))
form.token.data = token
return render_template("auth/reset_password.html", form=form) | c34d090b09a236eecfe101d66ec0daaf3c08eb87 | 18,261 |
def delete(vol_path):
"""
Delete a kv store object for this volume identified by vol_path.
Return true if successful, false otherwise
"""
return kvESX.delete(vol_path) | 5d120b6a509119587df5f2dc9f1436115b01a257 | 18,262 |
import uuid
def get_tablespace_data(tablespace_path, db_owner):
"""This function returns the tablespace data"""
data = {
"name": "test_%s" % str(uuid.uuid4())[1:8],
"seclabels": [],
"spcacl": [
{
"grantee": db_owner,
"grantor": db_owner,
"privileges": [
{
"privilege_type": "C",
"privilege": True,
"with_grant": False
}
]
}
],
"spclocation": tablespace_path,
"spcoptions": [],
"spcuser": db_owner
}
return data | 3272e9b941d6bfb426ed754eed7f956c4c0933f4 | 18,263 |
def join_chunks(chunks):
"""empty all chunks out of their sub-lists to be split apart again by split_chunks(). this is because chunks now
looks like this [[t,t,t],[t,t],[f,f,f,][t]]"""
return [item for sublist in chunks for item in sublist] | a5daf41ba3fa6e7dafc4f05b29cc5aeaa397d5a5 | 18,264 |
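For example:

chunks = [[True, True, True], [True, True], [False, False, False], [True]]
print(join_chunks(chunks))
# [True, True, True, True, True, False, False, False, True]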
def urls_equal(url1, url2):
"""
Compare two URLObjects, without regard to the order of their query strings.
"""
return (
url1.without_query() == url2.without_query()
and url1.query_dict == url2.query_dict
) | f2cbcf111cd5d02fa053fbd373d24b2dab047dfc | 18,265 |
def bytes_to_ints(bs):
"""
Convert a list of bytes to a list of integers.
>>> bytes_to_ints([1, 0, 2, 1])
[256, 513]
>>> bytes_to_ints([1, 0, 1])
Traceback (most recent call last):
...
ValueError: Odd number of bytes.
>>> bytes_to_ints([])
[]
"""
if len(bs) % 2 != 0:
raise ValueError("Odd number of bytes.")
pairs = zip(bs[::2], bs[1::2])
return [(a << 8) + b for a, b in pairs] | e8ac9ec973ff58973703e3e109da5b45d3f9d802 | 18,266 |
import site
def canRun(page):
""" Returns True if the given check page is still set to "Run";
otherwise, returns false. Accepts one required argument, "page."
"""
print("Checking checkpage.")
page = site.Pages[page]
text = page.text()
if text == "Run":
print("We're good!")
return True
return False | 3cb1276d82ffeadb1a730bb2eb1c1f3427905e94 | 18,268 |
def _bgp_predict_wrapper(model, *args, **kwargs):
"""
Just to ensure that the outgoing shapes are right (i.e. 2D).
"""
mean, cov = model.predict_y(*args, **kwargs)
if len(mean.shape) == 1:
mean = mean[:, None]
if len(cov.shape) == 1:
cov = cov[:, None]
return mean, cov | 23bb62927e767057df94ef8b95b57874fc078d7f | 18,270 |
import numpy
def max_pool(images, imgshp, maxpoolshp):
"""
Implements a max pooling layer
Takes as input a 2D tensor of shape batch_size x img_size and performs max pooling.
Max pooling downsamples by taking the max value in a given area, here defined by
maxpoolshp. Outputs a 2D tensor of shape batch_size x output_size.
Parameters
----------
images : 2D tensor
        Tensor containing images on which to apply convolution. Assumed to be
        of shape `batch_size x img_size`
imgshp : tuple
Tuple containing image dimensions
maxpoolshp : tuple
Tuple containing shape of area to max pool over
Returns
-------
out1 : WRITEME
Symbolic result (2D tensor)
out2 : WRITEME
Logical shape of the output
"""
N = numpy
poolsize = N.int64(N.prod(maxpoolshp))
# imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
# in the first case, default nfeatures to 1
if N.size(imgshp)==2:
imgshp = (1,)+imgshp
# construct indices and index pointers for sparse matrix, which, when multiplied
# with input images will generate a stack of image patches
indices, indptr, spmat_shape, sptype, outshp = \
convolution_indices.conv_eval(imgshp, maxpoolshp, maxpoolshp, mode='valid')
logger.info('XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX')
logger.info('imgshp = {0}'.format(imgshp))
logger.info('maxpoolshp = {0}'.format(maxpoolshp))
logger.info('outshp = {0}'.format(outshp))
# build sparse matrix, then generate stack of image patches
csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices, indptr, spmat_shape)
patches = sparse.structured_dot(csc, images.T).T
pshape = tensor.stack(images.shape[0]*\
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0]),
tensor.as_tensor(poolsize))
patch_stack = tensor.reshape(patches, pshape, ndim=3);
out1 = tensor.max(patch_stack, axis=2)
pshape = tensor.stack(images.shape[0],
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0]))
out2 = tensor.reshape(out1, pshape, ndim=3);
out3 = tensor.DimShuffle((False,)*3, (0,2,1))(out2)
return tensor.flatten(out3,2), outshp | acbbfb686f77dc6e05f385b2addc8f49e7f344d3 | 18,272 |
import numpy as np
def rmean(A):
""" Removes time-mean of llc_4320 3d fields; axis=2 is time"""
ix,jx,kx = A.shape
Am = np.repeat(A.mean(axis=2),kx)
Am = Am.reshape(ix,jx,kx)
return A-Am | 39edcdca0cc4d411c579991086bf555d65686020 | 18,273 |
def build_request_url(base_url, sub_url, query_type, api_key, value):
"""
Function that creates the url and parameters
:param base_url: The base URL from the app.config
:param sub_url: The sub URL from the app.config file. If not defined it will be: "v1/pay-as-you-go/"
:param query_type: The query type of the request
    :param api_key: The api key from the app.config
    :param value: The artifact value
    :return: A string of the full request URL with the query parameters appended
    :rtype: str
"""
# Setup the mapping dict for APIVoid API call types and the url and params for the requests call.
url_map = {
"IP Reputation": {
"url": "iprep",
"params": {
"ip": value
}
},
"Domain Reputation": {
"url": "domainbl",
"params": {
"host": value
}
},
"DNS Lookup": {
"url": "dnslookup",
"params": {
"action": "dns-a",
"host": value
}
},
"Email Verify": {
"url": "emailverify",
"params": {
"email": value
}
},
"ThreatLog": {
"url": "threatlog",
"params": {
"host": value
}
},
"SSL Info": {
"url": "sslinfo",
"params": {
"host": value
}
},
"URL Reputation": {
"url": "urlrep",
"params": {
"url": url_encode(value.encode('utf8')) if isinstance(value, str) else value
}
},
"selftest": {
"url": "sitetrust",
"params": {
"stats": value
}
},
}
    try:
        request_type = url_map[query_type]
        request_url = request_type.get("url")
        request_params = request_type.get("params")
    except KeyError:
        raise ValueError("%s is an invalid IP Void request type or it's not supported" % query_type)
# Join the base url, the request type and the sub url
the_url = "/".join((base_url, request_url, sub_url))
# Append the api key
the_url = u"{0}?key={1}".format(the_url, api_key)
# Append the params
for (k, v) in request_params.items():
the_url = u"{0}&{1}={2}".format(the_url, k, v)
LOG.info("Using URL: %s", the_url)
return the_url | ecf3ef0a3d7d5591b1f6aa9787f4f2984688f9f2 | 18,275 |
import re
def snake_to_camel(action_str):
"""
for all actions and all objects unsnake case and camel case.
re-add numbers
"""
if action_str == "toggle object on":
return "ToggleObjectOn"
elif action_str == "toggle object off":
return "ToggleObjectOff"
def camel(match):
return match.group(1)[0].upper() + match.group(1)[1:] + match.group(2).upper()
action_str = re.sub(r'(.*?) ([a-zA-Z])', camel, action_str)
if action_str.startswith("Look"): # LookDown_15, LookUp_15
action_str += "_15"
if action_str.startswith("Rotate"): # RotateRight_90, RotateLeft_90
action_str += "_90"
if action_str.startswith("Move"): # MoveAhead_25
action_str += "_25"
return action_str[0].upper() + action_str[1:] | c71745c02fc712e2b463e7bcb022bfca41c2efd4 | 18,276 |
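A few example conversions (the numeric suffixes follow the rules in the function):

print(snake_to_camel("toggle object on"))  # ToggleObjectOn
print(snake_to_camel("move ahead"))        # MoveAhead_25
print(snake_to_camel("look down"))         # LookDown_15
print(snake_to_camel("rotate right"))      # RotateRight_90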
import datetime
def todayDate() -> datetime.date:
"""
:return: ex: datetime.date(2020, 6, 28)
"""
return datetime.date.today() | dc9dae8bbeaabf5c8d7d9e3509d1e331e2c609ff | 18,277 |
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name)) | eb76df1f7f3a9991c3e283643a52784c9d65f4f1 | 18,278 |
import time
def create_service(netUrl, gwUrl, attributes, token):
"""
Create NFN Service in MOP Environment.
:param netUrl: REST Url endpoint for network
:param gwUrl: REST Url endpoint for gateway
    :param attributes: service parameters, e.g. service type or name, etc
    :param token: session token for NF Console
    :return serviceUrl, serviceName: created service details
"""
url = netUrl+'/services'
gwId = gwUrl.split('/')[8]
if attributes['type'] == 'host':
# checking if service name is provided
if attributes['name']:
serviceName = attributes['name']
else:
serviceName = attributes['gateway']+'--'+str(attributes['ip'])+'--'+str(attributes['port'])
data = {
"serviceClass": "CS",
"name": serviceName,
"serviceInterceptType": "IP",
"serviceType": "ALL",
"endpointId": gwId,
"pbrType": "WAN",
"dataInterleaving": "NO",
"transparency": "NO",
"networkIp": attributes['ip'],
"networkFirstPort": attributes['port'],
"networkLastPort": attributes['port'],
"interceptIp": attributes['ip'],
"interceptFirstPort": attributes['port'],
"interceptLastPort": attributes['port']
}
if attributes['type'] == 'network':
# checking if service name is provided
if attributes['name']:
serviceName = attributes['name']
else:
serviceName = attributes['gateway']+'--'+str(attributes['netIp'])+'--'+str(attributes['netCidr'])
data = {
"serviceClass": "GW",
"name": serviceName,
"serviceInterceptType": "IP",
"serviceType": "ALL",
"endpointId": gwId,
"lowLatency": "NO",
"dataInterleaving": "NO",
"transparency": "NO",
"multicast": "OFF",
"dnsOptions": "NONE",
"icmpTunnel": "YES",
"cryptoLevel": "STRONG",
"permanentConnection": "YES",
"collectionLocation": "BOTH",
"pbrType": "WAN",
"rateSmoothing": "NO",
"gatewayClusterId": None,
"interceptIp": attributes['netIp'],
"gatewayIp": attributes['netIp'],
"gatewayCidrBlock": attributes['netCidr'],
"localNetworkGateway": "YES"
}
returnData = nfreq((url, data), "post", token)
serviceUrl = returnData['_links']['self']['href']
time.sleep(1)
return serviceUrl, serviceName | 848f8375273ec4583a6c5d361c8a319ff43ba2a8 | 18,279 |
def _drawBlandAltman(mean, diff, md, sd, percentage, limitOfAgreement, confidenceIntervals, detrend, title, ax, figureSize, dpi, savePath, figureFormat, meanColour, loaColour, pointColour):
"""
Sub function to draw the plot.
"""
if ax is None:
fig, ax = plt.subplots(1,1, figsize=figureSize, dpi=dpi)
plt.rcParams.update({'font.size': 15,'xtick.labelsize':15,
'ytick.labelsize':15})
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
# ax.rcParams.update({'font.size': 15})
# ax=ax[0,0]
draw = True
else:
draw = False
##
# Plot CIs if calculated
##
if 'mean' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['mean'][0],
confidenceIntervals['mean'][1],
facecolor='lightblue', alpha=0.2)
if 'upperLoA' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['upperLoA'][0],
confidenceIntervals['upperLoA'][1],
facecolor='wheat', alpha=0.2)
if 'lowerLoA' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['lowerLoA'][0],
confidenceIntervals['lowerLoA'][1],
facecolor='wheat', alpha=0.2)
##
# Plot the mean diff and LoA
##
ax.axhline(md, color=meanColour, linestyle='--')
ax.axhline(md + limitOfAgreement*sd, color=loaColour, linestyle='--')
ax.axhline(md - limitOfAgreement*sd, color=loaColour, linestyle='--')
##
# Plot the data points
##
# ax.scatter(mean[0:22], diff[0:22], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
# ax.scatter(mean[22:44], diff[22:44], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
# ax.scatter(mean[44:66], diff[44:66], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
# ax.scatter(mean[66:88], diff[66:88], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
ax.scatter(mean[0:20], diff[0:20], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
ax.scatter(mean[20:39], diff[20:39], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
ax.scatter(mean[39:59], diff[39:59], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
ax.scatter(mean[59:77], diff[59:77], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
ax.set_ylim(-50, 70)
ax.legend(loc='upper right', fontsize=12)
trans = transforms.blended_transform_factory(
ax.transAxes, ax.transData)
limitOfAgreementRange = (md + (limitOfAgreement * sd)) - (md - limitOfAgreement*sd)
offset = (limitOfAgreementRange / 100.0) * 1.5
ax.text(0.98, md + offset, 'Mean', ha="right", va="bottom", transform=trans)
ax.text(0.98, md - offset, f'{md:.2f}', ha="right", va="top", transform=trans)
ax.text(0.98, md + (limitOfAgreement * sd) + offset, f'+{limitOfAgreement:.2f} SD', ha="right", va="bottom", transform=trans)
ax.text(0.98, md + (limitOfAgreement * sd) - offset, f'{md + limitOfAgreement*sd:.2f}', ha="right", va="top", transform=trans)
ax.text(0.98, md - (limitOfAgreement * sd) - offset, f'-{limitOfAgreement:.2f} SD', ha="right", va="top", transform=trans)
ax.text(0.98, md - (limitOfAgreement * sd) + offset, f'{md - limitOfAgreement*sd:.2f}', ha="right", va="bottom", transform=trans)
# Only draw spine between extent of the data
# ax.spines['left'].set_bounds(min(diff), max(diff))
# ax.spines['bottom'].set_bounds(min(mean), max(mean))
# Hide the right and top spines
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
if percentage:
ax.set_ylabel('Percentage difference between methods', fontsize=20)
else:
ax.set_ylabel('Difference between methods', fontsize=20)
ax.set_xlabel('Mean of methods', fontsize=20)
# tickLocs = ax.xaxis.get_ticklocs()
# cadenceX = tickLocs[2] - tickLocs[1]
# tickLocs = rangeFrameLocator(tickLocs, (min(mean), max(mean)))
# ax.xaxis.set_major_locator(ticker.FixedLocator(tickLocs))
# tickLocs = ax.yaxis.get_ticklocs()
# cadenceY = tickLocs[2] - tickLocs[1]
# tickLocs = rangeFrameLocator(tickLocs, (min(diff), max(diff)))
# ax.yaxis.set_major_locator(ticker.FixedLocator(tickLocs))
# plt.draw() # Force drawing to populate tick labels
# labels = rangeFrameLabler(ax.xaxis.get_ticklocs(), [item.get_text() for item in ax.get_xticklabels()], cadenceX)
# ax.set_xticklabels(labels)
# labels = rangeFrameLabler(ax.yaxis.get_ticklocs(), [item.get_text() for item in ax.get_yticklabels()], cadenceY)
# ax.set_yticklabels(labels)
# ax.patch.set_alpha(0)
if detrend[0] is None:
pass
else:
plt.text(1, -0.1, f'{detrend[0]} slope correction factor: {detrend[1]:.2f} ± {detrend[2]:.2f}', ha='right', transform=ax.transAxes)
if title:
ax.set_title(title)
##
# Save or draw
##
plt.tight_layout()
if (savePath is not None) & draw:
fig.savefig(savePath, format=figureFormat, dpi=dpi)
plt.close()
elif draw:
plt.show()
else:
return ax | 43bf53cd4594c1ed58860a6127f40f6345bea6ba | 18,280 |
def rename_columns(df):
"""This function renames certain columns of the DataFrame
:param df: DataFrame
:type df: pandas DataFrame
:return: DataFrame
:rtype: pandas DataFrame
"""
renamed_cols = {"Man1": "Manufacturer (PE)",
"Pro1": "Model (PE)",
"Man2": "Manufacturer (BAT)",
"Pro2": "Model (BAT)",
"Top": "Type [-coupled]",
'P_PV2AC_in': 'P_PV2AC_in [W]',
'P_PV2AC_out': 'P_PV2AC_out [W]',
'U_PV_min': 'U_PV_min [V]',
'U_PV_nom': 'U_PV_nom [V]',
'U_PV_max': 'U_PV_max [V]',
'U_MPP_min': 'U_MPP_min [V]',
'U_MPP_max': 'U_MPP_max [V]',
'P_AC2BAT_in': 'P_AC2BAT_in [W]',
'P_BAT2AC_out': 'P_BAT2AC_out [W]',
'P_PV2BAT_in': 'P_PV2BAT_in [W]',
'P_BAT2PV_out': 'P_BAT2PV_out [W]',
'P_PV2BAT_out': 'P_PV2BAT_out [W]',
'P_BAT2AC_in': 'P_BAT2AC_in [W]',
'U_BAT_min': 'U_BAT_min [V]',
'U_BAT_nom': 'U_BAT_nom [V]',
'U_BAT_max': 'U_BAT_max [V]',
'E_BAT_100': 'E_BAT_100 [kWh]',
'E_BAT_50': 'E_BAT_50 [kWh]',
'E_BAT_25': 'E_BAT_25 [kWh]',
'E_BAT_usable': 'E_BAT_usable [kWh]',
'eta_BAT_100': 'eta_BAT_100',
'eta_BAT_50': 'eta_BAT_50',
'eta_BAT_25': 'eta_BAT_25',
'eta_BAT': 'eta_BAT',
'P_SYS_SOC1_AC': 'P_SYS_SOC1_AC [W]',
'P_SYS_SOC1_DC': 'P_SYS_SOC1_DC [W]',
'P_SYS_SOC0_AC': 'P_SYS_SOC0_AC [W]',
'P_SYS_SOC0_DC': 'P_SYS_SOC0_DC [W]',
'P_PVINV_AC': 'P_PVINV_AC [W]',
'P_PERI_AC': 'P_PERI_AC [W]',
'P_PV2BAT_DEV_IMPORT': 'P_PV2BAT_DEV_IMPORT [W]',
'P_PV2BAT_DEV_EXPORT': 'P_PV2BAT_DEV_EXPORT [W]',
'P_BAT2AC_DEV_IMPORT': 'P_BAT2AC_DEV_IMPORT [W]',
'P_BAT2AC_DEV_EXPORT': 'P_BAT2AC_DEV_EXPORT [W]',
't_DEAD': 't_DEAD [s]',
't_SETTLING': 't_SETTLING [s]'
}
return df.rename(columns=renamed_cols) | 9c22747d7c6da20cab1593388db5575a38aa313f | 18,281 |
import requests
import json
def get_github_emoji(): # pragma: no cover
"""Get Github's usable emoji."""
try:
resp = requests.get(
'https://api.github.com/emojis',
timeout=30
)
except Exception:
return None
return json.loads(resp.text) | 533a56e2e59b039cbc45ab5acb7ab4e8487e4ad9 | 18,282 |
import numpy as np
def transport_stable(p, q, C, lambda1, lambda2, epsilon, scaling_iter, g):
"""
Compute the optimal transport with stabilized numerics.
Args:
p: uniform distribution on input cells
q: uniform distribution on output cells
C: cost matrix to transport cell i to cell j
lambda1: regularization parameter for marginal constraint for p.
lambda2: regularization parameter for marginal constraint for q.
epsilon: entropy parameter
scaling_iter: number of scaling iterations
g: growth value for input cells
"""
u = np.zeros(len(p))
v = np.zeros(len(q))
b = np.ones(len(q))
p = p * g
q = q * np.average(g)
K0 = np.exp(-C / epsilon)
K = np.copy(K0)
alpha1 = lambda1 / (lambda1 + epsilon)
alpha2 = lambda2 / (lambda2 + epsilon)
for i in range(scaling_iter):
# scaling iteration
a = (p / (K.dot(b))) ** alpha1 * np.exp(-u / (lambda1 + epsilon))
b = (q / (K.T.dot(a))) ** alpha2 * np.exp(-v / (lambda2 + epsilon))
# stabilization
if (max(max(abs(a)), max(abs(b))) > 1e100):
u = u + epsilon * np.log(a)
v = v + epsilon * np.log(b) # absorb
K = (K0.T * np.exp(u / epsilon)).T * np.exp(v / epsilon)
a = np.ones(len(p))
b = np.ones(len(q))
return (K.T * a).T * b | 584607e57b4d216633ef0a03c2cb06726b0f423f | 18,283 |
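A toy sketch of calling transport_stable on random data; the sizes and parameter values below are arbitrary choices for illustration only.

import numpy as np

rng = np.random.default_rng(0)
n_in, n_out = 5, 6
p = np.ones(n_in) / n_in            # uniform distribution on input cells
q = np.ones(n_out) / n_out          # uniform distribution on output cells
C = rng.random((n_in, n_out))       # random cost matrix
g = np.ones(n_in)                   # no growth

pi = transport_stable(p, q, C, lambda1=1.0, lambda2=50.0, epsilon=0.05,
                      scaling_iter=250, g=g)
print(pi.shape)   # (5, 6) transport plan
print(pi.sum())   # total transported mass, close to 1 for these marginals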
def add(A: Coord, B: Coord, s: float = 1.0, t: float = 1.0) -> Coord:
"""Return the point sA + tB."""
return (s * A[0] + t * B[0], s * A[1] + t * B[1]) | 53c2f750199d785140154881fdc0ace31b9e2472 | 18,284 |
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
"""Converts binary string into bytes object"""
if delimiter == "":
data = [bin_data[i:i+8] for i in range(0, len(bin_data), 8)]
else:
data = bin_data.split(delimiter)
data = [int(byte, 2) for byte in data]
return bytes(data) | f16706da2d5b9ae5984a35a13ebd02ae94581153 | 18,285 |
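Round-trip example: encode bytes as a binary string and decode them back, with and without a delimiter.

encoded = " ".join(format(b, "08b") for b in b"Hi")
print(encoded)                                         # 01001000 01101001
print(from_binary(encoded))                            # b'Hi'
print(from_binary("0100100001101001", delimiter=""))   # b'Hi'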
def one_on_f_weight(f, normalize=True):
""" Literally 1/f weight. Useful for fitting linspace data in logspace.
Parameters
----------
f: array
Frequency
normalize: boolean, optional
        Normalize the weight to [0, 1].
Defaults to True.
Returns
-------
weight: array
The 1/f weight.
"""
weight = 1/f
if normalize:
weight /= max(weight)
return(weight) | 54301aa7480e6f3520cbfcccfa463a2a02d34b9c | 18,287 |
def load_randomdata(dataset_str, iter):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
NL = 2312
NC = 120
elif dataset_str == 'cora':
NL = 1708
NC = 140
else:
NL = 18717
NC = 60
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
#fixed 500 for validation read from file, choose random 140 from the others for train
'''
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
'''
idx_val=[int(item) for item in open("source/"+dataset_str+"/val_idx"+str(iter)+".txt").readlines()]
idx_test = test_idx_range.tolist()
idx_train = random.sample(list(set(range(0,NL))-set(idx_val)),NC);
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask | 476a54078680bb711a77fc9e3900192a1ef3b811 | 18,289 |
def plot(figsize=None, formats=None, limit=100, titlelen=10, **kwargs):
"""Display an image [in a Jupyter Notebook] from a Quilt fragment path.
Intended for use with `%matplotlib inline`.
    Convenience method that loops over subplots that call
`plt.imshow(image.imread(FRAG_PATH))`.
Keyword arguments
* figsize=None # None means auto, else provide (HEIGHT_INCHES, WIDTH_INCHES)
* formats=None # List of extensions as strings ['jpg', 'png', ...]
* limit=100 # maximum number of images to display
* titlelen=10 # max number of characters in subplot title
* **kwargs - all remaining kwargs are passed to plt.subplots;
see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html
"""
# pylint: disable=protected-access
def _plot(node, paths):
lower_formats = set((x.lower() for x in formats)) if formats is not None else None
def node_filter(frag, meta):
filepath = meta.get('_system', {}).get('filepath', None)
# don't try to read DataFrames as images
if isinstance(frag, string_types) and filepath:
_, ext = splitext_no_dot(filepath)
if lower_formats is None or ext.lower() in lower_formats:
return True
return False
# assume DataNode has one path; doesn't work with multi-fragment images
display = [('', paths[0], node._meta)]
# for GroupNodes, display all DataNode children
if isinstance(node, GroupNode):
datanodes = [(x, y) for (x, y) in node._items() if isinstance(y, DataNode)]
display = [(x, y._data(), y._meta) for (x, y) in datanodes]
# sort by name so iteration is reproducible (and unit tests pass)
display = sorted(display, key=lambda rec: rec[0])
display = [x for x in display if node_filter(x[1], x[2])]
if len(display) > limit:
print('Displaying {} of {} images{}'.format(limit, len(display), ELLIPSIS))
display = display[:limit]
# display can be empty e.g. if no DataNode children
if not display:
print('No images to display.')
return
# cast to int to avoid downstream complaints of
# 'float' object cannot be interpreted as an index
floatlen = float(len(display)) # prevent integer division in 2.7
cols = min(int(floor(sqrt(floatlen))), 8)
rows = int(ceil(floatlen/cols))
plt.tight_layout()
plt.subplots(
rows,
cols,
figsize=(cols*2, rows*2) if not figsize else figsize,
**kwargs)
for i in range(rows*cols):
axes = plt.subplot(rows, cols, i + 1) # subplots start at 1, not 0
axes.axis('off')
if i < len(display):
(name, frag, meta) = display[i]
plt.title(name[:titlelen] + ELLIPSIS if len(name) > titlelen else name)
filepath = meta.get('_system', {}).get('filepath', None)
_, ext = splitext_no_dot(filepath)
try:
bits = mpimg.imread(frag, format=ext)
plt.imshow(bits)
# Mac throws OSError, Linux IOError if file not recognizable
except (IOError, OSError) as err:
print('{}: {}'.format(name, str(err)))
continue
return _plot | f1b72c952d1c517ba4f09e03af8463a73d2c8759 | 18,290 |
def tresize(tombfile, keyfile, passphrase, newsize):
"""
Resize a tomb.
Keyfile, passphrase and new size are needed.
"""
cmd = ['tomb',
'resize',
tombfile,
'-k',
keyfile,
'--unsafe',
'--tomb-pwd',
sanitize_passphrase(passphrase),
'-s',
str(newsize),
'--no-color']
return execute(cmd) | 334a722b79aec80bc4a95c67a0b155653e29eb10 | 18,291 |
def auto_z_levels(fid, x, y, variable, t_idx, n_cont, n_dec):
"""
list(float) = auto_z_levels(fid, variable, t_idx, n_cont, n_dec)
... # contour lines
... # post .
"""
fig, ax = plt.subplots()
z_levs = np.ndarray(0)
for i in t_idx:
data = fid.variables[variable][i]
cp = ax.contour(x, y, data, n_cont)
z_levs = np.concatenate((z_levs, cp.levels), axis=0)
z_levs = np.sort(np.unique(np.around(z_levs, n_dec)))
plt.close(fig)
return z_levs | f80020c01a661412fb79d23f6081bdb94a471102 | 18,292 |
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeSubMessageDefault(message):
assert getattr(message_type, '_concrete_class', None), (
'Uninitialized concrete class found for field %r (message type %r)'
% (field.full_name, message_type.full_name))
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
return field.default_value
return MakeScalarDefault | 3a468e2850aaf9707ee1229eeb009ef5c013f1b6 | 18,294 |
from nltk.tokenize import RegexpTokenizer
def clean_text(dirty_text):
"""
Given a string, this function tokenizes the words of that string.
:param dirty_text: string
:return: list
input = "American artist accomplishments american"
output = ['accomplishments', 'american', 'artist']
"""
lower_dirty_text = dirty_text.lower()
regex_pattern = r"[\w']+"
tokenizer = RegexpTokenizer(regex_pattern)
tokens = tokenizer.tokenize(lower_dirty_text)
unique_tokens = list(set(tokens))
return unique_tokens | 1df63ea0c9be5a518d2fd1f931772080962f878f | 18,295 |
def GetCurrentUserController(AuthJSONController):
""" Return the CurrentUserController in the proper scope """
class CurrentUserController(AuthJSONController):
""" Controller to return the currently signed in user """
def __init__(self, toJson):
""" Initialize with the Json Converter """
self.toJson = toJson
AuthJSONController.__init__(self)
def performWithJSON(self, json=None, user=None):
""" Convert the existing Word Lists to JSON """
if user:
return {'user':self.toJson(user)}
return Errors.NO_USER.toJSON()
return CurrentUserController | ee710cd4d65982cf01d17fba130b7bb83dffd617 | 18,296 |
import numpy
def fft_in_range(audiomatrix, startindex, endindex, channel):
"""
Do an FFT in the specified range of indices
The audiomatrix should have the first index as its time domain and
second index as the channel number. The startindex and endinex
select the time range to use, and the channel parameter selects
which channel to do the FFT on.
Returns a vector of data in the frequency domain
"""
n = endindex - startindex
indat = audiomatrix[startindex:endindex, channel]
outdat = (numpy.fft.fft(indat)[range(n//2)])/n
return outdat | 30ce104795d0809f054439ba32f47d33528ecbff | 18,297 |
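For example, a 1 kHz sine sampled at 8 kHz over a one-second window puts the spectral peak in bin 1000 (with a one-second window each bin is 1 Hz wide):

import numpy

fs = 8000
t = numpy.arange(fs) / fs
audio = numpy.sin(2 * numpy.pi * 1000 * t).reshape(-1, 1)   # single-channel matrix

spectrum = fft_in_range(audio, 0, fs, channel=0)
print(numpy.argmax(numpy.abs(spectrum)))  # 1000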
import numpy as np
def drop_arrays_by_name(gt_names, used_classes):
"""Drop irrelevant ground truths by name.
Args:
gt_names (list[str]): Names of ground truths.
used_classes (list[str]): Classes of interest.
Returns:
np.ndarray: Indices of ground truths that will be dropped.
"""
inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds | 67d711ae61f3c833fa9e8b33d4bf4bf6d99a34ad | 18,298 |
def get_data_table_metas(data_table_name, data_table_namespace):
"""
Gets metas from meta table associated with table named `data_table_name` and namespaced `data_table_namespace`.
Parameters
---------
data_table_name : string
table name of this data table
data_table_namespace : string
table name of this data table
Returns
-------
dict
metas
Examples
--------
>>> from common.python import session
>>> session.get_data_table_metas("meta", "readme") # {'model_id': 'a_id', 'used_framework': 'fate'}
"""
return RuntimeInstance.SESSION.get_data_table_metas(data_table_name=data_table_name,
data_table_namespace=data_table_namespace) | 8b4ee249112d399c429a33fed82d9cb01404d441 | 18,299 |
def get_at_content(sequence):
"""Return content of AT in sequence, as float between 0 and 1, inclusive. """
sequence = sequence.upper()
a_content = sequence.count('A')
t_content = sequence.count('T')
return round((a_content+t_content)/len(sequence), 2) | 6316d29cdb9d7129f225f2f79a50485fb6919e32 | 18,300 |
def page_not_found(e):
"""Handle nonexistin pages."""
_next = get_next_url()
if _next:
flash("Page Not Found", "danger")
return redirect(_next)
return render_template("404.html"), 404 | 9267c65fb842309cf1e877239be4d7fff7b1b634 | 18,301 |
def test_query(p: int = 1) -> int:
"""
Example 2 for a unit test
:param p: example of description
:return: return data
"""
return p | 69c01914db1d7a5cebf1c4e78f7c5dc7f778b4b4 | 18,302 |
import numpy as np
def mirror_1d(d, xmin=None, xmax=None):
"""If necessary apply reflecting boundary conditions."""
if xmin is not None and xmax is not None:
xmed = (xmin+xmax)/2
return np.concatenate((2*xmin-d[d < xmed], d, 2*xmax-d[d >= xmed]))
elif xmin is not None:
return np.concatenate((2*xmin-d, d))
elif xmax is not None:
return np.concatenate((d, 2*xmax-d))
else:
return d | 0b538222dd227171ac4a3cf1ac2d8a30361eccf1 | 18,303 |
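Example: reflecting samples about a lower bound at 0, a common trick before a KDE fit so the estimated density does not leak past the boundary.

import numpy as np

d = np.array([0.1, 0.5, 2.0, 3.5])
print(mirror_1d(d, xmin=0.0))
# [-0.1 -0.5 -2.  -3.5  0.1  0.5  2.   3.5]
print(mirror_1d(d, xmin=0.0, xmax=4.0))
# points below the midpoint are reflected about 0, the rest about 4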
def calc_vertical_avg(fld,msk):
"""Compute vertical average, ignoring continental or iceshelf points """
# Make mask of nans, assume input msk is 3D of same size as fld 3 spatial dims
nanmsk = np.where(msk==1,1,np.NAN)
v_avg = fld.copy()
v_avg.values = v_avg.values*msk.values
if 'Z' in fld.dims:
vdim = 'Z'
elif 'Zl' in fld.dims:
vdim = 'Zl'
else:
raise TypeError('Could not find recognizable vertical field in input dataset')
# Once vertical coordinate is found, compute avg along dimension
v_avg = v_avg.sum(dim=vdim,skipna=True)
return v_avg | 7cd512cf2642864e9974e18ef582f541f65b3e96 | 18,304 |
def replace(data, match, repl):
"""Replace values for all key in match on repl value.
Recursively apply a function to values in a dict or list until the input
data is neither a dict nor a list.
"""
if isinstance(data, dict):
return {
key: repl if key in match else replace(value, match, repl)
for key, value in data.items()
}
if isinstance(data, list):
return [replace(item, match, repl) for item in data]
return data | 1b3dc8ac7521ec199cf74ebc8f4d8777827ab9fc | 18,306 |
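Example: redact every "password" and "token" value in a nested structure.

payload = {
    "user": "alice",
    "password": "hunter2",
    "sessions": [{"token": "abc123", "ip": "10.0.0.1"}],
}
print(replace(payload, match={"password", "token"}, repl="***"))
# {'user': 'alice', 'password': '***', 'sessions': [{'token': '***', 'ip': '10.0.0.1'}]}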
import time
def get_current_date() ->str:
"""Forms a string to represent the current date using the time module"""
if len(str(time.gmtime()[2])) == 1:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-0' + str(time.gmtime()[2])
else:
current_date = str(time.gmtime()[0]) + '-' + str(time.gmtime()[1]) + '-' + str(time.gmtime()[2])
return current_date | 480d44fc0153407960eacb875474fc02cb17c6c3 | 18,307 |
def to_jsobj(obj):
"""Convert a Jsonable object to a JSON object, and return it."""
if isinstance(obj, LIST_TYPES):
return [to_jsobj(o) for o in obj]
if obj.__class__.__module__ == "builtins":
return obj
return obj.to_jsobj() | ffd43b2d49f6dd0d3b6608a601e3bccc1be1a289 | 18,308 |
def EVAL_find_counter_exemplars(latent_representation_original, Z, idxs, counter_exemplar_idxs):
"""
Compute the values of the goal function.
"""
# prepare the data to apply the diversity optimization
data = np.zeros((len(idxs), np.shape(Z)[1]))
for i in range(len(idxs)):
data[i] = Z[idxs[i]]
# min-max normalization (applied on ALL examples)
scaler = MinMaxScaler()
scaler.fit_transform(data)
# list of points
points = [row for row in scaler.transform(data)]
# MIN MAX normalize instance to explain
instance = scaler.transform((latent_representation_original))
# number of nearest neighbors to consider
knn = 5
kp = {}
lconst = 1
_, d0 = argmax(points, lambda p: -dist(instance, p))
lconst = 0.5 / (-d0)
for p1 in points:
# compute distances
dst = [(p2, dist(p1, p2)) for p2 in points if not np.array_equal(p1, p2)]
# sort
dst = sorted(dst, key=lambda x: x[1])
# add top knn to kp
kp[p1.tobytes()] = set(p2.tobytes() for p2, d in dst[:knn])
# goal function
def g(points):
dpoints, dx = set(), 0
for p1 in points:
# union operator
dpoints |= kp[p1.tobytes()]
dx += dist(p1, instance)
# scaled version 2*cost
return len(dpoints) - 2 * lconst * dx
# get the extracted CF
extracted_CF_data = []
for i in range(len(counter_exemplar_idxs)):
extracted_CF_data.append(Z[counter_exemplar_idxs[i]])
# apply scaling
extracted_CF_data = scaler.transform((extracted_CF_data))
return g(extracted_CF_data) | 985d260c74e78ffa6a47a52d6d5d30043b0b5495 | 18,309 |
import numpy as np
import scipy.linalg as sl
# L1, L2, M, nn_1, nn_2 and Bond are assumed to be module-level lattice
# parameters/helpers defined elsewhere in the original source.
def min_energy(bond):
"""Calculate minimum energy.
Args:
bond: an instance of Bond or array[L1*L2][3].
"""
N_unit = L1*L2
coupling = bond.bond if isinstance(bond, Bond) else bond
# Create matrix A
a = np.zeros((N_unit, N_unit), dtype=float)
for i in range(N_unit):
a[i][nn_1(i)] += coupling[i][0]
a[i][nn_2(i)] += coupling[i][1]
a[i][i] += coupling[i][2]
u,s,vt = sl.svd(a)
det_u = sl.det(u)
det_vt = sl.det(vt)
# calculate parity of the projection operator
## product of u_{ij}
sgn = np.prod(np.sign(coupling))
## from boundary condition
if (L1+L2+M*(L1-M))%2 != 0: sgn *= -1 # (-1)^theta
## det(Q) = det(VU)
sgn *= det_u*det_vt
min_epsilon = min(s)
sum_epsilon = -0.5*sum(s)
ene_phys = sum_epsilon
ene_unphys = sum_epsilon + min_epsilon
# judge whether the vacuume state is physical or not
if sgn < 0: # The vacuum state is unphysical.
ene_phys, ene_unphys = ene_unphys, ene_phys
return ene_phys,ene_unphys,min_epsilon,sgn,det_u,det_vt | e86adbde2cbb5135962360fd67c45704c935c123 | 18,310 |
from typing import Tuple
def find_result_node(flat_graph: dict) -> Tuple[str, dict]:
"""
Find result node in flat graph
:return: tuple with node id (str) and node dictionary of the result node.
"""
result_nodes = [(key, node) for (key, node) in flat_graph.items() if node.get("result")]
if len(result_nodes) == 1:
return result_nodes[0]
elif len(result_nodes) == 0:
raise ProcessGraphVisitException("Found no result node in flat process graph")
else:
keys = [k for (k, n) in result_nodes]
raise ProcessGraphVisitException(
"Found multiple result nodes in flat process graph: {keys!r}".format(keys=keys)) | d0aa0e7ba71c4eb9412393a3bea40965db1525fe | 18,311 |
import string
import random
def password_generator(size=25, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):
"""Returns a random 25 character password"""
return ''.join(random.choice(chars) for _ in range(size)) | 66cedb858d7ddef9b93b4f9ed8ffd854a722c14b | 18,312 |
def create_cleaned_df(df, class_label_str):
"""Transform the wide-from Dataframe (df) from main.xlsx into one with
unique row names, values 0-1001 as the column names and a label column
containing the class label as an int.
Parameters
----------
df : pandas DataFrame
A DataFrame read in from main.xlsx. It must have columns 'Name',
'Analyte' and 'Concentration'.
class_label_str: str (len 2)
The class label for the dataframe. It must be two characters long and
one of 'Cu', 'Cd', 'Pb' or 'Sw'.
Returns
-------
pandas DataFrame
Wide-form dataframe with unique row and column names and a label column.
"""
# Replace spaces with underscores in Concentration column
df['Concentration'] = df['Concentration'].str.replace(' ', '_')
# Create new column (we will use this to extract unique names later on)
df['metal_concentration'] = df['Analyte'] + '_' + df['Concentration']
df = df.drop(columns=['Name', 'Analyte', 'Concentration'])
# Transpose df (now columns are a range - 0, 1, 2, etc.)
df['metal_concentration'] = [f'{name}_{i}' for i, name in enumerate(df['metal_concentration'])]
df = df.set_index('metal_concentration')
df.index.name = None
df.columns = range(0, 1002)
class_label_to_int_mapping = get_class_label_to_int_mapping()
df['label'] = class_label_to_int_mapping[class_label_str]
return df | ac40ce5af2221db6e984537dfd3a8dcb5f25a4a7 | 18,313 |
import numpy as np
def uni_to_int(dxu, x, lambda_val):
    """
    Translates from unicycle to single integrator dynamics.
    Parameters
    ----------
    dxu :
        Unicycle control input (2 x N): linear and angular velocities.
    x :
        Unicycle states (3 x N)
    lambda_val :
        Offset of the single-integrator point ahead of each unicycle.
    Returns
    -------
    dx :
        Single integrator velocities (2 x N).
    """
n = dxu.shape[1]
dx = np.zeros((2, n))
for i in range(0, n):
temp = np.array([[np.cos(x[2, i]), -lambda_val * np.sin(x[2, i])],
[np.sin(x[2, i]), lambda_val * np.cos(x[2, i])]])
dx[:, i] = np.dot(temp, dxu[:, i])
return dx | bfd9f0f0e62fbedb9611f762267644ecb2b3de30 | 18,314 |
import struct
def pack_binary_command(cmd_type, cmd_args, is_response=False):
"""Packs the given command using the parameter ordering specified in GEARMAN_PARAMS_FOR_COMMAND.
*NOTE* Expects that all arguments in cmd_args are already str's.
"""
expected_cmd_params = GEARMAN_PARAMS_FOR_COMMAND.get(cmd_type, None)
if expected_cmd_params is None or cmd_type == GEARMAN_COMMAND_TEXT_COMMAND:
raise ProtocolError('Received unknown binary command: %s' % get_command_name(cmd_type))
expected_parameter_set = set(expected_cmd_params)
received_parameter_set = set(cmd_args.keys())
if expected_parameter_set != received_parameter_set:
raise ProtocolError('Received arguments did not match expected arguments: %r != %r' % (expected_parameter_set, received_parameter_set))
# Select the right expected magic
if is_response:
magic = MAGIC_RES_STRING
else:
magic = MAGIC_REQ_STRING
# !NOTE! str should be replaced with bytes in Python 3.x
# We will iterate in ORDER and str all our command arguments
if compat.any(type(param_value) != str for param_value in cmd_args.itervalues()):
raise ProtocolError('Received non-binary arguments: %r' % cmd_args)
data_items = [cmd_args[param] for param in expected_cmd_params]
# Now check that all but the last argument are free of \0 as per the protocol spec.
if compat.any('\0' in argument for argument in data_items[:-1]):
raise ProtocolError('Received arguments with NULL byte in non-final argument')
binary_payload = NULL_CHAR.join(data_items)
# Pack the header in the !4sII format then append the binary payload
payload_size = len(binary_payload)
packing_format = '!4sII%ds' % payload_size
return struct.pack(packing_format, magic, cmd_type, payload_size, binary_payload) | bbe87233d338344ef2ed21b9555fcd4c22c959dc | 18,315 |
import numpy as np
import networkx as nx
from scipy.linalg import eigh
from scipy.sparse.linalg import eigsh
import cupy as cp
import cupyx.scipy.sparse.linalg as cp_linalg
def get_adjacency_spectrum(graph, k=np.inf, eigvals_only=False, which='LA', use_gpu=False):
"""
Gets the top k eigenpairs of the adjacency matrix
:param graph: undirected NetworkX graph
:param k: number of top k eigenpairs to obtain
:param eigvals_only: get only the eigenvalues i.e., no eigenvectors
:param which: the type of k eigenvectors and eigenvalues to find
:return: the eigenpair information
"""
# get all eigenpairs for small graphs
if len(graph) < 100:
A = nx.adjacency_matrix(graph).todense()
eigpairs = eigh(A, eigvals_only=eigvals_only)
else:
        A = nx.to_scipy_sparse_matrix(graph, format='csr', dtype=float, nodelist=graph.nodes)
if gpu_available() and use_gpu:
A_gpu = cp.sparse.csr_matrix(A)
eigpairs = cp_linalg.eigsh(A_gpu, k=min(k, len(graph) - 3), which=which, return_eigenvectors=not eigvals_only)
if type(eigpairs) is tuple:
eigpairs = list(eigpairs)
eigpairs[0], eigpairs[1] = cp.asnumpy(eigpairs[0]), cp.asnumpy(eigpairs[1])
else:
eigpairs = cp.asnumpy(eigpairs)
else:
if use_gpu: print('Warning: GPU requested, but not available')
eigpairs = eigsh(A, k=min(k, len(graph) - 1), which=which, return_eigenvectors=not eigvals_only)
return eigpairs | aef39f929c949edb13604c0e83b01b4d6025f06d | 18,316 |
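A small-graph usage sketch; with fewer than 100 nodes only the dense eigh path runs, so just NetworkX and SciPy are exercised.

import networkx as nx

G = nx.karate_club_graph()                        # 34 nodes
eigvals = get_adjacency_spectrum(G, eigvals_only=True)
print(eigvals.min(), eigvals.max())               # full adjacency spectrum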
def make_system(*args, **kwargs):
"""
Factory function for contact systems. Checks the compatibility between the
substrate, interaction method and surface and returns an object of the
appropriate type to handle it. The returned object is always of a subtype
of SystemBase.
Parameters:
-----------
substrate -- An instance of HalfSpace. Defines the solid mechanics in
the substrate
surface -- An instance of SurfaceTopography, defines the profile.
Returns
-------
"""
substrate, surface = _make_system_args(*args, **kwargs)
return NonSmoothContactSystem(substrate=substrate, surface=surface) | 06d0fbd8eb8ec6e39ec6aabb2192ab8f3455846e | 18,317 |
def place_owner_list(user_id):
"""
It retrieves the list of places for which the user is the owner.
Parameters:
- user_id: id of the user, which is owner and wants to get its own places.
Returns a tuple:
- list of Places owned by the user (empty if the user is not an owner)
- status message
- the http code indicating the type of error, if any
"""
try:
places = Place.get_owner_places(user_id)
    except TypeError as e:
return None, str(e), 400
return places, "OK", 200 | ea2c0df7f4e72bd0b7ace5ca9c341a71fd651b32 | 18,319 |
def job_met_heu(prob_label, tr, te, r, ni, n):
"""MeanEmbeddingTest with test_locs randomized.
tr unused."""
# MeanEmbeddingTest random locations
with util.ContextTimer() as t:
met_heu = tst.MeanEmbeddingTest.create_fit_gauss_heuristic(te, J, alpha, seed=180)
met_heu_test = met_heu.perform_test(te)
return {
#'test_method': met_heu,
'test_result': met_heu_test,
'time_secs': t.secs} | 6aabf3c2628ecdb98b1ae939040a823be307c6f5 | 18,320 |
def download():
"""Unchanged from web2py.
```
allows downloading of uploaded files
http://..../[app]/default/download/[filename]
```
"""
return response.download(request, db) | 65e0e27b96b6701f2e04c98be702819908647956 | 18,321 |
def action_id2arr(ids):
""" Converts action from id to array format (as understood by the environment) """
return actions[ids] | 4d3f54078e99c73e0509b36ee39806c721690551 | 18,324 |
from promgen import models
def breadcrumb(instance=None, label=None):
"""
Create HTML Breadcrumb from instance
Starting with the instance, walk up the tree building a bootstrap3
    compatible breadcrumb
"""
def site(obj):
yield reverse("site-detail"), obj.domain
def shard(obj):
yield reverse("shard-list"), _("Shards")
yield obj.get_absolute_url(), obj.name
def service(obj):
yield reverse("service-list"), _("Services")
yield obj.get_absolute_url(), obj.name
def project(obj):
yield from service(obj.service)
yield obj.get_absolute_url(), obj.name
def alert(obj):
yield reverse("alert-list"), _("Alerts")
yield obj.get_absolute_url(), obj.pk
def rule(obj):
if obj.content_type.model == "site":
yield from site(obj.content_object)
if obj.content_type.model == "service":
yield from service(obj.content_object)
if obj.content_type.model == "project":
yield from project(obj.content_object)
# If we have a new rule, it won't have a name
if obj.pk:
yield obj.get_absolute_url(), obj.name
def sender(obj):
if obj.content_type.model == "service":
yield from service(obj.content_object)
if obj.content_type.model == "project":
yield from project(obj.content_object)
def generator():
yield reverse("home"), _("Home")
if isinstance(instance, models.Sender):
yield from sender(instance)
if isinstance(instance, models.Project):
yield from project(instance)
if isinstance(instance, models.Service):
yield from service(instance)
if isinstance(instance, models.Shard):
yield from shard(instance)
if isinstance(instance, models.Rule):
yield from rule(instance)
if isinstance(instance, models.Alert):
yield from alert(instance)
def to_tag():
yield '<ol class="breadcrumb">'
for href, text in generator():
yield format_html('<li><a href="{}">{}</a></li>', mark_safe(href), text)
if label:
yield format_html('<li class="active">{}</li>', _(label))
yield "</ol>"
return mark_safe("".join(to_tag())) | 5fec6c8b6d1bfdadec9405b3a4b73b119d7357f9 | 18,327 |
from typing import Optional
def get_domain(arn: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDomainResult:
"""
The resource schema to create a CodeArtifact domain.
:param str arn: The ARN of the domain.
"""
__args__ = dict()
__args__['arn'] = arn
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws-native:codeartifact:getDomain', __args__, opts=opts, typ=GetDomainResult).value
return AwaitableGetDomainResult(
arn=__ret__.arn,
name=__ret__.name,
owner=__ret__.owner,
permissions_policy_document=__ret__.permissions_policy_document,
tags=__ret__.tags) | bed4130d5ec77f859cfa7e200794990792b0d2e8 | 18,330 |
def collect_properties(service_instance, view_ref, obj_type, path_set=None, include_mors=False):
"""
Collect properties for managed objects from a view ref
Check the vSphere API documentation for example on retrieving
object properties:
- http://goo.gl/erbFDz
Original Source: https://github.com/dnaeon/py-vconnector/blob/master/src/vconnector/core.py
Modified for my purposes here.
:param pyVmomi.vim.view.* view_ref: Starting point of inventory navigation
:param pyVmomi.vim.* obj_type: Type of managed object
:param list path_set: List of properties to retrieve
:param bool include_mors: If True include the managed objects refs in the result
:return: A list of properties for the managed objects
:rtype list:
"""
collector = service_instance.content.propertyCollector
# Create object specification to define the starting point of
# inventory navigation
obj_spec = vmodl.query.PropertyCollector.ObjectSpec()
obj_spec.obj = view_ref
obj_spec.skip = True
# Create a traversal specification to identify the path for collection
traversal_spec = vmodl.query.PropertyCollector.TraversalSpec()
traversal_spec.name = 'traverseEntities'
traversal_spec.path = 'view'
traversal_spec.skip = False
traversal_spec.type = view_ref.__class__
obj_spec.selectSet = [traversal_spec]
# Identify the properties to the retrieved
property_spec = vmodl.query.PropertyCollector.PropertySpec()
property_spec.type = obj_type
if not path_set:
property_spec.all = True
property_spec.pathSet = path_set
# Add the object and property specification to the
# property filter specification
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = [obj_spec]
filter_spec.propSet = [property_spec]
# Retrieve properties
props = collector.RetrieveContents([filter_spec])
data = []
for obj in props:
properties = {}
for prop in obj.propSet:
properties[prop.name] = prop.val
if include_mors:
properties['obj'] = obj.obj
data.append(properties)
return data | 98cc1f6baa38f6c82452a0c3c5efb13a86a17b9e | 18,331 |
import signal
from functools import wraps
def _timeout(seconds):
"""Decorator for preventing a function from running for too long.
Inputs:
seconds (int): The number of seconds allowed.
Notes:
This decorator uses signal.SIGALRM, which is only available on Unix.
"""
assert isinstance(seconds, int), "@timeout(sec) requires an int"
def _handler(signum, frame):
"""Handle the alarm by raising a custom exception."""
raise TimeoutError("Timeout after {0} seconds".format(seconds))
def decorator(func):
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handler)
signal.alarm(seconds) # Set the alarm.
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0) # Turn the alarm off.
return result
return wraps(func)(wrapper)
return decorator | dec9909e662c3943d2bba56e116d6c212201aa42 | 18,332 |
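Usage sketch (Unix only, since SIGALRM is used):

import time

@_timeout(2)
def slow_task():
    time.sleep(5)
    return "done"

try:
    slow_task()
except TimeoutError as exc:
    print(exc)  # Timeout after 2 seconds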
import numpy as np
def marginal_entropy(problem: dict, train_ixs: np.ndarray, obs_labels: np.ndarray, unlabeled_ixs: np.ndarray,
batch_size: int, **kwargs) -> np.ndarray:
"""
Score is -p(x)log[p(x)] i.e. marginal entropy of the point.
:param problem: dictionary that defines the problem, containing keys:
* points: an (n_samples, n_dim) matrix of points in the space
* num_classes: the number of different classes [0, num_classes)
* batch_size: number of points to query each iteration
* num_queries: the max number of queries we can make on the data
* model: the sk-learn model we are training
:param train_ixs: index into `points` of the training examples
:param obs_labels: labels for the training examples
:param unlabeled_ixs: indices into problem['points'] to score
:param kwargs: unused
:return: scores for each of selected_ixs
"""
points = problem['points']
model = problem['model']
test_X = points[unlabeled_ixs]
p_x = model.predict_proba(test_X)
p_x = p_x.clip(1e-9, 1 - 1e-9)
logp_x = np.log(p_x)
return -1 * (p_x * logp_x).sum(axis=1)
# return 1/ np.abs(model.decision_function(test_X)) | 78751d2cae853f9595579578c38bf2974cea4316 | 18,333 |
def prepareNNImages(bact_img, ftsz_img, model, bacteria=False):
"""Preprocess raw iSIM images before running them throught the neural network.
Returns a 3D numpy array that contains the data for the neural network and the
positions dict generated by getTilePositions for tiling.
"""
# Set iSIM specific values
pixelCalib = 56 # nm per pixel
sig = 121.5 / 81 # in pixel
resizeParam = pixelCalib / 81 # no unit
try:
nnImageSize = model.layers[0].input_shape[0][1]
except AttributeError:
nnImageSize = model
positions = None
# Preprocess the images
if nnImageSize is None or ftsz_img.shape[1] > nnImageSize:
# Adjust to 81nm/px
bact_img = transform.rescale(bact_img, resizeParam)
ftsz_img = transform.rescale(ftsz_img, resizeParam)
# This leaves an image that is smaller then initially
# gaussian and background subtraction
bact_img = filters.gaussian(bact_img, sig, preserve_range=True)
ftsz_img = filters.gaussian(
ftsz_img, sig, preserve_range=True
) - filters.gaussian(ftsz_img, sig * 5, preserve_range=True)
# Tiling
if nnImageSize is not None:
positions = getTilePositionsV2(ftsz_img, nnImageSize)
contrastMax = 255
else:
contrastMax = 1
# Contrast
ftsz_img = exposure.rescale_intensity(
ftsz_img, (np.min(ftsz_img), np.max(ftsz_img)), out_range=(0, contrastMax)
)
bact_img = exposure.rescale_intensity(
bact_img, (np.mean(bact_img), np.max(bact_img)), out_range=(0, contrastMax)
)
else:
positions = {
"px": [(0, 0, ftsz_img.shape[1], ftsz_img.shape[1])],
"n": 1,
"overlap": 0,
"stitch": 0,
}
# Put into format for the network
if nnImageSize is not None:
ftsz_img = ftsz_img.reshape(1, ftsz_img.shape[0], ftsz_img.shape[0], 1)
bact_img = bact_img.reshape(1, bact_img.shape[0], bact_img.shape[0], 1)
inputDataFull = np.concatenate((bact_img, ftsz_img), axis=3)
# Cycle through these tiles and make one array for everything
i = 0
inputData = np.zeros(
            (positions["n"] ** 2, nnImageSize, nnImageSize, 2), dtype=np.uint8
)
for position in positions["px"]:
inputData[i, :, :, :] = inputDataFull[
:, position[0] : position[2], position[1] : position[3], :
]
if bacteria:
inputData[i, :, :, 1] = exposure.rescale_intensity(
inputData[i, :, :, 1],
(0, np.max(inputData[i, :, :, 1])),
out_range=(0, 255),
)
inputData[i, :, :, 0] = exposure.rescale_intensity(
inputData[i, :, :, 0],
(0, np.max(inputData[i, :, :, 0])),
out_range=(0, 255),
)
i = i + 1
inputData = inputData.astype("uint8")
else:
# This is now missing the tile-wise rescale_intensity for the mito channel.
# Image shape has to be in multiples of 4, not even quadratic
cropPixels = (
bact_img.shape[0] - bact_img.shape[0] % 4,
bact_img.shape[1] - bact_img.shape[1] % 4,
)
bact_img = bact_img[0 : cropPixels[0], 0 : cropPixels[1]]
ftsz_img = ftsz_img[0 : cropPixels[0], 0 : cropPixels[1]]
positions = getTilePositionsV2(bact_img, 128)
bact_img = bact_img.reshape(1, bact_img.shape[0], bact_img.shape[0], 1)
ftsz_img = ftsz_img.reshape(1, ftsz_img.shape[0], ftsz_img.shape[0], 1)
inputData = np.stack((bact_img, ftsz_img), 3)
return inputData, positions | b43f064ebbb63043db047cc406ad64df1edb0b44 | 18,334 |
def get_key(val):
"""
Get dict key by value
:param val:
:return:
"""
for key, value in HANDSHAKE.items():
if val == value:
return key | df5924127bec434cadbfae3a4d9e347c55678ae5 | 18,335 |
import re
def valid_text(val, rule):
"""Return True if regex fully matches non-empty string of value."""
if callable(rule):
match = rule(val)
else:
match = re.findall(rule, val)
return (False if not match or not val else
True if match is True else
match[0] == val) | aa6f6ac3a3210d34b44eba1f2e8e8cff851ff038 | 18,336 |
def settings():
"""Render the settings page."""
c = mongo.db[app.config['USERS_COLLECTION']]
user = c.find_one({'username': current_user.get_id()})
if not user:
return render_template()
user['id'] = str(user['_id'])
user.pop('_id', None)
return render_template('settings.html', user=user) | 3a6e3cb38680aea581f3fda1edb11fb0237df355 | 18,337 |
def exportAllScansS3(folder_id):
""" Exports all Tenable scans found in a folder to S3. """
scan_list = []
scans = client.scan_helper.scans(folder_id=folder_id)
for scan in scans:
if scan.status() != 'completed':
continue
scan.download("./%s.html" % scan.details().info.name, format='html')
scan_list.append(scan.id)
return scan_list | 2f460d5cc0d96bfcaab8fca8d1f103120ae78ca4 | 18,338 |
def download_handler(resource, _, filename=None, inline=False, activity_id=None):
"""Get the download URL from LFS server and redirect the user there
"""
if resource.get('url_type') != 'upload' or not resource.get('lfs_prefix'):
return None
context = get_context()
data_dict = {'resource': resource,
'filename': filename,
'inline': inline,
'activity_id': activity_id}
resource_download_spec = tk.get_action('get_resource_download_spec')(context, data_dict)
href = resource_download_spec.get('href')
if href:
return tk.redirect_to(href)
else:
return tk.abort(404, tk._('No download is available')) | 16fcf2ae97ff5d8d2d0d206c1e3035092a034006 | 18,341 |
def body_open():
"""open the main logic"""
return " @coroutine\n def __query(__connection):" | d8792f2b3237f024f20a12c6b7d371af1dbdb21e | 18,342 |
import sqlite3
def db_add_entry(user_auth,\
name:str, user_id:str, user_pw:str, url:str=''):
"""
    Add an entry into the credentials database and return True on success.
    If insertion fails, return False.
"""
# SQL Query
sql = f'INSERT INTO {DB_TABLE}'
sql += '(name, user_id, user_pw, url, date_created, date_modified) '
sql += 'VALUES(?, ?, ?, ?, ?, ?)'
# Params
user_id_enc = user_auth.encrypt(user_id)
user_pw_enc = user_auth.encrypt(user_pw)
current_ts = get_current_ts()
sql_params = [name, user_id_enc, user_pw_enc, url, current_ts, current_ts]
entry_id = -1
# Run SQL
try:
with user_auth.conn as conn:
cur = conn.cursor()
cur.execute(sql, sql_params)
entry_id = cur.lastrowid
cur.close()
except sqlite3.DatabaseError:
return False
# Sign the entry
user_auth.sign_entry(entry_id, update_db=True)
return True | 217c54215c9ee49ed8a32b6093e2b274ce0fe7fe | 18,343 |
def First():
"""(read-only) Sets the first sensor active. Returns 0 if none."""
return lib.Sensors_Get_First() | 255eb920dae36a01f5e430c44a6922db7eaac0c9 | 18,344 |
def rch_from_model_ds(model_ds, gwf):
"""get recharge package from model dataset.
Parameters
----------
model_ds : xarray.Dataset
dataset with model data.
gwf : flopy ModflowGwf
groundwaterflow object.
Returns
-------
rch : flopy ModflowGwfrch
rch package
"""
# create recharge package
rch = recharge.model_datasets_to_rch(gwf, model_ds)
return rch | 6af1fef950a951026d082762fd1dce99af7a3ab1 | 18,345 |
def _drawdots_on_origin_image(mats, usage, img, notation_type, color=['yellow', 'green', 'blue', 'red']):
"""
    For visualization purposes, draw differently colored dots on the original image.
:param mats:
    :param usage: Detection or Classification
:param img: original image
:param color: color list for each category
:return: dotted image
"""
if usage == 'Classification':
for i, mat in enumerate(mats):
mat_content = mat['detection']
_draw_points(mat_content, img, color[i], notation_type=notation_type)
elif usage == 'Detection':
mat_content = mats['detection']
_draw_points(mat_content, img, color[0], notation_type=notation_type)
return img | 12ae7544c1ddc415c237835cb14b112e186d0d15 | 18,346 |
def scale(*args, x = 1, y = 1):
"""
Returns a transformation which scales a path around the origin by the specified amount.
`scale(s)`: Scale uniformly by `s`.
`scale(sx, sy)`: Scale by `sx` along the x axis and by `sy` along the y axis.
`scale(x = sx)`: Scale along the x axis only.
`scale(y = sy)`: Scale along the y axis only.
"""
if args:
if len(args) == 1:
args *= 2
x, y = args
return transform(x, 0, 0, 0, y, 0) | 4853a2dd4dd8145cfbd502a38531a769674c203c | 18,347 |
async def async_setup_entry(hass, config_entry):
"""Set up Tile as config entry."""
websession = aiohttp_client.async_get_clientsession(hass)
client = await async_login(
config_entry.data[CONF_USERNAME],
config_entry.data[CONF_PASSWORD],
session=websession,
)
async def async_update_data():
"""Get new data from the API."""
try:
return await client.tiles.all()
except SessionExpiredError:
LOGGER.info("Tile session expired; creating a new one")
await client.async_init()
except TileError as err:
raise UpdateFailed(f"Error while retrieving data: {err}") from err
coordinator = DataUpdateCoordinator(
hass,
LOGGER,
name=config_entry.title,
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=async_update_data,
)
await coordinator.async_refresh()
hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id] = coordinator
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True | 0aa031a56e32335cbe0aab83ff686eb970803b8c | 18,350 |
def get_selection_uri_template():
"""
Utility function, to build Selection endpoint's Falcon uri_template string
>>> get_selection_uri_template()
'/v1/source/{source_id}/selection.{type}'
"""
str_source_uri = get_uri_template(source.str_route)
path_selection = selection.str_route
param_id = source_parameters.source_id
param_type = selection.str_param_type
str_selection_uri = ''.join(
['/{', param_id, '}/', path_selection, '{', param_type, '}']
)
return str_source_uri+str_selection_uri | ec51a7ecc9476dfd060e717c11f4db9255a756dc | 18,351 |
def conv_single_step(a_slice_prev, W, b):
"""
Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
of the previous layer.
Arguments:
a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)
Returns:
Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
"""
### START CODE HERE ### (≈ 2 lines of code)
# Element-wise product between a_slice and W. Do not add the bias yet.
s = a_slice_prev * W
# Sum over all entries of the volume s.
Z = s.sum()
# Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
    Z = Z + float(b)
### END CODE HERE ###
return Z | 08528925783a6ad2e0d29e602aafe44082a8c50c | 18,352 |
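# Usage sketch for conv_single_step above (random shapes for illustration only).
import numpy as np

np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)   # one (f, f, n_C_prev) window of the input
W = np.random.randn(4, 4, 3)              # one filter
b = np.random.randn(1, 1, 1)              # its bias
Z = conv_single_step(a_slice_prev, W, b)  # single scalar activation for this window
print(Z)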
def _is_pyqt_obj(obj):
"""Checks if ``obj`` wraps an underlying C/C++ object."""
if isinstance(obj, QtCore.QObject):
try:
obj.parent()
return True
except RuntimeError:
return False
else:
return False | 7be1750807eaee9ab54ee144019909d3c6890c65 | 18,353 |
from typing import Optional
from typing import Tuple
def get_optimizer(
optim_type: str,
optimizer_grouped_parameters,
lr: float,
weight_decay: float,
eps: Optional[float] = 1e-6,
betas: Optional[Tuple[float, float]] = (0.9, 0.999),
momentum: Optional[float] = 0.9,
):
"""
Choose a Pytorch optimizer based on its name.
Parameters
----------
optim_type
Name of optimizer.
optimizer_grouped_parameters
The model parameters to be optimized.
lr
Learning rate.
weight_decay
Optimizer weight decay.
eps
Optimizer eps.
betas
Optimizer betas.
momentum
Momentum used in the SGD optimizer.
Returns
-------
A Pytorch optimizer.
"""
if optim_type == "adamw":
optimizer = optim.AdamW(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
eps=eps,
betas=betas,
)
elif optim_type == "adam":
optimizer = optim.Adam(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
)
elif optim_type == "sgd":
optimizer = optim.SGD(
optimizer_grouped_parameters,
lr=lr,
weight_decay=weight_decay,
momentum=momentum,
)
else:
raise ValueError(f"unknown optimizer: {optim_type}")
return optimizer | b80ca6d38ada3aba310656c7609e73a34d8555b7 | 18,354 |
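# Usage sketch for get_optimizer above (assumes PyTorch; `optim` in the function
# refers to torch.optim, imported elsewhere in the original module).
from torch import nn

net = nn.Linear(16, 4)
param_groups = [{"params": net.parameters()}]
opt = get_optimizer("adamw", param_groups, lr=1e-3, weight_decay=1e-2)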
def forward(observations, transitions, sequence_len, batch=False):
"""Implementation of the forward algorithm in Keras.
Returns the log probability of the given observations and transitions
by recursively summing over the probabilities of all paths through
the state space. All probabilities are in logarithmic space.
See e.g. https://en.wikipedia.org/wiki/Forward_algorithm .
Args:
observations (tensor): A tensor of the observation log
probabilities, shape (sequence_len, num_states) if
batch is False, (batch_size, sequence_len, num_states)
otherwise.
transitions (tensor): A (num_states, num_states) tensor of
the transition weights (log probabilities).
sequence_len (int): The number of steps in the sequence.
This must be given because unrolling scan() requires a
definite (not tensor) value.
batch (bool): Whether to run in batchwise mode. If True, the
first dimension of observations corresponds to the batch.
Returns:
Total log probability if batch is False or vector of log
        probabilities otherwise.
"""
step = make_forward_step(transitions, batch)
if not batch:
first, rest = observations[0, :], observations[1:, :]
else:
first, rest = observations[:, 0, :], observations[:, 1:, :]
sequence_len -= 1 # exclude first
outputs, _ = scan(step, rest, first, n_steps=sequence_len, batch=batch)
if not batch:
last, axis = outputs[sequence_len-1], 0
else:
last, axis = outputs[:, sequence_len-1], 1
return logsumexp(last, axis=axis) | 16e104ee3e4a55903b2874cbef74e9e63584174c | 18,355 |
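# Reference sketch (plain NumPy/SciPy, independent of the make_forward_step/scan
# helpers used above): the same log-space forward recursion for one un-batched
# sequence, assuming `transitions` is indexed [from_state, to_state].
import numpy as np
from scipy.special import logsumexp as np_logsumexp

def forward_reference(observations, transitions):
    """observations: (T, S) log obs-probs; transitions: (S, S) log weights."""
    alpha = observations[0]
    for t in range(1, observations.shape[0]):
        # alpha'[j] = logsum_i(alpha[i] + transitions[i, j]) + observations[t, j]
        alpha = np_logsumexp(alpha[:, None] + transitions, axis=0) + observations[t]
    return np_logsumexp(alpha)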
def fib(n):
"""Return the n'th Fibonacci number.
"""
if n < 0:
raise ValueError("Fibonacci numbers are only defined for n >= 0.")
return _fib(n) | e25907deae2884e3ec69ba8ae29fb362aa50dbe3 | 18,356 |
def check_column(board):
"""
list -> bool
This function checks if every column has different numbers and returns
    True if yes, and False if not.
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 1 **", " 8 2***", " 2 ****"])
False
>>> check_column(["**** ****", "***1 ****", "** 3****", \
"* 4 1****", " 9 5 ", " 6 83 *", "3 5 **", " 8 2***", " 2 ****"])
True
"""
length = len(board)
for i in range(length):
one_line = []
for line in board:
if line[i] == '*' or line[i] == ' ':
continue
if line[i] in one_line:
return False
else:
one_line.append(line[i])
return True | b903d1b589cd2981cc374ff47f985151d341e7ec | 18,357 |
def lines_len_in_circle(r, font_size=12, letter_width=7.2):
"""Return the amount of chars that fits each line in a circle according to
its radius *r*
Doctest:
.. doctest::
>>> lines_len_in_circle(20)
[2, 5, 2]
"""
lines = 2 * r // font_size
positions = [
x + (font_size // 2) * (-1 if x <= 0 else 1)
for x in text_y(lines)
]
return [
int(2 * r * cos(asin(y / r)) / letter_width)
for y in positions
] | 43a7043d1c1632a9683476ab5cfa9d19f8105230 | 18,358 |
def capacitorCorrection(m_cap):
"""Apply a correction to the measured capacitance value
to get a value closer to the real value.
    One reason these may differ is that the measurement varies with frequency.
The measurements are performed at 30Hz but capacitance values
are normally quoted for 1kHz.
    The coefficients are based on multiple linear regression in R
using rms-based measurements of capacitors vs readings from multimeter
plus a fudge for small values!
"""
### These are based on 30Hz sine wave with 2x2k2 + 2x10nF
### + internal_res = 200 and no esr correction
###return -7.599263e-08 + 9.232542e-01 * m_cap + 1.690527e+04 * m_cap * m_cap
### 31Hz sine 2x2k2 + 2x10nF with internal_res = 140 no esr correction
poly2_cor = -6.168148e-08 + 8.508691e-01 * m_cap + 2.556320e+04 * m_cap * m_cap
return poly2_cor if poly2_cor > 30e-9 else m_cap * 0.2 | 1942c177d534bc5533bb636e10f0107c1230c81d | 18,360 |
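# Worked example for capacitorCorrection above (illustrative values):
#   m_cap = 1.0e-6 -> -6.168e-08 + 0.8509 * 1e-06 + 2.556e+04 * 1e-12 ≈ 8.15e-07
#   m_cap = 1.0e-8 -> corrected value falls below 30 nF, so the 0.2 * m_cap fudge applies
print(capacitorCorrection(1.0e-6))  # ~8.15e-07 F
print(capacitorCorrection(1.0e-8))  # 2e-09 F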
def get_s3_keys(bucket):
"""Get a list of keys in an S3 bucket."""
keys = []
resp = s3.list_objects(Bucket=bucket)
for obj in resp['Contents']:
keys.append(obj['Key'])
return keys | 2efb2e4e9a7ac943c3e35e69a0987933957800e1 | 18,364 |
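# Pagination sketch (assumes the same boto3 `s3` client as above): list_objects
# returns at most 1000 keys per call, so large buckets need a paginator.
def get_all_s3_keys(bucket):
    """Get every key in an S3 bucket, following pagination."""
    keys = []
    paginator = s3.get_paginator('list_objects_v2')
    for page in paginator.paginate(Bucket=bucket):
        for obj in page.get('Contents', []):
            keys.append(obj['Key'])
    return keys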
from scipy.ndimage.filters import maximum_filter, minimum_filter
def plot_maxmin_points(lon, lat, data, extrema, nsize, symbol, color='k',
plotValue=True, transform=None):
"""
This function will find and plot relative maximum and minimum for a 2D grid. The function
can be used to plot an H for maximum values (e.g., High pressure) and an L for minimum
values (e.g., low pressue). It is best to used filetered data to obtain a synoptic scale
max/min value. The symbol text can be set to a string value and optionally the color of the
symbol and any plotted value can be set with the parameter color
lon = plotting longitude values (2D)
lat = plotting latitude values (2D)
data = 2D data that you wish to plot the max/min symbol placement
extrema = Either a value of max for Maximum Values or min for Minimum Values
nsize = Size of the grid box to filter the max and min values to plot a reasonable number
symbol = String to be placed at location of max/min value
    color = String matplotlib colorname to plot the symbol (and numeric value, if plotted)
    plotValue = Boolean (True/False) of whether to plot the numeric value of max/min point
The max/min symbol will be plotted on the current axes within the bounding frame
(e.g., clip_on=True)
^^^ Notes from MetPy. Function adapted from MetPy.
"""
if (extrema == 'max'):
data_ext = maximum_filter(data, nsize, mode='nearest')
elif (extrema == 'min'):
data_ext = minimum_filter(data, nsize, mode='nearest')
else:
        raise ValueError('Value for extrema must be either max or min')
mxy, mxx = np.where(data_ext == data)
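    # NOTE: `ax` is not a parameter; this function relies on a Matplotlib axes
    # object named `ax` existing in the enclosing scope (it is also what is returned).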
for i in range(len(mxy)):
ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]], symbol, color=color, size=13,
clip_on=True, horizontalalignment='center', verticalalignment='center',
fontweight='extra bold',
transform=transform)
ax.text(lon[mxy[i], mxx[i]], lat[mxy[i], mxx[i]],
            '\n \n' + str(int(data[mxy[i], mxx[i]])),
color=color, size=8, clip_on=True, fontweight='bold',
horizontalalignment='center', verticalalignment='center',
transform=transform, zorder=10)
return ax | a1d17972540346e96337e54da72960dc9e1f8afe | 18,365 |
def _get_results(model):
"""
Helper function to get the results from the solved model instances
"""
_invest = {}
results = solph.processing.convert_keys_to_strings(model.results())
for i in ["wind", "gas", "storage"]:
_invest[i] = results[(i, "electricity")]["scalars"]["invest"]
return _invest | d054ef85df5603d8b450c0e35cd65ab5fb1cc278 | 18,366 |
def subtract_overscan(ccd, overscan=None, overscan_axis=1, fits_section=None,
median=False, model=None):
"""
Subtract the overscan region from an image.
Parameters
----------
ccd : `~astropy.nddata.CCDData`
Data to have overscan frame corrected.
overscan : `~astropy.nddata.CCDData` or None, optional
Slice from ``ccd`` that contains the overscan. Must provide either
this argument or ``fits_section``, but not both.
Default is ``None``.
overscan_axis : 0, 1 or None, optional
Axis along which overscan should combined with mean or median. Axis
numbering follows the *python* convention for ordering, so 0 is the
first axis and 1 is the second axis.
If overscan_axis is explicitly set to None, the axis is set to
the shortest dimension of the overscan section (or 1 in case
of a square overscan).
Default is ``1``.
fits_section : str or None, optional
Region of ``ccd`` from which the overscan is extracted, using the FITS
conventions for index order and index start. See Notes and Examples
below. Must provide either this argument or ``overscan``, but not both.
Default is ``None``.
median : bool, optional
If true, takes the median of each line. Otherwise, uses the mean.
Default is ``False``.
model : `~astropy.modeling.Model` or None, optional
Model to fit to the data. If None, returns the values calculated
by the median or the mean.
Default is ``None``.
{log}
Raises
------
TypeError
A TypeError is raised if either ``ccd`` or ``overscan`` are not the
correct objects.
Returns
-------
ccd : `~astropy.nddata.CCDData`
CCDData object with overscan subtracted.
Notes
-----
The format of the ``fits_section`` string follow the rules for slices that
are consistent with the FITS standard (v3) and IRAF usage of keywords like
TRIMSEC and BIASSEC. Its indexes are one-based, instead of the
python-standard zero-based, and the first index is the one that increases
most rapidly as you move through the array in memory order, opposite the
python ordering.
The 'fits_section' argument is provided as a convenience for those who are
processing files that contain TRIMSEC and BIASSEC. The preferred, more
pythonic, way of specifying the overscan is to do it by indexing the data
array directly with the ``overscan`` argument.
Examples
--------
Creating a 100x100 array containing ones just for demonstration purposes::
>>> import numpy as np
>>> from astropy import units as u
>>> arr1 = CCDData(np.ones([100, 100]), unit=u.adu)
The statement below uses all rows of columns 90 through 99 as the
overscan::
>>> no_scan = subtract_overscan(arr1, overscan=arr1[:, 90:100])
>>> assert (no_scan.data == 0).all()
This statement does the same as the above, but with a FITS-style section::
>>> no_scan = subtract_overscan(arr1, fits_section='[91:100, :]')
>>> assert (no_scan.data == 0).all()
Spaces are stripped out of the ``fits_section`` string.
"""
if not (isinstance(ccd, CCDData) or isinstance(ccd, np.ndarray)):
raise TypeError('ccddata is not a CCDData or ndarray object.')
if ((overscan is not None and fits_section is not None) or
(overscan is None and fits_section is None)):
raise TypeError('specify either overscan or fits_section, but not '
'both.')
if (overscan is not None) and (not isinstance(overscan, CCDData)):
raise TypeError('overscan is not a CCDData object.')
if (fits_section is not None and
not isinstance(fits_section, str)):
raise TypeError('overscan is not a string.')
if fits_section is not None:
overscan = ccd[slice_from_string(fits_section, fits_convention=True)]
if overscan_axis is None:
overscan_axis = 0 if overscan.shape[1] > overscan.shape[0] else 1
if median:
oscan = np.median(overscan.data, axis=overscan_axis)
else:
oscan = np.mean(overscan.data, axis=overscan_axis)
if model is not None:
of = fitting.LinearLSQFitter()
yarr = np.arange(len(oscan))
oscan = of(model, yarr, oscan)
oscan = oscan(yarr)
if overscan_axis == 1:
oscan = np.reshape(oscan, (oscan.size, 1))
else:
oscan = np.reshape(oscan, (1, oscan.size))
else:
if overscan_axis == 1:
oscan = np.reshape(oscan, oscan.shape + (1,))
else:
oscan = np.reshape(oscan, (1,) + oscan.shape)
subtracted = ccd.copy()
# subtract the overscan
subtracted.data = ccd.data - oscan
return subtracted | 9d5d8333949f77e86000f836051c92122b96c87b | 18,368 |
from datetime import datetime
def read_raw(omega):
"""Read the raw temperature, humidity and dewpoint values from an OMEGA iServer.
Parameters
----------
omega : :class:`msl.equipment.record_types.EquipmentRecord`
The Equipment Record of an OMEGA iServer.
Returns
-------
:class:`str`
The serial number of the OMEGA iServer.
:class:`dict`
The data.
"""
nprobes = omega.connection.properties.get('nprobes', 1)
nbytes = omega.connection.properties.get('nbytes')
error = None
try:
cxn = omega.connect()
thd = cxn.temperature_humidity_dewpoint(probe=1, nbytes=nbytes)
if nprobes == 2:
thd += cxn.temperature_humidity_dewpoint(probe=2, nbytes=nbytes)
cxn.disconnect()
except Exception as e:
error = str(e)
thd = [None] * (nprobes * 3)
now_iso = datetime.now().replace(microsecond=0).isoformat(sep='T')
data = {
'error': error,
'alias': omega.alias,
'datetime': now_iso,
'report_number': None,
}
if len(thd) == 3:
data.update({
'temperature': thd[0], 'humidity': thd[1], 'dewpoint': thd[2]
})
else:
data.update({
'temperature1': thd[0], 'humidity1': thd[1], 'dewpoint1': thd[2],
'temperature2': thd[3], 'humidity2': thd[4], 'dewpoint2': thd[5]
})
return omega.serial, data | 105e07d26774288319459ebdc485d75c3a909212 | 18,369 |
import math
def get_spell_slots(pcClass, level):
"""Return a list containing the available spell slots for each spell level."""
spell_slots = []
if pcClass.casefold() == "Magic-User".casefold():
highest_spell_level = min(math.ceil(level / 2), 9)
# MU_SPELL_SLOTS[level - 1] gives first level spell slots for the given character
# level. The spell slots for subsequent spell levels move two steps down the
# list each time. So we move two steps down the list for each spell level we
# need beyond the first by subtracting 2 * i from the index.
for i in range(highest_spell_level):
spell_slots.append(MU_SPELL_SLOTS[(level - 1) - (2 * i)])
if pcClass.casefold() == "Cleric".casefold():
# Cleric spell slots are a little strange: they have access to level 1 spells
# if they're 3rd level or lower. Otherwise, they use the same progression as
# magic-users (except that Clerics' highest spell level is 7, not 9).
highest_spell_level = 1 if level <= 3 else min(math.ceil(level / 2), 7)
# Here's the really painful bit. Cleric spell slots ALMOST follow the nice easy
# Magic-User pattern of moving two steps down each time you go up a spell level.
# Almost.
# They actually move 3 steps down the first time (from spell level 1 to spell
# level 2), and then a nice even 2 steps down for every spell level after that.
# Special cases, UGH.
for i in range(highest_spell_level):
if i <= 1:
spell_slots.append(CLERIC_SPELL_SLOTS[(level - 1) - (3 * i)])
else:
spell_slots.append(CLERIC_SPELL_SLOTS[(level - 1) - (2 * i)])
# Sigh. Level 20 is a special case that doesn't follow any handy pattern that I
# could find.
if level == 20:
spell_slots = [8, 7, 7, 6, 5, 5, 4]
return spell_slots | 792110a79eb00965ea72e067e47c8eff2be4c293 | 18,370 |
from datetime import datetime, timedelta
def determine_dates_to_query_on_matomo(dates_in_database):
"""
Determines which dates need to be queried on Matomo to update the dataset.
"""
# determines which dates are missing from the database and could be queried on Matomo
# NOTE: start date was set to 2020-05-01 as May is when the portal started to be live
start_date = datetime.strptime('2020-05-01', '%Y-%m-%d').date()
end_date = (datetime.today() - timedelta(1)).date()
delta = timedelta(days=1)
dates_to_process = []
while start_date <= end_date:
if str(start_date) not in dates_in_database:
dates_to_process.append(str(start_date))
start_date += delta
return dates_to_process | 40db63fb7ff339d5c306df37cf0f4b1765b91f90 | 18,371 |
def calc_total_energy(electron_energy, atomic_distance, energy0):
"""
Calculates the total energy of H2 molecule from electron_energy by
adding proton-proton Coulomb energy and defining the zero energy
energy0. The formula:
E = E_el + E_p - E_0
    where E is the total energy, E_el is the electronic energy,
    E_p = 1 / R (R is the atomic distance), and E_0 is the chosen
zero energy.
:param electron_energy: list of energies of H2 molecule without
proton-proton Coulomb energy
:param atomic_distance: list of distances between two H atoms
of H2 molecule
:param energy0: The energy that we take as zero energy
:return: list of total energy of H2 molecule in MJ mol^{-1}
"""
total_energy = [0]*len(electron_energy)
for dot in range(len(electron_energy)):
# proton-proton Coulomb energy
proton_energy = proton_proton(atomic_distance[dot])
total_energy_hartree = electron_energy[dot] + proton_energy - energy0
total_energy[dot] = hartree_to_MJmol(total_energy_hartree)
return total_energy | 3a948dc26e7147961e9c7677ef2b8b1d8f47d0ab | 18,373 |
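# Worked example of the formula above (illustrative numbers; proton_proton() and
# hartree_to_MJmol() are helpers defined elsewhere in the module):
#   E_el = -1.88 Ha, R = 1.4 bohr, E_0 = -1.0 Ha
#   E = E_el + 1/R - E_0 = -1.88 + 0.714 + 1.0 ≈ -0.166 Ha,
#   which is then converted to MJ/mol (1 Ha ≈ 2.6255 MJ/mol, so ≈ -0.44 MJ/mol).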
def k8s_stats_response():
"""
Returns K8s /stats/summary endpoint output from microk8s on Jetson Nano.
"""
with open("tests/resources/k8s_response.json", "r") as response_file:
response = response_file.read()
return response | 68413108eeea6bdd80a782b962f3a5c97e1a4b73 | 18,374 |
def display_credentials():
"""
Function to display saved credentials.
"""
return Credentials.display_credential() | 0cfb2e7529bd46ae3a05e21aeec25761c062e04b | 18,375 |
from typing import Optional
from typing import Dict
def evaluate_absence_of_narrow_ranges(
piece: Piece, min_size: int = 9,
penalties: Optional[Dict[int, float]] = None
) -> float:
"""
Evaluate melodic fluency based on absence of narrow ranges.
:param piece:
`Piece` instance
:param min_size:
minimum size of narrow range (in line elements)
:param penalties:
mapping from width of a range (in scale degrees) to penalty
applicable to ranges of not greater width
:return:
multiplied by -1 count of narrow ranges weighted based on their width
"""
penalties = penalties or {2: 1, 3: 0.5}
pitches = [x.scale_element.position_in_degrees for x in piece.counterpoint]
rolling_mins = rolling_aggregate(pitches, min, min_size)[min_size-1:]
rolling_maxs = rolling_aggregate(pitches, max, min_size)[min_size-1:]
borders = zip(rolling_mins, rolling_maxs)
score = 0
for lower_border, upper_border in borders:
range_width = upper_border - lower_border
curr_penalties = [v for k, v in penalties.items() if k >= range_width]
penalty = max(curr_penalties) if curr_penalties else 0
score -= penalty
return score | 4b487f1f749f31d33852d928b1b56d1489336827 | 18,376 |
from qalgebra.core.scalar_algebra import (
One,
Scalar,
ScalarTimes,
ScalarValue,
Zero,
)
from collections import OrderedDict
def collect_scalar_summands(cls, ops, kwargs):
"""Collect :class:`.ScalarValue` and :class:`.ScalarExpression` summands.
Example::
>>> srepr(collect_scalar_summands(Scalar, (1, 2, 3), {}))
'ScalarValue(6)'
>>> collect_scalar_summands(Scalar, (1, 1, -1), {})
One
>>> collect_scalar_summands(Scalar, (1, -1), {})
Zero
>>> Psi = KetSymbol("Psi", hs=0)
>>> Phi = KetSymbol("Phi", hs=0)
>>> braket = BraKet.create(Psi, Phi)
>>> collect_scalar_summands(Scalar, (1, braket, -1), {})
<Psi|Phi>^(0)
>>> collect_scalar_summands(Scalar, (1, 2 * braket, 2, 2 * braket), {})
((3, 4 * <Psi|Phi>^(0)), {})
>>> collect_scalar_summands(Scalar, (2 * braket, -braket, -braket), {})
Zero
"""
# This routine is required because there is no
# "ScalarTimesQuantumExpression" for scalars: we have to extract
    # coefficients from ScalarTimes instead
a_0 = Zero
coeff_map = OrderedDict()
for op in ops:
if isinstance(op, ScalarValue) or isinstance(op, Scalar._val_types):
a_0 += op
continue
elif isinstance(op, ScalarTimes):
if isinstance(op.operands[0], ScalarValue):
coeff = op.operands[0]
term = op.operands[1]
for sub_op in op.operands[2:]:
term *= sub_op
else:
coeff, term = One, op
else:
coeff, term = One, op
if term in coeff_map:
coeff_map[term] += coeff
else:
coeff_map[term] = coeff
if a_0 == Zero:
fops = []
else:
fops = [a_0]
for (term, coeff) in coeff_map.items():
op = coeff * term
if not op.is_zero:
fops.append(op)
if len(fops) == 0:
return cls._zero
elif len(fops) == 1:
return fops[0]
else:
return tuple(fops), kwargs | a6b7ba05db9e9d6434f217bcc7a67f2b6f7ba22b | 18,377 |