content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
import pytest

def requires_site(site):
    """Skip the test unless it is being run at the given site (compared against SITE)."""
skip_it = bool(site != SITE)
return pytest.mark.skipif(skip_it,
reason='SITE is not %s.' % site) | b436e6390828af2ffdca6e972b5a55fccafed64b | 13,000 |
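A brief usage sketch of the decorator above; it assumes SITE is defined in the same module as requires_site (e.g. SITE = 'lab-a', a hypothetical value):

@requires_site('lab-b')
def test_lab_b_only():
    # Skipped whenever SITE != 'lab-b', with reason "SITE is not lab-b."
    assert True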
import traceback

import flask
import werkzeug.exceptions
def handle_error(e):
"""
Handle errors, formatting them as JSON if requested
"""
error_type = type(e).__name__
message = str(e)
trace = None
description = None
status_code = 500
if isinstance(e, werkzeug.exceptions.HTTPException):
status_code = e.code
description = e.description
if app.debug:
trace = traceback.format_exc()
if request_wants_json():
details = {
'message': message,
'type': error_type,
}
if description is not None:
details['description'] = description
if trace is not None:
details['trace'] = trace.split('\n')
return flask.jsonify({'error': details}), status_code
else:
message = message.replace('\\n', '<br />')
if isinstance(e, digits.frameworks.errors.NetworkVisualizationError):
trace = message
message = ''
return flask.render_template('error.html',
title=error_type,
message=message,
description=description,
trace=trace,
), status_code | c2b1b3d3e9b97c9c28896d2ef04e2f33b64d6a18 | 13,001 |
import numpy as np

def iou(bbox_1, bbox_2):
"""Computes intersection over union between two bounding boxes.
Parameters
----------
bbox_1 : np.ndarray
First bounding box, of the form (x_min, y_min, x_max, y_max).
bbox_2 : np.ndarray
Second bounding box, of the form (x_min, y_min, x_max, y_max).
Returns
-------
float
Intersection over union value between both bounding boxes.
"""
x_min = np.maximum(bbox_1[0], bbox_2[0])
y_min = np.maximum(bbox_1[1], bbox_2[1])
x_max = np.minimum(bbox_1[2], bbox_2[2])
y_max = np.minimum(bbox_1[3], bbox_2[3])
width = np.maximum(0.0, x_max - x_min)
height = np.maximum(0.0, y_max - y_min)
intersection = width * height
return (
intersection
) / (
(bbox_1[2] - bbox_1[0]) * (bbox_1[3] - bbox_1[1])
+ (bbox_2[2] - bbox_2[0]) * (bbox_2[3] - bbox_2[1])
- intersection
) | c076290d386a2c6740f7cd6aaee18a42f9c850ce | 13,002 |
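A quick usage sketch of iou() above (assumes NumPy is imported as np, as in the snippet):

box_a = np.array([0.0, 0.0, 10.0, 10.0])
box_b = np.array([5.0, 5.0, 15.0, 15.0])
# Intersection is a 5x5 patch (area 25); union is 100 + 100 - 25 = 175.
print(iou(box_a, box_b))  # ~0.143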
import cv2
import numpy as np

def GetIntensityArray(videofile, threshold, scale_percent):
"""Finds pixel coordinates within a videofile (.tif, .mp4) for pixels
that are above a brightness threshold, then accumulates the
brightness event intensities for each coordinate,
outputting it as a 2-D array in the same size as the video frames
Input:
-videofile: file containing an image stack of fluorescent events
-threshold: minimum brightness for detection
-scale_percent: helps resize image for faster computing speeds
Output: 2-d Array of accumulated intensity values for each pixel above
a calculated brightness threshold in the video"""
# Reading video file and convert to grayscale
ret, img = cv2.imreadmulti(videofile, flags=cv2.IMREAD_GRAYSCALE)
# Setting Resizing Dimensions
width = int(img[0].shape[1] * scale_percent / 100)
height = int(img[0].shape[0] * scale_percent / 100)
dim = (width, height)
img_resized = cv2.resize(img[0], dim, interpolation=cv2.INTER_AREA)
# Creating empty array to add intensity values to
int_array = np.zeros(np.shape(img_resized))
for frame in range(len(img)):
# Resize Frame
frame_resized = cv2.resize(img[frame],
dim, interpolation=cv2.INTER_AREA)
intensity = GetIntensityValues(frame_resized, threshold)
        if np.any(intensity >= 1):
            # Get coordinates of the pixels with brightness events
            row, col = np.where(intensity >= 1)
            for i in range(len(row)):
                # Add the event intensity to int_array at that event's own location
                int_array[row[i], col[i]] += intensity[row[i], col[i]]
return int_array | 720b50da3eb04698c62b2afa67ca3cd7b4f3661d | 13,003 |
def _check_data(handler, data):
"""Check the data."""
if 'latitude' not in data or 'longitude' not in data:
handler.write_text("Latitude and longitude not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Latitude and longitude not specified.")
return False
if 'device' not in data:
handler.write_text("Device id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Device id not specified.")
return False
if 'id' not in data:
handler.write_text("Location id not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Location id not specified.")
return False
if 'trigger' not in data:
handler.write_text("Trigger is not specified.",
HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.error("Trigger is not specified.")
return False
return True | 829681db92b0b7a6368ca143b6c656b46c972430 | 13,004 |
import requests
from bs4 import BeautifulSoup
def get_soup(page_url):
""" Returns BeautifulSoup object of the url provided """
try:
req = requests.get(page_url)
except Exception:
print('Failed to establish a connection with the website')
return
if req.status_code == 404:
print('Page not found')
return
content = req.content
soup = BeautifulSoup(content, 'html.parser')
return soup | d837e3b6aa6184285857428b2c796172379f3a1f | 13,005 |
def foreign_key_constraint_sql(table):
"""Return the SQL to add foreign key constraints to a given table"""
sql = ''
fk_names = list(table.foreign_keys.keys())
for fk_name in sorted(fk_names):
foreign_key = table.foreign_keys[fk_name]
sql += "FOREIGN KEY({fn}) REFERENCES {tn}({kc}), ".format(
fn=foreign_key.from_col,
tn=foreign_key.to_table.name,
kc=foreign_key.to_col
)
return sql | 0883050d2b9d302ab9099ef27abd400e4d4fe69e | 13,006 |
from typing import Optional
def expandDimConst(term: AST.PPTerm,
ntId: int) -> Optional[AST.PPTerm]:
"""
Expand dimension constant to integer constants (Required for fold zeros)
"""
nt = ASTUtils.getNthNT(term, ntId)
if type(nt.sort) != AST.PPDimConst:
return None
subTerm = AST.PPIntConst(nt.sort.value)
termExpanded = ReprUtils.replaceNthNT(term, ntId, subTerm)
return termExpanded | b048d8ed0ec743a88c17a1f120a57d5e548e7d69 | 13,007 |
from scipy.optimize import minimize_scalar
def _fit_amplitude_scipy(counts, background, model, optimizer='Brent'):
"""
Fit amplitude using scipy.optimize.
Parameters
----------
counts : `~numpy.ndarray`
Slice of count map.
background : `~numpy.ndarray`
Slice of background map.
model : `~numpy.ndarray`
Model template to fit.
    optimizer : str
        Optimization method passed to `scipy.optimize.minimize_scalar` (default 'Brent').
Returns
-------
amplitude : float
Fitted flux amplitude.
niter : int
Number of function evaluations needed for the fit.
"""
args = (counts, background, model)
amplitude_min, amplitude_max = _amplitude_bounds_cython(counts, background, model)
try:
result = minimize_scalar(f_cash, bracket=(amplitude_min, amplitude_max),
args=args, method=optimizer, tol=10)
return result.x, result.nfev
except ValueError:
result = minimize_scalar(f_cash, args=args, method=optimizer, tol=0.1)
return result.x, result.nfev | 51faafd7a81ff70196075bacac6b36db6b7e09f8 | 13,008 |
import psutil
import subprocess
def account_key__sign(data, key_pem=None, key_pem_filepath=None):
"""
This routine will use crypto/certbot if available.
If not, openssl is used via subprocesses
    :param data: (required) the data to sign
    :param key_pem: (required) the RSA Key in PEM format
:param key_pem_filepath: (optional) the filepath to a PEM encoded RSA account key file.
"""
log.info("account_key__sign >")
if openssl_crypto:
pkey = openssl_crypto.load_privatekey(openssl_crypto.FILETYPE_PEM, key_pem)
if PY3:
if not isinstance(data, bytes):
data = data.encode()
signature = pkey.to_cryptography_key().sign(
data,
cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15(),
cryptography.hazmat.primitives.hashes.SHA256(),
)
return signature
log.debug(".account_key__sign > openssl fallback")
_tmpfile = None
try:
if key_pem_filepath is None:
_tmpfile = new_pem_tempfile(key_pem)
key_pem_filepath = _tmpfile.name
with psutil.Popen(
[openssl_path, "dgst", "-sha256", "-sign", key_pem_filepath],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
) as proc:
if PY3:
if not isinstance(data, bytes):
data = data.encode()
signature, err = proc.communicate(data)
if proc.returncode != 0:
raise IOError("account_key__sign\n{0}".format(err))
return signature
finally:
if _tmpfile:
_tmpfile.close() | bc7ee73fe71a54937ef8626f2b54d14f800badd1 | 13,009 |
from pathlib import Path
def get_world_paths() -> list:
"""
Returns a list of paths to the worlds on the server.
"""
server_dir = Path(__file__).resolve().parents[1]
world_paths = []
for p in server_dir.iterdir():
        if p.is_dir() and (p / "level.dat").is_file():
world_paths.append(p.absolute())
return world_paths | bf1c23c6a1c928dc66470db2e11b49ad2fc9e5d9 | 13,010 |
def derivative_p(α_L, α_G, ρ_G, v_L, v_G): # (1)
"""
    Calculates the pressure spatial derivative to be plugged into the expression for
    the pressure at the next spatial step (see the first equation of the model). It
    returns the value of the pressure spatial derivative at the current time step
    and, hence, takes as arguments the volume fractions, velocities, and gas density
    at the current spatial step.
Args:
α_L (float) - liquid phase volume fraction. Can assume any value
from 0 to 1.
α_G (float) - gaseous phase volume fraction. Can assume any value
from 0 to 1.
ρ_G (float) - gaseous phase density. Can assume any positive value.
v_L (float) - liquid phase velocity. Can assume either positive or
negative values.
v_G (float) - gaseous phase velocity. Can assume any positive value.
Returns:
float: the return value (pressure derivative at the current spatial
step). Can assume any value from negative infinity to 0.
"""
derivative_p = (-1)*(ρ_L*α_L + ρ_G*α_G) \
* ( g + (2*f/D) * (α_L*v_L + α_G*v_G)**2 ) # line continuation operator
return(derivative_p) | e2aa8a4967b121b798058aa963b0685080b7eb8b | 13,011 |
import numpy as np

def fit_sigmoid(colors, a=0.05):
"""Fits a sigmoid to raw contact temperature readings from the ContactPose dataset. This function is copied from that repo"""
idx = colors > 0
ci = colors[idx]
x1 = min(ci) # Find two points
y1 = a
x2 = max(ci)
y2 = 1-a
lna = np.log((1 - y1) / y1)
lnb = np.log((1 - y2) / y2)
k = (lnb - lna) / (x1 - x2)
mu = (x2*lna - x1*lnb) / (lna - lnb)
ci = np.exp(k * (ci-mu)) / (1 + np.exp(k * (ci-mu))) # Apply the sigmoid
colors[idx] = ci
return colors | 2024e32cd454cb27cf3d0fef9f270a22abba5ea0 | 13,012 |
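A small sanity-check sketch for fit_sigmoid() above: by construction the smallest positive reading maps to roughly a and the largest to roughly 1 - a (assumes NumPy imported as np, as in the snippet):

raw = np.array([0.0, 10.0, 20.0, 40.0])  # zero entries are left untouched
out = fit_sigmoid(raw.copy(), a=0.05)
# out[1] (the smallest positive reading) is ~0.05, out[3] (the largest) is ~0.95.
print(out)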
from functools import wraps
import rollbar

def deprecated(func):
"""Decorator for reporting deprecated function calls
Use this decorator sparingly, because we'll be charged if we make too many Rollbar notifications
"""
@wraps(func)
def wrapped(*args, **kwargs):
# try to get a request, may not always succeed
request = get_current_request()
# notify a maximum of once per function per request/session
if request:
if DEPRECATED_ROLLBAR_NOTIFIED not in request.session:
deprecated_notifications = {}
request.session[DEPRECATED_ROLLBAR_NOTIFIED] = deprecated_notifications
deprecated_notifications = request.session[DEPRECATED_ROLLBAR_NOTIFIED]
key = '%s' % func
# first get it
already_notified = deprecated_notifications.get(key, False)
# then mark it
deprecated_notifications[key] = True
else:
already_notified = False
if not already_notified:
rollbar.report_message('Deprecated function call warning: %s' % func, 'warning', request)
return func(*args, **kwargs)
return wrapped | d01f53acd584faa3c3b15771c76a00210ffbeb83 | 13,013 |
from pathlib import Path

def read_file(path_file: Path) -> str:
"""
Reads the content of the file at path_file
:param path_file:
:return:
"""
content = None
with open(path_file, 'r') as file:
content = file.read()
return content | 0a49a13ddc855abfdbb4cefd2396a7832c1e26a8 | 13,014 |
from bisect import bisect
def absorptionCoefficient_Voigt(Components=None,SourceTables=None,partitionFunction=PYTIPS,
Environment=None,OmegaRange=None,OmegaStep=None,OmegaWing=None,
IntensityThreshold=DefaultIntensityThreshold,
OmegaWingHW=DefaultOmegaWingHW,
ParameterBindings=DefaultParameterBindings,
EnvironmentDependencyBindings=DefaultEnvironmentDependencyBindings,
GammaL='gamma_air', HITRAN_units=True, LineShift=True,
File=None, Format=None, OmegaGrid=None):
"""
INPUT PARAMETERS:
Components: list of tuples [(M,I,D)], where
M - HITRAN molecule number,
I - HITRAN isotopologue number,
D - abundance (optional)
SourceTables: list of tables from which to calculate cross-section (optional)
partitionFunction: pointer to partition function (default is PYTIPS) (optional)
Environment: dictionary containing thermodynamic parameters.
'p' - pressure in atmospheres,
'T' - temperature in Kelvin
Default={'p':1.,'T':296.}
OmegaRange: wavenumber range to consider.
OmegaStep: wavenumber step to consider.
OmegaWing: absolute wing for calculating a lineshape (in cm-1)
IntensityThreshold: threshold for intensities
OmegaWingHW: relative wing for calculating a lineshape (in halfwidths)
GammaL: specifies broadening parameter ('gamma_air' or 'gamma_self')
HITRAN_units: use cm2/molecule (True) or cm-1 (False) for absorption coefficient
File: write output to file (if specified)
Format: c-format of file output (accounts significant digits in OmegaStep)
OUTPUT PARAMETERS:
Omegas: wavenumber grid with respect to parameters OmegaRange and OmegaStep
Xsect: absorption coefficient calculated on the grid
---
DESCRIPTION:
Calculate absorption coefficient using Voigt profile.
Absorption coefficient is calculated at arbitrary temperature and pressure.
User can vary a wide range of parameters to control a process of calculation
(such as OmegaRange, OmegaStep, OmegaWing, OmegaWingHW, IntensityThreshold).
    The choice of these parameters depends on properties of a particular linelist.
Default values are a sort of guess which gives a decent precision (on average)
for a reasonable amount of cpu time. To increase calculation accuracy,
user should use a trial and error method.
---
EXAMPLE OF USAGE:
nu,coef = absorptionCoefficient_Voigt(((2,1),),'co2',OmegaStep=0.01,
HITRAN_units=False,GammaL='gamma_self')
---
"""
# warn user about too large omega step
    if OmegaStep>0.1: warn('Too large omega step: possible accuracy decline')
# "bug" with 1-element list
Components = listOfTuples(Components)
SourceTables = listOfTuples(SourceTables)
# determine final input values
Components,SourceTables,Environment,OmegaRange,OmegaStep,OmegaWing,\
IntensityThreshold,Format = \
getDefaultValuesForXsect(Components,SourceTables,Environment,OmegaRange,
OmegaStep,OmegaWing,IntensityThreshold,Format)
# get uniform linespace for cross-section
#number_of_points = (OmegaRange[1]-OmegaRange[0])/OmegaStep + 1
#Omegas = linspace(OmegaRange[0],OmegaRange[1],number_of_points)
if OmegaGrid is not None:
Omegas = npsort(OmegaGrid)
else:
Omegas = arange(OmegaRange[0],OmegaRange[1],OmegaStep)
number_of_points = len(Omegas)
Xsect = zeros(number_of_points)
# reference temperature and pressure
Tref = __FloatType__(296.) # K
pref = __FloatType__(1.) # atm
# actual temperature and pressure
T = Environment['T'] # K
p = Environment['p'] # atm
# create dictionary from Components
ABUNDANCES = {}
NATURAL_ABUNDANCES = {}
for Component in Components:
M = Component[0]
I = Component[1]
if len(Component) >= 3:
ni = Component[2]
else:
try:
ni = ISO[(M,I)][ISO_INDEX['abundance']]
except KeyError:
raise Exception('cannot find component M,I = %d,%d.' % (M,I))
ABUNDANCES[(M,I)] = ni
NATURAL_ABUNDANCES[(M,I)] = ISO[(M,I)][ISO_INDEX['abundance']]
# precalculation of volume concentration
if HITRAN_units:
factor = __FloatType__(1.0)
else:
factor = volumeConcentration(p,T)
# SourceTables contain multiple tables
for TableName in SourceTables:
# get line centers
nline = LOCAL_TABLE_CACHE[TableName]['header']['number_of_rows']
# loop through line centers (single stream)
for RowID in range(nline):
# get basic line parameters (lower level)
LineCenterDB = LOCAL_TABLE_CACHE[TableName]['data']['nu'][RowID]
LineIntensityDB = LOCAL_TABLE_CACHE[TableName]['data']['sw'][RowID]
LowerStateEnergyDB = LOCAL_TABLE_CACHE[TableName]['data']['elower'][RowID]
MoleculeNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['molec_id'][RowID]
IsoNumberDB = LOCAL_TABLE_CACHE[TableName]['data']['local_iso_id'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_air'][RowID]
#Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data']['gamma_self'][RowID]
Gamma0DB = LOCAL_TABLE_CACHE[TableName]['data'][GammaL][RowID]
TempRatioPowerDB = LOCAL_TABLE_CACHE[TableName]['data']['n_air'][RowID]
#TempRatioPowerDB = 1.0 # for planar molecules
if LineShift:
Shift0DB = LOCAL_TABLE_CACHE[TableName]['data']['delta_air'][RowID]
else:
Shift0DB = 0
# filter by molecule and isotopologue
if (MoleculeNumberDB,IsoNumberDB) not in ABUNDANCES: continue
# partition functions for T and Tref
# TODO: optimize
SigmaT = partitionFunction(MoleculeNumberDB,IsoNumberDB,T)
SigmaTref = partitionFunction(MoleculeNumberDB,IsoNumberDB,Tref)
# get all environment dependences from voigt parameters
# intensity
LineIntensity = EnvironmentDependency_Intensity(LineIntensityDB,T,Tref,SigmaT,SigmaTref,
LowerStateEnergyDB,LineCenterDB)
            # FILTER by LineIntensity: compare it with IntensityThreshold
# TODO: apply wing narrowing instead of filtering, this would be more appropriate
if LineIntensity < IntensityThreshold: continue
# doppler broadening coefficient (GammaD)
# V1 >>>
#GammaDDB = cSqrtLn2*LineCenterDB/cc*sqrt(2*cBolts*T/molecularMass(MoleculeNumberDB,IsoNumberDB))
#GammaD = EnvironmentDependency_GammaD(GammaDDB,T,Tref)
# V2 >>>
cMassMol = 1.66053873e-27 # hapi
#cMassMol = 1.6605402e-27 # converter
m = molecularMass(MoleculeNumberDB,IsoNumberDB) * cMassMol * 1000
GammaD = sqrt(2*cBolts*T*log(2)/m/cc**2)*LineCenterDB
# lorentz broadening coefficient
Gamma0 = EnvironmentDependency_Gamma0(Gamma0DB,T,Tref,p,pref,TempRatioPowerDB)
# get final wing of the line according to Gamma0, OmegaWingHW and OmegaWing
# XXX min or max?
OmegaWingF = max(OmegaWing,OmegaWingHW*Gamma0,OmegaWingHW*GammaD)
# shift coefficient
Shift0 = Shift0DB*p/pref
# XXX other parameter (such as Delta0, Delta2, anuVC etc.) will be included in HTP version
#PROFILE_VOIGT(sg0,GamD,Gam0,sg)
# sg0 : Unperturbed line position in cm-1 (Input).
# GamD : Doppler HWHM in cm-1 (Input)
# Gam0 : Speed-averaged line-width in cm-1 (Input).
# sg : Current WaveNumber of the Computation in cm-1 (Input).
# XXX time?
BoundIndexLower = bisect(Omegas,LineCenterDB-OmegaWingF)
BoundIndexUpper = bisect(Omegas,LineCenterDB+OmegaWingF)
lineshape_vals = PROFILE_VOIGT(LineCenterDB+Shift0,GammaD,Gamma0,Omegas[BoundIndexLower:BoundIndexUpper])[0]
Xsect[BoundIndexLower:BoundIndexUpper] += factor / NATURAL_ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
ABUNDANCES[(MoleculeNumberDB,IsoNumberDB)] * \
LineIntensity * lineshape_vals
if File: save_to_file(File,Format,Omegas,Xsect)
return Omegas,Xsect | 6c4b909999ea4e6cc14bae7fbda04f2b6ba6ddb8 | 13,015 |
def stellar_mags_scatter_cube_pair(file_pair, min_relative_flux=0.5, save=False):
"""Return the scatter in stellar colours within a star datacube pair."""
hdulist_pair = [pf.open(path, 'update') for path in file_pair]
flux = np.vstack(
[hdulist[0].data for hdulist in hdulist_pair])
noise = np.sqrt(np.vstack(
[hdulist['VARIANCE'].data for hdulist in hdulist_pair]))
wavelength = np.hstack(
[get_coords(hdulist[0].header, 3) for hdulist in hdulist_pair])
smoothed_flux = flux.copy()
smoothed_flux[~np.isfinite(smoothed_flux)] = 0.0
smoothed_flux = median_filter(smoothed_flux, (201, 1, 1))
image = np.sum(smoothed_flux, 0)
keep = (image >= (min_relative_flux * np.max(image)))
flux = flux[:, keep]
noise = noise[:, keep]
mags = []
for flux_i, noise_i in zip(flux.T, noise.T):
mags_i = measure_mags(flux_i, noise_i, wavelength)
mags.append([mags_i['g'], mags_i['r']])
mags = np.array(mags)
colour = mags[:, 0] - mags[:, 1]
scatter = np.std(colour)
if save:
for hdulist in hdulist_pair:
hdulist[0].header['COLORSTD'] = (
scatter, 'Scatter in g-r within cubes')
hdulist.flush()
for hdulist in hdulist_pair:
hdulist.close()
return scatter | b1f823a9ab33f7deadb1651aaa728690b0a08cf6 | 13,016 |
import os

from pkg_resources import resource_filename
def get_data_filename(relative_path):
"""Get the full path to one of the reference files shipped for testing
In the source distribution, these files are in ``examples/*/``,
but on installation, they're moved to somewhere in the user's python
site-packages directory.
Parameters
----------
relative_path : str
Name of the file to load, with respect to the yank egg folder which
is typically located at something like
``~/anaconda/lib/python3.6/site-packages/yank-*.egg/examples/``
Returns
-------
fn : str
Resource Filename
"""
fn = resource_filename('yank', relative_path)
if not os.path.exists(fn):
raise ValueError("Sorry! {} does not exist. If you just added it, you'll have to re-install".format(fn))
return fn | 3e400d6b386c5b5a1be7c6f89bd219329258514f | 13,017 |
import hmac
import hashlib
def is_valid_webhook_request(webhook_token: str, request_body: str, webhook_signature_header: str) -> bool:
"""This method verifies that requests to your Webhook URL are genuine and from Buycoins.
Args:
webhook_token: your webhook token
request_body: the body of the request
webhook_signature_header: the X-Webhook-Signature header from BuyCoins
Returns:
a Boolean stating whether the request is valid or not
"""
hmac_request_body = hmac.new(webhook_token.encode(), request_body.encode(), hashlib.sha1)
return hmac.compare_digest(hmac_request_body.hexdigest(), webhook_signature_header) | 1ce1ef0a9e1386ebbea7773d8cd9d40df2544792 | 13,018 |
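A usage sketch of the verification helper above, with a hypothetical token and body; the signature is computed the same way the helper expects it (HMAC-SHA1 hex digest of the body, keyed by the webhook token):

token = "my-webhook-token"            # hypothetical
body = '{"event": "coins.incoming"}'  # hypothetical
signature = hmac.new(token.encode(), body.encode(), hashlib.sha1).hexdigest()
print(is_valid_webhook_request(token, body, signature))  # True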
import torch
import torch.nn.functional as F
def logsigsoftmax(logits):
"""
Computes sigsoftmax from the paper - https://arxiv.org/pdf/1805.10829.pdf
"""
max_values = torch.max(logits, 1, keepdim=True)[0]
exp_logits_sigmoided = torch.exp(logits - max_values) * torch.sigmoid(logits)
sum_exp_logits_sigmoided = exp_logits_sigmoided.sum(1, keepdim=True)
log_probs = logits - max_values + F.logsigmoid(logits) - torch.log(sum_exp_logits_sigmoided)
return log_probs | d7f6c2ef4279d511ff81fc30cfa1db5875c5ea4a | 13,019 |
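A quick check of logsigsoftmax() above: exponentiating the returned log-probabilities should give rows that sum to 1, since sigsoftmax is normalised by construction:

logits = torch.randn(2, 5)
log_probs = logsigsoftmax(logits)
print(log_probs.exp().sum(dim=1))  # ~tensor([1., 1.])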
def multi(dispatch_fn):
"""Initialise function as a multimethod"""
def _inner(*args, **kwargs):
return _inner.__multi__.get(
dispatch_fn(*args, **kwargs),
_inner.__multi_default__
)(*args, **kwargs)
_inner.__multi__ = {}
_inner.__multi_default__ = lambda *args, **kwargs: None # Default default
return _inner | 29023826b3e67ccc39a458fc90bd817cd725c35f | 13,020 |
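multi() above only builds the dispatch table; registering implementations needs a companion decorator that is not part of the snippet. A minimal sketch of one possible companion (the name `method` is an assumption):

def method(dispatcher, key=None):
    """Register an implementation on a multimethod created by multi()."""
    def _register(fn):
        if key is None:
            dispatcher.__multi_default__ = fn  # fallback implementation
        else:
            dispatcher.__multi__[key] = fn     # implementation for this dispatch value
        return dispatcher
    return _register

@multi
def area(shape):
    return shape.get('type')

@method(area, 'circle')
def area(shape):
    return 3.14159 * shape['radius'] ** 2

print(area({'type': 'circle', 'radius': 1.0}))  # ~3.14159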
def choose_key(somemap, default=0, prompt="choose", input=input, error=default_error,
lines=LINES, columns=COLUMNS):
"""Select a key from a mapping.
Returns the key selected.
"""
keytype = type(print_menu_map(somemap, lines=lines, columns=columns))
while 1:
try:
userinput = get_input(prompt, default, input)
except EOFError:
return default
if not userinput:
return default
try:
idx = keytype(userinput)
except ValueError:
error("Not a valid entry. Please try again.")
continue
if idx not in somemap:
error("Not a valid selection. Please try again.")
continue
return idx | d3c2438c85afae48980f772a06a7e622866197c5 | 13,021 |
import os

import numpy as np
import pandas as pd
def image_model_saver(image_model, model_type, output_directory, training_dict, labels1, labels2, preds, results1, results2):
"""
Saves Keras image model and other outputs
image_model: Image model to be saved
model_type (string): Name of model
output_directory: Directory to folder to save file in
training_dict: Dictionary of training and validation values
labels1: List of multimodal labels for test set
labels2: List of unimodal labels for test set
preds: List of model predictions after passed through argmax()
results1: Dictionary of metrics on multimodal labels
    results2: Dictionary of metrics on unimodal labels
"""
output_directory = os.path.join(output_directory, model_type)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
os.chdir(output_directory)
np.save(model_type+"_dogwhistle_train_results.npy", training_dict) #save training dict
np.save(model_type+"_dogwhistle_test_results_multimodal.npy", results1) #save test metrics
np.save(model_type+"_dogwhistle_test_results_unimodal.npy", results2) #save test metrics
test_predictions = pd.DataFrame([labels1, labels2, preds]) #save predictions and labels
test_predictions = test_predictions.T
test_predictions = test_predictions.rename(columns={0: 'Multimodal Labels', 1: 'Unimodal Labels', 2: 'Predictions'})
test_predictions.to_csv(model_type+"_dogwhistle_predictions.csv")
image_model.save("image_model.h5") #save model
return print("Saving complete.") | 044674f289a73be4e285ccd472fc5428e419e080 | 13,022 |
from typing import Tuple

import numpy as np
import pandas as pd
def predicted_orders(
daily_order_summary: pd.DataFrame, order_forecast_model: Tuple[float, float]
) -> pd.DataFrame:
"""Predicted orders for the next 30 days based on the fit paramters"""
a, b = order_forecast_model
start_date = daily_order_summary.order_date.max()
future_dates = pd.date_range(start=start_date, end=start_date + pd.DateOffset(days=30))
predicted_data = model_func(x=future_dates.astype(np.int64), a=a, b=b)
return pd.DataFrame({"order_date": future_dates, "num_orders": predicted_data}) | 48f79b1076c32f93bdf148d1b986c6892cd3a8af | 13,023 |
import subprocess
def _try_command_line(command_line):
"""Returns the output of a command line or an empty string on error."""
_logging.debug("Running command line: %s" % command_line)
try:
return subprocess.check_output(command_line, stderr=subprocess.STDOUT)
except Exception as e:
_print_process_error(command_line, e)
return None | b37c67fbcaf28a7fdd7b7cd5fbafc2b2e9561f9f | 13,024 |
import torch
import tqdm
def eval_loop(model, ldr, device):
"""Runs the evaluation loop on the input data `ldr`.
Args:
model (torch.nn.Module): model to be evaluated
ldr (torch.utils.data.DataLoader): evaluation data loader
device (torch.device): device inference will be run on
Returns:
list: list of labels, predictions, and confidence levels for each example in
the dataloader
"""
all_preds = []; all_labels = []; all_preds_dist=[]
all_confidence = []
with torch.no_grad():
for batch in tqdm.tqdm(ldr):
batch = list(batch)
inputs, targets, inputs_lens, targets_lens = model.collate(*batch)
inputs = inputs.to(device)
probs, rnn_args = model(inputs, softmax=True)
probs = probs.data.cpu().numpy()
preds_confidence = [decode(p, beam_size=3, blank=model.blank)[0] for p in probs]
preds = [x[0] for x in preds_confidence]
confidence = [x[1] for x in preds_confidence]
all_preds.extend(preds)
all_confidence.extend(confidence)
all_labels.extend(batch[1])
return list(zip(all_labels, all_preds, all_confidence)) | ca96192b1cd104ea3c86b5c5234ab5bf708613c8 | 13,025 |
def get_total_frts():
"""
Get total number of FRTs for a single state.
Arguments:
Returns:
{JSON} -- Returns headers of the columns and data in list
"""
query = """
SELECT
place.state AS state
, COUNT(DISTINCT frt.id) AS state_total
FROM
panoptic.place AS place
LEFT JOIN
panoptic.frt_place_link AS link
ON
place.id = link.place__key
LEFT JOIN
panoptic.frt AS frt
ON
link.frt__key = frt.id
GROUP BY
place.state
"""
headers, data = execute_select_query(query)
results = []
while data:
results.append(dict(zip(headers, data.pop())))
return results | 0f054ef795f09ea01b28c139fd353bf237ed10a4 | 13,026 |
def newton_sqrt(n: float, a: float) -> float:
"""Approximate sqrt(n) starting from a, using the Newton-Raphson method."""
r = within(0.00001, repeat_f(next_sqrt_approx(n), a))
return next(r) | 4d7186bd55e4f4d4511078bcb0fff4959a182a3b | 13,027 |
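newton_sqrt() above relies on three helpers that the snippet does not define. One plausible set of definitions, consistent with the next(r) call (the names and exact behaviour are assumptions):

def next_sqrt_approx(n):
    """Return the Newton-Raphson update x -> (x + n / x) / 2 for sqrt(n)."""
    return lambda x: (x + n / x) / 2.0

def repeat_f(f, a):
    """Yield a, f(a), f(f(a)), ... indefinitely."""
    while True:
        yield a
        a = f(a)

def within(eps, iterable):
    """Yield approximations once consecutive terms differ by less than eps."""
    it = iter(iterable)
    prev = next(it)
    for cur in it:
        if abs(cur - prev) < eps:
            yield cur
        prev = cur

print(newton_sqrt(2.0, 1.0))  # ~1.4142135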
def prismatic(xyz, rpy, axis, qi):
"""Returns the dual quaternion for a prismatic joint.
"""
# Joint origin rotation from RPY ZYX convention
roll, pitch, yaw = rpy[0], rpy[1], rpy[2]
# Origin rotation from RPY ZYX convention
cr = cs.cos(roll/2.0)
sr = cs.sin(roll/2.0)
cp = cs.cos(pitch/2.0)
sp = cs.sin(pitch/2.0)
cy = cs.cos(yaw/2.0)
sy = cs.sin(yaw/2.0)
# The quaternion associated with the origin rotation
# Note: quat = w + ix + jy + kz
x_or = cy*sr*cp - sy*cr*sp
y_or = cy*cr*sp + sy*sr*cp
z_or = sy*cr*cp - cy*sr*sp
w_or = cy*cr*cp + sy*sr*sp
# Joint origin translation as a dual quaternion
x_ot = 0.5*xyz[0]*w_or + 0.5*xyz[1]*z_or - 0.5*xyz[2]*y_or
y_ot = - 0.5*xyz[0]*z_or + 0.5*xyz[1]*w_or + 0.5*xyz[2]*x_or
z_ot = 0.5*xyz[0]*y_or - 0.5*xyz[1]*x_or + 0.5*xyz[2]*w_or
w_ot = - 0.5*xyz[0]*x_or - 0.5*xyz[1]*y_or - 0.5*xyz[2]*z_or
Q_o = [x_or, y_or, z_or, w_or, x_ot, y_ot, z_ot, w_ot]
# Joint displacement orientation is just identity
x_jr = 0.0
y_jr = 0.0
z_jr = 0.0
w_jr = 1.0
# Joint displacement translation along axis
x_jt = qi*axis[0]/2.0
y_jt = qi*axis[1]/2.0
z_jt = qi*axis[2]/2.0
w_jt = 0.0
Q_j = [x_jr, y_jr, z_jr, w_jr, x_jt, y_jt, z_jt, w_jt]
# Get resulting dual quaternion
return product(Q_o, Q_j) | 898db39d01a7b26daf2657c69c42af12bd8fef60 | 13,028 |
def markov_chain(bot_id, previous_posts):
"""
    Caches are triplets of consecutive words from the source.
    Beginning=True means the triplet was the beginning of a message.
Starts with a random choice from the beginning caches
Then makes random choices from the all_caches set, constructing a markov chain
'randomness' value determined by totalling the number of words that were chosen randomly
"""
bot = TwitterBot.objects.get(id=bot_id)
beginning_caches = bot.twitterpostcache_set.filter(beginning=True)
if not len(beginning_caches):
print "Not enough data"
return
# Randomly choose one of the beginning caches to start with
seed_index = random.randint(0, len(beginning_caches) - 1)
seed_cache = beginning_caches[seed_index]
# Start the chain
new_markov_chain = [seed_cache.word1, seed_cache.word2]
# Add words one by one to complete the markov chain
all_caches = bot.twitterpostcache_set.all()
next_cache = seed_cache
while next_cache:
new_markov_chain.append(next_cache.final_word)
all_next_caches = all_caches.filter(
word1=next_cache.word2, word2=next_cache.final_word
)
if len(all_next_caches):
next_cache = random.choice(all_next_caches)
else:
all_next_caches = all_caches.filter(word1=next_cache.final_word)
if len(all_next_caches):
next_cache = random.choice(all_next_caches)
new_markov_chain.append(next_cache.word2)
else:
next_cache = None
return " ".join(new_markov_chain) | 652690d61c97de420e88f53cfd05c5d59e9229af | 13,029 |
import theano
import theano.tensor as T
import lasagne
from lasagne.regularization import regularize_layer_params, l2
def SurfaceNet_fn_trainVal(N_viewPairs4inference, default_lr, input_cube_size, D_viewPairFeature, \
num_hidden_units, CHANNEL_MEAN, return_train_fn=True, return_val_fn=True, with_weight=True):
"""
    This function only defines the train_fn and the val_fn during the training process.
    There are 2 training processes:
    1. only train the SurfaceNet without weight
    2. train the softmaxWeight with(out) finetuning the SurfaceNet
    For the val_fn when there is only validation, refer to the [TODO].
===================
>> SurfaceNet_fn_trainVal(with_weight = True)
>> SurfaceNet_fn_trainVal(with_weight = False)
"""
train_fn = None
val_fn = None
tensor5D = T.TensorType('float32', (False,)*5)
input_var = tensor5D('X')
output_var = tensor5D('Y')
similFeature_var = T.matrix('similFeature')
net = __weightedAverage_net__(input_var, similFeature_var, input_cube_size, N_viewPairs4inference,\
D_viewPairFeature, num_hidden_units, with_weight)
if return_val_fn:
pred_fuse_val = lasagne.layers.get_output(net["output_fusionNet"], deterministic=True)
# accuracy_val = lasagne.objectives.binary_accuracy(pred_fuse_val, output_var) # in case soft_gt
accuracy_val = __weighted_accuracy__(pred_fuse_val, output_var)
# fuseNet_val_fn = theano.function([input_var, output_var], [accuracy_val,pred_fuse_val])
val_fn_input_var_list = [input_var, similFeature_var, output_var] if with_weight\
else [input_var, output_var]
val_fn_output_var_list = [accuracy_val,pred_fuse_val] if with_weight\
else [accuracy_val,pred_fuse_val]
val_fn = theano.function(val_fn_input_var_list, val_fn_output_var_list)
if return_train_fn:
pred_fuse = lasagne.layers.get_output(net["output_fusionNet"])
output_softmaxWeights_var= lasagne.layers.get_output(net["output_softmaxWeights"]) if with_weight \
else None
#loss = __weighted_MSE__(pred_fuse, output_var, w_for_1 = 0.98) \
loss = __weighted_mult_binary_crossentropy__(pred_fuse, output_var, w_for_1 = 0.96) \
            + regularize_layer_params(net["output_fusionNet"], l2) * 1e-4
aggregated_loss = lasagne.objectives.aggregate(loss)
if not params.__layer_range_tuple_2_update is None:
updates = __updates__(net=net, cost=aggregated_loss, layer_range_tuple_2_update=params.__layer_range_tuple_2_update, \
default_lr=default_lr, update_algorithm='nesterov_momentum')
else:
params = lasagne.layers.get_all_params(net["output_fusionNet"], trainable=True)
updates = lasagne.updates.nesterov_momentum(aggregated_loss, params, learning_rate=params.__lr)
# accuracy = lasagne.objectives.binary_accuracy(pred_fuse, output_var) # in case soft_gt
accuracy = __weighted_accuracy__(pred_fuse, output_var)
train_fn_input_var_list = [input_var, similFeature_var, output_var] if with_weight \
else [input_var, output_var]
train_fn_output_var_list = [loss,accuracy, pred_fuse, output_softmaxWeights_var] if with_weight \
else [loss,accuracy, pred_fuse]
train_fn = theano.function(train_fn_input_var_list, train_fn_output_var_list, updates=updates)
return net, train_fn, val_fn | 47734ee865738706a68bc62a1d0c51db3b1b4c46 | 13,030 |
def _assign_data_radial(root, sweep="sweep_1"):
"""Assign from CfRadial1 data structure.
Parameters
----------
root : xarray.Dataset
Dataset of CfRadial1 file
sweep : str, optional
Sweep name to extract, default to first sweep. If None, all sweeps are
extracted into a list.
"""
var = root.variables.keys()
remove_root = var ^ root_vars
remove_root &= var
root1 = root.drop_vars(remove_root).rename({"fixed_angle": "sweep_fixed_angle"})
sweep_group_name = []
for i in range(root1.dims["sweep"]):
sweep_group_name.append(f"sweep_{i + 1}")
# keep all vars for now
# keep_vars = sweep_vars1 | sweep_vars2 | sweep_vars3
# remove_vars = var ^ keep_vars
# remove_vars &= var
remove_vars = {}
data = root.drop_vars(remove_vars)
data.attrs = {}
start_idx = data.sweep_start_ray_index.values
end_idx = data.sweep_end_ray_index.values
data = data.drop_vars({"sweep_start_ray_index", "sweep_end_ray_index"})
sweeps = []
for i, sw in enumerate(sweep_group_name):
if sweep is not None and sweep != sw:
continue
tslice = slice(start_idx[i], end_idx[i] + 1)
ds = data.isel(time=tslice, sweep=slice(i, i + 1)).squeeze("sweep")
ds.sweep_mode.load()
sweep_mode = ds.sweep_mode.item().decode()
dim0 = "elevation" if sweep_mode == "rhi" else "azimuth"
ds = ds.swap_dims({"time": dim0})
ds = ds.rename({"time": "rtime"})
ds.attrs["fixed_angle"] = np.round(ds.fixed_angle.item(), decimals=1)
time = ds.rtime[0].reset_coords(drop=True)
# get and delete "comment" attribute for time variable
key = [key for key in time.attrs.keys() if "comment" in key]
for k in key:
del time.attrs[k]
coords = {
"longitude": root1.longitude,
"latitude": root1.latitude,
"altitude": root1.altitude,
"azimuth": ds.azimuth,
"elevation": ds.elevation,
"sweep_mode": sweep_mode,
"time": time,
}
ds = ds.assign_coords(**coords)
sweeps.append(ds)
return sweeps | 7734917c8b4aced4812896e3269981c984241202 | 13,031 |
def get_memory_usage():
"""This method returns the percentage of total memory used in this machine"""
stats = get_memstats()
mfree = float(stats['buffers']+stats['cached']+stats['free'])
return 1-(mfree/stats['total']) | 27bcf8866b6b57253acf6a282265949dcfd3af61 | 13,032 |
def gamma0(R, reg=1e-13, symmetrize=True):
"""Integrals over the edges of a triangle called gamma_0 (line charge potentials).
**NOTE: MAY NOT BE VERY PRECISE FOR POINTS DIRECTLY AT TRIANGLE
EDGES.**
Parameters
----------
R : (N, 3, 3) array of points (Neval, Nverts, xyz)
Returns
-------
res: array (Neval, Nverts)
The analytic integrals for each vertex/edge
"""
edges = np.roll(R[0], 1, -2) - np.roll(R[0], 2, -2)
# dotprods1 = np.sum(np.roll(R, 1, -2)*edges, axis=-1)
# dotprods2 = np.sum(np.roll(R, 2, -2)*edges, axis=-1)
dotprods1 = np.einsum("...i,...i", np.roll(R, 1, -2), edges)
dotprods2 = np.einsum("...i,...i", np.roll(R, 2, -2), edges)
en = norm(edges)
del edges
n = norm(R)
# Regularize s.t. neither the denominator or the numerator can be zero
# Avoid numerical issues directly at the edge
nn1 = np.roll(n, 2, -1) * en
nn2 = np.roll(n, 1, -1) * en
res = np.log((nn1 + dotprods2 + reg) / (nn2 + dotprods1 + reg))
# Symmetrize the result since on the negative extension of the edge
# there's division of two small values resulting numerical instabilities
# (also incompatible with adding the reg value)
if symmetrize:
res2 = -np.log((nn1 - dotprods2 + reg) / (nn2 - dotprods1 + reg))
res = np.where(dotprods1 + dotprods2 > 0, res, res2)
res /= en
return -res | c3ed9c624a5f14922d3c76a3eb2ae2f616f82cac | 13,033 |
def is_even(x):
""" True if obj is even. """
return (x % 2) == 0 | f19563063515eb4d39b8b607cf68f6f188af409e | 13,034 |
def get_http_proxy():
"""
Get http_proxy and https_proxy from environment variables.
Username and password is not supported now.
"""
host = conf.get_httpproxy_host()
port = conf.get_httpproxy_port()
return host, port | f04dc8580d9fdd3d867c5b28fa3694fe82a6739a | 13,035 |
def get_parser_udf(
structural=True, # structural information
blacklist=["style", "script"], # ignore tag types, default: style, script
flatten=["span", "br"], # flatten tag types, default: span, br
language="en",
lingual=True, # lingual information
lingual_parser=None,
strip=True,
replacements=[("[\u2010\u2011\u2012\u2013\u2014\u2212]", "-")],
tabular=True, # tabular information
visual=False, # visual information
visual_parser=None,
):
"""Return an instance of ParserUDF."""
parser_udf = ParserUDF(
structural=structural,
blacklist=blacklist,
flatten=flatten,
lingual=lingual,
lingual_parser=lingual_parser,
strip=strip,
replacements=replacements,
tabular=tabular,
visual=visual,
visual_parser=visual_parser,
language=language,
)
return parser_udf | cf12b36fe9219aabfd746b2ad1f1f39e62ad7fe9 | 13,036 |
import cv2
import numpy as np

def img_preprocess2(image, target_shape, bboxes=None, correct_box=False):
    """
    Convert to RGB -> resize (without changing the original aspect ratio) -> normalize,
    and optionally correct the bounding boxes accordingly.
    :param image: the image to process
    :param target_shape: the desired image shape after processing, stored as (h, w)
    :return: the processed image, with shape target_shape
    """
h_target, w_target = target_shape
h_org, w_org, _ = image.shape
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
resize_ratio = min(1.0 * w_target / w_org, 1.0 * h_target / h_org)
resize_w = int(resize_ratio * w_org)
resize_h = int(resize_ratio * h_org)
image_resized = cv2.resize(image, (resize_w, resize_h))
image_paded = np.full((h_target, w_target, 3), 128.0)
dw = int((w_target - resize_w) / 2)
dh = int((h_target - resize_h) / 2)
image_paded[dh:resize_h+dh, dw:resize_w+dw,:] = image_resized
image = image_paded / 255.0
image = normalize(image)
if correct_box:
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] * resize_ratio + dw
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] * resize_ratio + dh
return image, bboxes
return image,resize_ratio,dw,dh | e950e0e8cca4f31449feb12203ee9a9ef74baa8c | 13,037 |
import pandas as pd

def pivot_timeseries(df, var_name, timezone=None):
"""
Pivot timeseries DataFrame and shift UTC by given timezone offset
Parameters
----------
df : pandas.DataFrame
Timeseries DataFrame to be pivoted with year, month, hour columns
var_name : str
Name for new column describing data
timezone : int, optional
UTC offset to apply to DatetimeIndex, by default None
Returns
-------
pandas.DataFrame
Seaborn style long table with source, year, month, hour columns
"""
sns_df = []
for name, col in df.iteritems():
col = col.to_frame()
col.columns = [var_name]
col['source'] = name
col['year'] = col.index.year
col['month'] = col.index.month
col['hour'] = col.index.hour
if timezone is not None:
td = pd.to_timedelta('{:}h'.format(timezone))
col['local_hour'] = (col.index + td).hour
sns_df.append(col)
return pd.concat(sns_df) | 914ba75929caacd16da5170e98a95f2135a1682f | 13,038 |
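A usage sketch of pivot_timeseries() above with a two-column hourly series (note that the helper calls DataFrame.iteritems(), so a pandas version older than 2.0 may be required):

import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=48, freq='H')
df = pd.DataFrame({'site_a': np.random.rand(48),
                   'site_b': np.random.rand(48)}, index=idx)
long_df = pivot_timeseries(df, 'cf', timezone=-5)
print(long_df.columns.tolist())
# ['cf', 'source', 'year', 'month', 'hour', 'local_hour']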
def _preprocess_stored_query(query_text, config):
"""Inject some default code into each stored query."""
ws_id_text = " LET ws_ids = @ws_ids " if 'ws_ids' in query_text else ""
return '\n'.join([
config.get('query_prefix', ''),
ws_id_text,
query_text
]) | bc63391724773cd4a60f3dc9686d243d6d733b40 | 13,039 |
def handler_request_exception(response: Response):
"""
Args:
response (Response):
"""
status_code = response.status_code
data = response.json()
if "details" in data and len(data.get("details")) > 0:
data = data.get("details")[0]
kwargs = {
"error_code": data.get("error_code")
or data.get("error")
or str(data.get("status_code")),
"description": data.get("description_detail")
or data.get("description")
or data.get("error_description")
or data.get("message"),
"response": response,
}
message = "{} {} ({})".format(
kwargs.get("error_code"),
kwargs.get("description"),
response.url,
)
if status_code == 400:
return errors.BadRequest(message, **kwargs)
elif status_code == 402:
return errors.BusinessError(message, **kwargs)
elif status_code == 404:
return errors.NotFound(message, **kwargs)
elif status_code == 500:
return errors.ServerError(message, **kwargs)
elif status_code == 503:
return errors.ServiceUnavailable(message, **kwargs)
elif status_code == 504:
return errors.GatewayTimeout(message, **kwargs)
else:
return errors.RequestError(message, **kwargs) | 8847b4a1fd6f90d6e25d0ef8dc33a32e38e81617 | 13,040 |
import abc
import sys
def n_real_inputs():
"""This gives the number of 'real' inputs. This is determined by trimming away inputs that
have no connection to the logic. This is done by the ABC alias 'trm', which changes the current
circuit. In some applications we do not want to change the circuit, but just to know how may inputs
would go away if we did this. So the current circuit is saved and then restored afterwards."""
## abc('w %s_savetempreal.aig; logic; trim; st ;addpi'%f_name)
abc('w %s_savetempreal.aig'%f_name)
with redirect.redirect( redirect.null_file, sys.stdout ):
## with redirect.redirect( redirect.null_file, sys.stderr ):
reparam()
n = n_pis()
abc('r %s_savetempreal.aig'%f_name)
return n | ed4f87bc9a380df4d650019b5c60a836eeeeea30 | 13,041 |
import os
def get_stats_for_dictionary_file(dictionary_path):
"""Calculate size of manual and recommended sections of given dictionary."""
if not dictionary_path or not os.path.exists(dictionary_path):
return 0, 0
dictionary_content = utils.read_data_from_file(
dictionary_path, eval_data=False)
dictionaries = dictionary_content.split(RECOMMENDED_DICTIONARY_HEADER)
# If there are any elements before RECOMMENDED_DICTIONARY_HEADER, those are
# from "manual" dictionary stored in the repository.
manual_dictionary_size = get_dictionary_size(dictionaries[0])
if len(dictionaries) < 2:
return manual_dictionary_size, 0
# Any elements after RECOMMENDED_DICTIONARY_HEADER are recommended dictionary.
recommended_dictionary_size = get_dictionary_size(dictionaries[1])
return manual_dictionary_size, recommended_dictionary_size | f149e4cfcde6b61345b15d773bca8d4f7a32410b | 13,042 |
import numpy as np

def mlrPredict(W, data):
"""
mlrObjFunction predicts the label of data given the data and parameter W
of Logistic Regression
Input:
W: the matrix of weight of size (D + 1) x 10. Each column is the weight
vector of a Logistic Regression classifier.
X: the data matrix of size N x D
Output:
label: vector of size N x 1 representing the predicted label of
corresponding feature vector given in data matrix
"""
label = np.zeros((data.shape[0], 1))
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
"""
Add the bias term at the beginning
"""
n_data = data.shape[0]
bias = np.ones((n_data,1))
"""
Concatenate the bias to the training data
"""
data = np.concatenate( (bias,data),axis=1)
outputs = np.zeros([n_data,W.shape[1]],dtype=float)
outputs = np.dot(data,W)
#print (outputs[0])
i = 0
for i in range(n_data):
label[i][0] = np.argmax(outputs[i],axis=0)
return label | 18bf0c86195cf144eb63f5b6c440f92c57d2fe9b | 13,043 |
from flask import Flask
from .error_pages import add_error_pages
from .global_variables import init_global
from .home import home_page
from .rules import rule_page
from .create_game import create_game_page, root_url_games
from .global_stats import global_stats_page, page_url
from .utils.add_dash_table import add_dash as add_dash_table
from .utils.add_dash_games import add_dash_games
from .admin import admin_page
def create_app():
"""Create Flask application."""
app = Flask(__name__, instance_relative_config=False)
app = add_error_pages(app)
app.config.from_object("config")
with app.app_context():
init_global()
# # Import parts of our application
bootstrap = Bootstrap()
app.register_blueprint(home_page)
Markdown(app)
app.register_blueprint(rule_page)
app.register_blueprint(create_game_page)
app.register_blueprint(global_stats_page)
bootstrap.init_app(app)
app = add_dash_table(app, page_url)
app = add_dash_games(app, root_url_games)
app.register_blueprint(admin_page)
return app | 665ab7beda7ff79e4b81c22d5f28409a31dc896f | 13,044 |
def process_integration(request, case_id):
"""Method to process case."""
try:
case = OVCBasicCRS.objects.get(case_id=case_id, is_void=False)
county_code = int(case.county)
const_code = int(case.constituency)
county_id, const_id = 0, 0
crs_id = str(case_id).replace('-', '')
user_counties, user_geos = get_person_geo(request)
# Get person orgs
ou_ids = get_person_orgs(request)
if request.method == 'POST':
response = handle_integration(request, case, case_id)
print(response)
check_fields = ['sex_id', 'case_category_id', 'case_reporter_id',
'family_status_id', 'household_economics',
'risk_level_id', 'mental_condition_id',
'perpetrator_status_id', 'other_condition_id',
'physical_condition_id', 'yesno_id']
vals = get_dict(field_name=check_fields)
category = OVCBasicCategory.objects.filter(
case_id=case_id, is_void=False)
person = OVCBasicPerson.objects.filter(case_id=case_id, is_void=False)
# Attached Geos and Org Units for the user
# ou_ids = []
org_id = request.session.get('ou_primary', 0)
ou_ids.append(org_id)
ou_attached = request.session.get('ou_attached', 0)
user_level = request.session.get('user_level', 0)
user_type = request.session.get('user_type', 0)
print(org_id, ou_attached, user_level, user_type)
# person_id = request.user.reg_person_id
county = SetupGeography.objects.filter(
area_code=county_code, area_type_id='GPRV')
for c in county:
county_id = c.area_id
# Get constituency
constituency = SetupGeography.objects.filter(
area_code=const_code, area_type_id='GDIS')
for c in constituency:
const_id = c.area_id
ous = RegOrgUnit.objects.filter(is_void=False)
counties = SetupGeography.objects.filter(area_type_id='GPRV')
if user_counties:
counties = counties.filter(area_id__in=user_counties)
if request.user.is_superuser:
all_ou_ids = ['TNGD']
ous = ous.filter(org_unit_type_id__in=all_ou_ids)
geos = SetupGeography.objects.filter(
area_type_id='GDIS', parent_area_id=county_id)
else:
ous = ous.filter(id__in=ou_ids)
geos = SetupGeography.objects.filter(
area_type_id='GDIS', parent_area_id=county_id)
return render(request, 'management/integration_process.html',
{'form': {}, 'case': case, 'vals': vals,
'category': category, 'person': person,
'geos': geos, 'ous': ous, 'counties': counties,
'county_id': county_id, 'const_id': const_id,
'crs_id': crs_id})
except Exception as e:
print('Error processing integration - %s' % (e))
else:
pass | bd383b624a072fec634bc28bbba71c2d635eeac2 | 13,045 |
import numpy as np

def get_aabb(pts):
"""axis-aligned minimum bounding box"""
x, y = np.floor(pts.min(axis=0)).astype(int)
w, h = np.ceil(pts.ptp(axis=0)).astype(int)
return x, y, w, h | 68cffaf0b1cacf702a2dd3c6c22af6323d220e93 | 13,046 |
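A short example of get_aabb() above (assumes NumPy imported as np, and a NumPy version that still provides ndarray.ptp, i.e. pre-2.0):

pts = np.array([[2.3, 4.7],
                [5.1, 9.2],
                [3.0, 6.5]])
print(get_aabb(pts))  # (2, 4, 3, 5): integer origin (x, y) plus width and height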
from sympy import S
def _solve(f, *symbols, **flags):
"""Return a checked solution for f in terms of one or more of the
symbols. A list should be returned except for the case when a linear
undetermined-coefficients equation is encountered (in which case
a dictionary is returned).
If no method is implemented to solve the equation, a NotImplementedError
will be raised. In the case that conversion of an expression to a Poly
gives None a ValueError will be raised."""
not_impl_msg = "No algorithms are implemented to solve equation %s"
if len(symbols) != 1:
soln = None
free = f.free_symbols
ex = free - set(symbols)
if len(ex) != 1:
ind, dep = f.as_independent(*symbols)
ex = ind.free_symbols & dep.free_symbols
if len(ex) == 1:
ex = ex.pop()
try:
# soln may come back as dict, list of dicts or tuples, or
# tuple of symbol list and set of solution tuples
soln = solve_undetermined_coeffs(f, symbols, ex, **flags)
except NotImplementedError:
pass
if soln:
if flags.get('simplify', True):
if isinstance(soln, dict):
for k in soln:
soln[k] = simplify(soln[k])
elif isinstance(soln, list):
if isinstance(soln[0], dict):
for d in soln:
for k in d:
d[k] = simplify(d[k])
elif isinstance(soln[0], tuple):
soln = [tuple(simplify(i) for i in j) for j in soln]
else:
raise TypeError('unrecognized args in list')
elif isinstance(soln, tuple):
sym, sols = soln
soln = sym, {tuple(simplify(i) for i in j) for j in sols}
else:
raise TypeError('unrecognized solution type')
return soln
# find first successful solution
failed = []
got_s = set([])
result = []
for s in symbols:
xi, v = solve_linear(f, symbols=[s])
if xi == s:
# no need to check but we should simplify if desired
if flags.get('simplify', True):
v = simplify(v)
vfree = v.free_symbols
if got_s and any([ss in vfree for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(xi)
result.append({xi: v})
elif xi: # there might be a non-linear solution if xi is not 0
failed.append(s)
if not failed:
return result
for s in failed:
try:
soln = _solve(f, s, **flags)
for sol in soln:
if got_s and any([ss in sol.free_symbols for ss in got_s]):
# sol depends on previously solved symbols: discard it
continue
got_s.add(s)
result.append({s: sol})
except NotImplementedError:
continue
if got_s:
return result
else:
raise NotImplementedError(not_impl_msg % f)
symbol = symbols[0]
# /!\ capture this flag then set it to False so that no checking in
# recursive calls will be done; only the final answer is checked
flags['check'] = checkdens = check = flags.pop('check', True)
# build up solutions if f is a Mul
if f.is_Mul:
result = set()
for m in f.args:
if m in set([S.NegativeInfinity, S.ComplexInfinity, S.Infinity]):
result = set()
break
soln = _solve(m, symbol, **flags)
result.update(set(soln))
result = list(result)
if check:
# all solutions have been checked but now we must
# check that the solutions do not set denominators
# in any factor to zero
dens = flags.get('_denominators', _simple_dens(f, symbols))
result = [s for s in result if
all(not checksol(den, {symbol: s}, **flags) for den in
dens)]
# set flags for quick exit at end; solutions for each
# factor were already checked and simplified
check = False
flags['simplify'] = False
elif f.is_Piecewise:
result = set()
for i, (expr, cond) in enumerate(f.args):
if expr.is_zero:
raise NotImplementedError(
'solve cannot represent interval solutions')
candidates = _solve(expr, symbol, **flags)
# the explicit condition for this expr is the current cond
# and none of the previous conditions
args = [~c for _, c in f.args[:i]] + [cond]
cond = And(*args)
for candidate in candidates:
if candidate in result:
# an unconditional value was already there
continue
try:
v = cond.subs(symbol, candidate)
                    _eval_simplify = getattr(v, '_eval_simplify', None)
                    if _eval_simplify is not None:
                        # unconditionally take the simplification of v
                        v = _eval_simplify(ratio=2, measure=lambda x: 1)
except TypeError:
# incompatible type with condition(s)
continue
if v == False:
continue
result.add(Piecewise(
(candidate, v),
(S.NaN, True)))
# set flags for quick exit at end; solutions for each
# piece were already checked and simplified
check = False
flags['simplify'] = False
else:
# first see if it really depends on symbol and whether there
# is only a linear solution
f_num, sol = solve_linear(f, symbols=symbols)
if f_num is S.Zero or sol is S.NaN:
return []
elif f_num.is_Symbol:
# no need to check but simplify if desired
if flags.get('simplify', True):
sol = simplify(sol)
return [sol]
result = False # no solution was obtained
msg = '' # there is no failure message
# Poly is generally robust enough to convert anything to
# a polynomial and tell us the different generators that it
# contains, so we will inspect the generators identified by
# polys to figure out what to do.
# try to identify a single generator that will allow us to solve this
# as a polynomial, followed (perhaps) by a change of variables if the
# generator is not a symbol
try:
poly = Poly(f_num)
if poly is None:
raise ValueError('could not convert %s to Poly' % f_num)
except GeneratorsNeeded:
simplified_f = simplify(f_num)
if simplified_f != f_num:
return _solve(simplified_f, symbol, **flags)
raise ValueError('expression appears to be a constant')
gens = [g for g in poly.gens if g.has(symbol)]
def _as_base_q(x):
"""Return (b**e, q) for x = b**(p*e/q) where p/q is the leading
Rational of the exponent of x, e.g. exp(-2*x/3) -> (exp(x), 3)
"""
b, e = x.as_base_exp()
if e.is_Rational:
return b, e.q
if not e.is_Mul:
return x, 1
c, ee = e.as_coeff_Mul()
if c.is_Rational and c is not S.One: # c could be a Float
return b**ee, c.q
return x, 1
if len(gens) > 1:
# If there is more than one generator, it could be that the
# generators have the same base but different powers, e.g.
# >>> Poly(exp(x) + 1/exp(x))
# Poly(exp(-x) + exp(x), exp(-x), exp(x), domain='ZZ')
#
# If unrad was not disabled then there should be no rational
# exponents appearing as in
# >>> Poly(sqrt(x) + sqrt(sqrt(x)))
# Poly(sqrt(x) + x**(1/4), sqrt(x), x**(1/4), domain='ZZ')
bases, qs = list(zip(*[_as_base_q(g) for g in gens]))
bases = set(bases)
if len(bases) > 1 or not all(q == 1 for q in qs):
funcs = set(b for b in bases if b.is_Function)
trig = set([_ for _ in funcs if
isinstance(_, TrigonometricFunction)])
other = funcs - trig
if not other and len(funcs.intersection(trig)) > 1:
newf = TR1(f_num).rewrite(tan)
if newf != f_num:
# don't check the rewritten form --check
# solutions in the un-rewritten form below
flags['check'] = False
result = _solve(newf, symbol, **flags)
flags['check'] = check
# just a simple case - see if replacement of single function
# clears all symbol-dependent functions, e.g.
# log(x) - log(log(x) - 1) - 3 can be solved even though it has
# two generators.
if result is False and funcs:
funcs = list(ordered(funcs)) # put shallowest function first
f1 = funcs[0]
t = Dummy('t')
# perform the substitution
ftry = f_num.subs(f1, t)
# if no Functions left, we can proceed with usual solve
if not ftry.has(symbol):
cv_sols = _solve(ftry, t, **flags)
cv_inv = _solve(t - f1, symbol, **flags)[0]
sols = list()
for sol in cv_sols:
sols.append(cv_inv.subs(t, sol))
result = list(ordered(sols))
if result is False:
msg = 'multiple generators %s' % gens
else:
# e.g. case where gens are exp(x), exp(-x)
u = bases.pop()
t = Dummy('t')
inv = _solve(u - t, symbol, **flags)
if isinstance(u, (Pow, exp)):
# this will be resolved by factor in _tsolve but we might
# as well try a simple expansion here to get things in
# order so something like the following will work now without
# having to factor:
#
# >>> eq = (exp(I*(-x-2))+exp(I*(x+2)))
# >>> eq.subs(exp(x),y) # fails
# exp(I*(-x - 2)) + exp(I*(x + 2))
# >>> eq.expand().subs(exp(x),y) # works
# y**I*exp(2*I) + y**(-I)*exp(-2*I)
def _expand(p):
b, e = p.as_base_exp()
e = expand_mul(e)
return expand_power_exp(b**e)
ftry = f_num.replace(
lambda w: w.is_Pow or isinstance(w, exp),
_expand).subs(u, t)
if not ftry.has(symbol):
soln = _solve(ftry, t, **flags)
sols = list()
for sol in soln:
for i in inv:
sols.append(i.subs(t, sol))
result = list(ordered(sols))
elif len(gens) == 1:
# There is only one generator that we are interested in, but
# there may have been more than one generator identified by
# polys (e.g. for symbols other than the one we are interested
# in) so recast the poly in terms of our generator of interest.
# Also use composite=True with f_num since Poly won't update
# poly as documented in issue 8810.
poly = Poly(f_num, gens[0], composite=True)
# if we aren't on the tsolve-pass, use roots
if not flags.pop('tsolve', False):
soln = None
deg = poly.degree()
flags['tsolve'] = True
solvers = {k: flags.get(k, True) for k in
('cubics', 'quartics', 'quintics')}
soln = roots(poly, **solvers)
if sum(soln.values()) < deg:
# e.g. roots(32*x**5 + 400*x**4 + 2032*x**3 +
# 5000*x**2 + 6250*x + 3189) -> {}
# so all_roots is used and RootOf instances are
# returned *unless* the system is multivariate
# or high-order EX domain.
try:
soln = poly.all_roots()
except NotImplementedError:
if not flags.get('incomplete', True):
raise NotImplementedError(
filldedent('''
Neither high-order multivariate polynomials
nor sorting of EX-domain polynomials is supported.
If you want to see any results, pass keyword incomplete=True to
solve; to see numerical values of roots
for univariate expressions, use nroots.
'''))
else:
pass
else:
soln = list(soln.keys())
if soln is not None:
u = poly.gen
if u != symbol:
try:
t = Dummy('t')
iv = _solve(u - t, symbol, **flags)
soln = list(ordered({i.subs(t, s) for i in iv for s in soln}))
except NotImplementedError:
# perhaps _tsolve can handle f_num
soln = None
else:
check = False # only dens need to be checked
if soln is not None:
if len(soln) > 2:
# if the flag wasn't set then unset it since high-order
# results are quite long. Perhaps one could base this
# decision on a certain critical length of the
# roots. In addition, wester test M2 has an expression
# whose roots can be shown to be real with the
# unsimplified form of the solution whereas only one of
# the simplified forms appears to be real.
flags['simplify'] = flags.get('simplify', False)
result = soln
# fallback if above fails
# -----------------------
if result is False:
# try unrad
if flags.pop('_unrad', True):
try:
u = unrad(f_num, symbol)
except (ValueError, NotImplementedError):
u = False
if u:
eq, cov = u
if cov:
isym, ieq = cov
inv = _solve(ieq, symbol, **flags)[0]
rv = {inv.subs(isym, xi) for xi in _solve(eq, isym, **flags)}
else:
try:
rv = set(_solve(eq, symbol, **flags))
except NotImplementedError:
rv = None
if rv is not None:
result = list(ordered(rv))
# if the flag wasn't set then unset it since unrad results
# can be quite long or of very high order
flags['simplify'] = flags.get('simplify', False)
else:
pass # for coverage
# try _tsolve
if result is False:
flags.pop('tsolve', None) # allow tsolve to be used on next pass
try:
soln = _tsolve(f_num, symbol, **flags)
if soln is not None:
result = soln
except PolynomialError:
pass
# ----------- end of fallback ----------------------------
if result is False:
raise NotImplementedError('\n'.join([msg, not_impl_msg % f]))
if flags.get('simplify', True):
result = list(map(simplify, result))
# we just simplified the solution so we now set the flag to
# False so the simplification doesn't happen again in checksol()
flags['simplify'] = False
if checkdens:
# reject any result that makes any denom. affirmatively 0;
# if in doubt, keep it
dens = _simple_dens(f, symbols)
result = [s for s in result if
all(not checksol(d, {symbol: s}, **flags)
for d in dens)]
if check:
# keep only results if the check is not False
result = [r for r in result if
checksol(f_num, {symbol: r}, **flags) is not False]
return result | af2c8de5f2ee7cdfc41856ffe438c2bf0fcaee78 | 13,047 |
import numpy
def get_object_ratio(obj):
"""Calculate the ratio of the object's size in comparison to the whole image
:param obj: the binarized object image
:type obj: numpy.ndarray
:returns: float -- the ratio
"""
return numpy.count_nonzero(obj) / float(obj.size) | fd18e460be32037c73fe75c8fa5eef5ba6c1c217 | 13,048 |
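# Illustrative usage of get_object_ratio above (values are made up): one nonzero
# pixel in a 2x2 binarized object gives a ratio of 0.25.
example_obj = numpy.array([[0, 1], [0, 0]])
assert get_object_ratio(example_obj) == 0.25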
def get_region(ds, region):
""" Return a region from a provided DataArray or Dataset
Parameters
----------
region_mask: xarray DataArray or list
Boolean mask of the region to keep
"""
return ds.where(region, drop=True) | 102b672f8040b722ec346435775cba1056485ae2 | 13,049 |
import numpy as np
import pandas as pd
# Note: `formations` and `ss` (subsurface) are assumed to be defined at module level.
def read_borehole_file(path, fix_df=True):
    """Return the df with the depths for each borehole in a single row,
    instead of each chunk being a new row."""
df = pd.read_table(path,
skiprows=41,
header=None,
sep='\t',
)
df.rename(columns={1: 'x', 2: 'y', 3: 'name',
4: 'num', 5: 'z', 6: 'year', 10: 'altitude'},
inplace=True)
if fix_df:
df['name'] = df['name'] + df['num']
n_fixed_columns = 11
n_segments_per_well = 15
n_wells = df.shape[0]
# Repeat fixed rows (collar name and so)
df_fixed = df.iloc[:, :n_fixed_columns]
df_fixed = df_fixed.loc[df_fixed.index.repeat(
n_segments_per_well)]
# Add a formation to each segment
        tiled_formations = np.tile(formations, n_wells)
df_fixed['formation'] = tiled_formations
# Add the segments base to the df
df_bottoms = df.iloc[:,
n_fixed_columns:n_fixed_columns + n_segments_per_well]
df_fixed['base'] = df_bottoms.values.reshape(-1, 1, order='C')
# Adding tops column from collar and base
df_fixed = ss.io.wells.add_tops_from_base_and_altitude_in_place(
df_fixed,
'name',
'base',
'altitude'
)
# Fixing boreholes that have the base higher than the top
top_base_error = df_fixed["top"] > df_fixed["base"]
df_fixed["base"][top_base_error] = df_fixed["top"] + 0.01
# Add real coord
df_fixed['z'] = df_fixed['altitude'] - df_fixed['md']
df = df_fixed
return df | 50c3df5a3d2aae2a0f58b555380efb9fd63a90e1 | 13,050 |
def cpl_parse(path):
""" Parse DCP CPL """
cpl = generic_parse(
path, "CompositionPlaylist",
("Reel", "ExtensionMetadata", "PropertyList"))
if cpl:
cpl_node = cpl['Info']['CompositionPlaylist']
cpl_dcnc_parse(cpl_node)
cpl_reels_parse(cpl_node)
return cpl | a025bf82bdeac13d6c7cfbca95d667f2ae58c8f9 | 13,051 |
from flask import make_response, render_template
def notfound():
"""Serve 404 template."""
return make_response(render_template('404.html'), 404) | d81d794bad67c8128b8f6e55dbc5383bda7a1405 | 13,052 |
from typing import Tuple
from typing import List
def read_network(file: str) -> Tuple[int, int, List[int]]:
"""
Read a Boolean network from a text file:
Line 1: number of state variables
Line 2: number of control inputs
Line 3: transition matrix of the network (linear representation of a logical matrix)
:param file: a text file
:return: (n, m, Lm), where
n: number of state variables
m: number of control inputs
Lm: network transition matrix
"""
with open(file, 'r') as f:
n = int(f.readline().strip())
m = int(f.readline().strip())
N = 2 ** n
M = 2 ** m
line = f.readline().strip()
assert line, f'network transition matrix must be provided!'
numbers = line.split()
assert len(numbers) == M * N, f'The transition matrix must have {M * N} columns'
L = [int(num) for num in numbers]
for i in L:
assert 1 <= i <= N, f'All integers in the network transition matrix must be in range [1, {N}]'
return n, m, L | 217bd86f8d00cf27cf80d1a199b76b023a374f10 | 13,053 |
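# Illustrative usage of read_network above (the file content is a made-up example):
# with n = 1 state variable and m = 1 control input, N = 2 and M = 2, so the third
# line must contain M * N = 4 integers, each in the range [1, 2].
import os, tempfile
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as tmp:
    tmp.write('1\n1\n1 2 2 1\n')
print(read_network(tmp.name))  # -> (1, 1, [1, 2, 2, 1])
os.remove(tmp.name)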
import os
import fnmatch
import shutil
def copy(srcpath, destpath, pattern=None, pred=_def_copy_pred):
"""
Copies all files in the source path to the specified destination path. The
source path can be a file, in which case that file will be copied as long as
it matches the specified pattern.
If the source path is a directory, all directories in it will be
recursed and any files matching the specified pattern will be copied.
:param srcpath: Source path to copy files from.
:param destpath: Destination path to copy files to.
:param pattern: Pattern to match filenames against.
:param pred: Predicate to decide which files to copy/overwrite.
:return: Number of files copied.
"""
if os.path.isfile(srcpath):
if pattern and not fnmatch.fnmatch(srcpath, pattern):
return 0
        if pred and not pred(srcpath, destpath):
return 0
path, filename = os.path.split(destpath)
if not os.path.exists(path):
# Make sure all directories needed to copy the file exist.
create_dir(path)
shutil.copyfile(srcpath, destpath)
return 1
num_files_copied = 0
for s in os.listdir(srcpath):
src = os.path.join(srcpath , s)
dest = os.path.join(destpath, s)
        num_files_copied += copy(src, dest, pattern, pred)
return num_files_copied | daa4487e77642319f04a0556c238f84e47f513cf | 13,054 |
def bundle_products_list(request,id):
"""
    This view renders the Bundle Products list page."""
bundle = get_object_or_404(Bundle, bundle_id=id)
bundleProd = BundleProducts.objects.filter(bundle=id)
stocks = Stock.objects.all()
context = {
"title": "Bundle Products List",
"bundle": bundle,
"bundleproducts": bundleProd,
"stocks": stocks
}
return render(request, 'bundle_products.html',context) | 3afef4fdd2886300bc2fbda306bc05b499a47d0f | 13,055 |
def rot_x(theta):
"""
Rotation matrix around X axis
:param theta: Rotation angle in radians, right-handed
:return: Rotation matrix in form of (3,3) 2D numpy array
"""
return rot_axis(0,theta) | d4a892ed5ede6ffd2353b0121bec640e81c23ec7 | 13,056 |
def ValidateEntryPointNameOrRaise(entry_point):
"""Checks if a entry point name provided by user is valid.
Args:
entry_point: Entry point name provided by user.
Returns:
Entry point name.
Raises:
ArgumentTypeError: If the entry point name provided by user is not valid.
"""
return _ValidateArgumentByRegexOrRaise(entry_point, _ENTRY_POINT_NAME_RE,
_ENTRY_POINT_NAME_ERROR) | 7175e63562b04aba430044e0898db7368b68fb23 | 13,057 |
def park2_4_z(z, x):
""" Computes the Parkd function. """
y1 = x[0][0]
y2 = x[0][1]
chooser = x[1]
y3 = (x[2] - 103.0) / 91.0
y4 = x[3] + 10.0
x = [y1, y2, y3, y4]
if chooser == 'rabbit':
ret = sub_park_1(x)
elif chooser == 'dog':
ret = sub_park_2(x)
elif chooser == 'gerbil':
ret = sub_park_3(x)
elif chooser in ['hamster', 'ferret']:
ret = sub_park_4(x)
return ret * np.exp(z - 1) | 458ba79ada010b3c93419719b68f7a953908b184 | 13,058 |
import re
def get_string_coords(line):
"""return a list of string positions (tuple (start, end)) in the line
"""
result = []
for match in re.finditer(STRING_RGX, line):
result.append( (match.start(), match.end()) )
return result | a8fd7443ce242ce4f84196947fb4d82c2ff0d20e | 13,059 |
def array_from_pixbuf(p):
"""Convert from GdkPixbuf to numpy array"
Args:
p (GdkPixbuf): The GdkPixbuf provided from some window handle
Returns:
ndarray: The numpy array arranged for the pixels in height, width, RGBA order
"""
w,h,c,r=(p.get_width(), p.get_height(), p.get_n_channels(), p.get_rowstride())
assert p.get_colorspace() == GdkPixbuf.Colorspace.RGB
assert p.get_bits_per_sample() == 8
if p.get_has_alpha():
assert c == 4
else:
assert c == 3
assert r >= w * c
a=np.frombuffer(p.get_pixels(),dtype=np.uint8)
if a.shape[0] == w*c*h:
return a.reshape( (h, w, c), order = 'C' )
else:
b=np.zeros((h,w*c),'uint8')
for j in range(h):
b[j,:]=a[r*j:r*j+w*c]
return b.reshape( (h, w, c) ) | da2e980d804c283e2993049c63e3dacf67f7f0bd | 13,060 |
from math import log
import numpy as np
import numpy.random as nr
import scipy.spatial as ss
from scipy.special import digamma
def entropy(x,k=3,base=2):
""" The classic K-L k-nearest neighbor continuous entropy estimator
x should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x)-1, "Set k smaller than num. samples - 1"
d = len(x[0])
N = len(x)
intens = 1e-10 #small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
tree = ss.cKDTree(x)
nn = [tree.query(point,k+1,p=float('inf'))[0][k] for point in x]
const = digamma(N)-digamma(k) + d*log(2)
    return (const + d*np.mean(list(map(log, nn))))/log(base) | 41d55d2bef2475ece27a487afb1e54d433bad5f0 | 13,061
import logging
def respond_to_command(slack_client, branch, thread_ts):
"""Take action on command."""
logging.debug("Responding to command: Deploy Branch-%s", branch)
is_production = False
if branch == 'develop':
message = "Development deployment started"
post_to_channel(slack_client, message, thread_ts, announce=is_production)
result = deploy_develop()
elif branch == 'master':
is_production = True
message = "Production deployment started"
post_to_channel(slack_client, message, thread_ts, announce=is_production)
result = deploy_production()
else:
# Do nothing
return None
if result.return_code == 0:
message = "Branch {} deployed successfully.".format(branch)
post_to_channel(slack_client, message, thread_ts, announce=is_production)
else:
message = "FAILED: Branch {} failed to deploy.".format(branch)
post_to_channel(slack_client, message, thread_ts)
logging.debug("Failed build stdout: %s", result.out)
logging.debug("Failed build stderr: %s", result.err) | b008456d94cdd7e52e626ddd386ed2606cb22f02 | 13,062 |
from typing import Optional
def s3upload_start(
request: HttpRequest,
workflow: Optional[Workflow] = None,
) -> HttpResponse:
"""Upload the S3 data as first step.
The four step process will populate the following dictionary with name
upload_data (divided by steps in which they are set
STEP 1:
initial_column_names: List of column names in the initial file.
column_types: List of column types as detected by pandas
src_is_key_column: Boolean list with src columns that are unique
step_1: URL name of the first step
:param request: Web request
:return: Creates the upload_data dictionary in the session
"""
# Bind the form with the received data
form = UploadS3FileForm(
request.POST or None,
request.FILES or None,
workflow=workflow)
if request.method == 'POST' and form.is_valid():
# Dictionary to populate gradually throughout the sequence of steps. It
# is stored in the session.
request.session['upload_data'] = {
'initial_column_names': form.frame_info[0],
'column_types': form.frame_info[1],
'src_is_key_column': form.frame_info[2],
'step_1': reverse('dataops:csvupload_start')}
return redirect('dataops:upload_s2')
return render(
request,
'dataops/upload1.html',
{
'form': form,
'wid': workflow.id,
'dtype': 'S3 CSV',
'dtype_select': _('S3 CSV file'),
'valuerange': range(5) if workflow.has_table() else range(3),
'prev_step': reverse('dataops:uploadmerge')}) | b3fc1ac6c3754df836d8c219b0fb416f9d5973ce | 13,063 |
def search_explorations(query, limit, sort=None, cursor=None):
"""Searches through the available explorations.
args:
- query_string: the query string to search for.
- sort: a string indicating how to sort results. This should be a string
of space separated values. Each value should start with a '+' or a
'-' character indicating whether to sort in ascending or descending
order respectively. This character should be followed by a field name
to sort on. When this is None, results are based on 'rank'. See
_get_search_rank to see how rank is determined.
- limit: the maximum number of results to return.
- cursor: A cursor, used to get the next page of results.
If there are more documents that match the query than 'limit', this
function will return a cursor to get the next page.
returns: a tuple:
- a list of exploration ids that match the query.
- a cursor if there are more matching explorations to fetch, None
otherwise. If a cursor is returned, it will be a web-safe string that
can be used in URLs.
"""
return search_services.search(
query, SEARCH_INDEX_EXPLORATIONS, cursor, limit, sort, ids_only=True) | bead5de6f9803a7715ad497bb1f5c22da1faf296 | 13,064 |
import pkgutil
def find_resourceadapters():
"""
Finds all resource adapter classes.
:return List[ResourceAdapter]: a list of all resource adapter classes
"""
subclasses = []
def look_for_subclass(module_name):
module = __import__(module_name)
d = module.__dict__
for m in module_name.split('.')[1:]:
d = d[m].__dict__
for key, entry in d.items():
if key == tortuga.resourceAdapter.resourceAdapter.ResourceAdapter.__name__:
continue
try:
if issubclass(entry, tortuga.resourceAdapter.resourceAdapter.ResourceAdapter):
subclasses.append(entry)
except TypeError:
continue
for _, modulename, _ in pkgutil.walk_packages(
tortuga.resourceAdapter.__path__):
look_for_subclass('tortuga.resourceAdapter.{0}'.format(modulename))
return subclasses | 3aab6e6b28fa69cf9e7b1c8bc04589c69e43a3ee | 13,065 |
def print_scale(skill, points):
"""Return TeX lines for a skill scale."""
lines = ['\\cvskill{']
lines[0] += skill
lines[0] += '}{'
lines[0] += str(points)
lines[0] += '}\n'
return lines | c88de0c6db9e7b92dbcee025f42f56817a4aa033 | 13,066 |
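# Quick illustration of print_scale above:
assert print_scale('Python', 4) == ['\\cvskill{Python}{4}\n']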
from typing import Union
from typing import TextIO
from typing import BinaryIO
import os
import io
def getsize(file: Union[TextIO, BinaryIO]) -> int:
"""
Overview:
Get the size of the given ``file`` stream.
:param file: File which size need to access.
:return: File's size.
Examples::
>>> import io
>>> from hbutils.file import getsize
>>>
>>> with io.BytesIO(b'\\xde\\xad\\xbe\\xef') as file:
... print(getsize(file))
4
>>> with open('README.md', 'r') as file:
... print(getsize(file))
2582
.. note::
Only seekable stream can use :func:`getsize`.
"""
if file.seekable():
try:
return os.stat(file.fileno()).st_size
except OSError:
with keep_cursor(file):
return file.seek(0, io.SEEK_END)
else:
raise OSError(f'Given file {repr(file)} is not seekable, ' # pragma: no cover
f'so its size is unavailable.') | 3b88fc21c52b53ad13dcbfd074542f4b392450d8 | 13,067 |
def print_(fh, *args):
"""Implementation of perl $fh->print method"""
global OS_ERROR, TRACEBACK, AUTODIE
try:
print(*args, end='', file=fh)
return True
except Exception as _e:
OS_ERROR = str(_e)
if TRACEBACK:
cluck(f"print failed: {OS_ERROR}",skip=2)
if AUTODIE:
raise
return False | 8289aba67cb81b710d04da609ea63c65fa986e21 | 13,068 |
def _expm_multiply_interval(A, B, start=None, stop=None,
num=None, endpoint=None, balance=False, status_only=False):
"""
Compute the action of the matrix exponential at multiple time points.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
balance : bool
Indicates whether or not to apply balancing.
status_only : bool
A flag that is set to True for some debugging and testing operations.
Returns
-------
F : ndarray
:math:`e^{t_k A} B`
status : int
An integer status for testing and debugging.
Notes
-----
This is algorithm (5.2) in Al-Mohy and Higham (2011).
There seems to be a typo, where line 15 of the algorithm should be
moved to line 6.5 (between lines 6 and 7).
"""
if balance:
raise NotImplementedError
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if A.shape[1] != B.shape[0]:
raise ValueError('the matrices A and B have incompatible shapes')
ident = _ident_like(A)
n = A.shape[0]
if len(B.shape) == 1:
n0 = 1
elif len(B.shape) == 2:
n0 = B.shape[1]
else:
raise ValueError('expected B to be like a matrix or a vector')
u_d = 2**-53
tol = u_d
mu = _trace(A) / float(n)
# Get the linspace samples, attempting to preserve the linspace defaults.
linspace_kwargs = {'retstep' : True}
if num is not None:
linspace_kwargs['num'] = num
if endpoint is not None:
linspace_kwargs['endpoint'] = endpoint
samples, step = np.linspace(start, stop, **linspace_kwargs)
# Convert the linspace output to the notation used by the publication.
nsamples = len(samples)
if nsamples < 2:
raise ValueError('at least two time points are required')
q = nsamples - 1
h = step
t_0 = samples[0]
t_q = samples[q]
# Define the output ndarray.
# Use an ndim=3 shape, such that the last two indices
# are the ones that may be involved in level 3 BLAS operations.
X_shape = (nsamples,) + B.shape
X = np.empty(X_shape, dtype=float)
t = t_q - t_0
A = A - mu * ident
A_1_norm = _exact_1_norm(A)
if t*A_1_norm == 0:
m_star, s = 0, 1
else:
ell = 2
norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
# Compute the expm action up to the initial time point.
X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
# Compute the expm action at the rest of the time points.
if q <= s:
if status_only:
return 0
else:
return _expm_multiply_interval_core_0(A, X,
h, mu, m_star, s, q)
elif q > s and not (q % s):
if status_only:
return 1
else:
return _expm_multiply_interval_core_1(A, X,
h, mu, m_star, s, q, tol)
elif q > s and (q % s):
if status_only:
return 2
else:
return _expm_multiply_interval_core_2(A, X,
h, mu, m_star, s, q, tol)
else:
raise Exception('internal error') | 941c4e2d51f0bf524beff52350466a743b51eadf | 13,069 |
from subprocess import CompletedProcess
def subprocess(mocker):
""" Mock the subprocess and make sure it returns a value """
def with_return_value(value: int = 0, stdout: str = ""):
mock = mocker.patch(
"subprocess.run", return_value=CompletedProcess(None, returncode=0)
)
mock.returncode.return_value = value
mock.stdout = stdout
return mock
return with_return_value | 4b7140127eeb2d9202ed976518a121fed5fac302 | 13,070 |
def ljust(string, width):
"""
A version of ljust that considers the terminal width (see
get_terminal_width)
"""
width -= get_terminal_width(string)
return string + " " * width | e9c6ab8bbeeb268bc82f479e768be32f74fab488 | 13,071 |
import operator
def device_sort (device_set):
"""Sort a set of devices by self_id. Can't be used with PendingDevices!"""
return sorted(device_set, key = operator.attrgetter ('self_id')) | 92a22a87b5b923771cd86588180a8c6eb15b9fdf | 13,072 |
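# Minimal illustration of device_sort above, using a stand-in object that exposes a
# `self_id` attribute (the real device class is not shown in this snippet):
from collections import namedtuple
_Dev = namedtuple('_Dev', 'self_id')
assert [d.self_id for d in device_sort({_Dev(3), _Dev(1), _Dev(2)})] == [1, 2, 3]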
def _ontology_value(curie):
"""Get the id component of the curie, 0000001 from CL:0000001 for example."""
return curie.split(":")[1] | 7ef1f0874e698c498ccef16294c0469f67cd5233 | 13,073 |
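# Quick check of _ontology_value above: the id component follows the colon.
assert _ontology_value("CL:0000001") == "0000001"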
import os
def get_current_joblist(JobDir):
""" -function to return current, sorted, joblist in /JobDir """
if os.path.exists(JobDir):
        jobdirlist = next(os.walk(JobDir))[1]
jobdirlist.sort()
return jobdirlist | 79f8feb20ccdcb2984b81a236ddcd3cac6fcc351 | 13,074 |
def readpacket( timeout=1000, hexdump=False ):
"""Reads a HP format packet (length, data, checksum) from device.
Handles error recovery and ACKing.
Returns data or prints hexdump if told so.
"""
data = protocol.readpacket()
if hexdump == True:
        print(hpstr.tohexstr(data))
else:
return data | d673e61974058fc73a47bd0e5856563c9f5370bf | 13,075 |
import pandas as pd
def df_down_next_empty_pos(df, pos):
"""
Given a position `pos` at `(c, r)`, reads down column `c` from row `r` to find the next
empty cell.
Returns the position of that cell if found, or `None` otherwise.
"""
return df_down_next_matching_pos(df, pos, pd.isna) | 79fdba60e6a5846c39fb1141f3d21430230c2a31 | 13,076 |
def optimise_f2_thresholds(y, p, verbose=False, resolution=100):
"""Optimize individual thresholds one by one. Code from anokas.
Inputs
------
y: numpy array, true labels
p: numpy array, predicted labels
"""
n_labels = y.shape[1]
def mf(x):
p2 = np.zeros_like(p)
for i in range(n_labels):
            p2[:, i] = (p[:, i] > x[i]).astype(int)
score = fbeta_score(y, p2, beta=2, average='samples')
return score
x = [0.2]*n_labels
for i in range(n_labels):
best_i2 = 0
best_score = 0
for i2 in range(resolution):
i2 /= resolution
x[i] = i2
score = mf(x)
if score > best_score:
best_i2 = i2
best_score = score
x[i] = best_i2
if verbose:
print(i, best_i2, best_score)
return x | 5f1ad6dda86229cffb7167f5cc3365c601048937 | 13,077 |
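# Illustrative usage of optimise_f2_thresholds above on toy data. These imports also
# supply the module-level `np` and `fbeta_score` names the function relies on;
# scikit-learn is assumed to be installed.
import numpy as np
from sklearn.metrics import fbeta_score
y_true = np.array([[1, 0, 1], [0, 1, 1]])
y_prob = np.array([[0.9, 0.2, 0.8], [0.1, 0.7, 0.6]])
thresholds = optimise_f2_thresholds(y_true, y_prob, resolution=10)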
def holding_vars():
""" input
This is experimental, used to indicate unbound (free) variables in
a sum or list comprehensive.
This is inspired by Harrison's {a | b | c} set comprehension notation.
>>> pstream(holding_vars(),', holding x,y,z')
Etok(holding_vars,', holding x , y , z')
"""
def f(acc):
((_,_),cs) = acc
return Etok(name='holding_vars',etoks=cs[0::2],raw=acc)
return (comma + next_word('holding') + c.plus_comma(var())).treat(f,'holding_vars') | 5566bc97e2fa972b1ccde4d24f30fb06635bdcb7 | 13,078 |
import re
def select_with_several_genes(accessions, name, pattern,
description_items=None,
attribute='gene',
max_items=3):
"""
This will select the best description for databases where more than one
gene (or other attribute) map to a single URS. The idea is that if there
are several genes we should use the lowest one (RNA5S1, over RNA5S17) and
show the names of genes, if possible. This will list the genes if there are
few, otherwise provide a note that there are several.
"""
getter = op.attrgetter(attribute)
candidate = min(accessions, key=getter)
genes = set(getter(a) for a in accessions if getter(a))
if not genes or len(genes) == 1:
description = candidate.description
# Append gene name if it exists and is not present in the description
# already
if genes:
suffix = genes.pop()
if suffix not in description:
description += ' (%s)' % suffix
return description
regexp = pattern % getter(candidate)
basic = re.sub(regexp, '', candidate.description)
func = getter
if description_items is not None:
func = op.attrgetter(description_items)
items = sorted([func(a) for a in accessions if func(a)], key=item_sorter)
if not items:
return basic
return add_term_suffix(basic, items, name, max_items=max_items) | 04df56e64259aafd1e0d5b0d68839d8016514cb7 | 13,079 |
def list_messages_matching_query(service, user_id, query=''):
"""List all Messages of the user's mailbox matching the query.
Args:
service: Authorized Gmail API service instance.
user_id: User's email address. The special value "me"
can be used to indicate the authenticated user.
query: String used to filter messages returned.
Eg.- 'from:user@some_domain.com' for Messages from a particular sender.
Returns:
List of Messages that match the criteria of the query. Note that the
returned list contains Message IDs, you must use get with the
appropriate ID to get the details of a Message.
"""
try:
response = service.users().messages().list(userId=user_id,
q=query).execute()
messages = []
if 'messages' in response:
messages.extend(response['messages'])
while 'nextPageToken' in response:
page_token = response['nextPageToken']
response = service.users().messages().list(
userId=user_id, q=query, pageToken=page_token).execute()
messages.extend(response['messages'])
return messages
except errors.HttpError as error:
print('An error occurred: %s' % error) | a6ec376d7cfb4a6c724646a0e4d9ac1b86526ae7 | 13,080 |
def write_to_string(input_otio, **profile_data):
"""
:param input_otio: Timeline, Track or Clip
:param profile_data: Properties passed to the profile tag describing
the format, frame rate, colorspace and so on. If a passed Timeline has
`global_start_time` set, the frame rate will be set automatically.
Please note that numeric values must be passed as strings.
Please check MLT website for more info on profiles.
You may pass an "image_producer" argument with "pixbuf" to change
image sequence producer. The default image sequence producer is "image2"
:return: MLT formatted XML
:rtype: `str`
"""
mlt_adapter = MLTAdapter(input_otio, **profile_data)
return mlt_adapter.create_mlt() | 36a0e7fe741b4c216bd068b8544d68c63176d679 | 13,081 |
import re
def parse_IS(reply: bytes, device: str):
"""Parses the reply to the shutter IS command."""
match = re.search(b"\x00\x07IS=([0-1])([0-1])[0-1]{6}\r$", reply)
if match is None:
return False
if match.groups() == (b"1", b"0"):
if device in ["shutter", "hartmann_right"]:
return "open"
else:
return "closed"
elif match.groups() == (b"0", b"1"):
if device in ["shutter", "hartmann_right"]:
return "closed"
else:
return "open"
else:
return False | 827b5ebf5c98bcc65b823276d5ab5b8086a2c069 | 13,082 |
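# Illustrative check of parse_IS above with synthetic reply frames that match the
# regular expression (real controller replies may differ):
assert parse_IS(b"\x00\x07IS=10000000\r", "shutter") == "open"
assert parse_IS(b"\x00\x07IS=01000000\r", "shutter") == "closed"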
def quatXYZWFromRotMat(rot_mat):
"""Convert quaternion from rotation matrix"""
quatWXYZ = quaternions.mat2quat(rot_mat)
quatXYZW = quatToXYZW(quatWXYZ, 'wxyz')
return quatXYZW | 2a0a736c3950dca481c993e9801e14b362f78940 | 13,083 |
import sqlite3
def schema_is_current(db_connection: sqlite3.Connection) -> bool:
"""
Given an existing database, checks to see whether the schema version in the existing
database matches the schema version for this version of Gab Tidy Data.
"""
db = db_connection.cursor()
db.execute(
"""
select metadata_value from _gab_tidy_data
where metadata_key = 'schema_version'
"""
)
db_schema_version = db.fetchone()[0]
return db_schema_version == data_mapping.schema_version | 183502c292f9bb92e18a4ea7767028bea4e746fb | 13,084 |
import errno
import os
def xattr_writes_supported(path):
"""
    Returns True if we can write a file to the supplied
path and subsequently write a xattr to that file.
"""
    try:
        import xattr
    except ImportError:
        return False
def set_xattr(path, key, value):
xattr.setxattr(path, "user.%s" % key, value)
# We do a quick attempt to write a user xattr to a temporary file
# to check that the filesystem is even enabled to support xattrs
fake_filepath = os.path.join(path, 'testing-checkme')
result = True
with open(fake_filepath, 'wb') as fake_file:
fake_file.write(b"XXX")
fake_file.flush()
try:
set_xattr(fake_filepath, 'hits', b'1')
except IOError as e:
if e.errno == errno.EOPNOTSUPP:
result = False
else:
# Cleanup after ourselves...
if os.path.exists(fake_filepath):
os.unlink(fake_filepath)
return result | 4992f2f5808575eac1f816aa09d80ff881286368 | 13,085 |
def _lovasz_softmax(probabilities, targets, classes="present", per_image=False, ignore=None):
"""The multiclass Lovasz-Softmax loss.
Args:
probabilities: [B, C, H, W]
class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output
with outputs of size [B, H, W].
targets: [B, H, W] ground truth targets (between 0 and C - 1)
classes: "all" for all,
"present" for classes present in targets,
or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class targets
"""
if per_image:
loss = mean(
_lovasz_softmax_flat(
*_flatten_probabilities(prob.unsqueeze(0), lab.unsqueeze(0), ignore),
classes=classes
)
for prob, lab in zip(probabilities, targets)
)
else:
loss = _lovasz_softmax_flat(
*_flatten_probabilities(probabilities, targets, ignore), classes=classes
)
return loss | c46006c921d1f40b5b86ff861750a9d89ec4bbdc | 13,086 |
def encodeDERTRequest(negoTypes = [], authInfo = None, pubKeyAuth = None):
"""
@summary: create TSRequest from list of Type
@param negoTypes: {list(Type)}
@param authInfo: {str} authentication info TSCredentials encrypted with authentication protocol
@param pubKeyAuth: {str} public key encrypted with authentication protocol
@return: {str} TRequest der encoded
"""
negoData = NegoData().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
#fill nego data tokens
i = 0
for negoType in negoTypes:
s = Stream()
s.writeType(negoType)
negoToken = NegoToken()
negoToken.setComponentByPosition(0, s.getvalue())
negoData.setComponentByPosition(i, negoToken)
i += 1
request = TSRequest()
request.setComponentByName("version", univ.Integer(2).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
if i > 0:
request.setComponentByName("negoTokens", negoData)
if not authInfo is None:
request.setComponentByName("authInfo", univ.OctetString(authInfo).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
if not pubKeyAuth is None:
request.setComponentByName("pubKeyAuth", univ.OctetString(pubKeyAuth).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
return der_encoder.encode(request) | bba9ed483eec2ef39927689a8924cbcc15a2093e | 13,087 |
import os
def generate_s3_strings(path):
"""Generates s3 bucket name, s3 key and s3 path with an endpoint from a path
with path (string): s3://BUCKETNAME/KEY
x --> path.find(start) returns index 0 + len(start) returns 5 --> 0 + 5 = 5
    Y --> path[len(start):] = BUCKETNAME/KEY --> .find(end) looking for forward slash in BUCKETNAME/KEY --> returns 10
    Y --> now we have to add len(start) to 10 because the index was relating to BUCKETNAME/KEY and not to s3://BUCKETNAME/KEY
bucket_name = path[X:Y]
Prefix is the string behind the slash that is behind the bucket_name
- so path.find(bucket_name) find the index of the bucket_name, add len(bucket_name) to get the index to the end of the bucket name
- add 1 because we do not want the slash in the Key
Args:
path (string): s3://BUCKETNAME/KEY
Returns:
strings: path = s3://endpoint@BUCKETNAME/KEY
prefix = KEY
bucket_name = BUCKETNAME
"""
start = 's3://'
end = '/'
bucket_name = path[path.find(start)+len(start):path[len(start):].find(end)+len(start)]
prefix = path[path.find(bucket_name)+len(bucket_name)+1:]
if not prefix.endswith('/'):
prefix = prefix+'/'
path = 's3://'+os.environ['S3_ENDPOINT']+'@'+bucket_name+'/'+prefix
return bucket_name, prefix, path | 601b20514c93e2159f9d5747063ce70d265d6e6e | 13,088 |
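# Illustrative usage of generate_s3_strings above (the endpoint value is made up):
import os
os.environ['S3_ENDPOINT'] = 'object-store.example.com'
print(generate_s3_strings('s3://my-bucket/some/key'))
# -> ('my-bucket', 'some/key/', 's3://object-store.example.com@my-bucket/some/key/')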
def hierholzer(network: Network, source=0):
""" Hierholzer's algorithm for finding an Euler cycle
Args:
network (Network): network object
source(int): node where starts (and ends) the path
Raises:
NotEulerianNetwork: if exists at least one node with odd degree
NotNetworkNode: if source is not in the network
Returns:
list of nodes that form a path visiting all edges
References:
.. [1] sanjeev2552, heruslu, Code_Mech,
Geeks For Geeks, A computer science portal for geeks
https://www.geeksforgeeks.org/hierholzers-algorithm-directed-graph/
.. [2] Reinhard Diestel,
Graph Theory,
Springer, Volume 173 of Graduate texts in mathematics, ISSN 0072-5285
"""
if source > network.n:
raise NotNetworkNode(f"Source node {source} is not in the network (N={network.n})")
path = []
temp_path = []
degrees_list = deepcopy(network.degrees_list)
edges_basket = deepcopy(network.edges_basket)
if network.n == 0:
return path
eulerian, odd_degree_nodes = is_eulerian(network)
if not eulerian:
raise NotEulerianNetwork(f"Network is not Eulerian, not all nodes are even degree: {odd_degree_nodes}")
temp_path.append(source)
temp_node = source
while len(temp_path):
if degrees_list[temp_node]:
temp_path.append(temp_node)
next_node = edges_basket[temp_node][-1]
degrees_list[temp_node] -= 1
edges_basket[temp_node].pop()
if not network.directed:
degrees_list[next_node] -= 1
i = edges_basket[next_node].index(temp_node)
del edges_basket[next_node][i]
temp_node = next_node
else:
path.append(temp_node)
temp_node = temp_path[-1]
temp_path.pop()
# If the network is directed we will revert the path
if network.directed:
return path[::-1]
return path | 9a1fb1107e9a2b086d1716cea7708dba9849fb4e | 13,089 |
def fit1d(xdata,zdata,degree=1,reject=0,ydata=None,plot=None,plot2d=False,xr=None,yr=None,zr=None,xt=None,yt=None,zt=None,pfit=None,log=False,colorbar=False,size=5) :
"""
Do a 1D polynomial fit to data set and plot if requested
Args:
xdata : independent variable
zdata : dependent variable to be fit
Keyword args:
degree: degree of polynomial to fit (default=1 for linear fit)
reject : single iteration rejection of points that deviate from initial by more than specified value (default=0, no rejection)
ydata : auxiliary variable for plots (default=None)
plot : axes to plot into (default=None)
plot2d (bool) : set to make a 2D plot with auxiliary variable, rather than 1D color-coded by auxiliary variable
xr[2] : xrange for plot
yr[2] : yrange for plot
zr[2] : zrange for plot
xt : xtitle for plot
yt : ytitle for plot
zt : ztitle for plot
Returns :
pfit : 1D polynomial fit
"""
# set up fitter and do fit
if pfit is None :
fit_p = fitting.LinearLSQFitter()
p_init = models.Polynomial1D(degree=degree)
pfit = fit_p(p_init, xdata, zdata)
# rejection of points?
if reject > 0 :
gd=np.where(abs(zdata-pfit(xdata)) < reject)[0]
bd=np.where(abs(zdata-pfit(xdata)) >= reject)[0]
print('rejected ',len(xdata)-len(gd),' of ',len(xdata),' points')
pfit = fit_p(p_init, xdata[gd], zdata[gd])
print('1D rms: ',(zdata-pfit(xdata)).std())
# plot if requested
if plot is not None :
if xr is None : xr = [xdata.min(),xdata.max()]
if yr is None and ydata is not None : yr = [ydata.min(),ydata.max()]
if log :
zplot=10.**zdata
else :
zplot=zdata
if zr is None : zr = [zplot.min(),zplot.max()]
if ydata is None :
x = np.linspace(xr[0],xr[1],200)
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
# straight 1D plot
plots.plotp(plot,xdata,zplot,xr=xr,yr=yr,zr=zr,
xt=xt,yt=yt,size=size)
plots.plotl(plot,x,zfit)
elif plot2d :
# 2D image plot with auxiliary variable
y, x = np.mgrid[yr[1]:yr[0]:200j, xr[1]:xr[0]:200j]
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
plots.plotc(plot,xdata,ydata,zplot,xr=xr,yr=yr,zr=zr,
xt=xt,yt=xt,zt=yt,colorbar=True,size=size,cmap='rainbow')
plot.imshow(zfit,extent=[xr[1],xr[0],yr[1],yr[0]],
aspect='auto',vmin=zr[0],vmax=zr[1], origin='lower',cmap='rainbow')
else :
# 1D plot color-coded by auxiliary variable
x = np.linspace(xr[0],xr[1],200)
if log :
zfit=10.**pfit(x)
else :
zfit=pfit(x)
plots.plotc(plot,xdata,zplot,ydata,xr=xr,yr=zr,zr=yr,
xt=xt,yt=yt,zt=zt,size=size,colorbar=colorbar)
plots.plotl(plot,x,zfit,color='k')
return pfit | 0c40a2b1af72c0df8523a92cd5cc80c99f631472 | 13,090 |
import torch
import torch.nn.functional as F
def nucleus_sampling(data, p, replace=0, ascending=False, above=True):
"""
:param tensor data: Input data
:param float p: Probability for filtering (or be replaced)
:param float replace: Default value is 0. If value is provided, input data will be replaced by this value
if data match criteria.
:param bool ascending: Return ascending order or descending order. Sorting will be executed if replace is None.
:param bool above: If True is passed, only value smaller than p will be kept (or not replaced)
:return: tensor Filtered result
"""
sorted_data, sorted_indices = torch.sort(data, descending=not ascending)
cum_probas = torch.cumsum(F.softmax(sorted_data, dim=-1), dim=-1)
if replace is None:
if above:
replace_idxes = cum_probas < p
else:
replace_idxes = cum_probas > p
idxes = sorted_indices[replace_idxes]
else:
if above:
replace_idxes = cum_probas > p
else:
replace_idxes = cum_probas < p
idxes = sorted_indices[~replace_idxes]
if replace is None:
sorted_data = sorted_data[replace_idxes]
else:
sorted_data[replace_idxes] = replace
return sorted_data, idxes | 6332e9f5e04fa2ec0130fa2db7dd5a8aad26caec | 13,091 |
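# Illustrative usage of nucleus_sampling above with arbitrary logits: with the default
# replace=0, values whose cumulative softmax probability exceeds p are zeroed out and
# the indices of the kept head of the distribution are returned.
logits = torch.tensor([2.0, 1.0, 0.5, 0.1])
kept_values, kept_indices = nucleus_sampling(logits, p=0.9)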
from datetime import datetime
import json
def mark_ready_for_l10n_revision(request, document_slug, revision_id):
"""Mark a revision as ready for l10n."""
revision = get_object_or_404(Revision, pk=revision_id,
document__slug=document_slug)
if not revision.document.allows(request.user, 'mark_ready_for_l10n'):
raise PermissionDenied
if revision.can_be_readied_for_localization():
# We don't use update(), because that wouldn't update
# Document.latest_localizable_revision.
revision.is_ready_for_localization = True
revision.readied_for_localization = datetime.now()
revision.readied_for_localization_by = request.user
revision.save()
ReadyRevisionEvent(revision).fire(exclude=request.user)
return HttpResponse(json.dumps({'message': revision_id}))
return HttpResponseBadRequest() | 64d7d84ceab204a3d3fea98e9753fde486c4490c | 13,092 |
def is_all_maxed_out(bad_cube_counts, bad_cube_maximums):
"""Determines whether all the cubes of each type are at their maximum
amounts."""
for cube_type in CUBE_TYPES:
if bad_cube_counts[cube_type] < bad_cube_maximums[cube_type]:
return False
return True | 23332712b46d33a1a8e552ecf30389d4b0a10c90 | 13,093 |
def get_local_vars(*args):
"""
get_local_vars(prov, ea, out) -> bool
"""
return _ida_dbg.get_local_vars(*args) | ebed21c8b90c48e76734f07a5e83c11bf5b9dd0c | 13,094 |
def gcc():
"""
getCurrentCurve
Get the last curve that was added to the last plot plot
:return: The last curve
:rtype: pg.PlotDataItem
"""
plotWin = gcf()
try:
return plotWin.plotWidget.plotItem.dataItems[-1]
except IndexError:
return None | 2f9226c51a84d39b43f1d8ef83969b94a2c308cd | 13,095 |
import requests
import json
def searchDevice(search):
"""
Method that searches the ExtraHop system for a device that
matches the specified search criteria
Parameters:
search (dict): The device search criteria
Returns:
dict: The metadata of the device that matches the criteria
"""
url = urlunparse(("https", HOST, "/api/v1/devices/search", "", "", ""))
headers = {"Authorization": "ExtraHop apikey=%s" % APIKEY}
r = requests.post(
url, headers=headers, verify=False, data=json.dumps(search)
)
return r.json()[0] | 9b65346054f099e4a2aa78035802b2de799850ac | 13,096 |
import numpy as np
def regularmeshH8(nelx, nely, nelz, lx, ly, lz):
""" Creates a regular H8 mesh.
Args:
nelx (:obj:`int`): Number of elements on the X-axis.
nely (:obj:`int`): Number of elements on the Y-axis.
nelz (:obj:`int`): Number of elements on the Z-axis.
lx (:obj:`float`): X-axis length.
ly (:obj:`float`): Y-axis length.
lz (:obj:`float`): Z-axis length.
Returns:
Tuple with the coordinate matrix, connectivity, and the indexes of each node.
"""
x, y, z = np.linspace(0, lx, num=nelx + 1), np.linspace(0, ly, num=nely + 1), np.linspace(0, lz, num=nelz + 1)
nx, ny, nz = len(x), len(y), len(z)
mat_x = (x.reshape(nx, 1)@np.ones((1, ny*nz))).T
mat_y = y.reshape(ny, 1)@np.ones((1, nx))
mat_z = z.reshape(nz, 1)@np.ones((1, nx*ny))
x_t, y_t, z_t = mat_x.flatten(), np.tile(mat_y.flatten(), nz), mat_z.flatten()
ind_coord = np.arange(1, (nz)* nx * ny + 1, 1, dtype=int)
coord = (np.array([ind_coord, x_t, y_t, z_t])).T
# processing of connectivity matrix
ind_connect = np.arange(1, nelz * nelx * nely + 1, dtype=int)
mat_aux = ind_connect.reshape(nely, nelx, nelz)
a = np.arange(0, nely * nelz, 1)
for ind in range(nely, len(a), nely):
a[ind:] += nelx + 1
c = (a.reshape(len(a),1)@np.ones((1, nelx))).reshape(nely, nelx, nelz)
b = (mat_aux + c).flatten()
connect = np.array([ind_connect, b+(nelx+1), b, b+1, b+(nelx+2), \
b+(nelx+1)*(nely+1)+(nelx+1), b+(nelx+1)*(nely+1), \
b+1+(nelx+1)*(nely+1), b+(nelx+1)*(nely+1)+(nelx+2)], dtype=int).T
return coord, connect | 1c0050b8c48438f548e67f7776194a067c77ae39 | 13,097 |
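# Illustrative usage of regularmeshH8 above: a single 1 x 1 x 1 element yields
# 8 nodes (coord has shape (8, 4)) and one connectivity row (shape (1, 9)).
coord, connect = regularmeshH8(1, 1, 1, 1.0, 1.0, 1.0)
assert coord.shape == (8, 4) and connect.shape == (1, 9)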
def only_t1t2(src, names):
"""
This function...
:param src:
:param names:
:return:
"""
if src.endswith("TissueClassify"):
# print "Keeping T1/T2!"
try:
names.remove("t1_average_BRAINSABC.nii.gz")
except ValueError:
pass
try:
names.remove("t2_average_BRAINSABC.nii.gz")
except ValueError:
pass
else:
names.remove("TissueClassify")
# print "Ignoring these files..."
# for name in names:
# print "\t" + name
return names | 60116fbc602bbe03f7c18776b623ef3680b9dfc1 | 13,098 |
import numpy as np
def distanceEucl(a, b):
    """Compute the Euclidean distance in any dimension."""
dist = np.linalg.norm(a - b)
return dist | 572d98aecf17cd1f0e34dcad9e07beb3bcf6d06d | 13,099 |
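# Quick check of distanceEucl above: the distance between (0, 0) and (3, 4) is 5.
assert distanceEucl(np.array([0.0, 0.0]), np.array([3.0, 4.0])) == 5.0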