import ROOT
def _h1_cmp_chi2_ ( h1 ,
h2 ,
density = False ) :
"""Compare histograms by chi2
>>> h1 = ... ## the first histo
>>> h2 = ... ## the second histo (or function or anything else)
>>> chi2ndf , probability = h1.cmp_chi2 ( h2 )
"""
assert isinstance ( h1 , ROOT.TH1 ) and 1 == h1.dim () , \
"cmp_dist: invalid type of h1 %s/%s" % ( h1 , type ( h1 ) )
if isinstance ( h2 , ROOT.TH1 ) :
assert 1 == h2.dim () , "cmp_dist: invalid type of h2 %s/%s" % ( h2 , type ( h2 ) )
if density :
h1_ = h1.density() if hasattr ( h1 , 'density' ) else h1
h2_ = h2.density() if hasattr ( h2 , 'density' ) else h2
cmp = _h1_cmp_chi2_ ( h1_ , h2_ , density = False )
if h1_ is not h1 : del h1_
if h2_ is not h2 : del h2_
return cmp
chi2 = 0.0
ndf = 0
for i , x , v1 in h1.items() :
v2 = h2 ( x.value() )
chi2 += v1.chi2 ( v2 )
ndf += 1
c2ndf = chi2/ndf
return c2ndf, ROOT.TMath.Prob ( chi2 , ndf ) | ff2e5c191491c18adc29edf08a5bb837fabf045f | 13,300 |
def _questionnaire_metric(name, col):
"""Returns a metrics SQL aggregation tuple for the given key/column."""
return _SqlAggregation(
name,
"""
SELECT {col}, COUNT(*)
FROM participant_summary
WHERE {summary_filter_sql}
GROUP BY 1;
""".format(
col=col, summary_filter_sql=_SUMMARY_FILTER_SQL
),
lambda v: QuestionnaireStatus.lookup_by_number(v).name,
None,
) | abd477798670733788461ed35fc0b4814ee7081d | 13,301 |
import re
def xyzToAtomsPositions(xyzFileOrStr):
"""
Returns atom positions (order) given a molecule in an xyz format.
Inchi-based algorithm.
Use this function to set the atoms positions in a reference
molecule. The idea is to assign the positions once and to never
change them again.
Arguments:
----------
xyzFileOrStr : str
input xyz molecule (either file path or xyz string)
Returns:
----------
atomsPositions: dict
dictionary whose keys correspond to atoms positions in xyz
file and values to the newly assigned positions
"""
# get inchi with extra auxiliary log
if ioutils.fileExists(xyzFileOrStr): xyzFileOrStr= ioutils.readFile(xyzFileOrStr)
xyzFileOrStr = xyzToIntertialFrame(xyzFileOrStr)
# swap all hydrogens with a heavy atom; here I picked Cl, but any other halogen atom
# should also work. this atom swap is to force inchi to consider all the atoms in its
# connectivity algorithm. note that atoms from the first group (e.g. Na, Li) won't work
# as they produce solids and thus the inchi string is significantly changed
xyzFileOrStr = '\n'.join([xyz_line.replace('H','Cl') for xyz_line in xyzFileOrStr.split('\n')])
inchiWithAux = obconverter.obConvert(inputMol=xyzFileOrStr,inputMolFormat='xyz',
outputMolFormat='inchi', options=['-xa'])
inchi, inchiAux = inchiWithAux.split('\n')
# find connectivity info in the inchi string - used to detect the
# presence of heavy atoms.
atomsInchiConnectivity = re.search(r'/c(\d+?\*)?(.*?)(?=/|$)',inchi)
# read the mapping between heavy atoms (+ lone hydrogens) in xyz and inchi
# from the auxiliary log
atomsInchiAuxMap = re.search(r'/N:(.*?)(?=/|$)',inchiAux)
atomsInchiAuxEquivMap = re.search(r'/E:(.*?)(?=/|$)',inchiAux)
# create the rdkit mol object
rdkitMolFromMol = xyzconverters.xyzToMolToRdkitMol(xyzFileOrStr, removeHs=False)
numAtoms = rdkitMolFromMol.GetNumAtoms()
# initialise the atoms position dict
atomsPositions = {k:None for k in range(numAtoms)}
nextAtomId = 0
mol_frags = rdkitmolutils.rdkitMolToMolFrags(rdkitMolFromMol)
if mol_frags:
print(f'Warning: Provided xyz file contains {len(mol_frags)} molecular fragments.')
#return atomsPositions
# get the atoms based on the inchi connectivity info
if atomsInchiConnectivity is not None:
# process the atomsInchiAuxMap and extract the atoms mapping
atomsInchiAuxMap= atomsInchiAuxMap.groups()[0] \
.replace('/','').replace(';',',').split(',')
atomsInchiMatch = {int(atomId)-1: i
for i, atomId in enumerate(atomsInchiAuxMap)}
atomsInchiMatchList = list(map(lambda x: int(x)-1, atomsInchiAuxMap))
if atomsInchiMatch:
# now disambiguate any equivalent atoms
if atomsInchiAuxEquivMap:
atomsInchiAuxEquivMap= atomsInchiAuxEquivMap.groups()[0] \
.replace('/','').replace(')(','#').replace(')','') \
.replace('(','').split('#')
for i in range(len(atomsInchiAuxEquivMap)):
atomsInchiAuxEquivMap[i] = list(map(lambda x: int(x)-1, atomsInchiAuxEquivMap[i].split(',')))
atomsInchiAuxEquivMap[i] = list(map(lambda x: atomsInchiMatchList[x], atomsInchiAuxEquivMap[i]))
for equivAtomsList in atomsInchiAuxEquivMap:
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, equivAtomsList)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, equivAtomsList)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
# use four invariants to disambiguate atoms
equivAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
currentAtomsOrder = sorted([atomsInchiMatch[equivAtomId] for equivAtomId in equivAtomsList])
for equivAtomPos in equivAtomsOrder:
atomsInchiMatch[equivAtomsList[equivAtomPos]] = currentAtomsOrder.pop(0)
# add the atoms positions to the overall atomsPosition dictionary
atomsPositions = {**atomsPositions, **atomsInchiMatch}
nextAtomId = len(atomsInchiMatch)
# assign positions to any atoms that are left
if nextAtomId < numAtoms:
loneAtomsIds = [atomId
for atomId, refId in atomsPositions.items()
if refId is None]
loneAtomsMap = {}
atomsXYZ = rdkitmolutils.getAtomsXYZs(rdkitMolFromMol, loneAtomsIds)
atomsXYZ = (atomsXYZ * 1e7).astype(int)
atomsX = atomsXYZ[:,0].tolist()
atomsY = atomsXYZ[:,1].tolist()
atomsZ = atomsXYZ[:,2].tolist()
_atomsDist = rdkitmolutils.rdkitSumAllAtomsDistFromAtoms(rdkitMolFromMol, loneAtomsIds)
_atomsDist = [int(dist * 1e7) for dist in _atomsDist]
loneAtomsOrder = np.lexsort((atomsZ,atomsY,atomsX,_atomsDist)).tolist()
for loneAtomPos in loneAtomsOrder:
loneAtomsMap[loneAtomsIds[loneAtomPos]] = nextAtomId
nextAtomId += 1
# add the remaining positions to the overall atoms positions
atomsPositions = {**atomsPositions, **loneAtomsMap}
# check for duplicate and None values at the end
hasDuplicates = len(atomsPositions.values()) > len(set(atomsPositions.values()))
hasNones = None in atomsPositions.values()
if hasDuplicates or hasNones:
print('Error: atom canonical positions algorithm has failed.')
atomsPositions= {}
return atomsPositions | 8db5c81d0e8aca2eef686d955ed810b4b166d0db | 13,302 |
async def modify_video_favorite_list(
media_id: int,
title: str,
introduction: str = '',
private: bool = False,
credential: Credential = None):
"""
Modify the information of a video favorites folder.
Args:
media_id (int) : Favorites folder ID.
title (str) : Favorites folder name.
introduction (str, optional) : Favorites folder description. Defaults to ''.
private (bool, optional) : Whether the folder is private. Defaults to False.
credential (Credential, optional): Credential. Defaults to None.
Returns:
dict: API call result.
"""
if credential is None:
credential = Credential()
credential.raise_for_no_sessdata()
credential.raise_for_no_bili_jct()
api = API["operate"]["modify"]
data = {
"title": title,
"intro": introduction,
"privacy": 1 if private else 0,
"cover": "",
"media_id": media_id
}
return await request("POST", api["url"], data=data, credential=credential) | e3618fc59785b63cf7f6810a0f2683bcd18d5277 | 13,303 |
def get_salesforce_log_files():
"""Helper function to get a list available log files"""
return {
"totalSize": 2,
"done": True,
"records": [
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ"
},
"Id": "0ATD000000001bROAQ",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ/LogFile",
"LogDate": "2014-03-14T00:00:00.000+0000",
"LogFileLength": 2692.0
},
{
"attributes": {
"type": "EventLogFile",
"url": "/services/data/v32.0/sobjects/EventLogFile/0ATD000000001SdOAI"
},
"Id": "0ATD000000001SdOAI",
"EventType": "API",
"LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001SdOAI/LogFile",
"LogDate": "2014-03-13T00:00:00.000+0000",
"LogFileLength": 1345.0
}
]
} | 1c182898517d73c360e9f2ab36b902afea8c58d7 | 13,304 |
def remove_true_false_edges(dict_snapshots, dict_weights, index):
"""
Remove chosen true edges from the graph so the embedding can be calculated without them.
:param dict_snapshots: Dict where keys are times and values are a list of edges for each time stamp.
:param dict_weights: Dict where keys are times and values are a list of weights for each edge in the time stamp; the order
corresponds to the order of edges in dict_snapshots.
:param index: Index of the pivot time - up to and including the pivot time the data is the train set; afterwards it is the test set.
:return: Updated dict_snapshots and dict_weights.
"""
times = list(dict_snapshots.keys())
mapping = {i: times[i] for i in range(len(times))}
keys = list(mapping.keys())
for key in keys:
if key < index:
continue
else:
del dict_snapshots[mapping[key]]
del dict_weights[mapping[key]]
return dict_snapshots, dict_weights | 3f833fda22710c20703aa7590eae0fd649b69634 | 13,305 |
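# Usage sketch for remove_true_false_edges above (not part of the original snippet; the
# data here is made up): keeping the first two time stamps (index=2) deletes the later
# snapshot and its weights in place.
snapshots = {0: [(1, 2), (2, 3)], 1: [(3, 4)], 2: [(4, 5)]}
weights = {0: [1.0, 0.5], 1: [2.0], 2: [0.3]}
train_snaps, train_weights = remove_true_false_edges(snapshots, weights, index=2)
assert list(train_snaps.keys()) == [0, 1] and 2 not in train_weights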
def addFavoriteDir(name:str, directory:str, type:str=None, icon:str=None, tooltip:str=None, key:str=None):
"""
addFavoriteDir(name, directory, type, icon, tooltip, key) -> None.
Add a path to the file choosers favorite directory list. The path name can contain environment variables which will be expanded when the user clicks the favourites button
@param name: Favourite path entry ('Home', 'Desktop', etc.).
@param directory: FileChooser will change to this directory path.
@param type: Optional bitwise OR combination of nuke.IMAGE, nuke.SCRIPT, nuke.FONT or nuke.GEO.
@param icon: Optional filename of an image to use as an icon.
@param tooltip: Optional short text to explain the path and the meaning of the name.
@param key: Optional shortcut key.
@return: None.
"""
return None | 28cbabd79d35151877112dd76ffe2a513a2bfcec | 13,306 |
def save(data):
"""Save cleanup annotations."""
data_and_frames = data.split("_")
data = data_and_frames[0]
frames = data_and_frames[1]
if len(data) == 1:
removed = []
else:
removed = [int(f) for f in data[1:].split(':')]
frames = [int(f) for f in frames[:].split(':')]
#fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/planttag.npz'
fname = APP.basedir + '/' + APP.dlist[APP.targetid] + '/' + APP.tag_name + '.npz'
if len(removed) == 0: # Before: if len(removed) == 0
idx = np.zeros((np.amax(APP.lbls) + 1,), APP.lbls.dtype)
_id = 1
for i in range(1, len(idx)):
if i not in removed:
idx[i] = _id
_id = _id + 1
lbls = idx[APP.lbls]
else:
lbls = APP.lbls
for j in range(len(removed)):
rem = removed[j]
frame = frames[j]
# Remove that label from the frame onwards:
if APP.tag_type == "deletion-onwards":
lbls[:,:,frame:][lbls[:,:,frame:] == rem] = 0
elif APP.tag_type == "deletion-upto":
lbls[:,:,:frame][lbls[:,:,:frame] == rem] = 0
elif APP.tag_type == "deletion-single":
lbls[:,:,frame][lbls[:,:,frame] == rem] = 0
#
tag = [-1]*lbls.max()
for i in range(len(removed)):
tag[removed[i]] = frames[i]
npz = {'removed': np.asarray(removed, np.int16), 'labels': lbls, "frames": np.asarray(frames, np.int16), \
APP.tag_name: tag}
np.savez_compressed(fname, **npz)
return ' ' | be62bd3933374ebac8e735be0f66ca79a6273b35 | 13,307 |
import re
from subprocess import CalledProcessError, check_call, Popen, PIPE
def list_runs_in_swestore(path, pattern=RUN_RE, no_ext=False):
"""
Will list runs that exist in swestore
:param str path: swestore path to list runs
:param str pattern: regex pattern for runs
"""
try:
status = check_call(['icd', path])
proc = Popen(['ils'], stdout=PIPE)
contents = [c.strip() for c in proc.stdout.readlines()]
runs = [r for r in contents if re.match(pattern, r)]
if no_ext:
runs = [r.split('.')[0] for r in runs]
return runs
except CalledProcessError:
return [] | 616089f049129b284ae6575609741620f2ac48f6 | 13,308 |
def linear_regression(
XL: ArrayLike, YP: ArrayLike, Q: ArrayLike
) -> LinearRegressionResult:
"""Efficient linear regression estimation for multiple covariate sets
Parameters
----------
XL
[array-like, shape: (M, N)]
"Loop" covariates for which N separate regressions will be run
YP
[array-like, shape: (M, O)]
Continuous traits that have had core covariates eliminated through orthogonal projection.
Q
[array-like, shape: (M, P)]
Orthonormal matrix computed by applying QR factorization to covariate matrix
Returns
-------
Dataclass containing:
beta : [array-like, shape: (N, O)]
Beta values associated with each loop covariate and outcome
t_value : [array-like, shape: (N, O)]
T statistics for each beta
p_value : [array-like, shape: (N, O)]
P values as float in [0, 1]
"""
if set([x.ndim for x in [XL, YP, Q]]) != {2}:
raise ValueError("All arguments must be 2D")
n_core_covar, n_loop_covar, n_obs, n_outcome = (
Q.shape[1],
XL.shape[1],
YP.shape[0],
YP.shape[1],
)
dof = n_obs - n_core_covar - 1
if dof < 1:
raise ValueError(
"Number of observations (N) too small to calculate sampling statistics. "
"N must be greater than number of core covariates (C) plus one. "
f"Arguments provided: N={n_obs}, C={n_core_covar}."
)
# Apply orthogonal projection to eliminate core covariates
# Note: QR factorization or SVD should be used here to find
# what are effectively OLS residuals rather than matrix inverse
# to avoid need for MxM array; additionally, dask.lstsq fails
# with numpy arrays
LS = Q @ (Q.T @ XL)
assert XL.shape == LS.shape
XLP = XL - LS
assert XLP.shape == (n_obs, n_loop_covar)
# Estimate coefficients for each loop covariate
# Note: A key assumption here is that 0-mean residuals
# from projection require no extra terms in variance
# estimate for loop covariates (columns of G), which is
# only true when an intercept is present.
XLPS = (XLP ** 2).sum(axis=0, keepdims=True).T
assert XLPS.shape == (n_loop_covar, 1)
B = (XLP.T @ YP) / XLPS
assert B.shape == (n_loop_covar, n_outcome)
# Compute residuals for each loop covariate and outcome separately
YR = YP[:, np.newaxis, :] - XLP[..., np.newaxis] * B[np.newaxis, ...]
assert YR.shape == (n_obs, n_loop_covar, n_outcome)
RSS = (YR ** 2).sum(axis=0)
assert RSS.shape == (n_loop_covar, n_outcome)
# Get t-statistics for coefficient estimates
T = B / np.sqrt(RSS / dof / XLPS)
assert T.shape == (n_loop_covar, n_outcome)
# Match to p-values
# Note: t dist not implemented in Dask so this must be delayed,
# see https://github.com/dask/dask/issues/6857
P = da.map_blocks(
lambda t: 2 * stats.distributions.t.sf(np.abs(t), dof),
map_blocks_asnumpy(T),
dtype="float64",
)
assert P.shape == (n_loop_covar, n_outcome)
P = np.asarray(P, like=T)
return LinearRegressionResult(beta=B, t_value=T, p_value=P) | 3059987940eefce0a4a401c096d8b7be0d3ce1d7 | 13,309 |
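# Illustrative, self-contained sketch of the same projection-based estimation with plain
# numpy/scipy (not the library's own code path, which uses dask); all array names and
# sizes below are hypothetical.
import numpy as np
from scipy import stats
rng_demo = np.random.default_rng(0)
n_obs, n_core, n_loop, n_out = 50, 3, 4, 2
XC = np.c_[np.ones(n_obs), rng_demo.normal(size=(n_obs, n_core - 1))]  # core covariates with intercept
XL = rng_demo.normal(size=(n_obs, n_loop))                             # loop covariates
Y = rng_demo.normal(size=(n_obs, n_out))                               # raw traits
Q, _ = np.linalg.qr(XC)            # orthonormal basis of the core covariates
YP = Y - Q @ (Q.T @ Y)             # project core covariates out of the traits
XLP = XL - Q @ (Q.T @ XL)          # same projection applied to the loop covariates
dof = n_obs - n_core - 1
XLPS = (XLP ** 2).sum(axis=0, keepdims=True).T
B = (XLP.T @ YP) / XLPS            # per-covariate OLS betas
RSS = ((YP[:, None, :] - XLP[..., None] * B[None, ...]) ** 2).sum(axis=0)
T = B / np.sqrt(RSS / dof / XLPS)
P = 2 * stats.distributions.t.sf(np.abs(T), dof)
assert B.shape == T.shape == P.shape == (n_loop, n_out)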
import math
def orthogonal_decomposition(C, tr_error, l_exp):
"""
Orthogonal decomposition of the covariance matrix to determine the meaningful directions
:param C: covariance matrix
:param tr_error: allowed truncation error
:param l_exp: expansion order
:return: transformation matrix Wy, number of terms N_t and meaningful directions k
"""
# eigenvalues and eigenvectors
v, w = np.linalg.eig(C)
v_sum = np.sum(v)
err_v = 1
k = 0 # meaningful directions
while err_v > tr_error:
err_v = 1 - v[k] / v_sum
k += 1
N_t = int(math.factorial(l_exp + k) / (math.factorial(k) * math.factorial(l_exp))) # number of terms
Wy = w[:,:k] # and for now, do not define Wz
return Wy, N_t, k | d6920b31a0503ad15b98631de352da690b6761b8 | 13,310 |
import http
import json
def get_data():
"""Reads the current state of the world"""
server = http.client.HTTPConnection(URL)
server.request('GET','/data')
response = server.getresponse()
if (response.status == 200):
data = response.read()
response.close()
return json.loads(data.decode())
else:
return UnexpectedResponse(response) | 3c0563f2776c60ea103db154c63e2053b1d7d045 | 13,311 |
def chi_angles(filepath, model_id=0):
"""Calculate chi angles for a given file in the PDB format.
:param filepath: Path to the PDB file.
:param model_id: Model to be used for chi calculation.
:return: A list composed by a list of chi1, a list of chi2, etc.
"""
torsions_list = _sidechain_torsions(filepath, model_id)
chis = [item[2] for item in torsions_list]
return list(zip(*chis)) | 85c192fe6c272cad5cdd7dbb4f570f1f78284057 | 13,312 |
import numpy as np
def surface_sphere(radius):
"""
Return the x, y and z coordinate arrays of the surface of a sphere with the given
radius, stacked into a single (3, 100, 100) array.
"""
phi, theta = np.mgrid[0.0:np.pi:100j, 0.0:2.0*np.pi:100j]
x_blank_sphere = radius*np.sin(phi)*np.cos(theta)
y_blank_sphere = radius*np.sin(phi)*np.sin(theta)
z_blank_sphere = radius*np.cos(phi)
sphere_surface = np.array(([x_blank_sphere,
y_blank_sphere,
z_blank_sphere]))
return sphere_surface | 25750b7c4a57dd3a2f3ebb5a2a041fa1f5e56c89 | 13,313 |
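# Quick check of surface_sphere above: the result stacks the x, y and z meshes of a
# 100x100 spherical grid, and every point lies at the requested radius.
sphere = surface_sphere(2.0)
assert sphere.shape == (3, 100, 100)
assert np.allclose(np.sqrt((sphere ** 2).sum(axis=0)), 2.0)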
import re
def format_bucket_objects_listing(bucket_objects):
"""Returns a formated list of buckets.
Args:
buckets (list): A list of buckets objects.
Returns:
The formated list as string
"""
out = ""
i = 1
for o in bucket_objects:
# Shorten to 65 chars max, remove linebreaks
name = re.sub(r'[\n\r]', ' ',
o.name[:63] + '..'
if len(o.name) > 65
else o.name)
size = sizeof_fmt(o.size)
time = f"{o.time_modified:%Y-%m-%d %H:%M}" \
if o.time_modified is not None else ""
out += (f"{i:>4} {name:65} {size:8} {time:16}\n")
i += 1
return out | f5268d148687338ed606b1593065a0c1842cac00 | 13,314 |
def charts(chart_type, cmid, start_date, end_date=None):
"""
Get the given type of charts for the artist.
https://api.chartmetric.com/api/artist/:id/:type/charts
**Parameters**
- `chart_type`: string type of charts to pull, choose from
'spotify_viral_daily', 'spotify_viral_weekly',
'spotify_top_daily', 'spotify_top_weekly',
'applemusic_top', 'applemusic_daily',
'applemusic_albums', 'itunes_top',
'itunes_albums', 'shazam', 'beatport'
- `cmid`: string or int Chartmetric artist ID
- `start_date`: string of start date in ISO format
- `end_date`: string of end date in ISO format
**Returns**
A list of dictionaries of specific type of charts for the given artist.
"""
urlhandle = f"/artist/{cmid}/{chart_type}/charts"
params = {
"since": start_date,
"until": end_date if end_date else utilities.strDateToday(),
}
data = utilities.RequestData(urlhandle, params)
return utilities.RequestGet(data)["data"] | 2dfafd09f53bf2add20afcba8c85a6f081e551af | 13,315 |
import sys
def fake_traceback(exc_value, tb, filename, lineno):
"""Produce a new traceback object that looks like it came from the
template source instead of the compiled code. The filename, line
number, and location name will point to the template, and the local
variables will be the current template context.
:param exc_value: The original exception to be re-raised to create
the new traceback.
:param tb: The original traceback to get the local variables and
code info from.
:param filename: The template filename.
:param lineno: The line number in the template source.
"""
if tb is not None:
# Replace the real locals with the context that would be
# available at that point in the template.
locals = get_template_locals(tb.tb_frame.f_locals)
locals.pop("__jinja_exception__", None)
else:
locals = {}
globals = {
"__name__": filename,
"__file__": filename,
"__jinja_exception__": exc_value,
}
# Raise an exception at the correct line number.
code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
# Build a new code object that points to the template file and
# replaces the location with a block name.
try:
location = "template"
if tb is not None:
function = tb.tb_frame.f_code.co_name
if function == "root":
location = "top-level template code"
elif function.startswith("block_"):
location = 'block "%s"' % function[6:]
# Collect arguments for the new code object. CodeType only
# accepts positional arguments, and arguments were inserted in
# new Python versions.
code_args = []
for attr in (
"argcount",
"posonlyargcount", # Python 3.8
"kwonlyargcount", # Python 3
"nlocals",
"stacksize",
"flags",
"code", # codestring
"consts", # constants
"names",
"varnames",
("filename", filename),
("name", location),
"firstlineno",
"lnotab",
"freevars",
"cellvars",
):
if isinstance(attr, tuple):
# Replace with given value.
code_args.append(attr[1])
continue
try:
# Copy original value if it exists.
code_args.append(getattr(code, "co_" + attr))
except AttributeError:
# Some arguments were added later.
continue
code = CodeType(*code_args)
except Exception:
# Some environments such as Google App Engine don't support
# modifying code objects.
pass
# Execute the new code, which is guaranteed to raise, and return
# the new traceback without this frame.
try:
exec(code, globals, locals)
except BaseException:
return sys.exc_info()[2].tb_next | fd89728b7703e344e2ffe1b5217f21cbccc8f322 | 13,316 |
def search_candidates(api_key, active_status="true"):
"""
https://api.open.fec.gov/developers#/candidate/get_candidates_
"""
query = """https://api.open.fec.gov/v1/candidates/?sort=name&sort_hide_null=false&is_active_candidate={active_status}&sort_null_only=false&sort_nulls_last=false&page=1&per_page=20&api_key={api_key}""".format(
api_key=api_key,
active_status=active_status
)
return get_response(
query=query
) | 9ec1c39541cda87f1d1618d4e5497b8215a5f4b4 | 13,317 |
from scipy.io import loadmat  # assumed source of loadmat used below
def load_dat(file_name):
"""
Loads the specified .mat (Matlab) data file and returns
its contents as numpy arrays
"""
data = loadmat(file_name)
y = data['y']
X = data['X']
ytest = data['ytest']
Xtest = data['Xtest']
yval = data['yval']
Xval = data['Xval']
return X,y,Xtest,ytest,Xval,yval | 5c3de04f60ea803e1c70dbc2103425b10ee58567 | 13,318 |
def get_specific_pos_value(img, pos):
"""
Parameters
----------
img : ndarray
image data.
pos : list
pos[0] is the horizontal coordinate, pos[1] is the vertical coordinate.
"""
return img[pos[1], pos[0]] | 3929b29fa307a7e8b5282783c16639cacb2ab805 | 13,319 |
from typing import List
from typing import Tuple
from typing import Dict
from typing import Any
from typing import Set
def transpose_tokens(
cards: List[MTGJSONCard]
) -> Tuple[List[MTGJSONCard], List[Dict[str, Any]]]:
"""
Sometimes, tokens slip through and need to be transplanted
back into their appropriate array. This method will allow
us to pluck the tokens out and return them home.
:param cards: Cards+Tokens to iterate
:return: Cards, Tokens as two separate lists
"""
# Order matters with these, as if you do cards first
# it will shadow the tokens lookup
# Single faced tokens are easy
tokens = [
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
for card in cards
if card.get("layout") in ["token", "emblem"]
]
# Do not duplicate double faced tokens
done_tokens: Set[str] = set()
for card in cards:
if (
card.get("layout") == "double_faced_token"
and card.get("scryfallId") not in done_tokens
):
tokens.append(
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
)
done_tokens.add(card.get("scryfallId"))
# Remaining cards, without any kind of token
cards = [
card
for card in cards
if card.get("layout") not in ["token", "double_faced_token", "emblem"]
]
return cards, tokens | 339e8d18a4c80e168411c874f8afac97b14db77b | 13,320 |
import datetime
import arrow
def from_local(local_dt, timezone=None):
"""Converts the given local datetime to a universal datetime."""
if not isinstance(local_dt, datetime.datetime):
raise TypeError('Expected a datetime object')
if timezone is None:
a = arrow.get(local_dt)
else:
a = arrow.get(local_dt, timezone)
return a.to('UTC').naive | 6b4eb44aa66c04a23aa8dac2bbe882e5619cd45f | 13,321 |
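# Hypothetical usage of from_local above (requires the arrow package, as the function does):
# a naive local datetime interpreted in a named timezone comes back as a naive UTC datetime.
naive_utc = from_local(datetime.datetime(2021, 6, 1, 12, 0), timezone="Europe/Paris")
assert naive_utc == datetime.datetime(2021, 6, 1, 10, 0)  # CEST is UTC+2 in June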
import re
def mrefresh_to_relurl(content):
"""Get a relative url from the contents of a metarefresh tag"""
urlstart = re.compile('.*URL=')
_, url = content.split(';')
url = urlstart.sub('', url)
return url | 90cc3dbace5d4b001698612f9263309fa95aac8b | 13,322 |
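# Quick example for mrefresh_to_relurl above: a typical meta-refresh content attribute
# yields its relative URL.
assert mrefresh_to_relurl("0; URL=/archive/page2.html") == "/archive/page2.html"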
import torch
import torch.nn.functional as F
def simclr_loss_func(
z1: torch.Tensor,
z2: torch.Tensor,
temperature: float = 0.1,
extra_pos_mask=None,
) -> torch.Tensor:
"""Computes SimCLR's loss given batch of projected features z1 from view 1 and
projected features z2 from view 2.
Args:
z1 (torch.Tensor): NxD Tensor containing projected features from view 1.
z2 (torch.Tensor): NxD Tensor containing projected features from view 2.
temperature (float): temperature factor for the loss. Defaults to 0.1.
extra_pos_mask (Optional[torch.Tensor]): boolean mask containing extra positives other
than normal across-view positives. Defaults to None.
Returns:
torch.Tensor: SimCLR loss.
"""
device = z1.device
b = z1.size(0)
z = torch.cat((z1, z2), dim=0)
z = F.normalize(z, dim=-1)
logits = torch.einsum("if, jf -> ij", z, z) / temperature
logits_max, _ = torch.max(logits, dim=1, keepdim=True)
logits = logits - logits_max.detach()
# positive mask are matches i, j (i from aug1, j from aug2), where i == j and matches j, i
pos_mask = torch.zeros((2 * b, 2 * b), dtype=torch.bool, device=device)
pos_mask[:, b:].fill_diagonal_(True)
pos_mask[b:, :].fill_diagonal_(True)
# if we have extra "positives"
if extra_pos_mask is not None:
pos_mask = torch.bitwise_or(pos_mask, extra_pos_mask)
# all matches excluding the main diagonal
logit_mask = torch.ones_like(pos_mask, device=device).fill_diagonal_(0)
exp_logits = torch.exp(logits) * logit_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positives
mean_log_prob_pos = (pos_mask * log_prob).sum(1) / pos_mask.sum(1)
# loss
loss = -mean_log_prob_pos.mean()
return loss | b9d7880ec1c8a66321623a0061d201f9bbaaa426 | 13,323 |
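# Minimal smoke test for simclr_loss_func above (shapes are hypothetical): two augmented
# views of a batch of 8 samples with 128-d projections give a positive scalar loss.
z1 = torch.randn(8, 128)
z2 = torch.randn(8, 128)
loss = simclr_loss_func(z1, z2, temperature=0.1)
assert loss.dim() == 0 and loss.item() > 0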
def find_node_types(G, edge_type):
"""
:param G: NetworkX graph.
:param edge_type: Edge type.
:return: Node types that correspond to the edge type.
"""
for e in G.edges:
if G[e[0]][e[1]][e[2]]['type'] == edge_type:
u, v = e[0], e[1]
break
utype = G.nodes[u]['type']
vtype = G.nodes[v]['type']
try:
if int(utype) > int(vtype):
return utype, vtype
else:
return vtype, utype
except:
return utype, vtype | 970bbbabe172460a974dbf961500def2280b9fe1 | 13,324 |
import numpy as np
import scipy.spatial
def distance_point_point(p1, p2):
"""Calculates the euclidian distance between two points or sets of points
>>> distance_point_point(np.array([1, 0]), np.array([0, 1]))
1.4142135623730951
>>> distance_point_point(np.array([[1, 1], [0, 0]]), np.array([0, 1]))
array([1., 1.])
>>> distance_point_point(np.array([[1, 0], [0, 0]]), np.array([[0, 0], [0, -3]]))
array([1., 3.])
"""
return scipy.spatial.minkowski_distance(p1, p2) | 481733330a99576540d2a80676d51d315b6406f7 | 13,325 |
def switch(
confs=None, remain=False, all_checked=False, _default=None, **kwargs
):
"""
Execute the first statement among confs whose task result is True.
If remain, process all statement confs starting from the first checked
conf.
:param confs: task confs to check. Each one may contain a task action at
the key 'action' in conf.
:type confs: str or dict or list
:param bool remain: if True, execute all remaining actions after the
first checked condition.
:param bool all_checked: execute all statements where conditions are
checked.
:param _default: default task to process if others have not been checked.
:type _default: str or dict
:return: statement result or list of statement results if remain.
:rtype: list or object
"""
# init result
result = [] if remain else None
# check if remain and one task has already been checked.
remaining = False
if confs is not None:
if isinstance(confs, string_types) or isinstance(confs, dict):
confs = [confs]
for conf in confs:
# check if task has to be checked or not
check = remaining
if not check:
# try to check current conf
check = run(conf=conf, **kwargs)
# if task is checked or remaining
if check:
if STATEMENT in conf: # if statements exist, run them
statement = conf[STATEMENT]
statement_result = run(statement, **kwargs)
# save result
if not remain: # if not remain, result is statement_result
result = statement_result
else: # else, add statement_result to result
result.append(statement_result)
# if remain
if remain:
# change of remaining status
if not remaining:
remaining = True
elif all_checked:
pass
else: # leave execution if one statement has been executed
break
# process _default statement if necessary
if _default is not None and (remaining or (not result) or all_checked):
last_result = run(_default, **kwargs)
if not remain:
result = last_result
else:
result.append(last_result)
return result | aba656d4a6d06f721551aa49ec1521d0fa9444d3 | 13,326 |
import time
from multiprocessing import Process, Queue  # assumed source of Process/Queue used below
def makeProcesses(nChildren):
"""
Create and start all the worker processes
"""
global taskQueue,resultsQueue,workers
if nChildren < 0:
print('makeProcesses: ', nChildren, ' is too small')
return False
if nChildren > 3:
print('makeProcesses: ', nChildren, ' is too large')
return False
# Create a task queue for each worker to receive the image segment
taskQueue = []
for k in range(nChildren):
taskQueue.append(Queue())
resultsQueue = Queue() # Single results queue
#Create and start the workers
workers = []
for k in range(nChildren):
p = Process(target=worker, args=(k,taskQueue[k],resultsQueue))
workers.append(p)
for p in workers:
p.start()
time.sleep(2)
return True | 748372d9c83917841eeba5e400f37a5ecf5961dd | 13,327 |
def create_moleculenet_model(model_name):
"""Create a model.
Parameters
----------
model_name : str
Name for the model.
Returns
-------
Created model
"""
for func in [create_bace_model, create_bbbp_model, create_clintox_model, create_esol_model,
create_freesolv_model, create_hiv_model, create_lipophilicity_model,
create_muv_model, create_pcba_model, create_sider_model, create_tox21_model,
create_toxcast_model]:
model = func(model_name)
if model is not None:
return model
return None | 19f15eb4fd1a5c1befaef306cb7d146d7933919e | 13,328 |
from typing import Collection
from typing import Optional
from typing import Set
def detect_daml_lf_dir(paths: "Collection[str]") -> "Optional[str]":
"""
Find the biggest Daml-LF v1 version in the set of file names from a Protobuf archive, and return
the path that contains the associated files (with a trailing slash).
If there is ever a Daml-LF 2, then this logic will need to be revisited; however, when that
happens, there are likely to be even larger changes required so we won't worry about this too
much right now.
:param paths: The paths in a Protobuf zipfile to examine.
:return: The root directory of a target Daml-LF protobuf version, stripped of a prefix.
>>> detect_daml_lf_dir([
... "protos-1.15.0/com/daml/daml_lf_1_10/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_9/something.proto",
... "protos-1.15.0/com/daml/daml_lf_dev/something.proto",
... "protos-1.15.0/com/daml/daml_lf_1_what/something.proto",
... ])
'com/daml/daml_lf_1_10/'
"""
daml_lf_prefix = "com/daml/daml_lf_1_"
minor_versions = set() # type: Set[int]
for p in paths:
_, _, truncated_path = p.partition("/")
if truncated_path.startswith(daml_lf_prefix):
version_str, _, _ = truncated_path[len(daml_lf_prefix) :].partition("/")
try:
minor_versions.add(int(version_str))
except ValueError:
# skip over unrecognized directory names
pass
if minor_versions:
return f"{daml_lf_prefix}{max(minor_versions)}/"
else:
return None | acd2b99236a3534ec64c375893b40511995e6dfc | 13,329 |
import numpy as np
def random_mini_batches(X, Y, mini_batch_size):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (m, n_H, n_W, c)
Y -- true "label" vector of shape (m, num_classes)
mini_batch_size -- size of mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
# Extract the input data shapes.
m = X.shape[0]
num_classes = Y.shape[1]
# Instantiate an empty list to hold mini batch X-Y tuples with size batch_size.
mini_batches = []
# Shuffle X and Y.
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation, :, :, :]
shuffled_Y = Y[permutation, :]
# Divide (shuffled_X, shuffled_Y) into batches minus the end case.
num_complete_minibatches = m // mini_batch_size
for k in range(0, num_complete_minibatches):
mini_batch_X = shuffled_X[ k*mini_batch_size:(k+1)*mini_batch_size, :,:,:]
mini_batch_Y = shuffled_Y[ k*mini_batch_size:(k+1)*mini_batch_size, :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handle the end case if the last mini-batch < mini_batch_size.
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[ num_complete_minibatches*mini_batch_size: , :,:,:]
mini_batch_Y = shuffled_Y[ num_complete_minibatches*mini_batch_size: , :]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches | fbad986073bfb867f5e35bf1a0ee639b644f00bb | 13,330 |
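# Usage sketch for random_mini_batches above with synthetic data: 10 RGB 8x8 "images" and
# 3 one-hot classes, batch size 4, gives two full batches plus one remainder batch of 2.
X_demo = np.random.rand(10, 8, 8, 3)
Y_demo = np.eye(3)[np.random.randint(0, 3, size=10)]
batches = random_mini_batches(X_demo, Y_demo, mini_batch_size=4)
assert len(batches) == 3
assert batches[0][0].shape == (4, 8, 8, 3) and batches[-1][0].shape == (2, 8, 8, 3)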
import types
def classifyContent(text):
"""
Uses the NLP provider's SDK to perform a content classification operation.
Arguments:
text {String} -- Text to be analyzed.
"""
document = types.Document(
content=text,
type=enums.Document.Type.PLAIN_TEXT,
language='en')
try:
response = client.classify_text(document=document)
values = []
for category in response.categories:
values.append({
"category": category.name,
"confidence": category.confidence
})
return(Classiciation(values, ""))
except Exception as e:
return Classiciation([], str(e.args)) | eebb4ebef4811748d5fb9e130e582dae289f9ce7 | 13,331 |
def print_instance_summary(instance, use_color='auto'):
""" Print summary info line for the supplied instance """
colorize_ = partial(colorize, use_color=use_color)
name = colorize_(instance.name, "yellow")
instance_type = instance.extra['gonzo_size']
if instance.state == NodeState.RUNNING:
status_colour = "green"
else:
status_colour = "red"
instance_status = NodeState.tostring(instance.state)
status = colorize_(instance_status, status_colour)
if 'owner' in instance.extra['gonzo_tags']:
owner = instance.extra['gonzo_tags']['owner']
else:
owner = "---"
uptime = format_uptime(instance.extra['gonzo_created_time'])
uptime = colorize_(uptime, "blue")
availability_zone = instance.extra['gonzo_az']
result_list = [
name,
instance_type,
status,
owner,
uptime,
availability_zone,
]
return result_list | e250645e040fba4bbd9df0e86bc3711d3f8ac51e | 13,332 |
import datetime
def generate_blob_sas_token(blob, container, blob_service, permission=BlobPermissions.READ):
"""Generate a blob URL with SAS token."""
sas_token = blob_service.generate_blob_shared_access_signature(
container, blob.name,
permission=permission,
start=datetime.datetime.utcnow() - datetime.timedelta(minutes=15),
expiry=datetime.datetime.utcnow() + datetime.timedelta(days=FileUtils.SAS_EXPIRY_DAYS))
return blob_service.make_blob_url(container, quote(blob.name.encode('utf-8')), sas_token=sas_token) | e3993c3dd075516bce07221cf9351ab74a431a27 | 13,333 |
from typing import Sequence
def rewrite_complex_signature(function, signature: Sequence[tf.TensorSpec]):
"""Compatibility layer for testing complex numbers."""
if not all([spec.dtype.is_complex for spec in signature]):
raise NotImplementedError("Signatures with mixed complex and non-complex "
"tensor specs are not supported.")
# Rewrite the signature, replacing all complex tensors with pairs of real
# and imaginary tensors.
real_imag_signature = []
for spec in signature:
new_dtype = tf.float32 if spec.dtype.size == 8 else tf.float64
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
real_imag_signature.append(tf.TensorSpec(spec.shape, new_dtype))
return _complex_wrapper(function), real_imag_signature | e541ef96c6b4f2492847443e8ca45f18fc9383ff | 13,334 |
def get_args(argv: list):
"""gets the args and dictionarize them"""
if len(argv) not in [5,7]:
Errors.args_error()
data = {}
# getting the type of the title
if "-" in argv[1]:
data["type"] = "series" if argv[1] == "-s" else "movie" if argv[1] == "-m" else None
else:
Errors.args_error()
# getting the title itself
data["title"] = argv[2]
data["format"] = argv[3]
data["language"] = argv[4]
if data["type"] == "series":
if len(argv) != 7:
Errors.args_error()
try:
data["season"] = int(argv[5])
data["episode"] = int(argv[6])
except:
Errors.args_error()
return data | 6f29e63bc19b57cdf9f49cf2dd7b099c62a604a0 | 13,335 |
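# Example invocation of get_args above (argv[0] is the script name; the Errors helper from
# the original module is only reached on bad input, so this call never touches it).
demo_args = get_args(["subs.py", "-s", "The Wire", "srt", "en", "3", "7"])
assert demo_args == {"type": "series", "title": "The Wire", "format": "srt",
                     "language": "en", "season": 3, "episode": 7}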
def fund_with_erc20(
to_fund_address, erc20_token_contract, ether_amount=0.1, account=None
):
"""Send a specified amount of an ERC20 token to an address.
Args:
to_fund_address (address): Address to send to the tokens to.
erc20_token_contract (Contract): Contract of the ERC20 token.
ether_amount (float, optional): Amount to be sent, in ETHER. Defaults to 0.1.
account (address, optional): Account from which to send the transaction. Defaults to None.
Returns:
TransactionReceipt
"""
account = account if account else get_account()
print(
f"Funding {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}..."
)
tx = erc20_token_contract.transfer(
to_fund_address,
Web3.toWei(ether_amount, "ether"),
{"from": account},
)
tx.wait(1)
print(
f"Funded {to_fund_address} with {ether_amount} {erc20_token_contract.symbol()}."
)
return tx | 6eaf46519645b8b6bbf36b39ea106c05924ab51f | 13,336 |
from carbonplan_trace.v1.glas_preprocess import select_valid_area # avoid circular import
def energy_adj_ground_to_sig_end(ds):
"""
Waveform energy from the ground peak. We calculated senergy_whrc as the energy of the waveform (in digital counts) from the ground peak
to the signal end multiplied by two. Ground peak defined as whichever of the two lowest peaks has greater amplitude. We then applied the
following linear transformation in order to calculate on the same scale as data published by Margolis et al. (2015)
senergy = -4.397006 + 0.006208 * senergy_whrc
"""
path = 'gs://carbonplan-climatetrace/inputs/volt_table.csv'
volt_table = pd.read_csv(path)
volt_to_digital_count = volt_table.set_index('volt_value')['ind'].to_dict()
wf_in_digital_count = xr.apply_ufunc(
volt_to_digital_count.__getitem__,
ds.rec_wf.astype(float).round(6).fillna(-0.195279),
vectorize=True,
dask='parallelized',
output_dtypes=[int],
)
ds = get_dist_metric_value(ds, metric='adj_ground_peak_dist_actual_wf')
# the processed wf is from sig beg to sig end, select adj ground peak to sig end instead
ground_energy = select_valid_area(
bins=ds.rec_wf_sample_dist,
wf=wf_in_digital_count,
signal_begin_dist=ds.adj_ground_peak_dist_actual_wf,
signal_end_dist=ds.sig_end_dist,
)
# make sure dimensions matches up
dims = ds.processed_wf.dims
ground_energy = ground_energy.transpose(dims[0], dims[1])
senergy_whrc = ground_energy.sum(dim="rec_bin") * 2
return -4.397006 + 0.006208 * senergy_whrc | b1f2f9acacb2186694aec3249632fea1fd4f7a58 | 13,337 |
import logging
def get_previous_version(versions: dict, app: str) -> str:
"""Looks in the app's .version_history to retrieve the prior version"""
try:
with open(f"{app}/.version_history", "r") as fh:
lines = [line.strip() for line in fh]
except FileNotFoundError:
logging.warning(f"No .version_history for {app}")
return ""
if versions[app] != lines[-1]:
logging.warning(
f"Mismatch in data:\n\tCurrent version is {versions[app]}"
f" but most recent line in .version_history is {lines[-1]}"
)
return ""
elif len(lines) < 2:
logging.warning("No prior version recorded")
return ""
return lines[-2] | d3a4aec5c3bc842181aa3901971774761866c3e5 | 13,338 |
def healthcheck() -> bool:
"""FastAPI server healthcheck."""
return True | 1767229ccda121e88264093c479d2bccf994a7e9 | 13,339 |
def ToHexStr(num):
"""
Convert the returned error code to its hexadecimal representation
:param num: error code
:return: hexadecimal string
"""
chaDic = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f'}
hexStr = ""
if num < 0:
num = num + 2**32
while num >= 16:
digit = num % 16
hexStr = chaDic.get(digit, str(digit)) + hexStr
num //= 16
hexStr = chaDic.get(num, str(num)) + hexStr
return hexStr | b6cf482defdc9f4fcf9ce64903e7a718e096bacb | 13,340 |
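# Two quick checks for ToHexStr above: a plain positive code, and a negative code wrapped
# to 32 bits.
assert ToHexStr(255) == 'ff'
assert ToHexStr(-1) == 'ffffffff'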
import requests
def getSBMLFromBiomodelsURN(urn):
""" Get SBML string from given BioModels URN.
Searches for a BioModels identifier in the given urn and retrieves the SBML from biomodels.
For example:
urn:miriam:biomodels.db:BIOMD0000000003.xml
Handles redirects of the download page.
:param urn:
:return: SBML string for given model urn
"""
if ":" not in urn:
raise ValueError("The URN", urn, "is not in the correct format: it must be divided by colons in a format such as 'urn:miriam:biomodels.db:BIOMD0000000003.xml'.")
core = urn.split(":")[-1].split(".")[0]
url = "https://www.ebi.ac.uk/biomodels/model/download/" + core + "?filename="+ core + "_url.xml"
response = requests.get(url, allow_redirects=True)
response.raise_for_status()
sbml = response.content
# bytes array in py3
try:
sbml_str = str(sbml.decode("utf-8"))
except:
sbml_str = str(sbml)
return sbml_str | 9a28f4a0619ebed6f9e272d84331482442ae9fb8 | 13,341 |
def draw(k, n):
"""
Select k things from a pool of n without replacement.
"""
# At k == n/4, an extra 0.15*k draws are needed to get k unique draws
if k > n/4:
result = rng.permutation(n)[:k]
else:
s = set()
result = np.empty(k, 'i')
for i in range(k):
p = rng.randint(n)
while p in s:
p = rng.randint(n)
s.add(p)
result[i] = p
return result | d7135d659fc4e702942ea2da0f794fcb9d77bfd2 | 13,342 |
def print_version(args):
"""Print the version (short or long)"""
# Long version
if len(args) > 0 and args[0] == '--full':
apk_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_CLIENT, 'apk_version')
bundle_version = dtfglobals.get_generic_global(
dtfglobals.CONFIG_SECTION_BINDINGS, 'version')
python_version = constants.VERSION
print("Python Version: %s" % python_version)
print("dtfClient Version: %s" % apk_version)
print("Bindings Version Date: %s" % bundle_version)
else:
print(constants.VERSION)
return 0 | b7bec22239e765d3e9c2131302144b0e44360f2a | 13,343 |
from datetime import datetime
def naturalTimeDifference(value):
"""
Finds the difference between the datetime value given and now()
and returns appropriate humanize form
"""
if isinstance(value, datetime):
delta = datetime.now() - value
if delta.days > 6:
return value.strftime("%b %d") # May 15
if delta.days > 1:
return value.strftime("%A") # Wednesday
elif delta.days == 1:
return 'yesterday' # yesterday
elif delta.seconds > 3600:
if delta.seconds < 7200:
return '1 hour ago'
else:
return str(delta.seconds // 3600) + ' hours ago' # 3 hours ago
elif delta.seconds > 60:
if delta.seconds < 120:
return '1 minute ago'
else:
return str(delta.seconds // 60) + ' minutes ago' # 29 minutes ago
elif delta.seconds > 10:
return str(delta.seconds) + ' seconds ago' # 15 seconds ago
else:
return 'a moment ago' # a moment ago
return defaultfilters.date(value)
else:
return str(value) | ce285358b1b99a4b2df460e6193d2a0970aa4eff | 13,344 |
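# Example for naturalTimeDifference above (values are computed relative to now, so the
# deltas are approximate by construction).
from datetime import timedelta
assert naturalTimeDifference(datetime.now() - timedelta(minutes=5)) == '5 minutes ago'
assert naturalTimeDifference(datetime.now() - timedelta(days=1)) == 'yesterday'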
def raises_Invalid(function):
"""A decorator that asserts that the decorated function raises
dictization_functions.Invalid.
Usage:
@raises_Invalid
def call_validator(*args, **kwargs):
return validators.user_name_validator(*args, **kwargs)
call_validator(key, data, errors)
"""
def call_and_assert(*args, **kwargs):
with pytest.raises(df.Invalid):
function(*args, **kwargs)
return call_and_assert | b1dcaea71cfe95e25029be360645c68a0906346d | 13,345 |
from pathlib import Path
def load_dataset(dataset_path: Path) -> [Instruction]:
"""Returns the program as a list of alu instructions."""
with open_utf8(dataset_path) as file:
program = []
for line in file:
if len(line.strip()) > 0:
instruction = line.strip().split(" ")
if len(instruction) == 2:
instruction.append(None) # No b value
else:
try: # B instruction is constant.
instruction[2] = int(instruction[2])
except ValueError:
pass # B instruction is string reference.
program.append(
Instruction(
func=getattr(alu, instruction[0]),
a=instruction[1],
b=instruction[2],
)
)
return program | 6d9fd90401c750a4aa5d83491c9610984b95ebd1 | 13,346 |
import json
def process_info(args):
"""
Process a single json file
"""
fname, opts = args
with open(fname, 'r') as f:
ann = json.load(f)
f.close()
examples = []
skipped_instances = 0
for instance in ann:
components = instance['components']
if len(components[0]['poly']) < 3:
continue
if 'class_filter'in opts.keys() and instance['label'] not in opts['class_filter']:
continue
# if instance['image_url'].find('Bhoomi') == -1:
# continue
candidates = [c for c in components]
instance['components'] = candidates
if candidates:
examples.append(instance)
return examples, skipped_instances | 8ade5b21db3cca57d9de91311fc57754161673de | 13,347 |
def logout():
"""
Log out of the current session
:return:
"""
# pop removes data from the session (a dict); it returns the removed value, or None if the key does not exist
session.pop('user_id', None)
session.pop('mobile', None)
session.pop('nick_name', None)
# The is_admin session value must be cleared too, otherwise logging out of an admin account and logging in as a regular user would still grant access to the admin backend
session.pop('is_admin', None)
return jsonify(errno=RET.OK, errmsg="Logout successful")
def lift_to_dimension(A,dim):
"""
Creates a view of A of dimension dim (by adding dummy dimensions if necessary).
Assumes a numpy array as input
:param A: numpy array
:param dim: desired dimension of view
:return: returns view of A of appropriate dimension
"""
current_dim = len(A.shape)
if current_dim>dim:
raise ValueError('Can only add dimensions, but not remove them')
if current_dim==dim:
return A
else:
return A.reshape([1]*(dim-current_dim)+list(A.shape)) | bc21d0af45e8073f2e8da6ed57c441739a7385f5 | 13,349 |
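# Usage sketch for lift_to_dimension above: a 2D array viewed as 4D gains leading singleton
# dimensions; an array already at the requested dimension is returned unchanged.
import numpy as np
a_demo = np.arange(12).reshape(3, 4)
assert lift_to_dimension(a_demo, 4).shape == (1, 1, 3, 4)
assert lift_to_dimension(a_demo, 2) is a_demo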
def search(keyword=None):
"""
Display search results in JSON format
Parameters
----------
keyword : str
Search keyword. Default None
"""
return get_json(False, keyword) | 558a34fedb4e05e7a31b655effa47287cbc46202 | 13,350 |
from typing import List
def min_offerings(heights: List[int]) -> int:
"""
Get the max increasing sequence on the left and the right side of current index,
leading upto the current index.
current index's value would be the max of both + 1.
"""
length = len(heights)
if length < 2:
return length
left_inc = [0] * length
right_inc = [0] * length
for index in range(1, length):
if heights[index] > heights[index - 1]:
left_inc[index] = left_inc[index - 1] + 1
if heights[length - 1 - index] > heights[length - index]:
right_inc[length - 1 - index] = right_inc[length - index] + 1
return sum(1 + max(left_inc[index], right_inc[index]) for index in range(length)) | 952ea82815ecb4db6d4d0347f16b0cf5b299f7d3 | 13,351 |
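# Two worked cases for min_offerings above (the classic "minimum offerings" / candy
# distribution problem): heights [1, 2, 2] -> offerings 1, 2, 1 = 4; [3, 1, 2] -> 2, 1, 2 = 5.
assert min_offerings([1, 2, 2]) == 4
assert min_offerings([3, 1, 2]) == 5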
def pretty(value, width=80, nl_width=80, sep='\n', **kw):
# type: (str, int, int, str, **Any) -> str
"""Format value for printing to console."""
if isinstance(value, dict):
return '{{{0} {1}'.format(sep, pformat(value, 4, nl_width)[1:])
elif isinstance(value, tuple):
return '{}{}{}'.format(
sep, ' ' * 4, pformat(value, width=nl_width, **kw),
)
else:
return pformat(value, width=width, **kw) | d2af8d83c2e116ebb1a6e65cd369c3a33adf4585 | 13,352 |
import os
def get_csv_file_path(file_name: str) -> str:
"""
Get absolute path to csv metrics file
Parameters
----------
file_name
Name of metrics file
Returns
-------
file_path
Full path to csv file
"""
return os.path.join(os.getcwd(), file_name) | 67b80193a75669a0635cf70ab1325e755424d654 | 13,353 |
import os
def failed(config: dict, app_logger: logger.Logger) -> bool:
"""
Set migration status to FAILED.
:param config: pymigrate configuration.
:param app_logger: pymigrate configured logger.
:return: True on success, False otherwise.
"""
app_logger.log_with_ts('Running status_failed action for migration {0}'.format(config['MIGRATION_ID']),
logger.Levels.DEBUG)
migrations_directory_path = os.path.join(os.pardir, config['PROJECT_DIR'] + '/' + config['MIGRATIONS_DIR'])
return migration.set_status_failed(config['MIGRATION_ID'], app_logger, migrations_directory_path) | 4d902be5c42c3485a087ef384708e48fcdcd947f | 13,354 |
def niceNumber(v, maxdigit=6):
"""Nicely format a number, with a maximum of 6 digits."""
assert(maxdigit >= 0)
if maxdigit == 0:
return "%.0f" % v
fmt = '%%.%df' % maxdigit
s = fmt % v
if len(s) > maxdigit:
return s.rstrip("0").rstrip(".")
elif len(s) == 0:
return "0"
else:
return s | d57f83272a819d5abf12d71fdac84fe8e92eeb05 | 13,355 |
def query_incident(conditions: list, method=None, plan_status="A", multiple_fields=False):
"""
Queries incidents in Resilient/CP4S
:param conditions: list of conditions as [field_name, field_value, method], or a list of such lists if multiple_fields==True
:param method: set all field conditions to this method (save user from typing it for each field)
:param plan_status: "A" == Active, "C" == Closed
:param multiple_fields: query more than one field
"""
def buildConditionDict(conditions, method=method):
return {
'field_name': conditions[0],
'value': conditions[1],
"method": method if method else conditions[2],
}
conditionList = []
query_uri = u"/incidents/query?return_level=normal"
if not multiple_fields:
conditionList.append(buildConditionDict(conditions))
query_uri += u"&field_handle={}".format(conditions[0])
else:
for condition in conditions:
conditionList.append(buildConditionDict(condition))
query_uri += u"&field_handle={}".format(condition[0])
conditionList.append({
'field_name': 'plan_status',
'method': 'equals',
'value': plan_status
})
query = {
'filters': [{
'conditions': conditionList
}],
"sorts": [{
"field_name": "create_date",
"type": "desc"
}]
}
client = create_authenticated_client()
return client.post(query_uri, query) | 9c037b5d864248bd280db644f4c3868557b59721 | 13,356 |
def get_banner():
"""Return a banner message for the interactive console."""
global _CONN
result = ''
# Note how we are connected
result += 'Connected to %s' % _CONN.url
if _CONN.creds is not None:
result += ' as %s' % _CONN.creds[0]
# Give hint about exiting. Most people exit with 'quit()' which will
# not return from the interact() method, and thus will not write
# the history.
result += '\nPress Ctrl-D to exit'
return result | 5c5e1f2d32548d112f75c933ce4c4e842cdfc993 | 13,357 |
def generate_fish(
n,
channel,
interaction,
lim_neighbors,
neighbor_weights=None,
fish_max_speeds=None,
clock_freqs=None,
verbose=False,
names=None
):
"""Generate some fish
Arguments:
n {int} -- Number of fish to generate
channel {Channel} -- Channel instance
interaction {Interaction} -- Interaction instance
lim_neighbors {list} -- Tuple of min and max neighbors
neighbor_weight {float|list} -- List of neighbor weights
fish_max_speeds {float|list} -- List of max speeds
clock_freqs {int|list} -- List of clock speeds
names {list} -- List of names for your fish
"""
if neighbor_weights is None:
neighbor_weights = [1.0] * n
elif not isinstance(neighbor_weights, list):
neighbor_weights = [neighbor_weights] * n
if fish_max_speeds is None:
fish_max_speeds = [1.0] * n
elif not isinstance(fish_max_speeds, list):
fish_max_speeds = [fish_max_speeds] * n
if clock_freqs is None:
clock_freqs = [1] * n
elif not isinstance(clock_freqs, list):
clock_freqs = [clock_freqs] * n
if names is None:
names = ['Unnamed'] * n
fish = []
for i in range(n):
fish.append(Fish(
id=i,
channel=channel,
interaction=interaction,
lim_neighbors=lim_neighbors,
neighbor_weight=neighbor_weights[i],
fish_max_speed=fish_max_speeds[i],
clock_freq=clock_freqs[i],
verbose=verbose,
name=names[i]
))
return fish | 58d8fb4626d18caa5b093b30588603f335074e4b | 13,358 |
import functools
def log_exception(function):
"""Exception logging wrapper."""
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except:
err = "There was an exception in "
err += function.__name__
logger.exception(err)
return wrapper | f2c86b168550c12d73d87f1b2001a3caab46ceda | 13,359 |
def calculate_triad_connectivity(tt1, tt2, tt3, ipi1, ipi2, tau_z_pre, tau_z_post,
base_time, base_ipi, resting_time, n_patterns):
"""
This function gives you the connectivity among a triad, assuming that all the other temporal structure outside of
the trial is homogeneous
:param tt1:
:param tt2:
:param tt3:
:param ipi1:
:param ipi2:
:param tau_z_pre:
:param tau_z_post:
:param base_time:
:param base_ipi:
:param resting_time:
:param n_patterns:
:return:
"""
Tt = (n_patterns - 3) * base_time + tt1 + tt2 + tt3 + ipi1 + ipi2 + \
(n_patterns - 2) * base_ipi + resting_time
# Single probabilities
p1_pre = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p2_pre = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p3_pre = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_pre)
p1_post = calculate_probability_theo(Tp=tt1, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p2_post = calculate_probability_theo(Tp=tt2, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
p3_post = calculate_probability_theo(Tp=tt3, Tstart=0.0, Ttotal=Tt, tau_z=tau_z_post)
# joint-self probabilities
p11 = calculate_self_probability_theo(T1=tt1, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p22 = calculate_self_probability_theo(T1=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
p33 = calculate_self_probability_theo(T1=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
# Joint probabilities
Ts = tt1 + ipi1
p21 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p31 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1
p12 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt2, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p32 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_pre, tau2=tau_z_post)
Ts = tt1 + ipi1 + tt2 + ipi2
p13 = calculate_joint_probabilities_theo(T1=tt1, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
Ts = tt2 + ipi2
p23 = calculate_joint_probabilities_theo(T1=tt2, Ts=Ts, T2=tt3, Tt=Tt, tau1=tau_z_post, tau2=tau_z_pre)
# Weights
w11 = np.log10(p11 / (p1_pre * p1_post))
w12 = np.log10(p12 / (p1_pre * p2_post))
w13 = np.log10(p13 / (p1_pre * p3_post))
w21 = np.log10(p21 / (p2_pre * p1_post))
w22 = np.log10(p22 / (p2_pre * p2_post))
w23 = np.log10(p23 / (p2_pre * p3_post))
w31 = np.log10(p31 / (p3_pre * p1_post))
w32 = np.log10(p32 / (p3_pre * p2_post))
w33 = np.log10(p33 / (p3_pre * p3_post))
# Betas
beta1 = np.log10(p1_post)
beta2 = np.log10(p2_post)
beta3 = np.log10(p3_post)
# Bs (un-normalized)
B12 = w22 - w12 + beta2 - beta1
B13 = w33 - w13 + beta3 - beta1
B21 = w11 - w21 + beta1 - beta2
B23 = w33 - w32 + beta3 - beta2
B31 = w11 - w31 + beta1 - beta3
B32 = w22 - w32 + beta2 - beta3
return locals() | 6497a68bfdbf9db12a6cbef0784c0aacc3f5e055 | 13,360 |
import datetime
import random
def random_date_from(date,
min_td=datetime.timedelta(seconds=0),
max_td=datetime.timedelta(seconds=0)):
"""
Produces a datetime at a random offset from date.
Parameters:
date: datetime
The reference datetime.
min_td: timedelta, optional
The minimum offset from the reference datetime (could be negative).
max_td: timedelta, optional
The maximum offset from the reference datetime (could be negative).
Return:
datetime
A new_date such that (date + min_td) <= new_date < (date + max_td).
"""
min_s = min(min_td.total_seconds(), max_td.total_seconds())
max_s = max(min_td.total_seconds(), max_td.total_seconds())
offset = random.uniform(min_s, max_s)
return date + datetime.timedelta(seconds=offset) | 2392e6684de81f5e693a7e6fbe4934940df5eada | 13,361 |
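# Example for random_date_from above (window chosen arbitrarily): a timestamp drawn within
# plus or minus one day of a reference date.
ref = datetime.datetime(2021, 1, 15, 12, 0)
sample = random_date_from(ref, min_td=datetime.timedelta(days=-1), max_td=datetime.timedelta(days=1))
assert ref - datetime.timedelta(days=1) <= sample <= ref + datetime.timedelta(days=1)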
import numpy as np
def generate_log_normal_dist_value(frequency, mu, sigma, draws, seed_value):
"""
Generates random values using a lognormal distribution,
given a specific mean (mu) and standard deviation (sigma).
https://stackoverflow.com/questions/51609299/python-np-lognormal-gives-infinite-
results-for-big-average-and-st-dev
The parameters mu and sigma in np.random.lognormal are not the mean
and STD of the lognormal distribution. They are the mean and STD
of the underlying normal distribution.
Parameters
----------
mu : int
Mean of the desired distribution.
sigma : int
Standard deviation of the desired distribution.
draws : int
Number of required values.
Returns
-------
random_variation : float
Mean of the random variation over the specified iterations.
"""
if seed_value == None:
pass
else:
frequency_seed_value = seed_value * frequency * 100
np.random.seed(int(str(frequency_seed_value)[:2]))
normal_std = np.sqrt(np.log10(1 + (sigma/mu)**2))
normal_mean = np.log10(mu) - normal_std**2 / 2
hs = np.random.lognormal(normal_mean, normal_std, draws)
return round(np.mean(hs),2) | f0613688a5af83a867825b3e91c8f1f8a99c05ba | 13,362 |
import numpy as np
from vtk.numpy_interface import algorithms as algs  # assumed source of the `algs` helpers used below
VERBOSE = False  # assumed module-level verbosity flag; `from re import VERBOSE` in the original dump looks like an import-resolution artifact
def compute_mean_field(
grain_index_field,
field_data,
field_name,
vx_size=(1.0, 1.0, 1.0),
weighted=False,
compute_std_dev=False,
):
"""
Compute mean shear system by grains.
Args:
grain_index_field : VTK field containing index
field_data : VTK field containing shear field
field_name : the requested name of field
vx_size=(1.,1.,1.) : the voxel size
weighted=False : whether or not the mean and stddev is weighted
by grain volume ratio
compute_std_dev=False : whether we compute standard deviation
for `field_name`
Returns:
value_by_grain: 2D numpy array with every mean value for each grains
mean_field: 3D numpy array containing mean shear field
std_field: 3D numpy array containing standard_dev grains field
if compute_std_dev is True
"""
real_indx_grains = np.unique(grain_index_field)
field = field_data.PointData[field_name]
field_dimension = field_data.GetDimensions()
mean_field = np.zeros_like(field)
std_field = np.zeros_like(field)
# volume_grains = np.zeros_like(grain_index_field)
vx_vol = np.prod(vx_size) # vx_size[0]*vx_size[1]*vx_size[2]
# print(np.prod(vx_size))
# if weighted:
volume_total = vx_vol * np.prod(field_dimension)
# else:
# volume_total = 1.0
# print(" volume_total ", volume_total)
# print(" np.prod(field_dimension) ", np.prod(field_dimension))
volume = 1.0
for index in real_indx_grains:
mask_grains = np.nonzero(grain_index_field == index)
# if weighted:
# volume = np.count_nonzero(grain_index_field == index) * vx_vol
mean = algs.mean(field[mask_grains], axis=0) # * volume / volume_total
if VERBOSE:
print(
"- index {} v_i {} v_t {} mean {} mean {}".format(
index,
volume,
volume_total,
algs.mean(field[mask_grains], axis=0),
mean,
)
)
if compute_std_dev:
std_dev = np.std(field[mask_grains], axis=0) # * volume / volume_total
std_field[mask_grains] = std_dev
# volume_grains[mask_grains] = volume
mean_field[mask_grains] = mean
# gamma_by_grain = np.row_stack(gamma_by_grain)
value_by_grain = np.unique(mean_field, axis=0)
# print(" gamma_by_grain ", gamma_by_grain.shape)
# mean_by_grains = np.column_stack((real_indx_grains,gamma_by_grain))
return value_by_grain, mean_field, std_field | 8baa187a853c1d44597cae0417b455c74db2072d | 13,363 |
import xml.etree.ElementTree as ET
# `print_error` is assumed to be a logging helper defined elsewhere in the project.
def evaluate_argument_value(xpath_or_tagname, datafile):
"""This function takes checks if the given xpath_or_tagname exists in the
datafile and returns its value. Else returns None."""
tree = ET.parse(datafile)
root = tree.getroot()
if xpath_or_tagname.startswith(root.tag + "/"):
xpath_or_tagname = xpath_or_tagname[len(root.tag + "/"):]
try:
xpath_or_tagname = root.find(xpath_or_tagname).text
except Exception:
print_error("Invalid xpath: {0}".format(root.tag + "/" + xpath_or_tagname))
xpath_or_tagname = None
else:
print_error("Invalid xpath: {0}".format(xpath_or_tagname))
xpath_or_tagname = None
return xpath_or_tagname | be4597e039717a535a86edfa4b04761417d0eaf4 | 13,364 |
import numpy as np
# `frange` is assumed to be a float-step range helper defined elsewhere in the project.
def normalise_genome_position(x):
"""
Normalise position (circular genome)
"""
x['PositionNorm0'] = np.where(x['Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['Position']),
x['Position'])
x['PositionNorm'] = x['PositionNorm0']**(1/2)
# Reference position
n_reads = x['readCount'].max()
start_position_ref = int(1)
end_position_ref = x['GenomeLength'].iloc[0]
end_position_ref = end_position_ref + n_reads
increase_by = (end_position_ref / n_reads)
x['ref_Position'] = list(frange(start_position_ref, end_position_ref,
increase_by))
x['ref_Position'] = x['ref_Position'].astype(int)
x['PositionNorm_ref0'] = np.where(x['ref_Position'] > (x['GenomeLength'] / 2),
(x['GenomeLength'] - x['ref_Position']),
x['ref_Position'])
x['PositionNorm_ref'] = x['PositionNorm_ref0'].astype(int)
return x | 251808a0c7ea2b4f83e8c82ec06fc2d1d9e9b887 | 13,365 |
from faker import Faker
def random_address(invalid_data):
"""
Generate Random Address
return: string containing imitation postal address.
"""
fake = Faker(['en_CA']) # localized to Canada
return fake.address().replace('\n',', '), global_valid_data | 3375f2eefd05e1575ec8caf2944ee7960a17ca46 | 13,366 |
import math
import numpy as np
# `lc` (providing incidence_angle) and rot_poly_x / rot_poly_y are assumed to be
# helpers defined elsewhere in this package.
def rot_poly(angle, polygon, n):
"""rotate polygon into 2D plane in order to determine if a point exists
within it. The Shapely library uses 2D geometry, so this is done in order
to use it effectively for intersection calculations.
Parameters
----------
angle : float
Euler angle to rotate a vector with respect to n
polygon : NumPy array
Coordinates encompassing a polygon (i.e. a boundary)
n : NumPy array
Normal vector of a boundary
Returns
-------
poly_2d : Shapely Polygon object
Shapely Polygon object in 2D coordinates
Notes
-----
This is not an elegant way of doing this. This works for surfaces that are
tilted with respect to the x-axis, and will work for surfaces with a normal
that is parallel to the y-axis, but will not allow for anything else. For
the code to be fully generalizable, this function will need to be expanded.
"""
xvect = np.array([1,0,0])
frontbacktest = lc.incidence_angle(n,xvect)
# if this is a front or back surface of the LSC, rotate with respect to y
if frontbacktest == 0 or frontbacktest == math.pi:
poly_2d = rot_poly_y(angle, polygon)
# otherwise, rotate with respect to x
else:
poly_2d = rot_poly_x(angle, polygon)
return poly_2d | 3048d6fac8a5e2dffb3d621c8bec2a25aa6b31d0 | 13,367 |
def bytes_isspace(x: bytes) -> bool:
"""Checks if given bytes object contains only whitespace elements.
Compiling bytes.isspace compiles this function.
This function is only intended to be executed in this compiled form.
Args:
x: The bytes object to examine.
Returns:
Result of check.
"""
if len(x) == 0:
return False
for i in x:
if i != ord(' ') and i != ord('\t') and i != ord('\n') and i != ord('\r') and i != 0x0b and i != ord('\f'):
return False
return True | 6c28b904cb6e0ef515ce7a16725fb99a535c3192 | 13,368 |
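# Quick checks for bytes_isspace above; it mirrors the behaviour of bytes.isspace().
assert bytes_isspace(b" \t\r\n\x0b\x0c") is True
assert bytes_isspace(b"") is False
assert bytes_isspace(b" a ") is False
assert (b" \t").isspace() == bytes_isspace(b" \t")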
import re
def snake_case(name: str):
"""
https://stackoverflow.com/a/1176023/1371716
"""
name = re.sub('(\\.)', r'_', name)
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
name = re.sub('__([A-Z])', r'_\1', name)
name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', name)
return name.lower() | 4696ca3c1a50590aa6617ee3917b8364c11f3910 | 13,369 |
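# Example conversions for snake_case above.
assert snake_case("CamelCase.Name") == "camel_case_name"
assert snake_case("getHTTPResponseCode") == "get_http_response_code"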
import glob
def get_loss_data():
"""
This function returns a list of paths to all .npy loss
files.
Returns
-------
path_list : list of strings
The list of paths to output files
"""
path = "./data/*_loss.npy"
path_list = glob.glob(path, recursive=True)
return path_list | bc98b0bdf60ac3f7125da82fd68956957e89a777 | 13,370 |
import numpy as np
# `batch_count_scaled_knn` is assumed to be a KNN-distance helper defined elsewhere.
def ranked_avg_knn_scores(batch_states, memory, k=10, knn=batch_count_scaled_knn):
    r"""
Computes ranked average KNN score for each element in batch of states
\sum_{i = 1}^{K} (1/i) * d(x, x_i)
Parameters
----------
k: k neighbors
batch_states: numpy array of size [batch_size x state_size]
memory: numpy array of size [memory_size x state_size]
Returns
-------
numpy array of scores of dims [batch_size]
"""
nearest_neighbor_scores = knn(batch_states, memory, k=k)
k = nearest_neighbor_scores.shape[1]
scales = 1 / np.expand_dims(np.arange(1, k + 1), axis=0).repeat(batch_states.shape[0], axis=0)
# There may be the edge case where the number of unique distances for this particular batch
# is less than k. If that's the case, we need to reduce our scales dimension.
# This means one of two things:
# 1. you either have a very small map, or
# 2. your representation has collapsed into less than k points.
ranked_avg_scores = np.multiply(nearest_neighbor_scores, scales)
return np.sum(ranked_avg_scores, axis=-1) | 56d60d97e7c10b06deb417e0d6b52a5a76f9150e | 13,371 |
from functools import wraps
# `available_attrs` is assumed to come from Django < 3.0
# (`from django.utils.decorators import available_attrs`); on Django >= 3.0,
# `wraps(view_func)` alone is sufficient.
from django.utils.decorators import available_attrs
def login_exempt(view_func):
    """Login exemption: actions decorated with this decorator skip the login check."""
def wrapped_view(*args, **kwargs):
return view_func(*args, **kwargs)
wrapped_view.login_exempt = True
return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view) | d0853317260d68e9ea3d3a70a02c2f1ca67681a2 | 13,372 |
import numpy as np
# `ImageMessage` is assumed to be a project-specific message type with
# `image` and `mask` array attributes.
def to_uint8_image(message: ImageMessage) -> ImageMessage:
"""Convert image type to uint8.
Args:
message (ImageMessage): Image to be converted
Returns:
        ImageMessage: Resulting image
"""
message.image = np.uint8(message.image*255)
if message.mask is not None:
message.mask = np.uint8(message.mask*255)
return message | fd9626800b5c0fec284ab185c1ff29e9cfefb3e7 | 13,373 |
import string
def list_zero_alphabet() -> list:
"""Build a list: 0, a, b, c etc."""
score_dirs = ['0']
for char in string.ascii_lowercase:
score_dirs.append(char)
return score_dirs | 6cd9fc9e93257dcc7729235ac3cffa01dbd80c95 | 13,374 |
import ee
def proxy(values=(0,), names=('constant',), types=('int8',)):
""" Create a proxy image with the given values, names and types
:param values: list of values for every band of the resulting image
:type values: list
:param names: list of names
:type names: list
:param types: list of band types. Options are 'int8', 'int16', 'int32',
'int64', 'uint8', 'uint16', 'uint32', 'byte', 'short', 'int', 'long',
'float' and 'double'
:type types: list
:rtype: ee.Image
"""
values = list(values)
names = list(names)
types = list(types)
tps = dict(zip(names, types))
im = ee.Image(values).rename(names).cast(tps)
# update mask
for v, n in zip(values, names):
if v is None:
band = im.select(n)
masked = band.selfMask()
im = im.addBands(masked, overwrite=True)
return im | b57c4a625d8fa8c7a76bb0f1d2202e0e5cf2d41e | 13,375 |
import six
# `parse_version` is assumed to be a PEP 440-style version parser, e.g.
# `from pkg_resources import parse_version`.
from pkg_resources import parse_version
def _to_versions(raw_ls_remote_lines, version_join, tag_re, tag_filter_re):
"""Converts raw ls-remote output lines to a sorted (descending)
list of (Version, v_str, git_hash) objects.
This is used for source:git method to find latest version and git hash.
"""
ret = []
for line in raw_ls_remote_lines:
git_hash, ref = six.ensure_text(line).split('\t')
if ref.startswith('refs/tags/'):
tag = ref[len('refs/tags/'):]
if tag_filter_re and not tag_filter_re.match(tag):
continue
m = tag_re.match(tag)
if not m:
continue
v_str = m.group(1)
if version_join:
v_str = '.'.join(v_str.split(version_join))
ret.append((parse_version(v_str), v_str, git_hash))
return sorted(ret, reverse=True) | 9113d26dbec144bbc72c89ca41935305a7321a18 | 13,376 |
def arraysum(x: list) -> int:
    """
    Return the sum of all elements of a list by iterating over it and adding them.
    Input: list of integers
    Output: integer
    """
sum = 0
for i in x:
sum += i
return sum | aa14eaf4e2bb800ad5e61a63ab0bc17c56dbd86d | 13,377 |
import numpy as np
from sklearn.preprocessing import MinMaxScaler
def get_sensitivity_scores(model, features, top_n):
"""
Finds the sensitivity of each feature in features for model. Returns the top_n
feature names, features_top, alongside the sensitivity values, scores_top.
"""
# Get just the values of features
x_train = features.values
# Apply min max normalization
scaler = MinMaxScaler().fit(x_train)
x_train = scaler.transform(x_train)
# Find mean and standard deviation of each feature
x_train_avg = np.mean(x_train, axis=0).reshape(1, -1)
x_train_std = np.std(x_train, axis=0).reshape(1, -1)
prediction_mean = model.predict(x_train_avg)
scores_max = []
# Iterate over each feature
for i in range(x_train_avg.shape[1]):
# Copy x_train_avg
x_train_i = x_train_avg.copy()
# Add the standard deviation of i to that column
x_train_i[:, i] = x_train_i[:, i] + x_train_std[:, i]
result_i = model.predict(x_train_i)
# Take the difference and divide by standard deviation
diff = (result_i - prediction_mean) / x_train_std[:, i]
scores_max.append(diff.flatten()[0])
scores_max = np.absolute(scores_max)
indices_top = np.argsort(scores_max)[-top_n:]
features_top = features.iloc[:, indices_top].columns
scores_top = scores_max[indices_top]
return features_top, scores_top | a955c93691b09073be20fc65a2c6958a620f5548 | 13,378 |
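# Usage sketch for get_sensitivity_scores above (synthetic data, hypothetical feature
# names): rank the features of a fitted scikit-learn model by sensitivity.
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
rng = np.random.default_rng(0)
features = pd.DataFrame(rng.normal(size=(200, 4)), columns=["f1", "f2", "f3", "f4"])
target = 3 * features["f1"] - 1 * features["f3"] + rng.normal(scale=0.1, size=200)
model = LinearRegression().fit(features.values, target)
top_features, top_scores = get_sensitivity_scores(model, features, top_n=2)
print(list(top_features), top_scores)  # "f1" and "f3" should dominate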
import numpy as np
def mad(data):
"""Median absolute deviation"""
m = np.median(np.abs(data - np.median(data)))
return m | 6b32901a94aca256736c1cb936c8b1c1794857d7 | 13,379 |
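# Quick check for mad above: the median absolute deviation is robust to the outlier.
import numpy as np
assert mad(np.array([1.0, 2.0, 3.0, 100.0])) == 1.0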
async def get_intents(current_user: User = Depends(Authentication.get_current_user_and_bot)):
"""
Fetches list of existing intents for particular bot
"""
return Response(data=mongo_processor.get_intents(current_user.get_bot())).dict() | 2a62bc579f6b392bc0038bf4f555941f764d456c | 13,380 |
from typing import Union
from typing import AnyStr
import os
from typing import Optional
from typing import Callable
def open_beneath(
path: Union[AnyStr, "os.PathLike[AnyStr]"],
flags: int,
*,
mode: int = 0o777,
dir_fd: Optional[int] = None,
no_symlinks: bool = False,
remember_parents: bool = False,
audit_func: Optional[Callable[[str, int, AnyStr], None]] = None,
) -> int:
"""
Open a file "beneath" a given directory.
This function guarantees that no ``..`` component in ``path``, or in a symbolic link encountered
in resolving ``path``, will ever be allowed to escape the "root" directory specified by
``dir_fd``. (In very specific circumstances, race conditions may allow multiple ``..``
components in a row to cause ``open_beneath()`` to temporarily leave the directory in question,
but it will check for such an escape before continuing and resolving any non-``..`` components).
Currently, ``open_beneath()`` is able to take advantage of OS-specific path resolution features
on the following platforms:
- Linux 5.6+
The ``path``, ``flags``, and ``mode`` arguments are as for ``os.open(...)``.
If ``dir_fd`` is given and not ``None``, it is used to determine the directory relative to which
paths will be resolved. Otherwise, the current working directory is used.
``path`` can be an absolute path, or it can contain references to symlinks that target absolute
paths. In either case, the path is interpreted as if the process had ``chroot()``ed to the
directory referenced by ``dir_fd`` (or the current working directory, as described above).
If ``no_symlinks`` is True, no symlinks will be allowed during resolution of the path.
If ``audit_func`` is not ``None``, it indicates a function that will be called to "audit"
components of the path as they are resolved. The function will be called with three arguments:
a "description" string indicating the context, a file descriptor referring to the most recently
resolved directory, and a path whose meaning depends on the "description". The following
"descriptions" are currently used (though more may be added):
- ``"before"``: This is called at each stage of the path resolution, just before the next
component is resolved. In this case, the third argument is the component that is about to be
resolved (which may be ``/`` or ``..``).
- ``"symlink"``: This is called immediately after encountering a symbolic link. In this case,
the third argument is the target of the symlink that was encountered.
The function should NOT perform any operations on the given file descriptor, or behavior is
undefined. Additionally, it should always return ``None``; other return values may have special
meanings in the future.
If an exception is raised in an ``audit_func``, ``open_beneath()`` will clean up properly and
pass the exception up to the caller.
Here is an example ``audit_func`` that blocks ``..`` components in symlinks::
def audit(desc, cur_fd, path):
if desc == "symlink":
while path:
path, part = os.path.split(path.rstrip("/"))
if part == "..":
raise RuntimeError("'..' component encountered")
If ``remember_parents`` is True, it triggers an alternate escape prevention strategy. This flag
makes ``open_beneath()`` retain open file descriptors to all of the directories it has
previously seen. This allows it to simply rewind back to those directories when encountering a
``..`` element, instead of having to perform potentially inefficient escape detection. (By
default, after a series of ``..`` elements, ``open_beneath()`` has to check that the current
directory is still contained within the "root".)
This is more efficient, but it requires a large number of file descriptors, and a malicious
attacker in control of the specified ``path`` *or* the filesystem could easily cause
``open_beneath()`` to exhaust all the available file descriptors. Use with caution!
Note: If ``open_beneath`` is able to take advantage of OS-specific path resolution features,
then ``remember_parents`` is ignored.
"""
path = os.fspath(path)
flags |= os.O_NOCTTY
if audit_func is None and _try_open_beneath is not None:
fd = _try_open_beneath(path, flags, mode=mode, dir_fd=dir_fd, no_symlinks=no_symlinks)
if fd is not None:
return fd
slash: AnyStr
dot: AnyStr
if isinstance(path, bytes):
slash = b"/"
dot = b"."
else:
slash = "/"
dot = "."
# We need a file descriptor that won't move (the current directory might) that we can use to
# perform lookups from.
new_dir_fd = os.open(".", DIR_OPEN_FLAGS) if dir_fd is None else dir_fd
try:
return _open_beneath(
path,
new_dir_fd,
flags,
mode,
no_symlinks,
slash=slash,
dot=dot,
remember_parents=remember_parents,
audit_func=audit_func,
)
finally:
if new_dir_fd != dir_fd:
os.close(new_dir_fd) | 9801c48c3e10416f4355499a584ee295e696faef | 13,381 |
import time
from datetime import datetime
import os
import numpy as np
import tensorflow as tf  # TF1-style API; on TF2 use `import tensorflow.compat.v1 as tf`
from six.moves import xrange  # pylint: disable=redefined-builtin
# FLAGS, cifar, cifar_common, learning_rate_fn and alt_optimizer are assumed to be
# defined at module level elsewhere in the project.
def train():
"""Train CIFAR-10/100 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [], dtype=tf.int32,
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
(FLAGS.batch_size * FLAGS.num_gpus))
decay_steps = int(num_batches_per_epoch * cifar.NUM_EPOCHS_PER_DECAY)
lr = learning_rate_fn(num_batches_per_epoch, global_step)
if FLAGS.alt_optimizer != '':
# Create an alternate optimizer
opt = alt_optimizer(lr, FLAGS.alt_optimizer)
else:
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
tower_losses = []
tower_images = []
tower_labels = []
tower_images_pl = []
tower_labels_pl = []
with tf.variable_scope(tf.get_variable_scope()):
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
loss, images, labels, images_pl, labels_pl, precision = cifar_common.tower_loss(scope)
tower_losses.append(loss)
tower_images.append(images)
tower_labels.append(labels)
tower_images_pl.append(images_pl)
tower_labels_pl.append(labels_pl)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Retain the Batch Normalization updates operations only from the
# final tower. Ideally, we should grab the updates from all towers
# but these stats accumulate extremely fast so we can ignore the
# other stats from the other towers without significant detriment.
batchnorm_updates = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = cifar_common.average_gradients(tower_grads)
loss = tf.add_n(tower_losses)
loss = tf.divide(loss, FLAGS.num_gpus)
# Add a summary to track the learning rate.
summaries.append(tf.summary.scalar('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram(var.op.name, var))
batchnorm_updates_op = tf.group(*batchnorm_updates)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables() +
tf.moving_average_variables())
# Group all updates to into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op, batchnorm_updates_op)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
max_steps = int(FLAGS.num_epochs * num_batches_per_epoch)
print('Max Training Steps: ', max_steps)
for step in xrange(max_steps):
start_time = time.time()
_, loss_value, lrate = sess.run([train_op, loss, lr])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f, lrate = %.4f, (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value, lrate,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
return loss_value | c2660e782dbc4f1a1a4603da83d756483d314bf8 | 13,382 |
# `Numbers` is assumed to be a numeric-sequence type alias (e.g. Sequence[float])
# defined elsewhere in this module, alongside `unify_abscissa` and `idx_offset`.
def find_offset(
ax: Numbers, ay: Numbers, bx: Numbers, by: Numbers, upscale: bool = True
) -> float:
"""Finds value, by which the spectrum should be shifted along x-axis to best
overlap with the first spectrum. If resolution of spectra is not identical,
one of them will be interpolated to match resolution of the other one. By default
interpolation is done on the lower-resolution spectra. This can be changed
by passing ``upscale = False`` to function call.
Parameters
----------
ax
Abscissa of the first spectrum.
ay
Values of the first spectrum.
bx
Abscissa of the second spectrum.
by
Values of the second spectrum.
upscale
If interpolation should be done on more loosely spaced spectrum (default).
When set to False, spectrum with lower resolution will be treated as reference
for density of data points.
Returns
-------
float
Value, by which second spectrum should be shifted, in appropriate units.
"""
ax, ay, bx, by = unify_abscissa(ax, ay, bx, by, upscale=upscale)
shift = idx_offset(ay, by)
if shift < 0:
offset = ax[0] - bx[abs(shift)]
else:
offset = ax[shift] - bx[0]
return offset | 64e0c13a16b3ead30227ab80398fea296674385d | 13,383 |
def And(*xs, simplify=True):
"""Expression conjunction (product, AND) operator
If *simplify* is ``True``, return a simplified expression.
"""
xs = [Expression.box(x).node for x in xs]
y = exprnode.and_(*xs)
if simplify:
y = y.simplify()
return _expr(y) | 5f25e8b2f37a4bbc077f10eee561936e41defefa | 13,384 |
def get_assignment_submissions(course_id, assignment_id):
""" return a list of submissions for an assignment """
return api.get_list('courses/{}/assignments/{}/submissions'.format(course_id, assignment_id)) | eb1a6143b551298efdb6c4181e2356d759c6fd6c | 13,385 |
def send_email(to, content=None, title=None, mail_from=None,
attach=None, cc=None, bcc=None, text=None, html=None, headers=None):
"""
:param to: 收件人,如 '[email protected]' 或 '[email protected], [email protected]' 或 ['[email protected], [email protected]']
:param content: 邮件内容,纯文本或HTML str
:param title: 邮件标题 str or list
:param mail_from: 发件人 str
:param attach: 附件列表 ["@/tmp/test.txt"]
:param cc: 抄送人, 格式同收件人
:param bcc: 匿名抄送人, 格式同收件人
:param text: 邮件纯文本 str
:param html: 邮件HTML str
:param headers: 其他 MIME Header属性 dict
:return: 正常返回 {} dict
"""
arg_dict = dict()
if isinstance(to, list):
to = ', '.join(to)
arg_dict['to'] = to
if isinstance(cc, list):
cc = ', '.join(cc)
arg_dict['cc'] = cc
if isinstance(bcc, list):
bcc = ', '.join(bcc)
arg_dict['bcc'] = bcc
if isinstance(title, list):
title = ''.join(title)
arg_dict['title'] = title
arg_dict['mail_from'] = mail_from
arg_dict['content'] = content
arg_dict['attach'] = attach
arg_dict['text'] = text
arg_dict['html'] = html
arg_dict['headers'] = headers or {}
e = Email()
msg = e.build_email(arg_dict)
return e.send_email(msg) | c68c4db3c96890d1e82f33666b69cd4e1ac4c116 | 13,386 |
def get_abbreviation(res_type, abbr):
"""
Returns abbreviation value from data set
@param res_type: Resource type (html, css, ...)
@type res_type: str
@param abbr: Abbreviation name
@type abbr: str
@return dict, None
"""
return get_settings_resource(res_type, abbr, 'abbreviations') | 91831f10fc2be1d7c7201b02e0d044939ce82e83 | 13,387 |
import time
import numpy as np
# `pro` is assumed to be an initialised tushare pro client, e.g. `pro = ts.pro_api(<token>)`;
# `date_util` is assumed to be a project date helper.
import tushare as ts
def get_stock_list(month_before=12, trade_date='20200410', delta_price=(10, 200), total_mv=50, pe_ttm=(10, 200)):
    """
    month_before : fetch the list of companies that were already listed
                   `month_before` months ago (default: listed at least one year ago)
    delta_price : used to drop stocks whose price falls outside this range;
                  if None, no price filter is applied
    TIPS : delta_price is compared against today's stock price
    """
stock_list = pro.stock_basic(exchange='', list_status='L', fields='ts_code,name,market,list_date')
    # Drop ChiNext (创业板) and STAR Market (科创板) stocks
stock_list1 = stock_list[~stock_list['market'].isin(["科创板","创业板"])].reset_index(drop=True)
    # Drop ST, bank and securities (brokerage) stocks
index_list = []
for i in range(len(stock_list1)):
if '银行' in stock_list1.iloc[i]['name'] \
or 'ST' in stock_list1.iloc[i]['name'] \
or '证券' in stock_list1.iloc[i]['name'] :
index_list.append(i)
for i in index_list:
stock_list1 = stock_list1.drop(i)
stock_list1 = stock_list1.reset_index(drop=True)
    # Drop stocks that have been listed for less than a year (default)
delta_date = date_util.get_date_months_before(month_before)
stock_list2 = stock_list1[stock_list1["list_date"] <= delta_date].reset_index(drop=True)
stock_list = stock_list2.drop(['market', 'list_date'], axis=1)
    # Drop companies whose market cap is below `total_mv` (unit: 100 million CNY)
if total_mv is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,total_mv')
stock_list.loc[i, "total_mv"] = df.loc[0, "total_mv"] if df.empty is False else 0
            except Exception:
time.sleep(3)
stock_list = stock_list[stock_list["total_mv"] > total_mv * 10000].reset_index(drop=True)
    # Drop companies whose PE (TTM) is None or outside the given range
if pe_ttm is not None:
for i in range(len(stock_list)):
try:
df = pro.daily_basic(ts_code=stock_list["ts_code"][i], \
trade_date=trade_date, fields='ts_code,pe_ttm')
stock_list.loc[i, "pe_ttm"] = df.loc[0, "pe_ttm"] if df.empty is False else None
            except Exception:
time.sleep(3)
stock_list = stock_list[stock_list['pe_ttm'] > pe_ttm[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['pe_ttm'] < pe_ttm[1]].dropna().reset_index(drop=True)
    # Drop stocks whose price at `trade_date` is outside the delta_price range
if delta_price is not None:
stock_list['price'] = np.zeros(len(stock_list))
for i in range(len(stock_list)):
stock_code = stock_list.iloc[i]["ts_code"]
try:
current_df = ts.pro_bar(ts_code=stock_code, adj='qfq',
start_date=trade_date, end_date=trade_date)
if current_df.empty:
continue
stock_list.loc[i, "price"] = (current_df.loc[0, "close"] + current_df.loc[0, "pre_close"]) / 2
            except Exception:
time.sleep(3)
stock_list = stock_list[stock_list['price'] > delta_price[0]].reset_index(drop=True)
stock_list = stock_list[stock_list['price'] < delta_price[1]].reset_index(drop=True)
stock_list.to_csv("./data_pulled/stock_date_delta_price{}.csv".format(delta_price), index=False)
return stock_list | 13f0dd7b31c297ea643ad42efb519a88907bbfd5 | 13,388 |
def dim_axis_label(dimensions, separator=', '):
"""
Returns an axis label for one or more dimensions.
"""
if not isinstance(dimensions, list): dimensions = [dimensions]
return separator.join([d.pprint_label for d in dimensions]) | f03e4eb02fc57890421bdcdaa0aea7d6541b8678 | 13,389 |
import numpy as np
def get_random_idx(k: int, size: int) -> np.ndarray:
"""
Get `k` random values of a list of size `size`.
:param k: number or random values
:param size: total number of values
:return: list of `k` random values
"""
return (np.random.rand(k) * size).astype(int) | eedcc9953e878c9b475cc18666eb621de2811dbe | 13,390 |
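# Usage sketch for get_random_idx above: 5 random indices into a list of length 20.
idx = get_random_idx(5, 20)
print(idx)  # five integers in [0, 20); duplicates are possible with this approach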
def bitmask_8bit(array, pad_value=None):
"""Return 8-bit bitmask for cardinal and diagonal neighbours."""
shape, padded = shape_padded(array, pad_value=pad_value)
cardinals = get_cardinals(shape, padded)
diagonals = get_diagonals(shape, padded)
# TODO: https://forum.unity.com/threads/2d-tile-bitmasking.513840/#post-3366221
bitmask = cardinals + (diagonals << 4)
return bitmask | 680f6ad7319af36a23d0e3b174cc5df7021a9920 | 13,391 |
from typing import Optional
def fhir_search_path_meta_info(path: str) -> Optional[tuple]:
    """Return the (json name, is_list, of_many) element metadata for the last
    component of a FHIR search path, or None if the path cannot be resolved."""
resource_type = path.split(".")[0]
properties = path.split(".")[1:]
model_cls = resource_type_to_resource_cls(resource_type)
result = None
for prop in properties:
for (
name,
jsname,
typ,
is_list,
of_many,
not_optional,
) in model_cls().elementProperties():
if prop != name:
continue
if typ not in (int, float, bool, str):
model_cls = typ
result = (jsname, is_list, of_many)
break
return result | 2117e9f09c401e2b027d3c3eb7347650eaa03582 | 13,392 |
def _is_camel_case_ab(s, index):
"""Determine if the index is at 'aB', which is the start of a camel token.
For example, with 'workAt', this function detects 'kA'."""
return index >= 1 and s[index - 1].islower() and s[index].isupper() | c21ec7d8aa7e786d1ea523106af6f9426fea01d8 | 13,393 |
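# Examples for _is_camel_case_ab above: index 4 in "workAt" is the 'A' starting a new
# camel-case token, index 1 is not.
assert _is_camel_case_ab("workAt", 4) is True
assert _is_camel_case_ab("workAt", 1) is False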
import os
def create_folder():
"""
    This function creates the required empty folders at startup.
    :return: folder status as boolean
"""
folder_flag = 0
list_of_folders = os.listdir(SOURCE_DIR)
for i in ["doc", "image", "output", "font"]:
if i not in list_of_folders:
os.mkdir(i)
folder_flag += 1
if i == "doc":
file = open(os.path.join(DOC_DIR, "index.txt"), "w")
if read_lorem() is None:
file.write("This is For First Page . . .")
else:
file.write(read_lorem())
file.close()
return bool(folder_flag) | d4f1864fb4b5158682d414a09e1391f78600fbb2 | 13,394 |
def create_bulleted_tool_list(tools):
"""
Helper function that returns a text-based bulleted list of the given tools.
Args:
tools (OrderedDict): The tools whose names (the keys) will be added to the
text-based list.
Returns:
str: A bulleted list of tool names.
"""
return TOOL_LIST_HEADER + create_bulleted_list(tools.keys()) | d75fb7793c019f2499b549c2af627bb2038876e7 | 13,395 |
def _c3_merge(sequences, cls, context):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
# Show all the remaining bases, which were considered as
# candidates for the next mro sequence.
raise exceptions.InconsistentMroError(
message="Cannot create a consistent method resolution order "
"for MROs {mros} of class {cls!r}.",
mros=sequences, cls=cls, context=context)
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0] | 6453f151fe227226f3fcbc29d4e5fffd800683cb | 13,396 |
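# Usage sketch for _c3_merge above, using plain strings as stand-ins for classes
# (the project's `exceptions` module is only needed on the failure path):
# linearising D(B, C) with B(A) and C(A) under C3.
mro_tail = _c3_merge([["B", "A"], ["C", "A"], ["B", "C"]], cls="D", context=None)
assert mro_tail == ["B", "C", "A"]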
def rgb2hex(rgb: tuple) -> str:
"""
Converts RGB tuple format to HEX string
:param rgb:
:return: hex string
"""
return '#%02x%02x%02x' % rgb | 1ecb1ca68fa3dbe7b58f74c2e50f76175e9a0c5a | 13,397 |
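# Example conversions for rgb2hex above.
assert rgb2hex((255, 0, 128)) == "#ff0080"
assert rgb2hex((0, 0, 0)) == "#000000"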
import warnings
import numpy as np
import pandas as pd
# cvxopt is assumed for the quadratic-programming solver, with the aliases used below.
import cvxopt as opt
import cvxopt.solvers as optsolvers
def min_var_portfolio(cov_mat, allow_short=False):
"""
Computes the minimum variance portfolio.
Note: As the variance is not invariant with respect
to leverage, it is not possible to construct non-trivial
market neutral minimum variance portfolios. This is because
the variance approaches zero with decreasing leverage,
i.e. the market neutral portfolio with minimum variance
is not invested at all.
Parameters
----------
cov_mat: pandas.DataFrame
Covariance matrix of asset returns.
allow_short: bool, optional
If 'False' construct a long-only portfolio.
If 'True' allow shorting, i.e. negative weights.
Returns
-------
weights: pandas.Series
Optimal asset weights.
"""
if not isinstance(cov_mat, pd.DataFrame):
raise ValueError("Covariance matrix is not a DataFrame")
n = len(cov_mat)
P = opt.matrix(cov_mat.values)
q = opt.matrix(0.0, (n, 1))
# Constraints Gx <= h
if not allow_short:
# x >= 0
G = opt.matrix(-np.identity(n))
h = opt.matrix(0.0, (n, 1))
else:
G = None
h = None
# Constraints Ax = b
# sum(x) = 1
A = opt.matrix(1.0, (1, n))
b = opt.matrix(1.0)
# Solve
optsolvers.options['show_progress'] = False
sol = optsolvers.qp(P, q, G, h, A, b)
if sol['status'] != 'optimal':
warnings.warn("Convergence problem")
# Put weights into a labeled series
weights = pd.Series(sol['x'], index=cov_mat.index)
return weights | 2efd839b8ca8ea6fe7b26f645630beb78699a8ea | 13,398 |
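# Usage sketch for min_var_portfolio above (toy numbers): long-only minimum-variance
# weights for three assets. Requires cvxopt and pandas to be installed.
import pandas as pd
cov = pd.DataFrame(
    [[0.04, 0.006, 0.012],
     [0.006, 0.09, 0.018],
     [0.012, 0.018, 0.16]],
    index=["A", "B", "C"], columns=["A", "B", "C"])
weights = min_var_portfolio(cov)
print(weights.round(3))  # weights sum to 1; the lowest-variance asset should get the largest share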
def relu(fd: DahliaFuncDef) -> str:
"""tvm.apache.org/docs/api/python/relay/nn.html#tvm.relay.nn.relu"""
data, res = fd.args[0], fd.dest
num_dims = get_dims(data.comp)
args = data.comp.args
indices = ""
var_name = CHARACTER_I
for _ in range(num_dims):
indices += f'[{var_name}]'
var_name = next_character(var_name)
data_type = fd.data_type
zero = f'({"0.0" if "fix" in data_type else "0"} as {data_type})'
input = f'{data.id.name}{indices}'
result = f'{res.id.name}{indices}'
loop_body = f"""if ({input} > {zero}) {{ {result} := {input}; }}
else {{ {result} := {zero}; }}"""
return emit_dahlia_definition(
fd,
emit_dahlia_loop(data, loop_body)
) | 30eefb572f632e91993d715ecc70570d38030657 | 13,399 |