content (stringlengths 35 – 762k) | sha1 (stringlengths 40) | id (int64 0 – 3.66M) |
---|---|---|
def days_in_month(year, month):
""" return number of days in that month in that year """
if not 1 <= month <= 12:
return 'Invalid Month'
if month == 2 and is_leap(year):
return 29
return month_days[month]
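# A minimal usage sketch. is_leap and month_days are helpers the snippet above
# assumes but does not define, so hypothetical versions are given here.
def is_leap(year):
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

month_days = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30,
              7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}

print(days_in_month(2024, 2))   # 29
print(days_in_month(2023, 2))   # 28
print(days_in_month(2023, 13))  # 'Invalid Month'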
|
8e9e5878fcfb595518d33a38baaf5bdc1b45c8ed
| 19,100 |
def tmm_normal(fPath, bFilter=True):
"""
    Obtain the voom/TMM-normalized counts.
    Args:
        fPath    string  Path to the file with the raw counts
        bFilter  bool    Whether to filter out low-expression genes
    Returns:
        tmm      dataframe  DataFrame with the log2(TMM) counts
"""
tmm = tmm_normalization(fPath, str(bFilter))
return tmm
|
4741a6af490e24485bd4ad28e7289dd320abf77d
| 19,101 |
import math
import numpy as np
def StrainFitness(all_cix_series,
all_cntrl_sum,
debug_print=False):
"""
Args:
all_cix_series (pandas Series): The current experiment name column of values from all_df_used
length = nAllStrainsCentralGoodGenes
all_cntrl_sum (pandas Series): The sum of the current control values without the current index;
Is a pandas series the same length as all_cix series,
but with the sum of the other control values
length = nAllStrainsCentralGoodGenes
debug_print (bool): Decides whether to print out this function's results and stop
the program
Returns:
fit: pandas Series (float) with a computation applied to values
Same length as inputs: nAllStrainsCentralGoodGenes
se: pandas Series (float) with computations applied to values
Same length as inputs: nAllStrainsCentralGoodGenes
    Description:
        fit: Median-normalized log2 difference between the current experiment and the time0s.
             (A simple log-ratio with a pseudocount of 1; it is *not* normalized except to set
             each scaffold's median to 0.)
        se: Standard error of the values.
    """
sf_fit = mednorm( (1+all_cix_series).apply(np.log2) - (1 + all_cntrl_sum).apply(np.log2) )
sf_se = (1/(1 + all_cix_series) + 1/(1 + all_cntrl_sum)).apply(math.sqrt)/ np.log(2)
return {
"fit": sf_fit,
"se": sf_se
}
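# Usage sketch with toy Series. mednorm is not defined in this snippet; a simple
# median-centering stand-in is assumed here purely for illustration.
import pandas as pd

def mednorm(series):
    return series - series.median()

counts = pd.Series([10.0, 0.0, 5.0, 100.0])
controls = pd.Series([12.0, 1.0, 4.0, 90.0])
result = StrainFitness(counts, controls)
print(result["fit"])  # median-centred log2 ratios
print(result["se"])   # approximate standard errors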
|
8237c99bd9af25fe728c6c133572abf2f4cba1ad
| 19,102 |
import base64
from io import BytesIO
from PIL import Image
def np_to_base64(img_np):
"""
Convert numpy image (RGB) to base64 string
"""
img = Image.fromarray(img_np.astype("uint8"), "RGB")
buffered = BytesIO()
img.save(buffered, format="PNG")
return "data:image/png;base64," + base64.b64encode(
buffered.getvalue()).decode("ascii")
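# Usage sketch: encode a small solid-colour RGB image (relies on the numpy and
# Pillow imports added above).
import numpy as np

red_square = np.zeros((8, 8, 3), dtype=np.uint8)
red_square[..., 0] = 255
data_uri = np_to_base64(red_square)
print(data_uri[:30])  # 'data:image/png;base64,...'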
|
2856e8ccf5402b5f6615bc8b66a364cef3e3a01c
| 19,103 |
def sample_unknown_parameters(_params, _n=None):
"""
AW - sample_unknown_parameters - Sample the parameters we do not fix and hence wish to marginalize over.
:param _params: SimpNameSp: dot accessible simple name space of simulation parameters.
:return: SimpNameSp: dot accessible simple name space of simulation parameters, where those parameters
that are not fixed have been re-drawn from the prior.
"""
if _n is None:
_n = len(_params.log_a)
_params_from_unknown = dc(_params)
_params_from_prior = sample_prior_parameters(_params_from_unknown, _n)
for _k in _params.uncontrolled_parameters:
setattr(_params_from_unknown, _k, getattr(_params_from_prior, _k))
return _params_from_unknown
|
700d4ab80cd3e798fa87f9249194015377d19cc7
| 19,104 |
import logging
import numpy as np
from astropy.table import Table, Column
def vector2Table (hdu, xlabel='wavelength',ylabel='flux') :
"""
Reads a 1-D vector from a FITS HDU into a Table.
If present, the wavelength scale is hopefully in a simple, linear WCS!
"""
hdr = hdu.header
if hdr['NAXIS'] != 1 :
logging.error ('vector2Table can only construct 1-D tables!')
return None
nw = hdr['NAXIS1']
pixl = np.arange(nw)
wave = None
# GET FLUX
bscale = 1.0
bzero = 0.0
"""
if 'BSCALE' in hdr and 'BZERO' in hdr :
bscale = hdr['BSCALE']
bzero = hdr['BZERO']
"""
flux = hdu.data*bscale+bzero
# GET WAVELENGTH
if 'CRVAL1' in hdr and 'CDELT1' in hdr : # SIMPLE WCS
crpix1 = 1
if 'CRPIX1' in hdr :
crpix1 = hdr['CRPIX1']
w0 = hdr['CRVAL1']
dwdx = hdr['CDELT1']
wave = w0+dwdx*(pixl+1-(crpix1-1))
# GET UNITS
if 'CUNIT1' in hdr :
cunit1 = hdr['CUNIT1']
elif wave is not None : # ASSUME ASTRONOMERS USE ANGSTROMS
cunit1 = 'nm'
wave /= 10.
else :
cunit1 = 'pix'
# CONSTRUCT Table
t = Table()
if wave is not None :
t[xlabel] = Column(wave,unit=cunit1, description=xlabel)
else :
t[xlabel] = Column(pixl,unit=cunit1, description=xlabel)
t[ylabel] = Column(flux,unit='unknown', description=ylabel)
t.meta = hdr
return t
|
b0ff458f8cf6de660ae5c314f3e9db1f50aeaf3c
| 19,105 |
import zarr
def get_zarr_size(fn):
"""Get size of zarr file excluding metadata"""
# Open file
grp = zarr.open_group(fn)
# Collect size
total = 0
for var in list(grp.keys()):
total += grp[var].nbytes_stored
return total
|
e2fe053bf239156e74038672a435144cf7bc5216
| 19,106 |
import numpy as np
def rotation_matrix(a, b):
""" Calculate rotation matrix M, such that Ma is aligned to b
Args:
a: Initial vector direction
b: Target direction
"""
# np.allclose might be safer here
if np.array_equal(a, b):
return np.eye(3)
# Allow cases where a,b are not unit vectors, so normalise
a = a / np.linalg.norm(a)
b = b / np.linalg.norm(b)
# Anti-parallel - rotate 180 degrees about any axis.
if np.array_equal(a, -b):
# If vector is (anti)parallel to z, rotate around x
if np.array_equal(np.abs(a), np.array([0, 0, 1])):
return np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
# Otherwise rotate around z
return np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
v = np.cross(a, b)
s = np.linalg.norm(v)
t = np.dot(a, b)
vx = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
return np.eye(3) + vx + np.dot(vx, vx) * (1-t) / (s**2)
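# Quick sanity check: the returned matrix should rotate a onto the direction of b.
a = np.array([1.0, 0.0, 0.0])
b = np.array([0.0, 1.0, 1.0])
M = rotation_matrix(a, b)
print(np.allclose(M @ a, b / np.linalg.norm(b)))  # True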
|
948eb08758b81a6b9f2cc0f518dbb74f04970a1c
| 19,107 |
from sys import version_info
def bind(filename=None, blockpairs=1):
""" Open a connection. If filename is not given or None, a filename
is chosen automatically. This function returns blockpairs number
of Writer, Reader pairs.
"""
# Open memory mapped file, deduced file size from number of blocks
size = HEAD_SIZE + blockpairs * 2 * BLOCK_SIZE
m = Mmap(filename, size=size)
# Write header
m[0:5] = 'yoton'.encode('ascii')
m[5] = uint8(version_info[0])
m[6] = uint8(version_info[1])
# Create blocks
blocks = []
for i in range(blockpairs):
b1 = Writer(m, (2 * i + 0) * BLOCK_SIZE + HEAD_SIZE)
b2 = Reader(m, (2 * i + 1) * BLOCK_SIZE + HEAD_SIZE)
blocks.extend([b1, b2])
return tuple(blocks)
|
1cae390b7644219ca07335fa7ed078fe24bf6ca8
| 19,108 |
def saveuserprefs():
""" Fetch the preferences of the current user in JSON form """
user = current_user()
j = request.get_json(silent=True)
    # Validate and store the user preferences received as JSON
uf = UserForm()
uf.init_from_dict(j)
err = uf.validate()
if err:
return jsonify(ok=False, err=err)
uf.store(user)
return jsonify(ok=True)
|
0b2e893623432f0337014df3f0a67a4d2174a082
| 19,109 |
import os
def process_args(args):
""" Process the options got from get_args()
"""
args.input_dir = args.input_dir.strip()
if args.input_dir == '' or not os.path.exists(os.path.join(args.input_dir, 'model.meta')):
raise Exception("This scripts expects the input model was exist in '{0}' directory.".format(args.input_dir))
if args.tar_file == '':
if args.ranges_file == '' or not os.path.exists(args.ranges_file):
raise Exception("The specified range file '{0}' not exist.".format(args.ranges_file))
if args.scp_file == '' or not os.path.exists(args.scp_file):
raise Exception("The specified scp file '{0}' not exist.".format(args.scp_file))
else:
if not os.path.exists(args.tar_file):
raise Exception("The specified tar file '{0}' not exist.".format(args.tar_file))
if not os.path.exists(args.tar_file.replace('.tar', '.npy')):
raise Exception("There is no corresponding npy label file for tar file '{0}'.".format(args.tar_file))
if args.dropout_proportion > 1.0 or args.dropout_proportion < 0.0:
raise Exception("The value of dropout-proportion must be in range [0 - 1].")
return args
|
255ef1df0c5402b8d8d4dc21221f4bfd369fadf6
| 19,110 |
import torch
def make_features(batch, side, data_type='text'):
"""
Args:
batch (Tensor): a batch of source or target data.
side (str): for source or for target.
data_type (str): type of the source input.
Options are [text|img|audio].
Returns:
A sequence of src/tgt tensors with optional feature tensors
of size (len x batch).
"""
assert side in ['src', 'conversation', 'tgt']
if isinstance(batch.__dict__[side], tuple):
data = batch.__dict__[side][0]
else:
data = batch.__dict__[side]
feat_start = side + "_feat_"
keys = sorted([k for k in batch.__dict__ if feat_start in k])
features = [batch.__dict__[k] for k in keys]
levels = [data] + features
if data_type == 'text':
return torch.cat([level.unsqueeze(2) for level in levels], 2)
else:
return levels[0]
|
6ffed5546ea35a7be559f58521aa119d576ed465
| 19,111 |
def page_dirs_to_file_name(page_dirs):
"""
    Convert a page path given as a list of directories, e.g. [category1, category2, page],
    into a file name of the form "category1_._category2_._page".
    :param page_dirs: list of directory names that make up the page path
    :return: the converted file name
"""
file_name = ""
for page_dir in page_dirs:
if page_dir:
file_name = file_name + page_dir.strip() + '_._'
file_name = file_name[0:-len('_._')]
file_name = _replace_windows_ng_word(file_name)
return file_name
|
1e7bb5f04900440824e7a223fbb88599add86c07
| 19,112 |
import arcpy
def has_field(feature_class, field_name):
"""Returns true if the feature class has a field named field_name."""
for field in arcpy.ListFields(feature_class):
if field.name.lower() == field_name.lower():
return True
return False
|
afe2352a1a17b9c0c48e68b68ab41595230343f9
| 19,113 |
import typing as T
def process_settings(settings: AttrDict, params: T.Optional[T.Set[str]] = None, ignore: T.Iterable[str]=()) -> AttrDict:
"""
    Process a dict-like set of input parameters according to the rules specified in the
    `Input parameter documentation <https://sqsgenerator.readthedocs.io/en/latest/input_parameters.html>`_. This function
    should be used for processing user input; it therefore exports the parser functions defined in
    ``sqsgenerator.settings.readers``. To process only a specific subset of parameters, the {params} argument is used.
    To {ignore} specific parameters, pass an iterable of parameter names.
    :param settings: the dict-like user configuration
    :type settings: AttrDict
    :param params: If specified, only the subset {params} is processed (default is ``None``)
    :type params: Optional[Set[``str``]]
:param ignore: a list/iterable of params to ignore (default is ``()``)
:type ignore: Iterable[``str``]
:return: the processed settings dictionary
:rtype: AttrDict
"""
params = params if params is not None else set(parameter_list())
last_needed_parameter = max(params, key=parameter_index)
ignore = set(ignore)
for index, (param, processor) in enumerate(__parameter_registry.items()):
if param not in params:
# we can only skip this parameter if None of the other parameters depends on param
if parameter_index(param) > parameter_index(last_needed_parameter):
continue
if param in ignore:
continue
settings[param] = processor(settings)
return settings
|
0cd49f857fe2923d71fb4be46cac4eefa1fa11bf
| 19,114 |
def serialize_block(block: dict) -> Block:
"""Serialize raw block from dict to structured and filtered custom Block object
Parameters
----------
block : dict
Raw KV block data from gRPC response
Returns
-------
Block
Structured, custom defined Block object for more controlled data access
"""
return Block(
block.get("id", None),
block.get("number", None),
block.get("header", {}).get("timestamp", None),
block.get("header", {}).get("producer", None),
block.get("unfilteredTransactionCount", 0),
block.get("unfilteredTransactionTraceCount", 0),
block.get("unfilteredExecutedInputActionCount", 0),
block.get("unfilteredExecutedTotalActionCount", 0),
block.get("filteringIncludeFilterExpr", 0),
block.get("filteredTransactionTraceCount", 0),
block.get("filteredExecutedInputActionCount", 0),
block.get("filteredExecutedTotalActionCount", 0),
list(
map(
lambda tx_trace: TransactionTrace(
tx_trace.get("id", None),
tx_trace.get("blockNum", None),
tx_trace.get("blockTime", None),
tx_trace.get("receipt", {}).get("status", None),
tx_trace.get("receipt", {}).get("cpuUsageMicroSeconds", None),
tx_trace.get("netUsage", None),
tx_trace.get("elapsed", None),
list(
map(
lambda act_trace: ActionTrace(
act_trace.get("transactionId", None),
act_trace.get("blockNum", None),
act_trace.get("actionOrdinal", None),
Action(
act_trace.get("action", {}).get("account", None),
act_trace.get("action", {}).get("name", None),
act_trace.get("action", {}).get(
"jsonData", {"from": None, "to": None}
),
),
act_trace.get("elapsed", None),
act_trace.get("action", {}).get(
"authorization", [{"actor": None}]
)[0]["actor"],
act_trace.get("receiver", None),
),
tx_trace.get("actionTraces", None),
)
),
),
block.get("filteredTransactionTraces", []),
)
),
block.get("filteredTransactionCount", 0),
)
|
05931685b970a562b108df134e26c6857bd9bb6a
| 19,115 |
from pandas import get_option
def repr_pandas_Series(series, _):
"""
This function can be configured by setting the `max_rows` attributes.
"""
return series.to_string(
max_rows=repr_pandas_Series.max_rows,
name=series.name,
dtype=series.dtype,
length=get_option("display.show_dimensions"),
)
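# Usage sketch: max_rows is read off the function object itself, so it has to be
# set before the first call.
import pandas as pd

repr_pandas_Series.max_rows = 10
s = pd.Series(range(100), name="values")
print(repr_pandas_Series(s, None))  # truncated to 10 rows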
|
86009d8fc1559dd97361a8c5e113c5477ff73de2
| 19,116 |
import pyrealsense2 as rs
def convert_fmt(fmt):
"""rs.format to pyglet format string"""
return {
rs.format.rgb8: 'RGB',
rs.format.bgr8: 'BGR',
rs.format.rgba8: 'RGBA',
rs.format.bgra8: 'BGRA',
rs.format.y8: 'L',
}[fmt]
|
b2f34498969d2e29d8c21367788ddcaebe205acf
| 19,117 |
import math
def train_ALS(train_data, validation_data, num_iters, reg_param, ranks):
"""
Grid Search Function to select the best model based on RMSE of hold-out data
"""
# initial
min_error = float('inf')
best_rank = -1
best_regularization = 0
best_model = None
for rank in ranks:
for reg in reg_param:
# train ALS model
model = ALS.train(
ratings=train_data, # (userID, productID, rating) tuple
iterations=num_iters,
rank=rank,
lambda_=reg, # regularization param
seed=99)
# make prediction
valid_data = validation_data.map(lambda p: (p[0], p[1]))
predictions = model.predictAll(valid_data).map(lambda r: ((r[0], r[1]), r[2]))
# get the rating result
ratesAndPreds = validation_data.map(lambda r: ((r[0], r[1]), r[2])).join(predictions)
# get the RMSE
MSE = ratesAndPreds.map(lambda r: (r[1][0] - r[1][1])**2).mean()
error = math.sqrt(MSE)
print('{} latent factors and regularization = {}: validation RMSE is {}'.format(rank, reg, error))
if error < min_error:
min_error = error
best_rank = rank
best_regularization = reg
best_model = model
print('\nThe best model has {} latent factors and regularization = {}'.format(best_rank, best_regularization))
return best_model
|
99d0584e9374a529632024caeadb88d85c681b81
| 19,118 |
import sys
def parse_input_vspec (opts):
"""Parses input from vspec and returns excitation energies in the
    form [energy, f], in eV and atomic units, respectively."""
lines = sys.stdin.readlines ()
inside_data = False
roots = []
for l in lines:
if "<START>" in l:
try:
ls = l.split()
tag = ls[0]
nexcite = int (ls[1])
except:
raise Exception ("Failed to parse <START> tag and number: {0}".format(l))
iexcite = 0
inside_data = True
continue
if inside_data:
if "<END>" in l:
inside_data = False
continue
# break
try:
line_split = l.strip().split()
n = int (line_split[0])
occ = int (line_split[1]) #not used
virtual = int (line_split[2]) #not used
energy_ev = float (line_split[3])
osc = float (line_split[7])
except:
raise Exception ("Failed to parse data line: {0}".format(l))
iexcite = iexcite + 1
if n != iexcite:
raise Exception ("Expected excitation number {0}, found {1}".format(iexcite, n))
if energy_ev < 0.0:
print ("{0} Warning: Ignored negative vpsec excitation: {1} eV, {2}".format(opts.cchar, energy_ev, osc))
if opts.verbose:
sys.stderr.write ("Warning: Ignored negative vpsec excitation: {0} eV, {1}\n".format(energy_ev, osc))
else:
roots.append ([energy_ev, osc])
# if not inside_data:
# raise Exception ("Failed to find <START> tag")
if iexcite != nexcite:
print ("{0} Warning: Expected {1} excitations, found {2}".format(opts.cchar, nexcite, iexcite))
if opts.verbose:
sys.stderr.write ("Warning: Expected {0} excitations, found {1}\n".format(nexcite,iexcite))
if opts.verbose:
sys.stderr.write ("{0}: Found {1} vspec excitations\n".format(pname, len(roots)))
return roots
|
cd381ee4ab67c13a43311b9bc1bdc185c46ed86d
| 19,119 |
import os
def is_yaml_file(filename):
"""Return true if 'filename' ends in .yml or .yaml, and false otherwise."""
return os.path.splitext(filename)[1] in (".yaml", ".yml")
|
5d9f6faf485e3724aaef19294cfde52671505a7f
| 19,120 |
import copy
def response_ack(**kwargs):
""" Policy-based provisioning of ACK value. """
try:
tlv, code, policy, post_c2c = kwargs["tlv"], kwargs["code"], kwargs["policy"], kwargs["post_c2c"]
new_tlv = copy.deepcopy(tlv)
if post_c2c is not True:
ret = policy.get_available_policy(new_tlv)
if ret == None:
new_tlv["notAvailable"]
new_tlv['ope'] = 'info'
return [new_tlv]
except Exception as ex:
print("Exception in response_ack()", ex)
return None
|
ff34cf196e0d565ebac7700f5b412f615685ca37
| 19,121 |
def is_file(path, use_sudo=False):
"""
Check if a path exists, and is a file.
"""
func = use_sudo and sudo or run
with settings(hide('running', 'warnings'), warn_only=True):
return func('[ -f "%(path)s" ]' % locals()).succeeded
|
9b3402205fe972dbedfa582117b6d03bdb949122
| 19,122 |
def bounded_random_walk(minval, maxval, delta_min, delta_max, T,
dtype=tf.float32, dim=1):
"""
Simulates a random walk with boundary conditions. Used for data augmentation
along entire tube.
Based on: https://stackoverflow.com/questions/48777345/vectorized-random-
walk-in-python-with-boundaries
Args:
minval (int/float): Minimum value.
maxval (int/float): Maximum value.
delta_min (int/float): Minimum change.
delta_max (int/float): Maximum change.
T (int): Length of sequence.
dtype (type): Data type of walk.
dim (int): Dimension.
Returns:
Tensor (T x dim).
"""
if maxval <= minval:
return tf.ones((T, dim)) * minval
# Don't do this yet for consistency
if minval == delta_min and maxval == delta_max:
print('Using the old data augmentation!')
walk = tf.random_uniform(
shape=(T, dim),
minval=minval,
maxval=maxval,
dtype=dtype,
)
return walk
start = tf.random_uniform(
shape=(1, dim),
minval=minval,
maxval=maxval,
dtype=dtype,
)
size = maxval - minval
walk = tf.cumsum(tf.random_uniform(
shape=(T, dim),
minval=delta_min,
maxval=delta_max,
dtype=dtype,
))
return tf.abs((walk + start - minval + size) % (2 * size) - size) + minval
|
18bba29b9f0c320da04eb2419a49483ee301e178
| 19,123 |
from urllib import parse
def validate_photo_url(photo_url, required=False):
"""Parses and validates the given URL string."""
if photo_url is None and not required:
return None
if not isinstance(photo_url, str) or not photo_url:
raise ValueError(
'Invalid photo URL: "{0}". Photo URL must be a non-empty '
'string.'.format(photo_url))
try:
parsed = parse.urlparse(photo_url)
if not parsed.netloc:
raise ValueError('Malformed photo URL: "{0}".'.format(photo_url))
return photo_url
except Exception:
raise ValueError('Malformed photo URL: "{0}".'.format(photo_url))
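# Usage sketch for the validator above.
print(validate_photo_url("https://example.com/avatar.png"))  # echoed back when valid
print(validate_photo_url(None))                              # None is allowed unless required=True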
|
9c6d617d4b618f626c29977b0a7c4c9dc9b3f9ab
| 19,124 |
def to_flp(stipples, dpi=300, x_mm=0, y_mm=0, laser_pwr=35000,
ticks=500, base=100):
"""" Converts a set of stipples into a list of FLP packets
dpi is the image's DPI
x_mm and y_mm are the corner location of the image (default 0,0)
(where 0,0 is the center of the build platform)
laser_power is the laser's power level in ticks
    ticks is the number of frames the laser spends on a black point
base is the number of frames the laser spends on a white point
"""
# Accumulated list of FLP packets
packets = F.Packets()
# Sort by X to reduce the amount of laser moves necessary
stipples = sorted(stipples, key=lambda s: s[0])
# Draw stuff for every point
for x, y, i in stipples:
# Center position in mm
x = mm_to_pos(x / float(dpi) * 25.4 + x_mm)
y = mm_to_pos(y / float(dpi) * 25.4 + y_mm)
# Decide how long to stay on this point (longer time = darker point)
t = int(ceil((ticks - base) * (1 - i)) + base)
if t == 0:
continue
# Move to this stipple's location with the laser off, then pause
# briefly to let the controller stabilize
packets.append(F.LaserPowerLevel(0))
packets.append(F.XYMove([[x, y, 200], [x, y, 100]]))
# Draw the spot with the laser on
packets.append(F.LaserPowerLevel(laser_pwr))
packets.append(F.XYMove([[x, y, t]]))
return packets
|
9d826b6174478cbfb3d2033e05b9ccafb5dca79c
| 19,125 |
def GetSchema(component):
"""convience function for finding the parent XMLSchema instance.
"""
parent = component
while not isinstance(parent, XMLSchema):
parent = parent._parent()
return parent
|
3445acb7bade3cca15d4eeeb3da4d548ea44a206
| 19,126 |
def ranked_bots_query(alias="ranked_bots"):
"""
Builds a query that ranks all bots.
This is a function in case you need this as a subquery multiple times.
"""
return sqlalchemy.sql.select([
bots.c.user_id,
bots.c.id.label("bot_id"),
bots.c.mu,
bots.c.sigma,
bots.c.score,
bots.c.games_played,
bots.c.version_number,
bots.c.language,
bots.c.update_time,
bots.c.compile_status,
sqlalchemy.sql.func.rank().over(
order_by=bots.c.score.desc()
).label("bot_rank"),
sqlalchemy.sql.func.rank().over(
partition_by=users.c.organization_id,
order_by=bots.c.score.desc()
).label("bot_organization_rank"),
]).select_from(
bots.join(users, bots.c.user_id == users.c.id)
).where(
users.c.is_active == True
).order_by(
bots.c.score.desc()
).alias(alias)
|
f6641efa611884721e33453f4fcc4af0503b4aaf
| 19,127 |
def marathon_deployments_check(service):
"""Checks for consistency between deploy.yaml and the marathon yamls"""
the_return = True
pipeline_deployments = get_pipeline_config(service)
pipeline_steps = [step['instancename'] for step in pipeline_deployments]
pipeline_steps = [step for step in pipeline_steps if step not in DEPLOY_PIPELINE_NON_DEPLOY_STEPS]
marathon_steps = get_marathon_steps(service)
in_marathon_not_deploy = set(marathon_steps) - set(pipeline_steps)
if len(in_marathon_not_deploy) > 0:
print "%s There are some instance(s) you have asked to run in marathon that" % x_mark()
print " do not have a corresponding entry in deploy.yaml:"
print " %s" % PaastaColors.bold(", ".join(in_marathon_not_deploy))
print " You should probably add entries to deploy.yaml for them so they"
print " are deployed to those clusters."
the_return = False
in_deploy_not_marathon = set(pipeline_steps) - set(marathon_steps)
if len(in_deploy_not_marathon) > 0:
print "%s There are some instance(s) in deploy.yaml that are not referenced" % x_mark()
print " by any marathon instance:"
print " %s" % PaastaColors.bold((", ".join(in_deploy_not_marathon)))
print " You should probably delete these deploy.yaml entries if they are unused."
the_return = False
if the_return is True:
print success("All entries in deploy.yaml correspond to a marathon entry")
print success("All marathon instances have a corresponding deploy.yaml entry")
return the_return
|
3f2df53652efad4b731a05b3ecc17929d65982ac
| 19,128 |
def get_book_info(book_id, books):
"""Obtain meta data of certain books.
:param book_id: Books to look up
:type: int or list of ints
:param books: Dataframe containing the meta data
:type: pandas dataframe
:return: Meta data for the book ids
:rtype: List[str], List[str], List[str]
"""
if not isinstance(book_id, list):
book_id = [book_id]
book_authors, book_titles, book_img_urls = [], [], []
for i in book_id:
book_info = books.loc[books["book_id"]==i].squeeze()
if book_info.shape[0]==0:
raise ValueError("Could not find book_id {} in the dataset.".format(book_id))
book_authors.append(book_info.authors)
book_titles.append(book_info.title)
book_img_urls.append(book_info.image_url)
return book_authors, book_titles, book_img_urls
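# Usage sketch with a tiny stand-in for the books dataframe; the real dataset is
# assumed to contain at least these columns.
import pandas as pd

books = pd.DataFrame({
    "book_id": [1, 2],
    "authors": ["Author A", "Author B"],
    "title": ["Title A", "Title B"],
    "image_url": ["http://example.com/a.jpg", "http://example.com/b.jpg"],
})
authors, titles, urls = get_book_info([1, 2], books)
print(authors)  # ['Author A', 'Author B']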
|
64a91a498f9bf9df918d256a7ce705e98dadbbd9
| 19,129 |
import functools
import six
def save_error_message(func):
"""
This function will work only if transition_entity is defined in kwargs and
transition_entity is instance of ErrorMessageMixin
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exception:
message = six.text_type(exception)
transition_entity = kwargs['transition_entity']
if message:
transition_entity.error_message = message
transition_entity.save(update_fields=['error_message'])
raise exception
return wrapped
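# Usage sketch: the decorated callable must receive a transition_entity kwarg that
# behaves like ErrorMessageMixin (an error_message field plus save()); a minimal
# stand-in is used here.
class FakeEntity:
    error_message = ''
    def save(self, update_fields=None):
        pass  # stand-in for a Django model save

@save_error_message
def do_transition(transition_entity=None):
    raise RuntimeError("boom")

entity = FakeEntity()
try:
    do_transition(transition_entity=entity)
except RuntimeError:
    print(entity.error_message)  # 'boom'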
|
9ac592100445a0232efc4afaa3807b050c8eddff
| 19,130 |
def EMV(data,n=20,m=23):
"""
"""
def emv(high,low,vol,n=14):
MID = np.zeros(len(high))
MID[1:] = (np.array(high[1:])+np.array(low[1:])-np.array(high[:-1])-np.array(low[:-1]))/2.
BRO = np.array(vol)/(100000000.*(np.array(high)-np.array(low)))
EM = MID/BRO
return ta.SMA(EM,n)
data['emv'] = emv(np.array(data.high),np.array(data.low),np.array(data.vol),n)
data['maemv'] = ta.SMA(np.array(data['emv']),m)
signal = pd.DataFrame(index=data.index)
#strategy 1
"""
    EMV greater than 0: buy (signal = 1)
    EMV less than 0: sell (signal = -1)
    Commonly used parameter: n = 14
"""
signal['1'] = (data['emv']>0)*2 - 1
#strategy 2
"""
    EMV greater than MAEMV: buy (signal = 1)
    EMV less than MAEMV: sell (signal = -1)
    Parameters: n = 20, m = 23
"""
signal['2'] = (data['emv'] > data['maemv'])*2 - 1
signal = signal.fillna(0)
return signal
|
a3555738c2f0c047ad4c21ae32dcfed460a9ec5b
| 19,131 |
from typing import Tuple
def desired_directions(state: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Given the current state and destination, compute desired direction."""
destination_vectors = state[:, 4:6] - state[:, 0:2]
directions, dist = normalize(destination_vectors)
return directions, dist
|
02088734bd3ef6ec2e1b009d5c43e6ea9f008aab
| 19,132 |
import operator
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
    # assert number < 2 << max_length
    shifts = list(map(operator.rshift, max_length * [number],
                      range(max_length - 1, -1, -1)))
    digits = list(map(operator.mod, shifts, max_length * [2]))
    if not digits.count(1):
        return 0
    digits = digits[digits.index(1):]
    return ''.join(map(repr, digits)).replace('L', '')
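# Usage sketch (after the list() fixes above so the snippet runs on Python 3).
print(binary_repr(10))  # '1010'
print(binary_repr(0))   # 0 when no bit is set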
|
40d0198067722d8d4c1ef1e1a195ce6817ab0935
| 19,133 |
def df_fc_overlap_9():
"""Scenario case with 3 sets of 2 overlapping fragments, bound to a common combination of 2 redundant fragments."""
mol = Chem.MolFromSmiles('NC1C(O)C(CCCC2CC2CCC2CC2)C1CCC1CC(C(N)C1O)C1CCC(O)C(N)C1')
return DataFrame([
['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion', 'false_positive', 'overlap', (30, 29, 28, 27, 26, 33, 31), (32, 31, 29, 28, 27, 26, 33), 34, mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'],
['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (25, 24, 22, 21, 20, 19), 34, mol, mol_o1, mol_o4, 'O1:0@4[cm]O4:0@3'],
['mol_fc_overlap_9', 'XXX', 'O1', 0, 'O1:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (30, 29, 28, 27, 26, 33, 31), (23, 22, 21, 20, 19, 24), 34, mol, mol_o1, mol_o5, 'O1:0@4[cm]O5:0@2'],
['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O4:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (25, 24, 22, 21, 20, 19), 34, mol, mol_o2, mol_o4, 'O2:0@5[cm]O4:0@3'],
['mol_fc_overlap_9', 'XXX', 'O2', 0, 'O2:0', 'O5', 0, 'O5:0', 'cm', 'connection', 'monopodal', '', (32, 31, 29, 28, 27, 26, 33), (23, 22, 21, 20, 19, 24), 34, mol, mol_o2, mol_o5, 'O2:0@5[cm]O5:0@2'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O5', 0, 'O5:0', 'ffo', 'fusion', 'false_positive', 'overlap', (25, 24, 22, 21, 20, 19), (23, 22, 21, 20, 19, 24), 34, mol, mol_o4, mol_o5, 'O4:0@1,2,3,4,5[ffo]O5:0@1,2,3,4,5'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (1, 2, 4, 16), 34, mol, mol_o4, mol_o6, 'O4:0@5[cm]O6:0@3'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (0, 1, 2, 4, 16), 34, mol, mol_o4, mol_o8, 'O4:0@5[cm]O8:0@4'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O4:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (25, 24, 22, 21, 20, 19), (3, 2, 1, 16, 4), 34, mol, mol_o4, mol_o9, 'O4:0@5[cm]O9:0@3'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O6', 0, 'O6:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (1, 2, 4, 16), 34, mol, mol_o5, mol_o6, 'O5:0@4[cm]O6:0@3'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (0, 1, 2, 4, 16), 34, mol, mol_o5, mol_o8, 'O5:0@4[cm]O8:0@4'],
['mol_fc_overlap_9', 'XXX', 'O5', 0, 'O5:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (23, 22, 21, 20, 19, 24), (3, 2, 1, 16, 4), 34, mol, mol_o5, mol_o9, 'O5:0@4[cm]O9:0@3'],
['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O7', 0, 'O7:0', 'cm', 'connection', 'monopodal', '', (1, 2, 4, 16), (8, 9, 10), 34, mol, mol_o6, mol_o7, 'O6:0@2[cm]O7:0@0'],
['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O8', 0, 'O8:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (0, 1, 2, 4, 16), 34, mol, mol_o6, mol_o8, 'O6:0@0,1,2,3[ffs]O8:0@1,2,3,4'],
['mol_fc_overlap_9', 'XXX', 'O6', 0, 'O6:0', 'O9', 0, 'O9:0', 'ffs', 'fusion', 'false_positive', 'substructure', (1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o6, mol_o9, 'O6:0@0,1,2,3[ffs]O9:0@1,2,3,4'],
['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O7', 1, 'O7:1', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (13, 14, 15), 34, mol, mol_o7, mol_o7, 'O7:0@2[cm]O7:1@0'],
['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O8', 0, 'O8:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (0, 1, 2, 4, 16), 34, mol, mol_o7, mol_o8, 'O7:0@0[cm]O8:0@3'],
['mol_fc_overlap_9', 'XXX', 'O7', 0, 'O7:0', 'O9', 0, 'O9:0', 'cm', 'connection', 'monopodal', '', (8, 9, 10), (3, 2, 1, 16, 4), 34, mol, mol_o7, mol_o9, 'O7:0@0[cm]O9:0@4'],
['mol_fc_overlap_9', 'XXX', 'O8', 0, 'O8:0', 'O9', 0, 'O9:0', 'ffo', 'fusion', 'false_positive', 'overlap', (0, 1, 2, 4, 16), (3, 2, 1, 16, 4), 34, mol, mol_o8, mol_o9, 'O8:0@1,2,3,4[ffo]O9:0@1,2,3,4'],
], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2', 'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac', 'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
|
5a86bce8741b76ac265b5a1865f7d3d1ad8970ea
| 19,134 |
import requests
import json
def check_cal(es_url, es_index, id):
"""Query for calibration file with specified input ID."""
query = {
"query":{
"bool":{
"must": [
{ "term": { "_id": id } },
]
}
},
"fields": [],
}
if es_url.endswith('/'):
search_url = '%s%s/_search' % (es_url, es_index)
else:
search_url = '%s/%s/_search' % (es_url, es_index)
#logger.info("search_url: %s" % search_url)
r = requests.post(search_url, data=json.dumps(query))
if r.status_code == 200:
result = r.json()
#logger.info(pformat(result))
total = result['hits']['total']
id = 'NONE' if total == 0 else result['hits']['hits'][0]['_id']
else:
logger.error("Failed to query %s:\n%s" % (es_url, r.text))
logger.error("query: %s" % json.dumps(query, indent=2))
logger.error("returned: %s" % r.text)
if r.status_code == 404: total, id = 0, 'NONE'
else: r.raise_for_status()
return total, id
|
10aab4dd6587b901cda543298c13662e6edeb0e1
| 19,135 |
def mountpoint_create(name, size):
"""Service Layer to create mountpoint"""
mountpoint = MountPoint(name, size)
return mountpoint
|
d52be1773b3cfad62423d2695b42d56ca83f7eaf
| 19,136 |
import requests
import json
def GetTSAWaitTimes(airportCode):
"""
Returns data from the TSA Wait Times API for a particular airport shortcode.
:param airportCode: 3-letter shortcode of airport
:return: Returns the full parsed json data from TSA Wait Times API
"""
base_url = "http://apps.tsa.dhs.gov/MyTSAWebService/GetTSOWaitTimes.ashx"
params_tsa_d = {}
params_tsa_d['ap'] = airportCode
params_tsa_d['output'] = 'json'
try:
## Uncomment this line if you want to get with caching for testing purposes
#tsa_result_diction = json.loads(get_with_caching(base_url, params_tsa_d, saved_cache, cache_fname))
## Comment out these two lines if you want to enable caching
results_tsa = requests.get(base_url, params=params_tsa_d)
tsa_result_diction = json.loads(results_tsa.text)
return tsa_result_diction
except Exception:
print("Error: Unable to load TSA wait times. Please try again.")
print("Exception: ")
# sys.exit(1)
quit()
|
bd03be14c95a3892ac75a0396da12ca04b52a59b
| 19,137 |
def False(context):
"""Function: <boolean> false()"""
return boolean.false
|
93d1f1c9fbe9cf7bb02d5caac2c01ed7d0d9a2dc
| 19,138 |
def namedtuple_to_dict(model_params):
"""Transfers model specification from a
named tuple class object to dictionary."""
init_dict = {}
init_dict["GENERAL"] = {}
init_dict["GENERAL"]["num_periods"] = model_params.num_periods
init_dict["GENERAL"]["num_choices"] = model_params.num_choices
init_dict["CONSTANTS"] = {}
init_dict["CONSTANTS"]["delta"] = model_params.delta
init_dict["CONSTANTS"]["mu"] = model_params.mu
init_dict["CONSTANTS"]["benefits"] = model_params.benefits
init_dict["INITIAL_CONDITIONS"] = {}
init_dict["INITIAL_CONDITIONS"]["educ_max"] = model_params.educ_max
init_dict["INITIAL_CONDITIONS"]["educ_min"] = model_params.educ_min
init_dict["SIMULATION"] = {}
init_dict["SIMULATION"]["seed_sim"] = model_params.seed_sim
init_dict["SIMULATION"]["num_agents_sim"] = model_params.num_agents_sim
init_dict["SOLUTION"] = {}
init_dict["SOLUTION"]["seed_emax"] = model_params.seed_emax
init_dict["SOLUTION"]["num_draws_emax"] = model_params.num_draws_emax
init_dict["PARAMETERS"] = {}
init_dict["PARAMETERS"]["optim_paras"] = model_params.optim_paras
init_dict["DERIVED_ATTR"] = {}
init_dict["DERIVED_ATTR"]["educ_range"] = model_params.educ_range
init_dict["DERIVED_ATTR"]["shocks_cov"] = model_params.shocks_cov
return init_dict
|
9ac2f23aff3b9c57599eb2c2c6cacd455ac711a5
| 19,139 |
def roberts(stream: Stream, *args, **kwargs) -> FilterableStream:
"""https://ffmpeg.org/ffmpeg-filters.html#roberts"""
return filter(stream, roberts.__name__, *args, **kwargs)
|
ff58eaea65d536b47614050600c91136dc2d6f7e
| 19,140 |
import json
def run_code():
"""
codec api response
{
"error": {
"decode:": "error message"
},
"output": {
"status_code": 0,
"result": {
"data_type": "event",
"data": {
"humidity": {
"time": 1547660823,
"value": 34
},
"temperature": {
"time": 1547660823,
"value": -3.7
}
}
}
}
}
"""
request_json = CodeRunSchema.validate_request()
analog_type = request_json.get('analogType')
protocol = db.session.query(Product.cloudProtocol) \
.filter(Product.productID == request_json.get('productID')) \
.scalar()
if protocol is None:
raise DataNotFound(field='productID')
request_url = f"http://{current_app.config['CODEC_NODE']}/api/v1/codec"
with SyncHttp() as sync_http:
response = sync_http.post(request_url, json=request_json)
if response.responseCode != 200:
try:
errors = json.loads(response.responseContent)
except Exception:
errors = {
'codec': response.responseContent
}
raise APIException(errors=errors)
response_json = json.loads(response.responseContent)
# return response if it has error
if 'error' in response_json:
return jsonify(response_json)
output_data = response_json.get('output')
status_code = output_data.get('status_code')
# If status code is 1(ERROR)
# or analog type is 2(encode)
# return response without validate
if status_code == 1 or analog_type == 2:
return jsonify(response_json)
result = output_data.get('result')
error_dict = {}
validate_data, validate_error = DecodeSchema().load(result)
for key, value in validate_error.items():
error_dict[key] = value[0][:-1]
data_stream = DataStream.query \
.filter(DataStream.productID == request_json.get('productID'),
DataStream.tenantID == g.tenant_uid, DataStream.topic == request_json.get('topic'),
DataStream.streamID == validate_data.get('stream_id')) \
.first()
if not data_stream:
raise DataNotFound(field='data_stream')
error, passed_data = validate_decode_response(data_stream, validate_data)
error_dict.update(error)
record = {
'output': {
'status_code': status_code,
'result': passed_data
}
}
if error_dict:
record['error'] = error_dict
return jsonify(record)
|
96118a1c74b027716a68d7c1f25eb3585e1a255c
| 19,141 |
from typing import Mapping
from pathlib import Path
import os
def _strip_paths(notebook_json: Mapping, project_root: Path):
"""Strip user paths from given notebook."""
project_root_string = str(project_root) + os.sep
mutated = False
for cell in notebook_json["cells"]:
if cell["cell_type"] == "code":
for output in cell["outputs"]:
for line_number, line in enumerate(output.get("text", [])):
if project_root_string in line:
output["text"][line_number] = line.replace(
project_root_string, ""
)
mutated = True
return notebook_json, mutated
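# Usage sketch with a minimal notebook dict containing only the fields the
# function inspects (paths shown are POSIX-style for illustration).
from pathlib import Path

nb = {
    "cells": [
        {"cell_type": "code",
         "outputs": [{"text": ["/home/user/project/src/main.py:1: warning\n"]}]}
    ]
}
cleaned, changed = _strip_paths(nb, Path("/home/user/project"))
print(changed)                                    # True
print(cleaned["cells"][0]["outputs"][0]["text"])  # ['src/main.py:1: warning\n']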
|
318493239c964104b4838ffe2ba2b37a295ef792
| 19,142 |
def build_dense_conf_block(x, filter_size=32, dropout_rate=None):
"""
builds a dense block according to https://arxiv.org/pdf/1608.06993.pdf
:param x:
:param dropout_rate:
:param filter_size
:return:
"""
x = BatchNormalization(axis=-1, epsilon=1.1e-5)(x)
x = Activation('relu')(x)
x = Conv2D(filter_size * 4, (1, 1), padding='same')(x)
x = Conv2D(filter_size, (3, 3), padding='same')(x)
if dropout_rate:
x = Dropout(dropout_rate)(x)
return x
|
2cb9639ed620d32c513ecbccf2c311360cc3cb9d
| 19,143 |
def _viz_flow(u, v, logscale=True, scaledown=6):
"""
Copied from @jswulff:
https://github.com/jswulff/pcaflow/blob/master/pcaflow/utils/viz_flow.py
top_left is zero, u is horizon, v is vertical
red is 3 o'clock, yellow is 6, light blue is 9, blue/purple is 12
"""
color_wheel = _color_wheel()
n_cols = color_wheel.shape[0]
radius = np.sqrt(u ** 2 + v ** 2)
if logscale:
radius = np.log(radius + 1)
radius = radius / scaledown
rot = np.arctan2(-v, -u) / np.pi
fk = (rot + 1) / 2 * (n_cols - 1) # -1~1 mapped to 0~n_cols
k0 = fk.astype(np.uint8) # 0, 1, 2, ..., n_cols
k1 = k0 + 1
k1[k1 == n_cols] = 0
f = fk - k0
n_colors = color_wheel.shape[1]
img = np.zeros(u.shape + (n_colors,))
for i in range(n_colors):
tmp = color_wheel[:, i]
col0 = tmp[k0]
col1 = tmp[k1]
col = (1 - f) * col0 + f * col1
idx = radius <= 1
# increase saturation with radius
col[idx] = 1 - radius[idx] * (1 - col[idx])
# out of range
col[~idx] *= 0.75
img[:, :, i] = np.floor(255 * col).astype(np.uint8)
return img.astype(np.uint8)
|
43901f227bc30367910bc41f9ba324bcc217bdbf
| 19,144 |
def submission_history(request, course_id, learner_identifier, location):
"""Render an HTML fragment (meant for inclusion elsewhere) that renders a
history of all state changes made by this user for this problem location.
Right now this only works for problems because that's all
StudentModuleHistory records.
"""
found_user_name = get_learner_username(learner_identifier)
if not found_user_name:
return HttpResponse(escape(_('User does not exist.')))
course_key = CourseKey.from_string(course_id)
try:
usage_key = UsageKey.from_string(location).map_into_course(course_key)
except (InvalidKeyError, AssertionError):
return HttpResponse(escape(_('Invalid location.')))
course = get_course_overview_with_access(request.user, 'load', course_key)
staff_access = bool(has_access(request.user, 'staff', course))
# Permission Denied if they don't have staff access and are trying to see
# somebody else's submission history.
if (found_user_name != request.user.username) and (not staff_access):
raise PermissionDenied
user_state_client = DjangoXBlockUserStateClient()
try:
history_entries = list(user_state_client.get_history(found_user_name, usage_key))
except DjangoXBlockUserStateClient.DoesNotExist:
return HttpResponse(escape(_('User {username} has never accessed problem {location}').format(
username=found_user_name,
location=location
)))
# This is ugly, but until we have a proper submissions API that we can use to provide
# the scores instead, it will have to do.
csm = StudentModule.objects.filter(
module_state_key=usage_key,
student__username=found_user_name,
course_id=course_key)
scores = BaseStudentModuleHistory.get_history(csm)
if len(scores) != len(history_entries):
log.warning(
"Mismatch when fetching scores for student "
"history for course %s, user %s, xblock %s. "
"%d scores were found, and %d history entries were found. "
"Matching scores to history entries by date for display.",
course_id,
found_user_name,
location,
len(scores),
len(history_entries),
)
scores_by_date = {
score.created: score
for score in scores
}
scores = [
scores_by_date[history.updated]
for history in history_entries
]
context = {
'history_entries': history_entries,
'scores': scores,
'username': found_user_name,
'location': location,
'course_id': str(course_key)
}
return render_to_response('courseware/submission_history.html', context)
|
dd0459844b4f30e653dacf474cdb5ddf186ed0dc
| 19,145 |
def tou(month, weekday, hour):
""" Calculate TOU pricing
"""
if weekday in [0, 6]:
return OFFPEAK
else:
if month in [5, 6, 7, 8, 9, 10]:
if hour in [11, 12, 13, 14, 15, 16]:
return ONPEAK
elif hour in [7, 8, 9, 10, 17, 18, 19, 20]:
return MIDPEAK
else:
return OFFPEAK
else:
if hour in [11, 12, 13, 14, 15, 16]:
return MIDPEAK
elif hour in [7, 8, 9, 10, 17, 18, 19, 20]:
return ONPEAK
else:
return OFFPEAK
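# Usage sketch; OFFPEAK, MIDPEAK and ONPEAK are module-level rate constants not
# shown above, so illustrative values are assumed here.
OFFPEAK, MIDPEAK, ONPEAK = 0.082, 0.113, 0.170

print(tou(7, 2, 13))  # summer weekday afternoon -> ONPEAK
print(tou(1, 3, 13))  # winter weekday afternoon -> MIDPEAK
print(tou(7, 0, 13))  # weekday code 0 or 6 is treated as a weekend -> OFFPEAK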
|
31708916be97d52d229499053b0b3d29603fdfb9
| 19,146 |
import json
def get_job(request):
""" Retrieve a specific Job
URL: /admin/Jobs/GetOne
:param request:
:return:
"""
id = request.GET.dict().get("id")
response = {
'status': 1,
'status_message': 'Success',
        'job': list(job.objects.filter(id=id).values())
}
return HttpResponse(json.dumps(response))
|
82d4c981b48fb0274ae4f2f888149b11ed731b88
| 19,147 |
def cpt_lvq_merid_deriv(temp, sphum):
"""Meridional derivative of c_p*T + L_v*q on pressure coordinates."""
deriv_obj = LatCenDeriv(cpt_lvq(temp, sphum), LAT_STR)
return deriv_obj.deriv()
|
629a630bb0663b16f20fb0f7d68ca54ebebc7e21
| 19,148 |
import os
def ZonalStats(fhs, dates, output_dir, quantity, unit, location, color = '#6bb8cc'):
"""
Calculate and plot some statictics of a timeseries of maps.
Parameters
----------
fhs : ndarray
Filehandles pointing to maps.
dates : ndarray
Datetime.date object corresponding to fhs.
output_dir : str
Folder to save the graphs.
quantity : str
Quantity of the maps.
unit : str
Unit of the maps.
location : str
Location name of the maps.
color : str, optional
Color in which the graphs will be plotted, default is '#6bb8cc'.
Returns
-------
monthly_average : float
Monthly spatial average.
yearly_average : float
Yearly spatial average.
Examples
--------
>>> ZonalStats(p_fhs, p_dates, output_dir, 'Precipitation', 'mm/month', 'North-Vietnam')
>>> ZonalStats(et_fhs, et_dates, output_dir, 'Evapotranspiration', 'mm/month', 'South-Vietnam')
"""
ts = np.array([])
data_monthly_ts = dict()
data_monthly_counter = dict()
months = np.unique([date.month for date in dates])
for month in months:
data_monthly_ts[month] = 0
data_monthly_counter[month] = 0
data_yearly_ts = dict()
data_yearly_counter = dict()
years = np.unique([date.year for date in dates])
for year in years:
data_yearly_ts[year] = 0
data_yearly_counter[year] = 0
for date in dates:
DATA = OpenAsArray(fhs[dates == date][0], nan_values = True)
data = np.nanmean(DATA)
ts = np.append(ts, data)
data_monthly_ts[date.month] += data
data_monthly_counter[date.month] += 1
data_yearly_ts[date.year] += data
data_yearly_counter[date.year] += 1
monthly_ts = np.array(data_monthly_ts.values()) / np.array(data_monthly_counter.values())
months = np.array(data_monthly_ts.keys())
yearly_mask = np.array(data_yearly_counter.values()) == 12
yearly_ts = np.array(data_yearly_ts.values())[yearly_mask] / np.array(data_yearly_counter.values())[yearly_mask]
years = np.array(data_yearly_ts.keys())[yearly_mask]
idx = np.argsort(dates)
fig = plt.figure(figsize = (10,5))
plt.clf()
plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
ax = plt.subplot(111)
ax.plot(dates[idx], ts[idx], '-k')
ax.fill_between(dates[idx], ts[idx], color = color)
ax.set_xlabel('Time')
ax.set_ylabel(quantity + ' ' + unit)
ax.set_title(quantity + ', ' + location)
fig.autofmt_xdate()
[i.set_zorder(10) for i in ax.spines.itervalues()]
plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_ts.png'))
plt.close(fig)
fig = plt.figure(figsize = (10,5))
plt.clf()
plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
ax = plt.subplot(111)
ax.bar(months - 0.4, monthly_ts, 0.8, color = color)
ax.set_xlabel('Time [month]')
ax.set_xlim([0, max(months)+1])
ax.set_xticks(months)
ax.set_ylabel(quantity + ' ' + unit)
ax.set_title('Monthly average ' + quantity + ', ' + location)
[i.set_zorder(10) for i in ax.spines.itervalues()]
plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_monthly.png'))
plt.close(fig)
fig = plt.figure(figsize = (10,5))
plt.clf()
plt.grid(b=True, which='Major', color='0.65',linestyle='--', zorder = 0)
ax = plt.subplot(111)
ax.bar(years - 0.4, yearly_ts, 0.8, color = color)
ax.set_xlabel('Time [year]')
ax.set_xlim([min(years) - 1, max(years)+1])
ax.set_ylabel(quantity + ' ' + unit)
ax.set_title('Yearly average ' + quantity + ', ' + location)
[i.set_zorder(10) for i in ax.spines.itervalues()]
plt.savefig(os.path.join(output_dir, quantity + '_' + location + '_yearly.png'))
plt.close(fig)
monthly_max = np.nanmax(monthly_ts)
monthly_average = np.nanmean(monthly_ts)
yearly_average = np.nanmean(yearly_ts)
return monthly_max, monthly_average, yearly_average
|
f27f15c4dee0e2c163fd3b1d748a264768bae7e2
| 19,149 |
def hubert_pretrain_large(
encoder_projection_dropout: float = 0.0,
encoder_attention_dropout: float = 0.0,
encoder_ff_interm_dropout: float = 0.0,
encoder_dropout: float = 0.0,
encoder_layer_drop: float = 0.0,
) -> HuBERTPretrainModel:
# Overriding the signature so that the return type is correct on Sphinx
"""hubert_pretrain_large(encoder_projection_dropout: float = 0.0, encoder_attention_dropout: float = 0.0, encoder_ff_interm_dropout: float = 0.0, encoder_dropout: float = 0.0, encoder_layer_drop: float = 0.0) -> torchaudio.models.HuBERTPretrainModel
Build HuBERTPretrainModel model for pre-training with "large" architecture from *HuBERT* [:footcite:`hsu2021hubert`]
Args:
encoder_projection_dropout (float):
See :py:func:`hubert_pretrain_model`.
encoder_attention_dropout (float):
See :py:func:`hubert_pretrain_model`.
encoder_ff_interm_dropout (float):
See :py:func:`hubert_pretrain_model`.
encoder_dropout (float):
See :py:func:`hubert_pretrain_model`.
encoder_layer_drop (float):
See :py:func:`hubert_pretrain_model`.
Returns:
HuBERTPretrainModel:
The resulting model.
""" # noqa: E501
return hubert_pretrain_model(
extractor_mode="layer_norm",
extractor_conv_layer_config=None,
extractor_conv_bias=False,
encoder_embed_dim=1024,
encoder_projection_dropout=encoder_projection_dropout,
encoder_pos_conv_kernel=128,
encoder_pos_conv_groups=16,
encoder_num_layers=24,
encoder_num_heads=16,
encoder_attention_dropout=encoder_attention_dropout,
encoder_ff_interm_features=4096,
encoder_ff_interm_dropout=encoder_ff_interm_dropout,
encoder_dropout=encoder_dropout,
encoder_layer_norm_first=True,
encoder_layer_drop=encoder_layer_drop,
mask_prob=0.80,
mask_selection="static",
mask_other=0.0,
mask_length=10,
no_mask_overlap=False,
mask_min_space=1,
mask_channel_prob=0.0,
mask_channel_selection="static",
mask_channel_other=0.0,
mask_channel_length=10,
no_mask_channel_overlap=False,
mask_channel_min_space=1,
skip_masked=False,
skip_nomask=False,
num_classes=500,
final_dim=768,
)
|
dd57cfcb803424ed46fcb597a71aa8e88de3ad32
| 19,150 |
import numpy as np
import numpy.random as npr
from math import cos, sin
def random_scaled_rotation(ralpha=(-0.2, 0.2), rscale=((0.8, 1.2), (0.8, 1.2))):
"""Compute a random transformation matrix for a scaled rotation.
:param ralpha: range of rotation angles
:param rscale: range of scales for x and y
:returns: random transformation
"""
affine = np.eye(2)
if rscale is not None:
(x0, x1), (y0, y1) = rscale
affine = np.diag([npr.uniform(x0, x1), npr.uniform(y0, y1)])
if ralpha is not None:
a0, a1 = ralpha
a = npr.uniform(a0, a1)
c = cos(a)
s = sin(a)
m = np.array([[c, -s], [s, c]], 'f')
affine = np.dot(m, affine)
return affine
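# Usage sketch: draw one random transform and apply it to a pair of unit vectors
# (uses the numpy / numpy.random / math imports added above).
pts = np.array([[1.0, 0.0], [0.0, 1.0]])
M = random_scaled_rotation()
print(pts @ M.T)  # rotated and scaled copies of the unit vectors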
|
f6216486e94fa7eac0be75b2a420fc1f251987c2
| 19,151 |
import time
def time_as_int() -> int:
"""
Syntactic sugar for
>>> from time import time
>>> int(time())
"""
return int(time.time())
|
f7f6d037d156c09a01c0ff13f8b43418133ab1b0
| 19,152 |
from unittest.mock import MagicMock
from unittest.mock import Mock
from unittest.mock import patch
def test_end_response_is_one_send():
"""Test that ``HAPServerHandler`` sends the whole response at once."""
class ConnectionMock:
sent_bytes = []
def sendall(self, bytesdata):
self.sent_bytes.append([bytesdata])
return 1
def getsent(self):
return self.sent_bytes
amock = Mock()
with patch("pyhap.hap_server.HAPServerHandler.setup"), patch(
"pyhap.hap_server.HAPServerHandler.handle_one_request"
), patch("pyhap.hap_server.HAPServerHandler.finish"):
handler = hap_server.HAPServerHandler(
"mocksock", "mockclient_addr", "mockserver", amock
)
handler.request_version = "HTTP/1.1"
handler.connection = ConnectionMock()
handler.requestline = "GET / HTTP/1.1"
handler.send_response(200)
handler.wfile = MagicMock()
handler.end_response(b"body")
assert handler.connection.getsent() == [
[b"HTTP/1.1 200 OK\r\nContent-Length: 4\r\n\r\nbody"]
]
assert handler._headers_buffer == [] # pylint: disable=protected-access
assert handler.wfile.called_once()
|
7c28c6b6fb8f123daa75f9710c26d5345810160b
| 19,153 |
def compute_norm_cond_entropy_corr(data_df, attrs_from, attrs_to):
"""
Computes the correlations between attributes by calculating
the normalized conditional entropy between them. The conditional
entropy is asymmetric, therefore we need pairwise computation.
The computed correlations are stored in a dictionary in the format:
{
attr_a: { cond_attr_i: corr_strength_a_i,
cond_attr_j: corr_strength_a_j, ... },
attr_b: { cond_attr_i: corr_strength_b_i, ...}
}
:return a dictionary of correlations
"""
corr = {}
# Compute pair-wise conditional entropy.
for x in attrs_from:
corr[x] = {}
for y in attrs_to:
# Set correlation to 1 for same attributes.
if x == y:
corr[x][y] = 1.0
continue
xy_df = data_df[[x, y]]
xy_df = xy_df.loc[~(xy_df[x] == NULL_REPR) & ~(xy_df[y] == NULL_REPR)]
x_vals = xy_df[x]
x_domain_size = x_vals.nunique()
# Set correlation to 0.0 if entropy of x is 1 (only one possible value).
if x_domain_size == 1 or len(xy_df) == 0:
corr[x][y] = 0.0
continue
# Compute the conditional entropy H(x|y) = H(x,y) - H(y).
# H(x,y) denotes H(x U y).
# If H(x|y) = 0, then y determines x, i.e., y -> x.
# Use the domain size of x as a log base for normalization.
y_vals = xy_df[y]
x_y_entropy = drv.entropy_conditional(x_vals, y_vals, base=x_domain_size).item()
# The conditional entropy is 0 for strongly correlated attributes and 1 for
# completely independent attributes. We reverse this to reflect the correlation.
corr[x][y] = 1.0 - x_y_entropy
return corr
|
12dafa7ecb941c008ab2bb7c93ed6e0c8b1302ad
| 19,154 |
def should_retry_http_code(status_code):
"""
:param status_code: (int) http status code to check for retry eligibility
:return: (bool) whether or not responses with the status_code should be retried
"""
return status_code not in range(200, 500)
|
69acb5bd34b06e1ff1e29630ac93e60a3ccc835c
| 19,155 |
import numpy as np
def softmax(x):
"""Calculates the softmax for each row of the input x.
Your code should work for a row vector and also for matrices of shape (n, m).
Argument:
x -- A numpy matrix of shape (n,m)
Returns:
s -- A numpy matrix equal to the softmax of x, of shape (n,m)
"""
# Apply exp() element-wise to x. Use np.exp(...).
x_exp = np.exp(x)
# Create a vector x_sum that sums each row of x_exp. Use np.sum(..., axis = 1, keepdims = True).
x_sum = np.sum(x_exp, axis=1, keepdims=True)
# Compute softmax(x) by dividing x_exp by x_sum. It should automatically use numpy broadcasting.
s = x_exp / x_sum
# print("x_exp: {}, x_sum: {}".format(x_exp.shape, x_sum.shape))
return s
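# Usage sketch: every row of the result sums to 1.
x = np.array([[9.0, 2.0, 5.0, 0.0, 0.0],
              [7.0, 5.0, 0.0, 0.0, 0.0]])
s = softmax(x)
print(s.shape)        # (2, 5)
print(s.sum(axis=1))  # [1. 1.]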
|
d4905ec1a145aae47532b43a66a00a29180a37e4
| 19,156 |
def inertia_tensor_eigvals(image, mu=None, T=None):
"""Compute the eigenvalues of the inertia tensor of the image.
The inertia tensor measures covariance of the image intensity along
the image axes. (See `inertia_tensor`.) The relative magnitude of the
eigenvalues of the tensor is thus a measure of the elongation of a
(bright) object in the image.
Parameters
----------
image : array
The input image.
mu : array, optional
The pre-computed central moments of ``image``.
T : array, shape ``(image.ndim, image.ndim)``
The pre-computed inertia tensor. If ``T`` is given, ``mu`` and
``image`` are ignored.
Returns
-------
eigvals : list of float, length ``image.ndim``
The eigenvalues of the inertia tensor of ``image``, in descending
order.
Notes
-----
Computing the eigenvalues requires the inertia tensor of the input image.
This is much faster if the central moments (``mu``) are provided, or,
alternatively, one can provide the inertia tensor (``T``) directly.
"""
if T is None:
T = inertia_tensor(image, mu)
eigvals = np.linalg.eigvalsh(T)
# Floating point precision problems could make a positive
# semidefinite matrix have an eigenvalue that is very slightly
# negative. This can cause problems down the line, so set values
# very near zero to zero.
eigvals = np.clip(eigvals, 0, None, out=eigvals)
return sorted(eigvals, reverse=True)
|
af48827b709b48cdae7b2a8fe7ad3723845ee6cd
| 19,157 |
def extract_values(inst):
"""
:param inst: the instance
:return: python values extracted from the instance
"""
# inst should already be python
return inst
|
087bb00ee6e3666b4a9e682ca420623982a12102
| 19,158 |
import numpy as np
def voc_ap(rec, prec, use_07_metric=False):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
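# Usage sketch with a toy precision/recall curve (arrays ordered by descending
# detection confidence, as the VOC evaluation code produces them).
rec = np.array([0.2, 0.4, 0.4, 0.8])
prec = np.array([1.0, 1.0, 0.67, 0.5])
print(voc_ap(rec, prec))                      # area under the interpolated PR curve
print(voc_ap(rec, prec, use_07_metric=True))  # 11-point VOC 2007 metric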
|
e9a4ebec8908e306bcf12e2e9538a8de8b74e84b
| 19,159 |
def is_on_path(prog):
"""Checks if a given executable is on the current PATH."""
r = runcmd("which %s" % prog)
if r.failed:
return False
else:
return r
|
1019ab3b08ef97c307588f8902a7884a89039998
| 19,160 |
def validate_entry(new_text) -> bool:
    """Callback used to validate input in the entry fields of the
    ExperimentPCR window.
    It is called every time the user tries to type a value into an entry
    field.
    A valid entry must meet the following requirements:
    - Consist only of integer digits.
    - Be at most 3 characters long.
    :param new_text: Passed in by the entry widget itself.
    :return: boolean - Tells the widget whether or not the entry is valid.
    """
    if new_text == '':  # e.g. after a backspace
        return True
    try:
        int(new_text)
    except ValueError:
        return False
    return len(new_text) <= 3
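# Usage sketch: how a callback like this is typically registered on a Tkinter
# Entry ('%P' passes the prospective text to the validator; requires a display).
import tkinter as tk

root = tk.Tk()
vcmd = (root.register(validate_entry), '%P')
entry = tk.Entry(root, validate='key', validatecommand=vcmd)
entry.pack()
# root.mainloop()  # uncomment to run interactively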
|
8e0f5f126d0688279fc28a8be287fda00d346a59
| 19,161 |
import io
def eia_cbecs_land_call(*, resp, url, **_):
"""
Convert response for calling url to pandas dataframe, begin
parsing df into FBA format
:param resp: df, response from url call
:param url: string, url
:return: pandas dataframe of original source data
"""
# Convert response to dataframe
df_raw_data = pd.read_excel(io.BytesIO(resp.content),
sheet_name='data')
df_raw_rse = pd.read_excel(io.BytesIO(resp.content),
sheet_name='rse')
if "b5.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data = pd.DataFrame(df_raw_data.loc[15:32]).reindex()
df_rse = pd.DataFrame(df_raw_rse.loc[15:32]).reindex()
df_data.columns = ["Name", "All buildings", "New England",
"Middle Atlantic", "East North Central",
"West North Central", "South Atlantic",
"East South Central", "West South Central",
"Mountain", "Pacific"]
df_rse.columns = ["Name", "All buildings", "New England",
"Middle Atlantic", "East North Central",
"West North Central", "South Atlantic",
"East South Central", "West South Central",
"Mountain", "Pacific"]
df_rse = df_rse.melt(id_vars=["Name"],
var_name="Location",
value_name="Spread")
df_data = df_data.melt(id_vars=["Name"],
var_name="Location",
value_name="FlowAmount")
if "b12.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data1 = pd.DataFrame(df_raw_data[4:5]).reindex()
df_data2 = pd.DataFrame(df_raw_data.loc[46:50]).reindex()
df_data = pd.concat([df_data1, df_data2], ignore_index=True)
df_rse1 = pd.DataFrame(df_raw_rse[4:5]).reindex()
df_rse2 = pd.DataFrame(df_raw_rse.loc[46:50]).reindex()
df_rse = pd.concat([df_rse1, df_rse2], ignore_index=True)
# drop the empty columns at end of df
df_data = df_data.iloc[:, 0:9]
df_rse = df_rse.iloc[:, 0:9]
df_data.columns = ["Description", "All buildings", "Office",
"Warehouse and storage", "Service",
"Mercantile", "Religious worship",
"Education", "Public assembly"]
df_rse.columns = ["Description", "All buildings", "Office",
"Warehouse and storage", "Service",
"Mercantile", "Religious worship",
"Education", "Public assembly"]
df_rse = df_rse.melt(id_vars=["Description"],
var_name="Name",
value_name="Spread")
df_data = df_data.melt(id_vars=["Description"],
var_name="Name",
value_name="FlowAmount")
if "b14.xlsx" in url:
# skip rows and remove extra rows at end of dataframe
df_data = pd.DataFrame(df_raw_data.loc[27:31]).reindex()
df_rse = pd.DataFrame(df_raw_rse.loc[27:31]).reindex()
# drop the empty columns at end of df
df_data = df_data.iloc[:, 0:8]
df_rse = df_rse.iloc[:, 0:8]
df_data.columns = ["Description", "All buildings", "Food service",
"Food sales", "Lodging", "Health care In-Patient",
"Health care Out-Patient",
"Public order and safety"]
df_rse.columns = ["Description", "All buildings", "Food service",
"Food sales", "Lodging", "Health care In-Patient",
"Health care Out-Patient", "Public order and safety"]
df_rse = df_rse.melt(id_vars=["Description"],
var_name="Name",
value_name="Spread")
df_data = df_data.melt(id_vars=["Description"],
var_name="Name",
value_name="FlowAmount")
df = pd.merge(df_rse, df_data)
return df
|
396079863ecc2faa6420e90f3d608ff997a3fb39
| 19,162 |
def add_l2_interface(interface_name, interface_desc=None, interface_admin_state="up", **kwargs):
"""
Perform a POST call to create an Interface table entry for physical L2 interface.
:param interface_name: Alphanumeric Interface name
:param interface_desc: Optional description for the interface. Defaults to nothing if not specified.
:param interface_admin_state: Optional administratively-configured state of the interface.
Defaults to "up" if not specified
:param kwargs:
keyword s: requests.session object with loaded cookie jar
keyword url: URL in main() function
:return: True if successful, False otherwise
"""
if kwargs["url"].endswith("/v1/"):
return port.add_l2_port(interface_name, interface_desc, interface_admin_state, **kwargs)
else: # Updated else for when version is v10.04
return _add_l2_interface(interface_name, interface_desc, interface_admin_state, **kwargs)
|
d27b4b5ec738a5a508a3fc9d8852ecf5df56debe
| 19,163 |
import pkg_resources
def language_descriptions():
"""
Return a dict of `LanguageDesc` instances keyed by language name.
"""
global languages
if languages is None:
languages = {}
for language in pkg_resources.WorkingSet().iter_entry_points(
group='textx_languages'):
register_language_with_project(language.load(),
language.dist.project_name,
language.dist.version)
return languages
|
236b8fd595f1b4754eeca2b8b17a88fa36090ca5
| 19,164 |
import six
def load_fixtures(fixtures_dict=None):
"""
Loads fixtures specified in fixtures_dict. This method must be
used for fixtures that don't have associated data models. We
simply want to load the meta into dict objects.
fixtures_dict should be of the form:
{
'actionchains': ['actionchain1.json', 'actionchain2.json'],
'workflows': ['workflow.yaml']
}
:param fixtures_dict: Dictionary specifying the fixtures to load for each type.
:type fixtures_dict: ``dict``
:rtype: ``dict``
"""
if fixtures_dict is None:
fixtures_dict = {}
all_fixtures = {}
fixtures_base_path = get_fixtures_base_path()
for fixture_type, fixtures in six.iteritems(fixtures_dict):
loaded_fixtures = {}
for fixture in fixtures:
fixture_path = fixtures_base_path + '/' + fixture
fixture_dict = load_content(fixture_path)
loaded_fixtures[fixture] = fixture_dict
all_fixtures[fixture_type] = loaded_fixtures
return all_fixtures
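# Hedged usage sketch: the fixture file name below is hypothetical; the call simply
# illustrates the expected input shape and the nested dict that comes back.
# fixtures = load_fixtures({'actionchains': ['actionchain1.json']})
# fixtures['actionchains']['actionchain1.json']  # -> parsed content of that fixture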
|
b43c1303a7c54a571a0e3ddf7881d7113371e293
| 19,165 |
import networkx as nx
import numpy as np
def generate_sbm(sizes, probs, maxweight=1):
"""Generate a Stochastic Block Model graph.
Assign random values drawn from U({1, ..., maxw}) to the edges.
sizes : list of sizes (int) of the blocks
probs : matrix of probabilities (in [0, 1]) of edge creation
between nodes depending on the blocks they belong to
maxweight : maximum value of the weights to randomly assign
(default 1, resulting in weights all equal to 1)
"""
graph = nx.stochastic_block_model(sizes, probs)
weights = 1 + np.random.choice(maxweight, len(graph.edges))
weights = dict(zip(graph.edges, weights))
nx.set_edge_attributes(graph, weights, 'weight')
return graph
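# Hedged usage sketch: a two-block SBM with denser intra-block connectivity and
# random integer edge weights drawn from {1, 2, 3}.
sizes = [10, 10]
probs = [[0.5, 0.05],
         [0.05, 0.5]]
g = generate_sbm(sizes, probs, maxweight=3)
# every edge of g now carries an integer 'weight' attribute between 1 and maxweight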
|
c6b0a106d88016afc99bf45abeb7c60af2981d77
| 19,166 |
import re
def eq_portions(actual: str, expected: str):
"""
Compare whether actual matches portions of expected. The portions to ignore are of two types:
- ***: ignore anything in between the left and right portions, including empty
- +++: ignore anything in between left and right, but non-empty
:param actual: string to test
:param expected: expected string, containing at least one of the two patterns
:return: a list of the portions ignored; if empty, it means there is no match.
>>> eq_portions('', '+++aaaaaa***ccccc+++eeeeeee+++')
()
>>> eq_portions('_1__aaaaaa__2__ccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
('_1__', '__2__', '_3__', '_4_')
>>> eq_portions('_1__aaaaaaccccc_3__eeeeeee_4_', '+++aaaaaa***ccccc+++eeeeeee+++')
('_1__', '', '_3__', '_4_')
>>> eq_portions('_1__aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee+++')
()
>>> eq_portions('aaaaaaccccc_3__eeeeeee', '+++aaaaaa***ccccc+++eeeeeee')
()
>>> eq_portions('aaaaaa_1__ccccc__2_eeeeeee', '***aaaaaa***ccccc+++eeeeeee***')
('', '_1__', '__2_', '')
>>> eq_portions('aaaaaa___ccccc___eeeeeee', '***aaaaaa')
()
>>> eq_portions('aaaaaa___ccccc___eeeeeee', 'aaaaaa')
Traceback (most recent call last):
...
ValueError: The 'expected' argument must contain at least one *** OR +++
"""
re_expect = re.escape(expected)
ANYTHING = re.escape('\\*' * 3)
SOMETHING = re.escape('\\+' * 3)
if not re.search(ANYTHING, re_expect) and not re.search(SOMETHING, re_expect):
raise ValueError("The 'expected' argument must contain at least one *** OR +++")
re_expect = re.sub(SOMETHING, '(.+)', re_expect)
re_expect = re.sub(ANYTHING, '(.*)', re_expect)
matches = re.fullmatch(re_expect, actual)
if not matches:
return ()
return matches.groups()
|
704b2a83575347c5143c2dc0aca5227a8fc5bd4b
| 19,167 |
def _get_encoder(
in_features: int,
embed_dim: int,
dropout_input: float,
pos_conv_kernel: int,
pos_conv_groups: int,
num_layers: int,
num_heads: int,
attention_dropout: float,
ff_interm_features: int,
ff_interm_dropout: float,
dropout: float,
layer_norm_first: bool,
layer_drop: float,
) -> Encoder:
"""
Args:
in_features (int): The number of input features.
embed_dim (int):
The dimension of embedding.
This option corresponds to "encoder_embed_dim" from fairseq.
Expected values are 768 for Base arch, and 1024 for Large arch.
dropout_input (float):
The dropout probability applied after the input feature is projected
to ``embed_dim``.
This option corresponds to "dropout_input" from fairseq.
Expected values are 0.1 for both Base and Large arch.
pos_conv_kernel (int):
The kernel size of convolutional positional embeddings.
This option corresponds to "conv_pos" from fairseq.
Expected values are 128 for both Base and Large arch.
pos_conv_groups (int):
The number of groups of convolutional positional embeddings.
This option corresponds to "conv_pos_groups" from fairseq.
Expected values are 16 for both Base and Large arch.
num_layers (int):
The number of self attention layers in transformer block.
This option corresponds to "encoder_layers" from fairseq.
Expected values are 12 for Base and 24 for Large arch.
num_heads (int):
The number of heads in self attention layers.
This option corresponds to "encoder_attention_heads" from fairseq.
Expected values are 12 for Base and 16 for Large arch.
attention_dropout (float):
The dropout probability applied after softmax in self-attention layer.
This option corresponds to "attention_dropout" from fairseq.
Expected values are 0.1 for Base and 0.0 for Large arch.
ff_interm_features (int):
The dimension of hidden features in feed forward layer.
This option corresponds to "encoder_ffn_embed_dim" from fairseq.
Expected values are 3072 for Base and 4096 for Large arch.
ff_interm_dropout (float):
The dropout probability applied in feedforward layer.
            This option corresponds to "activation_dropout" from fairseq.
Expected values are 0.1 for both Base and Large arch.
dropout (float):
The dropout probability applied at the end of feed forward layer.
This option corresponds to "dropout" from fairseq.
Expected values are 0.1 for Base and 0.0 for Large arch.
layer_norm_first (bool):
Control the order of layer norm in transformer layer and each encoder layer.
If True, in transformer layer, layer norm is applied before features are fed
to encoder layers. In encoder layer, two layer norms are applied before and after
self attention.
If False, in transformer layer, layer norm is applied after features are fed
to encoder layers. In encoder layer, two layer norms are applied after self
attention, before and after feed forward.
This option corresponds to "layer_norm_first" from fairseq.
Expected values are False for Base and True for Large arch.
layer_drop (float):
Probability to drop each encoder layer during training.
This option corresponds to "layerdrop" from fairseq.
Expected values are 0.1 for both Base and Large arch.
See Also:
* "encoder_embed_dim"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L49-L51
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L64
* "dropout_input"
- Def, base and large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L75-L78
* "conv_pos"
- Def, base and large
NOTE: The description is wrong.
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L204-L207
- Usage
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L756
* "conv_pos_groups"
- Def, base and large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L208-L211
* "encoder_layers"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L46-L48
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L63
* "encoder_attention_heads"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L55-L57
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L66
* "attention_dropout"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L66-L68
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L60
* "encoder_ffn_embed_dim"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L52-L54
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L65
* "activation_dropout"
- Def
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L69-L71
- Base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L55
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L55
* "dropout"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L63-L65
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L59
* "layer_norm_first"
- Def and base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L91-L93
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/pretraining/wav2vec2_large_librivox.yaml#L53
* "layerdrop"
- Def
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L72-L74
- Base
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/base_960h.yaml#L54
- Large
https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/examples/wav2vec/config/finetuning/vox_960h.yaml#L54
"""
feature_projection = FeatureProjection(in_features, embed_dim, dropout_input)
pos_conv = ConvolutionalPositionalEmbedding(embed_dim, pos_conv_kernel, pos_conv_groups)
# Original impl
# https://github.com/pytorch/fairseq/blob/425c36eafff535fe7337f8bdd5ace22ebacc78cb/fairseq/models/wav2vec/wav2vec2.py#L768-L782
encoder_layers = nn.ModuleList()
for _ in range(num_layers):
attention = SelfAttention(
embed_dim=embed_dim,
num_heads=num_heads,
dropout=attention_dropout,
)
feed_forward = FeedForward(
io_features=embed_dim,
intermediate_features=ff_interm_features,
intermediate_dropout=ff_interm_dropout,
output_dropout=dropout,
)
encoder_layers.append(
EncoderLayer(
attention=attention,
dropout=dropout,
layer_norm_first=layer_norm_first,
feed_forward=feed_forward,
)
)
transformer = Transformer(
pos_conv_embed=pos_conv,
dropout=dropout,
layers=encoder_layers,
layer_norm_first=not layer_norm_first,
layer_drop=layer_drop,
)
return Encoder(feature_projection, transformer)
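# Hedged usage sketch: instantiating a "Base"-architecture encoder with the expected
# values listed in the docstring. It assumes the component classes used above
# (FeatureProjection, SelfAttention, FeedForward, EncoderLayer, Transformer, Encoder)
# are importable; `in_features=512` is also an assumption (the usual output width of
# the wav2vec2 convolutional feature extractor), not something stated in the docstring.
encoder = _get_encoder(
    in_features=512,
    embed_dim=768,
    dropout_input=0.1,
    pos_conv_kernel=128,
    pos_conv_groups=16,
    num_layers=12,
    num_heads=12,
    attention_dropout=0.1,
    ff_interm_features=3072,
    ff_interm_dropout=0.1,
    dropout=0.1,
    layer_norm_first=False,
    layer_drop=0.1,
)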
|
72ff9887575905172db0b095b3d6822ee6b51411
| 19,168 |
import numpy as np
def _soft_threshold(a, b):
"""Soft-threshold operator for the LASSO and elastic net."""
return np.sign(a) * np.clip(np.abs(a) - b, a_min=0, a_max=None)
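# Hedged worked example: with threshold b = 1.0, values inside [-1, 1] are shrunk to
# zero and everything else moves toward zero by exactly b.
import numpy as np
a = np.array([1.5, 0.3, -2.0])
_soft_threshold(a, 1.0)  # -> array([ 0.5,  0. , -1. ])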
|
34f28c1154cf9eefecc19e1ece8dfa3ca82e677e
| 19,169 |
def predict(input_tokens):
"""register predict method in pangu-alpha"""
token_ids, valid_length = register.call_preprocess(preprocess, input_tokens)
############# two output ###################
# p, p_args = register.call_servable(token_ids)
# add_token = register.call_postprocess(postprocess, p, p_args, valid_length)
#############################################
################# one output ####################
logits = register.call_servable(token_ids)
add_token = register.call_postprocess(postprocess_topk, logits, valid_length)
return add_token
|
f2de6ff2ba78c3cac47a823bfe7a201a9a6b93ad
| 19,170 |
import dbm
def get_sim_data():
"""
Create the data needed to initialize a simulation
Performs the steps necessary to set up a stratified plume model simulation
and passes the input variables to the `Model` object and
`Model.simulate()` method.
Returns
-------
profile : `ambient.Profile` object
Return a profile object from the BM54 CTD data
particles : list of `PlumeParticle` objects
List of `PlumeParticle` objects containing the dispersed phase initial
conditions
z : float
Depth of the release port (m)
R : float
Radius of the release port (m)
maxit : float
Maximum number of iterations to converge between inner and outer
plumes
toler : float
Relative error tolerance to accept for convergence (--)
delta_z : float
Maximum step size to use in the simulation (m). The ODE solver
in `calculate` is set up with adaptive step size integration, so
in theory this value determines the largest step size in the
output data, but not the numerical stability of the calculation.
"""
# Get the ambient CTD data
profile = get_profile()
# Specify the release location and geometry and initialize a particle
# list
z0 = 300.
R = 0.15
particles = []
# Add a dissolving particle to the list
composition = ['oxygen', 'nitrogen', 'argon']
yk = np.array([1.0, 0., 0.])
o2 = dbm.FluidParticle(composition)
Q_N = 150. / 60. / 60.
de = 0.005
lambda_1 = 0.85
particles.append(stratified_plume_model.particle_from_Q(profile, z0, o2,
yk, Q_N, de, lambda_1))
# Add an insoluble particle to the list
composition = ['inert']
yk = np.array([1.])
oil = dbm.InsolubleParticle(True, True)
mb0 = 50.
de = 0.01
lambda_1 = 0.8
particles.append(stratified_plume_model.particle_from_mb0(profile, z0,
oil, [1.], mb0, de, lambda_1))
# Set the other simulation parameters
maxit = 2
toler = 0.2
delta_z = 1.0
# Return the results
return (profile, particles, z0, R, maxit, toler, delta_z)
|
e253665f451b167a188c997ff36fc406e0f3a587
| 19,171 |
import numpy as np
def _organize_arch(fils, pth):
"""Allocate data from each specific type of file (keys from the input dict) to a new dict
Arguments:
fils {dict} -- Dictionary containing type of files and list of files
Returns:
        dict -- Nested dict keyed by file type; each value maps 'img_<n>' to the file 'path' and its loaded 'coords' array
"""
imgdata = dict()
for i in fils.keys():
images = dict()
for ii in np.arange(len(fils[i])):
            file_path = pth + '\\' + str(fils[i][ii])
            images['img_' + str(ii + 1)] = {'path': file_path,
                                            'coords': np.loadtxt(file_path, skiprows=1, usecols=(-2, -1))}
imgdata[i] = images
return imgdata
|
c62c9b23bf4735c2062090d77278ce5a8acbd668
| 19,172 |
def gather_allele_freqs(record, all_samples, males, females, pop_dict, pops, no_combos = False):
"""
Wrapper to compute allele frequencies for all sex & population pairings
"""
#Get allele frequencies
calc_allele_freq(record, all_samples)
if len(males) > 0:
calc_allele_freq(record, males, prefix = 'MALE')
if len(females) > 0:
calc_allele_freq(record, females, prefix = 'FEMALE')
if len(pops) > 0:
for pop in pops:
pop_samps = [s for s in all_samples if pop_dict.get(s, None) == pop]
calc_allele_freq(record, pop_samps, prefix = pop)
if len(males) > 0 and not no_combos:
calc_allele_freq(record, [s for s in pop_samps if s in males],
prefix = pop + '_MALE')
if len(females) > 0 and not no_combos:
calc_allele_freq(record, [s for s in pop_samps if s in females],
prefix = pop + '_FEMALE')
#Get POPMAX AF for biallelic sites
if 'MULTIALLELIC' not in record.filter and len(record.alleles) <= 2:
AFs = [record.info['{0}_AF'.format(pop)][0] for pop in pops]
popmax = max(AFs)
record.info['POPMAX_AF'] = popmax
return record
|
0f74616fa64ee5b3582467da27161906abf28463
| 19,173 |
def get_selected(n=1):
"""
    Return the first n selected objects (all of them if n == -1), or an empty list if nothing is selected.
"""
if get_selection_len():
selection = bpy.context.selected_objects
if n == 1:
return selection[0]
elif n == -1:
return selection[:]
else:
return selection[:n]
else:
return []
|
6049900ef069731b1fbe9f40fff184085940c83e
| 19,174 |
def label_src_vertno_sel(label, src):
""" Find vertex numbers and indices from label
Parameters
----------
label : Label
Source space label
src : dict
Source space
Returns
-------
vertno : list of length 2
Vertex numbers for lh and rh
src_sel : array of int (len(idx) = len(vertno[0]) + len(vertno[1]))
        Indices of the selected vertices in source space
    """
    if src[0]['type'] != 'surf':
        raise Exception('Labels are only supported with surface source spaces')
vertno = [src[0]['vertno'], src[1]['vertno']]
if label.hemi == 'lh':
vertno_sel = np.intersect1d(vertno[0], label.vertices)
src_sel = np.searchsorted(vertno[0], vertno_sel)
vertno[0] = vertno_sel
vertno[1] = np.array([])
elif label.hemi == 'rh':
vertno_sel = np.intersect1d(vertno[1], label.vertices)
src_sel = np.searchsorted(vertno[1], vertno_sel) + len(vertno[0])
vertno[0] = np.array([])
vertno[1] = vertno_sel
elif label.hemi == 'both':
vertno_sel_lh = np.intersect1d(vertno[0], label.lh.vertices)
src_sel_lh = np.searchsorted(vertno[0], vertno_sel_lh)
vertno_sel_rh = np.intersect1d(vertno[1], label.rh.vertices)
src_sel_rh = np.searchsorted(vertno[1], vertno_sel_rh) + len(vertno[0])
src_sel = np.hstack((src_sel_lh, src_sel_rh))
vertno = [vertno_sel_lh, vertno_sel_rh]
else:
raise Exception("Unknown hemisphere type")
return vertno, src_sel
|
a1858258b6c789557d6bdeff9428cc7aacbe4655
| 19,175 |
def get_subjects(creative_work):
"""
Returns generated html of subjects associated with the
Creative Work HTML or 0-length string
Parameters:
creative_work -- Creative Work
"""
html_output = ''
#! Using LOC Facet as proxy for subjects
facets = list(
REDIS_DATASTORE.smembers(
"{0}:hasAnnotation".format(creative_work.redis_key)))
for facet in facets:
if facet.startswith("bf:Facet"):
subject_template = loader.get_template('subject-icon.html')
loc_key = facet.split(":")[-1]
context = {
'name': REDIS_DATASTORE.hget('bf:Annotation:Facet:LOCFirstLetters',
loc_key),
'letter': loc_key}
html_output += subject_template.render(Context(context))
return mark_safe(html_output)
|
328aeadb21a22972c0843efdba251b2f6c5f937d
| 19,176 |
import argparse
import subprocess
import logging
def run_tests(runner: cmake_runner.CMakeRunner, args: argparse.Namespace,
build_config: str) -> bool:
"""Run tests for the current project.
Args:
runner: Cmake runner object.
args: Arguments for cmake.
build_config: Name of configuration target.
Returns:
True when testing ran successfully, False otherwise.
"""
try:
runner.test(args=args.cmake_test_regex)
except (subprocess.CalledProcessError, RuntimeError) as error:
logging.exception('Tests failed for %s CMake project %s: %s', build_config,
args.cmake_source_project_root, error)
return False
return True
|
f448029f6983c9e37d75cf318717d8c7c65d1295
| 19,177 |
def sort(request):
"""Boolean sort keyword for concat and DataFrame.append."""
return request.param
|
83f35eb41bc0cf7eecea932ae4f14646d9e8732f
| 19,178 |
def is_comprehension(leaf):
"""
Return true if the leaf is the beginning of a list/set/dict comprehension.
Returns true for generators as well
"""
if leaf.type != 'operator' or leaf.value not in {'[', '(', '{'}:
return False
sibling = leaf.get_next_sibling()
return (sibling.type in {'testlist_comp', 'dictorsetmaker'}
and sibling.children[-1].type == 'sync_comp_for')
|
11fff76ff8ed19b3d57359b56db886c003603a86
| 19,179 |
def get_class(x):
"""
    x: value to be classified; the function returns how many thresholds of the example distribution it exceeds
"""
# Example
distribution = [0, 2000, 4000, 6000, 8000, 10000]
x_class = 0
for i in range(len(distribution)):
if x > distribution[i]:
x_class += 1
return x_class
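# Hedged worked example: with the distribution above, 4500 exceeds the thresholds
# 0, 2000 and 4000, so it lands in class 3; 0 is not strictly greater than anything.
assert get_class(4500) == 3
assert get_class(0) == 0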
|
1ae95f3d9bc6f342169232ab10cd08a42de0f692
| 19,180 |
import theano.tensor as T
def square(x):
"""Elementwise square of a tensor. """
return T.sqr(x)
|
c052e31a450b91eb1e6a08843f99afd6e618da9d
| 19,181 |
import re
def update_email_body(parsed_email, key):
"""
Finds and updates the "text/html" and "text/plain" email body parts.
Parameters
----------
parsed_email: email.message.Message, required
EmailMessage representation the downloaded email
key: string, required
The object key that will be used for storing the message in S3
Returns
-------
email.message.Message
EmailMessage representation the updated email
"""
# template in the key for purposes of optional displaying to the recipient
this_disclaimer_text = re.sub("{key}", key, disclaimer_text)
this_footer_text = re.sub("{key}", key, footer_text)
text_charset = None
if parsed_email.is_multipart():
# Walk over message parts of this multipart email.
for part in parsed_email.walk():
content_type = part.get_content_type()
content_disposition = str(part.get_content_disposition())
if content_type == 'text/plain' and 'attachment' not in content_disposition:
transfer_encoding = part['Content-Transfer-Encoding']
text_charset = part.get_content_charset()
new_text_body = update_text_content(part, this_disclaimer_text, this_footer_text)
part.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
elif content_type == 'text/html' and 'attachment' not in content_disposition:
transfer_encoding = part['Content-Transfer-Encoding']
html_charset = part.get_content_charset()
new_html_body = update_html_content(part, this_disclaimer_text, this_footer_text)
if new_html_body is not None:
part.set_content(new_html_body.encode(html_charset), "text", "html", cte=transfer_encoding)
part.set_charset(html_charset)
else:
# Its a plain email with text/plain body
transfer_encoding = parsed_email['Content-Transfer-Encoding']
text_charset = parsed_email.get_content_charset()
new_text_body = update_text_content(parsed_email, this_disclaimer_text, this_footer_text)
parsed_email.set_content(new_text_body, "plain", charset=text_charset, cte=transfer_encoding)
return parsed_email
|
d942a4cf47af9d7c1e36a4a2af5d0239b90464d8
| 19,182 |
import json
def create_collaborators(collaborators, destination_url, destination, credentials):
"""Post collaborators to GitHub
INPUT:
collaborators: python list of dicts containing collaborators info to be POSTED to GitHub
destination_url: the root url for the GitHub API
destination: the team and repo '<team>/<repo>' to post milestones to
    OUTPUT: A dict indicating completion: {"done": "true"}
"""
for collaborator in collaborators:
if collaborator['login'] == credentials['user_name']:
continue
url = destination_url + "repos/" + destination + "/collaborators/" + collaborator["login"]
perm = "push"
if collaborator["permissions"]["admin"] == True or collaborator['login'] == credentials['user_name']:
perm = "admin"
# create a new collaborator that includes only the attributes needed to create a new milestone
r = put_req(url, json.dumps({"permission": perm}), credentials)
status = check_res(r)
print(status)
return {"done": "true"}
|
2d39a2970d9f52af5209b1f4717c0b4d39e1cb5c
| 19,183 |
import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial
def complexity_recurrence(signal, delay=1, dimension=3, tolerance="default", show=False):
"""Recurrence matrix (Python implementation)
Fast Python implementation of recurrence matrix (tested against pyRQA). Returns a tuple
with the recurrence matrix (made of 0s and 1s) and the distance matrix (the non-binarized
version of the former).
Parameters
----------
signal : Union[list, np.ndarray, pd.Series]
The signal (i.e., a time series) in the form of a vector of values.
delay : int
Time delay (often denoted 'Tau', sometimes referred to as 'lag'). In practice, it is common
to have a fixed time lag (corresponding for instance to the sampling rate; Gautama, 2003),
or to find a suitable value using some algorithmic heuristics (see ``delay_optimal()``).
dimension : int
Embedding dimension (often denoted 'm' or 'd', sometimes referred to as 'order'). Typically
2 or 3. It corresponds to the number of compared runs of lagged data. If 2, the embedding
returns an array with two columns corresponding to the original signal and its delayed (by
Tau) version.
tolerance : float
Tolerance (similarity threshold, often denoted as 'r'). The radius used for detecting
neighbours (states considered as recurrent). A rule of thumb is to set 'r' so that the
percentage of points classified as recurrences is about 2-5%.
show : bool
Visualise recurrence matrix.
See Also
--------
complexity_embedding, complexity_tolerance
Returns
-------
np.ndarray
The recurrence matrix.
np.ndarray
The distance matrix.
Examples
----------
>>> import neurokit2 as nk
>>>
>>> signal = nk.signal_simulate(duration=5, sampling_rate=100, frequency=[5, 6], noise=0.01)
>>>
>>> # Default r
>>> rc, _ = nk.complexity_recurrence(signal, show=True)
>>>
>>> # Larger radius
>>> rc, d = nk.complexity_recurrence(signal, tolerance=0.5, show=True)
>>>
>>> # Optimization of tolerance via recurrence matrix
>>> rc, d = nk.complexity_tolerance(signal, delay=1, dimension=3, method="recurrence", show=True)
References
----------
- Rawald, T., Sips, M., Marwan, N., & Dransch, D. (2014). Fast computation of recurrences
in long time series. In Translational Recurrences (pp. 17-29). Springer, Cham.
- Dabiré, H., Mestivier, D., Jarnet, J., Safar, M. E., & Chau, N. P. (1998). Quantification of
sympathetic and parasympathetic tones by nonlinear indexes in normotensive rats. American
Journal of Physiology-Heart and Circulatory Physiology, 275(4), H1290-H1297.
"""
if tolerance == "default":
tolerance, _ = complexity_tolerance(
signal, method="sd", delay=None, dimension=None, show=False
)
# Time-delay embedding
emb = complexity_embedding(signal, delay=delay, dimension=dimension)
# Compute distance matrix
d = scipy.spatial.distance.cdist(emb, emb, metric="euclidean")
# Flip the matrix to match traditional RQA representation
d = np.flip(d, axis=0)
# Initialize the recurrence matrix filled with 0s
recmat = np.zeros((len(d), len(d)))
# If lower than tolerance, then 1
recmat[d <= tolerance] = 1
# Plotting
if show is True:
try:
fig, axes = plt.subplots(ncols=2)
axes[0].imshow(recmat, cmap="Greys")
axes[0].set_title("Recurrence Matrix")
im = axes[1].imshow(d)
axes[1].set_title("Distance")
cbar = fig.colorbar(im, ax=axes[1], fraction=0.046, pad=0.04)
cbar.ax.plot([0, 1], [tolerance] * 2, color="r")
except MemoryError as e:
raise MemoryError(
"NeuroKit error: complexity_rqa(): the recurrence plot is too large to display. ",
"You can recover the matrix from the parameters and try to display parts of it.",
) from e
return recmat, d
|
cc93a80ff34fffc5774f7b52109fd09d8b0ac69e
| 19,184 |
import torch
import numpy as np
import torch.optim as optim
from time import time
from torch.utils.data import DataLoader
from tqdm import tqdm
def train(params):
"""
Trains error model.
Arguments:
params (dict): hyperparameters with which to train
"""
p, x = load_error_data()
# calculate means
p_mean = p.mean(axis=(0, 1))
p_std = p.std(axis=(0, 1))
x_mean = x.mean(axis=(0, 1))
x_std = x.std(axis=(0, 1))
# TODO - does this make sense?
# delta = x[:,2::2] - x[:,:-2:2]
# the number to look ahead
delta = x[:, 1:] - x[:, :-1]
delta_mean = delta.mean(axis=(0, 1))
delta_std = delta.std(axis=(0, 1))
# send to torch tensors
p_mean, p_std = torch.Tensor(p_mean).to(device), torch.Tensor(p_std).to(device)
x_mean, x_std = torch.Tensor(x_mean).to(device), torch.Tensor(x_std).to(device)
delta_mean, delta_std = (
torch.Tensor(delta_mean).to(device),
torch.Tensor(delta_std).to(device),
)
# parameters
buffer_size = int(params["buffer size"])
activation = params["activation"]
# train val split
training_split = 0.8
n = len(p)
k = int(np.ceil(n * training_split))
train_p, val_p = p[:k], p[k:]
train_x, val_x = x[:k], x[k:]
n_ahead = 1
train_dataset = LookaheadDataset(states=train_x, actions=train_p, n_ahead=n_ahead)
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
action_size = len(train_dataset[0][0][0])
state_size = len(train_dataset[0][1])
output_size = len(train_dataset[0][2][0])
model_path = params.get("model path", None)
dropout = params["dropout"]
hidden_layers = int(params["hidden layers"])
hidden_size = int(params["hidden size"])
# LOAD ANALYTICAL MDOEL
analytical_model = Network(
action_size=action_size,
state_size=state_size,
output_size=output_size,
hidden_layers=hidden_layers,
hidden_size=hidden_size,
dropout=dropout,
activation=activation,
action_mean=p_mean,
action_std=p_std,
state_mean=x_mean,
state_std=x_std,
output_mean=delta_mean,
output_std=delta_std,
)
analytical_model.to(device)
analytical_path = params["analytical model path"]
analytical_model.load_state_dict(torch.load(analytical_path))
model = Network(
action_size=action_size,
state_size=state_size,
output_size=output_size,
hidden_layers=hidden_layers,
hidden_size=hidden_size,
dropout=dropout,
activation=activation,
action_mean=p_mean,
action_std=p_std,
state_mean=x_mean,
state_std=x_std,
output_mean=delta_mean,
output_std=delta_std,
)
model.to(device)
if params.get("load", False):
model.load_state_dict(torch.load(model_path))
learning_rate = params["learning rate"]
batch_size = int(params["batch size"])
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True)
train_losses = []
val_losses = []
best_loss = np.inf
print_info = params.get("print", False)
epochs = int(params["epochs"])
max_batches = np.inf
if print_info:
loop = tqdm(total=min(len(train_dataloader), max_batches) * epochs)
def step(state, deltas):
s = state + deltas
return s
for epoch in range(epochs):
model.train()
# new_n_ahead = min((epoch + 1) * 5, 100)
new_n_ahead = 10
if new_n_ahead != n_ahead:
n_ahead = new_n_ahead
if print_info:
print(n_ahead)
train_dataset = LookaheadDataset(
states=train_x, actions=train_p, n_ahead=n_ahead
)
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
train_dataloader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True
)
val_dataloader = DataLoader(
val_dataset, batch_size=batch_size, shuffle=True
)
for b, (a, s, d) in enumerate(train_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
# normalize d
d = (d - delta_mean) / delta_std
d_est = (d_est - delta_mean) / delta_std
loss = loss_function(d, d_est)
if print_info:
if not val_losses:
loop.set_description("loss: {:.3f}".format(loss.item()))
else:
loop.set_description(
"loss: {:.4f}, val loss: {:.4f}".format(
loss.item(), val_losses[-1]
)
)
train_losses.append(loss.item())
optimizer.zero_grad()
loss.backward()
optimizer.step()
if print_info:
loop.update(1)
if b > max_batches:
break
with torch.no_grad():
model.eval()
epoch_losses = []
for b, (a, s, d) in enumerate(val_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
# normalize d
d = (d - delta_mean) / delta_std
d_est = (d_est - delta_mean) / delta_std
loss = loss_function(d, d_est)
epoch_losses.append(loss.item())
if b > max_batches:
break
val_losses.append(np.mean(epoch_losses))
if np.mean(epoch_losses) < best_loss:
best_loss = np.mean(epoch_losses)
if model_path:
torch.save(model.state_dict(), model_path)
if print_info:
print("Best val loss: {:.4}".format(best_loss))
n_ahead = 100
val_dataset = LookaheadDataset(states=val_x, actions=val_p, n_ahead=n_ahead)
val_dataloader = DataLoader(val_dataset, batch_size=100, shuffle=True)
# calculate HZ
start = time()
with torch.no_grad():
model.eval()
for b, (a, s, d) in enumerate(val_dataloader):
s = s.float().to(device)
a = a.float().to(device)
d = d.float().to(device)
d_est = torch.zeros(d.shape).to(device)
for i in range(n_ahead):
d_hat = model(a[:, i], s) + analytical_model(a[:, i], s)
if i == 0:
# d_est[:,i] = d_est[:,i] + d_hat
d_est[:, i] = d_hat
else:
d_est[:, i] = d_est[:, i - 1] + d_hat
s = s + d_hat
elapsed = time() - start
speed = elapsed / len(val_dataloader)
return val_losses[-1].item(), speed
|
3dc2e014c31cac3bbc42d5972184b6d24175a8db
| 19,185 |
def get_awb_shutter( f ):
"""
Get AWB and shutter speed from file object
This routine extracts the R and B white balance gains and the shutter speed
from a jpeg file made using the Raspberry Pi camera. These are stored as text in
a custom Makernote.
The autoexposure and AWB white balance values are not available directly until
a picture is taken and are saved in a Jpeg.
Returns 0 for the values if they're not found
"""
f.seek(256)
s = f.read(512) # Only part of the header needed
r_pos = s.find('gain_r=')
b_pos = s.find('gain_b=')
s_pos = s.find(' exp=')
gain_r = eval(s[r_pos+7:r_pos+12].split()[0]) if r_pos > -1 else 0
gain_b = eval(s[b_pos+7:b_pos+12].split()[0]) if b_pos > -1 else 0
shutter = eval(s[s_pos+5:s_pos+12].split()[0]) if s_pos > -1 else 0
return (gain_r,gain_b,shutter)
|
cfafdf531809729ae0ec96ab90a60a4961b9437a
| 19,186 |
def rgb2lab(rgb_arr):
"""
Convert colur from RGB to CIE 1976 L*a*b*
Parameters
----------
rgb_arr: ndarray
Color in RGB
Returns
-------
lab_arr: ndarray
Color in CIE 1976 L*a*b*
"""
return xyz2lab(rgb2xyz(rgb_arr))
|
3eba11b8017908393e1e238e6b4ae046dd265520
| 19,187 |
def format_rfidcard(rfidcard):
"""
:type rfidcard: apps.billing.models.RfidCard
"""
return {
'atqa': rfidcard.atqa if len(rfidcard.atqa) > 0 else None,
'sak': rfidcard.sak if len(rfidcard.sak) > 0 else None,
'uid': rfidcard.uid,
'registered_at': rfidcard.registered_at.isoformat(),
'user': rfidcard.user.username,
}
|
120ca8e338b01235b2ba12ae3f874fd317ffebe8
| 19,188 |
def make_exposure_shares(exposure_levels, geography="geo_nm", variable="rank"):
"""Aggregate shares of activity at different levels of exposure
Args:
exposure_levels (df): employment by lad and sector and exposure ranking
geography (str): geography to aggregate over
variable (str): variable we want to calculate shares over
"""
exp_distr = (
exposure_levels.groupby(["month_year", variable, geography])["value"]
.sum()
.reset_index(drop=False)
.groupby([geography, "month_year"])
.apply(lambda x: x.assign(share=lambda df: df["value"] / df["value"].sum()))
).reset_index(drop=True)
return exp_distr
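# Hedged usage sketch with a toy frame: column names follow the defaults used above
# (geo_nm, rank, month_year, value); the resulting shares sum to 1 within each
# geography/month group.
import pandas as pd
toy = pd.DataFrame({
    "geo_nm": ["A", "A", "B", "B"],
    "month_year": ["2020-04"] * 4,
    "rank": [1, 2, 1, 2],
    "value": [30.0, 70.0, 50.0, 50.0],
})
shares = make_exposure_shares(toy)
# shares.groupby(["geo_nm", "month_year"])["share"].sum() -> 1.0 for every group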
|
02d990f2b08e3acb2a2b8ac01e44848770bdea71
| 19,189 |
import collections
import mpi4py.MPI
def init_ranks(mpi_comm):
"""Returns rank information of the local process in `mpi_comm`.
Args:
        mpi_comm (mpi4py.MPI.Comm):
            MPI communicator from mpi4py
Returns:
rank_info (list):
Elements are:
* rank (`mpi_comm.rank`)
* intra_rank (rank within the local computing node)
* intra_size (number of processes on the node)
* inter_rank (rank of the node)
* inter_size (number of computing nodes)
"""
global_names = mpi_comm.gather(mpi4py.MPI.Get_processor_name())
if mpi_comm.rank == 0:
name_to_global_ranks = collections.defaultdict(list)
for global_rank, name in enumerate(global_names):
name_to_global_ranks[name].append(global_rank)
for global_ranks in name_to_global_ranks.values():
global_ranks.sort()
inter_names = sorted(
set(global_names), key=lambda name: name_to_global_ranks[name])
name_to_inter_rank = {
name: inter_rank
for inter_rank, name in enumerate(inter_names)
}
inter_size = len(inter_names)
all_ranks = []
for global_rank, name in enumerate(global_names):
ranks = name_to_global_ranks[name]
intra_rank = ranks.index(global_rank)
intra_size = len(ranks)
inter_rank = name_to_inter_rank[name]
all_ranks.append((
global_rank, intra_rank, intra_size,
inter_rank, inter_size))
my_ranks = mpi_comm.scatter(all_ranks)
else:
my_ranks = mpi_comm.scatter(None)
assert my_ranks[0] == mpi_comm.rank
return my_ranks
|
334bfa7856049e612f3f6d1f6ec82873926040b1
| 19,190 |
def boxscores(sports=["basketball/nba"], output="dict", live_only=True, verbose=False):
"""
~ 10 seconds
"""
links = boxlinks(sports=sports, live_only=live_only, verbose=verbose)
boxes = [boxscore(link) for link in links]
return boxes
|
9cdc1bd4ec90d8ab9593d49316669dc6b801cf2e
| 19,191 |
from bisect import bisect_left, insort
from collections import deque
from itertools import islice
def runningMedian(seq, M):
"""
Purpose: Find the median for the points in a sliding window (odd number in size)
as it is moved from left to right by one point at a time.
Inputs:
seq -- list containing items for which a running median (in a sliding window)
is to be calculated
M -- number of items in window (window size) -- must be an integer > 1
    Outputs:
medians -- list of medians with size N - M + 1
Note:
1. The median of a finite list of numbers is the "center" value when this list
is sorted in ascending order.
2. If M is an even number the two elements in the window that
are close to the center are averaged to give the median (this
is not by definition)
"""
seq = iter(seq)
s = []
m = M // 2 #// does a truncated division like integer division in Python 2
# Set up list s (to be sorted) and load deque with first window of seq
s = [item for item in islice(seq,M)]
d = deque(s)
# Simple lambda function to handle even/odd window sizes
median = lambda : s[m] if bool(M&1) else (s[m-1]+s[m])*0.5
# Sort it in increasing order and extract the median ("center" of the sorted window)
s.sort()
medians = [median()]
# Now slide the window by one point to the right for each new position (each pass through
# the loop). Stop when the item in the right end of the deque contains the last item in seq
for item in seq:
old = d.popleft() # pop oldest from left
d.append(item) # push newest in from right
del s[bisect_left(s, old)] # locate insertion point and then remove old
insort(s, item) # insert newest such that new sort is not required
medians.append(median())
return medians
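# Hedged worked example: a window of size 3 over five points yields three medians,
# and an even window size averages the two central values.
assert runningMedian([2, 1, 3, 5, 4], 3) == [2, 3, 4]
assert runningMedian([1, 2, 3, 4], 2) == [1.5, 2.5, 3.5]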
|
b37af61c9f6f62bd6fbd395bc2c423a770ba2797
| 19,192 |
def min_max_normalize(img):
""" Center and normalize the given array.
Parameters:
----------
img: np.ndarray
"""
min_img = img.min()
max_img = img.max()
return (img - min_img) / (max_img - min_img)
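# Hedged worked example: the smallest value maps to 0 and the largest to 1.
import numpy as np
min_max_normalize(np.array([2.0, 4.0, 6.0]))  # -> array([0. , 0.5, 1. ])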
|
faaafbc8e0b36f26f8319b671de326dd6a97e6f9
| 19,193 |
def find_common_features(experiment: FCSExperiment,
samples: list or None = None):
"""
Generate a list of common features present in all given samples of an experiment. By 'feature' we mean
a variable measured for a particular sample e.g. CD4 or FSC-A (forward scatter)
Parameters
----------
experiment: FCSExperiment
Experiment to extract features from
samples: list, optional
List of samples to get common features of. If None, will search all samples in experiment.
Returns
-------
List
"""
if samples is None:
samples = experiment.list_samples()
assert all([s in experiment.list_samples() for s in samples]), \
'One or more samples specified do not belong to experiment'
features = [_get_features(experiment, sample_id=s) for s in samples]
common_features = set(features[0])
for f in features[1:]:
common_features.intersection_update(f)
return list(common_features)
|
8022a97721eb9ff26efcba347e4f631aff3ede84
| 19,194 |
import heapq
import numpy as np
def generate_blend_weights(positions, new_p, n_neighbors):
""" Use inverse distance and K-Nearest-Neighbors Interpolation to estimate weights
according to [Johansen 2009] Section 6.2.4
"""
distances = []
for n, p in positions.items():
distance = np.linalg.norm(new_p - p)
heapq.heappush(distances, (distance, n))
    # keep the k nearest neighbours (slicing a heap list does not yield the k smallest)
    distances = heapq.nsmallest(n_neighbors, distances)
weights = dict()
if distances[0][0] <= 0:
weights[distances[0][1]] = 1.0
else:
inv_k_distance = 1.0 / distances[-1][0]
        inv_distances = np.array([(1.0 / d) - inv_k_distance for d, n in distances])
new_weights = inv_distances / np.sum(inv_distances)
for idx, v in enumerate(distances):
weights[v[1]] = new_weights[idx]
return weights
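# Hedged usage sketch: three reference positions, two nearest neighbours kept.
# The returned weights are non-negative and sum to 1; the farther of the two kept
# neighbours receives weight 0 because of the 1/d_k cutoff term.
import numpy as np
positions = {"a": np.array([0.0, 0.0]),
             "b": np.array([1.0, 0.0]),
             "c": np.array([0.0, 1.0])}
w = generate_blend_weights(positions, np.array([0.2, 0.0]), n_neighbors=2)
# w -> {'a': 1.0, 'b': 0.0}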
|
f9db2b47d5847cdb2e7367cebe9b9bf81809b11d
| 19,195 |
def check_method(adata):
"""Check that method output fits expected API."""
assert "connectivities" in adata.obsp
assert "distances" in adata.obsp
return True
|
0ad772187c6d2960149723df17f6b0cf3fa703d1
| 19,196 |
def load_model(Model, params, checkpoint_path='', device=None):
""" loads a model from a checkpoint or from scratch if checkpoint_path='' """
if checkpoint_path == '':
model = Model(params['model_params'], **params['data_params'])
else:
print("model:", Model)
print(f'-> Loading model checkpoint: {checkpoint_path}')
model = Model.load_from_checkpoint(checkpoint_path)
if device is not None:
model = model.eval().cuda(device)
return model
|
8f1339b5548024714f731de037ef320535fc3b69
| 19,197 |
import inspect
def get_widget_type_choices():
"""
Generates Django model field choices based on widgets
in holodeck.widgets.
"""
choices = []
for name, member in inspect.getmembers(widgets, inspect.isclass):
if member != widgets.Widget:
choices.append((
"%s.%s" % (member.__module__, member.__name__),
member.name
))
return choices
|
143ff91ee3bc4166e5091e344a38fb2bbb72934e
| 19,198 |
from numpy import array, cos, fmod, outer, sin, tensordot
def iau2000a(jd_tt):
"""Compute Earth nutation based on the IAU 2000A nutation model.
`jd_tt` - Terrestrial Time: Julian date float, or NumPy array of floats
Returns a tuple ``(delta_psi, delta_epsilon)`` measured in tenths of
a micro-arcsecond. Each value is either a float, or a NumPy array
with the same dimensions as the input argument.
"""
# Interval between fundamental epoch J2000.0 and given date.
t = (jd_tt - T0) / 36525.0
# Compute fundamental arguments from Simon et al. (1994), in radians.
a = fundamental_arguments(t)
# ** Luni-solar nutation **
# Summation of luni-solar nutation series (in reverse order).
arg = nals_t.dot(a)
fmod(arg, TAU, out=arg)
sarg = sin(arg)
carg = cos(arg)
stsc = array((sarg, t * sarg, carg)).T
ctcs = array((carg, t * carg, sarg)).T
dpsi = tensordot(stsc, lunisolar_longitude_coefficients)
deps = tensordot(ctcs, lunisolar_obliquity_coefficients)
# Compute and add in planetary components.
if getattr(t, 'shape', ()) == ():
a = t * anomaly_coefficient + anomaly_constant
else:
a = (outer(anomaly_coefficient, t).T + anomaly_constant).T
a[-1] *= t
fmod(a, TAU, out=a)
arg = napl_t.dot(a)
fmod(arg, TAU, out=arg)
sc = array((sin(arg), cos(arg))).T
dpsi += tensordot(sc, nutation_coefficients_longitude)
deps += tensordot(sc, nutation_coefficients_obliquity)
return dpsi, deps
|
beadd6469a85b475dc22ca1e2a967310555140a9
| 19,199 |