import numpy as np
def weighted_moments(values, weights):
"""Return weighted mean and weighted standard deviation of a sequence"""
w_mean = np.average(values, weights=weights)
sq_err = (values - w_mean)**2
w_var = np.average(sq_err, weights=weights)
w_std = np.sqrt(w_var)
return w_mean, w_std | 84775550c54f285f9a032641cf27976eebf94322 | 6,400 |
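# Usage sketch for weighted_moments (the sample values and weights below are made up):
import numpy as np

values = np.array([1.0, 2.0, 4.0])
weights = np.array([1.0, 1.0, 2.0])   # the last observation counts twice
mean, std = weighted_moments(values, weights)
print(mean)  # 2.75, i.e. (1 + 2 + 2*4) / 4
print(std)   # ~1.299, the square root of the weighted variance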
import numpy as np
import matplotlib.pyplot as plt
def forestplot(data, kind='forestplot', model_names=None, var_names=None, combined=False,
credible_interval=0.95, quartiles=True, r_hat=True, n_eff=True, colors='cycle',
textsize=None, linewidth=None, markersize=None, joyplot_alpha=None,
joyplot_overlap=2, figsize=None):
"""
Forest plot
Generates a forest plot of 100*(credible_interval)% credible intervals from
a trace or list of traces.
Parameters
----------
data : xarray.Dataset or list of compatible
Samples from a model posterior
kind : str
Choose kind of plot for main axis. Supports "forestplot" or "joyplot"
model_names : list[str], optional
List with names for the models in the list of data. Useful when
plotting more than one dataset
var_names: list[str], optional
List of variables to plot (defaults to None, which results in all
variables plotted)
combined : bool
Flag for combining multiple chains into a single chain. If False (default),
chains will be plotted separately.
credible_interval : float, optional
Credible interval to plot. Defaults to 0.95.
quartiles : bool, optional
Flag for plotting the interquartile range, in addition to the credible_interval intervals.
Defaults to True
r_hat : bool, optional
Flag for plotting Gelman-Rubin statistics. Requires 2 or more chains. Defaults to True
n_eff : bool, optional
Flag for plotting the effective sample size. Requires 2 or more chains. Defaults to True
colors : list or string, optional
list with valid matplotlib colors, one color per model. Alternatively, a string can be passed.
If the string is `cycle`, it will automatically choose a color per model from the
matplotlib color cycle. If a single color is passed, e.g. 'k', 'C2', 'red', this color will be used
for all models. Defaults to 'cycle'.
textsize: int
Text size for labels. If None it will be autoscaled based on figsize.
linewidth : int
Line width throughout. If None it will be autoscaled based on figsize.
markersize : int
Markersize throughout. If None it will be autoscaled based on figsize.
joyplot_alpha : float
Transparency for joyplot fill. If 0, border is colored by model, otherwise
a black outline is used.
joyplot_overlap : float
Overlap height for joyplots.
figsize : tuple, optional
Figure size. Defaults to None
Returns
-------
fig : matplotlib Figure
axes : array of matplotlib Axes
"""
ncols, width_ratios = 1, [3]
if n_eff:
ncols += 1
width_ratios.append(1)
if r_hat:
ncols += 1
width_ratios.append(1)
plot_handler = PlotHandler(data, var_names=var_names, model_names=model_names,
combined=combined, colors=colors)
if figsize is None:
figsize = (min(12, sum(width_ratios) * 2), plot_handler.fig_height())
textsize, auto_linewidth, auto_markersize = _scale_text(figsize, textsize=textsize)
if linewidth is None:
linewidth = auto_linewidth
if markersize is None:
markersize = auto_markersize
fig, axes = plt.subplots(nrows=1,
ncols=ncols,
figsize=figsize,
gridspec_kw={'width_ratios': width_ratios},
sharey=True
)
axes = np.atleast_1d(axes)
if kind == 'forestplot':
plot_handler.forestplot(credible_interval, quartiles, textsize,
linewidth, markersize, axes[0])
elif kind == 'joyplot':
plot_handler.joyplot(joyplot_overlap, textsize, linewidth, joyplot_alpha, axes[0])
else:
raise TypeError(f"Argument 'kind' must be one of 'forestplot' or "
f"'joyplot' (you provided {kind})")
idx = 1
if r_hat:
plot_handler.plot_rhat(axes[idx], textsize, markersize)
idx += 1
if n_eff:
plot_handler.plot_neff(axes[idx], textsize, markersize)
idx += 1
for ax in axes:
ax.grid(False)
# Remove ticklines on y-axes
for ticks in ax.yaxis.get_major_ticks():
ticks.tick1On = False
ticks.tick2On = False
for loc, spine in ax.spines.items():
if loc in ['left', 'right']:
spine.set_color('none') # don't draw spine
if len(plot_handler.data) > 1:
plot_handler.make_bands(ax)
labels, ticks = plot_handler.labels_and_ticks()
axes[0].set_yticks(ticks)
axes[0].set_yticklabels(labels)
all_plotters = list(plot_handler.plotters.values())
y_max = plot_handler.y_max() - all_plotters[-1].group_offset
if kind == 'joyplot': # space at the top
y_max += joyplot_overlap
axes[0].set_ylim(-all_plotters[0].group_offset, y_max)
return fig, axes | e2170c4bdbfc2fbf5c3db24688e8ab09a2ec498c | 6,401 |
import yaml
import sys
import io
import csv
import gzip
import ujson
def write_file(filename, data, plain=False): # pylint: disable=too-many-branches
"""
Write a file, use suffix to determine type and compression.
- types: '.json', '.yaml'
- compression: None, '.gz'
write_file('variable.json.gz', data)
"""
if '.json' in filename:
content = ujson.dumps(data, indent=1, escape_forward_slashes=False)
elif '.yaml' in filename:
content = yaml.dump(data, indent=1)
elif filename == 'STDOUT':
sys.stdout.write(ujson.dumps(data, indent=1, escape_forward_slashes=False)+'\n')
return True
elif filename == 'STDERR':
sys.stderr.write(ujson.dumps(data, indent=1, escape_forward_slashes=False)+'\n')
return True
elif plain:
content = '\n'.join(data)
elif '.csv' in filename or '.tsv' in filename:
output = io.StringIO()
if '.csv' in filename:
writer = csv.writer(output, quoting=csv.QUOTE_NONNUMERIC)
else:
writer = csv.writer(output, delimiter='\t')
for row in data:
writer.writerow(row)
content = output.getvalue()
else:
content = data
if '.gz' in filename:
try:
with gzip.open(filename, 'wt') as fh:
fh.write(content)
except OSError:
return False
else:
try:
with open(filename, 'wt') as fh:
fh.write(content)
except IOError:
return False
return True | e68c05b3d1b10fde32e89ce547e2c882e2862c7c | 6,402 |
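# Usage sketch for write_file (file names are illustrative; the JSON branch assumes the
# ujson package is installed):
ok_json = write_file('results.json.gz', {'run': 1, 'score': 0.93})      # gzipped JSON
ok_tsv = write_file('table.tsv', [['name', 'value'], ['alpha', 0.05]])  # tab-separated rows
print(ok_json, ok_tsv)  # True True on success, False for a file that could not be written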
import glob
import os
def SetupPythonPackages(system, wheel, base_dir):
"""Installs python package(s) from CIPD and sets up the build environment.
Args:
system (System): A System object.
wheel (Wheel): The Wheel object to install a build environment for.
base_dir (str): The top-level build directory for the wheel.
Returns: A tuple (path to the python interpreter to run,
dict of environment variables to be set).
"""
host_platform = HostCipdPlatform()
# Building Windows x86 on Windows x64 is a special case. In this situation,
# we want to directly install and run the windows-x86 python package. This
# is because some wheels use properties of the Python interpreter (e.g.
# sys.maxsize) to detect whether to build for 32-bit or 64-bit CPUs.
if (host_platform == 'windows-amd64' and
wheel.plat.cipd_platform == 'windows-386'):
host_platform = 'windows-386'
_, interpreter = _InstallCipdPythonPackage(system, host_platform, wheel,
base_dir)
env = wheel.plat.env.copy()
# If we are cross-compiling, also install the target-platform python and set
# PYTHONHOME to point to it. This will ensure that we use the correct
# compiler and linker command lines which are generated at build time in the
# sysconfigdata module.
if not wheel.spec.universal and host_platform != wheel.plat.cipd_platform:
pkg_dir, _ = _InstallCipdPythonPackage(system, wheel.plat.cipd_platform,
wheel, base_dir)
env['PYTHONHOME'] = pkg_dir
# For python 3, we need to also set _PYTHON_SYSCONFIGDATA_NAME to point to
# the target-architecture sysconfig module.
if wheel.pyversion[0] == '3':
sysconfigdata_modules = glob.glob('%s/lib/python%s/_sysconfigdata_*.py' %
(pkg_dir, '.'.join(wheel.pyversion)))
if len(sysconfigdata_modules) != 1:
raise Exception(
'Expected 1 sysconfigdata module in python package '
'for %s, got: [%s]' %
(wheel.plat.cipd_platform, ','.join(sysconfigdata_modules)))
env['_PYTHON_SYSCONFIGDATA_NAME'] = (os.path.basename(
sysconfigdata_modules[0])[:-3]) # remove .py
# Make sure not to pick up any extra host python modules.
env['PYTHONPATH'] = ''
return interpreter, env | 38b12bd7a7040d67a2172d1497f4d295739217b4 | 6,403 |
import numpy as np
import tensorflow as tf
def tf_dtype(dtype):
"""Translates dtype specifications in configurations to tensorflow data types.
Args:
dtype: String describing a numerical type (e.g. 'float'), numpy data type,
or numerical type primitive.
Returns: TensorFlow data type
"""
if dtype == 'float' or dtype == float or dtype == np.float32 or dtype == tf.float32:
return tf.float32
elif dtype == 'int' or dtype == int or dtype == np.int32 or dtype == tf.int32:
return tf.int32
elif dtype == 'bool' or dtype == bool or dtype == np.bool_ or dtype == tf.bool:
return tf.bool
else:
raise TensorforceError("Error: Type conversion from type {} not supported.".format(str(dtype))) | 0c51804974e7e1bb36fcc32f181cfab713fca263 | 6,404 |
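# Quick check of tf_dtype with string, numpy and Python primitive inputs:
import numpy as np
import tensorflow as tf

assert tf_dtype('float') == tf.float32
assert tf_dtype(np.int32) == tf.int32
assert tf_dtype(bool) == tf.bool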
from typing import Tuple
import timeit
import cv2
import imageio
import numpy as np
def decode_frame(raw_frame: bytes, frame_width: int, frame_height: int) -> Tuple[str, np.ndarray]:
"""
Decode the image bytes into string compatible with OpenCV
:param raw_frame: frame data in bytes
:param frame_width: width of the frame, obtained from Kinesis payload
:param frame_height: height of the frame, obtained from Kinesis payload
"""
start_time = timeit.default_timer()
# frameBuffer = Image.frombytes('RGB', (frame_width, frame_height), raw_frame)
# frameBuffer.save("./h264decoded.png", "png")
# frame = np.array(frameBuffer)
# img_str = cv2.imencode('.jpg', frame)[1].tostring()
img = imageio.get_reader(raw_frame, ".png")
frame: np.ndarray = img.get_data(0)
img_str = cv2.imencode('.png', frame)[1].tostring()
logger.info(f'Decoded frame after: {timeit.default_timer() - start_time}')
return img_str, frame | a38d7362997701756767cf466bb3fbb76b77a92e | 6,405 |
def preprocess(picPath):
"""preprocess"""
#read img
bgr_img = cv.imread(picPath)
#get img shape
orig_shape = bgr_img.shape[:2]
#resize img
img = cv.resize(bgr_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.int8)
# make sure the array is stored contiguously (C order) in memory
if not img.flags['C_CONTIGUOUS']:
img = np.ascontiguousarray(img)
return orig_shape, img | ae44fdbf3613e159f28db0d9417470872439f76d | 6,406 |
import requests
import pytest
def live_fractal_or_skip():
"""
Ensure Fractal live connection can be made
First looks for a local staging server, then tries QCArchive.
"""
try:
return FractalClient("localhost:7777", verify=False)
except (requests.exceptions.ConnectionError, ConnectionRefusedError):
print("Failed to connect to localhost, trying MolSSI QCArchive.")
try:
requests.get("https://api.qcarchive.molssi.org:443", json={}, timeout=5)
return FractalClient()
except (requests.exceptions.ConnectionError, ConnectionRefusedError):
return pytest.skip("Could not make a connection to central Fractal server") | b121e33a2294edc80d336abbb07c9a12f3301aea | 6,407 |
import numpy as np
d2r = np.pi / 180  # degrees-to-radians factor (assumed; defined elsewhere in the original module)
def get_legendre(theta, keys):
"""
Calculate Schmidt semi-normalized associated Legendre functions
Calculations based on recursive algorithm found in "Spacecraft Attitude Determination and Control" by James Richard Wertz
Parameters
----------
theta : array
Array of colatitudes in degrees
keys: iterable
list of spherical harmonic degree and order, tuple (n, m) for each
term in the expansion
Returns
-------
P : array
Array of Legendre functions, with shape (theta.size, len(keys)).
dP : array
Array of dP/dtheta, with shape (theta.size, len(keys))
"""
# get maximum N and maximum M:
n, m = np.array([k for k in keys]).T
nmax, mmax = np.max(n), np.max(m)
theta = theta.flatten()[:, np.newaxis]
P = {}
dP = {}
sinth = np.sin(d2r*theta)
costh = np.cos(d2r*theta)
# Initialize Schmidt normalization
S = {}
S[0, 0] = 1.
# initialize the functions:
for n in range(nmax +1):
for m in range(nmax + 1):
P[n, m] = np.zeros_like(theta, dtype = np.float64)
dP[n, m] = np.zeros_like(theta, dtype = np.float64)
P[0, 0] = np.ones_like(theta, dtype = np.float64)
for n in range(1, nmax +1):
for m in range(0, min([n + 1, mmax + 1])):
# do the legendre polynomials and derivatives
if n == m:
P[n, n] = sinth * P[n - 1, m - 1]
dP[n, n] = sinth * dP[n - 1, m - 1] + costh * P[n - 1, n - 1]
else:
if n == 1:
Knm = 0.
P[n, m] = costh * P[n -1, m]
dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m]
elif n > 1:
Knm = ((n - 1)**2 - m**2) / ((2*n - 1)*(2*n - 3))
P[n, m] = costh * P[n -1, m] - Knm*P[n - 2, m]
dP[n, m] = costh * dP[n - 1, m] - sinth * P[n - 1, m] - Knm * dP[n - 2, m]
# compute Schmidt normalization
if m == 0:
S[n, 0] = S[n - 1, 0] * (2.*n - 1)/n
else:
S[n, m] = S[n, m - 1] * np.sqrt((n - m + 1)*(int(m == 1) + 1.)/(n + m))
# now apply Schmidt normalization
for n in range(1, nmax + 1):
for m in range(0, min([n + 1, mmax + 1])):
P[n, m] *= S[n, m]
dP[n, m] *= S[n, m]
Pmat = np.hstack(tuple(P[key] for key in keys))
dPmat = np.hstack(tuple(dP[key] for key in keys))
return Pmat, dPmat | 8afd34e7e4805fab9393c2efff15c0cffcb9466a | 6,408 |
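# Usage sketch for get_legendre; the (n, m) keys below are illustrative:
import numpy as np

theta = np.array([30.0, 60.0, 90.0])              # colatitudes in degrees
keys = [(1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]   # spherical harmonic (degree, order) terms
P, dP = get_legendre(theta, keys)
print(P.shape, dP.shape)  # (3, 5) each: one row per colatitude, one column per key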
def table_4_28(x_t, c_):
"""
Return the correction factor that accounts for the influence of profile thickness.
arguments: relative position of the laminar-to-turbulent boundary-layer transition point (x_t),
relative profile thickness (c_)
return: value of the correction factor"""
nu_t_00 = [1.00, 1.03, 1.05, 1.08, 1.11, 1.13, 1.16, 1.19, 1.22, 1.25, 1.29, 1.33, 1.37]
nu_t_02 = [1.000, 1.020, 1.040, 1.060, 1.080, 1.104, 1.127, 1.155, 1.180, 1.205, 1.235, 1.260, 1.295]
nu_t_04 = [1.00, 1.01, 1.03, 1.04, 1.05, 1.07, 1.09, 1.10, 1.12, 1.14, 1.16, 1.17, 1.20]
c_mas = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12]
k = int(c_ // 0.01 + 1)
if x_t == 0:
nu_t = interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
elif (x_t >= 0) and (x_t <= 0.2):
nu_t = interpol(interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
interpol(nu_t_00[k], nu_t_00[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
procent(x_t, 0, 0.2))
elif (x_t >= 0.2) and (x_t <= 0.4):
nu_t = interpol(interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
interpol(nu_t_02[k], nu_t_02[k - 1], procent(c_, c_mas[k - 1], c_mas[k])),
procent(x_t, 0.2, 0.4))
else:
nu_t = interpol(nu_t_04[k], nu_t_04[k - 1], procent(c_, c_mas[k - 1], c_mas[k]))
return nu_t | 954c272bde503363f354b33f6af4f97bee5ad740 | 6,409 |
def random_spectra(path_length, coeffs, min_wavelength, max_wavelength, complexity):
"""
"""
solution = random_solution(coeffs, complexity)
return beers_law(solution, path_length, coeffs, min_wavelength, max_wavelength) | a82a0d638473d63cd768bfdc0318f5042a75ae12 | 6,410 |
import numpy as np
import pandas as pd
def data_prep(data,unit_identifier,time_identifier,matching_period,treat_unit,control_units,outcome_variable,
predictor_variables, normalize=False):
"""
Prepares the data by normalizing X for section 3.3. in order to replicate Becker and Klößner (2017)
"""
X = data.loc[data[time_identifier].isin(matching_period)]
X.index = X.loc[:,unit_identifier]
X0 = X.loc[(X.index.isin(control_units)),(predictor_variables)]
X0 = X0.groupby(X0.index).mean().values.T #control predictors
X1 = X.loc[(X.index == treat_unit),(predictor_variables)]
X1 = X1.groupby(X1.index).mean().values.T #treated predictors
# outcome variable realizations in matching period - Z0: control, Z1: treated
Z0 = np.array(X.loc[(X.index.isin(control_units)),(outcome_variable)]).reshape(len(control_units),len(matching_period)).T #control outcome
Z1 = np.array(X.loc[(X.index == treat_unit),(outcome_variable)]).reshape(len(matching_period),1) #treated outcome
if normalize == True:
# Scaling
nvarsV = X0.shape[0]
big_dataframe = pd.concat([pd.DataFrame(X0), pd.DataFrame(X1)], axis=1)
divisor = np.sqrt(big_dataframe.apply(np.var, axis=1))
V = np.zeros(shape=(len(predictor_variables), len(predictor_variables)))
np.fill_diagonal(V, np.diag(np.repeat(big_dataframe.shape[0],1)))
scaled_matrix = ((big_dataframe.T) @ (np.array(1/(divisor)).reshape(len(predictor_variables),1) * V)).T
X0 = np.array(scaled_matrix.iloc[:,0:len(control_units)])
X1 = np.array(scaled_matrix.iloc[:,len(control_units):(len(control_units)+1)])
Z0 = Z0.astype('float64')
Z1 = Z1.astype('float64')
return X0, X1, Z0, Z1 | 6f13cc083973d5bd7ac7d2ca239741aaf067fede | 6,411 |
from scipy.stats import mannwhitneyu
from sklearn.metrics import roc_auc_score
def calculate_auroc_statistics(y_true, y_pred, confint_alpha=0.05):
"""
calculate AUROC and its p-value and CI
"""
#TODO: small sample test
#TODO: check when it crashes
#TODO: confidence intervals
predictions_group0 = y_pred[y_true==0, 1]
predictions_group1 = y_pred[y_true==1, 1]
try:
pval_auc = mannwhitneyu(predictions_group0,
predictions_group1,
alternative='less')[1]
except:
pval_auc = 1
auroc = roc_auc_score(y_true, y_pred[:,1])
auroc_ci = calculate_auroc_confint(auroc, len(predictions_group0),
len(predictions_group1), confint_alpha)
return([auroc, pval_auc, auroc_ci[0], auroc_ci[1]]) | 5413912892d833cbb448efb396bff0c015866c59 | 6,412 |
import numpy
def kabsch_superpose(P, Q): # P,Q: vstack'ed matrix
"""
Usage:
P = numpy.vstack([a2, b2, c2])
Q = numpy.vstack([a1, b1, c1])
m = kabsch_superpose(P, Q)
newP = numpy.dot(m, P)
"""
A = numpy.dot(numpy.transpose(P), Q)
U, s, V = numpy.linalg.svd(A)
tmp = numpy.identity(3)
tmp[2,2] = numpy.sign(numpy.linalg.det(A))
R = numpy.dot(numpy.dot(numpy.transpose(V), tmp), numpy.transpose(U))
return R | 56b7b9c3168e644ad71bee2146af3e4ae455c648 | 6,413 |
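# Small self-check for kabsch_superpose: P is Q rotated 30 degrees about the z axis
# (a made-up example); the returned matrix should be a proper rotation with det ~ +1.
import numpy

th = numpy.radians(30.0)
rot = numpy.array([[numpy.cos(th), -numpy.sin(th), 0.],
                   [numpy.sin(th),  numpy.cos(th), 0.],
                   [0., 0., 1.]])
Q = numpy.eye(3)
P = numpy.dot(Q, rot.T)
m = kabsch_superpose(P, Q)
print(round(float(numpy.linalg.det(m)), 6))  # ~1.0 for a proper rotation
newP = numpy.dot(m, P)                       # usage as in the docstring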
def add(a_t, b_t):
"""
add operator a+b
"""
return add_op(a_t, b_t) | be4e5bad6deb651af8e8f084cbefd185c1f9781f | 6,414 |
import os
def download_pkg():
"""第二步下载相关环境需要的第三方库
:return: bool
"""
print("正在下载安装必要的第三方库文件...")
try:
# 如果需要使用IT之家爬虫还需要下载selenium、BeautifulSoup4、requests。可添加到后面
os.system('pip install flask flask_cors flask_wtf flask_mail pymysql redis apscheduler xlwt psutil ')
print("安装成功...")
flag = True
except Exception as e:
print("下载安装失败...原因是:%s" % e)
flag = False
return flag | fed51c16d21cb0425c13d737a20b141e91eae2d6 | 6,415 |
def GetExtensionDescriptor(full_extension_name):
"""Searches for extension descriptor given a full field name."""
return _pool.FindExtensionByName(full_extension_name) | 5e0088f785809e38d306d7416129114ac09a5135 | 6,416 |
import os
import logging
import boto3
from botocore.exceptions import ClientError
s3 = boto3.client('s3')  # module-level S3 client assumed by upload_file below
def upload_file(file_name, bucket, object_name=None):
"""Upload a file to an S3 bucket
:param file_name: File to upload
:param bucket: Bucket to upload to
:param object_name: S3 object name. If not specified then file_name is used
:return: True if file was uploaded, else False
"""
# If S3 object_name was not specified, use file_name
if object_name is None:
object_name = os.path.basename(file_name)
# Upload the file
try:
response = s3.upload_file(file_name, bucket, object_name)
except ClientError as e:
logging.error(e)
return False
return True | 171869ca015151ec8474a26198f6d50c615b15ff | 6,417 |
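# Usage sketch for upload_file; the bucket name and path are hypothetical and the call
# assumes valid AWS credentials and an existing bucket:
if upload_file('reports/daily.csv', 'my-example-bucket'):
    print('uploaded as daily.csv')
else:
    print('upload failed; see the logged ClientError')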
from argparse import ArgumentParser
from re import sub, findall
def parse_args():
"""
Parses command-line arguments and returns username, title of specified
repository and its' branch.
Returns: tuple (username, repo_name, branch).
Used only once in `main` method.
"""
DESC = 'Automatic license detection of a Github repository.'
parser = ArgumentParser(description=DESC)
# Specify arguments
parser.add_argument('--branch',
default='master',
required=False,
help='A branch of a repository from which license file should be obtained. Default: `master`.')
parser.add_argument('--repository_name',
required=False,
help='A name of a repository, whose license needs to be detected. Required.')
parser.add_argument('--username',
required=False,
help='A name of a user who owns a repository. Required.')
parser.add_argument('--url',
required=False,
help='An URL to Github repository.')
# Start parsing sys.argv
arg_dict = parser.parse_args().__dict__
branch = arg_dict['branch'] # `master` by default
user = arg_dict['username']
repo = arg_dict['repository_name']
url = arg_dict['url']
if (user is None) or (repo is None):
if (url is None):
# No repository information was typed, exiting...
print('Usage: --username <USERNAME> --repository_name <REPOSITORY NAME> '
'--branch <BRANCH NAME> (optional) or ')
print('--url <LINK TO REPOSITORY>')
exit(-1)
# Cut the `http(s)` scheme off the URL
chopped_url = sub(r'https?:\/\/', '', url)
# Extract user and repository names from URL
user, repo = findall(r'\/{1}([^\/]+)', chopped_url)
return user, repo, branch | f7bf4b0cc27a87add8f65f82bebbabb6a4b9ca06 | 6,418 |
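# Usage sketch for parse_args, simulating a command line that passes a repository URL
# (the repository is just an example):
import sys

sys.argv = ['detect_license.py', '--url', 'https://github.com/octocat/Hello-World']
user, repo, branch = parse_args()
print(user, repo, branch)  # octocat Hello-World master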
import argparse
def get_parameters():
"""
Parse script arguments
"""
parser = argparse.ArgumentParser(prog='compile.py')
# config.h template parameters
parser.add_argument('os', type=str, default="LINUX", choices=available_os)
parser.add_argument('arch', type=str, default="X86", choices=available_archs)
parser.add_argument('--log_lvl', type=str, default="LOG_LVL_INFO", choices=available_log_lvl)
parser.add_argument('--name', type=str, default="SUCHAI-DEV")
parser.add_argument('--id', type=str, default="0")
parser.add_argument('--version', type=str, default=configure.call_git_describe())
parser.add_argument('--con', type=str, default="1")
parser.add_argument('--comm', type=str, default="1")
parser.add_argument('--fp', type=str, default="1")
parser.add_argument('--hk', type=str, default="1")
parser.add_argument('--sen', type=str, default="0")
parser.add_argument('--adcs', type=str, default="0")
parser.add_argument('--test', type=str, default="0")
parser.add_argument('--node', type=str, default="1")
parser.add_argument('--zmq_in', type=str, default="tcp://127.0.0.1:8001")
parser.add_argument('--zmq_out', type=str, default="tcp://127.0.0.1:8002")
parser.add_argument('--st_mode', type=str, default="1")
parser.add_argument('--st_triple_wr', type=str, default="1")
parser.add_argument('--buffers_csp', type=str, default="10")
parser.add_argument('--socket_len', type=str, default="100")
# Build parameters
parser.add_argument('--drivers', action="store_true", help="Install platform drivers")
parser.add_argument('--ssh', action="store_true", help="Use ssh for git clone")
parser.add_argument('--test_type', type=str, default='', choices=available_tests)
# Force clean
parser.add_argument('--clean', action="store_true", help="Clean before build")
# Program
parser.add_argument('--program', action="store_true", help="Compile and program")
parser.add_argument('--console', type=int, default=4, help="Console to use. 2=Nanomind-USB-SERIAL, 4=FFP-USB")
# Skip config
parser.add_argument('--no-config', action="store_true", help="Skip configure, do not generate a new config.h")
return parser.parse_args() | 106e325dcea9835badaae33c8ac51e1ef56dbf7a | 6,419 |
import itertools
from collections import defaultdict
def collect_inventory_values(dataset, inventory_list, parameter_map):
"""
Collect inventories from a dataset.
"""
# Collect raw/unicode/clts for all relevant inventories
to_collect = []
for catalog in inventory_list.keys():
to_collect += list(
itertools.chain.from_iterable(inventory_list[catalog].values())
)
values = defaultdict(list)
for row in dataset["ValueTable"]:
if row["Contribution_ID"] in to_collect:
values[row["Contribution_ID"]].append(
{
"raw": row["Value"],
"unicode": parameter_map[row["Parameter_ID"]]["unicode"],
"bipa": parameter_map[row["Parameter_ID"]]["bipa"],
}
)
return values | 1c59e0784b6fc4db24f994440990e46a0ba2b1f0 | 6,420 |
import sys
import copy
import time
import optparse
from optparse import OptionParser, OptionGroup
import random
import logging
def Start(parser=None,
argv=sys.argv,
quiet=False,
add_pipe_options=True,
add_extract_options=False,
add_group_dedup_options=True,
add_sam_options=True,
add_umi_grouping_options=True,
return_parser=False):
"""set up an experiment.
The :py:func:`Start` method will set up a file logger and add some
default and some optional options to the command line parser. It
will then parse the command line and set up input/output
redirection and start a timer for benchmarking purposes.
The default options added by this method are:
``-v/--verbose``
the :term:`loglevel`
``timeit``
turn on benchmarking information and save to file
``timeit-name``
name to use for timing information,
``timeit-header``
output header for timing information.
``seed``
the random seed. If given, the python random
number generator will be initialized with this
seed.
Optional options added are:
Arguments
---------
param parser : :py:class:`U.OptionParser`
instance with command line options.
argv : list
command line options to parse. Defaults to
:py:data:`sys.argv`
quiet : bool
set :term:`loglevel` to 0 - no logging
return_parser : bool
return the parser object, no parsing. Useful for inspecting
the command line options of a script without running it.
add_pipe_options : bool
add common options for redirecting input/output
add_extract_options : bool
add options for extracting barcodes
add_sam_options : bool
add options for SAM/BAM input
add_umi_grouping_options : bool
add options for barcode grouping
add_group_dedup_options : bool
add options for UMI grouping and deduping
Returns
-------
tuple
(:py:class:`U.OptionParser` object, list of positional
arguments)
"""
if not parser:
parser = OptionParser(
version="%prog version: $Id$")
global global_options, global_args, global_starting_time
# save default values given by user
user_defaults = copy.copy(parser.defaults)
global_starting_time = time.time()
if add_extract_options:
group = OptionGroup(parser, "fastq barcode extraction options")
group.add_option("--extract-method",
dest="extract_method", type="choice",
choices=["string", "regex"],
help=("How to extract the umi +/- cell barcodes, "
"Choose from 'string' or 'regex'"))
group.add_option("-p", "--bc-pattern", dest="pattern", type="string",
help="Barcode pattern")
group.add_option("--bc-pattern2", dest="pattern2", type="string",
help="Barcode pattern for paired reads")
group.add_option("--3prime", dest="prime3", action="store_true",
help="barcode is on 3' end of read.")
group.add_option("--read2-in", dest="read2_in", type="string",
help="file name for read pairs")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "Barcode extraction options")
group.add_option("--extract-umi-method", dest="get_umi_method", type="choice",
choices=("read_id", "tag", "umis"), default="read_id",
help="how is the read UMI +/ cell barcode encoded? "
"[default=%default]")
group.add_option("--umi-separator", dest="umi_sep",
type="string", help="separator between read id and UMI",
default="_")
group.add_option("--umi-tag", dest="umi_tag",
type="string", help="tag containing umi",
default='RX')
group.add_option("--umi-tag-split", dest="umi_tag_split",
type="string",
help="split UMI in tag and take the first element",
default=None)
group.add_option("--umi-tag-delimiter", dest="umi_tag_delim",
type="string",
help="concatenate UMI in tag separated by delimiter",
default=None)
group.add_option("--cell-tag", dest="cell_tag",
type="string", help="tag containing cell barcode",
default=None)
group.add_option("--cell-tag-split", dest="cell_tag_split",
type="string",
help=("split cell barcode in tag and take the first element"
"for e.g 10X GEM tags"),
default='-')
group.add_option("--cell-tag-delimiter", dest="cell_tag_delim",
type="string",
help="concatenate cell barcode in tag separated by delimiter",
default=None)
parser.add_option_group(group)
if add_umi_grouping_options:
group = OptionGroup(parser, "UMI grouping options")
group.add_option("--method", dest="method", type="choice",
choices=("adjacency", "directional",
"percentile", "unique", "cluster"),
default="directional",
help="method to use for umi grouping [default=%default]")
group.add_option("--edit-distance-threshold", dest="threshold",
type="int",
default=1,
help="Edit distance theshold at which to join two UMIs "
"when grouping UMIs. [default=%default]")
group.add_option("--spliced-is-unique", dest="spliced",
action="store_true",
help="Treat a spliced read as different to an unspliced"
" one [default=%default]",
default=False)
group.add_option("--soft-clip-threshold", dest="soft_clip_threshold",
type="float",
help="number of bases clipped from 5' end before "
"read is counted as spliced [default=%default]",
default=4)
group.add_option("--read-length", dest="read_length",
action="store_true", default=False,
help="use read length in addition to position and UMI "
"to identify possible duplicates [default=%default]")
parser.add_option_group(group)
if add_sam_options:
group = OptionGroup(parser, "single-cell RNA-Seq options")
group.add_option("--per-gene", dest="per_gene", action="store_true",
default=False,
help="Group/Dedup/Count per gene. Must combine with "
"either --gene-tag or --per-contig")
group.add_option("--gene-tag", dest="gene_tag",
type="string",
help="Gene is defined by this bam tag [default=%default]",
default=None)
group.add_option("--assigned-status-tag", dest="assigned_tag",
type="string",
help="Bam tag describing whether read is assigned to a gene "
"By defualt, this is set as the same tag as --gene-tag",
default=None)
group.add_option("--skip-tags-regex", dest="skip_regex",
type="string",
help="Used with --gene-tag. "
"Ignore reads where the gene-tag matches this regex",
default="^(__|Unassigned)")
group.add_option("--per-contig", dest="per_contig", action="store_true",
default=False,
help="group/dedup/count UMIs per contig (field 3 in BAM; RNAME),"
" e.g for transcriptome where contig = gene")
group.add_option("--gene-transcript-map", dest="gene_transcript_map",
type="string",
help="File mapping transcripts to genes (tab separated)",
default=None)
group.add_option("--per-cell", dest="per_cell", action="store_true",
default=False,
help="group/dedup/count per cell")
parser.add_option_group(group)
if add_group_dedup_options:
group = OptionGroup(parser, "group/dedup options")
group.add_option("--buffer-whole-contig", dest="whole_contig",
action="store_true", default=False,
help="Read whole contig before outputting bundles: "
"guarantees that no reads are missed, but increases "
"memory usage")
group.add_option("--whole-contig", dest="whole_contig",
action="store_true", default=False,
help=optparse.SUPPRESS_HELP)
group.add_option("--multimapping-detection-method",
dest="detection_method", type="choice",
choices=("NH", "X0", "XT"),
default=None,
help="Some aligners identify multimapping using bam "
"tags. Setting this option to NH, X0 or XT will "
"use these tags when selecting the best read "
"amongst reads with the same position and umi "
"[default=%default]")
parser.add_option_group(group)
# options added separately here to maintain better output order
if add_sam_options:
group = OptionGroup(parser, "SAM/BAM options")
group.add_option("--mapping-quality", dest="mapping_quality",
type="int",
help="Minimum mapping quality for a read to be retained"
" [default=%default]",
default=0)
group.add_option("--output-unmapped", dest="output_unmapped", action="store_true",
default=False, help=optparse.SUPPRESS_HELP)
group.add_option("--unmapped-reads", dest="unmapped_reads",
type="choice",
choices=("discard", "use", "output"),
default="discard",
help=("How to handle unmapped reads. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--chimeric-pairs", dest="chimeric_pairs",
type="choice",
choices=("discard", "use", "output"),
default="use",
help=("How to handle chimeric read pairs. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--unpaired-reads", dest="unpaired_reads",
type="choice",
choices=("discard", "use", "output"),
default="use",
help=("How to handle unpaired reads. Options are "
"'discard', 'use' or 'correct' [default=%default]"))
group.add_option("--ignore-umi", dest="ignore_umi",
action="store_true", help="Ignore UMI and dedup"
" only on position", default=False)
group.add_option("--ignore-tlen", dest="ignore_tlen", action="store_true",
default=False,
help="Option to dedup paired end reads based solely on read1, "
"whether or not the template length is the same")
group.add_option("--chrom", dest="chrom", type="string",
help="Restrict to one chromosome",
default=None)
group.add_option("--subset", dest="subset", type="float",
help="Use only a fraction of reads, specified by subset",
default=None)
group.add_option("-i", "--in-sam", dest="in_sam", action="store_true",
help="Input file is in sam format [default=%default]",
default=False)
group.add_option("--paired", dest="paired", action="store_true",
default=False,
help="paired input BAM. [default=%default]")
group.add_option("-o", "--out-sam", dest="out_sam", action="store_true",
help="Output alignments in sam format [default=%default]",
default=False)
group.add_option("--no-sort-output", dest="no_sort_output",
action="store_true", default=False,
help="Don't Sort the output")
parser.add_option_group(group)
if add_pipe_options:
group = OptionGroup(parser, "input/output options")
group.add_option("-I", "--stdin", dest="stdin", type="string",
help="file to read stdin from [default = stdin].",
metavar="FILE")
group.add_option("-L", "--log", dest="stdlog", type="string",
help="file with logging information "
"[default = stdout].",
metavar="FILE")
group.add_option("-E", "--error", dest="stderr", type="string",
help="file with error information "
"[default = stderr].",
metavar="FILE")
group.add_option("-S", "--stdout", dest="stdout", type="string",
help="file where output is to go "
"[default = stdout].",
metavar="FILE")
group.add_option("--temp-dir", dest="tmpdir", type="string",
help="Directory for temporary files. If not set,"
" the bash environmental variable TMPDIR is used"
"[default = None].",
metavar="FILE")
group.add_option("--log2stderr", dest="log2stderr",
action="store_true", help="send logging information"
" to stderr [default = False].")
group.add_option("--compresslevel", dest="compresslevel", type="int",
help="Level of Gzip compression to use. Default (6) matches"
"GNU gzip rather than python gzip default (which is 9)")
parser.set_defaults(stderr=sys.stderr)
parser.set_defaults(stdout=sys.stdout)
parser.set_defaults(stdlog=sys.stdout)
parser.set_defaults(stdin=sys.stdin)
parser.set_defaults(tmpdir=None)
parser.set_defaults(log2stderr=False)
parser.set_defaults(compresslevel=6)
parser.add_option_group(group)
group = OptionGroup(parser, "profiling options")
group.add_option("--timeit", dest='timeit_file', type="string",
help="store timeing information in file [%default].")
group.add_option("--timeit-name", dest='timeit_name', type="string",
help="name in timing file for this class of jobs "
"[%default].")
group.add_option("--timeit-header", dest='timeit_header',
action="store_true",
help="add header for timing information [%default].")
parser.add_option_group(group)
group = OptionGroup(parser, "common options")
group.add_option("-v", "--verbose", dest="loglevel", type="int",
help="loglevel [%default]. The higher, the more output.")
group.add_option("-h", "--help", dest="short_help", action="callback",
callback=callbackShortHelp,
help="output short help (command line options only).")
group.add_option('--help-extended', action='help',
help='Output full documentation')
group.add_option("--random-seed", dest='random_seed', type="int",
help="random seed to initialize number generator "
"with [%default].")
parser.add_option_group(group)
# restore user defaults
parser.defaults.update(user_defaults)
if quiet:
parser.set_defaults(loglevel=0)
else:
parser.set_defaults(loglevel=1)
parser.set_defaults(
timeit_file=None,
timeit_name='all',
timeit_header=None,
random_seed=None,
)
if return_parser:
return parser
global_options, global_args = parser.parse_args(argv[1:])
if global_options.random_seed is not None:
random.seed(global_options.random_seed)
if add_pipe_options:
if global_options.stdout != sys.stdout:
global_options.stdout = openFile(global_options.stdout, "w")
if global_options.stderr != sys.stderr:
if global_options.stderr == "stderr":
global_options.stderr = sys.stderr
else:
global_options.stderr = openFile(global_options.stderr, "w")
if global_options.stdlog != sys.stdout:
global_options.stdlog = openFile(global_options.stdlog, "a")
elif global_options.log2stderr:
global_options.stdlog = global_options.stderr
if global_options.stdin != sys.stdin:
global_options.stdin = openFile(global_options.stdin, "r")
else:
global_options.stderr = sys.stderr
global_options.stdout = sys.stdout
global_options.stdin = sys.stdin
if global_options.log2stderr:
global_options.stdlog = sys.stderr
else:
global_options.stdlog = sys.stdout
if global_options.loglevel >= 1:
global_options.stdlog.write(getHeader() + "\n")
global_options.stdlog.write(getParams(global_options) + "\n")
global_options.stdlog.flush()
# configure logging
# map from 0-10 to logging scale
# 0: quiet
# 1: little verbosity
# >1: increased verbosity
if global_options.loglevel == 0:
lvl = logging.ERROR
elif global_options.loglevel == 1:
lvl = logging.INFO
else:
lvl = logging.DEBUG
if global_options.stdout == global_options.stdlog:
format = '# %(asctime)s %(levelname)s %(message)s'
else:
format = '%(asctime)s %(levelname)s %(message)s'
logging.basicConfig(
level=lvl,
format=format,
stream=global_options.stdlog)
# set up multi-line logging
# Note that .handlers is not part of the API, might change
# Solution is to configure handlers explicitly.
for handler in logging.getLogger().handlers:
handler.setFormatter(MultiLineFormatter(format))
return global_options, global_args | ee5f99a8f65735d2e7d3e53e358f352253a3e580 | 6,421 |
from spinup.env_wrappers.dynamic_skip_env import DynamicSkipEnv
from spinup.env_wrappers.single_agent_env import SingleAgentEnv
import time
import numpy as np
import tensorflow as tf
def sac(env_fn, actor_critic=core.mlp_actor_critic, ac_kwargs=dict(), seed=0,
steps_per_epoch=10000, epochs=10000, replay_size=int(1e6), gamma=0.99,
polyak=0.995, lr=1e-4, alpha=0.004, batch_size=256, start_steps=1000,
update_after=1000, update_every=1, num_test_episodes=0, max_ep_len=1000,
dynamic_skip = True,
logger_kwargs=dict(), save_freq=1):
"""
Soft Actor-Critic (SAC)
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
actor_critic: A function which takes in placeholder symbols
for state, ``x_ph``, and action, ``a_ph``, and returns the main
outputs from the agent's Tensorflow computation graph:
=========== ================ ======================================
Symbol Shape Description
=========== ================ ======================================
``mu`` (batch, act_dim) | Computes mean actions from policy
| given states.
``pi`` (batch, act_dim) | Samples actions from policy given
| states.
``logp_pi`` (batch,) | Gives log probability, according to
| the policy, of the action sampled by
| ``pi``. Critical: must be differentiable
| with respect to policy parameters all
| the way through action sampling.
``q1`` (batch,) | Gives one estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
``q2`` (batch,) | Gives another estimate of Q* for
| states in ``x_ph`` and actions in
| ``a_ph``.
=========== ================ ======================================
ac_kwargs (dict): Any kwargs appropriate for the actor_critic
function you provided to SAC.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
gamma (float): Discount factor. (Always between 0 and 1.)
polyak (float): Interpolation factor in polyak averaging for target
networks. Target networks are updated towards main networks
according to:
.. math:: \\theta_{\\text{targ}} \\leftarrow
\\rho \\theta_{\\text{targ}} + (1-\\rho) \\theta
where :math:`\\rho` is polyak. (Always between 0 and 1, usually
close to 1.)
lr (float): Learning rate (used for both policy and value learning).
alpha (float): Entropy regularization coefficient. (Equivalent to
inverse of reward scale in the original SAC paper.)
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
update_after (int): Number of env interactions to collect before
starting to do gradient descent updates. Ensures replay buffer
is full enough for useful updates.
update_every (int): Number of env interactions that should elapse
between gradient descent updates. Note: Regardless of how long
you wait between updates, the ratio of env steps to gradient steps
is locked to 1.
num_test_episodes (int): Number of episodes to test the deterministic
policy at the end of each epoch.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
tf.set_random_seed(seed)
np.random.seed(seed)
#TODO multithreading is a problem with deepmimic at the moment
#env, test_env = env_fn(), env_fn()
env = env_fn()
if dynamic_skip:
env = DynamicSkipEnv(env)
#if num_test_episodes > 0:
# test_env = DynamicSkipEnv(test_env)
test_env = env
# Let's make sure that every incoming env can be treated as a multi agent env.
if not type(env.observation_space) is list:
env = SingleAgentEnv(env)
if num_test_episodes > 0:
test_env = SingleAgentEnv(test_env)
n_agents = len(env.observation_space)
obs_dim = [space for space in env.observation_space]
act_dim = [space.shape[0] for space in env.action_space]
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = [space.high[0] for space in env.action_space]
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = core.placeholders(obs_dim, act_dim, obs_dim, [None] * n_agents, [None] * n_agents)
# Main outputs from computation graph
replay_buffer = []
step_ops = []
target_init = []
mu, pi, q1, q2 = [], [], [], []
for i in range(n_agents):
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space[i]
with tf.variable_scope('main' + str(i)):
mu_, pi_, logp_pi, q1_, q2_ = actor_critic(x_ph[i], a_ph[i], **ac_kwargs)
mu.append(mu_)
pi.append(pi_)
q1.append(q1_)
q2.append(q2_)
with tf.variable_scope('main' + str(i), reuse=True):
# compose q with pi, for pi-learning
_, _, _, q1_pi, q2_pi = actor_critic(x_ph[i], pi_, **ac_kwargs)
# get actions and log probs of actions for next states, for Q-learning
_, pi_next, logp_pi_next, _, _ = actor_critic(x2_ph[i], a_ph[i], **ac_kwargs)
# Target value network
with tf.variable_scope('target' + str(i)):
# target q values, using actions from *current* policy
_, _, _, q1_targ, q2_targ = actor_critic(x2_ph[i], pi_next, **ac_kwargs)
# Experience buffer
#replay_buffer.append(ReplayBuffer(obs_dim_obj=obs_dim[i], act_dim=act_dim[i], size=replay_size))
spinup_replay_buffer = ReplayBuffer(obs_dim_obj=obs_dim[i], act_dim=act_dim[i], size=replay_size)
#TODO make AMP optional
replay_buffer.append(AMPReplayBuffer(inner_replay_buffer=spinup_replay_buffer, env_reward_weight=0.5, amp_env=env, logger=logger))
# Count variables
var_counts = tuple(core.count_vars(scope) for scope in ['main' + str(i) + '/pi', 'main' + str(i) + '/q1', 'main' + str(i) + '/q2', 'main' + str(i)])
print('\nNumber of parameters agent ' + str(i) + ': \t pi: %d, \t q1: %d, \t q2: %d, \t total: %d\n'%var_counts)
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi, q2_pi)
min_q_targ = tf.minimum(q1_targ, q2_targ)
# Entropy-regularized Bellman backup for Q functions, using Clipped Double-Q targets
q_backup = tf.stop_gradient(r_ph[i] + gamma*(1-d_ph[i])*(min_q_targ - alpha * logp_pi_next))
# Soft actor-critic losses
pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi)
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1_)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2_)**2)
value_loss = q1_loss + q2_loss
# Policy train op
# (has to be separate from value train op, because q1_pi appears in pi_loss)
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main' + str(i) + '/pi'))
# Value train op
# (control dep of train_pi_op because sess.run otherwise evaluates in nondeterministic order)
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr)
value_params = get_vars('main' + str(i) + '/q')
with tf.control_dependencies([train_pi_op]):
train_value_op = value_optimizer.minimize(value_loss, var_list=value_params)
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main' + str(i)), get_vars('target' + str(i)))])
# All ops to call during one training step
step_ops.append([pi_loss, q1_loss, q2_loss, q1_, q2_, logp_pi,
train_pi_op, train_value_op, target_update])
# Initializing targets to match main variables
target_init.append(tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main' + str(i)), get_vars('target' + str(i)))]))
sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
dict_space = False
inputs, outputs = {}, {}
for i in range(n_agents):
# multi modality handling
if isinstance(x_ph[i], dict):
input_modalities = x_ph[i]
dict_space = True
else:
input_modalities = {'':x_ph[i]}
inputs.update({'x' + k + str(i):v for k,v in input_modalities.items()})
inputs.update({'a' + str(i): a_ph[i]})
outputs.update({'mu' + str(i): mu[i], 'pi' + str(i): pi[i], 'q1' + str(i): q1[i], 'q2' + str(i): q2[i]})
logger.setup_tf_saver(sess, inputs=inputs,
outputs=outputs)
def get_action(o, deterministic=False):
act_op = mu if deterministic else pi
feed_dict = {}
for i in range(n_agents):
if dict_space:
feed_dict.update({x_ph[i][modality]: o[i][modality].reshape(1,-1) for modality in x_ph[i]})
else:
feed_dict.update({x_ph[i]:o[i].reshape(1,-1)})
outputs = sess.run(act_op, feed_dict=feed_dict)
return np.array([out[0] for out in outputs])
def test_agent():
if num_test_episodes == 0:
return
test_env.reset()
for j in range(num_test_episodes):
o, d, ep_ret, ep_len = test_env.reset(), False, np.zeros(n_agents), 0
while not(np.any(d) or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(o, True))
ep_ret += np.array(r)
ep_len += 1
logger.store(TestEpRet=np.average(ep_ret), TestEpLen=ep_len)
test_env.reset()
start_time = time.time()
o, ep_ret, ep_len = env.reset(), np.zeros(n_agents), 0
total_steps = steps_per_epoch * epochs
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
# Until start_steps have elapsed, randomly sample actions
# from a uniform distribution for better exploration. Afterwards,
# use the learned policy.
if t > start_steps:
a = get_action(o)
else:
a = np.array([space.sample() for space in env.action_space])
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += np.array(r)
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = [False] if ep_len==max_ep_len else d
# Store experience to replay buffer
for i in range(n_agents):
amp_obs = env.get_amp_obs(i)
state_amp_agent = amp_obs["state_amp_agent"]
state_amp_expert = amp_obs["state_amp_expert"]
# TODO remove
#print("\no[i]: " + str(o[i]))
#print("a[i]: " + str(a[i]))
#print("o2[i]: " + str(o2[i]))
#print("d[i]: " + str(d[i]))
#print("state_amp_agent: " + str(state_amp_agent))
#print("state_amp_expert: " + str(state_amp_expert))
replay_buffer[i].store(o[i], a[i], r[i], o2[i], d[i], state_amp_agent, state_amp_expert)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
# End of trajectory handling
if np.any(d) or (ep_len == max_ep_len):
# TODO I think it would be better if I could reset for agents individually. Maybe the env could do this internally.
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, ep_ret, ep_len = env.reset(), 0, 0
# Update handling
if t >= update_after and t % update_every == 0:
for j in range(update_every):
feed_dict = {}
for i in range(n_agents):
batch = replay_buffer[i].sample_batch(batch_size)
feed_dict.update(replay_buffer[i].to_feed_dict(x_ph[i], batch['obs1']))
feed_dict.update(replay_buffer[i].to_feed_dict(x2_ph[i], batch['obs2']))
feed_dict.update({
a_ph[i]: batch['acts'],
r_ph[i]: batch['rews'],
d_ph[i]: batch['done'],
})
outs = sess.run(step_ops, feed_dict)
loss_pi, loss_q1, loss_q2, q1_vals, q2_vals, log_pi = [], [], [], [], [], []
for i in range(n_agents):
loss_pi.append(outs[i][0])
loss_q1.append(outs[i][1])
loss_q2.append(outs[i][2])
q1_vals.append(outs[i][3])
q2_vals.append(outs[i][4])
log_pi.append(outs[i][5])
logger.store(LossPi=np.average(loss_pi), LossQ1=np.average(loss_q1), LossQ2=np.average(loss_q2),
Q1Vals=np.average(q1_vals), Q2Vals=np.average(q2_vals), LogPi=np.average(log_pi))
# End of epoch wrap-up
if (t+1) % steps_per_epoch == 0:
epoch = (t+1) // steps_per_epoch
# Save model
if (epoch % save_freq == 0) or (epoch == epochs):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_agent()
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
if num_test_episodes > 0:
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
if "LossAmp" in logger.epoch_dict:
logger.log_tabular('LossAmp', average_only=True)
if "AccExpertAMP" in logger.epoch_dict:
logger.log_tabular('AccExpertAMP', average_only=True)
if "AccAgentAMP" in logger.epoch_dict:
logger.log_tabular('AccAgentAMP', average_only=True)
if "AmpRew" in logger.epoch_dict:
logger.log_tabular('AmpRew', average_only=True)
if "AmpRewBatchMax" in logger.epoch_dict:
logger.log_tabular('AmpRewBatchMax', average_only=True)
if "AmpRewBatchMin" in logger.epoch_dict:
logger.log_tabular('AmpRewBatchMin', average_only=True)
logger.log_tabular('LogPi', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular() | e6cbaa93e6a05ad0d128f1e50a277bbd9bff72ea | 6,422 |
def flatten_list(a_list, parent_list=None):
"""Given a list/tuple as entry point, return a flattened list version.
EG:
>>> flatten_list([1, 2, [3, 4]])
[1, 2, 3, 4]
NB: The kwargs are only for internal use of the function and should not be
used by the caller.
"""
if parent_list is None:
parent_list = []
for element in a_list:
if isinstance(element, (list, tuple)):
flatten_list(element, parent_list=parent_list)
else:
parent_list.append(element)
return parent_list | dd6c9c66a370e65744ede40dfdc295b0ec63379a | 6,423 |
from typing import List, Text
def list_to_csv_str(input_list: List) -> Text:
"""
Concatenates the elements of the list, joining them by ",".
Parameters
----------
input_list : list
List with elements to be joined.
Returns
-------
str
Returns a string, resulting from concatenation of list elements,
separated by ",".
Example
-------
>>> from pymove import conversions
>>> a = [1, 2, 3, 4, 5]
>>> conversions.list_to_csv_str(a)
'1,2,3,4,5'
"""
return list_to_str(input_list) | 4c172b0ce3daba01f2d976bc60739846b852c459 | 6,424 |
def scheme_listp(x):
"""Return whether x is a well-formed list. Assumes no cycles."""
while x is not nil:
if not isinstance(x, Pair):
return False
x = x.second
return True | e5001695035d2d24e2914295e8ae2f86d8ead0b3 | 6,425 |
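# Minimal check for scheme_listp, using hypothetical stand-ins for the interpreter's
# Pair class and the empty-list sentinel nil (both are defined elsewhere in the original code):
class Pair:
    def __init__(self, first, second):
        self.first = first
        self.second = second

nil = None

print(scheme_listp(Pair(1, Pair(2, nil))))  # True: a well-formed list (1 2)
print(scheme_listp(Pair(1, 2)))             # False: an improper pair (1 . 2)
print(scheme_listp(nil))                    # True: the empty list is a list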
def list_to_dict(config):
"""
Convert list based beacon configuration
into a dictionary.
"""
_config = {}
list(map(_config.update, config))
return _config | 3d7ace7612e67a0c406a2a400ad3147f99dbef0a | 6,426 |
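# For example, a beacon configured as a list of single-key dicts collapses into one dict:
config = [{'interval': 10}, {'services': ['nginx', 'sshd']}]
print(list_to_dict(config))  # {'interval': 10, 'services': ['nginx', 'sshd']}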
def get_model(model_name, in_channels = 3, input_size = 224, num_classes = 1000):
"""Get model
Args :
--model_name: model's name
--in_channels: default is 3
--input_size: default is 224
--num_classes: default is 1000 for ImageNet
return :
--model: model instance
"""
string = model_name
if model_name == 'cmt_ti':
model = CMT_Ti(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_xs':
model = CMT_XS(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_s':
model = CMT_S(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
elif model_name == 'cmt_b':
model = CMT_B(in_channels = in_channels, input_size = input_size, num_classes = num_classes)
else:
raise Exception('No other models!')
print(string + ': \n', model)
total = sum(p.numel() for p in model.parameters())
print("Total params: %.2fM" % (total / 1e6))
return model | 0380a19e2a063382920b7986d1087aaf70f05eda | 6,427 |
def check_edge_heights(
stack, shifts, height_resistance, shift_lines, height_arr, MIN_H, MAX_H,
RESOLUTION
):
"""
Check all edges and output an array indicating which ones are
0 - okay at minimum pylon height, 2 - forbidden, 1 - to be computed
NOTE: function not used here! only for test purposes
"""
# print(len(stack))
for i in range(len(stack)):
v_x = stack[-i - 1][0]
v_y = stack[-i - 1][1]
# so far height on in edges
for s in range(len(shifts)):
neigh_x = v_x + shifts[s][0]
neigh_y = v_y + shifts[s][1]
# get minimum heights of v_x,v_y dependent on incoming edge
bres_line = shift_lines[s] + np.array([v_x, v_y])
# required heights
S = int(
np.sqrt((v_x - neigh_x)**2 + (v_y - neigh_y)**2)
) * RESOLUTION
# left and right point
yA = height_resistance[v_x, v_y] + MIN_H
yB = height_resistance[neigh_x, neigh_y] + MIN_H
# compute lowest point of sag
x0 = S / 2 - ((yB - yA) * CAT_H / (CAT_W * S))
# compute height above x0 at left point
A_height = (CAT_W * x0**2) / (2 * CAT_H)
# print(height_bline)
# iterate over points on bres_line
stepsize = S / (len(bres_line) + 1)
heights_above = np.zeros(len(bres_line))
for k, (i, j) in enumerate(bres_line):
x = x0 - stepsize * (k + 1)
cat = (CAT_W * x**2) / (2 * CAT_H)
heights_above[k] = yA - A_height - height_resistance[i, j] + cat
# analyse heights_above:
if np.all(heights_above >= 11):
# whole cable is okay
fine_60 = 0
elif np.any(heights_above < -MAX_H - MIN_H):
# would not work with 80 - 80
fine_60 = 2
else:
# somewhere inbetween
fine_60 = 1
height_arr[s, neigh_x, neigh_y] = fine_60
return height_arr | f73c6fd0396967e2a5ecfa89d52f1468d2005967 | 6,428 |
def linear_int_ext(data_pts, p, scale=None, allow_extrap=False):
"""
Interpolate data points to find remaining unknown values absent from
`p` with optionally scaled axes. If `p` is not in the range and
`allow_extra` == True, a linear extrapolation is done using the two data
points at the end corresponding to the `p`.
Parameters
----------
data_pts : list_like(tuple)
[(a_1, ... a_n), ...] sorted on the required axis (either direction).
p : list_like
Required point to interpolate / extrapolate with at least a single
known component, i.e. :math:`(..., None, p_i, None, ...)`. If
more than one is supplied, the first is used.
scale :
Same as ``line_pt`` scale.
allow_extrap : bool, optional
If True linear extrapolation from the two adjacent endpoints is
permitted. Default = False.
Returns
-------
list :
Interpolated / extrapolated point :math:`[q_1, ..., q_n]` where
:math:`q_i = p_i` from above.
"""
if len(data_pts) < 2:
raise ValueError("At least two data points required.")
if scale is None:
scale = [None] * len(data_pts[0])
# Get working axis.
for ax, x in enumerate(p):
if x is not None:
break
else:
raise ValueError("Requested point must include at least one known "
"value.")
def on_axis(li): # Return value along required axis.
return li[ax]
# Get two adjacent points for straight line.
try:
# Try interpolation.
l_idx, r_idx = bracket_list(data_pts, p, key=on_axis)
except ValueError:
if not allow_extrap:
raise ValueError(f"Point not within data range.")
if ((on_axis(data_pts[0]) < on_axis(data_pts[-1])) != (
on_axis(p) < on_axis(data_pts[0]))):
l_idx, r_idx = -2, -1 # RHS extrapolation.
else:
l_idx, r_idx = 0, 1 # LHS extrapolation.
return line_pt(data_pts[l_idx], data_pts[r_idx], p, scale) | f69cc25d4610987a5f76f21e23df49efad5c6a7f | 6,429 |
def eval_in_els_and_qp(expression, ig, iels, coors,
fields, materials, variables,
functions=None, mode='eval', term_mode=None,
extra_args=None, verbose=True, kwargs=None):
"""
Evaluate an expression in given elements and points.
Parameters
----------
expression : str
The expression to evaluate.
fields : dict
The dictionary of fields used in `variables`.
materials : Materials instance
The materials used in the expression.
variables : Variables instance
The variables used in the expression.
functions : Functions instance, optional
The user functions for materials etc.
mode : one of 'eval', 'el_avg', 'qp'
The evaluation mode - 'qp' requests the values in quadrature points,
'el_avg' element averages and 'eval' means integration over
each term region.
term_mode : str
The term call mode - some terms support different call modes
and depending on the call mode different values are
returned.
extra_args : dict, optional
Extra arguments to be passed to terms in the expression.
verbose : bool
If False, reduce verbosity.
kwargs : dict, optional
The variables (dictionary of (variable name) : (Variable
instance)) to be used in the expression.
Returns
-------
out : array
The result of the evaluation.
"""
weights = nm.ones_like(coors[:, 0])
integral = Integral('ie', coors=coors, weights=weights)
    domain = list(fields.values())[0].domain
region = Region('Elements', 'given elements', domain, '')
region.cells = iels + domain.mesh.el_offsets[ig]
region.update_shape()
domain.regions.append(region)
    for field in fields.values():
        field.clear_mappings(clear_all=True)
        for ap in field.aps.values():
ap.clear_qp_base()
aux = create_evaluable(expression, fields, materials,
                           variables.values(), Integrals([integral]),
functions=functions,
mode=mode, extra_args=extra_args, verbose=verbose,
kwargs=kwargs)
equations, variables = aux
out = eval_equations(equations, variables,
preserve_caches=False,
mode=mode, term_mode=term_mode)
domain.regions.pop()
return out | b71a20f0806ac03f9f995ffa41f317bc2c029d1c | 6,430 |
def tracks2Dataframe(tracks):
"""
    Saves a list of Track objects to a pandas dataframe
Input:
tracks: List of Track objects
Output:
df: Pandas dataframe
"""
if(len(tracks) == 0):
print("Error saving to CSV. List of tracks is empty")
return
# Collect tracks into single dataframe
df = pd.DataFrame()
for t in tracks:
df = df.append(t.toDataframe())
df = df.sort_values(by=['frame', 'id'], ascending=[True, True])
return df | 9d25e7f9535cfefc5b6faf791555b382edf12a07 | 6,431 |
def sift_point_to_best(target_point, point, sift_dist):
"""
Move a point to target point given a distance. Based on Jensen's inequality formula.
Args:
target_point: A ndarray or tensor, the target point of pca,
point: A ndarray or tensor, point of pca,
sift_dist: A float, distance where point will sift to new one.
Returns:
        new_points: A tuple (x, y) with the coordinates of the updated point.
References:
https://en.wikipedia.org/wiki/Jensen%27s_inequality
"""
dist = np.sqrt(np.sum((point - target_point) ** 2))
a = sift_dist / dist
new_point = np.array([
point[0] * a + (1 - a) * target_point[0],
point[1] * a + (1 - a) * target_point[1]
])
new_points = (new_point[0], new_point[1])
return new_points | a14216998631f22d6c8d4e98112672608b8477e5 | 6,432 |
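# Usage sketch for sift_point_to_best above -- an illustration added for clarity,
# not part of the original record; it assumes numpy and the function defined above.
import numpy as np

target = np.array([0.0, 0.0])
point = np.array([3.0, 4.0])             # 5.0 units away from the target
x, y = sift_point_to_best(target, point, sift_dist=1.0)
# The point is pulled along the segment so that it ends up sift_dist away from
# the target: (x, y) == (0.6, 0.8).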
def jrandom_counts(sample, randoms, j_index, j_index_randoms, N_sub_vol, rp_bins, pi_bins,
period, num_threads, do_DR, do_RR):
"""
Count jackknife random pairs: DR, RR
"""
if do_DR is True:
DR = npairs_jackknife_xy_z(sample, randoms, rp_bins, pi_bins, period=period,
jtags1=j_index, jtags2=j_index_randoms,
N_samples=N_sub_vol, num_threads=num_threads)
DR = np.diff(np.diff(DR, axis=1), axis=2)
else:
DR = None
if do_RR is True:
RR = npairs_jackknife_xy_z(randoms, randoms, rp_bins, pi_bins, period=period,
jtags1=j_index_randoms, jtags2=j_index_randoms,
N_samples=N_sub_vol, num_threads=num_threads)
RR = np.diff(np.diff(RR, axis=1), axis=2)
else:
RR = None
return DR, RR | dce697b11f1d66b61aef46982c40b0310a292a92 | 6,433 |
from typing import Any
import pyarrow as pa
def process_not_inferred_array(ex: pa.ArrowInvalid, values: Any) -> pa.Array:
"""Infer `pyarrow.array` from PyArrow inference exception."""
dtype = process_not_inferred_dtype(ex=ex)
if dtype == pa.string():
array: pa.Array = pa.array(obj=[str(x) for x in values], type=dtype, safe=True)
else:
raise ex # pragma: no cover
return array | c2d84f436dbd1123e38e1468101f8910e928e9ba | 6,434 |
def start_end(tf):
"""Find start and end indices of running streaks of True values"""
n = len(tf)
tf = np.insert(tf, [0, len(tf)], [False, False])
# 01 and 10 masks
start_mask = (tf[:-1] == 0) & (tf[1:] == 1)
end_mask = (tf[:-1] == 1) & (tf[1:] == 0)
# Locations
start_loc = np.where(start_mask)[0]
end_loc = np.minimum(np.where(end_mask)[0] - 1, n-1)
return start_loc, end_loc | 592a55da0d1c02259676444d2dd640f759dfb62d | 6,435 |
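# Usage sketch for start_end above -- an illustration added for clarity, not part
# of the original record; it assumes numpy and the function defined above.
import numpy as np

mask = np.array([False, True, True, False, True, False])
starts, ends = start_end(mask)
# Two runs of True values, at indices 1-2 and 4-4:
# starts == array([1, 4]), ends == array([2, 4])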
def remove_provinces(data, date_range):
"""
REMOVE PROVINCES
:param data: The Data received from the API
:param date_range: the date range of the data
:return: data after removing provinces
"""
countries_with_provinces = []
names_of_countries_with_prov = []
# get countries with provinces
for country in data[:]:
if country['province'] is not None:
if country['country'] not in names_of_countries_with_prov:
names_of_countries_with_prov.append(country['country'])
countries_with_provinces.append(data.pop(data.index(country)))
else:
pass
# deal with countries with provinces
for country_name in names_of_countries_with_prov[:]: # for each country,
countries = list(
filter(lambda x: x['country'] == country_name, countries_with_provinces))
names_of_countries_with_prov.remove(country_name)
# calculate total cases, deaths & recovered per day
cases = {}
recovered = {}
deaths = {}
for date in date_range:
cs = 0
dt = 0
rc = 0
# sum data up per province
for prov in countries:
cs += prov['timeline']['cases'][date]
dt += prov['timeline']['deaths'][date]
rc += prov['timeline']['recovered'][date]
cases[date] = cs
recovered[date] = rc
deaths[date] = dt
# return country after removing provinces
totals = ({'country': country_name, 'province': None, 'timeline': {
'cases': cases, 'deaths': deaths, 'recovered': recovered}})
data.append(totals)
return data | 05e973254402fb2c9873fa065d45a6a5dd3da353 | 6,436 |
def plot_publish(families, targets=None, identifiers=None, keys=None):
"""Parse and plot all plugins by families and targets
Args:
families (list): List of interested instance family names
targets (list, optional): List of target names
identifiers (list, optional): List of interested dict names, take
["context.data", "instance.data"] if not provided.
keys (list, optional): List of interested key names, return all dict
keys if not provided.
"""
if not targets:
targets = ["default"] + api.registered_targets()
plugins = api.discover()
plugins = logic.plugins_by_families(plugins, families)
plugins = logic.plugins_by_targets(plugins, targets)
reports = list()
for plugin in plugins:
report = plot_plugin(plugin, identifiers, keys)
if report:
reports.append(report)
return reports | de2dc8cf3184fdd4d883e340256b55153346a3a9 | 6,437 |
import datetime
import math
def get_job_view(execution, prev_execution, stackstorm_url):
"""
Gets a job view from the specified execution and previous execution
:param execution: dict
:param prev_execution: dict
:param stackstorm_url: string
:return: dict
"""
current_time = datetime.datetime.utcnow()
hash_code = abs(hash(execution['action']['name'])) % (10 ** 8)
estimated_duration = ''
prev_time_elapsed_since = ''
if execution and 'start_timestamp' in execution:
start_time = datetime.datetime.strptime(execution['start_timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
elapsed_seconds = int((current_time - start_time).total_seconds())
else:
elapsed_seconds = 0
if prev_execution and 'elapsed_seconds' in prev_execution:
prev_elapsed_seconds = int(math.ceil(prev_execution['elapsed_seconds']))
else:
prev_elapsed_seconds = 0
if prev_execution:
prev_execution_id = prev_execution['id']
prev_build_name = prev_execution['id']
if 'end_timestamp' in prev_execution:
prev_end_time = datetime.datetime.strptime(prev_execution['end_timestamp'], '%Y-%m-%dT%H:%M:%S.%fZ')
prev_time_elapsed_since = int((current_time - prev_end_time).total_seconds())
if 'elapsed_seconds' in prev_execution:
estimated_duration = '{}s'.format(int(math.ceil(prev_execution['elapsed_seconds'])))
else:
prev_execution_id = ''
prev_build_name = ''
prev_build_duration = estimated_duration
progress = 0
if execution['status'] == 'succeeded':
status = 'successful'
elif execution['status'] == 'failed':
status = 'failing'
elif execution['status'] == 'running':
if prev_execution and prev_execution['status'] == 'failed':
status = 'failing running'
elif prev_execution and prev_execution['status'] == 'succeeded':
status = 'successful running'
else:
status = 'unknown running'
if prev_execution and (prev_execution['status'] == 'failed' or prev_execution['status'] == 'succeeded'):
if prev_elapsed_seconds > 0:
progress = int(math.floor((float(elapsed_seconds) / float(prev_elapsed_seconds)) * 100))
if progress > 100:
progress = 100
else:
progress = 100
else:
progress = 100
else:
status = 'unknown'
job_view = {
'name': execution['action']['name'],
'url': '{}/#/history/{}/general'.format(stackstorm_url, execution['id']),
'status': status,
'hashCode': hash_code,
'progress': progress,
'estimatedDuration': estimated_duration,
'headline': '',
'lastBuild': {
"timeElapsedSince": str(prev_time_elapsed_since),
"duration": prev_build_duration,
"description": '',
"name": prev_build_name,
"url": '{}/#/history/{}/general'.format(stackstorm_url, prev_execution_id),
},
'debug': {
'elapsed_seconds': elapsed_seconds,
'prev_elapsed_seconds': prev_elapsed_seconds,
}
}
return job_view | 88acb9a725bacb7a51518c3250001a607346ddba | 6,438 |
import torch
def mdetr_resnet101_refcocoplus(pretrained=False, return_postprocessor=False):
"""
MDETR R101 with 6 encoder and 6 decoder layers.
Trained on refcoco+, achieves 79.52 val accuracy
"""
model = _make_detr("resnet101")
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://zenodo.org/record/4721981/files/refcoco%2B_resnet101_checkpoint.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model | 30f73654fbabccc35629c9adb3d7ac91c5fe368d | 6,439 |
import sys
import requests
import json
def launch_duo_report(related_genome_id, duo_relation, duo_affected,
proband_genome_id, proband_sex, score_indels,
accession_id):
"""Launch a family report. Return the JSON response.
"""
# Construct url and request
url = "{}/reports/".format(FABRIC_API_URL)
url_payload = {'report_type': "Duo",
'duo_relation_genome_id': int(related_genome_id),
'duo_relation': duo_relation,
'duo_affected': duo_affected == 'affected',
'proband_genome_id': int(proband_genome_id),
'proband_sex': ('f' if proband_sex == 'female' else 'm'),
'background': 'FULL',
'score_indels': bool(score_indels),
'accession_id': accession_id}
sys.stdout.write("Launching family report...\n")
result = requests.post(url, auth=auth, data=json.dumps(url_payload))
return result.json() | 58be6c1a80ce34539683862fcda0b10cfbed2de1 | 6,440 |
import re
def readConfigFile(filePath):
""" Read the config file and generate a dictionnary containing an entry for
every modules of the installation. """
modules_attributes_list = []
confFile = open(filePath, "r")
for i, line in enumerate(confFile.readlines()):
# Remove everything that is written after "#" character (comments)
line = line.split("#")[0]
line = line.split("//")[0]
line = line.split("$")[0]
# Remove special characters
line = re.sub('[!@#$\0\\n ]','',line)
# Get the MAC addresses and the modules number
words = line.split(",")
if len(words) == 4:
modID = int(words[0])
posY = int(words[1])
posX = int(words[2])
macAddr = words[3]
modules_attributes_list.append((modID, posY, posX, macAddr))
elif len(words) < 2:
pass
else :
raise AttributeError("Wrong formatting of the MAC file.")
return modules_attributes_list | fadaec4dd005d6337eb5950b8782d5db944fb4cc | 6,441 |
def unpad_pkcs7(data):
"""
Strips PKCS#7 padding from data.
Raises ValueError if padding is invalid.
"""
if len(data) == 0:
raise ValueError("Error: Empty input.")
pad_value = data[-1]
if pad_value == 0 or pad_value > 16:
raise ValueError("Error: Invalid padding.")
for i in range(1, pad_value + 1):
if data[-i] != pad_value:
raise ValueError("Error: Invalid padding.")
unpadded = data[: (len(data) - pad_value)]
return unpadded | 27e59b8a880c130997f19814135c09cb6e94354d | 6,442 |
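# Usage sketch for unpad_pkcs7 above -- an illustration added for clarity, not
# part of the original record. PKCS#7 pads a block with N bytes that each hold
# the value N, so the padding can be verified before it is stripped.
print(unpad_pkcs7(b"YELLOW SUBMARINE\x04\x04\x04\x04"))   # b'YELLOW SUBMARINE'

try:
    unpad_pkcs7(b"ICE ICE BABY\x01\x02\x03\x04")           # inconsistent padding
except ValueError as exc:
    print(exc)                                             # Error: Invalid padding.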
def create_output_channel(
mgr: sl_tag.TagManager, group: str, name: str, data_type: sl_tag.DataType
) -> sl_tag.TagData:
"""Create a FlexLogger output channel."""
# "Import" the channel into FlexLogger.
full_name = get_tag_prefix() + ".Import.Setpoint.{}.{}".format(group, name)
mgr.open(full_name, data_type, create=True)
# Once FlexLogger creates the channel, we'll interact with it as an "export" channel
# (for both reads and writes).
full_name = get_tag_prefix() + ".Export.Setpoint.{}".format(name)
# Go ahead and pre-create the output channel, for ease-of-use. Otherwise, when
# trying to read its value, we'd have to be prepared for an ApiException complaining
# that the tag doesn't exist.
mgr.open(full_name, data_type, create=True)
return sl_tag.TagData(full_name, data_type) | 40bf2f6f555993deb4433d00768a3241dc8d72f6 | 6,443 |
import re
def slugify(value, allow_unicode=False):
"""
adapted from https://github.com/django/django/blob/master/django/utils/text.py
    Replace ":" and "/" with underscores, remove characters that aren't
    alphanumerics, underscores, or hyphens, convert to lowercase, and collapse
    spaces or repeated dashes into single dashes. Leading and trailing dashes
    and underscores are stripped. Note that 'allow_unicode' is currently unused.
"""
value = str(value)
value = value.replace(":", "_")
value = value.replace("/", "_")
value = re.sub(r'[^\w\s-]', '', value.lower())
return re.sub(r'[-\s]+', '-', value).strip('-_') | f87a54f124a06fde2163fec39ba41881032db569 | 6,444 |
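# Usage sketch for slugify above -- an illustration added for clarity, not part
# of the original record. Colons and slashes are mapped to underscores before
# the usual slugification.
print(slugify("Hello World/Test 1:2"))   # -> 'hello-world_test-1_2'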
import torch
def data_process(raw_text_iter: dataset.IterableDataset) -> Tensor:
"""Converts raw text into a flat Tensor."""
data = [torch.tensor(vocab(tokenizer(item)), dtype=torch.long) for item in raw_text_iter]
return torch.cat(tuple(filter(lambda t: t.numel() > 0, data))) | 1c4bb8cf9997f6a6205c7c4f1122892b528f5c0e | 6,445 |
def rna_view_redirect(request, upi, taxid):
"""Redirect from urs_taxid to urs/taxid."""
return redirect('unique-rna-sequence', upi=upi, taxid=taxid, permanent=True) | 7a45b8b75e2cffb7573a7856c74d8e7b21e70543 | 6,446 |
from typing import AbstractSet
def skip_for_variants(meta: MetaData, variant_keys: AbstractSet[str]) -> bool:
"""Check if the recipe uses any given variant keys
Args:
meta: Variant MetaData object
Returns:
True if any variant key from variant_keys is used
"""
# This is the same behavior as in
# conda_build.metadata.Metadata.get_hash_contents but without leaving out
# "build_string_excludes" (python, r_base, etc.).
dependencies = set(meta.get_used_vars())
trim_build_only_deps(meta, dependencies)
return not dependencies.isdisjoint(variant_keys) | 89bc8bf82431043cc4c6b42b6f8385df14c8d7d1 | 6,447 |
def _is_safe_url(url, request):
"""Override the Django `is_safe_url()` to pass a configured list of allowed
hosts and enforce HTTPS."""
allowed_hosts = (
settings.DOMAIN,
urlparse(settings.EXTERNAL_SITE_URL).netloc,
)
require_https = request.is_secure() if request else False
return is_safe_url(url, allowed_hosts=allowed_hosts, require_https=require_https) | e1a8779c72b6d5adfa3fe01b478783d81ef515de | 6,448 |
from typing import Union
import asyncio
import os
import pytest
def test_full_pipeline() -> None:
"""Test the full pipeline."""
# Define a class that can send messages and one that can receive them.
class TestClassS:
"""Test class incorporating send functionality."""
msg = _TestMessageSenderBoth()
def __init__(self, target: Union[TestClassRSync,
TestClassRAsync]) -> None:
self._target = target
@msg.send_method
def _send_raw_message(self, data: str) -> str:
"""Handle synchronous sending of raw json message data."""
# Just talk directly to the receiver for this example.
# (currently only support synchronous receivers)
assert isinstance(self._target, TestClassRSync)
return self._target.receiver.handle_raw_message(data)
@msg.send_async_method
async def _send_raw_message_async(self, data: str) -> str:
"""Handle asynchronous sending of raw json message data."""
# Just talk directly to the receiver for this example.
# (we can do sync or async receivers)
if isinstance(self._target, TestClassRSync):
return self._target.receiver.handle_raw_message(data)
return await self._target.receiver.handle_raw_message(data)
class TestClassRSync:
"""Test class incorporating synchronous receive functionality."""
receiver = _TestSyncMessageReceiver()
@receiver.handler
def handle_test_message_1(self, msg: _TMsg1) -> _TResp1:
"""Test."""
if msg.ival == 1:
raise CleanError('Testing Clean Error')
if msg.ival == 2:
raise RuntimeError('Testing Runtime Error')
return _TResp1(bval=True)
@receiver.handler
def handle_test_message_2(self,
msg: _TMsg2) -> Union[_TResp1, _TResp2]:
"""Test."""
del msg # Unused
return _TResp2(fval=1.2)
@receiver.handler
def handle_test_message_3(self, msg: _TMsg3) -> None:
"""Test."""
del msg # Unused
receiver.validate()
class TestClassRAsync:
"""Test class incorporating asynchronous receive functionality."""
receiver = _TestAsyncMessageReceiver()
@receiver.handler
async def handle_test_message_1(self, msg: _TMsg1) -> _TResp1:
"""Test."""
if msg.ival == 1:
raise CleanError('Testing Clean Error')
if msg.ival == 2:
raise RuntimeError('Testing Runtime Error')
return _TResp1(bval=True)
@receiver.handler
async def handle_test_message_2(
self, msg: _TMsg2) -> Union[_TResp1, _TResp2]:
"""Test."""
del msg # Unused
return _TResp2(fval=1.2)
@receiver.handler
async def handle_test_message_3(self, msg: _TMsg3) -> None:
"""Test."""
del msg # Unused
receiver.validate()
obj_r_sync = TestClassRSync()
obj_r_async = TestClassRAsync()
obj = TestClassS(target=obj_r_sync)
obj2 = TestClassS(target=obj_r_async)
# Test sends (of sync and async varieties).
response1 = obj.msg.send(_TMsg1(ival=0))
response2 = obj.msg.send(_TMsg2(sval='rah'))
response3 = obj.msg.send(_TMsg3(sval='rah'))
response4 = asyncio.run(obj.msg.send_async(_TMsg1(ival=0)))
# Make sure static typing lines up with what we expect.
if os.environ.get('EFRO_TEST_MESSAGE_FAST') != '1':
assert static_type_equals(response1, _TResp1)
assert static_type_equals(response3, None)
assert isinstance(response1, _TResp1)
assert isinstance(response2, (_TResp1, _TResp2))
assert response3 is None
assert isinstance(response4, _TResp1)
# Remote CleanErrors should come across locally as the same.
try:
_response5 = obj.msg.send(_TMsg1(ival=1))
except Exception as exc:
assert isinstance(exc, CleanError)
assert str(exc) == 'Testing Clean Error'
# Other remote errors should result in RemoteError.
with pytest.raises(RemoteError):
_response5 = obj.msg.send(_TMsg1(ival=2))
# Now test sends to async handlers.
response6 = asyncio.run(obj2.msg.send_async(_TMsg1(ival=0)))
assert isinstance(response6, _TResp1)
# Make sure static typing lines up with what we expect.
if os.environ.get('EFRO_TEST_MESSAGE_FAST') != '1':
assert static_type_equals(response6, _TResp1) | 04baa2c69b183ee2eff762f08943969863bdb3d0 | 6,449 |
from urllib import parse as urlparse
def _server():
"""
Reconstitute the name of this Blueprint I/O Server.
"""
return urlparse.urlunparse((request.environ.get('wsgi.url_scheme',
'https'),
request.environ.get('HTTP_HOST',
'devstructure.com'),
'', '', '', '')) | 122457aa7f2a5e301299ccaaed4bba75cf273f5a | 6,450 |
def get_range_api(spreadsheetToken, sheet_id, range, valueRenderOption=False):
"""
    Read the values of a single range of a sheet, given the spreadsheetToken and range; the returned data is limited to 10 MB.
:return:
"""
range_fmt = sheet_id + '!' + range
get_range_url = cfg.get_range_url.format(spreadsheetToken=spreadsheetToken, range=range_fmt)
headers = {
"Authorization": "Bearer " + cfg.access_token,
"Content-Type": "application/json"
}
params = {
"valueRenderOption": "ToString" if valueRenderOption else None
}
result = get_http_request(get_range_url, headers=headers, params=params)
return result | 0c226caaa64e1bab09ac9b3af6a839609d62d5a3 | 6,451 |
def rotate_rboxes90(rboxes: tf.Tensor,
image_width: int,
image_height: int,
rotation_count: int = 1) -> tf.Tensor:
"""Rotate oriented rectangles counter-clockwise by multiples of 90 degrees."""
image_width = tf.cast(image_width, dtype=tf.float32)
image_height = tf.cast(image_height, dtype=tf.float32)
rotation_count = rotation_count % 4
x, y, w, h, angle = tf.split(rboxes, 5, axis=1)
if rotation_count == 0:
return rboxes
elif rotation_count == 1:
angle = tf.where(angle < -90.0, angle + 270, angle - 90)
return tf.concat([y, image_width - x - 1, w, h, angle], axis=1)
elif rotation_count == 2:
angle = tf.where(angle < 0.0, angle + 180, angle - 180)
return tf.concat([image_width - x - 1, image_height - y - 1, w, h, angle],
axis=1)
else:
angle = tf.where(angle > 90.0, angle - 270, angle + 90)
return tf.concat([image_height - y - 1, x, w, h, angle], axis=1) | 7c21d6ea3bf8454af9aabd0f4408c9de593432ac | 6,452 |
def get_wrong_user_credentials():
"""
Monkeypatch GithubBackend.get_user_credentials to force the case where
invalid credentias were provided
"""
return dict(username='invalid',
password='invalid',
token='invalid',
remember=False,
remember_token=False) | 3598f00b05a53cdb13543642048fc8c333eebe52 | 6,453 |
def get_points(coords, m, b=None, diagonal=False):
"""Returns all discrete points on a line"""
points = []
x1, y1, x2, y2 = coords[0], coords[1], coords[2], coords[3]
# vertical line
if m is np.nan:
# bottom to top
y = min(y1, y2)
while y <= max(y1, y2):
points.append((x1, y))
y += 1
# horizontal line
elif m == 0:
# left to right
x = min(x1, x2)
while x <= max(x1, x2):
points.append((x, y1))
x += 1
else:
# diagonal line
if diagonal:
x = x1
y = y1
if x1 < x2:
# left to right
while x <= x2:
points.append((x, y))
x += 1
y = m * x + b
else:
# right to left
while x >= x2:
points.append((x, y))
x -= 1
y = m * x + b
else:
return None
return points | 23d6d4002c5625b8ea6011c26a0419c2a2710b53 | 6,454 |
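# Usage sketch for get_points above -- an illustration added for clarity, not
# part of the original record; it assumes numpy and the function defined above.
import numpy as np

# Vertical segment from (2, 1) to (2, 4); by the convention above its slope is np.nan.
print(get_points((2, 1, 2, 4), m=np.nan))
# -> [(2, 1), (2, 2), (2, 3), (2, 4)]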
def get_region_geo(region_id):
"""Get Geo/TopoJSON of a region.
Args:
region_id (str): Region ID (e.g. LK-1, LK-23)
Returns:
Geo-spatial data as GeoPandasFrame
"""
region_type = get_entity_type(region_id)
region_to_geo = _get_region_to_geo(region_type)
return region_to_geo.get(region_id, {}) | 0493aa9e521c6d27cad6e6be07662449b6768a20 | 6,455 |
def load_vocabulary(f):
"""
Load the vocabulary from file.
:param f: Filename or file object.
:type f: str or file
:return: Vocabulary
"""
v = Vocabulary()
if isinstance(f, str):
file_ = open(f, 'r')
else:
file_ = f
for line in file_:
wordid, word, wordcount = line.strip().split('\t')
wordid, wordcount = int(wordid), int(wordcount)
v.id2word[wordid] = word
v.word2id[word] = wordid
if wordcount != 0:
v.word_count[wordid] = wordcount
if isinstance(f, str):
file_.close()
return v | 7a7cdf44016eccd1ceefd4fcc9e19f8f50caece2 | 6,456 |
def populate_objects(phylodata_objects, project_name, path_to_species_trees, path_to_gene_trees, path_to_ranger_outputs):
"""
this function will try and associate each phylodata object with the correct
species_besttree
gene_bootstrap_trees
and rangerDTL output files (if they exist)
args:
list of phylodata objects
name of project
        paths (to species trees, to bootstrap gene trees, to rangerDTL outputs)
    returns
        True if everything was associated
        (raises SystemExit if rangerDTL outputs cannot be assigned)
"""
#try and populate the species and gene files. should work.
for obj in phylodata_objects:
#print("Populating species trees")
obj.populate_species_tree(path_to_species_trees)
#print("Populating gene trees")
obj.populate_gene_boots(path_to_gene_trees)
#now try and populate ranger output, if not make directory and run run_rangerDTL
for obj in phylodata_objects:
#print("Checking for rangerDTL outputs")
exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
if exists is False:
#run the program.
print("Running RangerDTL")
path_to_ranger_outputs, list_of_ranger_outputs = annotate_ranger.run_rangerDTL(obj, project_name)
#print("Checking for new rangerDTL outputs")
exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)
if exists is False:
print ("error in rangerdtl_output assignation")
raise SystemExit
return True | 5813981113be0513920fed0bc21bd6eedd6890f3 | 6,457 |
import copy
def extract_peers_dataset(
work_dict,
scrub_mode='sort-by-date'):
"""extract_peers_dataset
Fetch the IEX peers data for a ticker and
return it as a pandas Dataframe
:param work_dict: dictionary of args
:param scrub_mode: type of scrubbing handler to run
"""
label = work_dict.get('label', 'extract')
df_type = iex_consts.DATAFEED_PEERS
df_str = iex_consts.get_datafeed_str(df_type=df_type)
req = copy.deepcopy(work_dict)
if 'redis_key' not in work_dict:
# see if it's get dataset dictionary
if 'peers' in work_dict:
req['redis_key'] = req['peers']
req['s3_key'] = req['peers']
# end of support for the get dataset dictionary
log.debug(f'{label} - {df_str} - start')
return extract_utils.perform_extract(
df_type=df_type,
df_str=df_str,
work_dict=req,
scrub_mode=scrub_mode) | d0aec2efe89a0a65010f2de4da686694c8906cbf | 6,458 |
def xml_ind(content):
"""Translate a individual expression or variable to MathCAD XML.
:param content: str, math expression or variable name.
:return: str, formatted MathCAD XML.
"""
ns = ''' xmlns:ml="http://schemas.mathsoft.com/math30">''' # name space as initial head
sub_statement = xml_stat(xml_ex(content))
return sub_statement.replace('>', ns, 1) | 67e82cbfd2796e31eaef7305e240fc9e3d93c08e | 6,459 |
from typing import Any
def read_slug(slug: str, db: Session = Depends(get_db)) -> Any:
"""
Get a post by slug
"""
db_slug = get_post(db, slug)
if db_slug is None:
raise HTTPException(status_code=404, detail="Post not found")
return db_slug | 347dfd32aa87417cecfbb5b192288fdc0585a071 | 6,460 |
from typing import List
def left_join_predictions(anno_gold: pd.DataFrame, anno_predicted: pd.DataFrame, columns_keep_gold: List[str],
columns_keep_system: List[str]) -> pd.DataFrame:
"""
Given gold mention annotations and predicted mention annotations, this method returns the gold annotations with
additional columns from the system prediction merged in, based on the optimal 1:1 span matching per sentence. Gold
annotation spans will not be modified, only enriched (hence: left join). Index and column of dataframes must
conform to a certain format (see assert in code). Spans in the dataframes must be non-overlapping.
:param anno_gold:
:param anno_predicted:
:param columns_keep_gold:
:param columns_keep_system:
:return:
"""
assert anno_gold.index.names == [DOCUMENT_ID, MENTION_ID]
assert anno_predicted.index.names == [DOCUMENT_ID, MENTION_ID]
mappings = []
MENTION_ID_GOLD = "mention-id-gold"
MENTION_ID_PREDICTED = "mention-id-predicted"
# perform intersection sentence-wise
if not anno_predicted.empty:
for (doc_id, sent_idx), df_gold in anno_gold.reset_index().groupby([DOCUMENT_ID, SENTENCE_IDX]):
spans_gold = df_gold[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()
# look up mentions at the same spot in system output
anno_predicted_wout_index = anno_predicted.reset_index()
df_predicted = anno_predicted_wout_index.loc[(anno_predicted_wout_index[DOCUMENT_ID] == doc_id) & (anno_predicted_wout_index[SENTENCE_IDX] == sent_idx)]
spans_predicted = df_predicted[[TOKEN_IDX_FROM, TOKEN_IDX_TO]].values.tolist()
# perform span matching (only based on spans! no type information taken into consideration!)
matched_spans = span_matching(spans_gold, spans_predicted, keep_A=True)
# keep MENTION_IDs of matched mentions
for i_gold, i_predicted in matched_spans.items():
row = {DOCUMENT_ID: doc_id,
MENTION_ID_GOLD: df_gold.iloc[i_gold][MENTION_ID]}
# this index can be None because we set keep_A=True for span_matching, to always keep all gold annotations
if i_predicted is not None:
row[MENTION_ID_PREDICTED] = df_predicted.iloc[i_predicted][MENTION_ID]
mappings.append(row)
mappings = pd.DataFrame(mappings, columns=[DOCUMENT_ID, MENTION_ID_GOLD, MENTION_ID_PREDICTED])
if not mappings.empty:
# merge in the columns we want to keep from the gold annotations
mappings = mappings.merge(anno_gold[columns_keep_gold],
left_on=[DOCUMENT_ID, MENTION_ID_GOLD],
right_index=True)
# merge in the columns we want to keep from the predicted annotations - note the use of how="left" to keep gold annotations which have MENTION_ID_PREDICTED == None
left_joined = mappings.merge(anno_predicted[columns_keep_system],
left_on=[DOCUMENT_ID, MENTION_ID_PREDICTED],
right_index=True,
how="left")
# drop unwanted columns, return to original column names, return to original index
left_joined = left_joined.drop(columns=[MENTION_ID_PREDICTED])
left_joined = left_joined.rename(columns={MENTION_ID_GOLD: MENTION_ID})
left_joined = left_joined.set_index([DOCUMENT_ID, MENTION_ID])
else:
# append lots of NaNs if there is nothing to merge
left_joined = pd.concat([anno_gold[columns_keep_gold], pd.DataFrame([], columns=columns_keep_system)], axis=1)
left_joined.sort_index(inplace=True)
return left_joined | c34785e255940f69375ee64674186b0b7e8bdf1f | 6,461 |
from flask import json
def get_users_data(filter):
"""
Returns users in db based on submitted filter
:param filter:
:return:
"""
# presets - filter must be in one of the lists
filter_presets = {"RegistrationStatus": ["Pending", "Verified"], "userTypeName": ["Administrator", "Event Manager", "Alumni"]}
if filter.title() in filter_presets["RegistrationStatus"]:
users_data = db.get_users(RegistrationStatus=filter)
elif filter.title() in filter_presets["userTypeName"]:
users_data = db.get_users(userTypeName=filter)
else:
#filter doesn't exist return all users
users_data = db.get_users()
users_data = list(enumerate(users_data))
return json.jsonify(users_data) | 568dab028a30c9c6f88de83054b6a7b3e95662fc | 6,462 |
def calAdjCCTTFromTrace(nt,dt,tStartIn,tEndIn,dataIn, synthIn):
""" calculate the cross correlation traveltime adjoint sources for one seismogram
IN:
nt : number of timesteps in each seismogram
dt : timestep of seismograms
tStartIn : float starting time for trace
tEndIn : float end time for trace
OUT:
fBar : array containing the adjoint seismogram for the trace
t : ndarray containing the time steps
"""
isCalculateWeights = False
if isCalculateWeights:
dSeism = np.zeros(nt)
weight = 0
# -- time vector
t = np.ogrid[0:(nt-1)*dt:nt*1j]
# -- the norm
norm = 0
# -- numpy arrays initialisation
velSynth = np.zeros(nt)
accSynth = np.zeros(nt)
timeWind = np.zeros(nt)
fBar = np.zeros(nt)
# -- calculate time time-window
tStart = tStartIn
tEnd = tEndIn
# -- the starting and ending sample numbers
iStart = int(np.floor(tStart/dt))
iEnd = int(np.ceil(tEnd/dt))
# -- sample length of the window
iWind = iEnd - iStart
#print iStart,iEnd,iWind
timeWind[iStart:iEnd]=sgnl.hann(iWind)
# -- calculate the adjoint
synth = synthIn
interpTrc = interp.InterpolatedUnivariateSpline(t,synth)
velSynth = interpTrc(t,1)
accSynth = interpTrc(t,2)
integrArgument = timeWind*synth*accSynth
# -- calculating the norm
norm = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
# -- divide every trace (row in matrices) by their norm (row in vector norm)
fBar = timeWind*velSynth / norm
if isCalculateWeights:
# -- read in the data seismograms
data = dataIn
# -- calculate the difference between data and synthetics (amplitude) per trace
dSeism = data - synth
# -- calculate the weight per trace
integrArgument = timeWind*velSynth*dSeism
weight = integr.simps(integrArgument,dx=dt,axis=-1,even='last')
print "weight", weight/norm
# -- multiply weight with every adj trace
fBar = fBar*weight
        print(weight)
return [fBar,t] | 7524d350c241ae07810b1e5c38bb3db869136804 | 6,463 |
def get_par_idx_update_pars_dict(pars_dict, cmd, params=None, rev_pars_dict=None):
"""Get par_idx representing index into pars tuples dict.
This is used internally in updating the commands H5 and commands PARS_DICT
pickle files. The ``pars_dict`` input is updated in place.
This code was factored out verbatim from kadi.update_cmds.py.
:param pars_dict: dict of pars tuples
:param cmd: dict or CommandRow
Command for updated par_idx
    :param params: dict, optional
If provided, this is used instead of cmd['params']
:param rev_pars_dict: dict, optional
If provided, also update the reverse dict.
:returns: int
Params index (value of corresponding pars tuple dict key)
"""
# Define a consistently ordered tuple that has all command parameter information
if params is None:
params = cmd['params']
keys = set(params.keys()) - set(('SCS', 'STEP', 'TLMSID'))
if cmd['tlmsid'] == 'AOSTRCAT':
pars_tup = encode_starcat_params(params) if params else ()
else:
if cmd['tlmsid'] == 'OBS':
# Re-order parameters to a priority order.
new_keys = ['obsid', 'simpos', 'obs_stop', 'manvr_start', 'targ_att']
for key in sorted(cmd['params']):
if key not in new_keys:
new_keys.append(key)
keys = new_keys
else:
# Maintain original order of keys for OBS command but sort the rest.
# This is done so the OBS command displays more nicely.
keys = sorted(keys)
pars_tup = tuple((key.lower(), params[key]) for key in keys)
try:
par_idx = pars_dict[pars_tup]
except KeyError:
# Along with transition to 32-bit idx in #190, ensure that idx=65535
# never gets used. Prior to #190 this value was being used by
# get_cmds_from_backstop() assuming that it will never occur as a
# key in the pars_dict. Adding 65536 allows older versions to work
# with the new cmds.pkl pars_dict.
par_idx = len(pars_dict) + 65536
pars_dict[pars_tup] = par_idx
if rev_pars_dict is not None:
rev_pars_dict[par_idx] = pars_tup
return par_idx | 9be23a37884eb674b3faa67ac17342b830c123fd | 6,464 |
import time
import logging
def profile(function, *args, **kwargs):
"""
Log the runtime of a function call.
Args:
function: The callable to profile.
args: Additional positional arguments to ``function``.
kwargs: Additional keyword arguments to ``function``.
Returns:
The result of applying ``function`` to ``args`` and ``kwargs``.
"""
start_time = time.time()
result = function(*args, **kwargs)
end_time = time.time()
time_elapsed = end_time - start_time
LOGGER.log(logging.DEBUG, 'Call to "%s" took %.3f seconds',
function.__name__, time_elapsed)
return result | 73369ea9e97e50e72100be5e6537d4ca39664a17 | 6,465 |
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from .. import __version__ as _vstr
from pathlib import Path
def get_parser():
"""Define the command line interface"""
parser = ArgumentParser(description='SDC Workflows',
formatter_class=RawTextHelpFormatter)
parser.add_argument(
'bids_dir', action='store', type=Path,
help='the root folder of a BIDS dataset')
parser.add_argument('output_dir', action='store', type=Path,
help='the output path for the outcomes of preprocessing and visual '
'reports')
parser.add_argument('analysis_level', choices=['participant', 'group'], nargs='+',
help='processing stage to be run, "participant" means individual analysis '
'and "group" is second level analysis.')
# optional arguments
parser.add_argument('--version', action='version', version='v{}'.format(_vstr))
# Options that affect how pyBIDS is configured
g_bids = parser.add_argument_group('Options for filtering BIDS queries')
g_bids.add_argument('--participant-label', action='store', type=str,
nargs='*', dest='subject', help='process only particular subjects')
g_bids.add_argument('--task', action='store', type=str, nargs='*',
help='select a specific task to be processed')
g_bids.add_argument('--dir', action='store', type=str, nargs='*',
help='select a specific direction entity to be processed')
g_bids.add_argument('--acq', action='store', type=str, nargs='*', dest='acquisition',
help='select a specific acquisition entity to be processed')
g_bids.add_argument('--run', action='store', type=int, nargs='*',
help='select a specific run identifier to be processed')
g_bids.add_argument('--suffix', action='store', type=str, nargs='*', default='bold',
help='select a specific run identifier to be processed')
g_perfm = parser.add_argument_group('Options to handle performance')
g_perfm.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=0,
help="increases log verbosity for each occurence, debug level is -vvv")
g_perfm.add_argument('--ncpus', '--nprocs', action='store', type=int,
help='maximum number of threads across all processes')
g_perfm.add_argument('--nthreads', '--omp-nthreads', action='store', type=int,
help='maximum number of threads per-process')
g_other = parser.add_argument_group('Other options')
g_other.add_argument('-w', '--work-dir', action='store', type=Path,
help='path where intermediate results should be stored')
return parser | 825f69340e1dce9c281f67768ff2559241bb1acc | 6,466 |
from math import cos as COS, sin as SIN
def COSTR(LR, R, W, S):
"""
COSTR one value of cosine transform of two-sided function
p. 90
"""
COSNW = 1.
SINNW = 0.
COSW = COS(W)
SINW = SIN(W)
S = R[0]
for I in range(1, LR):
        T = COSW * COSNW - SINW * SINNW
        SINNW = COSW * SINNW + SINW * COSNW
        COSNW = T
S += 2 * R[I] * COSNW
return S | c4a4ff69ac4bd2d22885fc5103e62b0d861d8ed6 | 6,467 |
def CV_SIGN(*args):
"""CV_SIGN(int a)"""
return _cv.CV_SIGN(*args) | 380758a917df6111c27e6072a72a483ac13513c9 | 6,468 |
def import_config_data(config_path):
"""
Parameters
----------
config_path : str
path to the experimental configuration file
Returns
-------
config data : dict
dict containing experimental metadata for a given session config file
"""
data = get_config(config_path)
return data | c174272dfd56876f7e8dbd306248c81aa1f3bdb2 | 6,469 |
def sigmaLabel(ax, xlabel, ylabel, sigma=None):
"""Label the axes on a figure with some uncertainty."""
confStr = r'$\pm{} \sigma$'.format(sigma) if sigma is not None else ''
ax.set_xlabel(xlabel + confStr)
ax.set_ylabel(ylabel + confStr)
return ax | 8ecf5ae2defd0d67c545943ea48992906612282e | 6,470 |
def startswith(x, prefix):
"""Determines if entries of x start with prefix
Args:
x: A vector of strings or a string
prefix: The prefix to test against
Returns:
A bool vector for each element in x if element startswith the prefix
"""
x = regcall(as_character, x)
return x.str.startswith(prefix) | 2d41a61d5a569af1925e8df7e2218fdae2bcb7ec | 6,471 |
def create_estimator(est_cls, const_kwargs, node, child_list):
"""
Creates an estimator.
:param est_cls: Function that creates the estimator.
:param const_kwargs: Keyword arguments which do not change during the evolution.
    :param node: Node whose ``obj_kwargs`` hold the keyword arguments set during the evolution process.
    :param child_list: List of converted child nodes - should be empty.
:return: A new estimator.
"""
if len(child_list) > 0:
raise ValueError("Estimator cannot have sub-estimators.")
evolved_kwargs = node.obj_kwargs
if 'feat_frac' in evolved_kwargs.keys():
feat_frac = evolved_kwargs['feat_frac']
evolved_kwargs = {key: val for key, val in evolved_kwargs.items()
if key != 'feat_frac'}
est = est_cls(**const_kwargs, **evolved_kwargs)
return RelativeTransformer(est, feat_frac)
return est_cls(**const_kwargs, **evolved_kwargs) | 306a948f11bc3f70a3c489d1740d7144bbaa4c5b | 6,472 |
def exists(hub_id):
"""Check for existance of hub in local state.
Args:
hub_id(str): Id of hub to query. The id is a string of hexadecimal sections used internally to represent a hub.
"""
if 'Hubs.{0}'.format(hub_id) in config.state:
return True
else:
return False | 4b6d333e070e1dea9300db20bcd50b58c1b9b457 | 6,473 |
import logging
def query_by_date_after(**kwargs):
"""
根据发布的时间查询,之后的记录: 2020-06-03之后,即2020-06-03, 2020-06-04, ......
:param kwargs: {'date': date}
:return:
"""
session = None
try:
date = kwargs['date'].strip() + config.BEGIN_DAY_TIME
session = get_session()
ret = session.query(Play).filter(Play.DATE_TIME >= date).order_by(
Play.DATE_TIME.desc()).limit(config.LIMIT_MAX).all()
        # Commit to persist to the database
session.commit()
results = parse_object(*ret)
        logging.info('OK : play.py--->query_by_date_after(), succeeded')
return results
except Exception as e:
        logging.critical('Error : play.py--->query_by_date_after() failed: {}'.format(e))
return []
finally:
        # Close the session
session.close() | b6bbf40441ae8c3f6db861e8bed025986b7373bd | 6,474 |
import os
def get_artifact_path(name):
"""Получение пути для сохранения артефакта. Side-эффект: Создание директории
@param name: Название артефакта
@return Путь для сохранения
"""
if not os.path.exists('../artifacts/'):
os.makedirs('../artifacts/')
path = f'../artifacts/{name}.png'
print(f'New artifact: {path}')
return path | 99c076c5803b418b27deeec0f65a5a42a24f3579 | 6,475 |
from skimage.feature import peak_local_max # Defer slow import
from scipy.stats import iqr
import math
def _step_4_find_peaks(
aligned_composite_bg_removed_im,
aligned_roi_rect,
raw_mask_rects,
border_size,
field_df,
sigproc_params,
):
"""
Find peaks on the composite image
TASK: Remove the mask rect checks and replace with the same masking
logic that is now implemented in the alignment phase. That is, just remove
the peaks from the source instead of in post-processing.
"""
n_outchannels, n_inchannels, n_cycles, dim = sigproc_params.channels_cycles_dim
assert (
aligned_composite_bg_removed_im.shape[0]
== aligned_composite_bg_removed_im.shape[1]
)
aligned_dim, _ = aligned_composite_bg_removed_im.shape
check.array_t(aligned_composite_bg_removed_im, is_square=True)
hat_rad = sigproc_params.hat_rad
brim_rad = sigproc_params.hat_rad + 1
hat_mask, brim_mask = _hat_masks(hat_rad, brim_rad)
kernel = imops.generate_gauss_kernel(1.0)
kernel = kernel - kernel.mean()
_fiducial_im = imops.convolve(aligned_composite_bg_removed_im, kernel)
# Black out the convolution artifact around the perimeter of the _fiducial_im
search_roi_rect = Rect(
aligned_roi_rect.b + brim_rad,
aligned_roi_rect.t - brim_rad,
aligned_roi_rect.l + brim_rad,
aligned_roi_rect.r - brim_rad,
)
search_roi = search_roi_rect.roi()
composite_fiducial_im = np.zeros_like(aligned_composite_bg_removed_im)
# Use Inter-Quartile Range for some easy filtering
_iqr = 0
if sigproc_params.iqr_rng is not None:
_iqr = iqr(
_fiducial_im[search_roi],
rng=(100 - sigproc_params.iqr_rng, sigproc_params.iqr_rng),
)
composite_fiducial_im[search_roi] = (_fiducial_im[search_roi] - _iqr).clip(min=0)
locs = peak_local_max(
composite_fiducial_im,
min_distance=hat_rad,
threshold_abs=sigproc_params.threshold_abs,
)
# Emergency exit to prevent memory overflows
# check.affirm(len(locs) < 7000, f"Too many peaks {len(locs)}")
shift = field_df.set_index("cycle_i").sort_index()[["shift_y", "shift_x"]].values
shift_y = shift[:, 0]
shift_x = shift[:, 1]
# Discard any peak in any mask_rect
# ALIGN the mask rects to the composite coordinate system
aligned_mask_rects = []
for channel in range(sigproc_params.n_output_channels):
channel_rects = safe_list_get(raw_mask_rects, channel, [])
for cycle in range(n_cycles):
for rect in safe_list_get(channel_rects, cycle, []):
yx = XY(rect[0], rect[1])
hw = WH(rect[2], rect[3])
yx += XY(border_size, border_size) - XY(shift_x[cycle], shift_y[cycle])
aligned_mask_rects += [(yx[0], yx[1], yx[0] + hw[0], yx[1] + hw[1])]
aligned_mask_rects = np.array(aligned_mask_rects)
if aligned_mask_rects.shape[0] > 0:
# To compare every loc with every mask rect we use the tricky np.fn.outer()
y_hits = np.greater_equal.outer(locs[:, 0], aligned_mask_rects[:, 0])
y_hits &= np.less.outer(locs[:, 0], aligned_mask_rects[:, 2])
x_hits = np.greater_equal.outer(locs[:, 1], aligned_mask_rects[:, 1])
x_hits &= np.less.outer(locs[:, 1], aligned_mask_rects[:, 3])
inside_rect = x_hits & y_hits # inside a rect if x and y are inside the rect
locs_to_keep = ~np.any(
inside_rect, axis=1
) # Reject if inside of any masked rect
locs = locs[locs_to_keep]
circle_im = np.zeros((aligned_dim, aligned_dim))
center = aligned_dim / 2
peak_rows = []
for field_peak_i, loc in enumerate(locs):
if sigproc_params.radial_filter is not None:
radius = math.sqrt((loc[0] - center) ** 2 + (loc[1] - center) ** 2)
radius /= center
if radius >= sigproc_params.radial_filter:
continue
imops.set_with_mask_in_place(circle_im, brim_mask, 1, loc=loc, center=True)
peak_rows += [
Munch(
peak_i=0,
field_peak_i=field_peak_i,
aln_y=int(loc[0]),
aln_x=int(loc[1]),
)
]
peak_df = pd.DataFrame(peak_rows)
return peak_df, circle_im, aligned_mask_rects | 1439336369681569a85ee3e8a8566e4d02cc2999 | 6,476 |
import socket
def get_reverse_host():
"""Return the reverse hostname of the IP address to the calling function."""
try:
return socket.gethostbyaddr(get_ipaddress())[0]
except:
return "Unable to resolve IP address to reverse hostname" | 48911baf6507563470cb2d34af2392b52c58ac9a | 6,477 |
def trans_stop(value) -> TransformerResult:
"""
A transformer that simply returns TransformerResult.RETURN.
"""
return TransformerResult.RETURN | c124c62a15bca1000ecdfaaa02433de405338e6c | 6,478 |
def generator(n, mode):
""" Returns a data generator object.
Args:
mode: One of 'training' or 'validation'
"""
flip_cams = False
if FLAGS.regularization == 'GRU':
flip_cams = True
gen = ClusterGenerator(FLAGS.train_data_root, FLAGS.view_num, FLAGS.max_w, FLAGS.max_h,
FLAGS.max_d, FLAGS.interval_scale, FLAGS.base_image_size, mode=mode, flip_cams=flip_cams)
logger.info('Initializing generator with mode {}'.format(mode))
if mode == 'training':
global training_sample_size
training_sample_size = len(gen.train_clusters)
if FLAGS.regularization == 'GRU':
training_sample_size = training_sample_size * 2
return iter(gen) | 1dc7950a4ae0f7c35a4ba788710327c2e63fae14 | 6,479 |
import re
def remove_multispaces(text):
""" Replace multiple spaces with only 1 space """
return [re.sub(r' +', " ",word) for word in text] | 0b87f6a4b0d49931b3f4bec6f9c313be05d476f0 | 6,480 |
def empirical_ci(arr: np.ndarray, alpha: float = 95.0) -> np.ndarray:
"""Computes percentile range in an array of values.
Args:
arr: An array.
alpha: Percentile confidence interval.
Returns:
A triple of the lower bound, median and upper bound of the confidence interval
with a width of alpha.
"""
percentiles = 50 - alpha / 2, 50, 50 + alpha / 2
return np.percentile(arr, percentiles) | 8bb0ff5768c70d4e34174ea4187898513a4f841e | 6,481 |
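# Usage sketch for empirical_ci above -- an illustration added for clarity, not
# part of the original record; it assumes numpy and the function defined above.
import numpy as np

samples = np.arange(101)                      # 0, 1, ..., 100
low, median, high = empirical_ci(samples, alpha=90.0)
# 90% empirical interval: low == 5.0, median == 50.0, high == 95.0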
def euclidean(a,b):
"""Calculate GCD(a,b) with the Euclidean algorithm.
Args:
a (Integer): an integer > 0.
b (Integer): an integer > 0.
Returns:
Integer: GCD(a,b) = m ∈ ℕ : (m|a ⋀ m|b) ⋀ (∄ n ∈ ℕ : (n|a ⋀ n|b) ⋀ n>m).
"""
if(a<b):
a,b = b,a
a, b = abs(a), abs(b)
while a != 0:
a, b = b % a, a
return b | 8af351e251e52336d7ef946a28bb6d666bff97c3 | 6,482 |
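# Usage sketch for euclidean above -- an illustration added for clarity, not part
# of the original record.
print(euclidean(12, 18))    # -> 6
print(euclidean(35, 64))    # -> 1 (35 and 64 are coprime)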
from typing import Union
def check_reserved_pulse_id(pulse: OpInfo) -> Union[str, None]:
"""
Checks whether the function should be evaluated generically or has special
treatment.
Parameters
----------
pulse
The pulse to check.
Returns
-------
:
A str with a special identifier representing which pulse behavior to use
"""
reserved_pulse_mapping = {
"stitched_square_pulse": _check_square_pulse_stitching,
"staircase": _check_staircase,
}
for key, checking_func in reserved_pulse_mapping.items():
if checking_func(pulse):
return key
return None | d4bd89da98612031fbcc7fcde9bcf40bb0843f70 | 6,483 |
def figure(*args, **kwargs):
"""
Returns a new SpectroFigure, a figure extended with features useful for
analysis of spectrograms.
Compare pyplot.figure.
"""
kw = {
'FigureClass': SpectroFigure,
}
kw.update(kwargs)
return plt.figure(*args, **kw) | 851b02773dc974691ba6e43477244aa8e4ba0760 | 6,484 |
import six
def allow_ports(ports, proto="tcp", direction="in"):
"""
Fully replace the incoming or outgoing ports
line in the csf.conf file - e.g. TCP_IN, TCP_OUT,
UDP_IN, UDP_OUT, etc.
CLI Example:
.. code-block:: bash
salt '*' csf.allow_ports ports="[22,80,443,4505,4506]" proto='tcp' direction='in'
"""
results = []
ports = set(ports)
ports = list(ports)
proto = proto.upper()
direction = direction.upper()
_validate_direction_and_proto(direction, proto)
ports_csv = ",".join(six.moves.map(six.text_type, ports))
directions = build_directions(direction)
for direction in directions:
result = __salt__["file.replace"](
"/etc/csf/csf.conf",
# pylint: disable=anomalous-backslash-in-string
pattern='^{0}_{1}(\ +)?\=(\ +)?".*"$'.format(proto, direction),
# pylint: enable=anomalous-backslash-in-string
repl='{0}_{1} = "{2}"'.format(proto, direction, ports_csv),
)
results.append(result)
return results | 3a14a9ea74daf4062e3bd970623284a152ffde08 | 6,485 |
def add(n1, n2, base=10):
"""Add two numbers represented as lower-endian digit lists."""
k = max(len(n1), len(n2)) + 1
d1 = n1 + [0 for _ in range(k - len(n1))]
d2 = n2 + [0 for _ in range(k - len(n2))]
res = []
carry = 0
for i in range(k):
if d1[i] + d2[i] + carry < base:
res.append(d1[i] + d2[i] + carry)
carry = 0
else:
res.append(d1[i] + d2[i] + carry - base)
carry = 1
while res and res[-1] == 0:
res = res[:-1]
if res: return res
return [0] | 098bfa9ebedf7f219a6f9910e98c4cf9cbf13aa8 | 6,486 |
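# Usage sketch for add above -- an illustration added for clarity, not part of
# the original record. Numbers are lower-endian digit lists, so 275 is [5, 7, 2].
print(add([5, 7, 2], [5, 2]))      # 275 + 25 = 300 -> [0, 0, 3]
print(add([9, 9], [1]))            # 99 + 1 = 100   -> [0, 0, 1]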
from datetime import datetime
import pytz
def folder_datetime(foldername, time_infolder_fmt=TIME_INFOLDER_FMT):
"""Parse UTC datetime from foldername.
Foldername e.g.: hive1_rpi1_day-190801/
"""
# t_str = folder.name.split("Photos_of_Pi")[-1][2:] # heating!!
t_str = foldername.split("day-")[-1]
day_naive = datetime.strptime(t_str, time_infolder_fmt)
# # Localize as UTC
# day_local = local_tz.localize(day_naive)
# dt_utc = day_local.astimezone(pytz.utc)
day_utc = pytz.utc.localize(day_naive)
return day_utc | fbbd9d9749cba6807391009e1720ca35b9dd7c7b | 6,487 |
def get_policy_profile_by_name(name, db_session=None):
"""
Retrieve policy profile by name.
:param name: string representing the name of the policy profile
:param db_session: database session
:returns: policy profile object
"""
db_session = db_session or db.get_session()
vsm_hosts = config.get_vsm_hosts()
pp = n1kv_models.PolicyProfile
pprofiles = db_session.query(pp).filter(
sql.and_(pp.name == name, pp.vsm_ip.in_(vsm_hosts))).all()
if pprofiles and check_policy_profile_exists_on_all_vsm(pprofiles,
vsm_hosts):
return pprofiles[0]
else:
raise n1kv_exc.PolicyProfileNotFound(profile=name) | 36136be7e618490bcda58799285277982bc41f71 | 6,488 |
def ecg_hrv_assessment(hrv, age=None, sex=None, position=None):
"""
Correct HRV features based on normative data from Voss et al. (2015).
Parameters
----------
hrv : dict
HRV features obtained by :function:`neurokit.ecg_hrv`.
age : float
Subject's age.
sex : str
Subject's gender ("m" or "f").
position : str
Recording position. To compare with data from Voss et al. (2015), use "supine".
Returns
----------
hrv_adjusted : dict
Adjusted HRV features.
Example
----------
>>> import neurokit as nk
>>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks)
>>> ecg_hrv_assessment = nk.bio_ecg.ecg_hrv_assessment(hrv)
Notes
----------
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
*Details*
- **Adjusted HRV**: The raw HRV features are normalized :math:`(raw - Mcluster) / sd` according to the participant's age and gender. In data from Voss et al. (2015), HRV analysis was performed on 5-min ECG recordings (lead II and lead V2 simultaneously, 500 Hz sampling rate) obtained in supine position after a 5–10 minutes resting phase. The cohort of healthy subjects consisted of 782 women and 1124 men between the ages of 25 and 74 years, clustered into 4 groups: YF (Female, Age = [25-49], n=571), YM (Male, Age = [25-49], n=744), EF (Female, Age = [50-74], n=211) and EM (Male, Age = [50-74], n=571).
References
-----------
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
"""
hrv_adjusted = {}
if position == "supine":
if sex == "m":
if age <= 49:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-930)/133
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-45.8)/18.8
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-34.0)/18.3
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-203)/262
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-101)/143
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-3.33)/3.47
else:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-911)/128
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-33.0)/14.8
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-20.5)/11.0
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-84)/115
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-29.5)/36.6
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-4.29)/4.06
if sex == "f":
if age <= 49:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-901)/117
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-44.9)/19.2
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-36.5)/20.1
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-159)/181
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-125)/147
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-2.75)/2.93
else:
hrv_adjusted["meanNN_Adjusted"] = (hrv["meanNN"]-880)/115
hrv_adjusted["sdNN_Adjusted"] = (hrv["sdNN"]-31.6)/13.6
hrv_adjusted["RMSSD_Adjusted"] = (hrv["RMSSD"]-22.0)/13.2
hrv_adjusted["LF_Adjusted"] = (hrv["LF"]-66)/83
hrv_adjusted["HF_Adjusted"] = (hrv["HF"]-41.4)/72.1
hrv_adjusted["LF/HF_Adjusted"] = (hrv["LF/HF"]-2.09)/2.05
return(hrv_adjusted) | fe3ab5e6f97920f44b7a32928785a19a6185e3d9 | 6,489 |
import warnings
def declared_attr_roles(rw=None, call=None, read=None, write=None):
"""
Equivalent of :func:`with_roles` for use with ``@declared_attr``::
@declared_attr
@declared_attr_roles(read={'all'})
def my_column(cls):
return Column(Integer)
While :func:`with_roles` is always the outermost decorator on properties
and functions, :func:`declared_attr_roles` must appear below
``@declared_attr`` to work correctly.
.. deprecated:: 0.6.1
Use :func:`with_roles` instead. It works for
:class:`~sqlalchemy.ext.declarative.declared_attr` since 0.6.1
"""
def inner(f):
@wraps(f)
def attr(cls):
# Pass f(cls) as a parameter to with_roles.inner to avoid the test for
# iterables within with_roles. We have no idea about the use cases for
# declared_attr in downstream code. There could be a declared_attr
# that returns a list that should be accessible via the proxy.
return with_roles(rw=rw, call=call, read=read, write=write)(f(cls))
return attr
warnings.warn("declared_attr_roles is deprecated; use with_roles", stacklevel=2)
return inner | 4128754046a18e332d5b6f8ba7e2c60d1d576c6b | 6,490 |
def _in_iterating_context(node):
"""Check if the node is being used as an iterator.
Definition is taken from lib2to3.fixer_util.in_special_context().
"""
parent = node.parent
# Since a call can't be the loop variant we only need to know if the node's
# parent is a 'for' loop to know it's being used as the iterator for the
# loop.
if isinstance(parent, astroid.For):
return True
# Need to make sure the use of the node is in the iterator part of the
# comprehension.
elif isinstance(parent, astroid.Comprehension):
if parent.iter == node:
return True
# Various built-ins can take in an iterable or list and lead to the same
# value.
elif isinstance(parent, astroid.Call):
if isinstance(parent.func, astroid.Name):
parent_scope = parent.func.lookup(parent.func.name)[0]
if _is_builtin(parent_scope) and parent.func.name in _ACCEPTS_ITERATOR:
return True
elif isinstance(parent.func, astroid.Attribute):
if parent.func.attrname == 'join':
return True
# If the call is in an unpacking, there's no need to warn,
# since it can be considered iterating.
elif (isinstance(parent, astroid.Assign) and
isinstance(parent.targets[0], (astroid.List, astroid.Tuple))):
if len(parent.targets[0].elts) > 1:
return True
return False | f1109b22842e3e9d6306a266b0654670a9f30ac8 | 6,491 |
def to_point(obj):
"""Convert `obj` to instance of Point."""
if obj is None or isinstance(obj, Point):
return obj
if isinstance(obj, str):
obj = obj.split(",")
return Point(*(int(i) for i in obj)) | 340182f054ebac39133edb09c9e1d049f9dde9d4 | 6,492 |
def issues(request, project_id):
"""问题栏"""
if request.method == "GET":
# 筛选条件 -- 通过get来实现参数筛选
allow_filter_name = ['issues_type', 'status', 'priority', 'assign', 'attention']
condition = {} # 条件
for name in allow_filter_name:
value_list = request.GET.getlist(name)
if not value_list:
continue
condition['{}__in'.format(name)] = value_list
        # Fetch data with pagination
form = IssuesModelForm(request)
issues_obj = Issues.objects.filter(project=request.tracer.project).filter(**condition)
page_object = Pagination(
current_page=request.GET.get('page'),
all_count=issues_obj.count(),
base_url=request.path_info,
query_params=request.GET,
per_page=3,
)
issues_object_list = issues_obj[page_object.start:page_object.end]
project_total_user = [(request.tracer.project.create_user_id, request.tracer.project.create_user.username,)]
join_user = ProjectUser.objects.filter(project_id=project_id).values_list('user_id', 'user__username')
project_total_user.extend(join_user)
invite_form = InviteModelForm(data=request.POST)
context = {
'form': form,
'invite_form': invite_form,
'issues_object_list': issues_object_list,
'page_html': page_object.page_html(),
'filter_list': [
{'title': '问题类型', 'filter': CheckFilter('issues_type',
IssuesType.objects.filter(project_id=project_id).values_list(
'id',
'title'),
request)},
{'title': '状态', 'filter': CheckFilter('status', Issues.STATUS_CHOICES, request)},
{'title': '优先级', 'filter': CheckFilter('priority', Issues.PRIORITY_CHOICES, request)},
{'title': '指派者', 'filter': SelectFilter('assign', project_total_user, request)},
{'title': '关注者', 'filter': SelectFilter('attention', project_total_user, request)},
]
}
return render(request, 'web/issues.html', context)
if request.method == "POST":
form = IssuesModelForm(request, data=request.POST)
if form.is_valid():
            # Add the issue data
form.instance.project = request.tracer.project
form.instance.create_user = request.tracer.user
form.save()
return JsonResponse({'code': 200})
return JsonResponse({'msg': form.errors, 'code': 416}) | 099b292e8143f1559f3e78e96ad555eaa7328449 | 6,493 |
from sqlalchemy import and_, desc
def get_30mhz_rht_data(sensor_id):
"""
Produces a JSON with the 30MHz RH & T sensor data for a specified sensor.
Args:
sensor_id - Advanticsys sensor ID
Returns:
result - JSON string
"""
dt_from, dt_to = parse_date_range_argument(request.args.get("range"))
query = (
db.session.query(
ReadingsZensieTRHClass.sensor_id,
ReadingsZensieTRHClass.timestamp,
ReadingsZensieTRHClass.temperature,
ReadingsZensieTRHClass.humidity,
ReadingsZensieTRHClass.time_created,
ReadingsZensieTRHClass.time_updated,
)
.filter(
and_(
ReadingsZensieTRHClass.sensor_id == sensor_id,
ReadingsZensieTRHClass.timestamp >= dt_from,
ReadingsZensieTRHClass.timestamp <= dt_to,
)
)
.order_by(desc(ReadingsZensieTRHClass.timestamp))
)
execute_result = db.session.execute(query).fetchall()
result = jasonify_query_result(execute_result)
return result | d688850720162c33ea9bdd84eaed9ecd83a49902 | 6,494 |
import demjson
import pandas as pd
import requests
def stock_em_gpzy_industry_data() -> pd.DataFrame:
"""
    Eastmoney (东方财富网) - Data Center - Featured Data - Equity Pledges - Pledge Ratios of Listed Companies - Industry Data
http://data.eastmoney.com/gpzy/industryData.aspx
:return: pandas.DataFrame
"""
url = "http://dcfm.eastmoney.com/EM_MutiSvcExpandInterface/api/js/get"
page_num = _get_page_num_gpzy_industry_data()
temp_df = pd.DataFrame()
for page in range(1, page_num + 1):
print(f"一共{page_num}页, 正在下载第{page}页")
params = {
"type": "ZD_HY_SUM",
"token": "70f12f2f4f091e459a279469fe49eca5",
"cmd": "",
"st": "amtshareratio_pj",
"sr": "-1",
"p": str(page),
"ps": "5000",
"js": "var SIqThurI={pages:(tp),data:(x),font:(font)}",
"rt": "52584617",
}
res = requests.get(url, params=params)
data_text = res.text
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
map_dict = dict(
zip(
pd.DataFrame(data_json["font"]["FontMapping"])["code"],
pd.DataFrame(data_json["font"]["FontMapping"])["value"],
)
)
for key, value in map_dict.items():
data_text = data_text.replace(key, str(value))
data_json = demjson.decode(data_text[data_text.find("={") + 1 :])
temp_df = temp_df.append(pd.DataFrame(data_json["data"]), ignore_index=True)
    # Column names are kept in Chinese because the rest of this function (and
    # downstream callers) index the DataFrame by these exact labels:
    # 统计时间 = statistics date, 行业 = industry, 平均质押比例(%) = average pledge
    # ratio (%), 公司家数 = number of companies, 质押总笔数 = total pledge deals,
    # 质押总股本 = total pledged shares, 最新质押市值 = latest pledged market value.
    temp_df.columns = [
"统计时间",
"-",
"行业",
"平均质押比例(%)",
"公司家数",
"质押总笔数",
"质押总股本",
"最新质押市值",
]
temp_df = temp_df[["统计时间", "行业", "平均质押比例(%)", "公司家数", "质押总笔数", "质押总股本", "最新质押市值"]]
temp_df["统计时间"] = pd.to_datetime(temp_df["统计时间"])
return temp_df | 4ac0de7bdbae197c9d89dc663dbef594e2010fc6 | 6,495 |
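# Usage sketch: fetch the industry pledge-ratio table and inspect the first
# rows. This relies on `_get_page_num_gpzy_industry_data`, defined elsewhere in
# this module, and on the EastMoney endpoint still serving data.
if __name__ == "__main__":
    stock_em_gpzy_industry_data_df = stock_em_gpzy_industry_data()
    print(stock_em_gpzy_industry_data_df.head())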
import tensorflow as tf
def to_float32(x: tf.Tensor) -> tf.Tensor:
"""Cast the given tensor to float32.
Args:
x: The tensor of any type.
Returns:
The tensor casts to float32.
"""
return tf.cast(x, tf.float32) | 2c25ea5450e86139fa1c21041be73e21f01b1bff | 6,496 |
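# Minimal usage sketch: casting before tf.reduce_mean avoids the truncating
# integer mean that would otherwise be computed on an int32 tensor.
x = tf.constant([1, 2])                 # dtype int32
mean = tf.reduce_mean(to_float32(x))    # 1.5, rather than the truncated 1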
def cli_usage(name=None):
"""
    Custom usage message to override the default in `cli.py`.
"""
return """
{logo}
usage: signalyze [-h] [-o OUTPUT] [--show-name] [-b | -w | -all] [--show-graph | --show-extra-info]
""".format(logo=get_logo()) | f512be4404da92aff9a237cdece487a266cbf175 | 6,497 |
def unban_chat_member(chat_id, user_id, **kwargs):
"""
Use this method to unban a previously kicked user in a supergroup. The user will not return to the group automatically,
but will be able to join via link, etc. The bot must be an administrator in the group for this to work
:param chat_id: Unique identifier for the target chat or username of the target channel (in the format @channelusername)
:param user_id: Unique identifier of the target user
:param kwargs: Args that get passed down to :class:`TelegramBotRPCRequest`
:type chat_id: int or str
:type user_id: int
:returns: Returns True on success.
:rtype: bool
"""
# required args
params = dict(
chat_id=chat_id,
user_id=user_id,
)
return TelegramBotRPCRequest('unbanChatMember', params=params, on_result=lambda result: result, **kwargs) | cc1558e1d49841e47ebf4f457e615319e47abae4 | 6,498 |
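# Hypothetical call sketch. `TelegramBotRPCRequest` is defined elsewhere in
# this library and performs the actual HTTP request; extra kwargs are passed
# straight through to it, so the `token` argument shown here is an assumption
# about that request class, not something this snippet defines.
unban_chat_member(chat_id="@mychannel", user_id=123456789,
                  token="123456:ABC-BOT-TOKEN")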
from typing import Optional
import re
def parse_progress_line(prefix: str, line: str) -> Optional[float]:
"""Extract time in seconds from a prefixed string."""
    regexp = prefix + r"(?P<hours>\d+):(?P<minutes>\d{2}):(?P<seconds>\d{2}\.\d{2})"
match = re.search(regexp, line)
if not match:
return None
return (
int(match.group("hours")) * 3600
+ int(match.group("minutes")) * 60
+ float(match.group("seconds"))
) | 690b2f0e48a5f584da646f9e4058ed75e654251e | 6,499 |
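# Usage sketch with an FFmpeg-style progress line (the "time=" prefix is an
# assumption about the caller, not something this snippet defines).
line = "frame=  100 fps=25 q=28.0 size=256kB time=00:01:30.50 bitrate=..."
assert parse_progress_line("time=", line) == 90.5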