content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M)
---|---|---|
import ctypes
import six
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, six.text_type):
short_path = short_path.decode(_fsencoding())
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path | 72d6b9fc1fb8acd6285019a8d48ea42e847ce8db | 4,900 |
from tf_agents.typing import types
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
def _create_behavioral_cloning_agent(
time_step_spec: types.NestedTensorSpec, action_spec: types.NestedTensorSpec,
preprocessing_layers: types.NestedLayer,
policy_network: types.Network) -> tfa.agents.TFAgent:
"""Creates a behavioral_cloning_agent."""
network = policy_network(
time_step_spec.observation,
action_spec,
preprocessing_layers=preprocessing_layers,
name='QNetwork')
return behavioral_cloning_agent.BehavioralCloningAgent(
time_step_spec, action_spec, cloning_network=network, num_outer_dims=2) | c3420767aaa153ef44054fdb4fbdcc9540d59775 | 4,901 |
import os
def prep_fastq_inputs(in_files, data):
"""Prepare bgzipped fastq inputs
"""
if len(in_files) == 1 and _is_bam_input(in_files):
out = _bgzip_from_bam(in_files[0], data["dirs"], data)
elif len(in_files) == 1 and _is_cram_input(in_files):
out = _bgzip_from_cram(in_files[0], data["dirs"], data)
elif len(in_files) in [1, 2] and _ready_gzip_fastq(in_files, data):
out = _symlink_in_files(in_files, data)
else:
if len(in_files) > 2:
fpairs = fastq.combine_pairs(in_files)
pair_types = set([len(xs) for xs in fpairs])
assert len(pair_types) == 1
fpairs.sort(key=lambda x: os.path.basename(x[0]))
organized = [[xs[0] for xs in fpairs]]
if len(fpairs[0]) > 1:
organized.append([xs[1] for xs in fpairs])
in_files = organized
parallel = {"type": "local", "num_jobs": len(in_files),
"cores_per_job": max(1, data["config"]["algorithm"]["num_cores"] // len(in_files))}
inputs = [{"in_file": x, "read_num": i, "dirs": data["dirs"], "config": data["config"],
"is_cwl": "cwl_keys" in data,
"rgnames": data["rgnames"]}
for i, x in enumerate(in_files) if x]
out = run_multicore(_bgzip_from_fastq_parallel, [[d] for d in inputs], data["config"], parallel)
return out | 1dbb51a07068e5a77c59b771a719fb1c2f41858c | 4,902 |
def sidequery():
"""Serves AJAX call for HTML content for the sidebar (*query* **record** page).
Used when the user is switching between **material** and **record** pages.
See also [M:RECORD.body][record.RECORD.body].
Client code: [{sidecontent.fetch}][sidecontentfetch].
"""
session.forget(response)
Query = QUERY()
Record = RECORDQUERY(Query)
return Record.body() | 6f5c6660f25e568ea4fa2ad046eac5a57cb4f7e5 | 4,903 |
import collections
import numpy as np
import tensorflow as tf
def learn_encoding_model_ln(sess, met, stimulus, response, ttf_in=None,
initialize_RF_using_ttf=True,
scale_ttf=True, lr=0.1,
lam_l1_rf=0):
"""Learn GLM encoding model using the metric.
Uses ttf to initialize the RF only if ttf_in is given and
initialize_RF_using_ttf=True.
If scale_ttf is True, it scales time course to match firing rate
in observed data.
"""
# get paramters
data_len = stimulus.shape[0]
n_cells = response.shape[2]
dimx = stimulus.shape[1]
dimy = stimulus.shape[2]
# generate responses using current parameters.
# stimulus - response placeholders
stim_tf = tf.placeholder(dtype=tf.float32, shape=[None, dimx, dimy])
resp_tf = tf.placeholder(dtype=tf.float32, shape=[None, n_cells])
# Compute variables.
tlen = 30
if ttf_in is None:
ttf = tf.Variable(0.1 + 0*np.random.randn(tlen).astype(np.float32),
name='ttf')
else:
ttf = tf.Variable(ttf_in.astype(np.float32), name='ttf')
# Time filter.
stim_inp = tf.expand_dims(tf.transpose(stim_tf, [1, 0, 2]), 3)
ttf_filt = tf.expand_dims(tf.expand_dims(tf.expand_dims(ttf, 1), 2), 3)
stim_time_filtered = tf.nn.conv2d(stim_inp, ttf_filt, strides=[1, 1, 1, 1],
padding='VALID')
stim_time_filt_reshape = tf.transpose(stim_time_filtered,
[1, 0, 2, 3])
# Initialize remaining variables
uninitialized_vars = []
for var in tf.all_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
init_new_vars_op = tf.variables_initializer(uninitialized_vars)
sess.run(init_new_vars_op)
# compute STAs
if ttf_in is None or not initialize_RF_using_ttf :
stas_np = None
print('RF will be randomly initialized')
else:
stas = tf.reshape(tf.matmul(tf.transpose(tf.reshape(stim_time_filt_reshape
, [-1, dimx*dimy])),
resp_tf), [dimx, dimy, n_cells])
print('RF will be initialized to STAs computed using given ttf')
batch_sz = data_len - tlen
end_time = data_len
feed_dict = {stim_tf: stimulus[end_time-batch_sz-(tlen-1):
end_time, : , : ].astype(np.float32),
resp_tf: response[0, end_time-batch_sz:
end_time, : ].astype(np.float32)}
stas_np = sess.run(stas, feed_dict=feed_dict)
# Space filter.
if stas_np is None:
RF_all = tf.Variable(0.1 + 0*np.random.randn(dimx, dimy,
n_cells).astype(np.float32),
name='RFs')
else :
RF_all = tf.Variable(stas_np.astype(np.float32),
name='RFs')
stim_space_filtered = tf.reduce_sum(tf.reduce_sum(stim_time_filt_reshape * RF_all, 2),
1) # ? x n_cells
generator_signal = stim_space_filtered
firing_rate = tf.nn.relu(generator_signal)
# update parameters.
distances = met.get_expected_score(firing_rate, resp_tf)
# distances = tf.reduce_sum(tf.pow(firing_rate - resp_tf, 2), 1)
loss_encoding = (tf.reduce_sum(distances) +
lam_l1_rf*tf.reduce_sum(tf.abs(RF_all)))
train_step_RF = tf.train.AdamOptimizer(lr).minimize(loss_encoding,
var_list=[RF_all])
train_step_ttf = tf.train.AdamOptimizer(lr).minimize(loss_encoding,
var_list=[ttf])
# Initialize remaining variables
uninitialized_vars = []
for var in tf.all_variables():
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninitialized_vars.append(var)
init_new_vars_op = tf.variables_initializer(uninitialized_vars)
sess.run(init_new_vars_op)
## Learning
# test data
batch_sz = 1000
end_time = np.random.randint(batch_sz, data_len)
feed_dict_test = {stim_tf: stimulus[end_time-batch_sz-(tlen-1): end_time,:,:].astype(np.float32),
resp_tf: response[0, end_time-batch_sz: end_time, :].astype(np.float32)}
# Scale RF and TTF
# Scale time course to match firing rate of all cells.
if scale_ttf:
scale_ttf = tf.reduce_mean(resp_tf) / tf.reduce_mean(firing_rate)
update_ttf = tf.assign(ttf, ttf*scale_ttf)
batch_sz = 1000
end_time = np.random.randint(batch_sz, data_len)
feed_dict = {stim_tf: stimulus[end_time-batch_sz-(tlen-1): end_time,:,:].astype(np.float32),
resp_tf: response[0, end_time-batch_sz: end_time, :].astype(np.float32)}
sess.run(update_ttf, feed_dict=feed_dict)
print('Time course scaled to match firing rate')
# TODO(bhaishahster): Scale RF to match firing rate of individual cells.
for outer_iter in range(10):
# Plot test loss
loss_encoding_np_test = sess.run(loss_encoding, feed_dict=feed_dict_test)
print(outer_iter, loss_encoding_np_test)
# Learn spatial RF
for iiter in range(1000):
end_time = np.random.randint(batch_sz+1000, data_len)
feed_dict = {stim_tf: stimulus[end_time-batch_sz-(tlen-1): end_time,:,:].astype(np.float32),
resp_tf: response[0, end_time-batch_sz: end_time, :].astype(np.float32)}
_, loss_encoding_np = sess.run([train_step_RF, loss_encoding],
feed_dict=feed_dict)
'''
if iiter % 100 == 0:
loss_encoding_np_test = sess.run(loss_encoding, feed_dict=feed_dict_test)
print(iiter, end_time, loss_encoding_np, loss_encoding_np_test)
'''
# Learn temporal part
for iiter in range(1000):
end_time = np.random.randint(batch_sz+1000, data_len)
feed_dict = {stim_tf: stimulus[end_time-batch_sz-(tlen-1): end_time,:,:].astype(np.float32),
resp_tf: response[0, end_time-batch_sz: end_time, :].astype(np.float32)}
_, loss_encoding_np = sess.run([train_step_ttf, loss_encoding],
feed_dict=feed_dict)
# Collect return parameters.
RF_np = sess.run(RF_all)
ttf_np = sess.run(ttf)
encoding_model = collections.namedtuple('encoding_model',
['stimulus', 'firing_rate'])
model = encoding_model(stim_tf, firing_rate)
return RF_np, ttf_np, model
'''
# do some response prediction
batch_sz = 1000
end_time = np.random.randint(batch_sz, data_len)
feed_dict_fr = {stim_tf: stimulus[end_time-batch_sz-(tlen-1): end_time,:,:].astype(np.float32),
resp_tf: response[0, end_time-batch_sz: end_time, :].astype(np.float32)}
fr_np = self.sess.run(firing_rate, feed_dict=feed_dict_fr)
spks_sample = np.sum(np.random.rand(batch_sz, n_cells) < fr_np)
spks_rec = np.sum(response[0, end_time-batch_sz: end_time, :].astype(np.float32))
print('True spks %d, sample spks %d' % (spks_rec, spks_sample))
plt.plot(fr_np[:, 0]);
plt.show()
# plot RF
RF_np = self.sess.run(RF_all)
plt.figure()
for icell in range(n_cells):
plt.subplot(np.ceil(np.sqrt(n_cells)), np.ceil(np.sqrt(n_cells)), icell+1)
plt.imshow(RF_np[:, :, icell], interpolation='nearest', cmap='gray')
plt.show()
# plot ttf
ttf_np = self.sess.run(ttf)
plt.plot(ttf_np)
plt.hold(True)
plt.plot(ttf_in)
plt.legend(['Fit', 'Initialized'])
plt.title('ttf')
plt.show()
''' | bdabe98b689466faeb7fe954cb2cd04e9217fba8 | 4,904 |
import pandas
import numpy
def random_answers_2020_ml():
"""
Generates random answers the machine learning challenge of
hackathons :ref:`l-hackathon-2020`.
"""
df = pandas.DataFrame({"index": numpy.arange(473333)})
df['label'] = numpy.random.randint(low=0, high=2, size=(df.shape[0], ))
df['score'] = numpy.random.random((df.shape[0], ))
return df | 24a721c1c8e512ade6293644eff61b0866c3f0fe | 4,905 |
import optparse
import os
def _buildParser():
"""Returns a custom OptionParser for parsing command-line arguments.
"""
parser = _ErrorOptionParser(__doc__)
filter_group = optparse.OptionGroup(parser,
'File Options',
'Options used to select which files to process.')
filter_group.add_option(
'-f', '--files', dest='files_pattern',
default='(?!^.*\.pyc|.*\.ico|.*\.gif|.*\.png|.*\.jpg$)',
metavar='FILES_REGEX',
help=('Python regex pattern (*not* a glob!) defining files to process'
' in each directory [default: %default]'))
filter_group.add_option(
'-F', '--follow', dest='follow_symlinks', default=False,
action='store_true',
help=('follow file and subdirectory symlinks (possibly *DANGEROUS*)'
' [default: %default]'))
parser.add_option_group(filter_group)
dir_group = optparse.OptionGroup(parser,
'Directory Options',
'Options used to indicate which directories to traverse.')
dir_group.add_option(
'-s', '--start', dest='start_path', default=os.curdir, metavar='PATH',
help='directory in which to start processing files [default: %default]')
dir_group.add_option(
'-R', '--recursive', dest='recurse_dirs', default=False,
action='store_true',
help='recurse into subdirectories [default: %default]')
dir_group.add_option(
'-d', '--dirs', dest='dirs_pattern', default='^[^.].*$',
metavar='SUBDIRS_REGEX',
help=('Python regex pattern (*not* a glob!) defining subdirectories to'
' recurse into (if --recursive) [default: %default]'))
parser.add_option_group(dir_group)
output_group = optparse.OptionGroup(parser,
'Output Options',
'Options used to control program output.')
output_group.add_option(
'-a', '--abspath', dest='abs_path', default=False, action='store_true',
help=('output absolute paths instead of relative paths'
' [default: %default]'))
output_group.add_option(
'', '--nopaths', dest='hide_paths', default=False, action='store_true',
help=('suppress printing of file path names for successfully matched'
' files to stdout [default: %default]'))
output_group.add_option(
'', '--notext', dest='hide_text', default=False, action='store_true',
help=('suppress find/replace text output to stdout (but still print'
' paths if not --nopath, and still perform replacements if'
' specified) [default: %default]'))
output_group.add_option(
'-q', '--quiet', dest='quiet_output', default=False, action='store_true',
help=('suppress *all* printed output to stdout (but still perform'
' replacements if specified) [default: %default]'))
parser.add_option_group(output_group)
replace_group = optparse.OptionGroup(parser,
'Replace Options',
'Options applied when matches in files are replaced with substitutions.'
' (Only possible if REPLACE_FORMAT is supplied.)')
replace_group.add_option(
'-o', '--overwrite', dest='overwrite_files', default=False,
action='store_true',
help=('overwrite original files with formatted text substituted for'
' matches [default: %default]'))
replace_group.add_option(
'-b', '--backup', dest='backup_ext', default='', metavar='EXTENSION',
help=('if supplied, and file would be overwritten, backup original'
' file with the supplied extension [default is no backups of'
' overwritten files are kept]'))
replace_group.add_option(
'-n', '--new', dest='new_ext', default='', metavar='EXTENSION',
help=('if supplied, and file has matches and and is altered by'
' substitutions, create a new file with the supplied extension'
' [default is no new file is created]'))
parser.add_option_group(replace_group)
return parser | 143fdb5ba476d4b4f5203ce7bf8fca5f6b4964f5 | 4,906 |
def _sizeof_fmt(num, suffix='B'):
"""Format a number as human readable, based on 1024 multipliers.
Suited to be used to reformat a size expressed in bytes.
By Fred Cirera, after https://stackoverflow.com/a/1094933/1870254
Args:
num (int): The number to be formatted.
suffix (str): the measure unit to append at the end of the formatted
number.
Returns:
str: The formatted number including multiplier and measure unit.
"""
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix) | c70c9ce46f6b391e2389329a6fcd50bf863ea041 | 4,907 |
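A minimal usage sketch for _sizeof_fmt above (assuming the function is in scope; the commented values follow directly from the 1024-based loop):
print(_sizeof_fmt(4096))           # '4.0KiB'
print(_sizeof_fmt(123456789))      # '117.7MiB'
print(_sizeof_fmt(0, suffix='b'))  # '0.0b'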
from typing import Optional
from typing import Tuple
from typing import Union
import click
import subprocess
import os
def train(
package: str,
config: str,
gpus: int,
gpus_per_node: int = None,
cpus_per_task: int = 2,
partition: str = None,
launcher: str = 'none',
port: int = None,
srun_args: Optional[str] = None,
yes: bool = True,
other_args: tuple = ()
) -> Tuple[bool, Union[str, Exception]]:
"""Train a model with given config.
Args:
package (str): The codebase name.
config (str): The config file path. If not exists, will search in the
config files of the codebase.
gpus (int): Number of gpus used for training.
gpus_per_node (int, optional): Number of gpus per node to use
(only applicable to launcher == "slurm"). Defaults to None.
cpus_per_task (int, optional): Number of cpus per task to use
(only applicable to launcher == "slurm"). Defaults to None.
partition (str, optional): The partition name
(only applicable to launcher == "slurm"). Defaults to None.
launcher (str, optional): The launcher used to launch jobs.
Defaults to 'none'.
port (int | None, optional): The port used for inter-process
communication (only applicable to slurm / pytorch launchers).
Default to None. If set to None, will randomly choose a port
between 20000 and 30000.
srun_args (str, optional): Other srun arguments that might be
used, all arguments should be in a string. Defaults to None.
yes (bool): Don’t ask for confirmation. Default: True.
other_args (tuple, optional): Other arguments, will be passed to the
codebase's training script. Defaults to ().
"""
full_name = module_full_name(package)
if full_name == '':
msg = f"Can't determine a unique package given abbreviation {package}"
raise ValueError(highlighted_error(msg))
package = full_name
# If launcher == "slurm", must have following args
if launcher == 'slurm':
msg = ('If launcher is slurm, '
'gpus-per-node and partition should not be None')
flag = (gpus_per_node is not None) and (partition is not None)
assert flag, msg
if port is None:
port = rd.randint(20000, 30000)
if launcher in ['slurm', 'pytorch']:
click.echo(f'Using port {port} for synchronization. ')
if not is_installed(package):
msg = (f'The codebase {package} is not installed, '
'do you want to install the latest release? ')
if yes or click.confirm(msg):
click.echo(f'Installing {package}')
cmd = ['mim', 'install', package]
ret = subprocess.check_call(cmd)
if ret != 0:
msg = f'{package} is not successfully installed'
raise RuntimeError(highlighted_error(msg))
else:
click.echo(f'{package} is successfully installed')
else:
msg = f'You can not train this model without {package} installed.'
return False, msg
pkg_root = get_installed_path(package)
if not osp.exists(config):
# configs is put in pkg/.mim in PR #68
config_root = osp.join(pkg_root, '.mim', 'configs')
if not osp.exists(config_root):
# If not pkg/.mim/config, try to search the whole pkg root.
config_root = pkg_root
# pkg/.mim/configs is a symbolic link to the real config folder,
# so we need to follow links.
files = recursively_find(
pkg_root, osp.basename(config), followlinks=True)
if len(files) == 0:
msg = (f"The path {config} doesn't exist and we can not find "
f'the config file in codebase {package}.')
raise ValueError(highlighted_error(msg))
elif len(files) > 1:
msg = (
f"The path {config} doesn't exist and we find multiple "
f'config files with same name in codebase {package}: {files}.')
raise ValueError(highlighted_error(msg))
# Use realpath instead of the symbolic path in pkg/.mim
config_path = osp.realpath(files[0])
click.echo(
f"The path {config} doesn't exist but we find the config file "
f'in codebase {package}, will use {config_path} instead.')
config = config_path
# tools will be put in package/.mim in PR #68
train_script = osp.join(pkg_root, '.mim', 'tools', 'train.py')
if not osp.exists(train_script):
train_script = osp.join(pkg_root, 'tools', 'train.py')
common_args = ['--launcher', launcher] + list(other_args)
if launcher == 'none':
if gpus:
cmd = ['python', train_script, config, '--gpus',
str(gpus)] + common_args
else:
cmd = ['python', train_script, config, '--device', 'cpu'
] + common_args
elif launcher == 'pytorch':
cmd = [
'python', '-m', 'torch.distributed.launch',
f'--nproc_per_node={gpus}', f'--master_port={port}', train_script,
config
] + common_args
elif launcher == 'slurm':
parsed_srun_args = srun_args.split() if srun_args else []
has_job_name = any([('--job-name' in x) or ('-J' in x)
for x in parsed_srun_args])
if not has_job_name:
job_name = osp.splitext(osp.basename(config))[0]
parsed_srun_args.append(f'--job-name={job_name}_train')
cmd = [
'srun', '-p', f'{partition}', f'--gres=gpu:{gpus_per_node}',
f'--ntasks={gpus}', f'--ntasks-per-node={gpus_per_node}',
f'--cpus-per-task={cpus_per_task}', '--kill-on-bad-exit=1'
] + parsed_srun_args + ['python', '-u', train_script, config
] + common_args
cmd_text = ' '.join(cmd)
click.echo(f'Training command is {cmd_text}. ')
ret = subprocess.check_call(
cmd, env=dict(os.environ, MASTER_PORT=str(port)))
if ret == 0:
return True, 'Training finished successfully. '
else:
return False, 'Training not finished successfully. ' | f2bd2dc2d73612e3f3f8bda477cb6838915a2209 | 4,908 |
import sys
import os
import re
import datetime
import commands
def CrawlWithSmbclient(config):
"""Crawls a list of SMB file shares, using smbclient.
Args:
config: Config object holding global configuration from commands flags
Returns:
report: Report object holding the results from the crawling the shares.
"""
shares = config.Shares()
if not shares:
print "No shares found!"
sys.exit(1)
if config.debug > 0:
print "Shares to crawl: \n - %s" % '\\\n - '.join([str(s) for s in shares])
report = Report()
for share in shares:
# builds SMB client command using either smbclient
opts = ["-N"]
if share.domain is not None:
opts.append("-W%s" % share.domain)
if share.username is not None:
if share.password is not None:
opts.append('-U"%s%%%%%s"' % (share.username, share.password))
else:
opts.append('-U"%s"' % share.username)
else:
opts.append("-Uguest")
client = "%s %s //%s/%s -c'%%s'" % (
config.client, " ".join(opts), share.hostname, share.share)
crawl_queue = [share.filename]
crawled = []
report.Add(Document(share))
while crawl_queue:
filename = crawl_queue.pop()
while filename in crawled:
filename = crawl_queue.pop()
crawled.append(filename)
file_url = share.Root() + filename[1:]
if config.debug > 1:
print "Trying %s" % share.Url(filename)
if filename[-1] == "/":
cmd = "ls \"%s*\"" % filename.replace("/", '\\')
else:
cmd = "get \"%s\" /dev/null" % filename.replace("/", '\\')
if config.debug > 3:
print "Running command: %s" % (client % cmd,)
status, output = commands.getstatusoutput(client % cmd)
if filename[-1] == "/":
# Get filenames out of directory listing
for line in output.split("\n"):
if line[:2] == " " and line[2:4] != "..":
parts = line.split()
timestamp = ":".join((parts[-1], str(
SHORT_MONTH_NAMES.index(parts[-4])+1), parts[-3], parts[-2]))
timestamp = datetime.datetime(*map(int, timestamp.split(":")))
size = int(parts[-6])
child = " ".join(parts[:-6])
            if child[-1] == "D": child = child[:-2] + "/" # strip D attribute
regex = re.compile("^(A|H|S)(A|H|S)?(A|H|S)?$")
if regex.match(child[-1]): child = child[:-2] # strip attributes
if child[0] == ".":
doc = Document(share, filename, lastmod=timestamp)
doc.list_size = doc.real_size = 4096 # Just to avoid zero-sized
report.Add(doc)
continue
path = os.path.join(filename, child)
if config.maxdepth != -1 and (
path[:-1].count("/") - share.depth) > config.maxdepth:
continue
doc = Document(share, path, lastmod=timestamp)
crawl_queue.append(path)
if doc.IsFile:
doc.list_size = size
report.Add(doc)
if config.debug > 3:
print "Found '%s' last modified %s" % (doc.Url(), doc.lastmod)
elif line[:3] == "NT_":
status = line.split()[0]
if config.debug > 2:
print "%s for %s" % (status, filename)
report.Update(file_url, status=status)
else:
words = line.split()
for error in words:
if error[:3] == "NT_":
report.Update(file_url, status=error)
else:
# Get result of downloading the file
for line in output.split("\n"):
if line[:7] == "getting":
parts = line.split()
size = int(parts[parts.index("size") + 1])
if config.debug > 2:
print "%s has size %s" % (filename, size)
report.Update(file_url, real_size=size)
elif line[:3] == "NT_":
error = line.split()[0]
if config.debug > 2:
print "%s for %s" % (error, filename)
report.Update(file_url, status=error)
else:
words = line.split()
for error in words:
if error[:3] == "NT_":
report.Update(file_url, status=error)
return report | d29c7bbf185f56555ab7cd8dd775063525e96b22 | 4,909 |
import sys
def new_image():
"""
Display an image, and ask the user to label it
"""
user_id = current_user.user_id
categories = current_app.config["CATEGORIES"]
label_form = LabelForm()
label_form.cat_radio.choices = [(cat,cat) for cat in categories]
if request.method=="POST":
if not "image_id" in session.keys():
print("No session ID - how did this happen?", file=sys.stderr)
else:
image_id = session["image_id"]
label = label_form.cat_radio.data
notes = label_form.notes.data
save_label(user_id, image_id, label, notes)
# get the next image
image_location, is_url, image_id = get_image(user_id)
if not image_location:
return render_template("no_images.html")
# store the image id in the session
session["image_id"] = image_id
# now reset the form to re-render the page
new_label_form = LabelForm(formdata=None)
new_label_form.cat_radio.choices = [(cat,cat) for cat in categories]
return render_template("new_image.html",
new_image=image_location,
img_id=image_id,
form=new_label_form) | f7b9cb489ccef2fb17103e69386c9f508d97b6ed | 4,910 |
import numpy as np
def num_cluster_members(matrix, identity_threshold):
"""
Calculate number of sequences in alignment
within given identity_threshold of each other
Parameters
----------
matrix : np.array
N x L matrix containing N sequences of length L.
Matrix must be mapped to range(0, num_symbols) using
map_matrix function
identity_threshold : float
Sequences with at least this pairwise identity will be
grouped in the same cluster.
Returns
-------
np.array
Vector of length N containing number of cluster
members for each sequence (inverse of sequence
weight)
"""
N, L = matrix.shape
L = 1.0 * L
# minimal cluster size is 1 (self)
num_neighbors = np.ones((N))
# compare all pairs of sequences
for i in range(N - 1):
for j in range(i + 1, N):
pair_id = 0
            for k in range(int(L)):  # L was cast to float above for the identity ratio
if matrix[i, k] == matrix[j, k]:
pair_id += 1
if pair_id / L >= identity_threshold:
num_neighbors[i] += 1
num_neighbors[j] += 1
return num_neighbors | e9034a728b22f7a594ef7842f2a4039559751e21 | 4,911 |
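A small usage sketch for num_cluster_members above (assuming the function is in scope; the toy matrix is already integer-mapped as the docstring requires):
import numpy as np
matrix = np.array([
    [0, 1, 2, 3],
    [0, 1, 2, 0],   # 75% identical to the first row
    [3, 2, 1, 0],   # unrelated
])
print(num_cluster_members(matrix, identity_threshold=0.7))  # [2. 2. 1.]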
from subprocess import check_output
def get_score(command: str) -> float:
"""Get pylint score"""
output = check_output(command, shell=True).decode("utf-8")
start = output.find("Your code has been rated at ")
if start == -1:
raise ValueError(f'Could not find quality score in "{output.rstrip()}".')
start += len("Your code has been rated at ")
end = start + output[start:].find("/")
score = float(output[start:end])
return score | d32b6f9496033d4c2b569ebc7403be43bb43ceb1 | 4,912 |
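A usage sketch for get_score above (assuming the function is in scope; the module name is hypothetical, and since pylint exits non-zero when it reports issues, check_output may raise unless an --exit-zero style invocation is used):
score = get_score("pylint --exit-zero my_module.py")
print(f"pylint score: {score}/10")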
def tf_abstract_eval(f):
"""Returns a function that evaluates `f` given input shapes and dtypes.
It transforms function `f` to a function that performs the same computation as
`f` but only on shapes and dtypes (a.k.a. shape inference).
Args:
f: the function to be transformed.
Returns:
A function whose input arguments can be either the same as `f`'s or only
their shapes/dtypes represented by `ShapeDtype`, and whose return values are
`ShapeDtype`s with the same nested structure as `f`'s return values.
"""
f_shape = tf_np_extensions.eval_on_shapes(f)
def from_shape_type(x):
if isinstance(x, ShapeDtype):
return tf.TensorSpec(x.shape, x.dtype)
else:
return x
def to_shape_type(x): # pylint: disable=missing-docstring
# TODO(wangpeng): handle partial output shapes using `tf.shape`.
def to_numpy_shape(s):
if s.is_fully_defined():
return tuple(s.as_list())
else:
raise ValueError("The output shapes (%s) of the dry-run'ed function are"
' not fully defined.' % s)
def to_numpy_dtype(t):
return np.dtype(t.as_numpy_dtype)
if isinstance(x, tf.TensorSpec):
return ShapeDtype(to_numpy_shape(x.shape), to_numpy_dtype(x.dtype))
else:
return x
def f_return(*args):
args = tf.nest.map_structure(from_shape_type, args)
res = f_shape(*args)
return tf.nest.map_structure(to_shape_type, res)
return f_return | 7bdd5ddfa69f3be635aed08be9476296c108f79e | 4,913 |
import json
import numpy as np
def __process_input(request_data: str) -> np.array:
"""
Converts input request data into numpy array
:param request_data in json format
:return: numpy array
"""
return np.asarray(json.loads(request_data)["input"]) | 7639018f69a4e72568cdf86abc503133ebd734af | 4,914 |
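A usage sketch for __process_input above (assuming the function is in scope):
payload = '{"input": [[1.0, 2.0], [3.0, 4.0]]}'
arr = __process_input(payload)
print(arr.shape)  # (2, 2)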
import math
def getDist_P2L(PointP,Pointa,Pointb):
    """Compute the distance from point P to the line through points a and b.
    PointP: coordinates of the point
    Pointa: coordinates of point a on the line
    Pointb: coordinates of point b on the line
    """
    # Build the line equation Ax + By + C = 0
A=0
B=0
C=0
A=Pointa[1]-Pointb[1]
B=Pointb[0]-Pointa[0]
C=Pointa[0]*Pointb[1]-Pointa[1]*Pointb[0]
    # Plug into the point-to-line distance formula
distance=0
distance=(A*PointP[0]+B*PointP[1]+C)/math.sqrt(A*A+B*B)
return distance | ca0ec1fc25183a240179faef7473d7b86758a92b | 4,915 |
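A usage sketch for getDist_P2L above (assuming the function is in scope); distance from the origin to the vertical line x = 3:
d = getDist_P2L((0, 0), (3, 0), (3, 1))
print(d)  # 3.0 -- the value is signed and can be negative for the opposite point order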
import mxnet as mx
def skipgram_batch(centers, contexts, num_tokens, dtype, index_dtype):
"""Create a batch for SG training objective."""
contexts = mx.nd.array(contexts[2], dtype=index_dtype)
indptr = mx.nd.arange(len(centers) + 1)
centers = mx.nd.array(centers, dtype=index_dtype)
centers_csr = mx.nd.sparse.csr_matrix(
(mx.nd.ones(centers.shape), centers, indptr), dtype=dtype,
shape=(len(centers), num_tokens))
return centers_csr, contexts, centers | e16c7ffd6c4f18e247a885de0b7477ddfa5ed02c | 4,916 |
import numpy as np
from astropy import coordinates as crd
from astropy import units as unt
from astropy.time import Time
def JD2RA(JD, longitude=21.42830, latitude=-30.72152, epoch='current'):
"""
Convert from Julian date to Equatorial Right Ascension at zenith
during a specified epoch.
Parameters:
-----------
JD : type=float, a float or an array of Julian Dates
longitude : type=float, longitude of observer in degrees east, default=HERA longitude
latitude : type=float, latitude of observer in degrees north, default=HERA latitutde
This only matters when using epoch="J2000"
epoch : type=str, epoch for RA calculation. options=['current', 'J2000'].
The 'current' epoch is the epoch at JD. Note that
LST is defined as the zenith RA in the current epoch. Note that
epoch='J2000' corresponds to the ICRS standard.
Output:
-------
RA : type=float, right ascension [degrees] at zenith JD times
in the specified epoch.
"""
# get JD type
if isinstance(JD, list) or isinstance(JD, np.ndarray):
_array = True
else:
_array = False
JD = [JD]
# setup RA list
RA = []
# iterate over jd
for jd in JD:
# use current epoch calculation
if epoch == 'current':
ra = JD2LST(jd, longitude=longitude) * 180 / np.pi
RA.append(ra)
# use J2000 epoch
elif epoch == 'J2000':
loc = crd.EarthLocation(lat=latitude * unt.deg, lon=longitude * unt.deg)
t = Time(jd, format='jd', scale='utc')
zen = crd.SkyCoord(frame='altaz', alt=90 * unt.deg, az=0 * unt.deg, obstime=t, location=loc)
RA.append(zen.icrs.ra.degree)
else:
raise ValueError("didn't recognize {} epoch".format(epoch))
RA = np.array(RA)
if _array:
return RA
else:
return RA[0] | 14bb4d621449a7fd9fa57acecb107aaa4ea61010 | 4,917 |
import tensorflow as tf
def _average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list is
over individual gradients. The inner list is over the gradient calculation
for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
print(len(grad_and_vars))
for g, v in grad_and_vars:
if g is None:
print(v)
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
print(len(grad_and_vars))
for g, v in grad_and_vars:
if g is not None:
print(v)
for g, v in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
print(v)
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
capped_grad = tf.clip_by_value(grad, -200., 200.)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (capped_grad, v)
average_grads.append(grad_and_var)
return average_grads | 24be75daeeb1d5878a9a481c43be440ced40a0a2 | 4,918 |
import os
def _get_memory_banks_listed_in_dir(path):
"""Get all memory banks the kernel lists in a given directory.
Such a directory can be /sys/devices/system/node/ (contains all memory banks)
or /sys/devices/system/cpu/cpu*/ (contains all memory banks on the same NUMA node as that core)."""
# Such directories contain entries named "node<id>" for each memory bank
return [int(entry[4:]) for entry in os.listdir(path) if entry.startswith("node")] | a3ee0cf6ad043b4a18c12994b9ac028c538e631c | 4,919 |
import time
def get_online_users(guest=False): # pragma: no cover
"""Returns all online users within a specified time range
:param guest: If True, it will return the online guests
"""
current = int(time.time()) // 60
minutes = range_method(flaskbb_config['ONLINE_LAST_MINUTES'])
if guest:
return redis_store.sunion(['online-guests/%d' % (current - x)
for x in minutes])
return redis_store.sunion(['online-users/%d' % (current - x)
for x in minutes]) | 39ad71b71e8a8caac0e6a82b7992c6229f85d255 | 4,920 |
from typing import Union
def reverse_bearing(bearing: Union[int, float]):
"""
180 degrees from supplied bearing
:param bearing:
:return:
"""
assert isinstance(bearing, (float, int))
assert 0. <= bearing <= 360.
new_bearing = bearing + 180.
# Ensure strike is between zero and 360 (bearing)
return normalize_bearing(new_bearing) | 1fd01df40a23c52ff093c17fd5752f0609cee761 | 4,921 |
def signUp_page(request):
"""load signUp page"""
return render(request, 'app/signUp_page.html') | ae28acac27264dbb8d2f6a69afb01c6f96a08218 | 4,922 |
import os
def db(app, request):
"""
Session-wide test database.
"""
db_path = app.config["SQLALCHEMY_DATABASE_URI"]
db_path = db_path[len("sqlite:///"):]
print(db_path)
if os.path.exists(db_path):
os.unlink(db_path)
def teardown():
_db.drop_all()
os.unlink(db_path)
_db.app = app
apply_migrations()
request.addfinalizer(teardown)
return _db | 2f96da64ff0bbfa54f06758ba816120ccda8b16a | 4,923 |
import csv
def read_students(path):
""" Read a tab-separated file of students. The only required field is 'github_repo', which is this
student's github repository. """
students = [line for line in csv.DictReader(open(path), delimiter='\t')]
check_students(students)
return students | e64aeb1a73fb79e91d0464d6a95e509d3cc60b94 | 4,924 |
from typing import Tuple
import torch
from torch import Tensor
def get_extended_attention_mask(
attention_mask: Tensor,
input_shape: Tuple[int],
device: torch.device,
is_decoder=False,
) -> Tensor:
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
        :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# extended_attention_mask = extended_attention_mask.to(
# dtype=self.dtype
# ) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask | 26104733e3cc970536a11c3930866dc3d11d3583 | 4,925 |
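A usage sketch for get_extended_attention_mask above (assuming the function is in scope); a single padded sequence in encoder mode:
import torch
attention_mask = torch.tensor([[1, 1, 1, 0]])  # batch_size=1, seq_length=4, last token is padding
extended = get_extended_attention_mask(attention_mask, (1, 4), torch.device("cpu"), is_decoder=False)
print(extended.shape)     # torch.Size([1, 1, 1, 4])
print(extended[0, 0, 0])  # zeros (printed as -0.) for real tokens, -10000.0 for the padded position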
def getdiskuuidvm(diskuuid):
"""
get vm uuid from disk uuid and return it
"""
if debug:
print 'vm from disk uuid :',diskuuid
cmd='xe vbd-list vdi-uuid='+diskuuid
response=docmd(cmd).split('vm-uuid ( RO): ')
vmuuid=response[1].split('\n')[0]
return vmuuid | ec1ffa56c0a85f0554a367ccc41baeb98d21cd41 | 4,926 |
import os
def storedata():
""" Upload a new file """
#path = os.path.join(app.config['UPLOAD_DIR'],current_user.name)
path = os.path.join(app.config['UPLOAD_DIR'])
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
if request.method == 'POST':
if 'file' not in request.files:
flash('Chosse a file .csv',"alert alert-danger")
return render_template(
'uploadData.html',
infoUpload='Chosse a file .csv',
files=dirs)
file = request.files['file'] # get the file
if file.filename == '':
flash('File not selected',"alert alert-danger")
return render_template(
'uploadData.html',
infoUpload='file not selected',
files=dirs)
file_name = ''
data_name = ''
if file and allowed_file(file.filename):
file_name = secure_filename(file.filename)
file_path = os.path.join(path, file_name)
file.save(file_path)
dirs = os.listdir(path)
if dirs!="": # If user's directory is empty
dirs.sort(key=str.lower)
flash('Uploaded!! '+file_name,"alert alert-success")
return render_template(
'uploadData.html',
infoUpload='Uploaded!! '+file_name,
files=dirs)
flash('Error',"alert alert-danger")
return render_template(
'uploadData.html',
infoUpload='Error',
files=dirs)
else:
return redirect(url_for('defineData')) | 7795b07b296eced8e97195881d5cabbb5a2d725d | 4,927 |
def adler32(string, start=ADLER32_DEFAULT_START):
"""
Compute the Adler-32 checksum of the string, possibly with the given
start value, and return it as a unsigned 32 bit integer.
"""
return _crc_or_adler(string, start, _adler32) | ed4a0905b4891ee931ef08f91e92032a613caee7 | 4,928 |
import numpy as np
def find_earliest_brs_idx(g: Grid, V: np.ndarray, state: np.ndarray, low: int, high: int) -> int:
"""
Determines the earliest time the current state is in the reachable set
Args:
g: Grid
V: Value function
state: state of dynamical system
low: lower bound of search range (inclusive)
high: upper bound of search range (inclusive)
Returns:
t: Earliest time where the state is in the reachable set
"""
epsilon = 1e-4
while low < high:
        mid = int(np.ceil((high + low) / 2))
value = g.get_value(V[..., mid], state)
if value < epsilon:
low = mid
else:
high = mid - 1
return low | 234a201af98f74c41785a36b3391e23700ac80e6 | 4,929 |
def italic(s):
"""Returns the string italicized.
Source: http://stackoverflow.com/a/16264094/2570866
"""
return r'\textit{' + s + '}' | 7eb9e9629e8556e9410e4d92525dd8c06c3e25de | 4,930 |
import functools
def skip_if_disabled(func):
"""Decorator that skips a test if test case is disabled."""
@functools.wraps(func)
def wrapped(*a, **kwargs):
func.__test__ = False
test_obj = a[0]
message = getattr(test_obj, 'disabled_message',
'Test disabled')
if getattr(test_obj, 'disabled', False):
test_obj.skipTest(message)
func(*a, **kwargs)
return wrapped | 56d42a1e0418f4edf3d4e8478358495b1353f57a | 4,931 |
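A usage sketch for skip_if_disabled above (assuming the decorator is in scope):
import unittest
class ExampleTest(unittest.TestCase):
    disabled = True
    disabled_message = 'Feature X is turned off'
    @skip_if_disabled
    def test_feature_x(self):
        self.assertTrue(True)  # reported as skipped with 'Feature X is turned off'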
from typing import Any
import itertools
def _compare_keys(target: Any, key: Any) -> bool:
"""
Compare `key` to `target`.
Return True if each value in `key` == corresponding value in `target`.
If any value in `key` is slice(None), it is considered equal
to the corresponding value in `target`.
"""
if not isinstance(target, tuple):
return target == key
for k1, k2 in itertools.zip_longest(target, key, fillvalue=None):
if k2 == slice(None):
continue
if k1 != k2:
return False
return True | ff5c60fab8ac0cbfe02a816ec78ec4142e32cfbf | 4,932 |
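A usage sketch for _compare_keys above (assuming the function is in scope):
print(_compare_keys(('a', 1), ('a', 1)))            # True
print(_compare_keys(('a', 1), ('a', slice(None))))  # True -- slice(None) matches any value
print(_compare_keys(('a', 1), ('b', 1)))            # False
print(_compare_keys('a', 'a'))                      # True (non-tuple targets compare directly)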
def parser(_, objconf, skip=False, **kwargs):
""" Parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: content)
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from riko.utils import get_abspath
>>> from meza.fntools import Objectify
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> url = 'http://query.yahooapis.com/v1/public/yql'
>>> query = "select * from feed where url='%s'" % feed
>>> conf = {'query': query, 'url': url, 'debug': False}
>>> objconf = Objectify(conf)
>>> url = get_abspath(get_path('yql.xml'))
>>>
>>> with fetch(url) as f:
... kwargs = {'stream': {}, 'response': f}
... result = parser(None, objconf, **kwargs)
>>>
>>> next(result)['title']
'Bring pizza home'
"""
if skip:
stream = kwargs['stream']
else:
f = kwargs.get('response')
if not f:
params = {'q': objconf.query, 'diagnostics': objconf.debug}
if objconf.memoize and not objconf.cache_type:
objconf.cache_type = 'auto'
f = fetch(params=params, **objconf)
# TODO: consider paging for large result sets
root = xml2etree(f).getroot()
results = root.find('results')
stream = map(etree2dict, results)
return stream | 24bb8327fef14952ce90edd41fa9dc778ac15750 | 4,933 |
import xarray as xr
import aux_functions_strat as aux
def predict_xr(result_ds, regressors):
"""input: results_ds as came out of MLR and saved to file, regressors dataset"""
# if produce_RI isn't called on data then you should explicitely put time info
rds = result_ds
regressors = regressors.sel(time=rds.time) # slice
regressors = regressors.apply(aux.normalize_xr, norm=1, verbose=False) # normalize
reg_dict = dict(zip(rds.regressors.values, regressors.data_vars.values()))
# make sure that all the regressors names are linking to their respective dataarrays
for key, value in reg_dict.items():
# print(key, value)
assert value.name == key
reg_da = xr.concat(reg_dict.values(), dim='regressors')
reg_da['regressors'] = list(reg_dict.keys())
reg_da.name = 'regressors_time_series'
rds['predicted'] = xr.dot(rds.params, reg_da) + rds.intercept
rds = aux.xr_order(rds)
# retures the same dataset but with total predicted reconstructed geo-time-series field
result_ds = rds
return result_ds | 458f3f9a17d9cc16200f1eb0e20eb2a43f095ea0 | 4,934 |
async def list_features(location_id):
"""
List features
---
get:
summary: List features
tags:
- features
parameters:
- name: envelope
in: query
required: false
description: If set, the returned list will be wrapped in an envelope with this name.
responses:
200:
description: A list of objects.
content:
application/json:
schema:
type: array
items: Feature
"""
location = g.active_incident.Location.find_by_id(location_id)
if location is None:
raise exceptions.NotFound(description="Location {} was not found".format(location_id))
features = location.Feature.find()
# Wrap the list if the caller requested an envelope.
query = request.args
if "envelope" in query:
result = {query.get("envelope"): features}
else:
result = features
return jsonify(result), HTTPStatus.OK | 0698a63af10a70cc2ae0b8734dff61fb51786829 | 4,935 |
def shot_start_frame(shot_node):
"""
Returns the start frame of the given shot
:param shot_node: str
:return: int
"""
return sequencer.get_shot_start_frame(shot_node) | f13582040ad188b6be8217a7657ce53c145fe090 | 4,936 |
def word1(x: IntVar) -> UInt8:
"""Implementation for `WORD1`."""
return word_n(x, 1) | 0cc7e254c48596d190ccb43a0e0d3c90b18f34af | 4,937 |
def _getCols1():
"""
Robs Version 1 CSV files
"""
cols = 'Date,DOY,Time,Location,Satellite,Collection,Longitude,Latitude,SolarZenith,SolarAzimuth,SensorZenith,SensorAzimuth,ScatteringAngle,nval_AOT_1020_l20,mean_AOT_1020_l20,mean_AOT_870_l20,mean_AOT_675_l20,sdev_AOT_675_l20,mean_AOT_500_l20,mean_AOT_440_l20,mean_AOT_380_l20,mean_AOT_340_l20,mean_Water_cm_l20,nval_AOT_1020_l15,mean_AOT_1020_l15,mean_AOT_870_l15,mean_AOT_675_l15,sdev_AOT_675_l15,mean_AOT_500_l15,mean_AOT_440_l15,mean_AOT_380_l15,mean_AOT_340_l15,mean_Water_cm_l15,npix_AOT0550,mean_AOT0550,sdev_AOT0550,mean_rAOTse0550,sdev_rAOTse0550,mean_AOT0470corr_l,npix_AOT0550corr_l,pval_AOT0550corr_l,mean_AOT0550corr_l,sdev_AOT0550corr_l,mean_AOT0660corr_l,mean_AOT2100corr_l,mean_rAOTse0550_l,pval_rAOTse0550_l,mean_AOT0550sm_l,pval_AOT0550sm_l,mean_Aexp0470_0670_l,mean_surfre0470_l,mean_surfre0660_l,mean_surfre2100_l,mean_fiterr_l,mean_atype_l,mean_cfrac_l,mean_mconc_l,QA0470_l,mean_mref0470_l,mean_mref0550_l,mean_mref0660_l,mean_mref0870_l,mean_mref1200_l,mean_mref1600_l,mean_mref2100_l,pval_mref0470_l,pval_mref0550_l,pval_mref0660_l,pval_mref0870_l,pval_mref1200_l,pval_mref1600_l,pval_mref2100_l,mean_AOT0470ea_o,npix_AOT0550ea_o,pval_AOT0550ea_o,mean_AOT0550ea_o,sdev_AOT0550ea_o,mean_AOT0660ea_o,mean_AOT0870ea_o,mean_AOT1200ea_o,mean_AOT1600ea_o,mean_AOT2100ea_o,mean_AOT0470sa_o,npix_AOT0550sa_o,pval_AOT0550sa_o,mean_AOT0550sa_o,sdev_AOT0550sa_o,mean_AOT0660sa_o,mean_AOT0870sa_o,mean_AOT1200sa_o,mean_AOT1600sa_o,mean_AOT2100sa_o,mean_rAOTse0550a_o,mean_effr0550a_o,sdev_effr0550a_o,mean_solindx_sa_o,mean_solindx_la_o,mean_lsqerr_a_o,mean_cfrac_o,sdev_cfrac_o,QAavg_o,mean_mref0470_o,mean_mref0550_o,mean_mref0660_o,mean_mref0870_o,mean_mref1200_o,mean_mref1600_o,mean_mref2100_o,sdev_mref0470_o,sdev_mref0550_o,sdev_mref0660_o,sdev_mref0870_o,sdev_mref1200_o,sdev_mref1600_o,sdev_mref2100_o,mean_wni,mean_wir,pval_wni,pval_wir,mean_pathrad0470_l,mean_pathrad0660_l,mean_critref0470_l,mean_critref0660_l,mean_errprad0470_l,mean_errprad0660_l,mean_errcref0470_l,mean_errcref0660_l,mean_qwtprad0470_l,mean_qwtprad0660_l,mean_qwtcref0470_l,mean_qwtcref0660_l,npix_AOT0550dpbl_l,pval_AOT0550dpbl_l,mean_AOT0550dpbl_l,sdev_AOT0550dpbl_l,mean_AOT0412dpbl_l,mean_AOT0470dpbl_l,mean_AOT0660dpbl_l,mean_Aext0412_0470dpbl_l,mean_SSA0412dpbl_l,mean_SSA0470dpbl_l,mean_SSA0660dpbl_l,mean_surfre0412dpbl_l,mean_surfre0470dpbl_l,mean_surfre0660dpbl_l,tau_550_norm,eta_norm,tau_f,tau_c,alpha_norm,alpha_f,Deta,tau_466,tau_553,tau_644,tau_866,tau_2119,Angs_466_644,exp_errorO_pct,exp_errorL_pct,ncep_pwat,ncep_O3,ncep_pres,ncep_windspd,ncep_winddir'
return cols | bf3148b53effc18e212e03cf70673dc25e1d0005 | 4,938 |
from typing import Optional
from pathlib import Path
def run_primer3(sequence, region, primer3_exe: str, settings_dict: dict,
padding=True, thermodynamic_params: Optional[Path] = None):
"""Run primer 3. All other kwargs will be passed on to primer3"""
if padding:
target_start = region.padding_left
target_len = len(sequence) - region.padding_left - region.padding_right
else:
target_start = 1
target_len = len(sequence)
target = ",".join(map(str, [target_start, target_len]))
p3 = Primer3(primer3_exe, sequence, target, target, settings_dict,
thermodynamic_params=thermodynamic_params)
p3_out = p3.run()
primers = parse_primer3_output(p3_out)
return primers | 670f70b5b50200da5c9cd13b447a5836b748fd31 | 4,939 |
import inspect
def filter_safe_dict(data, attrs=None, exclude=None):
"""
Returns current names and values for valid writeable attributes. If ``attrs`` is given, the
returned dict will contain only items named in that iterable.
"""
def is_member(cls, k):
v = getattr(cls, k)
checks = [
not k.startswith("_"),
not inspect.ismethod(v) or getattr(v, "im_self", True),
not inspect.isfunction(v),
not isinstance(v, (classmethod, staticmethod, property)),
]
return all(checks)
cls = None
if inspect.isclass(data):
cls = data
data = {k: getattr(cls, k) for k in dir(cls) if is_member(cls, k)}
ret = {}
for k, v in data.items():
checks = [
not k.startswith("_"),
not inspect.ismethod(v) or getattr(v, "im_self", True),
not isinstance(v, (classmethod, staticmethod, property)),
not attrs or (k in attrs),
not exclude or (k not in exclude),
]
if all(checks):
ret[k] = v
return ret | ce457615ba8e360243912c3bba532e8327b8def4 | 4,940 |
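A usage sketch for filter_safe_dict above (assuming the function is in scope):
class Config:
    name = "demo"
    retries = 3
    _secret = "hidden"   # dropped: leading underscore
    def method(self):    # dropped: plain function
        pass
print(filter_safe_dict(Config))                           # {'name': 'demo', 'retries': 3}
print(filter_safe_dict(Config, attrs=['name']))           # {'name': 'demo'}
print(filter_safe_dict({'a': 1, 'b': 2}, exclude=['b']))  # {'a': 1}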
def fixture_loqus_exe():
"""Return the path to a loqus executable"""
return "a/path/to/loqusdb" | 647b31e37854a5cbc8fd066c982e67f976100c03 | 4,941 |
def get_reads_section(read_length_r1, read_length_r2):
"""
Yield a Reads sample sheet section with the specified R1/R2 length.
:rtype: SampleSheetSection
"""
rows = [[str(read_length_r1)], [str(read_length_r2)]]
return SampleSheetSection(SECTION_NAME_READS, rows) | 19f3e36e34471c6bac89f2a42bdcb3f4b79c22c7 | 4,942 |
def validate(number):
"""Check if the number is valid. This checks the length, format and check
digit."""
number = compact(number)
if not all(x in _alphabet for x in number):
raise InvalidFormat()
if len(number) != 16:
raise InvalidLength()
if number[-1] == '-':
raise InvalidFormat()
if number[-1] != calc_check_digit(number):
raise InvalidChecksum()
return number | e191ee9d8631dfd843276b2db7ee9699b974e555 | 4,943 |
import re
def parse_directory(filename):
""" read html file (nook directory listing),
return users as [{'name':..., 'username':...},...] """
try:
file = open(filename)
html = file.read()
file.close()
except:
return []
users = []
for match in re.finditer(r'<b>([^<]+)</b>.*?mailto:([^@]+)@', html):
groups = match.groups()
users.append({'name':groups[0], 'username':groups[1]})
users.sort(key=lambda x:x['username'])
return users | 1b7fc5b6257b5c382f520a60c9227e8b458d482d | 4,944 |
import time
def ShortAge(dt):
"""Returns a short string describing a relative time in the past.
Args:
dt: A datetime.
Returns:
A short string like "5d" (5 days) or "32m" (32 minutes).
"""
# TODO(kpy): This is English-specific and needs localization.
seconds = time.time() - UtcToTimestamp(dt)
minutes = int(seconds / 60 + 0.5)
hours = int(seconds / 3600 + 0.5)
days = int(seconds / 86400 + 0.5)
if seconds < 60:
return 'just now'
if minutes < 100:
return '%dm ago' % minutes
if hours < 48:
return '%dh ago' % hours
return '%dd ago' % days | 1de2391d8f604d145a1771596def13460fd0e982 | 4,945 |
import sys
import tempfile
import os
import shutil
from subprocess import Popen, PIPE
from textwrap import indent
from Bio import Phylo, AlignIO
def _raxml(exe, msa, tree, model, gamma, alpha, freq, outfile):
"""
Reconstruct ancestral sequences using RAxML_.
:param exe: str, path to the executable of an ASR program.
:param msa: str, path to the MSA file (must in FASTA format).
:param tree: str, path to the tree file (must in NEWICK format) or a NEWICK
format tree string (must start with "(" and end with ";").
:param model: namedtuple, substitution model for ASR.
:param gamma: int, The number of categories for the discrete gamma rate
heterogeneity model.
:param freq: str, the equilibrium frequencies of the twenty amino acids.
:param alpha: float, the shape (alpha) for the gamma rate heterogeneity.
:param outfile: str, path to the output file.
:return: tuple, a tree object and a dict for sequences.
.. note::
See doc string of function ``asr()`` for details of all arguments.
.. _RAxML: https://sco.h-its.org/exelixis/web/software/raxml/
"""
if model.type == 'custom':
mf = model.name
name = 'WAG'
info('Use model file {} for ancestral states reconstruction.'.format(mf))
else:
name = model.name
if name.upper() in RAXML_MODELS:
mf = ''
info('Use {} model for ancestral states '
'reconstruction.'.format(name))
else:
error('RAxML does not accept {} model, aborted.'.format(name))
sys.exit(1)
wd, tf = tempfile.mkdtemp(dir=os.path.dirname(msa)), 'raxml.tree.newick'
tf = tree.file(os.path.join(wd, tf), brlen=False)
cmd = [exe, '-f', 'A', '-s', msa, '-t', tf, '-n', 'iMC']
m = 'PROTGAMMA' if (gamma or model.gamma) else 'PROTCAT'
m += name.upper()
freq = 'F' if freq == 'estimate' or model.frequency == 'estimate' else 'X'
if 'AUTO' not in m:
m += freq
cmd.extend(['-m', m])
if mf:
cmd.extend(['-P', mf])
cmd.append('--silent')
try:
info('Reconstructing ancestral states for {} using RAxML.'.format(msa))
process = Popen(cmd, cwd=wd, stdout=PIPE,
stderr=PIPE, universal_newlines=True)
code = process.wait()
msg = process.stdout.read() or process.stderr.read()
# Sometime RAxML does not return a non-zero code when it runs error
if code:
error('Ancestral reconstruction via RAxML failed for {} due to:'
'\n{}'.format(msa, indent(msg, prefix='\t')))
sys.exit(1)
else:
ancestor = os.path.join(wd, 'RAxML_marginalAncestralStates.iMC')
# Check again to see if reconstruction success
if not os.path.isfile(ancestor):
msg = '\n'.join([line for line in msg.splitlines()
if line.strip().startswith('ERROR')])
error('Ancestral reconstruction via RAxML failed for {} due to:'
'\n{}'.format(msa, indent(msg, prefix='\t')))
sys.exit(1)
info('Parsing ancestral sequence reconstruction results.')
with open(ancestor) as handle:
ancestor = dict(line.strip().split() for line in handle)
tree = os.path.join(wd, 'RAxML_nodeLabelledRootedTree.iMC')
tree = Phylo.read(tree, 'newick')
for clade in tree.find_clades():
if clade.confidence and not clade.name:
clade.name = str(clade.confidence)
tree, ancestor = _label(tree, ancestor)
for record in AlignIO.read(msa, 'fasta'):
ancestor[record.id] = record.seq
_write(tree, ancestor, [], {}, outfile)
info('Successfully save ancestral states reconstruction '
'results to {}.'.format(outfile))
return outfile
except OSError as err:
print(err)
error('Invalid RAxML executable {}, running RAxML failed for '
'{}.'.format(exe, msa))
sys.exit(1)
finally:
shutil.rmtree(wd) | dda8cf011f1a1ec2cce0501e35df24a9fa4a90b3 | 4,946 |
from typing import Optional
from decimal import Decimal
def dec_multiply(*args) -> Optional[Decimal]:
"""
Multiplication of numbers passed as *args.
Args:
*args: numbers we want to multiply
Returns:
The result of the multiplication as a decimal number
Examples:
>>> dec_multiply(3, 3.5, 4, 2.34)
Decimal('98.280')
>>> dec_multiply() is None
True
"""
if not args:
return
total = Decimal(str(args[0]))
for element in args[1:]:
total *= Decimal(str(element))
return total | f7d953debc5d24c97ee274ec13683be3fda302eb | 4,947 |
import json
def get_networks():
"""
Returns a list of all available network names
:return: JSON string, ex. "['bitcoin','bitcoin-cash','dash','litecoin']"
"""
return json.dumps([x[0] for x in db.session.query(Node.network).distinct().all()]) | 755e0238463aabed0a38102ca793842dd54a6c87 | 4,948 |
def get_cache_key_generator(request=None, generator_cls=None, get_redis=None):
"""Return an instance of ``CacheKeyGenerator`` configured with a redis
client and the right cache duration.
"""
# Compose.
if generator_cls is None:
generator_cls = CacheKeyGenerator
if get_redis is None:
get_redis = get_redis_client
# Instantiate and return the cache key generator.
return generator_cls(get_redis(request)) | 80c25a204976492e2741e46bd79d70d0e6b62b1a | 4,949 |
import pathlib
def is_from_derms(output):
"""Given an output, check if it's from DERMS simulation.
Parameters
----------
output: str or pathlib.Path
"""
if not isinstance(output, pathlib.Path):
output = pathlib.Path(output)
derms_info_file = output / DERMS_INFO_FILENAME
if derms_info_file.exists():
return True
return False | e9a9be7e18cda3b22661f773e6bb585c833b74d6 | 4,950 |
def js_squeeze(parser, token):
"""
{% js_squeeze "js/dynamic_minifyed.js" "js/script1.js,js/script2.js" %}
will produce STATIC_ROOT/js/dynamic_minifyed.js
"""
bits = token.split_contents()
if len(bits) != 3:
        raise template.TemplateSyntaxError("%r tag requires exactly two arguments" % bits[0])
return SqueezeNode('js', *bits[1:]) | 30b10b85001bbb5710584fb41469e1c36d50f086 | 4,951 |
def view_extracted_data() -> str:
"""
Display Raw extracted data from Documents
"""
extracted_data = read_collection(FIRESTORE_PROJECT_ID, FIRESTORE_COLLECTION)
if not extracted_data:
return render_template("index.html", message_error="No data to display")
return render_template("index.html", extracted_data=extracted_data) | 9eccebd4952fc3c988bfc6014d2c12944a197ac4 | 4,952 |
import json
def get_lstm_trump_text():
"""Use the LSTM trump tweets model to generate text."""
data = json.loads(request.data)
sl = data["string_length"]
st = data["seed_text"]
gen_text = lstm_trump.generate_text(seed_text=st, pred_len=int(sl))
return json.dumps(gen_text) | 9fbad3e7abcfcbbbfb5919a5c37cf607e972592e | 4,953 |
import os
def collect_bazel_rules(root_path):
"""Collects and returns all bazel rules from root path recursively."""
rules = []
for cur, _, _ in os.walk(root_path):
build_path = os.path.join(cur, "BUILD.bazel")
if os.path.exists(build_path):
rules.extend(read_bazel_build("//" + cur))
return rules | 4f7d9f1a8c768136aac8009eae284df0c526da62 | 4,954 |
def countSort(alist):
    """Counting sort."""
if alist == []:
return []
cntLstLen = max(alist) + 1
cntLst = [0] * cntLstLen
for i in range(len(alist)):
        cntLst[alist[i]] += 1  # value alist[i] = k is tallied at index k
alist.clear()
for i in range(cntLstLen):
        while cntLst[i] > 0:  # emit each value k as many times as it was counted
alist.append(i)
cntLst[i] -= 1
return alist | 6727b41794dc2a2f826023c2a53202798dfa49ab | 4,955 |
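A usage sketch for countSort above (assuming the function is in scope):
data = [4, 2, 7, 2, 0, 4]
print(countSort(data))  # [0, 2, 2, 4, 4, 7]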
def _FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5):
"""_FloatsTraitsBase_read_values_dataset(hid_t arg2, hid_t arg3, hid_t arg4, unsigned int arg5) -> FloatsList"""
return _RMF_HDF5._FloatsTraitsBase_read_values_dataset(arg2, arg3, arg4, arg5) | 4f2cfb17e5f0b3cfc980f51ef8e9ae8d7d38ba2c | 4,956 |
import requests
from bs4 import BeautifulSoup
import io
import pandas as pd
from progress.bar import Bar
def TRI_Query(state=None, county=None,area_code=None, year=None,chunk_size=100000):
"""Query the EPA Toxic Release Inventory Database
This function constructs a query for the EPA Toxic Release Inventory API, with optional arguments for details such as the two-letter state, county name, area code, and year. More info here: https://www.epa.gov/enviro/envirofacts-data-service-api
"""
base_url='https://data.epa.gov/efservice/'
#Declare the names of the tables that we want to pull
table_name1='TRI_FACILITY'
table_name2='TRI_REPORTING_FORM'
table_name3 = 'TRI_TRANSFER_QTY'
output_format='CSV'
query = base_url
query+=table_name1+'/'
#Add in the state qualifier, if the desired_state variable is named
if state:
query+='state_abbr/=/'+state+'/'
#Add in the county qualifier, if the desired_county variable is named
if county:
query+='county_name/'+county+'/'
#Add in the area code qualifier, if the desired_area_code variable is named
if area_code:
query+='zip_code/'+str(area_code)+'/'
#Add in the next table name and year qualifier, if the desired_year variable is named
query += table_name2+'/'
if year:
if type(year) is list:
query+='reporting_year/'+str(year[0])+'/'+str(year[1])+'/'
else:
query+='reporting_year/'+str(year)+'/'
#add the third table
query += table_name3+'/'
count_query = query+'count/'
count_xml = requests.get(count_query).content
nrows= int(BeautifulSoup(count_xml,features="lxml").find('requestrecordcount').contents[0])
#Add in the desired output format to the query
csv_query = query+ output_format
#Return the completed query
bar = Bar('Downloading Records:',max=nrows,\
suffix='%(index)d/%(max)d %(percent).1f%% - %(eta)ds')
bar.check_tty = False
s=requests.get(csv_query).content
dataframe=pd.read_csv(io.StringIO(s.decode('utf-8')), engine='python',
encoding='utf-8', error_bad_lines=False)
bar.next(n = dataframe.shape[0])
nrows_prev = dataframe.shape[0]
while dataframe.shape[0] < nrows:
new_query = query + 'rows/'+str(dataframe.shape[0])+':'\
+str(dataframe.shape[0]+chunk_size)+'/'
csv_query = new_query+ output_format
s=requests.get(csv_query).content
dataframe = dataframe.append(pd.read_csv(io.StringIO(s.decode('utf-8')),
engine='python',encoding='utf-8',
error_bad_lines=False))
bar.next(n=dataframe.shape[0]-nrows_prev)
nrows_prev = dataframe.shape[0]
bar.finish()
# do the replacement:
if 'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT' in dataframe.columns:
dataframe.replace({'TRI_TRANSFER_QTY.TYPE_OF_WASTE_MANAGEMENT':wm_dict},inplace=True)
return dataframe | 48eef06d1409dfe4404c6548435196cc95baff62 | 4,957 |
import requests
from lxml import html
import arrow
import pandas as pd
def get_monthly_schedule(year, month):
"""
:param year: a string, e.g. 2018
:param month: a string, e.g. january
:return schedule: a pd.DataFrame containing game info for the month
"""
url = f'https://www.basketball-reference.com/leagues/NBA_{year}_games-{month}.html'
page = requests.get(url)
tree = html.fromstring(page.content)
game_date = tree.xpath('//*[@data-stat="date_game"]/a/text()')
road_team = tree.xpath('//*[@data-stat="visitor_team_name"]/a/text()')
road_pts = tree.xpath('//*[@data-stat="visitor_pts"]/text()')
road_pts.pop(0) # Remove column name
home_team = tree.xpath('//*[@data-stat="home_team_name"]/a/text()')
home_pts = tree.xpath('//*[@data-stat="home_pts"]/text()')
home_pts.pop(0) # Remove column name
box_score_url = tree.xpath('//*[@data-stat="box_score_text"]/a/@href')
schedule = {
'DATE': game_date,
'ROAD_TEAM': road_team,
'ROAD_PTS': road_pts,
'HOME_TEAM': home_team,
'HOME_PTS': home_pts,
'BOX_SCORE_URL': box_score_url,
}
# Create a dictionary with different length columns (Series) that is
# suitable for a DataFrame
schedule = dict([ (k, pd.Series(v)) for k, v in schedule.items() ])
schedule = pd.DataFrame(schedule)
schedule.dropna(how='any', inplace=True)
schedule['ROAD_TM'] = schedule['ROAD_TEAM'].map(team_name_abbrev)
schedule['HOME_TM'] = schedule['HOME_TEAM'].map(team_name_abbrev)
schedule = schedule[['DATE', 'ROAD_TEAM', 'ROAD_TM', 'ROAD_PTS',
'HOME_TEAM', 'HOME_TM', 'HOME_PTS', 'BOX_SCORE_URL']]
BBALLREF = 'https://www.basketball-reference.com'
schedule['BOX_SCORE_URL'] = \
schedule['BOX_SCORE_URL'].apply(lambda x: BBALLREF + x)
def format_date(date):
return arrow.get(date, 'ddd, MMM D, YYYY').datetime.strftime('%Y-%m-%d')
schedule['DATE'] = schedule['DATE'].apply(format_date)
return schedule | 1aa48abaa274166110df8dfd55b49560f72db054 | 4,958 |
import csv
import zlib
def get_gzip_guesses(preview, stream, chunk_size, max_lines):
"""
:type preview: str
:param preview: The initial chunk of content read from the s3
file stream.
:type stream: botocore.response.StreamingBody
:param stream: StreamingBody object of the s3 dataset file.
:type chunk_size: int
:param chunk_size: Maximum size of the chunk in bytes peeking.
:type max_lines: int
:param max_lines: Maximum number of lines to peek into.
"""
COMPRESSION_TYPE = 'GZIP'
guesses = dict()
dialect = csv.Sniffer().sniff(zlib.decompressobj(zlib.MAX_WBITS|16).decompress(preview))
has_header = csv.Sniffer().has_header(zlib.decompressobj(zlib.MAX_WBITS|16).decompress(preview))
d = zlib.decompressobj(zlib.MAX_WBITS|16)
lines_read = 0
first_row = True
data = ''
while True:
if first_row:
chunk = preview
else:
chunk = stream.read(chunk_size)
if not chunk:
break
data += d.decompress(chunk)
if '\n' in data:
guesses, data, lines_read = analyze_data(data, lines_read, max_lines, first_row, guesses, dialect, has_header)
first_row = False
if lines_read >= max_lines:
return guesses, has_header, COMPRESSION_TYPE, dialect
return guesses, has_header, COMPRESSION_TYPE, dialect | aa6185ed31fc4bb5d85e991702925502beff86c0 | 4,959 |
from typing import List
from torch import nn
def make_preds_epoch(classifier: nn.Module,
data: List[SentenceEvidence],
batch_size: int,
device: str=None,
criterion: nn.Module=None,
tensorize_model_inputs: bool=True):
"""Predictions for more than one batch.
Args:
classifier: a module that looks like an AttentiveClassifier
data: a list of elements to make predictions over. These must be SentenceEvidence objects.
batch_size: the biggest chunk we can fit in one batch.
device: Optional; what compute device this should run on
criterion: Optional; a loss function
tensorize_model_inputs: should we convert our data to tensors before passing it to the model? Useful if we have a model that performs its own tokenization
"""
epoch_loss = 0
epoch_soft_pred = []
epoch_hard_pred = []
epoch_truth = []
batches = _grouper(data, batch_size)
classifier.eval()
for batch in batches:
loss, soft_preds, hard_preds, targets = make_preds_batch(classifier, batch, device, criterion=criterion, tensorize_model_inputs=tensorize_model_inputs)
if loss is not None:
epoch_loss += loss.sum().item()
epoch_hard_pred.extend(hard_preds)
epoch_soft_pred.extend(soft_preds.cpu())
epoch_truth.extend(targets)
epoch_loss /= len(data)
epoch_hard_pred = [x.item() for x in epoch_hard_pred]
epoch_truth = [x.item() for x in epoch_truth]
return epoch_loss, epoch_soft_pred, epoch_hard_pred, epoch_truth | 67cecfc6648ef4ad10531b086dab2fc9e6e2f6f3 | 4,960 |
import numpy as np
def array_3_1(data):
    """
    Purpose: flatten a 3-D image array into a 1-D (column) array.
    Parameters:
    data: image data, a 3-D array
    Returns: image data as a (width*height, 1) array
    """
    # No further input validation is performed here.
shape = data.shape
width = shape[0]
height = shape[1]
# z = list()
z = np.zeros([width * height, 1])
for i in range(0, width):
for j in range(0, height):
            index = i * height + j
z[index][0] = data[i, j, 0]
# z.append(data[i, j, 0])
return z | 0b2991da94102e5ecf47d037f995b95a3fd28ac8 | 4,961 |
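A hypothetical usage sketch for array_3_1 (not part of the original record), assuming the row-major indexing above; the array values are illustrative.
import numpy as np
img = np.arange(12).reshape(2, 3, 2)  # 2x3 image with 2 channels
flat = array_3_1(img)                 # (6, 1) column vector of channel 0
print(flat.ravel())                   # [ 0.  2.  4.  6.  8. 10.]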
import json
def UpdateString(update_intervals):
"""Calculates a short and long message to represent frequency of updates.
Args:
update_intervals: A list of interval numbers (between 0 and 55) that
represent the times an update will occur
Returns:
A two-tuple of the long and short message (respectively) corresponding to
the frequency. This is intended to be sent via AJAX and hence the
tuple is turned into json before being returned.
Raises:
BadInterval in the case that the length of update_intervals is not
a key in the constant RESPONSES
"""
length = len(update_intervals)
if length not in RESPONSES:
raise BadInterval(length)
else:
return json.dumps(RESPONSES[length]) | 35ba60e028c238f304bcf03d745865c93408b9c1 | 4,962 |
from typing import Optional, TypeVar
T = TypeVar("T")
def coalesce(*xs: Optional[T]) -> T:
"""Return the first non-None value from the list; there must be at least one"""
for x in xs:
if x is not None:
return x
assert False, "Expected at least one element to be non-None" | fe388a40ff200f9988514563d0e37d2d604317a7 | 4,963 |
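A hypothetical usage sketch for coalesce (not part of the original record); the arguments are illustrative.
print(coalesce(None, None, 3, 7))   # 3
print(coalesce(None, "fallback"))   # fallback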
import libtbx.phil
def check_phil(phil, scope=True, definition=True, raise_error=True):
"""
Convenience function for checking if the input is a libtbx.phil.scope
only or a libtbx.phil.definition only or either.
Parameters
----------
phil: object
The object to be tested
scope: bool
Flag to check if phil is a libtbx.phil.scope
definition: bool
Flag to check if phil is a libtbx.phil.definition
raise_error: bool
If true, a RuntimeError is raised if the check(s) fail
Returns
-------
value: bool
"""
value = False
if scope: # check for only libtbx.phil.scope
value = isinstance(phil, libtbx.phil.scope)
if definition: # check for only libtbx.phil.definition
value = isinstance(phil, libtbx.phil.definition)
if scope and definition: # check for either
value = isinstance(phil, libtbx.phil.scope) or isinstance(phil, libtbx.phil.definition)
if (scope and definition) and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope or libtbx.phil.definition is expected.')
elif scope and not value and raise_error:
raise RuntimeError('A libtbx.phil.scope is expected.')
elif definition and not value and raise_error:
raise RuntimeError('A libtbx.phil.definition is expected.')
return value | 11a59bb25689bfc5882b8e0b0b9c2e9a5f233db0 | 4,964 |
import json
def get_ssm_environment() -> dict:
"""Get the value of environment variables stored in the SSM param store under $DSS_DEPLOYMENT_STAGE/environment"""
p = ssm_client.get_parameter(Name=fix_ssm_variable_prefix("environment"))
parms = p["Parameter"]["Value"] # this is a string, so convert to dict
return json.loads(parms) | 2f5a44c7e01f87c0aff092f9fed83f0030d4f7da | 4,965 |
from datetime import datetime
def get_default_date_stamp():
"""
Returns the default date stamp as 'now', as an ISO Format string 'YYYY-MM-DD'
:return:
"""
return datetime.now().strftime('%Y-%m-%d') | 672cd98265b19da2df92c7849f1059e5988473d7 | 4,966 |
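A hypothetical usage sketch for get_default_date_stamp (not part of the original record); the printed value depends on the current date.
print(get_default_date_stamp())  # e.g. '2024-05-17'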
import re
def check_for_launchpad(old_vendor, name, urls):
"""Check if the project is hosted on launchpad.
:param name: str, name of the project
:param urls: set, urls to check.
:return: the name of the project on launchpad, or an empty string.
"""
if old_vendor != "pypi":
# XXX This might work for other starting vendors
# XXX but I didn't check. For now only allow
# XXX pypi -> launchpad.
return ''
for url in urls:
try:
return re.match(r"https?://launchpad.net/([\w.\-]+)",
url).groups()[0]
except AttributeError:
continue
return '' | 87fc4be32cd93671b5d9fe43697d9e6918675843 | 4,967 |
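A hypothetical usage sketch for check_for_launchpad (not part of the original record); the project name and URLs are illustrative.
urls = {"https://pypi.org/project/foo/", "https://launchpad.net/foo-project/+download"}
print(check_for_launchpad("pypi", "foo", urls))  # foo-project
print(check_for_launchpad("npm", "foo", urls))   # '' (only pypi -> launchpad is handled)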
import os
def apply_file_collation_and_strip(args, fname):
"""Apply collation path or component strip to a remote filename
Parameters:
args - arguments
fname - file name
Returns:
remote filename
Raises:
No special exception handling
"""
remotefname = fname.replace(os.path.sep,"/").strip("/") # replace os.path.sep with python's universal sep
if args.collate is not None:
remotefname = remotefname.split("/")[-1]
if args.collate != '.':
remotefname = "/".join((args.collate, remotefname))
elif args.stripcomponents > 0:
rtmp = remotefname.split("/")
nsc = min((len(rtmp) - 1, args.stripcomponents))
if nsc > 0:
remotefname = "/".join(rtmp[nsc:])
return remotefname | 7422a4c4b979dedc58a32a65f139f764cd0e78cb | 4,968 |
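A hypothetical usage sketch for apply_file_collation_and_strip (not part of the original record); the args namespace and path are illustrative.
from types import SimpleNamespace
args = SimpleNamespace(collate=None, stripcomponents=1)
print(apply_file_collation_and_strip(args, "/data/run1/output.txt"))  # run1/output.txt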
import torch
def constructRBFStates(L1, L2, W1, W2, sigma):
"""
Constructs a dictionary dict[tuple] -> torch.tensor that converts
tuples (x,y) representing positions to torch tensors used as input to the
neural network. The tensors have an entry for each valid position on the
race track. For each position (x,y), the tensor is constructed using the gaussian
radial basis function with standard deviation sigma. In other words, if entry i corresponds
to the position p2 = (x2, y2), then the tensor for a point p1 = (x1,y1) will have
tensor[i] = Gaussian_RBF(p1, p2).
@type L1: int
See description in the @RaceCar class.
@type L2: int
See description in the @RaceCar class.
@type W1: int
See description in the @RaceCar class.
@type W2: int
See description in the @RaceCar class.
@type sigma: float
The standard deviation of the gaussian radial basis function.
"""
N_states = (L1+1)*(W1+W2+1)+L2*(W2+1)
x_coords = torch.zeros(N_states, dtype=torch.float32)
y_coords = torch.zeros(N_states, dtype=torch.float32)
state_to_basis = {}
ind = 0
for x in range(L1+L2+1):
for y in range(W1+W2+1):
if (0<=x<=L1 and 0<=y<=W1+W2) or (0<=x<=L1+L2 and W1<=y<=W1+W2):
x_coords[ind] = x
y_coords[ind] = y
ind += 1
for x in range(L1 + L2 + 1):
for y in range(W1 + W2 + 1):
if (0 <= x <= L1 and 0 <= y <= W1 + W2) or (0 <= x <= L1 + L2 and W1 <= y <= W1 + W2):
basis = torch.exp(-((x_coords-x)**2 + (y_coords-y)**2)/(2*sigma**2))
state_to_basis[(x,y)] = basis.view(1, -1).to(device)
return state_to_basis | 575572e40f66c121468d547b45fa92c23f78f99f | 4,969 |
import os
import json
import pandas as pd
from tqdm import tqdm
def gather_data(path, save_file=None, path_json='src/python_code/settings.json'):
"""
Gather data from different experiments
:param path: path of the experiments
:param save_file: path if you want to save data as csv (Default None)
:param path_json: setting file
:return: dataframe
"""
experiments = next(os.walk(path))[1]
settings = json.load(open(path_json))["OOD"]["Gather_Data"]
methods = settings["Feature_methods"]
for j, experiment in enumerate(tqdm(experiments)):
df = get_data(path + experiment + '/logs')
df = process_RandNet(df)
df = separate_ood(df, path_json=path_json)
df2 = remove_latent(df)
df2['auc'] = df2.apply(lambda x: auc(x, df2) if x.ood == 1 else None, axis=1)
print("methods ", methods)
for method in methods:
methods2 = methods.copy()
methods2.remove(method)
print("methods ", method, methods2)
df3 = keep_feature(df, methods2, path_json=path_json)
df3['auc'] = df3.apply(lambda x: auc(x, df3) if x.ood == 1 else None, axis=1)
df2 = pd.concat([df2, df3])
df = df2
"""
df3 = keep_latent(df)
df3['auc'] = df3.apply(lambda x: auc(x, df3) if x.ood == 1 else None, axis=1)
df4 = keep_like(df)
df4['auc'] = df4.apply(lambda x: auc(x, df4) if x.ood == 1 else None, axis=1)
df = pd.concat([df2, df3, df4])
"""
#df['Metric use'] = df['Values'].apply(lambda x: np.mean(np.array(x).reshape(-1)))
df['Epoch'] = df['Epoch'].apply(lambda x: int(x))
df = df[['Model', 'Latent Space', 'Hidden Space', 'DataSet', 'Epoch', 'ood', 'auc']]
if j == 0:
final_df = df.copy()
else:
final_df = pd.concat([final_df, df])
if save_file is not None:
final_df.to_csv(save_file)
return final_df | ccada8ba540f61b04e37f18d3fd3ffbe7b4e893e | 4,970 |
from typing import Union
from typing import Iterator
from affine import Affine
from rasterio.io import DatasetReader
from rasterio.transform import from_bounds
from rasterio.windows import Window
def tile_grid_intersection(
src0: DatasetReader,
src1: DatasetReader,
blockxsize: Union[None, int] = None,
blockysize: Union[None, int] = None
) -> tuple[Iterator[Window], Iterator[Window], Iterator[Window], Affine, int, int]:
"""Generate tiled windows for the intersection between two grids.
Given two rasters having different dimensions calculate read-window generators for each
and a write-window generator for the intersecion.
Parameters:
src0: rasterio read source
src1: rasterio read source
blockxsize: write-window width
blockysize: write-window height
Returns:
read windows for src0,
read windows for src1,
write windows for the intersection,
write raster Affine,
write raster width in columns
write raster height in rows
"""
bbox0 = window_bounds(((0, 0), src0.shape), src0.transform, offset='ul')
bbox1 = window_bounds(((0, 0), src1.shape), src1.transform, offset='ul')
bounds = intersect_bounds(bbox0, bbox1)
(row_start0, row_stop0), (col_start0, col_stop0) = bounds_window(
bounds, src0.transform
)
(row_start1, row_stop1), (col_start1, col_stop1) = bounds_window(
bounds, src1.transform
)
ncols = col_stop0 - col_start0
nrows = row_stop0 - row_start0
affine = from_bounds(bounds[0], bounds[1], bounds[2], bounds[3], ncols, nrows)
if blockxsize is None:
blockxsize = ncols
if blockysize is None:
blockysize = nrows
windows0 = tile_grid(
ncols,
nrows,
blockxsize,
blockysize,
col_offset=col_start0,
row_offset=row_start0,
)
windows1 = tile_grid(
ncols,
nrows,
blockxsize,
blockysize,
col_offset=col_start1,
row_offset=row_start1,
)
write_windows = tile_grid(ncols, nrows, blockxsize, blockysize)
return (windows0, windows1, write_windows, affine, nrows, ncols) | 847092e1a02ed446d7873658340d578248b1e80c | 4,971 |
def etapes_index_view(request):
"""
GET etapes index
"""
# Check connected
if not check_connected(request):
raise exc.HTTPForbidden()
records = request.dbsession.query(AffaireEtapeIndex).filter(
AffaireEtapeIndex.ordre != None
).order_by(AffaireEtapeIndex.ordre.asc()).all()
return Utils.serialize_many(records) | a79ec31c3849a7e77528d4607859f9bf77899ffb | 4,972 |
from typing import Optional
def brute_force(ciphered_text: str, charset: str = DEFAULT_CHARSET, _database_path: Optional[str] = None) -> int:
""" Get Caesar ciphered text key.
Uses a brute force technique trying the entire key space until finding a text
that can be identified with any of our languages.
**You should not use this function. Use *brute_force_mp* instead.** This
function is slower than *mp* one because is sequential while the other uses a
multiprocessing approach. This function only stay here to allow comparisons
between sequential and multiprocessing approaches.
:param ciphered_text: Text to be deciphered.
:param charset: Charset used for Caesar method substitution. Both ends, ciphering
and deciphering, should use the same charset or original text won't be properly
recovered.
:param _database_path: Absolute pathname to database file. Usually you don't
set this parameter, but it is useful for tests.
:return: Caesar key found.
"""
key_space_length = len(charset)
return simple_brute_force(key_generator=integer_key_generator(key_space_length),
assess_function=_assess_caesar_key,
# key_space_length=key_space_length,
ciphered_text=ciphered_text,
charset=charset,
_database_path=_database_path) | 9b23b4b5068dd36345d6aa43f71bd307f8b24e0c | 4,973 |
import numpy as np
def SqZerniketoOPD(x,y,coef,N,xwidth=1.,ywidth=1.):
"""
Return an OPD vector based on a set of square Zernike coefficients
"""
stcoef = np.dot(zern.sqtost[:N,:N],coef)
x = x/xwidth
y = y/ywidth
zm = zern.zmatrix(np.sqrt(x**2+y**2),np.arctan2(y,x),N)
opd = np.dot(zm,stcoef)
return opd | 4243f2c4106d5de0b7f6966cfb3244644beff100 | 4,974 |
import random
def build_voterinfo(campaign, state):
"""Render a tweet of voting info for a state"""
state_info = campaign.info_by_state[state]
num_cities = len(state_info[CITIES])
assert num_cities == len(set(state_info[CITIES])), f"Duplicate entries in CITIES for {state}."
city_ct = num_cities
effective_length = 0
tweet_text = ""
while city_ct > 0:
# Iterate on building a tweet until it fits within the limit.
# Return none if unsuccessful
city_set = set(state_info[CITIES])
try:
# Select up to city_ct cities
cities = []
cities_found = 0
while cities_found < city_ct:
city_idx = random.randint(0, num_cities - 1)
city = state_info[CITIES][city_idx]
if city in city_set:
cities.append(hashtag(city))
city_set.remove(city)
cities_found += 1
effective_length, tweet_text = render_voterinfo(campaign, state, cities)
break
except AssertionError:
tweet_text = ""
city_ct -= 1
return effective_length, tweet_text | a3f6b7aea9b84174ed1e825cacb38966e099c7eb | 4,975 |
import numpy as np
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
def train_model(training_df, stock):
"""
Summary: Trains XGBoost model on stock prices
Inputs: stock_df - Pandas DataFrame containing data about stock price, date, and daily tweet sentiment regarding that stock
stock - String representing stock symbol to be used in training
Return value: Trained XGBoost model
"""
print("Beginning training model for ", stock)
X_train, X_test, y_train, y_test = create_train_data(training_df)
print("Created data")
xgb = XGBRegressor(objective="reg:squarederror", random_state=42)
parameters = {
'n_estimators': [100, 200, 300, 400],
'learning_rate': [0.001, 0.005, 0.01, 0.05],
'max_depth': [8, 10, 12, 15],
'gamma': [0.001, 0.005, 0.01, 0.02],
}
print("Performing Grid Search")
gs = GridSearchCV(xgb, parameters)
gs.fit(X_train, y_train, verbose=2)
print("Grid Search Done")
model = XGBRegressor(**gs.best_params_, objective="reg:squarederror")
model.fit(X_train, y_train)
print("Model fit")
y_pred = model.predict(X_test)
print(stock)
print(f'y_true = {np.array(y_test)[:5]}')
print(f'y_pred = {y_pred[:5]}')
print(f'mean_squared_error = {mean_squared_error(y_test, y_pred)}')
print("----------------")
return model | be28e84c6796bd002217ab56c85958b52fbc199c | 4,976 |
import os
from collections import defaultdict
def find_ext(files, ext):
"""
Finds all files with extension `ext` in `files`.
Parameters
----------
files : list
List of files to search in
ext : str
File extension
Returns
-------
dict
A dictionary of pairs (filename, full_filename)
"""
dic = defaultdict(lambda: None)
for full_filename in files:
filename, fext = os.path.splitext(full_filename)
if fext.lower() == ext:
dic[filename] = full_filename
return dic | 62f64b10ef00290dfbe2feae5d8a7d92ced4a1b0 | 4,977 |
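A hypothetical usage sketch for find_ext (not part of the original record); the file names are illustrative.
files = ["report.PDF", "notes.txt", "summary.pdf"]
print(dict(find_ext(files, ".pdf")))  # {'report': 'report.PDF', 'summary': 'summary.pdf'}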
def create_test_node(context, **kw):
"""Create and return a test Node object.
Create a node in the DB and return a Node object with appropriate
attributes.
"""
node = get_test_node(context, **kw)
node.create()
return node | 21ff9931a7c6859bbe924014cb3a06b9890f7a63 | 4,978 |
def get_physical_id(r_properties):
""" Generated resource id """
bucket = r_properties['Bucket']
key = r_properties['Key']
return f's3://{bucket}/{key}' | 2cd467d9b1df72a4573d99f7a5d799f9612239c9 | 4,979 |
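A hypothetical usage sketch for get_physical_id (not part of the original record); the bucket and key are illustrative.
props = {"Bucket": "my-bucket", "Key": "path/to/object.json"}
print(get_physical_id(props))  # s3://my-bucket/path/to/object.json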
def entity_test_models(translation0, locale1):
"""This fixture provides:
- 2 translations of a plural entity
- 1 translation of a non-plural entity
- A subpage that contains the plural entity
"""
entity0 = translation0.entity
locale0 = translation0.locale
project0 = entity0.resource.project
locale0.cldr_plurals = "0,1"
locale0.save()
translation0.plural_form = 0
translation0.save()
resourceX = Resource.objects.create(
project=project0, path="resourceX.po")
entity0.string = "Entity zero"
entity0.key = entity0.string
entity0.string_plural = "Plural %s" % entity0.string
entity0.save()
entityX = Entity.objects.create(
resource=resourceX,
string="entityX",
key='Key%sentityX' % KEY_SEPARATOR)
translation0pl = Translation.objects.create(
entity=entity0,
locale=locale0,
plural_form=1,
string="Plural %s" % translation0.string)
translationX = Translation.objects.create(
entity=entityX,
locale=locale0,
string="Translation %s" % entityX.string)
subpageX = Subpage.objects.create(
project=project0, name="Subpage")
subpageX.resources.add(entity0.resource)
return translation0, translation0pl, translationX, subpageX | 36c6962a69d241e395af1c7ebe16271dcaed975d | 4,980 |
import numpy as np
from scipy.spatial import distance
def paris_topology(self, input_path):
"""Generation of the Paris metro network topology
Parameters:
input_path: string, input folder path
Returns:
self.g: nx.Graph(), Waxman graph topology
self.length: np.array, lengths of edges
"""
adj_file = open(input_path + "adj.dat", "r")
lines = adj_file.readlines()
# graph adjacency list
topol = np.zeros([len(lines), 2], dtype=int)
for iedge, line in enumerate(lines):
topol[iedge][:] = [int(w) for w in line.split()[0:2]]
self.g.add_edges_from(topol)
# coordinate of nodes
coord_file = open(input_path + "coord.dat", "r")
lines = coord_file.readlines()
for inode, line in enumerate(lines):
self.g.nodes[inode]["pos"] = tuple([float(w) for w in line.split()[0:2]])
# length of edges
self.length = np.zeros(self.g.number_of_edges())
for i, edge in enumerate(self.g.edges()):
self.length[i] = distance.euclidean(self.g.nodes[edge[0]]["pos"], self.g.nodes[edge[1]]["pos"])
# right hand side construction
forcing_path = input_path + "rhs.dat"
self.forcing = forcing_generation(self, forcing_path)
return self.g, self.length, self.forcing | 9f81e111cfe9adf265b9a3aa58390935b752f242 | 4,981 |
def _rescale(vector):
"""Scale values in vector to the range [0, 1].
Args:
vector: A list of real values.
"""
# Subtract min, making smallest value 0
min_val = min(vector)
vector = [v - min_val for v in vector]
# Divide by max, making largest value 1
max_val = float(max(vector))
try:
return [v / max_val for v in vector]
except ZeroDivisionError: # All values are the same
return [1.0] * len(vector) | 0091deb65c67ef55b2632ac8d5ff8a15b275d12e | 4,982 |
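A hypothetical usage sketch for _rescale (not part of the original record); the input vectors are illustrative.
print(_rescale([2.0, 4.0, 6.0]))  # [0.0, 0.5, 1.0]
print(_rescale([5.0, 5.0]))       # [1.0, 1.0] (all values equal)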
from datetime import datetime
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST
def validate_travel_dates(departure, arrival):
"""It validates arrival and departure dates
:param departure: departure date
:param arrival: arrival date
:returns: error message or Boolean status
"""
date_format = "%Y-%m-%dT%H:%M:%SZ"
status = True
error_message = ""
if datetime.strptime(departure, date_format) < datetime.now():
status = False
error_message = Response(
{"message": "Departure time cannot be in the past"},
status=HTTP_400_BAD_REQUEST,
)
elif datetime.strptime(arrival, date_format) < datetime.now():
status = False
error_message = Response(
{"message": "Arrival time cannot be in the past"},
status=HTTP_400_BAD_REQUEST,
)
elif datetime.strptime(departure, date_format) > datetime.strptime(
arrival, date_format
):
status = False
error_message = Response(
{"message": "Departure time cannot be greater than arrival time"},
status=HTTP_400_BAD_REQUEST,
)
return status, error_message | 41759684517daece729ba845b7afc80c6e6b01ea | 4,983 |
def xmatch_arguments():
""" Obtain information about the xmatch service
"""
return jsonify({'args': args_xmatch}) | 393e74df6900b8c4ed6f0eac82c162a7287a9b6d | 4,984 |
import numpy as np
def rosenbrock_func(x):
"""Rosenbrock objective function.
Also known as the Rosenbrock's valley or Rosenbrock's banana
function. Has a global minimum of :code:`np.ones(dimensions)` where
:code:`dimensions` is :code:`x.shape[1]`. The search domain is
:code:`[-inf, inf]`.
Parameters
----------
x : numpy.ndarray
set of inputs of shape :code:`(n_particles, dimensions)`
Returns
-------
numpy.ndarray
computed cost of size :code:`(n_particles, )`
"""
r = np.sum(100*(x.T[1:] - x.T[:-1]**2.0)**2 + (1-x.T[:-1])**2.0, axis=0)
return r | 5d89e22fde50032175b69f36a4c0031bfc07c2bb | 4,985 |
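A hypothetical usage sketch for rosenbrock_func (not part of the original record); the particle positions are illustrative.
import numpy as np
swarm = np.array([[1.0, 1.0], [0.0, 0.0], [2.0, 2.0]])  # 3 particles, 2 dimensions
print(rosenbrock_func(swarm))  # [  0.   1. 401.]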
import h5py
def isHdf5Dataset(obj):
"""Is `obj` an HDF5 Dataset?"""
return isinstance(obj, h5py.Dataset) | b674106e05d5f10585b58d246654987f174d2048 | 4,986 |
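A hypothetical usage sketch for isHdf5Dataset (not part of the original record), using an in-memory HDF5 file so nothing is written to disk.
import h5py, numpy as np
with h5py.File("example.h5", "w", driver="core", backing_store=False) as f:
    dset = f.create_dataset("x", data=np.arange(3))
    print(isHdf5Dataset(dset))  # True
    print(isHdf5Dataset(f))     # False (a File/Group is not a Dataset)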
import numpy
def writing_height(sample_wrapper, in_air):
"""
Returns writing height.
:param sample_wrapper: sample wrapper object
:type sample_wrapper: HandwritingSampleWrapper
:param in_air: in-air flag
:type in_air: bool
:return: writing height
:rtype: float
"""
# Get the on-surface/in-air sample data
sample = sample_wrapper.on_surface_data \
if not in_air \
else sample_wrapper.in_air_data
# Check the presence of sample data
if not sample:
return numpy.nan
# Return the writing height
return float(numpy.max(sample.y) - numpy.min(sample.y)) | fce6c0abcc65484088278eddd3bb77541725934c | 4,987 |
from sympy import Add
from sympy.physics.secondquant import substitute_dummies
def simplify_index_permutations(expr, permutation_operators):
"""
Performs simplification by introducing PermutationOperators where appropriate.
Schematically:
[abij] - [abji] - [baij] + [baji] -> P(ab)*P(ij)*[abij]
permutation_operators is a list of PermutationOperators to consider.
If permutation_operators=[P(ab),P(ij)] we will try to introduce the
permutation operators P(ij) and P(ab) in the expression. If there are other
possible simplifications, we ignore them.
>>> from sympy import symbols, Function
>>> from sympy.physics.secondquant import simplify_index_permutations
>>> from sympy.physics.secondquant import PermutationOperator
>>> p,q,r,s = symbols('p,q,r,s')
>>> f = Function('f')
>>> g = Function('g')
>>> expr = f(p)*g(q) - f(q)*g(p); expr
f(p)*g(q) - f(q)*g(p)
>>> simplify_index_permutations(expr,[PermutationOperator(p,q)])
f(p)*g(q)*PermutationOperator(p, q)
>>> PermutList = [PermutationOperator(p,q),PermutationOperator(r,s)]
>>> expr = f(p,r)*g(q,s) - f(q,r)*g(p,s) + f(q,s)*g(p,r) - f(p,s)*g(q,r)
>>> simplify_index_permutations(expr,PermutList)
f(p, r)*g(q, s)*PermutationOperator(p, q)*PermutationOperator(r, s)
"""
def _get_indices(expr, ind):
"""
Collects indices recursively in predictable order.
"""
result = []
for arg in expr.args:
if arg in ind:
result.append(arg)
else:
if arg.args:
result.extend(_get_indices(arg,ind))
return result
def _choose_one_to_keep(a,b,ind):
# we keep the one where indices in ind are in order ind[0] < ind[1]
if _get_indices(a,ind) < _get_indices(b,ind):
return a
else:
return b
expr = expr.expand()
if isinstance(expr,Add):
terms = set(expr.args)
for P in permutation_operators:
new_terms = set([])
on_hold = set([])
while terms:
term = terms.pop()
permuted = P.get_permuted(term)
if permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
# Some terms must get a second chance because the permuted
# term may already have canonical dummy ordering. Then
# substitute_dummies() does nothing. However, the other
# term, if it exists, will be able to match with us.
permuted1 = permuted
permuted = substitute_dummies(permuted)
if permuted1 == permuted:
on_hold.add(term)
elif permuted in terms | on_hold:
try:
terms.remove(permuted)
except KeyError:
on_hold.remove(permuted)
keep = _choose_one_to_keep(term, permuted, P.args)
new_terms.add(P*keep)
else:
new_terms.add(term)
terms = new_terms | on_hold
return Add(*terms)
return expr | 3a72459c9f9ee9e1f0f030fa96f5a38c0a1985c0 | 4,988 |
from typing import Set
from typing import Any
from typing import Tuple
import pandas as pd
def get_classification_outcomes(
confusion_matrix: pd.DataFrame,
classes: Set[Any],
class_name: str,
) -> Tuple[int, int, int, int]:
"""
Given a confusion matrix, this function counts the cases of:
- **True Positives** : classifications that accurately labeled a class
- **True Negatives** : classifications that accurately labeled an example as
not belonging to a class.
- **False Positives** : classifications that attributed the wrong label to an
example.
- **False Negatives** : classifications that falsely claimed that an example
does not belong to a class.
Args:
confusion_matrix: The result of calling [generate_confusion_matrix]
[toolbox.algorithms.learning.evaluation.generate_confusion_matrix]
classes: The set of all class labels
class_name: The name (label) of the class being evaluated.
Returns:
- `tp`: Count of True Positives
- `tn`: Count of True Negatives
- `fp`: Count of False Positives
- `fn`: Count of False Negatives
"""
excl_idx = classes.difference(set((class_name,)))
tp = confusion_matrix.loc[class_name, class_name]
tn = confusion_matrix.loc[excl_idx, excl_idx].sum().sum()
fp = confusion_matrix.loc[class_name, excl_idx].sum()
fn = confusion_matrix.loc[excl_idx, class_name].sum()
return (tp, tn, fp, fn) | c8d84aa5d84e9405539fa40cd05101ac84eda871 | 4,989 |
import numpy as np
def points_in_convex_polygon_3d_jit(points,
polygon_surfaces,
):
"""check points is in 3d convex polygons.
Args:
points: [num_points, 3] array.
polygon_surfaces: [num_polygon, max_num_surfaces,
max_num_points_of_surface, 3]
array. all surfaces' normal vector must direct to internal.
max_num_points_of_surface must at least 3.
num_surfaces: [num_polygon] array. indicate how many surfaces
a polygon contain
Returns:
[num_points, num_polygon] bool array.
"""
max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]
num_points = points.shape[0]
num_polygons = polygon_surfaces.shape[0]
num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)
normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])
# normal_vec: [num_polygon, max_num_surfaces, 3]
# d: [num_polygon, max_num_surfaces]
ret = np.ones((num_points, num_polygons), dtype=np.bool_)
sign = 0.0
for i in range(num_points):
for j in range(num_polygons):
for k in range(max_num_surfaces):
if k > num_surfaces[j]:
break
sign = points[i, 0] * normal_vec[j, k, 0] \
+ points[i, 1] * normal_vec[j, k, 1] \
+ points[i, 2] * normal_vec[j, k, 2] + d[j, k]
if sign >= 0:
ret[i, j] = False
break
return ret | b3834ec647fcb6b156f57a36e11dd5dd22bec1d9 | 4,990 |
def _get_spamassassin_flag_path(domain_or_user):
"""
Get the full path of the file who's existence is used as a flag to turn
SpamAssassin on.
Args:
domain_or_user - A full email address or a domain name
"""
domain = domain_or_user.lower()
user = False
if '@' in domain:
user, domain = domain.split('@')
sys_user = get_account_from_domain(domain)
if user:
return '/home/' + sys_user + '/etc/' + domain + '/' + user + '/enable_spamassassin'
else:
return '/home/' + sys_user + '/etc/' + domain + '/enable_spamassassin' | e29055f2cbe81dd7ad2083f5bfdc46d02b354dba | 4,991 |
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return '-'.join((number[:3], number[3:-1], number[-1])) | 90ad8360ef773a9386a122d3f44870a6b371d370 | 4,992 |
def get_terms(request):
"""Returns list of terms matching given query"""
if TEST_MODE:
thesaurus_name = request.params.get('thesaurus_name')
extract_name = request.params.get('extract_name')
query = request.params.get('term')
else:
thesaurus_name = request.validated.get('thesaurus_name')
extract_name = request.validated.get('extract_name')
query = request.validated.get('term')
if not (thesaurus_name or query):
return {}
thesaurus = query_utility(IThesaurus, name=thesaurus_name)
if thesaurus is None:
return {}
try:
return {
'results': [
{
'id': term.label,
'text': term.label
}
for term in unique(thesaurus.find_terms(query, extract_name,
exact=True, stemmed=True))
if term.status != STATUS_ARCHIVED
]
}
except ParseError:
return [] | b6e6810a1858de9da609b2e42b39a933ee9fbb04 | 4,993 |
from .. import sim
import __main__ as top
def createExportNeuroML2(netParams=None, simConfig=None, output=False, reference=None, connections=True, stimulations=True, format='xml'):
"""
Wrapper function create and export a NeuroML2 simulation
Parameters
----------
netParams : ``netParams object``
NetPyNE netParams object specifying network parameters.
**Default:** *required*.
simConfig : ``simConfig object``
NetPyNE simConfig object specifying simulation configuration.
**Default:** *required*.
output : bool
Whether or not to return output from the simulation.
**Default:** ``False`` does not return anything.
**Options:** ``True`` returns output.
reference : <``None``?>
<Short description of reference>
**Default:** ``None``
**Options:** ``<option>`` <description of option>
connections : bool
<Short description of connections>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
stimulations : bool
<Short description of stimulations>
**Default:** ``True``
**Options:** ``<option>`` <description of option>
format : str
<Short description of format>
**Default:** ``'xml'``
**Options:** ``<option>`` <description of option>
Returns
-------
data : tuple
If ``output`` is ``True``, returns (pops, cells, conns, stims, rxd, simData)
"""
if not netParams: netParams = top.netParams
if not simConfig: simConfig = top.simConfig
sim.initialize(netParams, simConfig) # create network object and set cfg and net params
pops = sim.net.createPops() # instantiate network populations
cells = sim.net.createCells() # instantiate network cells based on defined populations
conns = sim.net.connectCells() # create connections between cells based on params
stims = sim.net.addStims() # add external stimulation to cells (IClamps etc)
rxd = sim.net.addRxD() # add reaction-diffusion (RxD)
simData = sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.exportNeuroML2(reference, connections, stimulations,format) # export cells and connectivity to NeuroML 2 format
if output:
return (pops, cells, conns, stims, rxd, simData) | 84b3fb607ab30b17222143c46d839bab087c4916 | 4,994 |
from pathlib import Path
import pandas as pd
def load(file: str) -> pd.DataFrame:
"""Load custom file into dataframe. Currently will work with csv
Parameters
----------
file: str
Path to file
Returns
-------
pd.DataFrame:
Dataframe with custom data
"""
if not Path(file).exists():
return pd.DataFrame()
file_type = Path(file).suffix
# TODO More data types
if file_type != ".csv":
return pd.DataFrame()
return pd.read_csv(file) | 97ec2656b81bf722fcb34007f00e5abc9b2aa37e | 4,995 |
def better_get_first_model_each_manufacturer(car_db):
"""Uses map function and lambda to avoid code with side effects."""
result = map(lambda x: x[0], car_db.values())
# convert map to list
return list(result) | 8969c23bfe4df2b1c164dca6c4f929a62de5ba2a | 4,996 |
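A hypothetical usage sketch for better_get_first_model_each_manufacturer (not part of the original record); the car database is illustrative.
car_db = {"Ford": ["Fiesta", "Focus"], "Tesla": ["Model 3", "Model Y"]}
print(better_get_first_model_each_manufacturer(car_db))  # ['Fiesta', 'Model 3']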
import os
import inspect
import logging
from functools import wraps
def configurable_testcase(default_config_function):
"""Decorator to make a test case configurable."""
def internal_configurable_testcase(testcase):
_log_testcase_header(testcase.__name__, testcase.__doc__)
def wrapper_function(func, name, config, generate_default_func):
@wraps(func)
def _func(*a):
if generate_default_func:
generate_default_func(*a)
_releaseAllPorts()
_log_testcase_header(name, func.__doc__)
return func(*a, config_filename=config)
_func.__name__ = name
return _func
def generate_default(func, default_filename):
@wraps(func)
def _func(*a):
return func(*a, filename=default_filename)
return _func
# Create config directory for this function if it doesn't already exist.
harness_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
config_dir = os.path.join(harness_dir, 'testcases', 'configs',
testcase.__name__)
config_names = os.listdir(config_dir) if os.path.exists(config_dir) else []
# No existing configs => generate default config.
generate_default_func = None
if not config_names:
default_config_filename = os.path.join(config_dir, 'default.config')
logging.info("%s: Creating default config at '%s'", testcase.__name__,
default_config_filename)
generate_default_func = generate_default(default_config_function,
default_config_filename)
config_names.append('default.config')
# Run once for each config.
stack = inspect.stack()
frame = stack[1] # Picks the 'testcase' frame.
frame_locals = frame[0].f_locals
for i, config_name in enumerate(config_names):
base_config_name = os.path.splitext(config_name)[0]
name = '%s_%d_%s' % (testcase.__name__, i, base_config_name)
config_filename = os.path.join(config_dir, config_name)
frame_locals[name] = wrapper_function(testcase, name, config_filename,
generate_default_func)
return internal_configurable_testcase | 30082e0fa793ea9d45af8b2913eef95fc7dd7856 | 4,997 |
from collections.abc import Iterable
def _is_scalar(value):
"""Whether to treat a value as a scalar.
Any non-iterable, string, or 0-D array
"""
return (getattr(value, 'ndim', None) == 0
or isinstance(value, (str, bytes))
or not isinstance(value, (Iterable,))) | 725aa4a6002146ecb3dca3a17faa829e213cb3f7 | 4,998 |
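A hypothetical usage sketch for _is_scalar (not part of the original record); the sample values are illustrative.
import numpy as np
print(_is_scalar(3.5), _is_scalar("text"), _is_scalar(np.array(2.0)))  # True True True
print(_is_scalar([1, 2]), _is_scalar(np.array([1, 2])))                # False False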
def copy_random(x, y):
""" from 2 randInt calls """
seed = find_seed(x, y)
rand = JavaRandom(seed)
rand.next() # this will be y so we discard it
return rand | f1a1019ed7f012d83edca77ba3c7ccd2a806ee01 | 4,999 |