the-stack_106_18664
import os
import torch
import copy
from data_utils import datasets
from training.solver import Solver
from data_utils import distorters
# In a single experiment an arbitrary number of models can be trained sequentially. First the
# experimental setup is configured (setting base parameters and hyperparameter options) and then
# the whole process can be started using the run() method.
# Only a single dataset per experiment is supported.
# The results are stored to the 'results' directory. The class creates a new subfolder (experiment
# directory) equal to the experiment name (specified in the config). Under this directory it creates
# a results directory for each distinct hyperparameter combination (using increasing integers as
# names) and adds another layer of repetition subdirectories for each repetition.
# The information that is saved:
# - config.pt: experiment config and solver config (placed only once in experiment directory)
# - params.pt: hyperparameters in each results directory
# - log.pt: training log for a single training session (loss, intermediate evaluations)
# - eval.pt: final results (all metrics, per sample)
# - weights.pt: final weights of the model (from the best epoch)
# - examples.pt: example predictions for some selected samples from the validation set collected
# during training for later visualization
class Experiment:
def __init__(self, experiment_config, solver_config, base_hyperparams):
self.config = {
'experiment': experiment_config,
'solver': solver_config,
}
self.base_hyperparams = base_hyperparams
self.options = {}
# To perform a grid search, register the desired parameter by its name and specify a list of
# values to be tried in separate training sessions (see the usage sketch at the end of this
# module).
# IMPORTANT: The param_name argument must be a tuple of length 2, because the hyperparameter
# dictionary can be nested up to two levels. If the hyperparameter lives on the top level, the
# first entry of the tuple must be None and the second entry is the dictionary key. If the
# hyperparameter lives in a sub-dictionary B inside the hyperparameter dictionary A, the first
# entry is the key of B in A and the second entry is the key of the target parameter in B.
def add_options(self, param_name, values):
self.options[param_name] = values
self.config['solver']['interest_keys'].append(param_name)
def run(self):
distorter = self.base_hyperparams['distorter'](self.base_hyperparams['distorter_args'])
if self.config['experiment']['normalizer'] is not None:
train_data = datasets.NormalizedPairedPoseDataset(
self.config['experiment']['train_set'],
distorter,
self.config['experiment']['normalizer'],
self.config['experiment']['use_preset'],
self.config['experiment']['train_set_size'],
self.config['experiment']['target_device'])
val_data = datasets.NormalizedPairedPoseDataset(
self.config['experiment']['val_set'],
distorters.NoDistorter(),
self.config['experiment']['normalizer'],
True,
self.config['experiment']['val_set_size'],
self.config['experiment']['target_device'])
else:
train_data = datasets.PairedPoseDataset(self.config['experiment']['train_set'],
distorter,
self.config['experiment']['use_preset'],
self.config['experiment']['train_set_size'],
self.config['experiment']['target_device'])
val_data = datasets.PairedPoseDataset(self.config['experiment']['val_set'],
distorters.NoDistorter(),
True,
self.config['experiment']['val_set_size'],
self.config['experiment']['target_device'])
self.config['experiment']['train_set_size'] = len(train_data)
self.config['experiment']['val_set_size'] = len(val_data)
experiment_dir = os.path.join('results', self.config['experiment']['name'])
os.mkdir(experiment_dir)
torch.save(self.config, os.path.join(experiment_dir, 'config.pt'))
solver = Solver(self.config['solver'], train_data, val_data)
combinations_of_configs = self._generate_combinations()
for i, hyperparams in enumerate(combinations_of_configs):
print('\n\n' + '#' * 100)
print('START OF SESSION {}/{}'.format(i + 1, len(combinations_of_configs)))
results_dir = os.path.join(experiment_dir, str(i))
os.mkdir(results_dir)
torch.save(hyperparams, os.path.join(results_dir, 'params.pt'))
distorter = hyperparams['distorter'](hyperparams['distorter_args'])
train_data.distorter = distorter
for j in range(self.config['experiment']['n_repetitions']):
print('\nRepetition {}/{} ({}):'.format(j + 1,
self.config['experiment']['n_repetitions'],
self.config['experiment']['name']))
print('*' * 50)
if self.config['experiment']['deterministic_mode']:
torch.manual_seed(0)
model = self._create_model_and_normalizer(hyperparams)
log, eval_results, weights, example_predictions = solver.train(model, hyperparams)
repetition_dir = os.path.join(results_dir, str(j))
os.mkdir(repetition_dir)
torch.save(log, os.path.join(repetition_dir, 'log.pt'))
torch.save(eval_results, os.path.join(repetition_dir, 'eval.pt'))
torch.save(weights, os.path.join(repetition_dir, 'weights.pt'))
torch.save(example_predictions, os.path.join(repetition_dir, 'examples.pt'))
print('\nExperiment >> {} << finished.\n'.format(self.config['experiment']['name']))
# From the registered options this functions generates all possible combinations. Be careful,
# the number of training runs increases exponentially!
def _generate_combinations(self):
hyper_param_configs = [self.base_hyperparams]
for (sub_dict, param_name), values in self.options.items():
new_list = []
for i, config in enumerate(hyper_param_configs):
for val in values:
new_config = copy.deepcopy(config)
if sub_dict is None:
new_config[param_name] = val
else:
new_config[sub_dict][param_name] = val
new_list.append(new_config)
hyper_param_configs = new_list
return hyper_param_configs
def _create_model_and_normalizer(self, hyperparams):
model = hyperparams['model'](hyperparams['model_args'])
model.to(self.config['experiment']['target_device'])
if self.config['experiment']['init_weights_path'] is not None:
weights = torch.load(self.config['experiment']['init_weights_path'],
map_location=self.config['experiment']['target_device'])
model.load_state_dict(weights)
return model
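# Usage sketch (illustrative only, not part of the original module): MyModel and MyDistorter are
# hypothetical placeholders, and the config keys simply mirror the ones accessed in
# Experiment.run() and _create_model_and_normalizer() above.
#
#   experiment = Experiment(
#       experiment_config={'name': 'pose_refinement', 'train_set': 'train', 'val_set': 'val',
#                          'train_set_size': None, 'val_set_size': None, 'use_preset': False,
#                          'normalizer': None, 'target_device': 'cuda:0', 'n_repetitions': 3,
#                          'deterministic_mode': True, 'init_weights_path': None},
#       solver_config={'interest_keys': []},
#       base_hyperparams={'model': MyModel, 'model_args': {'hidden_size': 64},
#                         'distorter': MyDistorter, 'distorter_args': {}})
#   # Grid search over one top-level and one nested hyperparameter:
#   experiment.add_options((None, 'learning_rate'), [1e-3, 1e-4])
#   experiment.add_options(('model_args', 'hidden_size'), [64, 128])
#   experiment.run()  # trains 2 x 2 = 4 hyperparameter combinations, 3 repetitions each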
the-stack_106_18665
r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
import warnings
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError as err:
pass
# Catch syntax warnings after the first compile
# to emit warnings (SyntaxWarning, DeprecationWarning) at most once.
with warnings.catch_warnings():
warnings.simplefilter("error")
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
try:
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
finally:
err1 = err2 = None
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default), "exec"
or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT
def __call__(self, source, filename, symbol):
codeob = compile(source, filename, symbol, self.flags, 1)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
return codeob
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
def __init__(self,):
self.compiler = Compile()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(self.compiler, source, filename, symbol)
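# Illustrative example (not part of the original module): compile_command() returns a code object
# for complete input, None for incomplete input, and raises SyntaxError for invalid input.
if __name__ == "__main__":
    assert compile_command("x = 1") is not None   # complete statement -> code object
    assert compile_command("if True:") is None    # incomplete block -> None, i.e. expect more input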
the-stack_106_18668
import time
import sys
import multiprocessing
from collections import deque
import gym
import numpy as np
import tensorflow as tf
from stable_baselines import logger
from stable_baselines.common import explained_variance, ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from raisim_gym.archi.policies import LstmPolicy, ActorCriticPolicy
from stable_baselines.a2c.utils import total_episode_reward_logger
class PPO2(ActorCriticRLModel):
"""
    Proximal Policy Optimization algorithm (GPU version).
Paper: https://arxiv.org/abs/1707.06347
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) Discount factor
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param ent_coef: (float) Entropy coefficient for the loss calculation
:param learning_rate: (float or callable) The learning rate, it can be a function
:param vf_coef: (float) Value function coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
:param nminibatches: (int) Number of training minibatches per update. For recurrent policies,
the number of environments run in parallel should be a multiple of nminibatches.
:param noptepochs: (int) Number of epoch when optimizing the surrogate
:param cliprange: (float or callable) Clipping parameter, it can be a function
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
"""
def __init__(self, policy, env, gamma=0.99, n_steps=128, ent_coef=0.01, learning_rate=2.5e-4, vf_coef=0.5,
max_grad_norm=0.5, lam=0.95, nminibatches=4, noptepochs=4, cliprange=0.2, verbose=0,
tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False):
super(PPO2, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs)
self.learning_rate = learning_rate
self.cliprange = cliprange
self.n_steps = n_steps
self.ent_coef = ent_coef
self.vf_coef = vf_coef
self.max_grad_norm = max_grad_norm
self.gamma = gamma
self.lam = lam
self.nminibatches = nminibatches
self.noptepochs = noptepochs
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.graph = None
self.sess = None
self.action_ph = None
self.advs_ph = None
self.rewards_ph = None
self.old_neglog_pac_ph = None
self.old_vpred_ph = None
self.learning_rate_ph = None
self.clip_range_ph = None
self.entropy = None
self.vf_loss = None
self.pg_loss = None
self.approxkl = None
self.clipfrac = None
self.params = None
self._train = None
self.loss_names = None
self.train_model = None
self.act_model = None
self.step = None
self.proba_step = None
self.value = None
self.initial_state = None
self.n_batch = None
self.summary = None
self.episode_reward = None
if _init_setup_model:
self.setup_model()
def _get_pretrain_placeholders(self):
policy = self.act_model
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, self.action_ph, policy.policy
return policy.obs_ph, self.action_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the PPO2 model must be " \
"an instance of common.policies.ActorCriticPolicy."
self.n_batch = self.n_envs * self.n_steps
n_cpu = multiprocessing.cpu_count()
if sys.platform == 'darwin':
n_cpu //= 2
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.make_session(num_cpu=n_cpu, graph=self.graph)
n_batch_step = None
n_batch_train = None
if issubclass(self.policy, LstmPolicy):
assert self.n_envs % self.nminibatches == 0, "For recurrent policies, "\
"the number of environments run in parallel should be a multiple of nminibatches."
n_batch_step = self.n_envs
n_batch_train = self.n_batch // self.nminibatches
act_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs // self.nminibatches, self.n_steps, n_batch_train,
reuse=True, **self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.action_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
self.advs_ph = tf.placeholder(tf.float32, [None], name="advs_ph")
self.rewards_ph = tf.placeholder(tf.float32, [None], name="rewards_ph")
self.old_neglog_pac_ph = tf.placeholder(tf.float32, [None], name="old_neglog_pac_ph")
self.old_vpred_ph = tf.placeholder(tf.float32, [None], name="old_vpred_ph")
self.learning_rate_ph = tf.placeholder(tf.float32, [], name="learning_rate_ph")
self.clip_range_ph = tf.placeholder(tf.float32, [], name="clip_range_ph")
neglogpac = train_model.proba_distribution.neglogp(self.action_ph)
self.entropy = tf.reduce_mean(train_model.proba_distribution.entropy())
vpred = train_model._value
vpredclipped = self.old_vpred_ph + tf.clip_by_value(
train_model._value - self.old_vpred_ph, - self.clip_range_ph, self.clip_range_ph)
vf_losses1 = tf.square(vpred - self.rewards_ph)
vf_losses2 = tf.square(vpredclipped - self.rewards_ph)
self.vf_loss = .5 * tf.reduce_mean(tf.maximum(vf_losses1, vf_losses2))
ratio = tf.exp(self.old_neglog_pac_ph - neglogpac)
pg_losses = -self.advs_ph * ratio
pg_losses2 = -self.advs_ph * tf.clip_by_value(ratio, 1.0 - self.clip_range_ph, 1.0 +
self.clip_range_ph)
self.pg_loss = tf.reduce_mean(tf.maximum(pg_losses, pg_losses2))
self.approxkl = .5 * tf.reduce_mean(tf.square(neglogpac - self.old_neglog_pac_ph))
self.clipfrac = tf.reduce_mean(tf.cast(tf.greater(tf.abs(ratio - 1.0),
self.clip_range_ph), tf.float32))
loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef
tf.summary.scalar('entropy_loss', self.entropy)
tf.summary.scalar('policy_gradient_loss', self.pg_loss)
tf.summary.scalar('value_function_loss', self.vf_loss)
tf.summary.scalar('approximate_kullback-leiber', self.approxkl)
tf.summary.scalar('clip_factor', self.clipfrac)
tf.summary.scalar('loss', loss)
with tf.variable_scope('model'):
self.params = tf.trainable_variables()
if self.full_tensorboard_log:
for var in self.params:
tf.summary.histogram(var.name, var)
grads = tf.gradients(loss, self.params)
if self.max_grad_norm is not None:
grads, _grad_norm = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
trainer = tf.train.AdamOptimizer(learning_rate=self.learning_rate_ph, epsilon=1e-5)
self._train = trainer.apply_gradients(grads)
self.loss_names = ['policy_loss', 'value_loss', 'policy_entropy', 'approxkl', 'clipfrac']
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('discounted_rewards', tf.reduce_mean(self.rewards_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate_ph))
tf.summary.scalar('advantage', tf.reduce_mean(self.advs_ph))
tf.summary.scalar('clip_range', tf.reduce_mean(self.clip_range_ph))
tf.summary.scalar('old_neglog_action_probabilty', tf.reduce_mean(self.old_neglog_pac_ph))
tf.summary.scalar('old_value_pred', tf.reduce_mean(self.old_vpred_ph))
if self.full_tensorboard_log:
tf.summary.histogram('discounted_rewards', self.rewards_ph)
tf.summary.histogram('learning_rate', self.learning_rate_ph)
tf.summary.histogram('advantage', self.advs_ph)
tf.summary.histogram('clip_range', self.clip_range_ph)
tf.summary.histogram('old_neglog_action_probabilty', self.old_neglog_pac_ph)
tf.summary.histogram('old_value_pred', self.old_vpred_ph)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
self.train_model = train_model
self.act_model = act_model
self.step = act_model.step
self.proba_step = act_model.proba_step
self.value = act_model.value
self.initial_state = act_model.initial_state
tf.global_variables_initializer().run(session=self.sess) # pylint: disable=E1101
self.summary = tf.summary.merge_all()
def _train_step(self, learning_rate, cliprange, obs, returns, masks, actions, values, neglogpacs, update,
writer, states=None):
"""
Training of PPO2 Algorithm
:param learning_rate: (float) learning rate
:param cliprange: (float) Clipping factor
:param obs: (np.ndarray) The current observation of the environment
:param returns: (np.ndarray) the rewards
        :param masks: (np.ndarray) The last masks for done episodes (used in recurrent policies)
:param actions: (np.ndarray) the actions
:param values: (np.ndarray) the values
:param neglogpacs: (np.ndarray) Negative Log-likelihood probability of Actions
:param update: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:param states: (np.ndarray) For recurrent policies, the internal state of the recurrent model
:return: policy gradient loss, value function loss, policy entropy,
approximation of kl divergence, updated clipping range, training update operation
"""
advs = returns - values
advs = (advs - advs.mean()) / (advs.std() + 1e-8)
td_map = {self.train_model.obs_ph: obs, self.action_ph: actions, self.advs_ph: advs, self.rewards_ph: returns,
self.learning_rate_ph: learning_rate, self.clip_range_ph: cliprange,
self.old_neglog_pac_ph: neglogpacs, self.old_vpred_ph: values}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.masks_ph] = masks
if states is None:
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
else:
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + update) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % (update * update_fac))
else:
summary, policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train],
td_map)
writer.add_summary(summary, (update * update_fac))
else:
policy_loss, value_loss, policy_entropy, approxkl, clipfrac, _ = self.sess.run(
[self.pg_loss, self.vf_loss, self.entropy, self.approxkl, self.clipfrac, self._train], td_map)
return policy_loss, value_loss, policy_entropy, approxkl, clipfrac
def learn(self, total_timesteps, callback=None, seed=None, log_interval=1, tb_log_name="PPO2",
eval_every_n=5, reset_num_timesteps=True, record_video=False, log_dir=""):
        # Variables used for periodic model saving; one "iteration" here corresponds to one PPO update.
_savediter = 0
_counter = (200//eval_every_n)
# Transform to callable if needed
self.learning_rate = get_schedule_fn(self.learning_rate)
self.cliprange = get_schedule_fn(self.cliprange)
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn(seed)
runner = Runner(env=self.env, model=self, n_steps=self.n_steps, gamma=self.gamma, lam=self.lam)
self.episode_reward = np.zeros((self.n_envs,))
ep_info_buf = deque(maxlen=100)
t_first_start = time.time()
nupdates = total_timesteps // self.n_batch
for update in range(1, nupdates + 1):
                # Run the update step; a KeyboardInterrupt saves the current model and exits (see below).
try:
if update % eval_every_n == 1:
print("[RAISIM_GYM] Visualizing in RaiSimOgre")
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = \
runner.run(test_mode=True, record_video=record_video, video_name=log_dir+"/"+str(update-1)+".mp4")
print("Average rewards in this test episode ", ep_infos[0]['r'])
model_name = log_dir + "_Iteration_{}".format(update-1)
self.save(model_name)
print("Saving model " + model_name)
# tensorboard_log(logger, ep_infos, self.sess)
assert self.n_batch % self.nminibatches == 0
batch_size = self.n_batch // self.nminibatches
t_start = time.time()
frac = 1.0 - (update - 1.0) / nupdates
lr_now = self.learning_rate(frac)
cliprangenow = self.cliprange(frac)
# true_reward is the reward without discount
obs, returns, masks, actions, values, neglogpacs, states, ep_infos, true_reward = runner.run()
ep_info_buf.extend(ep_infos)
mb_loss_vals = []
if states is None: # nonrecurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs + 1
inds = np.arange(self.n_batch)
for epoch_num in range(self.noptepochs):
np.random.shuffle(inds)
for start in range(0, self.n_batch, batch_size):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_batch + epoch_num *
self.n_batch + start) // batch_size)
end = start + batch_size
mbinds = inds[start:end]
slices = (arr[mbinds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, writer=writer,
update=timestep))
self.num_timesteps += (self.n_batch * self.noptepochs) // batch_size * update_fac
else: # recurrent version
update_fac = self.n_batch // self.nminibatches // self.noptepochs // self.n_steps + 1
assert self.n_envs % self.nminibatches == 0
env_indices = np.arange(self.n_envs)
flat_indices = np.arange(self.n_envs * self.n_steps).reshape(self.n_envs, self.n_steps)
envs_per_batch = batch_size // self.n_steps
for epoch_num in range(self.noptepochs):
np.random.shuffle(env_indices)
for start in range(0, self.n_envs, envs_per_batch):
timestep = self.num_timesteps // update_fac + ((self.noptepochs * self.n_envs + epoch_num *
self.n_envs + start) // envs_per_batch)
end = start + envs_per_batch
mb_env_inds = env_indices[start:end]
mb_flat_inds = flat_indices[mb_env_inds].ravel()
slices = (arr[mb_flat_inds] for arr in (obs, returns, masks, actions, values, neglogpacs))
mb_states = states[mb_env_inds]
mb_loss_vals.append(self._train_step(lr_now, cliprangenow, *slices, update=timestep,
writer=writer, states=mb_states))
self.num_timesteps += (self.n_envs * self.noptepochs) // envs_per_batch * update_fac
loss_vals = np.mean(mb_loss_vals, axis=0)
t_now = time.time()
fps = int(self.n_batch / (t_now - t_start))
if writer is not None:
self.episode_reward = total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
                    # verbose >= 1 prints the training statistics to the terminal.
if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
explained_var = explained_variance(values, returns)
logger.logkv("serial_timesteps", update * self.n_steps)
logger.logkv("nupdates", update)
logger.logkv("total_timesteps", self.num_timesteps)
logger.logkv("fps", fps)
logger.logkv("explained_variance", float(explained_var))
if len(ep_info_buf) > 0 and len(ep_info_buf[0]) > 0:
logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in ep_info_buf]))
logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in ep_info_buf]))
logger.logkv('time_elapsed', t_start - t_first_start)
for (loss_val, loss_name) in zip(loss_vals, self.loss_names):
logger.logkv(loss_name, loss_val)
logger.dumpkvs()
if callback is not None:
# Only stop training if return value is False, not when it is None. This is for backwards
# compatibility with callbacks that have no return statement.
if callback(locals(), globals()) is False:
break
except KeyboardInterrupt:
print("You have stopped the learning process by keyboard interrupt. Model Parameter is saved. \n")
# You can actually save files using the instance of self. save the model parameters.
self.save(log_dir + "_Iteration_{}".format(update))
sys.exit()
return self
def save(self, save_path):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"lam": self.lam,
"nminibatches": self.nminibatches,
"noptepochs": self.noptepochs,
"cliprange": self.cliprange,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params = self.sess.run(self.params)
self._save_to_file(save_path, data=data, params=params, cloudpickle=True)
class Runner(AbstractEnvRunner):
def __init__(self, *, env, model, n_steps, gamma, lam):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
:param lam: (float) Factor for trade-off of bias vs variance for Generalized Advantage Estimator
"""
super().__init__(env=env, model=model, n_steps=n_steps)
self.lam = lam
self.gamma = gamma
def run(self, test_mode=False, record_video=False, video_name=""):
"""
Run a learning step of the model
:return:
- observations: (np.ndarray) the observations
- rewards: (np.ndarray) the rewards
- masks: (numpy bool) whether an episode is over or not
- actions: (np.ndarray) the actions
- values: (np.ndarray) the value function output
- negative log probabilities: (np.ndarray)
- states: (np.ndarray) the internal states of the recurrent policies
- infos: (dict) the extra information of the model
"""
# mb stands for minibatch
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_neglogpacs = [], [], [], [], [], []
mb_states = self.states
ep_infos = []
if test_mode:
self.env.show_window()
if record_video:
self.env.start_recording_video(video_name)
for _ in range(self.n_steps):
actions, values, self.states, neglogpacs = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(self.obs.copy())
mb_actions.append(actions)
mb_values.append(values)
mb_neglogpacs.append(neglogpacs)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
self.obs[:], rewards, self.dones, infos = self.env.step(clipped_actions, visualize=test_mode)
if np.isinf(rewards).any():
print("something wrong here")
for info in infos:
maybe_ep_info = info.get('episode')
if maybe_ep_info is not None:
ep_infos.append(maybe_ep_info)
mb_rewards.append(rewards)
if test_mode:
self.env.hide_window()
if record_video:
self.env.stop_recording_video()
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32)
if np.isinf(mb_rewards).any():
print("something wrong here")
mb_actions = np.asarray(mb_actions)
mb_values = np.asarray(mb_values, dtype=np.float32)
mb_neglogpacs = np.asarray(mb_neglogpacs, dtype=np.float32)
mb_dones = np.asarray(mb_dones, dtype=np.bool)
last_values = self.model.value(self.obs, self.states, self.dones)
# discount/bootstrap off value fn
mb_advs = np.zeros_like(mb_rewards)
true_reward = np.copy(mb_rewards)
if np.isinf(mb_rewards).any():
print("something wrong here")
last_gae_lam = 0
for step in reversed(range(self.n_steps)):
if step == self.n_steps - 1:
nextnonterminal = 1.0 - self.dones
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[step + 1]
nextvalues = mb_values[step + 1]
delta = mb_rewards[step] + self.gamma * nextvalues * nextnonterminal - mb_values[step]
mb_advs[step] = last_gae_lam = delta + self.gamma * self.lam * nextnonterminal * last_gae_lam
mb_returns = mb_advs + mb_values
np.set_printoptions(threshold=sys.maxsize)
mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward = \
map(swap_and_flatten, (mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, true_reward))
# resetting environments, added by Jemin
self.obs, infos = self.env.reset_and_update_info()
for info in infos:
ep_infos.append(info.get('episode'))
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_neglogpacs, mb_states, ep_infos, true_reward
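# Illustrative standalone sketch (not used by the classes above): the loop at the end of
# Runner.run() implements Generalized Advantage Estimation (GAE). The same recursion, written as
# a plain NumPy function over rollout arrays of shape (n_steps, n_envs):
def _gae_reference(rewards, values, dones, last_values, last_dones, gamma=0.99, lam=0.95):
    advantages = np.zeros_like(rewards)
    last_gae_lam = 0.0
    n_steps = rewards.shape[0]
    for step in reversed(range(n_steps)):
        if step == n_steps - 1:
            nextnonterminal = 1.0 - last_dones
            nextvalues = last_values
        else:
            nextnonterminal = 1.0 - dones[step + 1]
            nextvalues = values[step + 1]
        # TD residual, then the exponentially weighted (gamma * lam) accumulation backwards in time
        delta = rewards[step] + gamma * nextvalues * nextnonterminal - values[step]
        advantages[step] = last_gae_lam = delta + gamma * lam * nextnonterminal * last_gae_lam
    returns = advantages + values
    return returns, advantages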
def get_schedule_fn(value_schedule):
"""
Transform (if needed) learning rate and clip range
to callable.
:param value_schedule: (callable or float)
:return: (function)
"""
# If the passed schedule is a float
# create a constant function
if isinstance(value_schedule, float):
value_schedule = constfn(value_schedule)
else:
assert callable(value_schedule)
return value_schedule
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def swap_and_flatten(arr):
"""
swap and then flatten axes 0 and 1
:param arr: (np.ndarray)
:return: (np.ndarray)
"""
shape = arr.shape
return arr.swapaxes(0, 1).reshape(shape[0] * shape[1], *shape[2:])
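# For example, observations collected with shape (n_steps, n_envs, obs_dim) come out of
# swap_and_flatten() with shape (n_steps * n_envs, obs_dim), ordered environment by environment
# rather than step by step.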
def constfn(val):
"""
Create a function that returns a constant
It is useful for learning rate schedule (to avoid code duplication)
:param val: (float)
:return: (function)
"""
def func(_):
return val
return func
def safe_mean(arr):
"""
Compute the mean of an array if there is at least one element.
For empty array, return nan. It is used for logging only.
:param arr: (np.ndarray)
:return: (float)
"""
return np.nan if len(arr) == 0 else np.mean(arr)
the-stack_106_18669
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
import netaddr
from sysinv.common import constants
from sysinv.openstack.common import log
from sysinv.openstack.common.gettextutils import _
LOG = log.getLogger(__name__)
class InvalidProfileData(Exception):
pass
class Network(object):
def __init__(self, node, networkType):
self.networkType = networkType
self.providerNetworks = []
providerNetworksNode = node.find('providerNetworks')
        if providerNetworksNode is not None:
for pnetNode in providerNetworksNode.findall('providerNetwork'):
pnetName = pnetNode.get('name')
self.addProviderNetwork(pnetName)
def addProviderNetwork(self, pnet):
if pnet not in self.providerNetworks:
self.providerNetworks.append(pnet)
# ignore if provider network is duplicated within one interface
def validate(self):
if len(self.providerNetworks) == 0:
# caller will do the translation
raise InvalidProfileData("At least one provider network must be selected.")
class DataclassNetwork(Network):
def __init__(self, node):
super(DataclassNetwork, self).__init__(node, constants.NETWORK_TYPE_DATA)
self.ipv4Mode = DataclassNetwork.getIpMode(node, "ipv4")
self.ipv6Mode = DataclassNetwork.getIpMode(node, "ipv6")
self.routes = DataclassNetwork.getRoutes(node)
@staticmethod
def getRoutes(node):
routesNode = node.find('routes')
if routesNode is None:
return []
routes = []
for routeNode in routesNode.findall('route'):
route = {}
route['metric'] = int(routeNode.get('metric'))
network = routeNode.get('network')
gateway = routeNode.get('gateway')
try:
addr = netaddr.IPAddress(gateway)
except netaddr.core.AddrFormatError:
raise InvalidProfileData(_('%s is not a valid IP address') % gateway)
try:
net = netaddr.IPNetwork(network)
except netaddr.core.AddrFormatError:
raise InvalidProfileData(_('%s is not a valid network') % network)
if addr.format() != gateway:
raise InvalidProfileData(_('%s is not a valid IP address') % gateway)
if net.version != addr.version:
raise InvalidProfileData(_('network "%s" and gateway "%s" must be the same version.') %
(network, gateway))
route['network'] = net.network.format()
route['prefix'] = net.prefixlen
route['gateway'] = gateway
route['family'] = net.version
routes.append(route)
return routes
@staticmethod
def getIpMode(node, name):
modeNode = node.find(name)
if modeNode is None:
raise InvalidProfileData(_('%s is required for a datanetwork') % name)
mode = modeNode.get('mode')
pool = None
if mode == 'pool':
poolNode = modeNode.find('pool')
if poolNode is None:
raise InvalidProfileData(_('A pool is required for a %s defined as "pool"') % name)
pool = poolNode.get('name')
return {'mode': mode, 'pool': pool}
class ExternalNetwork(object):
def __init__(self, node, networktype):
self.networkType = networktype
def validate(self):
pass
class PciPassthrough(Network):
def __init__(self, node):
super(PciPassthrough, self).__init__(node, constants.NETWORK_TYPE_PCI_PASSTHROUGH)
class PciSriov(Network):
def __init__(self, node):
super(PciSriov, self).__init__(node, constants.NETWORK_TYPE_PCI_SRIOV)
self.virtualFunctions = int(node.get('virtualFunctions'))
class Interface(object):
def __init__(self, ifNode):
self.providerNetworks = []
self.networks = []
self.name = ifNode.get('ifName')
self.mtu = ifNode.get('mtu')
self.ipv4Mode = {'mode': None, 'pool': None}
self.ipv6Mode = {'mode': None, 'pool': None}
self.routes = []
self.virtualFunctions = 0
networksNode = ifNode.find('networks')
if networksNode is not None:
for netNode in networksNode:
self.addNetwork(netNode)
def getNetworkMap(self):
return {}
def addNetwork(self, node):
tag = node.tag
networkMap = self.getNetworkMap()
if tag in networkMap:
network = networkMap[tag](node)
self.networks.append(network)
if network.networkType == constants.NETWORK_TYPE_DATA:
self.ipv4Mode = network.ipv4Mode
self.ipv6Mode = network.ipv6Mode
self.routes = network.routes
elif network.networkType == constants.NETWORK_TYPE_INFRA:
self.ipv4Mode = {'mode': constants.IPV4_STATIC, 'pool': None}
self.ipv6Mode = {'mode': constants.IPV6_DISABLED, 'pool': None}
elif network.networkType == constants.NETWORK_TYPE_PCI_SRIOV:
self.virtualFunctions = network.virtualFunctions
if isinstance(network, Network):
self.providerNetworks = network.providerNetworks
else:
raise InvalidProfileData(_('network type (%s) not recognizable') % tag)
def validate(self):
# raise InvalidProfileData exception with detail msg
numberOfNetworks = len(self.networks)
if numberOfNetworks > 2:
raise InvalidProfileData(_('Too many network types selected for the interface.'))
# when change, make sure modify the displayText as well
combineTypes = [constants.NETWORK_TYPE_MGMT, constants.NETWORK_TYPE_INFRA, constants.NETWORK_TYPE_DATA]
displayText = _('Only mgmt, infra, data network types can be combined on a single interface')
if numberOfNetworks == 2:
if self.networks[0].networkType not in combineTypes or \
self.networks[1].networkType not in combineTypes:
raise InvalidProfileData(displayText)
if self.networks[0].networkType == self.networks[1].networkType:
raise InvalidProfileData(_('Interface can not combine with 2 networks with the same type.'))
# if self.networks[0].networkType == constants.NETWORK_TYPE_INFRA or self.networks[1].networkType == constants.NETWORK_TYPE_INFRA and \
# self.ipv6Mode != None and self.ipv4Mode != 'dhcp':
try:
for network in self.networks:
network.validate()
except InvalidProfileData as e:
raise InvalidProfileData(_(e.message + ' Interface: %s') % self.name)
def getNetworks(self):
pnets = ''
networkTypes = ''
hasNT = False
for network in self.networks:
if network.networkType is None:
continue
hasNT = True
if networkTypes:
networkTypes += ','
networkTypes = networkTypes + network.networkType
if hasattr(network, 'providerNetworks'):
# there should be only one network has providerNetwork
for pnet in network.providerNetworks:
if pnets:
pnets += ','
pnets = pnets + pnet
if not hasNT:
networkTypes = None
pnets = None
return networkTypes, pnets
class EthInterface(Interface):
def __init__(self, ifNode):
super(EthInterface, self).__init__(ifNode)
self.port, self.pciAddress, self.pclass, self.pdevice = self.getPort(ifNode)
def getPort(self, ifNode):
portNode = ifNode.find('port')
if portNode is None:
raise InvalidProfileData(_('Ethernet interface %s requires an Ethernet port ') %
ifNode.get('ifName'))
pciAddress = ''
tmp = portNode.get('pciAddress')
try:
pciAddress = EthInterface.formatPciAddress(tmp)
except InvalidProfileData as exc:
raise InvalidProfileData(exc.message + _('Interface %s, pciAddress %s') % (ifNode.get('ifName'), tmp))
pclass = portNode.get('class')
if pclass:
pclass = pclass.strip()
pdevice = portNode.get('device')
if pdevice:
pdevice = pdevice.strip()
return portNode.get('name'), pciAddress, pclass, pdevice
@staticmethod
def formatPciAddress(value):
# To parse a [X]:[X]:[X].[X] formatted pci address into [04x]:[02x]:[02x].[01x] pci address format
if value:
section_list1 = value.split(':')
else:
return ''
if len(section_list1) != 3:
raise InvalidProfileData(_('pciAddress is not well formatted.'))
section_list2 = section_list1[2].split('.')
if len(section_list2) != 2:
raise InvalidProfileData(_('pciAddress is not well formatted.'))
try:
sec1 = int(section_list1[0], 16)
sec2 = int(section_list1[1], 16)
sec3 = int(section_list2[0], 16)
sec4 = int(section_list2[1], 16)
except (TypeError, ValueError):
raise InvalidProfileData(_('pciAddress is not well formatted.'))
result = '{0:04x}:{1:02x}:{2:02x}.{3:01x}'.format(sec1, sec2, sec3, sec4)
return result
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'infraNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_INFRA),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT),
'pciPassthrough': lambda node: PciPassthrough(node),
'pciSriov': lambda node: PciSriov(node)
}
class AeInterface(Interface):
def __init__(self, ifNode):
super(AeInterface, self).__init__(ifNode)
self.usesIf = []
aeModeNode = ifNode.find('aeMode') # aeMode is mandatory required by schema
node = aeModeNode[0] # it is mandatory required by schema
if node.tag == 'activeStandby':
self.aeMode = 'activeStandby'
self.txPolicy = None
elif node.tag == 'balanced':
self.aeMode = 'balanced'
self.txPolicy = node.get('txPolicy')
elif node.tag == 'ieee802.3ad':
self.aeMode = '802.3ad'
self.txPolicy = node.get('txPolicy')
node = ifNode.find('interfaces')
        if node is not None:
for usesIfNode in node.findall('interface'):
self.addUsesIf(usesIfNode.get('name'))
def addUsesIf(self, ifName):
if not ifName:
raise InvalidProfileData(_('Interface name value cannot be empty.'))
if ifName == self.name:
            raise InvalidProfileData(_('Aggregated ethernet interface (%s) cannot use itself.') % self.name)
if ifName not in self.usesIf:
self.usesIf.append(ifName)
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'infraNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_INFRA),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
}
def validateWithIfNames(self, allInterfaceNames):
# raise InvalidProfileData exception if invalid
if len(self.usesIf) == 0:
            msg = _('Aggregated ethernet interface (%s) should have at least one interface.') % self.name
raise InvalidProfileData(msg)
for usesIfName in self.usesIf:
if usesIfName not in allInterfaceNames:
                msg = _('Aggregated ethernet interface (%s) uses an undeclared interface (%s)') % \
                    (self.name, usesIfName)
raise InvalidProfileData(msg)
super(AeInterface, self).validate()
class VlanInterface(Interface):
def __init__(self, ifNode):
super(VlanInterface, self).__init__(ifNode)
self.vlanId = int(ifNode.get('vlanId'))
usesIf = ifNode.get('interface')
if not usesIf:
raise InvalidProfileData(_('<usesIf> value cannot be empty.'))
if usesIf == self.name:
raise InvalidProfileData(_('vlan interface (%s) cannot use itself.') % self.name)
self.usesIfName = usesIf
self.usesIf = [usesIf]
def getNetworkMap(self):
return {
'dataclassNetwork': lambda node: DataclassNetwork(node),
'infraNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_INFRA),
'oamNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_OAM),
'mgmtNetwork': lambda node: ExternalNetwork(node, constants.NETWORK_TYPE_MGMT)
}
@staticmethod
def isEthInterface(ifName, ethIfMap):
return ifName in ethIfMap
def validateWithIfNames(self, allInterfaceNames, aeIfMap, vlanIfMap, ethIfMap):
# raise InvalidProfileData exception if invalid
if self.usesIfName not in allInterfaceNames:
            msg = _('vlan interface (%s) uses an undeclared interface (%s)') % \
                (self.name, self.usesIfName)
raise InvalidProfileData(msg)
isEthIf = self.isEthInterface(self.usesIfName, ethIfMap)
good = True
if not isEthIf:
ifNameToCheck = [self.usesIfName]
while len(ifNameToCheck) > 0:
ifName = ifNameToCheck.pop(0)
if ifName in aeIfMap:
aeIf = aeIfMap[ifName]
for n in aeIf.usesIf:
ifNameToCheck.append(n)
elif ifName in vlanIfMap:
good = False
                    break  # not good, a vlan in the uses tree
if not good:
raise InvalidProfileData(_('A vlan interface cannot use a vlan interface.'))
super(VlanInterface, self).validate()
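# Example of the normalization performed by EthInterface.formatPciAddress() above:
#   '0:1c:2.0'      ->  '0000:1c:02.0'
#   '0000:02:04.1'  ->  '0000:02:04.1'  (already normalized addresses pass through unchanged)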
the-stack_106_18672
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=3
# total number=10
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.rx(-0.09738937226128368).on(input_qubit[2])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=3
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=4
c.append(cirq.Z.on(input_qubit[1])) # number=9
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=5
c.append(cirq.X.on(input_qubit[1])) # number=6
c.append(cirq.Z.on(input_qubit[1])) # number=8
c.append(cirq.X.on(input_qubit[1])) # number=7
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq54.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
the-stack_106_18674
from marshmallow import (
fields,
post_load,
Schema,
validate,
validates_schema,
ValidationError,
)
from .state import State
class MulticloudStackSchema(Schema):
stack_name = fields.Str(required=True, validate=[validate.Length(min=1)])
count = fields.Int(required=True, validate=[validate.Range(min=0)])
count_parameter = fields.Str(required=True)
weights = fields.Dict(
required=True, keys=fields.Str(), values=fields.Float()
)
@validates_schema
def validate_weights(self, data, **kwargs):
total_weight = sum(data["weights"].values())
if total_weight > 1:
raise ValidationError(
f"Total cloud weight over 1 (total weight: {total_weight})",
"weights",
)
@post_load
def make_multicloud_stack(self, data, **kwargs):
return MulticloudStack(**data)
class MulticloudStackListSchema(Schema):
stacks = fields.List(fields.Nested(MulticloudStackSchema))
class MulticloudStack(State):
schema = MulticloudStackSchema
list_schema = MulticloudStackListSchema
    def __init__(self, stack_name, count, count_parameter, weights=None):
        self.stack_name = stack_name
        self.count = count
        self.count_parameter = count_parameter
        # Avoid a mutable default argument; fall back to a fresh empty dict per instance.
        self.weights = weights if weights is not None else {}
def __eq__(self, other):
return (
self.stack_name == other.stack_name
and self.count == other.count
and self.count_parameter == other.count_parameter
and self.weights == other.weights
)
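# Illustrative usage (a minimal sketch, not part of the original module): deserializing through
# the schema validates the payload and, via the post_load hook, returns a MulticloudStack.
#
#   stack = MulticloudStackSchema().load({
#       "stack_name": "web",
#       "count": 4,
#       "count_parameter": "InstanceCount",   # hypothetical parameter name
#       "weights": {"aws": 0.6, "gcp": 0.4},  # total weight must not exceed 1
#   })
#   assert isinstance(stack, MulticloudStack) and stack.count == 4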
the-stack_106_18676
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class MrpRoutingWorkcenter(models.Model):
_name = 'mrp.routing.workcenter'
_description = 'Work Center Usage'
_order = 'sequence, id'
_check_company_auto = True
name = fields.Char('Operation', required=True)
workcenter_id = fields.Many2one('mrp.workcenter', 'Work Center', required=True, check_company=True)
sequence = fields.Integer(
'Sequence', default=100,
help="Gives the sequence order when displaying a list of routing Work Centers.")
bom_id = fields.Many2one(
'mrp.bom', 'Bill of Material', check_company=True,
index=True, ondelete='cascade',
help="The Bill of Material this operation is linked to")
company_id = fields.Many2one(
'res.company', 'Company', default=lambda self: self.env.company)
worksheet_type = fields.Selection([
('pdf', 'PDF'), ('google_slide', 'Google Slide'), ('text', 'Text')],
string="Work Sheet", default="text",
help="Defines if you want to use a PDF or a Google Slide as work sheet."
)
note = fields.Text('Description', help="Text worksheet description")
worksheet = fields.Binary('PDF')
worksheet_google_slide = fields.Char('Google Slide', help="Paste the url of your Google Slide. Make sure the access to the document is public.")
time_mode = fields.Selection([
('auto', 'Compute based on tracked time'),
('manual', 'Set duration manually')], string='Duration Computation',
default='manual')
time_mode_batch = fields.Integer('Based on', default=10)
time_cycle_manual = fields.Float(
'Manual Duration', default=60,
help="Time in minutes:"
"- In manual mode, time used"
"- In automatic mode, supposed first time when there aren't any work orders yet")
time_cycle = fields.Float('Duration', compute="_compute_time_cycle")
workorder_count = fields.Integer("# Work Orders", compute="_compute_workorder_count")
workorder_ids = fields.One2many('mrp.workorder', 'operation_id', string="Work Orders")
@api.depends('time_cycle_manual', 'time_mode', 'workorder_ids')
def _compute_time_cycle(self):
manual_ops = self.filtered(lambda operation: operation.time_mode == 'manual')
for operation in manual_ops:
operation.time_cycle = operation.time_cycle_manual
for operation in self - manual_ops:
data = self.env['mrp.workorder'].read_group([
('operation_id', '=', operation.id),
('qty_produced', '>', 0),
('state', '=', 'done')], ['operation_id', 'duration', 'qty_produced'], ['operation_id'],
limit=operation.time_mode_batch)
count_data = dict((item['operation_id'][0], (item['duration'], item['qty_produced'])) for item in data)
if count_data.get(operation.id) and count_data[operation.id][1]:
operation.time_cycle = (count_data[operation.id][0] / count_data[operation.id][1]) * (operation.workcenter_id.capacity or 1.0)
else:
operation.time_cycle = operation.time_cycle_manual
def _compute_workorder_count(self):
data = self.env['mrp.workorder'].read_group([
('operation_id', 'in', self.ids),
('state', '=', 'done')], ['operation_id'], ['operation_id'])
count_data = dict((item['operation_id'][0], item['operation_id_count']) for item in data)
for operation in self:
operation.workorder_count = count_data.get(operation.id, 0)
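# Illustrative note on _compute_time_cycle() above: in 'auto' mode the duration is the total
# tracked time of the matching finished work orders divided by the total quantity produced,
# scaled by the work center capacity. For example, 120 tracked minutes for 60 produced units on
# a work center with capacity 2 gives time_cycle = (120 / 60) * 2 = 4 minutes.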
the-stack_106_18677
import eHive
import os
import subprocess
class CompressBybgzip(eHive.BaseRunnable):
"""Store the file in the DB"""
def param_defaults(self):
return {
'compression' : None
}
def run(self):
self.warning('Compressing file: %s'% self.param_required('filename'))
filename = self.param_required('filename')
basename = os.path.basename(filename).split('.')[0]
outfile = filename+".bgz"
command = "{0}/bgzip -c {1} > {2}".format(self.param_required('bgzip_folder'),
filename, outfile)
try:
subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError as e:
print(e.output)
if self.param_required('delete_uncompressed') == 'True':
os.remove(filename)
if self.param_required('create_index') == 'True':
outfile_ix = outfile+".tbi"
command_ix = "{0}/tabix {1}".format(self.param_required('tabix_folder'), outfile)
try:
subprocess.check_output(command_ix, shell=True)
self.param('compressed_file_ix', outfile_ix)
except subprocess.CalledProcessError as e:
print(e.output)
self.param('compressed_file', outfile)
def write_output(self):
self.warning('Work is done!')
self.dataflow({'compressed_file': self.param('compressed_file')}, 1)
self.dataflow({'compressed_file_ix': self.param('compressed_file_ix')}, 1)
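# The runnable above shells out to the htslib tools; the commands it issues are equivalent to
# (paths shown are illustrative):
#   <bgzip_folder>/bgzip -c <filename> > <filename>.bgz
#   <tabix_folder>/tabix <filename>.bgz    # only when create_index is 'True'; writes <filename>.bgz.tbi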
the-stack_106_18679
import logging
import warnings
from string import Template
from great_expectations.datasource.types import SqlAlchemyDatasourceTableBatchKwargs
from great_expectations.exceptions import BatchKwargsError, GreatExpectationsError
from great_expectations.marshmallow__shade import (
Schema,
ValidationError,
fields,
post_load,
)
from .batch_kwargs_generator import BatchKwargsGenerator
logger = logging.getLogger(__name__)
try:
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.engine import reflection
except ImportError:
sqlalchemy = None
create_engine = None
reflection = None
logger.debug("Unable to import sqlalchemy.")
class AssetConfigurationSchema(Schema):
table = fields.Str()
schema = fields.Str()
@post_load
def make_asset_configuration(self, data, **kwargs):
return AssetConfiguration(**data)
class AssetConfiguration:
def __init__(self, table, schema=None):
self.__table = table
self.__schema = schema
@property
def table(self):
return self.__table
@property
def schema(self):
return self.__schema
assetConfigurationSchema = AssetConfigurationSchema()
class TableBatchKwargsGenerator(BatchKwargsGenerator):
"""Provide access to already materialized tables or views in a database.
TableBatchKwargsGenerator can be used to define specific data asset names that take and substitute parameters,
for example to support referring to the same data asset but with different schemas depending on provided
batch_kwargs.
The python template language is used to substitute table name portions. For example, consider the
following configurations::
my_generator:
class_name: TableBatchKwargsGenerator
assets:
my_table:
schema: $schema
table: my_table
    In that case, the asset my_datasource/my_generator/my_table will refer to a table called my_table in a schema
defined in batch_kwargs.
"""
recognized_batch_parameters = {
"data_asset_name",
"limit",
"offset",
"query_parameters",
}
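    # Illustrative example: with the configuration shown in the class docstring, calling
    # _get_iterator("my_table", query_parameters={"schema": "prod"}) yields batch kwargs
    # equivalent to {"table": "my_table", "schema": "prod"}.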
def __init__(self, name="default", datasource=None, assets=None):
super().__init__(name=name, datasource=datasource)
if not assets:
assets = {}
try:
self._assets = {
asset_name: assetConfigurationSchema.load(asset_config)
for (asset_name, asset_config) in assets.items()
}
except ValidationError as err:
raise GreatExpectationsError(
"Unable to load asset configuration in TableBatchKwargsGenerator '%s': "
"validation error: %s." % (name, str(err))
)
if datasource is not None:
self.engine = datasource.engine
try:
self.inspector = sqlalchemy.inspect(self.engine)
except sqlalchemy.exc.OperationalError:
logger.warning(
"Unable to create inspector from engine in batch kwargs generator '%s'"
% name
)
self.inspector = None
def _get_iterator(
self,
data_asset_name,
query_parameters=None,
limit=None,
offset=None,
partition_id=None,
):
batch_kwargs = None
# First, we check if we have a configured asset
if data_asset_name in self._assets:
asset_config = self._assets[data_asset_name]
try:
if query_parameters is None:
query_parameters = {}
table_name = Template(asset_config.table).substitute(query_parameters)
schema_name = None
if asset_config.schema is not None:
schema_name = Template(asset_config.schema).substitute(
query_parameters
)
except KeyError:
raise BatchKwargsError(
"Unable to generate batch kwargs for asset '"
+ data_asset_name
+ "': "
"missing template key",
{
"data_asset_name": data_asset_name,
"table_template": asset_config.table,
"schema_template": asset_config.schema,
},
)
batch_kwargs = SqlAlchemyDatasourceTableBatchKwargs(
table=table_name, schema=schema_name
)
# If this is not a manually configured asset, we fall back to inspection of the database
elif self.engine is not None and self.inspector is not None:
split_data_asset_name = data_asset_name.split(".")
if len(split_data_asset_name) == 2:
schema_name = split_data_asset_name[0]
if self.engine.dialect.name.lower() == "bigquery":
table_name = data_asset_name
else:
table_name = split_data_asset_name[1]
elif len(split_data_asset_name) == 1:
schema_name = self.inspector.default_schema_name
table_name = split_data_asset_name[0]
else:
raise ValueError(
"Table name must be of shape '[SCHEMA.]TABLE'. Passed: "
                    + data_asset_name
)
tables = self.inspector.get_table_names(schema=schema_name)
try:
tables.extend(self.inspector.get_view_names(schema=schema_name))
except NotImplementedError:
# Not implemented by bigquery dialect
pass
if table_name in tables:
batch_kwargs = SqlAlchemyDatasourceTableBatchKwargs(
table=table_name, schema=schema_name
)
else:
raise BatchKwargsError(
"TableBatchKwargsGenerator cannot access the following data:"
f"SCHEMA : {schema_name}"
f"TABLE : {table_name}",
{},
)
if batch_kwargs is not None:
if partition_id is not None:
logger.warning(
"table_generator cannot identify partitions; provided partition id will be recorded "
"only"
)
batch_kwargs["partition_id"] = partition_id
if limit is not None:
batch_kwargs["limit"] = limit
if offset is not None:
batch_kwargs["offset"] = offset
return iter([batch_kwargs])
# Otherwise, we return None
return
def get_available_data_asset_names(self):
# TODO: limit and is_complete_list logic
is_complete_list = True
defined_assets = list(self._assets.keys())
tables = []
if self.engine is not None and self.inspector is not None:
for schema_name in self.inspector.get_schema_names():
known_information_schemas = [
"INFORMATION_SCHEMA", # snowflake, mssql, mysql, oracle
"information_schema", # postgres, redshift, mysql
"performance_schema", # mysql
"sys", # mysql
"mysql", # mysql
]
known_system_tables = ["sqlite_master"] # sqlite
if schema_name in known_information_schemas:
continue
if self.engine.dialect.name.lower() == "bigquery":
tables.extend(
[
(table_name, "table")
for table_name in self.inspector.get_table_names(
schema=schema_name
)
if table_name not in known_system_tables
]
)
else:
# set default_schema_name
if self.engine.dialect.name.lower() == "sqlite":
# Workaround for compatibility with sqlalchemy < 1.4.0 and is described in issue #2641
default_schema_name = None
else:
default_schema_name = self.inspector.default_schema_name
tables.extend(
[
(table_name, "table")
if default_schema_name == schema_name
else (schema_name + "." + table_name, "table")
for table_name in self.inspector.get_table_names(
schema=schema_name
)
if table_name not in known_system_tables
]
)
try:
tables.extend(
[
(table_name, "view")
if default_schema_name == schema_name
else (schema_name + "." + table_name, "view")
for table_name in self.inspector.get_view_names(
schema=schema_name
)
if table_name not in known_system_tables
]
)
except NotImplementedError:
# Not implemented by bigquery dialect
pass
return {"names": defined_assets + tables, "is_complete_list": is_complete_list}
def _build_batch_kwargs(self, batch_parameters):
return next(
self._get_iterator(
data_asset_name=batch_parameters.get("data_asset_name"),
query_parameters=batch_parameters.get("query_parameters", {}),
limit=batch_parameters.get("limit"),
offset=batch_parameters.get("offset"),
)
)
# TODO: deprecate generator_asset argument
def get_available_partition_ids(self, generator_asset=None, data_asset_name=None):
assert (generator_asset and not data_asset_name) or (
not generator_asset and data_asset_name
), "Please provide either generator_asset or data_asset_name."
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
raise BatchKwargsError(
"TableBatchKwargsGenerator cannot identify partitions, however any existing table may"
"already be referenced by accessing a data_asset with the name of the "
"table or of the form SCHEMA.TABLE",
{},
)
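# ---------------------------------------------------------------------------
# Hedged usage sketch added for illustration; it is not part of the original
# module. It shows how a configured asset's template placeholders are resolved
# into table batch_kwargs. The asset name, table template and query_parameters
# below are made up, and _get_iterator is called directly here although it is
# normally driven through the datasource / DataContext API.
if __name__ == "__main__":
    example_generator = TableBatchKwargsGenerator(
        name="example_generator",
        datasource=None,  # a configured asset does not need a live engine
        assets={"my_table": {"schema": "$schema", "table": "events_$suffix"}},
    )
    example_kwargs = next(
        example_generator._get_iterator(
            "my_table",
            query_parameters={"schema": "main", "suffix": "2020"},
        )
    )
    print(example_kwargs)  # table "events_2020" in schema "main"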
|
the-stack_106_18680
|
__author__ = "Maja Bojarska"
import logging
import threading
import RPi.GPIO as GPIO
from . import battery_guard
from . import gamepad
from . import motor_controller
logging.getLogger(__name__)
class Tadpole(threading.Thread):
""" Class for controlling the Tadpole vehicle. """
def __init__(self):
super(Tadpole, self).__init__()
self.motor_ctrl = motor_controller.MotorController()
self.xbox_pad = gamepad.Gamepad(invert_y=True)
self.xbox_pad.start()
self.battery_guard = battery_guard.BatteryGuard()
self.battery_guard.start()
def __del__(self):
GPIO.cleanup()
def run(self):
""" Main activity thread method. """
while True:
if not self.battery_guard.is_battery_ok:
self.motor_ctrl.stop()
logging.info("Battery low, switching to standby.")
while not self.battery_guard.is_battery_ok:
self.xbox_pad.get_xy_vector_from_queue(block=True)
new_xy_vector = self.xbox_pad.get_xy_vector_from_queue()
if new_xy_vector:
self.motor_ctrl.handle_vector_input(new_xy_vector)
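# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: starting the control
# loop. It assumes the code runs on a Raspberry Pi with the gamepad, motor
# controller and battery monitor hardware attached, which is why it is kept
# behind the __main__ guard.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    vehicle = Tadpole()
    vehicle.start()
    vehicle.join()  # run until interrupted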
|
the-stack_106_18683
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sphinx_rtd_theme
import sys
_pysrc = os.path.abspath(os.path.join(os.path.abspath(__file__), '..', '..', '..'))
sys.path.insert(0, _pysrc)
autodoc_mock_imports = ["pyspark", "tensorflow"]
# -- Project information -----------------------------------------------------
project = 'TensorFlowOnSpark'
copyright = '2019, Yahoo Inc'
author = 'Yahoo Inc'
# The short X.Y version
version = '2.1.3'
# The full version, including alpha/beta/rc tags
release = '2.1.3'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx_rtd_theme'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TensorFlowOnSparkdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TensorFlowOnSpark.tex', 'TensorFlowOnSpark Documentation',
'Lee Yang', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorflowonspark', 'TensorFlowOnSpark Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TensorFlowOnSpark', 'TensorFlowOnSpark Documentation',
author, 'TensorFlowOnSpark', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
|
the-stack_106_18685
|
from datetime import datetime
import os
import boto3
S3_CLIENT = boto3.client('s3')
PROJECT_NAME = os.getenv('PROJECT_NAME')
def read_from(key):
params = {
'Bucket': PROJECT_NAME,
'Key': key
}
try:
response = S3_CLIENT.get_object(**params)
except S3_CLIENT.exceptions.NoSuchKey:
return None
else:
return response['Body'].read()
def write_to(key, content, content_type):
params = {
'Bucket': PROJECT_NAME,
'ACL': 'public-read',
'Key': key,
'Body': content,
'ContentType': content_type
}
return S3_CLIENT.put_object(**params)
def archive(content, content_type, path=''):
ext = content_type.split(";")[0].split("/")[-1]
latest_key = f"{path}/latest.{ext}"
previous_content = read_from(latest_key)
has_diffs = content != previous_content
if has_diffs:
recorded_at_key = f"{path}/{datetime.now()}.{ext}"
write_to(latest_key, content, content_type)
write_to(recorded_at_key, content, content_type)
return has_diffs
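# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: archiving a fetched
# document under a key prefix. It assumes PROJECT_NAME names an existing,
# writable S3 bucket and that boto3 can find AWS credentials; the URL and the
# "example" prefix are illustrative only.
if __name__ == "__main__":
    import requests  # used only by this example
    response = requests.get("https://example.com/data.json")
    changed = archive(
        response.content,
        response.headers.get("Content-Type", "application/json"),
        path="example",
    )
    print("new version stored" if changed else "no changes since last run")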
|
the-stack_106_18686
|
"""
MIT License
Copyright (c) present TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import asyncio
from base64 import b64decode
from io import BytesIO
from random import randint
import aiofiles
import requests
from bs4 import BeautifulSoup
from pyrogram import filters
from pyrogram.types import InputMediaPhoto, Message
from wbb import MESSAGE_DUMP_CHAT, SUDOERS, USERBOT_PREFIX, app, app2, eor
from wbb.core.decorators.errors import capture_err
from wbb.utils.functions import get_file_id_from_message
from wbb.utils.http import get
async def get_soup(url: str, headers):
html = await get(url, headers=headers)
return BeautifulSoup(html, "html.parser")
@app2.on_message(
filters.command("reverse", prefixes=USERBOT_PREFIX)
& SUDOERS
)
@app.on_message(
filters.command("reverse")
)
@capture_err
async def reverse_image_search(client, message: Message):
if not message.reply_to_message:
return await eor(
message, text="Reply to a message to reverse search it."
)
reply = message.reply_to_message
if (
not reply.document
and not reply.photo
and not reply.sticker
and not reply.animation
and not reply.video
):
return await eor(
message,
text="Reply to an image/document/sticker/animation to reverse search it.",
)
m = await eor(message, text="Searching...")
file_id = get_file_id_from_message(reply)
if not file_id:
return await m.edit("Can't reverse that")
image = await client.download_media(file_id, f"{randint(1000, 10000)}.jpg")
async with aiofiles.open(image, "rb") as f:
if image:
search_url = "http://www.google.com/searchbyimage/upload"
multipart = {
"encoded_image": (image, await f.read()),
"image_content": "",
}
def post_non_blocking():
return requests.post(
search_url, files=multipart, allow_redirects=False
)
loop = asyncio.get_running_loop()
response = await loop.run_in_executor(None, post_non_blocking)
location = response.headers.get("Location")
os.remove(image)
else:
return await m.edit("Something wrong happened.")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:58.0) Gecko/20100101 Firefox/58.0"
}
try:
soup = await get_soup(location, headers=headers)
div = soup.find_all("div", {"class": "r5a77d"})[0]
text = div.find("a").text
text = f"**Result**: [{text}]({location})"
except Exception:
return await m.edit(
f"**Result**: [Link]({location})",
disable_web_page_preview=True,
)
# Pass if no images detected
try:
url = "https://google.com" + soup.find_all(
"a", {"class": "ekf0x hSQtef"}
)[0].get("href")
soup = await get_soup(url, headers=headers)
media = []
for img in soup.find_all("img"):
if len(media) == 2:
break
if img.get("src"):
img = img.get("src")
if "image/gif" in img:
continue
img = BytesIO(b64decode(img))
img.name = "img.png"
media.append(img)
elif img.get("data-src"):
img = img.get("data-src")
media.append(img)
# Cache images, so we can use file_ids
tasks = [client.send_photo(MESSAGE_DUMP_CHAT, img) for img in media]
messages = await asyncio.gather(*tasks)
await message.reply_media_group(
[
InputMediaPhoto(
i.photo.file_id,
caption=text,
)
for i in messages
]
)
except Exception:
pass
await m.edit(
text,
disable_web_page_preview=True,
)
|
the-stack_106_18688
|
# -*- python -*-
# Copyright (C) 2009-2019 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
pythondir = '/mnt/workspace/workspace/GCC-9-pipeline/jenkins-GCC-9-pipeline-100_20191030_1572397542/install-native/share/gcc-arm-none-eabi'
libdir = '/mnt/workspace/workspace/GCC-9-pipeline/jenkins-GCC-9-pipeline-100_20191030_1572397542/install-native/arm-none-eabi/lib/thumb/v8-m.main+fp/hard'
# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
# Update module path. We want to find the relative path from libdir
# to pythondir, and then we want to apply that relative path to the
# directory holding the objfile with which this file is associated.
# This preserves relocatability of the gcc tree.
# Do a simple normalization that removes duplicate separators.
pythondir = os.path.normpath (pythondir)
libdir = os.path.normpath (libdir)
prefix = os.path.commonprefix ([libdir, pythondir])
# In some bizarre configuration we might have found a match in the
# middle of a directory name.
if prefix[-1] != '/':
prefix = os.path.dirname (prefix) + '/'
# Strip off the prefix.
pythondir = pythondir[len (prefix):]
libdir = libdir[len (prefix):]
# Compute the ".."s needed to get from libdir to the prefix.
dotdots = ('..' + os.sep) * len (libdir.split (os.sep))
objfile = gdb.current_objfile ().filename
dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)
if not dir_ in sys.path:
sys.path.insert(0, dir_)
# Call a function as a plain import would not execute body of the included file
# on repeated reloads of this object file.
from libstdcxx.v6 import register_libstdcxx_printers
register_libstdcxx_printers(gdb.current_objfile())
|
the-stack_106_18689
|
import mysql.connector
import click
from toDo.schema import instructions
from flask import current_app, g
from flask.cli import with_appcontext
def get_db():
if 'db' not in g:
g.db = mysql.connector.connect(
host= current_app.config['DATABASE_HOST'],
user= current_app.config['DATABASE_USER'],
password= current_app.config['DATABASE_PASSWORD'],
database=current_app.config['DATABASE']
)
g.c = g.db.cursor(dictionary = True)
return g.db, g.c
def close_db(e=None):
db = g.pop('db', None)
if db is not None:
db.close()
def init_db():
db, c = get_db()
for i in instructions:
        c.execute(i)  # execute each statement from the schema
db.commit()
@click.command('init-db')
@with_appcontext
def init_db_command():
init_db()
    click.echo("Database initialized")
def init_app(app):
app.teardown_appcontext(close_db)
app.cli.add_command(init_db_command)
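# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: wiring init_app() into
# an application factory. The config keys mirror the ones read in get_db();
# the values below are placeholders and a reachable MySQL server is assumed.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    app.config.update(
        DATABASE_HOST="localhost",
        DATABASE_USER="todo_user",
        DATABASE_PASSWORD="change-me",
        DATABASE="todo_db",
    )
    init_app(app)  # registers close_db() and the `flask init-db` CLI command
    with app.app_context():
        db, c = get_db()
        c.execute("SELECT 1")
        print(c.fetchone())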
|
the-stack_106_18691
|
import pandas as pd
import sys, os
import numpy as np
from snapy import MinHash, LSH
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import random
import re
import Settings as Settings
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName("project-test").config("spark.some.config.option", "some-value").getOrCreate()
spark.sparkContext.addPyFile("/home/wh2099/project/mmh3.py")
spark.sparkContext.addPyFile("/home/wh2099/project/snapy.zip")
import mmh3
from snapy import MinHash, LSH
class SpatialColumnDetection:
def __init__(self, df, defaultFiles, index):
self.types = Settings.types
self.df = df
# column names
self.columnNames = list(df.columns)
# lower case column names
self.lcColumnNames = []
        # lower case column name to original name map
self.lowerUpperDict = {}
self.upperLowerDict = {}
# each column is what type
self.colNameType = {}
self.defaultFiles = defaultFiles
self.index = index
# get the dtype dictionary first
self.dtypes = {}
for keyVal in df.dtypes:
self.dtypes[keyVal[0]] = keyVal[1]
def initReturnResult(self):
self.colNameType["file_index"] = str(self.index)
self.colNameType["total_spatial_attributes"] = 0
self.colNameType["attributes"] = {}
for colName in self.columnNames:
self.colNameType["attributes"][colName] = {}
# change all column names to lower cases
def changeLowerCase(self, ):
for colName in self.columnNames:
lwColName = colName.lower()
self.lcColumnNames.append(lwColName)
self.lowerUpperDict[lwColName] = colName
self.upperLowerDict[colName] = lwColName
def lcs(self, X, Y):
# find the length of the strings
m = len(X)
n = len(Y)
# declaring the array for storing the dp values
L = [[None] * (n + 1) for i in range(m + 1)]
"""Following steps build L[m + 1][n + 1] in bottom up fashion
Note: L[i][j] contains length of LCS of X[0..i-1]
and Y[0..j-1]"""
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
L[i][j] = 0
elif X[i - 1] == Y[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
# L[m][n] contains the length of LCS of X[0..n-1] & Y[0..m-1]
return L[m][n]
def editDistance(self, x, y):
return len(x) + len(y) - 2 * self.lcs(x, y)
def detect(self, ):
self.changeLowerCase()
self.initReturnResult()
colIndex = 0
for colName in self.columnNames:
# change to lower case and trip it
colNameLwStrip = colName.lower().strip()
if self.detectLongitude(colNameLwStrip, colName):
type = "longitude"
elif self.detectLatitude(colNameLwStrip, colName):
type = "latitude"
elif self.detectAddress(colNameLwStrip, colName):
type = "address"
# elif self.detectCounty(colNameLwStrip, colName):
# type = "county"
elif self.detectBorough(colNameLwStrip, colName):
type = "borough"
elif self.detectCity(colNameLwStrip, colName):
type = "city"
elif self.detectState(colNameLwStrip, colName):
type = "state"
elif self.detectCountry(colNameLwStrip, colName):
type = "country"
elif self.detectZipcode(colNameLwStrip, colName):
type = "zipcode"
elif self.detectOtherLocationAttribute(colNameLwStrip, colName):
type = "other location attribute"
else:
type = "not spatial attribute"
self.colNameType["attributes"][colName]["type"] = type
self.colNameType["attributes"][colName]["index"] = colIndex
colIndex += 1
count = 0
for colName in self.columnNames:
if self.colNameType["attributes"][colName]["type"] != "not spatial attribute":
count += 1
self.colNameType["attributes"][colName]["is_spatial"] = True
else:
self.colNameType["attributes"][colName]["is_spatial"] = False
self.colNameType["total_spatial_attributes"] = count
return self.colNameType
def commonDetectMethod(self, colNameLw, names, thredshold):
for name in names:
if (colNameLw in name) or (name in colNameLw):
return True
        # use fuzz ratio, which is based on Levenshtein distance; the threshold is supplied by the caller
for name in names:
if fuzz.ratio(name, colNameLw) > thredshold:
return True
return False
    # TODO: to be finished; colName is the original column name, colNameLw is the lower-cased, stripped version
def detectLongitude(self, colNameLw, colName):
names = ["longitude", "lon"]
thredshold = 75
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
return False
# TODO: use datamart
def detectLatitude(self, colNameLw, colName):
names = ["latitude"]
thredshold = 75
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
return False
# list all the country out, count how many of the sample column values are in country list
def detectCountry(self, colNameLw,colName):
# quite similar word
if "county" in colNameLw:
return False
names = ["country"]
thredshold = 100
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
        # this does not work for "yes"/"no" columns because "no" is the country code for Norway
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
dfCountryNames = self.defaultFiles.dfCountryNames
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
            # meaning many values are NaN
if sampleLength / sampleSize < 0.1:
return False
# get the average length of the values
avgLen = sum(map(len, columnValuesSample)) / len(columnValuesSample)
# compare with country code, other wise compare with country full name
if avgLen <= 2.3:
countries = dfCountryNames.select("Code").rdd.flatMap(lambda x: x).collect()
countries = [code for code in countries if code != "NO"]
else:
countries = dfCountryNames.select("Name").rdd.flatMap(lambda x: x).collect()
countries = [code for code in countries if code != "Norway"]
# equality count
count = 0
for value in columnValuesSample:
for country in countries:
if value.lower() == country.lower():
count += 1
break
if count / sampleLength > 0.7:
return True
return False
# list all the state out, fullname and abbreviation, can use sampling
def detectState(self, colNameLw, colName):
names = ["state"]
thredshold = 90
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
dfStateNames = self.defaultFiles.dfStateNames
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
            # meaning many values are NaN
if sampleLength / sampleSize < 0.1:
return False
# get the average length of the values
avgLen = sum(map(len, columnValuesSample)) / len(columnValuesSample)
# compare with state code, other wise compare with state full name
if avgLen <= 2.3:
states = dfStateNames.select("Code").rdd.flatMap(lambda x: x).collect()
else:
states = dfStateNames.select("Name").rdd.flatMap(lambda x: x).collect()
# equality count
count = 0
for value in columnValuesSample:
for state in states:
if value.lower() == state.lower():
count += 1
break
if count / sampleLength > 0.6:
return True
return False
# can use sampling
def detectCity(self, colNameLw, colName):
names = ["city", "town"]
thredshold = 90
# if self.commonDetectMethod(colNameLw, names, thredshold):
# return True
        # avoid matching capacity, ethnicity, electricity, etc.
for name in names:
if colNameLw == name:
return True
# some column name states, county but have New York inside
if self.commonDetectMethod(colNameLw, ["state", "county"], thredshold):
return False
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
dfCityNames = self.defaultFiles.dfCityNames
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
            # meaning many values are NaN
if sampleLength / sampleSize < 0.1:
return False
cities = dfCityNames.select("Name").rdd.flatMap(lambda x: x).collect()
# equality count
count = 0
for value in columnValuesSample:
for city in cities:
if value == city:
count += 1
break
if count / sampleLength > 0.6:
return True
return False
# list all the county out, count how many of the sample column values are in county list. Not country!!!
def detectCounty(self, colNameLw, colName):
# quite similar word
if "country" in colNameLw:
return False
names = ["county"]
thredshold = 70
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
dfCountyNames = self.defaultFiles.dfCountyNames
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
# meaning many are nan
if sampleLength / sampleSize < 0.1:
return False
counties = dfCountyNames.select("Name").rdd.flatMap(lambda x: x).collect()
# equality count
count = 0
for value in columnValuesSample:
for county in counties:
if value.lower() == county.lower():
count += 1
break
if count / sampleLength > 0.6:
return True
return False
def detectBorough(self, colNameLw, colName):
names = ["borough", "boro", "borocode"]
thredshold = 80
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
return False
# need to use sampling
def detectAddress(self, colNameLw, colName):
names = ["address", "street", "block"]
thredshold = 80
if self.commonDetectMethod(colNameLw, names, thredshold):
# add one more condition
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
return True
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string"):
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
            # meaning many values are NaN
if sampleLength / sampleSize < 0.1:
return False
# get the average length of the values
avgLen = sum(map(len, columnValuesSample)) / len(columnValuesSample)
# probably not address
if avgLen < 5:
return False
            # use a regex to detect full street addresses; the pattern is a raw
            # string and is matched with re.VERBOSE so the layout whitespace and
            # line breaks inside it are ignored
            regexPattern = r"""
            \b\d{1,6}[ ]+.{2,25}\b(avenue|ave|court|ct|street|st|
            drive|dr|lane|ln|road|rd|blvd|plaza|parkway|pkwy|
            boulevard|)[.,]?(.{0,25}[ ]+\b\d{5}\b)?
            """
            count = 0
            for x in columnValuesSample:
                result = re.match(regexPattern, x, re.VERBOSE | re.IGNORECASE)
                if result is not None:
                    if len(result.group()) > 10:
                        count += 1
if count / sampleLength > 0.6:
return True
            # use a second regex to detect street-name style values; again a raw
            # string matched with re.VERBOSE
            regexPattern2 = r"""
            \d+[ ](?:[A-Za-z0-9.-]+[ ]?)+(?:Avenue|Lane|Road|
            Boulevard|Drive|Street|Ave|Dr|Rd|Blvd|Ln|St)\.?
            """
            count = 0
            for x in columnValuesSample:
                result = re.match(regexPattern2, x, re.VERBOSE)
                if result is not None:
                    if len(result.group()) > 10:
                        count += 1
if count / sampleLength > 0.7:
return True
# TODO use the addr_detection package
# try:
# clf2 = Postal_clf()
# result = clf2.predict(columnValuesSample)
# except:
# return False
return False
# need to use sampling, and regex
def detectZipcode(self, colNameLw, colName):
names = ["zip", "zipcode", "zcode", "postcode"]
thredshold = 70
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
if (self.dtypes[colName] == object) or (self.dtypes[colName] == "string") or \
(self.dtypes[colName] == "int") or (self.dtypes[colName] == "bigint"):
# sampling and pair wise comparison
sampleSize = 500
sampleSize = min(sampleSize, self.df.count())
columnValuesSample = random.sample(self.df.select(colName).rdd.flatMap(lambda x: x).collect(), sampleSize)
if (self.dtypes[colName] == "int") or (self.dtypes[colName] == "bigint"):
columnValuesSample = [str(x) for x in columnValuesSample]
else:
columnValuesSample = [x for x in columnValuesSample if type(x) == str]
sampleLength = len(columnValuesSample)
            # meaning many values are NaN
if sampleLength / sampleSize < 0.1:
return False
            # US ZIP codes only here (5 digits, optional ZIP+4); other countries would need a different pattern
            regexPattern = r"^[0-9]{5}(?:-[0-9]{4})?$"
matches = []
for x in columnValuesSample:
result = re.match(regexPattern, x)
if result != None:
matches.append(bool(x))
count = sum([bool(x) for x in matches])
if count / sampleLength > 0.6:
return True
return False
def detectOtherLocationAttribute(self, colNameLw, colName):
names = ["location", "home", "house", "lot", "bin", "bbl", "nta", "geom", "precinct",
"census_tract", "community", "district", "building"]
thredshold = 70
if self.commonDetectMethod(colNameLw, names, thredshold):
return True
return False
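# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: running the detector
# on a tiny in-memory DataFrame. Passing defaultFiles=None is an assumption
# that only works here because every column below is resolved by the cheap
# name-based checks (the numeric dtypes keep the value-based lookups, which
# need the reference name lists, from ever running).
if __name__ == "__main__":
    example_df = spark.createDataFrame(
        [(40.7128, -74.0060, 10001), (34.0522, -118.2437, 90001)],
        ["latitude", "longitude", "zipcode"],
    )
    detection = SpatialColumnDetection(example_df, defaultFiles=None, index=0)
    print(detection.detect())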
|
the-stack_106_18692
|
from bxgateway.utils import configuration_utils
from bxcommon.test_utils.mocks.mock_node import MockNode
from bxcommon.test_utils import helpers
from bxgateway import gateway_constants
from bxcommon.models.config.gateway_node_config_model import GatewayNodeConfigModel
import unittest
class ConfigToolsTests(unittest.TestCase):
def setUp(self):
self.node = MockNode(helpers.get_common_opts(8888))
def test_update_node_config_update_value(self):
old_value = self.node.opts.throughput_stats_interval
new_value = 90
self.assertFalse(old_value == new_value)
configuration_utils.compare_and_update(new_value,
self.node.opts.throughput_stats_interval,
item="throughput_stats_interval",
setter=lambda val: self.node.opts.__setattr__("throughput_stats_interval", val))
self.assertEqual(self.node.opts.throughput_stats_interval, new_value)
def test_update_node_config_ignore_missing_new_value(self):
old_value = self.node.opts.throughput_stats_interval
new_value = None
self.assertIsNotNone(old_value)
configuration_utils.compare_and_update(new_value,
self.node.opts.throughput_stats_interval,
item="throughput_stats_interval",
setter=lambda val: self.node.opts.__setattr__("throughput_stats_interval", val))
self.assertEqual(self.node.opts.throughput_stats_interval, old_value)
def test_read_file(self):
node_config_model = configuration_utils.read_config_file(gateway_constants.CONFIG_FILE_NAME)
self.assertIsInstance(node_config_model, GatewayNodeConfigModel)
node_config_model = configuration_utils.read_config_file("NotAFileName.json")
self.assertIsInstance(node_config_model, GatewayNodeConfigModel)
|
the-stack_106_18693
|
"""EvoNormB0 (Batched) and EvoNormS0 (Sample) in PyTorch
An attempt at getting decent performing EvoNorms running in PyTorch.
While currently faster than other impl, still quite a ways off the built-in BN
in terms of memory usage and throughput (roughly 5x mem, 1/2 - 1/3x speed).
Still very much a WIP, fiddling with buffer usage, in-place/jit optimizations, and layouts.
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
class EvoNormBatch2d(nn.Module):
def __init__(self, num_features, apply_act=True, momentum=0.1, eps=1e-5, drop_block=None):
super(EvoNormBatch2d, self).__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.momentum = momentum
self.eps = eps
param_shape = (1, num_features, 1, 1)
self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
if apply_act:
self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True)
self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if self.apply_act:
nn.init.ones_(self.v)
def forward(self, x):
assert x.dim() == 4, 'expected 4D input'
x_type = x.dtype
if self.training:
var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
n = x.numel() / x.shape[1]
m = self.momentum
self.running_var.copy_(var.detach() * m * (n / (n - 1)) + self.running_var * (1 - m))
else:
var = self.running_var
if self.apply_act:
v = self.v.to(dtype=x_type)
d = x * v + (x.var(dim=(2, 3), unbiased=False, keepdim=True) + self.eps).sqrt().to(dtype=x_type)
d = d.max((var + self.eps).sqrt().to(dtype=x_type))
x = x / d
return x * self.weight + self.bias
class EvoNormSample2d(nn.Module):
def __init__(self, num_features, apply_act=True, groups=8, eps=1e-5, drop_block=None):
super(EvoNormSample2d, self).__init__()
self.apply_act = apply_act # apply activation (non-linearity)
self.groups = groups
self.eps = eps
param_shape = (1, num_features, 1, 1)
self.weight = nn.Parameter(torch.ones(param_shape), requires_grad=True)
self.bias = nn.Parameter(torch.zeros(param_shape), requires_grad=True)
if apply_act:
self.v = nn.Parameter(torch.ones(param_shape), requires_grad=True)
self.reset_parameters()
def reset_parameters(self):
nn.init.ones_(self.weight)
nn.init.zeros_(self.bias)
if self.apply_act:
nn.init.ones_(self.v)
def forward(self, x):
assert x.dim() == 4, 'expected 4D input'
B, C, H, W = x.shape
assert C % self.groups == 0
if self.apply_act:
n = x * (x * self.v).sigmoid()
x = x.reshape(B, self.groups, -1)
x = n.reshape(B, self.groups, -1) / (x.var(dim=-1, unbiased=False, keepdim=True) + self.eps).sqrt()
x = x.reshape(B, C, H, W)
return x * self.weight + self.bias
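# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: dropping the EvoNorm
# layers into a small conv stack in place of BatchNorm + activation. The
# channel counts and input size are illustrative only.
if __name__ == "__main__":
    model = nn.Sequential(
        nn.Conv2d(3, 16, kernel_size=3, padding=1),
        EvoNormBatch2d(16),
        nn.Conv2d(16, 32, kernel_size=3, padding=1),
        EvoNormSample2d(32, groups=8),
    )
    dummy = torch.randn(4, 3, 32, 32)
    model.train()
    out = model(dummy)
    print(out.shape)  # torch.Size([4, 32, 32, 32])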
|
the-stack_106_18694
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# Contributors: Bill L.Nieuwendorp
"""
This script Exports Lightwaves MotionDesigner format.
The .mdd format has become quite a popular Pipeline format<br>
for moving animations from package to package.
Be sure not to use modifiers that change the number or order of verts in the mesh
"""
import bpy
import mathutils
from struct import pack
def zero_file(filepath):
"""
If a file fails, this replaces it with 1 char, better not remove it?
"""
file = open(filepath, 'w')
file.write('\n') # apparently macosx needs some data in a blank file?
file.close()
def check_vertcount(mesh, vertcount):
"""
check and make sure the vertcount is consistent throughout the frame range
"""
if len(mesh.vertices) != vertcount:
raise Exception('Error, number of verts has changed during animation, cannot export')
def save(context, filepath="", frame_start=1, frame_end=300, fps=25.0, use_rest_frame=False):
"""
Blender.Window.WaitCursor(1)
mesh_orig = Mesh.New()
mesh_orig.getFromObject(obj.name)
"""
scene = context.scene
obj = context.object
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
orig_frame = scene.frame_current
scene.frame_set(frame_start)
me = obj.to_mesh(scene, True, 'PREVIEW')
#Flip y and z
'''
mat_flip = mathutils.Matrix(((1.0, 0.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
))
'''
mat_flip = mathutils.Matrix()
numverts = len(me.vertices)
numframes = frame_end - frame_start + 1
if use_rest_frame:
numframes += 1
f = open(filepath, 'wb') # no Errors yet:Safe to create file
# Write the header
f.write(pack(">2i", numframes, numverts))
# Write the frame times (should we use the time IPO??)
f.write(pack(">%df" % (numframes), *[frame / fps for frame in range(numframes)])) # seconds
if use_rest_frame:
check_vertcount(me, numverts)
me.transform(mat_flip * obj.matrix_world)
f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))
bpy.data.meshes.remove(me, do_unlink=True)
for frame in range(frame_start, frame_end + 1): # in order to start at desired frame
scene.frame_set(frame)
me = obj.to_mesh(scene, True, 'PREVIEW')
check_vertcount(me, numverts)
me.transform(mat_flip * obj.matrix_world)
# Write the vertex data
f.write(pack(">%df" % (numverts * 3), *[axis for v in me.vertices for axis in v.co]))
bpy.data.meshes.remove(me, do_unlink=True)
f.close()
print('MDD Exported: %r frames:%d\n' % (filepath, numframes - 1))
scene.frame_set(orig_frame)
return {'FINISHED'}
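# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: exporting the active
# object's deformation from inside a Blender session. bpy.context is only a
# real context when run inside Blender, and the output path and frame range
# are illustrative.
if __name__ == "__main__":
    save(bpy.context, filepath="/tmp/example.mdd",
         frame_start=1, frame_end=50, fps=25.0, use_rest_frame=False)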
|
the-stack_106_18696
|
# encoding: utf-8
"""Tests for genutils.path"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import os
import sys
import tempfile
import nose.tools as nt
from ..testing.decorators import skip_if_not_win32, skip_win32
from .. import path
from .. import py3compat
from ..tempdir import TemporaryDirectory
def test_filefind():
f = tempfile.NamedTemporaryFile()
t = path.filefind(f.name, '.')
def test_ensure_dir_exists():
with TemporaryDirectory() as td:
d = os.path.join(td, u'∂ir')
path.ensure_dir_exists(d) # create it
assert os.path.isdir(d)
path.ensure_dir_exists(d) # no-op
f = os.path.join(td, u'ƒile')
open(f, 'w').close() # touch
with nt.assert_raises(IOError):
path.ensure_dir_exists(f)
class TestLinkOrCopy(object):
def setUp(self):
self.tempdir = TemporaryDirectory()
self.src = self.dst("src")
with open(self.src, "w") as f:
f.write("Hello, world!")
def tearDown(self):
self.tempdir.cleanup()
def dst(self, *args):
return os.path.join(self.tempdir.name, *args)
def assert_inode_not_equal(self, a, b):
nt.assert_not_equals(os.stat(a).st_ino, os.stat(b).st_ino,
"%r and %r do reference the same indoes" %(a, b))
def assert_inode_equal(self, a, b):
nt.assert_equals(os.stat(a).st_ino, os.stat(b).st_ino,
"%r and %r do not reference the same indoes" %(a, b))
def assert_content_equal(self, a, b):
with open(a) as a_f:
with open(b) as b_f:
nt.assert_equals(a_f.read(), b_f.read())
@skip_win32
def test_link_successful(self):
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
@skip_win32
def test_link_into_dir(self):
dst = self.dst("some_dir")
os.mkdir(dst)
path.link_or_copy(self.src, dst)
expected_dst = self.dst("some_dir", os.path.basename(self.src))
self.assert_inode_equal(self.src, expected_dst)
@skip_win32
def test_target_exists(self):
dst = self.dst("target")
open(dst, "w").close()
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
@skip_win32
def test_no_link(self):
real_link = os.link
try:
del os.link
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_content_equal(self.src, dst)
self.assert_inode_not_equal(self.src, dst)
finally:
os.link = real_link
@skip_if_not_win32
def test_windows(self):
dst = self.dst("target")
path.link_or_copy(self.src, dst)
self.assert_content_equal(self.src, dst)
def test_link_twice(self):
# Linking the same file twice shouldn't leave duplicates around.
# See https://github.com/ipython/ipython/issues/6450
dst = self.dst('target')
path.link_or_copy(self.src, dst)
path.link_or_copy(self.src, dst)
self.assert_inode_equal(self.src, dst)
nt.assert_equal(sorted(os.listdir(self.tempdir.name)), ['src', 'target'])
|
the-stack_106_18698
|
import json
import os
import logging
from os.path import join, normpath
from django.core.cache import cache
from django.conf import settings
from datetime import datetime, timedelta
from django.http import HttpResponse
from biostar.accounts.models import Profile, User
from . import util
from .models import Post, Vote, Subscription, PostView
logger = logging.getLogger("engine")
def api_error(msg="Api Error"):
return {'error': msg}
def stat_file(date, data=None, load=False, dump=False):
os.makedirs(settings.STATS_DIR, exist_ok=True)
file_name = f'{date.year}-{date.month}-{date.day}.json'
file_path = normpath(join(settings.STATS_DIR, file_name))
def load_file():
# This will be FileNotFoundError in Python3.
if not os.path.isfile(file_path):
raise IOError
with open(file_path, 'r') as fin:
return json.loads(fin.read())
def dump_into_file():
with open(file_path, 'w') as fout:
fout.write(json.dumps(data))
if load:
return load_file()
if dump:
return dump_into_file()
def get_counts(end):
questions = Post.objects.filter(type=Post.QUESTION, creation_date__lt=end).count()
answers = Post.objects.filter(type=Post.ANSWER, creation_date__lt=end).count()
toplevel = Post.objects.filter(type__in=Post.TOP_LEVEL, creation_date__lt=end).exclude(type=Post.BLOG).count()
comments = Post.objects.filter(type=Post.COMMENT, creation_date__lt=end).count()
votes = Vote.objects.filter(date__lt=end).count()
users = User.objects.filter(profile__date_joined__lt=end).count()
data = {
'questions': questions,
'answers': answers,
'toplevel': toplevel,
'comments': comments,
'votes': votes,
'users': users,
}
return data
def compute_stats(date):
"""
Statistics about this website for the given date.
Statistics are stored to a json file for caching purpose.
Parameters:
date -- a `datetime`.
"""
start = date.date()
end = start + timedelta(days=1)
try:
return stat_file(date=start, load=True)
except Exception as exc: # This will be FileNotFoundError in Python3.
logger.info('No stats file for {}.'.format(start))
new_users = Profile.objects.filter(date_joined__gte=start,
date_joined__lt=end).values_list("uid", flat=True)
new_posts = Post.objects.filter(creation_date__gte=start,
creation_date__lt=end).values_list("uid", flat=True)
new_votes = Vote.objects.filter(date__gte=start,
date__lt=end).values_list("uid", flat=True)
data = {
'date': util.datetime_to_iso(start),
'timestamp': util.datetime_to_unix(start),
'new_users': list(new_users),
'new_posts': list(new_posts),
'new_votes': list(new_votes),
}
data.update(get_counts(end=end))
if not settings.DEBUG:
stat_file(dump=True, date=start, data=data)
return data
def json_response(f):
"""
Converts any functions which returns a dictionary to a proper HttpResponse with json content.
"""
def to_json(request, *args, **kwargs):
"""
Creates the actual HttpResponse with json content.
"""
try:
data = f(request, *args, **kwargs)
except Exception as exc:
logger.error(exc)
data = api_error(msg=f"Error: {exc}")
payload = json.dumps(data, sort_keys=True, indent=4)
response = HttpResponse(payload, content_type="application/json")
if not data:
response.status_code = 404
response.reason_phrase = 'Not found'
return response
return to_json
@json_response
def daily_stats_on_day(request, day):
"""
Statistics about this website for the given day.
Day-0 is the day of the first post.
Parameters:
day -- a day, given as a number of days from day-0 (the day of the first post).
"""
day_zero = cache.get('day_zero')
first_post = Post.objects.order_by('creation_date').only('creation_date')
if day_zero is None and not first_post:
return False
if day_zero is None:
day_zero = first_post[0].creation_date
cache.set('day_zero', day_zero, 60 * 60 * 24 * 7) # Cache valid for a week.
date = day_zero + timedelta(days=int(day))
# We don't provide stats for today or the future.
if not date or date.date() >= datetime.today().date():
return {}
return compute_stats(date)
@json_response
def daily_stats_on_date(request, year, month, day):
"""
Statistics about this website for the given date.
Parameters:
year -- Year, 4 digits.
month -- Month, 2 digits.
day -- Day, 2 digits.
"""
date = datetime(int(year), int(month), int(day))
# We don't provide stats for today or the future.
if date.date() >= datetime.today().date():
return {}
return compute_stats(date)
@json_response
def traffic(request):
"""
Traffic as post views in the last 60 min.
"""
now = datetime.now()
start = now - timedelta(minutes=60)
post_views = PostView.objects.filter(date__gt=start).exclude(date__gt=now).distinct('ip').count()
data = {
'date': util.datetime_to_iso(now),
'timestamp': util.datetime_to_unix(now),
'post_views_last_60_min': post_views,
}
return data
@json_response
def user_email(request, email):
user = User.objects.filter(email__iexact=email.lower())
if user.exists():
return True
return False
@json_response
def user_details(request, uid):
"""
Details for a user.
Parameters:
id -- the uid of the `User`.
"""
user = User.objects.filter(profile__uid=uid).first()
if not user:
return {}
days_ago = (datetime.now().date() - user.profile.date_joined.date()).days
data = {
'uid': user.profile.uid,
'name': user.profile.name,
'date_joined': util.datetime_to_iso(user.profile.date_joined),
'last_login': util.datetime_to_iso(user.profile.last_login),
'joined_days_ago': days_ago,
'vote_count': Vote.objects.filter(author=user).count(),
}
return data
@json_response
def post_details(request, uid):
"""
Details for a post.
Parameters:
id -- the id of the `Post`.
"""
post = Post.objects.filter(uid=uid).first()
if not post:
return {}
return post.json_data()
@json_response
def watched_tags(request, email):
"""
Show watched tags for a user, given API key.
Parameters:
uid -- the id of the `User`.
"""
user = User.objects.filter(email=email).first()
if user:
data = {'watched_tags': user.profile.watched_tags}
else:
data = {}
return data
@json_response
def vote_details(request, uid):
"""
Details for a vote.
Parameters:
uid -- the id of the `Vote`.
"""
vote = Vote.objects.filter(uid=uid).first()
if not vote:
return {}
data = {
'uid': vote.uid,
'author_uid': vote.author.profile.uid,
'author': vote.author.profile.name,
'post_uid': vote.post.uid,
'type': vote.get_type_display(),
'type_id': vote.type,
'date': util.datetime_to_iso(vote.date),
}
return data
|
the-stack_106_18701
|
import json, re
ontologies = ['ontocompchem', 'ontokin', 'ontospecies', 'wiki']
def process_punctuation(string):
    # Remove punctuation, digits and newlines
    string_temp = re.sub(r'[-\n,.!?()\[\]0-9]', '', string)
    # Convert to lowercase
    string_temp = string_temp.lower()
    return string_temp
arrays = []
for o in ontologies:
f_name = '%s_corpus' % o
with open(f_name) as f:
content = json.loads(f.read())
    content = [process_punctuation(x) for x in content]
arrays.append(content)
with open('corpus', 'w') as f:
f.write(json.dumps(arrays))
|
the-stack_106_18702
|
import keras
from keras.layers import Input
from keras.models import Model
from .utils.cloud import remoteModel, modelOut
import numpy as np
class BrokenModel(object):
"""Can split the model at the given layer into two parts.
"""
def __init__(self, model, splitLayer, custom_objects):
"""
# Arguments
model: keras model to be split
splitLayer: layer to split the model at
"""
super(BrokenModel, self).__init__()
self.model = model
self.layers = [i.name for i in self.model.layers]
self.splitLayer = splitLayer
self.layerLoc = self.layers.index(self.splitLayer)
self.custom_objects = custom_objects
def splitModel(self):
"""Splits the given keras model at the specified layer.
"""
deviceOuts, remoteIns, skipNames = modelOut(self.model, self.layers, self.layerLoc)
self.deviceModel = Model(inputs=self.model.input, outputs=deviceOuts)
self.remoteModel = remoteModel(self.model, self.splitLayer, self.custom_objects)
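# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: splitting a small
# functional model at a named layer. The layer name "split_point" is made up,
# and the sketch assumes the .utils.cloud helpers (modelOut / remoteModel)
# behave as their names suggest for standard Dense layers.
if __name__ == "__main__":
    inputs = Input(shape=(32,))
    hidden = keras.layers.Dense(64, activation="relu", name="split_point")(inputs)
    outputs = keras.layers.Dense(10, activation="softmax")(hidden)
    full_model = Model(inputs=inputs, outputs=outputs)
    broken = BrokenModel(full_model, splitLayer="split_point", custom_objects=None)
    broken.splitModel()
    broken.deviceModel.summary()   # runs on the device, up to "split_point"
    broken.remoteModel.summary()   # runs remotely, from "split_point" onwards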
|
the-stack_106_18703
|
# Copyright (c) 2013-2015 University Corporation for Atmospheric Research/Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
"""Read upper air data from the Wyoming archives."""
#!/usr/bin/python3
from io import StringIO
import warnings
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from siphon._tools import get_wind_components
from siphon.http_util import HTTPEndPoint
import datetime
import metpy.units as units
import pyLARDA.helpers as h
warnings.filterwarnings('ignore', 'Pandas doesn\'t allow columns to be created', UserWarning)
class WyomingUpperAir(HTTPEndPoint):
"""Download and parse data from the University of Wyoming's upper air archive."""
def __init__(self):
"""Set up endpoint."""
super(WyomingUpperAir, self).__init__('http://weather.uwyo.edu/cgi-bin/sounding')
@classmethod
def request_data(cls, time, site_id, **kwargs):
r"""Retrieve upper air observations from the Wyoming archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
kwargs
Arbitrary keyword arguments to use to initialize source
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
endpoint = cls()
df = endpoint._get_data(time, site_id, **kwargs)
return df
def _get_data(self, time, site_id, region='naconf'):
r"""Download and parse upper air observations from an online archive.
Parameters
----------
time : datetime
The date and time of the desired observation.
site_id : str
The three letter ICAO identifier of the station for which data should be
downloaded.
region
Region to request data from
Returns
-------
:class:`pandas.DataFrame` containing the data
"""
raw_data, meta_data = self._get_data_raw(time, site_id, region)
col_names = ['pressure', 'range', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(raw_data, skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
# Add unit dictionary
df.units = {'pressure': 'hPa',
'range': 'meter',
'temperature': 'degC',
'dewpoint': 'degC',
'direction': 'degrees',
'speed': 'knot',
'u_wind': 'knot',
'v_wind': 'knot'}
for item in list(meta_data.split('\n'))[1:-1]:
var, value = item.split(': ')
df._metadata.append({var.strip(): value})
return df
def _get_data_raw(self, time, site_id, region='naconf'):
"""Download data from the University of Wyoming's upper air archive.
Parameters
----------
time : datetime
Date and time for which data should be downloaded
site_id : str
Site id for which data should be downloaded
region : str
The region in which the station resides. Defaults to `naconf`.
Returns
-------
a file-like object from which to read the data
"""
path = ('?region={region}&TYPE=TEXT%3ALIST'
'&YEAR={time:%Y}&MONTH={time:%m}&FROM={time:%d%H}&TO={time:%d%H}'
'&STNM={stid}').format(region=region, time=time, stid=site_id)
resp = self.get_path(path)
# See if the return is valid, but has no data
if resp.text.find('Can\'t') != -1:
raise ValueError(
'No data available for {time:%Y-%m-%d %HZ} from region {region} '
'for station {stid}.'.format(time=time, region=region,
stid=site_id))
soup = BeautifulSoup(resp.text, 'html.parser')
return StringIO(soup.find_all('pre')[0].contents[0]), soup.find_all('pre')[1].contents[0]
def wyoming_pandas_to_dict(df):
# extract metadata
metadata = {k: v for d in df._metadata for k, v in d.items()}
sounding_time = metadata['Observation time']
    date_sounding = datetime.datetime(int('20' + sounding_time[0:2]), int(sounding_time[2:4]), int(sounding_time[4:6]),
                                      int(sounding_time[7:9]), int(sounding_time[9:11]))
# build dictionary
sounding = {}
sounding['dimlabel'] = ['range']
sounding['range'] = df['range'].values
sounding['speed'] = (df['speed'].values * units.units('knots')).to_base_units().magnitude
sounding['time'] = h.dt_to_ts(date_sounding)
sounding['u_wind'] = (df['u_wind'].values * units.units('knots')).to_base_units().magnitude
sounding['v_wind'] = (df['v_wind'].values * units.units('knots')).to_base_units().magnitude
sounding['dewpoint'] = df['dewpoint']
sounding['direction'] = df['direction']
sounding['pressure'] = df['pressure']
sounding['temperature'] = df['temperature']
return sounding
def get_sounding(date, station_identifier):
"""Download Sounding from Uni Wyoming
Args:
date (datetime) of sounding of interest
station_identifier (str), e.g. "SCCI" is Punta Arenas
Returns:
A dictionary containing the sounding data. More metadata (CAPE etc.) can be added later.
"""
df = WyomingUpperAir.request_data(date, station_identifier)
sounding = wyoming_pandas_to_dict(df)
return sounding
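# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original module: downloading a single
# Punta Arenas (SCCI) sounding. Network access to the Wyoming archive is
# required and the requested launch time must actually exist in the archive.
if __name__ == "__main__":
    example = get_sounding(datetime.datetime(2019, 2, 1, 12), "SCCI")
    print(example['time'], example['range'][:5], example['speed'][:5])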
|
the-stack_106_18704
|
from __future__ import print_function
from lldbsuite.test.lldbtest import *
import os
import vscode
class VSCodeTestCaseBase(TestBase):
def create_debug_adaptor(self):
'''Create the Visual Studio Code debug adaptor'''
self.assertTrue(os.path.exists(self.lldbVSCodeExec),
'lldb-vscode must exist')
self.vscode = vscode.DebugAdaptor(
executable=self.lldbVSCodeExec, init_commands=self.setUpCommands())
def build_and_create_debug_adaptor(self):
self.build()
self.create_debug_adaptor()
def set_source_breakpoints(self, source_path, lines, condition=None,
hitCondition=None):
'''Sets source breakpoints and returns an array of strings containing
the breakpoint location IDs ("1.1", "1.2") for each breakpoint
that was set.
'''
response = self.vscode.request_setBreakpoints(
source_path, lines, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
response_id = breakpoint['id']
bp_id = response_id >> 32
bp_loc_id = response_id & 0xffffffff
breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id))
return breakpoint_ids
def set_function_breakpoints(self, functions, condition=None,
hitCondition=None):
'''Sets breakpoints by function name given an array of function names
and returns an array of strings containing the breakpoint location
IDs ("1.1", "1.2") for each breakpoint that was set.
'''
response = self.vscode.request_setFunctionBreakpoints(
functions, condition=condition, hitCondition=hitCondition)
if response is None:
return []
breakpoints = response['body']['breakpoints']
breakpoint_ids = []
for breakpoint in breakpoints:
response_id = breakpoint['id']
bp_id = response_id >> 32
bp_loc_id = response_id & 0xffffffff
breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id))
return breakpoint_ids
def verify_breakpoint_hit(self, breakpoint_ids):
'''Wait for the process we are debugging to stop, and verify we hit
any breakpoint location in the "breakpoint_ids" array.
"breakpoint_ids" should be a list of breakpoint location ID strings
(["1.1", "2.1"]). The return value from
self.set_source_breakpoints() can be passed to this function'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'breakpoint':
continue
if 'description' not in body:
continue
# Description is "breakpoint 1.1", so look for any location id
# ("1.1") in the description field as verification that one of
# the breakpoint locations was hit
description = body['description']
for breakpoint_id in breakpoint_ids:
if breakpoint_id in description:
return True
return False
def verify_exception_breakpoint_hit(self, filter_label):
'''Wait for the process we are debugging to stop, and verify the stop
reason is 'exception' and that the description matches
'filter_label'
'''
stopped_events = self.vscode.wait_for_stopped()
for stopped_event in stopped_events:
if 'body' in stopped_event:
body = stopped_event['body']
if 'reason' not in body:
continue
if body['reason'] != 'exception':
continue
if 'description' not in body:
continue
description = body['description']
if filter_label == description:
return True
return False
def verify_commands(self, flavor, output, commands):
self.assertTrue(output and len(output) > 0, "expect console output")
lines = output.splitlines()
prefix = '(lldb) '
for cmd in commands:
found = False
for line in lines:
if line.startswith(prefix) and cmd in line:
found = True
break
self.assertTrue(found,
"verify '%s' found in console output for '%s'" % (
cmd, flavor))
def get_dict_value(self, d, key_path):
        '''Verify each key in the key_path array is contained in each
           successive dictionary within "d". Assert if any key isn't in the
           corresponding dictionary. This is handy for grabbing values from a
           VS Code response dictionary, like getting
response['body']['stackFrames']
'''
value = d
for key in key_path:
if key in value:
value = value[key]
else:
self.assertTrue(key in value,
'key "%s" from key_path "%s" not in "%s"' % (
key, key_path, d))
return value
def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
dump=False):
response = self.vscode.request_stackTrace(threadId=threadId,
startFrame=startFrame,
levels=levels,
dump=dump)
if response:
return self.get_dict_value(response, ['body', 'stackFrames'])
return None
def get_source_and_line(self, threadId=None, frameIndex=0):
stackFrames = self.get_stackFrames(threadId=threadId,
startFrame=frameIndex,
levels=1)
if stackFrames is not None:
stackFrame = stackFrames[0]
if 'source' in stackFrame:
source = stackFrame['source']
if 'path' in source:
if 'line' in stackFrame:
return (source['path'], stackFrame['line'])
return ('', 0)
def get_stdout(self, timeout=0.0):
return self.vscode.get_output('stdout', timeout=timeout)
def get_console(self, timeout=0.0):
return self.vscode.get_output('console', timeout=timeout)
def get_local_as_int(self, name, threadId=None):
value = self.vscode.get_local_variable_value(name, threadId=threadId)
if value.startswith('0x'):
return int(value, 16)
elif value.startswith('0'):
return int(value, 8)
else:
return int(value)
def set_local(self, name, value, id=None):
'''Set a top level local variable only.'''
return self.vscode.request_setVariable(1, name, str(value), id=id)
def set_global(self, name, value, id=None):
'''Set a top level global variable only.'''
return self.vscode.request_setVariable(2, name, str(value), id=id)
def stepIn(self, threadId=None, waitForStop=True):
self.vscode.request_stepIn(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOver(self, threadId=None, waitForStop=True):
self.vscode.request_next(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def stepOut(self, threadId=None, waitForStop=True):
self.vscode.request_stepOut(threadId=threadId)
if waitForStop:
return self.vscode.wait_for_stopped()
return None
def continue_to_next_stop(self):
self.vscode.request_continue()
return self.vscode.wait_for_stopped()
def continue_to_breakpoints(self, breakpoint_ids):
self.vscode.request_continue()
self.verify_breakpoint_hit(breakpoint_ids)
def continue_to_exception_breakpoint(self, filter_label):
self.vscode.request_continue()
self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
'verify we got "%s"' % (filter_label))
def continue_to_exit(self, exitCode=0):
self.vscode.request_continue()
stopped_events = self.vscode.wait_for_stopped()
self.assertTrue(len(stopped_events) == 1,
"expecting single 'exited' event")
self.assertTrue(stopped_events[0]['event'] == 'exited',
'make sure program ran to completion')
self.assertTrue(stopped_events[0]['body']['exitCode'] == exitCode,
'exitCode == %i' % (exitCode))
def attach(self, program=None, pid=None, waitFor=None, trace=None,
initCommands=None, preRunCommands=None, stopCommands=None,
exitCommands=None, attachCommands=None):
        '''Send an attach request to the VSCode debug adaptor and attach to
        the process (by program path, pid, or custom attach commands).
        '''
# Make sure we disconnect and terminate the VSCode debug adaptor even
# if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_attach(
program=program, pid=pid, waitFor=waitFor, trace=trace,
initCommands=initCommands, preRunCommands=preRunCommands,
stopCommands=stopCommands, exitCommands=exitCommands,
attachCommands=attachCommands)
if not (response and response['success']):
self.assertTrue(response['success'],
'attach failed (%s)' % (response['message']))
def launch(self, program=None, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
               stopCommands=None, exitCommands=None, sourcePath=None,
debuggerRoot=None, launchCommands=None):
        '''Send a launch request to vscode.
        '''
        # Make sure we disconnect and terminate the VSCode debug adaptor even
        # if we throw an exception during the test case.
def cleanup():
self.vscode.request_disconnect(terminateDebuggee=True)
self.vscode.terminate()
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
# Initialize and launch the program
self.vscode.request_initialize()
response = self.vscode.request_launch(
program,
args=args,
cwd=cwd,
env=env,
stopOnEntry=stopOnEntry,
disableASLR=disableASLR,
disableSTDIO=disableSTDIO,
shellExpandArguments=shellExpandArguments,
trace=trace,
initCommands=initCommands,
preRunCommands=preRunCommands,
stopCommands=stopCommands,
exitCommands=exitCommands,
sourcePath=sourcePath,
debuggerRoot=debuggerRoot,
launchCommands=launchCommands)
if not (response and response['success']):
self.assertTrue(response['success'],
'launch failed (%s)' % (response['message']))
def build_and_launch(self, program, args=None, cwd=None, env=None,
stopOnEntry=False, disableASLR=True,
disableSTDIO=False, shellExpandArguments=False,
trace=False, initCommands=None, preRunCommands=None,
stopCommands=None, exitCommands=None,
sourcePath=None, debuggerRoot=None):
'''Build the default Makefile target, create the VSCode debug adaptor,
and launch the process.
'''
self.build_and_create_debug_adaptor()
self.assertTrue(os.path.exists(program), 'executable must exist')
self.launch(program, args, cwd, env, stopOnEntry, disableASLR,
disableSTDIO, shellExpandArguments, trace,
initCommands, preRunCommands, stopCommands, exitCommands,
sourcePath, debuggerRoot)
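# Illustrative sketch (not part of the original test support module): a
# hypothetical test case built on VSCodeTestCaseBase. It is left commented out
# so it is not collected by the test runner; the target name is a placeholder
# and getBuildArtifact/compute_mydir are assumed to come from TestBase.
#
# class TestBreakpointRoundtrip(VSCodeTestCaseBase):
#     mydir = TestBase.compute_mydir(__file__)
#
#     def test_hits_main(self):
#         program = self.getBuildArtifact('a.out')
#         self.build_and_launch(program)
#         breakpoint_ids = self.set_function_breakpoints(['main'])
#         self.continue_to_breakpoints(breakpoint_ids)
#         self.continue_to_exit()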
|
the-stack_106_18706
|
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# author: Daniel Kloeser
import time, os
import numpy as np
from acados_settings import *
from plotFcn import *
from tracks.readDataFcn import getTrack
import matplotlib.pyplot as plt
"""
Example of the frc_racecars in simulation without obstacle avoidance:
This example is for the optimal racing of the frc race cars. The model is a simple bycicle model and the lateral acceleration is constraint in order to validate the model assumptions.
The simulation starts at s=-2m until one round is completed(s=8.71m). The beginning is cut in the final plots to simulate a 'warm start'.
"""
track = "LMS_Track.txt"
[Sref, _, _, _, _] = getTrack(track)
Tf = 1.0 # prediction horizon
N = 50 # number of discretization steps
T = 10.00 # maximum simulation time[s]
sref_N = 3 # reference for final reference progress
# load model
constraint, model, acados_solver = acados_settings(Tf, N, track)
# dimensions
nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
Nsim = int(T * N / Tf)
# initialize data structs
simX = np.ndarray((Nsim, nx))
simU = np.ndarray((Nsim, nu))
s0 = model.x0[0]
tcomp_sum = 0
tcomp_max = 0
# simulate
for i in range(Nsim):
# update reference
sref = s0 + sref_N
for j in range(N):
yref = np.array([s0 + (sref - s0) * j / N, 0, 0, 0, 0, 0, 0, 0])
# yref=np.array([1,0,0,1,0,0,0,0])
acados_solver.set(j, "yref", yref)
yref_N = np.array([sref, 0, 0, 0, 0, 0])
# yref_N=np.array([0,0,0,0,0,0])
acados_solver.set(N, "yref", yref_N)
# solve ocp
t = time.time()
status = acados_solver.solve()
if status != 0:
raise Exception("acados returned status {} in closed loop iteration {}. Exiting.".format(status, i))
elapsed = time.time() - t
# manage timings
tcomp_sum += elapsed
if elapsed > tcomp_max:
tcomp_max = elapsed
# get solution
x0 = acados_solver.get(0, "x")
u0 = acados_solver.get(0, "u")
for j in range(nx):
simX[i, j] = x0[j]
for j in range(nu):
simU[i, j] = u0[j]
# update initial condition
x0 = acados_solver.get(1, "x")
acados_solver.set(0, "lbx", x0)
acados_solver.set(0, "ubx", x0)
s0 = x0[0]
# check if one lap is done and break and remove entries beyond
if x0[0] > Sref[-1] + 0.1:
# find where vehicle first crosses start line
N0 = np.where(np.diff(np.sign(simX[:, 0])))[0][0]
Nsim = i - N0 # correct to final number of simulation steps for plotting
simX = simX[N0:i, :]
simU = simU[N0:i, :]
break
# Plot Results
t = np.linspace(0.0, Nsim * Tf / N, Nsim)
plotRes(simX, simU, t)
plotTrackProj(simX, track)
plotalat(simX, simU, constraint, t)
# Print some stats
print("Average computation time: {}".format(tcomp_sum / Nsim))
print("Maximum computation time: {}".format(tcomp_max))
print("Average speed:{}m/s".format(np.average(simX[:, 3])))
print("Lap time: {}s".format(Tf * Nsim / N))
# avoid plotting when running on Travis
if os.environ.get("ACADOS_ON_TRAVIS") is None:
plt.show()
|
the-stack_106_18708
|
import logging
from crypto_exchange.utils.rest.okex import OKExREST
logger = logging.getLogger(__name__)
class OKExSpot(OKExREST):
def __init__(self, api_key: str = '', secret_key: str = '', ):
self._api_key = api_key
self._secret_key = secret_key
self.headers = {
"Content-type": "application/x-www-form-urlencoded",
}
super(OKExSpot, self).__init__(api_key, secret_key)
async def ticker(self, symbol: str):
"""
获取币币行情数据
:param symbol: 交易对
:return:date: 返回数据时服务器时间
buy: 买一价
high: 最高价
last: 最新成交价
low: 最低价
sell: 卖一价
vol: 成交量(最近的24小时)
"""
ticker_resource = "ticker.do"
params = {
'symbol': symbol
}
return await self.http_get(ticker_resource, params, self.headers)
async def depth(self, symbol: str, size: int = 200):
"""
获取币币市场深度
:param symbol:交易对
:param size:value:1-200
:return:asks :卖方深度
bids :买方深度
"""
depth_resource = "depth.do"
params = {
'symbol': symbol,
'size': size
}
return await self.http_get(depth_resource, params, self.headers)
async def trades_info(self, symbol: str, since: int = None):
"""
获取币币历史交易信息(60条)
:param symbol: 交易对
:param since: 交易记录ID,返回数据不包括该记录
:return:
"""
trades_resource = "trades.do"
params = {
'symbol': symbol
}
if since:
params['since'] = since
return await self.http_get(trades_resource, params, self.headers)
async def k_line(self, symbol: str, k_line_type: str, size: int = None, since: int = None):
"""
获取币币K线数据
:param symbol: 交易对
:param k_line_type: 1min/3min/5min/15min/30min/1day/3day/1week/1hour/2hour/4hour/6hour/12hour
:param size: 获取数据的条数,默认全部获取
:param since: 时间戳,返回时间戳以后的数据,默认全部获取
:return:时间戳,开,高,低,收,交易量
"""
k_line_resource = 'kline.do'
params = {
'symbol': symbol,
'type': k_line_type,
}
if size:
params['size'] = size
if since:
params['since'] = since
return await self.http_get(k_line_resource, params, self.headers)
async def user_info(self):
"""
获取用户信息
:return: free:账户余额,freezed:账户冻结余额
"""
user_info_resource = "userinfo.do"
params = {
'api_key': self._api_key
}
params['sign'] = self.sign(params)
return await self.http_post(user_info_resource, params, self.headers)
async def trade(self, symbol: str, trade_type: str, price: float, amount: float):
"""
下单交易
:param symbol: 交易对
:param trade_type: 买卖类型
:param price: 下单价格,市价卖单不传price
:param amount: 交易数量,市价买单不传amount,市价买单需传peice作为买入总金额
:return: result:交易成功或失败
order_id:订单ID
"""
trade_resource = "trade.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'type': trade_type
}
if price:
params['price'] = price
if amount:
params['amount'] = amount
params['sign'] = self.sign(params)
return await self.http_post(trade_resource, params, self.headers)
async def batch_trade(self, symbol: str, orders_data: str, trade_type: str = None):
"""
批量下单交易
:param symbol:交易对
:param trade_type: buy/sell/
:param orders_data: '[{价格,数量,买卖类型},{}]'
:return: result任一成功返回true,order_id下单失败返回-1,返回信息与上传信息一致
"""
bath_trade_resource = "batch_trade.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'orders_data': orders_data,
}
if trade_type:
params['type'] = trade_type
params['sign'] = self.sign(params)
return await self.http_post(bath_trade_resource, params, self.headers)
async def cancel_order(self, symbol: str, order_id: str):
"""
撤销币币订单
:param symbol: 交易对
:param order_id: 订单ID,多个以','分隔,一次最多撤销三个
:return: result,order_id,success(多笔),error(多笔失败的ID)
"""
# 校验参数
if len(order_id.split(',')) > 3:
raise 1 - 3
cancel_order_resource = 'cancel_order.do'
params = {
'api_key': self._api_key,
'symbol': symbol,
'order_id': order_id,
}
params['sign'] = self.sign(params)
return await self.http_post(cancel_order_resource, params, self.headers)
async def order_info(self, symbol: str, order_id: int):
"""
获取用户订单信息
:param symbol: 交易对
:param order_id: 订单ID,-1:未完成订单
:return: status:-1已撤销,0未成交,1部分成交,2完全成交,3撤单处理中
"""
order_info_resource = "order_info.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'order_id': order_id,
}
params['sign'] = self.sign(params)
return await self.http_post(order_info_resource, params, self.headers)
async def orders_info(self, symbol: str, order_id: str, info_type: int):
"""
批量获取订单信息
:param symbol: 交易对
:param order_id: 订单ID
:param info_type: 查询类型
:return:
"""
# 校验参数
orders_info_resource = "orders_info.do"
params = {
'api_key': self._api_key,
'type': info_type,
'symbol': symbol,
'order_id': order_id,
}
params['sign'] = self.sign(params)
return await self.http_post(orders_info_resource, params, self.headers)
async def order_history(self, symbol: str, status: int, current_page: int, page_length: int):
"""
获取历史订单信息
:param symbol: 交易对
:param status: 查询状态
:param current_page: 当前页数
:param page_length: 每页条数
:return:result:返回与否,total:总条数,currency_page:页数,page_length:每页条数,orders:订单列表
"""
order_history_resource = "order_history.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'status': status,
'current_page': current_page,
'page_length': page_length,
}
params['sign'] = self.sign(params)
return await self.http_post(order_history_resource, params, self.headers)
async def withdraw(self, symbol: str, charge_fee: float, trade_pwd: str, withdraw_address: str, withdraw_amount: float,
target: str = 'OKEX'):
"""
提币
:param symbol: 交易对
:param charge_fee: 网路手续费 BTC[0.002,0.005] LTC[0.001,0.2] ETH[0.01] ETC[0.0001,0.2] BCH范围 [0.0005,0.002]
:param trade_pwd: 交易密码
:param withdraw_address: 提币认证地址
:param withdraw_amount: 提币数量
:param target: 地址类型
:return: reault withdraw_id
"""
withdraw_resource = "withdraw.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'chargefee': charge_fee,
            'trade_pwd': trade_pwd,
'withdraw_address': withdraw_address,
'withdraw_amount': withdraw_amount,
'target': target,
}
params['sign'] = self.sign(params)
return await self.http_post(withdraw_resource, params, self.headers)
async def cancel_withdraw(self, symbol: str, withdraw_id: str):
"""
取消提币BTC/LTC/ETH/ETC/BCH
:param symbol:
:param withdraw_id:
:return:
"""
cancel_withdraw_resource = "cancel_withdraw.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'withdraw_id': withdraw_id
}
params['sign'] = self.sign(params)
return await self.http_post(cancel_withdraw_resource, params, self.headers)
async def withdraw_info(self, symbol: str, withdraw_id: str):
"""
查询提币BTC/LTC/ETH/ETC/BCH信息
:param symbol:
:param withdraw_id:
:return:
"""
withdraw_info_resource = "withdraw_info.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'withdraw_id': withdraw_id
}
params['sign'] = self.sign(params)
return await self.http_post(withdraw_info_resource, params, self.headers)
async def account_records(self, symbol: str, account_type: int, current_page: int, page_length: int):
"""
获取用户提现/充值记录
:param symbol:
:param account_type:
:param current_page:
:param page_length:
:return:
"""
account_records = "account_records.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'type': account_type,
'current_page': current_page,
'page_length': page_length,
}
params['sign'] = self.sign(params)
return await self.http_post(account_records, params, headers=self.headers)
async def funds_transfer(self, symbol: str, amount: int, funds_from: int, funds_to: int):
"""
资金划转
:param symbol:
:param amount:
:param funds_from:
:param funds_to:
:return:
"""
funds_transfer = "funds_transfer.do"
params = {
'api_key': self._api_key,
'symbol': symbol,
'amount': amount,
'from': funds_from,
'to': funds_to,
}
params['sign'] = self.sign(params)
return await self.http_post(funds_transfer, params, headers=self.headers)
async def wallet_info(self):
"""
获取用户钱包账户信息
:return: free:账户余额,freezed:账户冻结余额
"""
wallet_info_resource = "wallet_info.do"
params = {
'api_key': self._api_key,
}
params['sign'] = self.sign(params)
return await self.http_post(wallet_info_resource, params, self.headers)
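# Usage sketch (not part of the original module). It assumes the legacy v1
# endpoints wrapped above are still reachable and Python 3.7+ for asyncio.run;
# the credentials and the symbol are placeholder values.
if __name__ == '__main__':
    import asyncio
    async def _demo():
        spot = OKExSpot(api_key='your-api-key', secret_key='your-secret-key')
        # ticker() is a public endpoint, so the placeholder keys are not used.
        print(await spot.ticker('btc_usdt'))
    asyncio.run(_demo())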
|
the-stack_106_18709
|
"""
SymPy statistics module
Introduces a random variable type into the SymPy language.
Random variables may be declared using prebuilt functions such as
Normal, Exponential, Coin, Die, etc... or built with functions like FiniteRV.
Queries on random expressions can be made using the functions
========================= =============================
Expression Meaning
------------------------- -----------------------------
``P(condition)`` Probability
``E(expression)`` Expected value
``H(expression)`` Entropy
``variance(expression)`` Variance
``density(expression)`` Probability Density Function
``sample(expression)`` Produce a realization
``where(condition)`` Where the condition is true
========================= =============================
Examples
========
>>> from sympy.stats import P, E, variance, Die, Normal
>>> from sympy import Eq, simplify
>>> X, Y = Die('X', 6), Die('Y', 6) # Define two six sided dice
>>> Z = Normal('Z', 0, 1) # Declare a Normal random variable with mean 0, std 1
>>> P(X>3) # Probability X is greater than 3
1/2
>>> E(X+Y) # Expectation of the sum of two dice
7
>>> variance(X+Y) # Variance of the sum of two dice
35/6
>>> simplify(P(Z>1)) # Probability of Z being greater than 1
1/2 - erf(sqrt(2)/2)/2
"""
__all__ = [
'P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf','median',
'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'independent',
'random_symbols', 'correlation', 'factorial_moment', 'moment', 'cmoment',
'sampling_density', 'moment_generating_function', 'smoment', 'quantile',
'FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin', 'Binomial',
'BetaBinomial', 'Hypergeometric', 'Rademacher',
'ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaNoncentral', 'BetaPrime',
'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Erlang',
'ExGaussian', 'Exponential', 'ExponentialPower', 'FDistribution',
'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Gompertz', 'Gumbel',
'Kumaraswamy', 'Laplace', 'Levy', 'Logistic', 'LogLogistic', 'LogNormal', 'Moyal',
'Maxwell', 'Nakagami', 'Normal', 'GaussianInverse', 'Pareto', 'PowerFunction',
'QuadraticU', 'RaisedCosine', 'Rayleigh','Reciprocal', 'StudentT', 'ShiftedGompertz',
'Trapezoidal', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Wald',
'Weibull', 'WignerSemicircle',
'Geometric','Hermite', 'Logarithmic', 'NegativeBinomial', 'Poisson', 'Skellam',
'YuleSimon', 'Zeta',
'JointRV', 'Dirichlet', 'GeneralizedMultivariateLogGamma',
'GeneralizedMultivariateLogGammaOmega', 'Multinomial', 'MultivariateBeta',
'MultivariateEwens', 'MultivariateT', 'NegativeMultinomial',
'NormalGamma',
'StochasticProcess', 'DiscreteTimeStochasticProcess',
'DiscreteMarkovChain', 'TransitionMatrixOf', 'StochasticStateSpaceOf',
'GeneratorMatrixOf', 'ContinuousMarkovChain', 'BernoulliProcess',
'CircularEnsemble', 'CircularUnitaryEnsemble',
'CircularOrthogonalEnsemble', 'CircularSymplecticEnsemble',
'GaussianEnsemble', 'GaussianUnitaryEnsemble',
'GaussianOrthogonalEnsemble', 'GaussianSymplecticEnsemble',
'joint_eigen_distribution', 'JointEigenDistribution',
'level_spacing_distribution',
'Probability', 'Expectation', 'Variance', 'Covariance',
]
from .rv_interface import (P, E, H, density, where, given, sample, cdf, median,
characteristic_function, pspace, sample_iter, variance, std, skewness,
kurtosis, covariance, dependent, entropy, independent, random_symbols,
correlation, factorial_moment, moment, cmoment, sampling_density,
moment_generating_function, smoment, quantile)
from .frv_types import (FiniteRV, DiscreteUniform, Die, Bernoulli, Coin,
Binomial, BetaBinomial, Hypergeometric, Rademacher)
from .crv_types import (ContinuousRV, Arcsin, Benini, Beta, BetaNoncentral,
BetaPrime, Cauchy, Chi, ChiNoncentral, ChiSquared, Dagum, Erlang,
ExGaussian, Exponential, ExponentialPower, FDistribution, FisherZ,
Frechet, Gamma, GammaInverse, Gompertz, Gumbel, Kumaraswamy, Laplace,
Levy, Logistic, LogLogistic, LogNormal, Maxwell, Moyal, Nakagami, Normal,
GaussianInverse, Pareto, QuadraticU, RaisedCosine, Rayleigh, Reciprocal, StudentT,
PowerFunction, ShiftedGompertz, Trapezoidal, Triangular, Uniform, UniformSum,
VonMises, Wald, Weibull, WignerSemicircle)
from .drv_types import (Geometric, Hermite, Logarithmic, NegativeBinomial, Poisson,
Skellam, YuleSimon, Zeta)
from .joint_rv_types import (JointRV, Dirichlet,
GeneralizedMultivariateLogGamma, GeneralizedMultivariateLogGammaOmega,
Multinomial, MultivariateBeta, MultivariateEwens, MultivariateT,
NegativeMultinomial, NormalGamma)
from .stochastic_process_types import (StochasticProcess,
DiscreteTimeStochasticProcess, DiscreteMarkovChain,
TransitionMatrixOf, StochasticStateSpaceOf, GeneratorMatrixOf,
ContinuousMarkovChain, BernoulliProcess)
from .random_matrix_models import (CircularEnsemble, CircularUnitaryEnsemble,
CircularOrthogonalEnsemble, CircularSymplecticEnsemble,
GaussianEnsemble, GaussianUnitaryEnsemble, GaussianOrthogonalEnsemble,
GaussianSymplecticEnsemble, joint_eigen_distribution,
JointEigenDistribution, level_spacing_distribution)
from .symbolic_probability import (Probability, Expectation, Variance,
Covariance)
|
the-stack_106_18710
|
from pynput import keyboard
def on_press(key):
try:
print('alphanumeric key {0} pressed'.format(key.char))
except AttributeError:
print('special key {0} pressed'.format(key))
def on_release(key):
print('{0} released'.format(key))
if key == keyboard.Key.esc:
# Stop listener
return False
# Collect events until released
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
# ...or, in a non-blocking fashion:
listener = keyboard.Listener(
on_press=on_press,
on_release=on_release)
listener.start()
|
the-stack_106_18712
|
import json
import matplotlib as mp
import matplotlib._version
import pandas as pd
import smtplib # required to send email
from os import environ
from datetime import date, datetime
from io import BytesIO # required for converting matplotlib figure to bytes
from email.mime.image import MIMEImage # required for image attachment
from email.mime.multipart import MIMEMultipart # required for image attachment
from email.mime.text import MIMEText # required for message body
# Boy is this job hard! We need some light natured jovial code to lighten our
# path so we'll use status code 418 - 'I'm a teapot' to signify a client error
# The RFC specifies this code should be returned by teapots requested to brew
# coffee.
def failure(message):
return {
'statusCode': 418, # paying attention? change to 400 - Bad Request
'body': json.dumps(message)
}
def lambda_handler(event, context):
# used to calculate the number of incidents per day
day_accumulator = {}
# because we are using AWS Lambda behind their new? AWS proxy, the
# event object is different than what is documented. It contains the
    # request data within event['body']; for this reason we need to make sure
# we are using event consistently in deployment and development
eventobj = None
if 'AWSDEPLOY' in environ and environ['AWSDEPLOY'] == 'TRUE':
try:
eventobj = json.loads(event['body'])
except Exception as e:
# don't throw an error because AWS Lambda uses the non
# proxy technique for their tests
eventobj = event
else:
eventobj = event
# put all of the entries into a dict
try:
if len(eventobj['egyptsecurity']) == 0:
return failure('Request made with event list size of 0')
for item in eventobj['egyptsecurity']:
eventdate = date(int(item['year']),
int(item['month']),
int(item['day']))
if eventdate in day_accumulator:
day_accumulator[eventdate] += 1
else:
day_accumulator[eventdate] = 1
except Exception as e:
return failure(str(e))
# make a list out of the keys (dates) for creating indices for the dataframe
pddates = day_accumulator.keys()
# make a list out of the values (integer values) for creating the first
# column of the dataframe this is list comprehension, more info:
# https://www.pythonforbeginners.com/basics/list-comprehensions-in-python
# we are using it because day_accumulator.values is of type
# <class 'dict_values'>, an iterable but not a list
pdevents = [y for y in day_accumulator.values()]
# because we have gaps in dates that we want to fill in we will need the
# min and max date from our list of dates
oldest = min(pddates)
newest = max(pddates)
# Finally create our dataframe
df = pd.DataFrame({'Number of Attacks':pd.Series(pdevents,index=pddates)})
# Create a daterange for re-indexing
daterange = pd.date_range(oldest, newest)
# Re-index our dataframe, filling the columns of newly created rows for
# days with no data with values of 0
df = df.reindex(daterange, fill_value=0)
    # get ready for plotting with matplotlib; because TKinter is not installed,
    # use Agg as the renderer. For more information about renderers see:
# https://matplotlib.org/faq/usage_faq.html?highlight=backend#what-is-a-backend
mp.use('Agg')
# create a line graph
plot = df.plot.line()
# create a figure for the line graph
fig = plot.get_figure()
# create a bytesIO object because we don't have persistent storage on lambda
figdata = BytesIO()
# save the figure to the BytesIO object, SVG was having a difficulty so I
# chose PNG, I suppose SVG header information is being saved wrong
fig.savefig(figdata, format='png')
# after writing the byte stream the seek position will be at the end
# of the file, seek to position 0 for re-reading
figdata.seek(0)
# Create a mail msg object
# for an explanation of 'alternative' please see:
# https://en.wikipedia.org/wiki/MIME#Alternative
msg = MIMEMultipart('alternative')
msg['Subject'] = 'New report for ' + '%s' % datetime.now()
if 'FROM' not in environ:
return failure('Misconfigured environment variable FROM')
msg['From'] = environ['FROM']
if 'TO' not in environ:
return failure('Misconfigured environment variable TO')
msg['To'] = environ['TO']
# Create the body of the message (a plain-text and an HTML version).
text = "Hi!\nPlease find attached the graph of today's report."
html = """\
<html>
<head></head>
<body>
<p>
Hi!
<br><br>
The below graph shows number of attacks per day in Cairo.
<br><br>
<img src="cid:image1">
</p>
</body>
</html>
"""
# Record the MIME types of both parts - text/plain and text/html.
part1 = MIMEText(text, 'plain')
part2 = MIMEText(html, 'html')
# Attach parts into message container.
# According to RFC 2046, the last part of a multipart message, in this case
# the HTML message, is best and preferred.
msg.attach(part1)
msg.attach(part2)
# figdata.read(-1) reads the bytes of the figures till the end of the file
img = MIMEImage(figdata.read(-1), _subtype="png")
# add a header entry for the filename, name it simply report.png
img.add_header('Content-Disposition', 'attachment; filename="report.png"')
# add a Content-ID tag for embedding the image in the email
img.add_header('Content-ID', '<image1>')
# finally attach the img to the email message
msg.attach(img)
# set up variables for mailing
username = environ['FROM']
if 'GMAILPASS' not in environ:
return failure('Misconfigured environment variables')
password = environ['GMAILPASS']
server = smtplib.SMTP('smtp.gmail.com:587')
# login and send the mail
# it would be advisable to check the response, and log the response if it's
# a failure, logging is outside the scope of this tutorial
server.starttls()
server.login(username,password)
server.send_message(msg)
# close the server connection
server.quit()
return {
'statusCode': 200,
'body': json.dumps('Success')
}
def handler(event,context):
return lambda_handler(event, context)
|
the-stack_106_18714
|
#!/usr/bin/env python3
import glob
import re
import contextlib
import os
import platform
import sys
import shutil
import subprocess
import tarfile
import zipfile
import click
import cryptography.fernet
import parver
@contextlib.contextmanager
def chdir(path: str): # pragma: no cover
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
class BuildError(Exception):
pass
class BuildEnviron:
PLATFORM_TAGS = {
"Darwin": "osx",
"Windows": "windows",
"Linux": "linux",
}
def __init__(
self,
*,
system = "",
root_dir = "",
travis_tag = "",
travis_branch = "",
travis_pull_request = "",
appveyor_repo_tag_name = "",
appveyor_repo_branch = "",
appveyor_pull_request_number = "",
should_build_wheel = False,
should_build_docker = False,
should_build_pyinstaller = False,
has_aws_creds = False,
has_twine_creds = False,
docker_username = "",
docker_password = "",
):
self.system = system
self.root_dir = root_dir
self.travis_tag = travis_tag
self.travis_branch = travis_branch
self.travis_pull_request = travis_pull_request
self.should_build_wheel = should_build_wheel
self.should_build_docker = should_build_docker
self.should_build_pyinstaller = should_build_pyinstaller
self.appveyor_repo_tag_name = appveyor_repo_tag_name
self.appveyor_repo_branch = appveyor_repo_branch
self.appveyor_pull_request_number = appveyor_pull_request_number
self.has_aws_creds = has_aws_creds
self.has_twine_creds = has_twine_creds
self.docker_username = docker_username
self.docker_password = docker_password
@classmethod
def from_env(klass):
return klass(
system = platform.system(),
root_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), "..")),
travis_tag = os.environ.get("TRAVIS_TAG", ""),
travis_branch = os.environ.get("TRAVIS_BRANCH", ""),
travis_pull_request = os.environ.get("TRAVIS_PULL_REQUEST"),
appveyor_repo_tag_name = os.environ.get("APPVEYOR_REPO_TAG_NAME", ""),
appveyor_repo_branch = os.environ.get("APPVEYOR_REPO_BRANCH", ""),
appveyor_pull_request_number = os.environ.get("APPVEYOR_PULL_REQUEST_NUMBER"),
should_build_wheel = "WHEEL" in os.environ,
should_build_pyinstaller = "PYINSTALLER" in os.environ,
should_build_docker = "DOCKER" in os.environ,
has_aws_creds = "AWS_ACCESS_KEY_ID" in os.environ,
has_twine_creds= (
"TWINE_USERNAME" in os.environ and
"TWINE_PASSWORD" in os.environ
),
docker_username = os.environ.get("DOCKER_USERNAME"),
docker_password = os.environ.get("DOCKER_PASSWORD"),
)
def archive(self, path):
# ZipFile and tarfile have slightly different APIs. Fix that.
if self.system == "Windows":
a = zipfile.ZipFile(path, "w")
a.add = a.write
return a
else:
return tarfile.open(path, "w:gz")
def archive_name(self, bdist: str) -> str:
if self.system == "Windows":
ext = "zip"
else:
ext = "tar.gz"
return "{project}-{version}-{platform}.{ext}".format(
project=bdist,
version=self.version,
platform=self.platform_tag,
ext=ext
)
@property
def bdists(self):
ret = {
"mitmproxy": ["mitmproxy", "mitmdump", "mitmweb"],
"pathod": ["pathoc", "pathod"]
}
if self.system == "Windows":
ret["mitmproxy"].remove("mitmproxy")
return ret
@property
def branch(self):
return self.travis_branch or self.appveyor_repo_branch
@property
def build_dir(self):
return os.path.join(self.release_dir, "build")
@property
def dist_dir(self):
return os.path.join(self.release_dir, "dist")
@property
def docker_tag(self):
if self.branch == "master":
t = "dev"
else:
t = self.version
return "mitmproxy/mitmproxy:{}".format(t)
def dump_info(self, fp=sys.stdout):
lst = [
"version",
"tag",
"branch",
"platform_tag",
"root_dir",
"release_dir",
"build_dir",
"dist_dir",
"bdists",
"upload_dir",
"should_build_wheel",
"should_build_pyinstaller",
"should_build_docker",
"should_upload_docker",
"should_upload_pypi",
]
for attr in lst:
print("cibuild.%s=%s" % (attr, getattr(self, attr)), file=fp)
@property
def has_docker_creds(self) -> bool:
return self.docker_username and self.docker_password
@property
def is_prod_release(self) -> bool:
try:
v = parver.Version.parse(self.version)
except (parver.ParseError, BuildError):
return False
return not v.is_prerelease
@property
def is_pull_request(self) -> bool:
if self.appveyor_pull_request_number:
return True
if self.travis_pull_request and self.travis_pull_request != "false":
return True
return False
@property
def platform_tag(self):
if self.system in self.PLATFORM_TAGS:
return self.PLATFORM_TAGS[self.system]
raise BuildError("Unsupported platform: %s" % self.system)
@property
def release_dir(self):
return os.path.join(self.root_dir, "release")
@property
def should_upload_docker(self) -> bool:
return all([
(self.tag or self.branch == "master"),
self.should_build_docker,
self.has_docker_creds,
])
@property
def should_upload_pypi(self) -> bool:
return all([
self.tag,
self.is_prod_release,
self.should_build_wheel,
self.has_twine_creds,
])
@property
def tag(self):
return self.travis_tag or self.appveyor_repo_tag_name
@property
def upload_dir(self):
if self.tag:
return self.version
else:
return "branches/%s" % self.version
@property
def version(self):
name = self.tag or self.branch
if not name:
raise BuildError("We're on neither a tag nor a branch - could not establish version")
return re.sub('^v', "", name)
def build_wheel(be: BuildEnviron): # pragma: no cover
click.echo("Building wheel...")
subprocess.check_call([
"python",
"setup.py",
"-q",
"bdist_wheel",
"--dist-dir", be.dist_dir,
])
whl = glob.glob(os.path.join(be.dist_dir, 'mitmproxy-*-py3-none-any.whl'))[0]
click.echo("Found wheel package: {}".format(whl))
subprocess.check_call(["tox", "-e", "wheeltest", "--", whl])
return whl
def build_docker_image(be: BuildEnviron, whl: str): # pragma: no cover
click.echo("Building Docker image...")
subprocess.check_call([
"docker",
"build",
"--tag", be.docker_tag,
"--build-arg", "WHEEL_MITMPROXY={}".format(whl),
"--build-arg", "WHEEL_BASENAME_MITMPROXY={}".format(os.path.basename(whl)),
"--file", "docker/Dockerfile",
"."
])
def build_pyinstaller(be: BuildEnviron): # pragma: no cover
click.echo("Building pyinstaller package...")
PYINSTALLER_SPEC = os.path.join(be.release_dir, "specs")
# PyInstaller 3.2 does not bundle pydivert's Windivert binaries
PYINSTALLER_HOOKS = os.path.abspath(os.path.join(be.release_dir, "hooks"))
PYINSTALLER_TEMP = os.path.abspath(os.path.join(be.build_dir, "pyinstaller"))
PYINSTALLER_DIST = os.path.abspath(os.path.join(be.build_dir, "binaries", be.platform_tag))
# https://virtualenv.pypa.io/en/latest/userguide.html#windows-notes
# scripts and executables on Windows go in ENV\Scripts\ instead of ENV/bin/
if platform.system() == "Windows":
PYINSTALLER_ARGS = [
# PyInstaller < 3.2 does not handle Python 3.5's ucrt correctly.
"-p", r"C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\x86",
]
else:
PYINSTALLER_ARGS = []
if os.path.exists(PYINSTALLER_TEMP):
shutil.rmtree(PYINSTALLER_TEMP)
if os.path.exists(PYINSTALLER_DIST):
shutil.rmtree(PYINSTALLER_DIST)
for bdist, tools in sorted(be.bdists.items()):
with be.archive(os.path.join(be.dist_dir, be.archive_name(bdist))) as archive:
for tool in tools:
# We can't have a folder and a file with the same name.
if tool == "mitmproxy":
tool = "mitmproxy_main"
# This is PyInstaller, so it messes up paths.
# We need to make sure that we are in the spec folder.
with chdir(PYINSTALLER_SPEC):
click.echo("Building PyInstaller %s binary..." % tool)
excludes = []
if tool != "mitmweb":
excludes.append("mitmproxy.tools.web")
if tool != "mitmproxy_main":
excludes.append("mitmproxy.tools.console")
subprocess.check_call(
[
"pyinstaller",
"--clean",
"--workpath", PYINSTALLER_TEMP,
"--distpath", PYINSTALLER_DIST,
"--additional-hooks-dir", PYINSTALLER_HOOKS,
"--onefile",
"--console",
"--icon", "icon.ico",
# This is PyInstaller, so setting a
# different log level obviously breaks it :-)
# "--log-level", "WARN",
]
+ [x for e in excludes for x in ["--exclude-module", e]]
+ PYINSTALLER_ARGS
+ [tool]
)
# Delete the spec file - we're good without.
os.remove("{}.spec".format(tool))
# Test if it works at all O:-)
executable = os.path.join(PYINSTALLER_DIST, tool)
if platform.system() == "Windows":
executable += ".exe"
# Remove _main suffix from mitmproxy executable
if "_main" in executable:
shutil.move(
executable,
executable.replace("_main", "")
)
executable = executable.replace("_main", "")
click.echo("> %s --version" % executable)
click.echo(subprocess.check_output([executable, "--version"]).decode())
archive.add(executable, os.path.basename(executable))
click.echo("Packed {}.".format(be.archive_name(bdist)))
@click.group(chain=True)
def cli(): # pragma: no cover
"""
mitmproxy build tool
"""
pass
@cli.command("build")
def build(): # pragma: no cover
"""
Build a binary distribution
"""
be = BuildEnviron.from_env()
be.dump_info()
os.makedirs(be.dist_dir, exist_ok=True)
if be.should_build_wheel:
whl = build_wheel(be)
# Docker image requires wheels
if be.should_build_docker:
build_docker_image(be, whl)
if be.should_build_pyinstaller:
build_pyinstaller(be)
@cli.command("upload")
def upload(): # pragma: no cover
"""
Upload build artifacts
Uploads the wheels package to PyPi.
Uploads the Pyinstaller and wheels packages to the snapshot server.
Pushes the Docker image to Docker Hub.
"""
be = BuildEnviron.from_env()
if be.is_pull_request:
click.echo("Refusing to upload artifacts from a pull request!")
return
if be.has_aws_creds:
subprocess.check_call([
"aws", "s3", "cp",
"--acl", "public-read",
be.dist_dir + "/",
"s3://snapshots.mitmproxy.org/{}/".format(be.upload_dir),
"--recursive",
])
if be.should_upload_pypi:
whl = glob.glob(os.path.join(be.dist_dir, 'mitmproxy-*-py3-none-any.whl'))[0]
click.echo("Uploading {} to PyPi...".format(whl))
subprocess.check_call(["twine", "upload", whl])
if be.should_upload_docker:
click.echo("Uploading Docker image to tag={}...".format(be.docker_tag))
subprocess.check_call([
"docker",
"login",
"-u", be.docker_username,
"-p", be.docker_password,
])
subprocess.check_call(["docker", "push", be.docker_tag])
@cli.command("decrypt")
@click.argument('infile', type=click.File('rb'))
@click.argument('outfile', type=click.File('wb'))
@click.argument('key', envvar='RTOOL_KEY')
def decrypt(infile, outfile, key): # pragma: no cover
f = cryptography.fernet.Fernet(key.encode())
outfile.write(f.decrypt(infile.read()))
if __name__ == "__main__": # pragma: no cover
cli()
|
the-stack_106_18717
|
import json
import paste.fixture
from ckan import model
from ckan.lib.create_test_data import CreateTestData
import ckan.lib.helpers as h
from ckan.tests import WsgiAppCase
import ckan.plugins as plugins
TEST_VOCAB_NAME = 'test-vocab'
# paste.fixture.Field.Select does not handle multiple selects currently,
# so replace with our own implementations of Form and Select
class Form(paste.fixture.Form):
def __init__(self, response, text):
paste.fixture.Form.__init__(self, response, text)
def submit_fields(self, name=None, index=None):
"""
Return a list of ``[(name, value), ...]`` for the current
state of the form.
"""
submit = []
if name is not None:
field = self.get(name, index=index)
submit.append((field.name, field.value_if_submitted()))
for name, fields in self.fields.items():
if name is None:
continue
for field in fields:
value = field.value
if value is None:
continue
if isinstance(value, list):
for v in value:
submit.append((name, v))
else:
submit.append((name, value))
return submit
class Select(paste.fixture.Field):
def __init__(self, *args, **attrs):
paste.fixture.Field.__init__(self, *args, **attrs)
self.options = []
self.selectedIndex = None
def value__set(self, value):
if not value:
self.selectedIndex = None
self.options = [(option, False) for (option, checked) in self.options]
return
for v in value:
if not v in [option for (option, checked) in self.options]:
raise ValueError("Option %r not found (from %s)"
% (value, ', '.join(
[repr(o) for o, checked in self.options]))
)
new_options = [(option, True) for (option, checked) in self.options if option in value]
new_options += [(option, False) for (option, checked) in self.options if not option in value]
self.options = new_options
def value__get(self):
return [option for (option, checked) in self.options if checked]
value = property(value__get, value__set)
class TestWUI(WsgiAppCase):
@classmethod
def setup_class(cls):
plugins.load('test_tag_vocab_plugin')
CreateTestData.create(package_type='mock_vocab_tags_plugin')
cls.sysadmin_user = model.User.get('testsysadmin')
cls.dset = model.Package.get('warandpeace')
cls.tag1_name = 'vocab-tag-1'
cls.tag2_name = 'vocab-tag-2'
# use our custom select class for this test suite
cls.old_select = paste.fixture.Field.classes['select']
paste.fixture.Field.classes['select'] = Select
# create a test vocab
params = json.dumps({'name': TEST_VOCAB_NAME})
extra_environ = {'Authorization' : str(cls.sysadmin_user.apikey)}
cls.extra_environ = {'Authorization' : str(cls.sysadmin_user.apikey)}
response = cls.app.post('/api/action/vocabulary_create', params=params,
extra_environ=extra_environ)
assert json.loads(response.body)['success']
vocab_id = json.loads(response.body)['result']['id']
# add tags to the vocab
extra_environ = {'Authorization' : str(cls.sysadmin_user.apikey)}
params = json.dumps({'name': cls.tag1_name, 'vocabulary_id': vocab_id})
response = cls.app.post('/api/action/tag_create', params=params,
extra_environ=extra_environ)
assert json.loads(response.body)['success']
params = json.dumps({'name': cls.tag2_name, 'vocabulary_id': vocab_id})
response = cls.app.post('/api/action/tag_create', params=params,
extra_environ=extra_environ)
assert json.loads(response.body)['success']
@classmethod
def teardown_class(cls):
plugins.unload('test_tag_vocab_plugin')
paste.fixture.Field.classes['select'] = cls.old_select
model.repo.rebuild_db()
def _get_vocab_id(self, vocab_name):
params = json.dumps({'id': vocab_name})
response = self.app.post('/api/action/vocabulary_show', params=params)
assert json.loads(response.body)['success']
return json.loads(response.body)['result']['id']
def _add_vocab_tag_to_dataset(self, dataset_id, vocab_id, tag_name):
params = json.dumps({'id': dataset_id})
response = self.app.post('/api/action/package_show', params=params)
dataset = json.loads(response.body)['result']
dataset['tags'] = []
dataset['tags'].append({'name': tag_name, 'vocabulary_id': vocab_id})
params = json.dumps(dataset)
response = self.app.post('/api/action/package_update', params=params,
extra_environ={'Authorization': str(self.sysadmin_user.apikey)})
assert json.loads(response.body)['success']
def _remove_vocab_tags(self, dataset_id, vocab_id, tag_name):
params = json.dumps({'id': dataset_id})
response = self.app.post('/api/action/package_show', params=params)
dataset = json.loads(response.body)['result']
dataset['vocab_tag_selected'] = []
params = json.dumps(dataset)
response = self.app.post('/api/action/package_update', params=params,
extra_environ={'Authorization': str(self.sysadmin_user.apikey)})
assert json.loads(response.body)['success']
def test_01_dataset_view(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
self._add_vocab_tag_to_dataset(self.dset.id, vocab_id, self.tag1_name)
response = self.app.get(h.url_for(controller='package', action='read',
id=self.dset.id))
assert self.tag1_name in response.body, self.tag1_name
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag1_name)
def test_02_dataset_edit_add_vocab_tag(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
fv['vocab_tags'] = [self.tag2_name]
response = fv.submit('save', extra_environ=self.extra_environ)
response = response.follow()
assert not self.tag1_name in response.body
assert self.tag2_name in response.body
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag1_name)
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag2_name)
def test_02_dataset_edit_add_free_and_vocab_tags_then_edit_again(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
# Add a free tag with a space in its name.
fv['tag_string'] = 'water quality'
# Add a vocab tag.
fv['vocab_tags'] = [self.tag2_name]
# Save the dataset and visit the page again
response = fv.submit('save', extra_environ=self.extra_environ)
response = response.follow()
assert not self.tag1_name in response.body
assert self.tag2_name in response.body
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
assert fv['vocab_tags'].value == [self.tag2_name], fv['vocab_tags'].value
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag2_name)
def test_03_dataset_edit_remove_vocab_tag(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
self._add_vocab_tag_to_dataset(self.dset.id, vocab_id, self.tag1_name)
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
fv['vocab_tags'] = []
response = fv.submit('save', extra_environ=self.extra_environ)
response = response.follow()
assert not self.tag1_name in response.body
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag1_name)
def test_04_dataset_edit_change_vocab_tag(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
self._add_vocab_tag_to_dataset(self.dset.id, vocab_id, self.tag1_name)
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
fv['vocab_tags'] = [self.tag2_name]
response = fv.submit('save', extra_environ=self.extra_environ)
response = response.follow()
assert not self.tag1_name in response.body
assert self.tag2_name in response.body
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag2_name)
def test_05_dataset_edit_add_multiple_vocab_tags(self):
vocab_id = self._get_vocab_id(TEST_VOCAB_NAME)
url = h.url_for(controller='package', action='edit', id=self.dset.id)
response = self.app.get(url, extra_environ=self.extra_environ)
fv = response.forms['dataset-edit']
fv = Form(fv.response, fv.text)
fv['vocab_tags'] = [self.tag1_name, self.tag2_name]
response = fv.submit('save', extra_environ=self.extra_environ)
response = response.follow()
assert self.tag1_name in response.body
assert self.tag2_name in response.body
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag1_name)
self._remove_vocab_tags(self.dset.id, vocab_id, self.tag2_name)
|
the-stack_106_18718
|
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 事务控制
Case Name : 开启事务执行delete语句后是否可以再次设置事务隔离级别
Description :
1.创建测试表
2.以默认方式开启事务后执行delete语句
3.重新开启事务隔离级别为REPEATABLE READ的事务
Expect :
1.创建测试表成功
2.默认方式开启事务执行delete语句成功
3.重新开启事务隔离级别为REPEATABLE READ的事务失败
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class TransactionFile(unittest.TestCase):
def setUp(self):
        logger.info('----Opengauss_Function_DML_Transaction_Case0020 starts----')
self.PrimaryNode = Node('PrimaryDbUser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
self.Constant = Constant()
def test_transaction_file(self):
        logger.info('------create the test table and insert data------')
sql_cmd = '''drop table if exists testzl;
create table testzl(sk integer,id char(16),
name varchar(20),sq_ft integer);
insert into testzl values (001,'sk1','tt',3332);
insert into testzl values (001,'sk1','tt',3332);
insert into testzl values (001,'sk1','tt',3332);'''
excute_cmd = f'''source {self.DB_ENV_PATH};
gsql -d {self.PrimaryNode.db_name} \
-p {self.PrimaryNode.db_port} \
-c "{sql_cmd}"'''
logger.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn(self.Constant.INSERT_SUCCESS_MSG, msg)
        logger.info('start a transaction in default mode, run delete, then try to start a REPEATABLE READ transaction')
sql_cmd = f'''start transaction;
delete from testzl where sk = 1;
start transaction isolation level repeatable read;'''
excute_cmd = f'''source {self.DB_ENV_PATH};
gsql -d {self.PrimaryNode.db_name} \
-p {self.PrimaryNode.db_port} \
-c "{sql_cmd}"'''
logger.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn(self.Constant.START_TRANSACTION_SUCCESS_MSG, msg)
self.assertIn(self.Constant.SET_TRANSACTION_ERROR_MSG, msg)
def tearDown(self):
        logger.info('------clean up the environment------')
sql_cmd = 'drop table if exists testzl;'
excute_cmd = f'''source {self.DB_ENV_PATH};
gsql -d {self.PrimaryNode.db_name} \
-p {self.PrimaryNode.db_port} \
-c "{sql_cmd}"'''
logger.info(excute_cmd)
msg = self.PrimaryNode.sh(excute_cmd).result()
logger.info(msg)
self.assertIn(self.Constant.TABLE_DROP_SUCCESS, msg)
        logger.info('----Opengauss_Function_DML_Transaction_Case0020 finished----')
|
the-stack_106_18720
|
# Filename: HCm_Teff_v3.1.py
import string
import numpy as np
import sys
print (' ---------------------------------------------------------------------')
print ('This is HII-CHI-mistry_Teff v. 3.1')
print (' See Perez-Montero et al (2019) for details')
print (' Insert the name of your input text file with the following columns:')
print ('12+log(O/H), 3727 [OII], 5007 [OIII], 6725 [SII], 9069 [SIII]')
print ('with their corresponding errors in adjacent columns')
print ('relative to Hbeta or 0 for missing information.')
print ('---------------------------------------------------------------------')
print ('')
# Input file reading
if len(sys.argv) == 1:
if int(sys.version[0]) < 3:
input00 = raw_input('Insert input file name:')
else:
input00 = input('Insert input file name:')
else:
input00 = str(sys.argv[1])
try:
input0 = np.loadtxt(input00)
if (input0.ndim == 1 and input0.shape[0] != 10) or (input0.ndim > 1 and input0.shape[1] != 10):
print ('The input file does not have 10 columns. Please check')
sys.exit()
print ('The input file is:'+input00)
except:
print ('Input file error: It does not exist or has wrong format')
sys.exit()
print ('')
output = []
# Iterations for Montecarlo error derivation
if len(sys.argv) < 3:
n = 25
else:
n = int(sys.argv[2])
print ('The number of iterations for MonteCarlo simulation is: ',n)
print ('')
# Reading of models grids. These can be changed
print ('')
question = True
while question:
print ('---------------------------------------------------------------------')
print ('(1) Plane-parallel geometry')
print ('(2) Spherical geometry')
print ('---------------------------------------------------------------------')
if int(sys.version[0]) < 3:
geo = raw_input('Choose geometry of the models:')
else:
geo = input('Choose geometry of the models:')
if geo == '1' or geo == '2': question = False
geo = int(geo)
if geo==1:
geo_type = 'Plane-parallel geometry'
grid = np.loadtxt('C17_WMb_Teff_30-55_pp.dat')
print ('')
elif geo==2:
geo_type = 'Spherical geometry'
grid = np.loadtxt('C17_WMb_Teff_30-55_sph.dat')
print ('')
print ('')
if input0.shape == (10,):
input1 = [0,0,0,0,0,0,0,0,0,0,input0[0],input0[1],input0[2],input0[3],input0[4],input0[5],input0[6],input0[7],input0[8],input0[9]]
input = np.reshape(input1,(2,10))
else:
input = input0
print ('Reading grids ...')
print ('')
print ('')
print ('---------------------------------------------------------------------')
print ('(%) 12+log(O/H) T_eff(K) log(U)')
print ('---------------------------------------------------------------------')
# Beginning of loop of calculation
count = 0
for tab in input:
count = count + 1
OH_mc = []
Teff_mc = []
logU_mc = []
eOH_mc = []
eTeff_mc = []
elogU_mc = []
for monte in range(0,n,1):
OH_p = 0
logU_p = 0
Teff_p = 0
den_OH = 0
den_Teff = 0
OH_e = 0
Teff_e = 0
logU_e = 0
den_OH_e = 0
den_Teff_e = 0
tol_max = 1e2
OII_3727_obs = np.random.normal(tab[2],tab[3]+1e-3)
if OII_3727_obs <= 0 : OII_3727_obs = 0
OIII_5007_obs = np.random.normal(tab[4],tab[5]+1e-3)
if OIII_5007_obs <= 0: OIII_5007_obs = 0
SII_6725_obs = np.random.normal(tab[6],tab[7]+1e-3)
if SII_6725_obs <= 0: SII_6725_obs = 0
SIII_9069_obs = np.random.normal(tab[8],tab[9]+1e-3)
if SIII_9069_obs <= 0: SIII_9069_obs = 0
if tab[2] == 0 or tab[4] == 0 or OII_3727_obs == 0 or OIII_5007_obs == 0:
O2O3_obs = -10
R23_obs = -10
else:
O2O3_obs = np.log10(OII_3727_obs / OIII_5007_obs)
R23_obs = np.log10(OII_3727_obs + OIII_5007_obs )
if tab[6] == 0 or tab[8] == 0 or SII_6725_obs == 0 or SIII_9069_obs == 0:
S2S3_obs = -10
S23_obs = -10
else:
S2S3_obs = np.log10(SII_6725_obs / SIII_9069_obs )
S23_obs = (SII_6725_obs + SIII_9069_obs )
# Interpolation of grid at specific O/H
if tab[0] > 0:
OH = np.random.normal(tab[0],tab[1]+1e-3)
OH_mc.append(OH)
grid_T0 = []
if OH <= 7.1:
OH = 7.1
i0 = 0
i1 = 72
elif OH >= 7.1 and OH < 7.4:
i0 = 0
i1 = 72
elif OH >= 7.4 and OH < 7.7:
i0 = 72
i1 = 144
elif OH >= 7.7 and OH < 8.0:
i0 = 144
i1 = 216
elif OH >= 8.0 and OH < 8.3:
i0 = 216
i1 = 288
elif OH >= 8.3 and OH < 8.6:
i0 = 288
i1 = 360
elif OH >= 8.6 and OH < 8.9:
i0 = 360
i1 = 432
elif OH >= 8.9:
OH = 8.9
i0 = 360
i1 = 432
for x in range(0,72):
for y in range(0,7):
grid_T0.append(grid[i0+x,y]*np.abs(0.3-OH+grid[i0,0])/0.3+grid[i1+x,y]*np.abs(0.3-grid[i1,0]+OH)/0.3)
# grid_T0.append(grid[i0+x,y]*np.abs(0.3-grid[i0,0]+OH)/0.3 + grid[i1+x,y]*np.abs(0.3-grid[i1,0]+OH)/0.3)
grid_T = np.reshape(grid_T0,(72,7))
else:
OH = 0
OH_mc.append(OH)
grid_T = grid
# np.savetxt('int_models.dat',grid_T)
# Calculation of T and log U
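        # Each interpolated model is compared with the observed [OII]/[OIII] and
        # [SII]/[SIII] ratios through chi-squared-like deviations; Teff and log(U)
        # are then the means over all models weighted by the inverse square of the
        # combined deviation (missing observed ratios contribute zero deviation,
        # models with missing predictions are penalised with tol_max).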
if S2S3_obs == -10 and O2O3_obs == -10:
Teff = 0
logU = 0
else:
CHI_O2O3 = 0
CHI_S2S3 = 0
CHI_S23 = 0
for index in grid_T:
if index[5] == 0 or index[6] == 0:
CHI_S2S3 = tol_max
CHI_S23 = tol_max
elif S2S3_obs == -10:
CHI_S2S3 = 0
CHI_S23 = 0
else:
CHI_S2S3 = (np.log10(index[5]/index[6]) - S2S3_obs)**2/S2S3_obs
CHI_S23 = (index[5]+index[6]-S23_obs)**2/S23_obs
if index[3] == 0 or index[4] == 0:
CHI_O2O3 = tol_max
elif O2O3_obs == -10:
CHI_O2O3 = 0
else:
CHI_O2O3 = (np.log10(index[3]/index[4]) - O2O3_obs)**2/O2O3_obs
CHI_Teff = (CHI_S2S3**2 + CHI_O2O3**2 )**0.5
Teff_p = index[1]*(1/CHI_Teff)**2 + Teff_p
logU_p = index[2] *(1/CHI_Teff)**2 + logU_p
den_Teff = (1/CHI_Teff)**2 + den_Teff
Teff = Teff_p / den_Teff
logU = logU_p / den_Teff
# Calculation of T and log U errors
if S2S3_obs == -10 and O2O3_obs == -10:
eTeff = 0
elogU = 0
else:
CHI_O2O3 = 0
CHI_S2S3 = 0
CHI_S23 = 0
for index in grid_T:
if index[5] == 0 or index[6] == 0:
CHI_S2S3 = tol_max
CHI_S23 = tol_max
elif S2S3_obs == -10:
CHI_S2S3 = 0
CHI_S23 = 0
else:
CHI_S2S3 = (np.log10(index[5]/index[6]) - S2S3_obs)**2/S2S3_obs
CHI_S23 = (index[5]+index[6]-S23_obs)**2/S23_obs
if index[3] == 0 or index[4] == 0:
CHI_O2O3 = tol_max
elif O2O3_obs == -10:
CHI_O2O3 = 0
else:
CHI_O2O3 = (np.log10(index[3]/index[4]) - O2O3_obs)**2/O2O3_obs
CHI_Teff = (CHI_S2S3**2 + CHI_O2O3**2 )**0.5
Teff_e = np.abs(index[1] - Teff) * (1/CHI_Teff)**2 + Teff_e
logU_e = np.abs(index[2] - logU) * (1/CHI_Teff)**2 + logU_e
den_Teff_e = 1 * (1/CHI_Teff**2) + den_Teff_e
eTeff = Teff_e / den_Teff_e
elogU = logU_e / den_Teff_e
Teff_mc.append(Teff)
logU_mc.append(logU)
eTeff_mc.append(eTeff)
elogU_mc.append(elogU)
if tab[0] > 0:
OHf = tab[0]
eOHf = tab[1]
else:
OHf = 0
eOHf = 0
Tefff = np.mean(Teff_mc)
eTefff = (np.std(Teff_mc)**2 + np.mean(eTeff_mc)**2)**0.5
logUf = np.mean(logU_mc)
elogUf = (np.std(logU_mc)**2+np.mean(elogU_mc)**2)**0.5
output.append(tab[2])
output.append(tab[3])
output.append(tab[4])
output.append(tab[5])
output.append(tab[6])
output.append(tab[7])
output.append(tab[8])
output.append(tab[9])
output.append(OHf)
output.append(eOHf)
output.append(Tefff)
output.append(eTefff)
output.append(logUf)
output.append(elogUf)
    if input0.shape == (10,) and count == 1: continue
print (round(100*(count)/float(len(input)),1),'%','', round(OHf,2), round(eOHf,2), 100*int(Tefff/100),100*int(eTefff/100),round(logUf,2),round(elogUf,2))
out = np.reshape(output,(len(input),14))
lineas_header = [' HII-CHI-mistry v.3.1 output file',' Input file:'+input00,'Iterations for MonteCarlo: '+str(n),'Used models: '+geo_type,'','O2Hb eO2Hb O3Hb eO3Hb S2Hb eS2Hb S3Hb eS3Hb O/H eO/H Teff eTeff logU elogU']
header = '\n'.join(lineas_header)
np.savetxt(input00+'_hcm-output.dat',out,fmt=' '.join(['%.3f']*8+['%.2f']*2+['%.i']*2+['%.2f']*2),header=header)
print ('________________________________')
print ('Results are stored in '+ input00+'_hcm-output.dat')
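# Typical invocation from the command line (script and file names are illustrative;
# both arguments are optional: the file name is prompted for and the iteration
# count defaults to 25 otherwise):
#   python HCm_Teff_v3.1.py my_lines.dat 100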
|
the-stack_106_18721
|
#!/usr/bin/env python3
import re
import setuptools
with open("pullnrun/_version.py", "r") as f:
try:
version = re.search(
r"__version__\s*=\s*[\"']([^\"']+)[\"']",f.read()).group(1)
except:
raise RuntimeError('Version info not available')
with open("README.md", "r") as f:
long_description = f.read()
setuptools.setup(
name="pullnrun",
version=version,
author="Toni Kangas",
description="A simple python app for running a set of commands from remote sources and pushing result files to remote targets.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/kangasta/pullnrun",
packages=setuptools.find_packages(),
package_data={
'pullnrun': ['schemas/*.yml', 'templates/*.j2']
},
scripts=["bin/pullnrun"],
install_requires=[
"importlib_resources; python_version<'3.7'",
"Jinja2~=2.0",
"jsonschema~=3.0",
"pyyaml~=5.0",
"requests~=2.0",
],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
)
|
the-stack_106_18727
|
import tensorflow as tf
import numpy as np
import traceback
from tensorflow.python.keras.applications.mobilenet_v2 import _inverted_res_block
import os
from . import utils
import cv2
class PoseErrorCallback(tf.keras.callbacks.Callback):
def __init__(
self,
model,
ref_points,
crop_size,
focal_length,
object_name,
real_image_dir=None,
experiment=None,
):
super().__init__()
self.model = model
self.ref_points = ref_points
self.focal_length = focal_length
self.experiment = experiment
self.targets = tf.Variable(0.0, shape=tf.TensorShape(None))
self.outputs = tf.Variable(0.0, shape=tf.TensorShape(None))
self.train_errors = []
self.test_errors = []
self.comet_step = 0
self.real_image_dataset = []
if real_image_dir:
self.real_image_dataset = utils.data.dataset_from_directory(
real_image_dir, crop_size, len(ref_points), object_name
).batch(32)
def assign_metric(self, y_true, y_pred):
self.targets.assign(y_true)
kps = utils.model.decode_displacement_field(y_pred)
# kps = tf.reduce_min(tf.math.top_k(kps, tf.shape(kps)[0] // 2, sorted=False).values, axis=-1)
self.outputs.assign(kps)
return 0
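    # Note: assign_metric is registered as a dummy Keras metric in model.compile()
    # further down; its only job is to stash y_true and the decoded keypoint
    # predictions into the two tf.Variables above so calc_pose_error() can work on
    # concrete per-batch values from the callback hooks.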
def calc_pose_error(self):
truth_batch = KeypointsModel.decode_label(self.targets.numpy())
kps_batch = self.outputs.numpy()
kps_batch_uncropped = (
kps_batch
/ 2
* truth_batch["bbox_size"][
:,
None,
None,
None,
]
+ truth_batch["centroid"][:, None, None, :]
)
error_batch = np.zeros(kps_batch.shape[0])
for i, kps in enumerate(kps_batch_uncropped.numpy()):
r_vec, t_vec = utils.pose.solve_pose(
self.ref_points,
kps,
[self.focal_length, self.focal_length],
[truth_batch["height"][i], truth_batch["width"][i]],
ransac=True,
reduce_mean=False,
)
error_batch[i] = utils.pose.geodesic_error(
r_vec, truth_batch["pose"][i].numpy()
)
return error_batch
def on_epoch_begin(self, epoch, logs=None):
self.train_errors = []
def on_epoch_end(self, epoch, logs=None):
errs_pose = []
errs_pose_flip = []
errs_position = []
for image_batch, truth_batch in self.real_image_dataset:
kps_batch = self.model.predict(image_batch)
kps_batch = utils.model.decode_displacement_field(kps_batch)
kps_batch_uncropped = (
kps_batch
/ 2
* truth_batch["bbox_size"][
:,
None,
None,
None,
]
+ truth_batch["centroid"][:, None, None, :]
)
for i, kps in enumerate(kps_batch_uncropped.numpy()):
r_vec, t_vec = utils.pose.solve_pose(
self.ref_points,
kps,
[truth_batch["focal_length"][i], truth_batch["focal_length"][i]],
truth_batch["imdims"][i],
ransac=True,
reduce_mean=False,
)
errs_pose.append(
utils.pose.geodesic_error(r_vec, truth_batch["pose"][i])
)
errs_pose_flip.append(
utils.pose.geodesic_error(r_vec, truth_batch["pose"][i], flip=True)
)
errs_position.append(
utils.pose.position_error(t_vec, truth_batch["position"][i])[1]
)
err_pose = np.degrees(np.mean(errs_pose))
err_pose_flip = np.degrees(np.mean(errs_pose_flip))
err_position = np.mean(errs_position)
print(
f"\nREAL IMAGE ERROR: {err_pose:.2f} ({err_pose_flip:.2f} flip) deg, {err_position:.2f} pos"
)
if self.experiment:
with self.experiment.validate():
self.experiment.log_metric("real_image_pose_error_deg", err_pose)
self.experiment.log_metric(
"real_image_pose_error_flip_deg", err_pose_flip
)
self.experiment.log_metric("real_image_position_error", err_position)
def on_train_batch_end(self, batch, logs=None):
self.comet_step += 1
mean = np.mean(self.calc_pose_error())
self.train_errors.append(mean)
running_mean = np.mean(self.train_errors)
print(
f" - pose error: {running_mean:.4f} ({np.degrees(running_mean):.2f} deg)\n",
)
if self.experiment:
self.experiment.log_metric(
"pose_error_deg", np.degrees(mean), step=self.comet_step
)
def on_test_begin(self, logs=None):
self.test_errors = []
def on_test_batch_end(self, batch, logs=None):
self.comet_step += 1
self.test_errors.append(np.mean(self.calc_pose_error()))
def on_test_end(self, logs=None):
mean = np.mean(self.test_errors)
print(f"Eval pose error: {mean:.4f} ({np.degrees(mean):.2f} deg)", flush=True)
if self.experiment:
with self.experiment.validate():
self.experiment.log_metric(
"pose_error_deg", np.degrees(mean), step=self.comet_step
)
class KeypointsModel:
OPTIMIZERS = {
"SGD": tf.keras.optimizers.SGD,
"Adam": tf.keras.optimizers.Adam,
"Adagrad": tf.keras.optimizers.Adagrad,
"RMSProp": tf.keras.optimizers.RMSprop,
}
def __init__(self, data_dir, hp, keypoints_3d):
"""
:param data_dir: path to data directory
:param hp: dictionary of hyperparameters
"""
self.data_dir = data_dir
self.hp = hp
self.nb_keypoints = hp["keypoints"]
self.keypoints_3d = keypoints_3d[: self.nb_keypoints]
self.crop_size = hp["crop_size"]
def _get_dataset(self, split_name, train):
"""
Each item in the dataset is a tuple (image, truth).
image is a Tensor of shape (crop_size, crop_size, 3).
        truth is the flat label Tensor produced by encode_label (pose quaternion, image dims, bbox size, centroid, keypoints)
:param split_name: name of split (e.g. 'train' for 'train-0000-of-0001.tfrecord')
:param train: boolean, whether or not to perform training augmentations
:return: a tuple (TFRecordDataset, num_examples) for that split
"""
features = {
"image/height": tf.io.FixedLenFeature([], tf.int64),
"image/width": tf.io.FixedLenFeature([], tf.int64),
"image/object/keypoints": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/xmax": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymin": tf.io.VarLenFeature(tf.float32),
"image/object/bbox/ymax": tf.io.VarLenFeature(tf.float32),
"image/encoded": tf.io.FixedLenFeature([], tf.string),
"image/object/pose": tf.io.FixedLenFeature([4], tf.float32),
"image/imageset": tf.io.FixedLenFeature([], tf.string),
}
def _parse_function(parsed):
# find an approximate bounding box to crop the image
height = tf.cast(parsed["image/height"], tf.float32)
width = tf.cast(parsed["image/width"], tf.float32)
pose = parsed["image/object/pose"]
xmin = parsed["image/object/bbox/xmin"].values[0] * width
xmax = parsed["image/object/bbox/xmax"].values[0] * width
ymin = parsed["image/object/bbox/ymin"].values[0] * height
ymax = parsed["image/object/bbox/ymax"].values[0] * height
centroid = tf.stack([(ymax + ymin) / 2, (xmax + xmin) / 2], axis=-1)
bbox_size = tf.maximum(xmax - xmin, ymax - ymin)
# random positioning
if train:
expand_factor = tf.random.uniform(
[],
minval=self.hp["bbox_expand_min"],
maxval=self.hp["bbox_expand_max"],
)
# ensures that object does not go off screen
shift_amount = (expand_factor - 1) * bbox_size / 2
centroid += tf.random.uniform(
[2], minval=-shift_amount, maxval=shift_amount
)
bbox_size *= expand_factor
else:
bbox_size *= 1.25
# decode, preprocess to [-1, 1] range, and crop image/keypoints
old_dims, image = utils.model.preprocess_image(
parsed["image/encoded"], centroid, bbox_size, self.crop_size
)
keypoints = utils.model.preprocess_keypoints(
parsed["image/object/keypoints"].values,
centroid,
bbox_size,
old_dims,
self.nb_keypoints,
)
# other augmentations
if train:
# random multiple of 90 degree rotation
k = tf.random.uniform(
[], 0, 4, tf.int32
) # number of CCW 90-deg rotations
angle = tf.cast(k, tf.float32) * (np.pi / 2)
# adjust keypoints
cos = tf.cos(angle)
sin = tf.sin(angle)
rot_matrix = tf.convert_to_tensor([[cos, -sin], [sin, cos]])
keypoints = tf.reshape(keypoints, [-1, 2])[..., None]
keypoints = tf.reshape(tf.linalg.matmul(rot_matrix, keypoints), [-1])
# adjust pose
# TODO this doesn't work with the new CV2-compliant pose reference frame
w = tf.cos(angle / 2)
z = tf.sin(angle / 2)
wn = w * pose[0] - z * pose[3]
xn = w * pose[1] - z * pose[2]
yn = z * pose[1] + w * pose[2]
zn = w * pose[3] + z * pose[0]
pose = tf.stack([wn, xn, yn, zn], axis=-1)
# adjust image
image = tf.image.rot90(image, k)
# image values into the [0, 1] format
image = (image + 1) / 2
if "random_hue" in self.hp:
image = tf.image.random_hue(image, self.hp["random_hue"])
image = tf.clip_by_value(image, 0, 1)
if "random_brightness" in self.hp:
image = tf.image.random_brightness(
image, self.hp["random_brightness"]
)
image = tf.clip_by_value(image, 0, 1)
if "random_saturation" in self.hp:
image = tf.image.random_saturation(
image, *self.hp["random_saturation"]
)
image = tf.clip_by_value(image, 0, 1)
if "random_contrast" in self.hp:
image = tf.image.random_contrast(image, *self.hp["random_contrast"])
image = tf.clip_by_value(image, 0, 1)
if "random_gaussian" in self.hp:
if tf.random.uniform([], 0, 1) > 0.5:
image += tf.random.normal(
tf.shape(image), stddev=self.hp["random_gaussian"]
)
image = tf.clip_by_value(image, 0, 1)
if "random_jpeg" in self.hp:
image = tf.image.random_jpeg_quality(image, *self.hp["random_jpeg"])
image = tf.clip_by_value(image, 0, 1)
# convert back to [-1, 1] format
image = image * 2 - 1
truth = self.encode_label(
keypoints=keypoints,
pose=pose,
height=height,
width=width,
bbox_size=bbox_size,
centroid=centroid,
)
return image, truth
# with open(
# os.path.join(self.data_dir, f"{split_name}.record.numexamples"), "r"
# ) as f:
# num_examples = int(f.read())
filenames = tf.io.gfile.glob(
os.path.join(self.data_dir, f"{split_name}.record-*")
)
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=16)
if self.hp["cache_train_data"]:
dataset = dataset.cache()
if train:
dataset = dataset.shuffle(self.hp["shuffle_buffer_size"])
dataset = dataset.map(
lambda example: tf.io.parse_single_example(example, features),
num_parallel_calls=16,
)
if self.hp["excluded_imagesets"]:
dataset = dataset.filter(
lambda parsed: tf.math.reduce_all(
parsed["image/imageset"]
!= tf.convert_to_tensor(self.hp["excluded_imagesets"])
)
)
dataset = dataset.filter(
lambda parsed: len(parsed["image/object/bbox/xmin"].values) > 0
)
return dataset.map(_parse_function, num_parallel_calls=16)
def train(self, logdir, experiment=None):
train_dataset = self._get_dataset("train", True)
train_dataset = train_dataset.batch(self.hp["batch_size"])
if self.hp["prefetch_num_batches"]:
train_dataset = train_dataset.prefetch(self.hp["prefetch_num_batches"])
val_dataset = self._get_dataset("test", False)
val_dataset = val_dataset.batch(self.hp["batch_size"])
# imgs, kps = list(train_dataset.take(1).as_numpy_iterator())[0]
# for img, kp in zip(imgs, kps):
# kp = self.decode_label(kp)["keypoints"].numpy()
# kp = kp * (self.crop_size / 2) + (self.crop_size / 2)
# img = ((img + 1) / 2 * 255).astype(np.uint8)
# for i in range(len(kp)):
# y = int(kp[i, 0])
# x = int(kp[i, 1])
# cv2.circle(img, (x, y), 4, (255, 255, 255), -1)
# cv2.imshow("test.png", cv2.cvtColor(img, cv2.COLOR_RGB2BGR))
# cv2.waitKey(0)
for i, phase in enumerate(self.hp["phases"]):
# if this is the first phase, generate a new model with fresh weights.
# otherwise, load the model from the previous phase's best checkpoint
if i == 0:
model = self._gen_model()
else:
model = tf.keras.models.load_model(
os.path.join(logdir, f"phase_{i - 1}", "model.h5"), compile=False
)
# allow training on only the layers from start_layer onwards
start_layer_index = model.layers.index(
model.get_layer(phase["start_layer"])
)
for layer in model.layers[:start_layer_index]:
layer.trainable = False
for layer in model.layers[start_layer_index:]:
layer.trainable = True
print(model.summary())
def schedule(epoch):
curr_stage = next(
stage for stage in phase["lr_schedule"] if epoch < stage["epoch"]
)
i = phase["lr_schedule"].index(curr_stage)
prev_epoch = phase["lr_schedule"][i - 1]["epoch"] if i > 0 else 0
return curr_stage["lr"] * tf.math.exp(
tf.cast(curr_stage["exp"] * (prev_epoch - epoch), tf.float32)
)
phase_logdir = os.path.join(logdir, f"phase_{i}")
model_path = os.path.join(phase_logdir, "model.h5")
model_path_latest = os.path.join(phase_logdir, "model-latest.h5")
pose_error_callback = PoseErrorCallback(
model,
self.keypoints_3d,
self.crop_size,
self.hp["pnp_focal_length"],
object_name=self.hp.get("object_name", "cygnus"),
real_image_dir=self.hp.get("real_image_dir"),
experiment=experiment,
)
callbacks = [
tf.keras.callbacks.TensorBoard(
log_dir=phase_logdir, write_graph=False, profile_batch=0
),
tf.keras.callbacks.ModelCheckpoint(
model_path, monitor="val_loss", save_best_only=True, mode="min"
),
tf.keras.callbacks.ModelCheckpoint(
model_path_latest,
save_best_only=False,
),
tf.keras.callbacks.LearningRateScheduler(schedule),
pose_error_callback,
]
optimizer = self.OPTIMIZERS[phase["optimizer"]](**phase["optimizer_args"])
model.compile(
optimizer=optimizer,
loss=self.get_mobilepose_loss(model.output_shape[-3:-1]),
metrics=[pose_error_callback.assign_metric],
)
try:
model.fit(
train_dataset,
epochs=phase["lr_schedule"][-1]["epoch"],
# steps_per_epoch=num_train // self.hp["batch_size"],
validation_data=val_dataset,
# validation_steps=num_val // self.hp["batch_size"],
callbacks=callbacks,
)
except Exception:
print(traceback.format_exc())
return model_path
finally:
if experiment:
experiment.log_model(f"phase_{i}", model_path)
experiment.log_model(f"phase_{i}", model_path_latest)
return model_path
def _gen_model(self):
init_weights = self.hp.get("model_init_weights", "")
assert init_weights in ["imagenet", ""]
mobilenet = tf.keras.applications.MobileNetV2(
include_top=False,
weights=init_weights if init_weights != "" else None,
input_shape=(self.crop_size, self.crop_size, 3),
pooling=None,
alpha=1.0,
)
x = mobilenet.get_layer("block_16_project_BN").output
# 7x7x160 -> 14x14x96
x = tf.keras.layers.Conv2DTranspose(
filters=96, kernel_size=3, strides=2, padding="same", use_bias=False
)(x)
x = tf.keras.layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
x = tf.keras.layers.ReLU(6.0)(x)
x = tf.keras.layers.concatenate([x, mobilenet.get_layer("block_12_add").output])
x = _inverted_res_block(
x, filters=96, alpha=1.0, stride=1, expansion=6, block_id=17
)
x = _inverted_res_block(
x, filters=96, alpha=1.0, stride=1, expansion=6, block_id=18
)
# 14x14x96 -> 28x28x32
# x = tf.keras.layers.Conv2DTranspose(
# filters=32, kernel_size=3, strides=2, padding="same", use_bias=False
# )(x)
# x = tf.keras.layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
# x = tf.keras.layers.ReLU(6.0)(x)
# x = tf.keras.layers.concatenate([x, mobilenet.get_layer("block_5_add").output])
# x = _inverted_res_block(
# x, filters=32, alpha=1.0, stride=1, expansion=6, block_id=19
# )
# x = _inverted_res_block(
# x, filters=32, alpha=1.0, stride=1, expansion=6, block_id=20
# )
#
# # 28x28x32 -> 56x56x24
# x = tf.keras.layers.Conv2DTranspose(
# filters=24, kernel_size=3, strides=2, padding="same", use_bias=False
# )(x)
# x = tf.keras.layers.BatchNormalization(epsilon=1e-3, momentum=0.999)(x)
# x = tf.keras.layers.ReLU(6.0)(x)
# x = tf.keras.layers.concatenate([x, mobilenet.get_layer("block_2_add").output])
# x = _inverted_res_block(
# x, filters=24, alpha=1.0, stride=1, expansion=6, block_id=21
# )
# x = _inverted_res_block(
# x, filters=24, alpha=1.0, stride=1, expansion=6, block_id=22
# )
x = tf.keras.layers.SpatialDropout2D(self.hp["dropout"])(x)
# output 1x1 conv
x = tf.keras.layers.Conv2D(self.nb_keypoints * 2, kernel_size=1, use_bias=True)(
x
)
return tf.keras.models.Model(mobilenet.input, x, name="mobilepose")
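    # Flat float32 label layout shared by encode_label / decode_label below:
    #   [0:4] pose quaternion, [4] image height, [5] image width, [6] bbox size,
    #   [7:9] centroid (y, x), [9:] keypoints (2 * nb_keypoints).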
@staticmethod
def encode_label(*, keypoints, pose, height, width, bbox_size, centroid):
if len(keypoints.shape) == 3:
keypoints = tf.reshape(keypoints, [tf.shape(keypoints)[0], -1])
return tf.concat(
(
tf.reshape(tf.cast(pose, tf.float32), [-1, 4]),
tf.reshape(tf.cast(height, tf.float32), [-1, 1]),
tf.reshape(tf.cast(width, tf.float32), [-1, 1]),
tf.reshape(tf.cast(bbox_size, tf.float32), [-1, 1]),
tf.reshape(tf.cast(centroid, tf.float32), [-1, 2]),
tf.cast(keypoints, tf.float32),
),
axis=-1,
)
else:
keypoints = tf.reshape(keypoints, [-1])
return tf.concat(
(
tf.reshape(tf.cast(pose, tf.float32), [4]),
tf.reshape(tf.cast(height, tf.float32), [1]),
tf.reshape(tf.cast(width, tf.float32), [1]),
tf.reshape(tf.cast(bbox_size, tf.float32), [1]),
tf.reshape(tf.cast(centroid, tf.float32), [2]),
tf.cast(keypoints, tf.float32),
),
axis=-1,
)
@staticmethod
def decode_label(label):
if len(label.shape) == 1:
label = label[None, ...]
return {
"pose": tf.squeeze(label[:, :4]),
"height": tf.squeeze(label[:, 4]),
"width": tf.squeeze(label[:, 5]),
"bbox_size": tf.squeeze(label[:, 6]),
"centroid": tf.squeeze(label[:, 7:9]),
"keypoints": tf.squeeze(
tf.reshape(label[:, 9:], [tf.shape(label)[0], -1, 2])
),
}
@staticmethod
def get_mobilepose_loss(dfdims):
def mobilepose_loss(y_true, y_pred):
kps = KeypointsModel.decode_label(y_true)["keypoints"]
df_true = utils.model.encode_displacement_field(kps, dfdims)
df_true_flat = tf.reshape(df_true, [tf.shape(df_true)[0], -1])
df_pred_flat = tf.reshape(y_pred, [tf.shape(y_pred)[0], -1])
return tf.keras.losses.mean_absolute_error(df_true_flat, df_pred_flat)
return mobilepose_loss
|
the-stack_106_18728
|
#!/usr/bin/python3
import random
import itertools
def q(n):
"""
    We want to solve the N-queens problem: put n queens on an n*n board,
    with no two queens attacking each other.
"""
print("Hill climbing:")
# We put the queens on each column on the board
queens = tuple([random.randint(0, n - 1) for _i in range(n)])
# print_board(queens, n)
def neighbors(queens, n):
"""
        We define the neighborhood of a solution: one possible neighborhood
        is moving one queen somewhere else on the board.
"""
def q_exhaustive(n):
print("Exhaustive search:")
def h(queens):
counter = 0
for i in range(len(queens)):
for j in range(i + 1, len(queens)):
if queens[i] == queens[j]:
counter += 1
elif abs(queens[i] - queens[j]) == abs(i - j):
counter += 1
return counter
def print_board(queens, n):
board = [["." for i in range(n)] for i in range(n)]
for i in range(len(queens)):
board[queens[i]][i] = "Q"
for l in board:
for c in l:
print(c, end=" ")
print()
def q_minimax():
"""
You are player 0 at a game of tic tac toe, and you want to play against
the computer using mix-max algorithm
"""
state = [[".", ".", "."], [".", ".", "."], [".", ".", "."]]
play = True
while play:
val = has_winner(state)
if val == -1 or val == 1:
print_ttt(state)
print(f"Player {val} has won !")
break
elif val == 0:
print_ttt(state)
print("It's a tie !")
break
print_ttt(state)
# your turn:
x = int(input("Please enter line number of your move (0-2): "))
y = int(input("Please enter column number of your move (0-2): "))
if not is_valid_move(state, x, y):
print("Invalid move, try again!")
continue
state[x][y] = "O"
# computer turn:
# move = minimax(state, 0)
# state = move[1]
def is_valid_move(state, x, y):
if x < 0 or x > 2 or y < 0 or y > 2:
return False
elif state[x][y] != ".":
return False
else:
return True
def has_winner(state):
for l in state: # check lines for winner
if l == ["X"] * 3:
return 1
elif l == ["O"] * 3:
return -1
for c in [[l[i] for l in state] for i in range(len(state))]: # cols
if c == ["X"] * 3:
return 1
elif c == ["O"] * 3:
return -1
d = [l[i] for i, l in enumerate(state)] # first diag
if d == ["X"] * 3:
return 1
elif d == ["O"] * 3:
return -1
d = [l[-i - 1] for i, l in enumerate(state)] # second diag
if d == ["X"] * 3:
return 1
elif d == ["O"] * 3:
return -1
for l in state: # continue
for s in l:
if s == ".":
return None
return 0 # its a tie
def print_ttt(state):
for l in state:
print("| ", end="")
for v in l:
print(v, end=" | ")
print()
print(" __ __ __")
if __name__ == "__main__":
q(4)
q_exhaustive(4)
q_minimax()
|
the-stack_106_18732
|
from django.core import mail
from hc.api.models import Channel
from hc.test import BaseTestCase
class SendTestNotificationTestCase(BaseTestCase):
def setUp(self):
super(SendTestNotificationTestCase, self).setUp()
self.channel = Channel(kind="email", project=self.project)
self.channel.email_verified = True
self.channel.value = "[email protected]"
self.channel.save()
self.url = "/integrations/%s/test/" % self.channel.code
def test_it_sends_test_email(self):
self.client.login(username="[email protected]", password="password")
r = self.client.post(self.url, {}, follow=True)
self.assertRedirects(r, "/integrations/")
self.assertContains(r, "Test notification sent!")
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "[email protected]")
self.assertTrue("X-Bounce-Url" in email.extra_headers)
self.assertTrue("List-Unsubscribe" in email.extra_headers)
|
the-stack_106_18734
|
from .base import *
DEBUG = False
ALLOWED_HOSTS = [
'fahimtran.com',
'www.fahimtran.com',
'pure-faculty-274606.uc.r.appspot.com',
'127.0.0.1',
'localhost',
]
# SECURITY: To secure payloads and user information
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
|
the-stack_106_18736
|
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the python library parsing Revisited Oxford/Paris datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from delf.python.datasets.revisited_op import dataset
FLAGS = flags.FLAGS
class DatasetTest(tf.test.TestCase):
def testParseEasyMediumHardGroundTruth(self):
# Define input.
ground_truth = [{
'easy': np.array([10, 56, 100]),
'hard': np.array([0]),
'junk': np.array([6, 90])
}, {
'easy': np.array([], dtype='int64'),
'hard': [5],
'junk': [99, 100]
}, {
'easy': [33],
'hard': [66, 99],
'junk': np.array([], dtype='int64')
}]
# Run tested function.
(easy_ground_truth, medium_ground_truth,
hard_ground_truth) = dataset.ParseEasyMediumHardGroundTruth(ground_truth)
# Define expected outputs.
expected_easy_ground_truth = [{
'ok': np.array([10, 56, 100]),
'junk': np.array([6, 90, 0])
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([99, 100, 5])
}, {
'ok': np.array([33]),
'junk': np.array([66, 99])
}]
expected_medium_ground_truth = [{
'ok': np.array([10, 56, 100, 0]),
'junk': np.array([6, 90])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([33, 66, 99]),
'junk': np.array([], dtype='int64')
}]
expected_hard_ground_truth = [{
'ok': np.array([0]),
'junk': np.array([6, 90, 10, 56, 100])
}, {
'ok': np.array([5]),
'junk': np.array([99, 100])
}, {
'ok': np.array([66, 99]),
'junk': np.array([33])
}]
# Compare actual versus expected.
def _AssertListOfDictsOfArraysAreEqual(ground_truth, expected_ground_truth):
"""Helper function to compare ground-truth data.
Args:
ground_truth: List of dicts of arrays.
expected_ground_truth: List of dicts of arrays.
"""
self.assertEqual(len(ground_truth), len(expected_ground_truth))
for i, ground_truth_entry in enumerate(ground_truth):
self.assertEqual(sorted(ground_truth_entry.keys()), ['junk', 'ok'])
self.assertAllEqual(ground_truth_entry['junk'],
expected_ground_truth[i]['junk'])
self.assertAllEqual(ground_truth_entry['ok'],
expected_ground_truth[i]['ok'])
_AssertListOfDictsOfArraysAreEqual(easy_ground_truth,
expected_easy_ground_truth)
_AssertListOfDictsOfArraysAreEqual(medium_ground_truth,
expected_medium_ground_truth)
_AssertListOfDictsOfArraysAreEqual(hard_ground_truth,
expected_hard_ground_truth)
def testAdjustPositiveRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 6, 10, 20])
junk_ranks = np.array([1, 8, 9, 30])
# Run tested function.
adjusted_positive_ranks = dataset.AdjustPositiveRanks(
positive_ranks, junk_ranks)
# Define expected output.
expected_adjusted_positive_ranks = [0, 1, 5, 7, 17]
# Compare actual versus expected.
self.assertAllEqual(adjusted_positive_ranks,
expected_adjusted_positive_ranks)
def testComputeAveragePrecisionWorks(self):
# Define input.
positive_ranks = [0, 2, 5]
# Run tested function.
average_precision = dataset.ComputeAveragePrecision(positive_ranks)
# Define expected output.
expected_average_precision = 0.677778
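    # Hand check of the value above, assuming the trapezoidal AP rule of the
    # revisited Oxford/Paris protocol (average of the precision just before and at
    # each positive rank): for positives at ranks 0, 2 and 5,
    #   AP = [(1 + 1)/2 + (1/2 + 2/3)/2 + (2/5 + 1/2)/2] / 3
    #      = (1 + 0.583333 + 0.45) / 3 ~= 0.677778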
# Compare actual versus expected.
self.assertAllClose(average_precision, expected_average_precision)
def testComputePRAtRanksWorks(self):
# Define inputs.
positive_ranks = np.array([0, 2, 5])
desired_pr_ranks = np.array([1, 5, 10])
# Run tested function.
precisions, recalls = dataset.ComputePRAtRanks(positive_ranks,
desired_pr_ranks)
# Define expected outputs.
expected_precisions = [1.0, 0.4, 0.5]
expected_recalls = [0.333333, 0.666667, 1.0]
# Compare actual versus expected.
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testComputeMetricsWorks(self):
# Define inputs: 3 queries. For the last one, there are no expected images
# to be retrieved
sorted_index_ids = np.array([[4, 2, 0, 1, 3], [0, 2, 4, 1, 3],
[0, 1, 2, 3, 4]])
ground_truth = [{
'ok': np.array([0, 1]),
'junk': np.array([2])
}, {
'ok': np.array([0, 4]),
'junk': np.array([], dtype='int64')
}, {
'ok': np.array([], dtype='int64'),
'junk': np.array([], dtype='int64')
}]
desired_pr_ranks = [1, 2, 5]
# Run tested function.
(mean_average_precision, mean_precisions, mean_recalls, average_precisions,
precisions, recalls) = dataset.ComputeMetrics(sorted_index_ids,
ground_truth,
desired_pr_ranks)
# Define expected outputs.
expected_mean_average_precision = 0.604167
expected_mean_precisions = [0.5, 0.5, 0.666667]
expected_mean_recalls = [0.25, 0.5, 1.0]
expected_average_precisions = [0.416667, 0.791667, float('nan')]
expected_precisions = [[0.0, 0.5, 0.666667], [1.0, 0.5, 0.666667],
[float('nan'),
float('nan'),
float('nan')]]
expected_recalls = [[0.0, 0.5, 1.0], [0.5, 0.5, 1.0],
[float('nan'), float('nan'),
float('nan')]]
# Compare actual versus expected.
self.assertAllClose(mean_average_precision, expected_mean_average_precision)
self.assertAllClose(mean_precisions, expected_mean_precisions)
self.assertAllClose(mean_recalls, expected_mean_recalls)
self.assertAllClose(average_precisions, expected_average_precisions)
self.assertAllClose(precisions, expected_precisions)
self.assertAllClose(recalls, expected_recalls)
def testSaveMetricsFileWorks(self):
# Define inputs.
mean_average_precision = {'hard': 0.7, 'medium': 0.9}
mean_precisions = {
'hard': np.array([1.0, 0.8]),
'medium': np.array([1.0, 1.0])
}
mean_recalls = {
'hard': np.array([0.5, 0.8]),
'medium': np.array([0.5, 1.0])
}
pr_ranks = [1, 5]
output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
# Run tested function.
dataset.SaveMetricsFile(mean_average_precision, mean_precisions,
mean_recalls, pr_ranks, output_path)
# Define expected results.
expected_metrics = ('hard\n'
' mAP=70.0\n'
' mP@k[1 5] [100. 80.]\n'
' mR@k[1 5] [50. 80.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n')
# Parse actual results, and compare to expected.
with tf.io.gfile.GFile(output_path) as f:
metrics = f.read()
self.assertEqual(metrics, expected_metrics)
def testSaveAndReadMetricsWorks(self):
# Define inputs.
mean_average_precision = {'hard': 0.7, 'medium': 0.9}
mean_precisions = {
'hard': np.array([1.0, 0.8]),
'medium': np.array([1.0, 1.0])
}
mean_recalls = {
'hard': np.array([0.5, 0.8]),
'medium': np.array([0.5, 1.0])
}
pr_ranks = [1, 5]
output_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
# Run tested functions.
dataset.SaveMetricsFile(mean_average_precision, mean_precisions,
mean_recalls, pr_ranks, output_path)
(read_mean_average_precision, read_pr_ranks, read_mean_precisions,
read_mean_recalls) = dataset.ReadMetricsFile(output_path)
# Compares actual and expected metrics.
self.assertEqual(read_mean_average_precision, mean_average_precision)
self.assertEqual(read_pr_ranks, pr_ranks)
self.assertEqual(read_mean_precisions.keys(), mean_precisions.keys())
self.assertAllEqual(read_mean_precisions['hard'], mean_precisions['hard'])
self.assertAllEqual(read_mean_precisions['medium'],
mean_precisions['medium'])
self.assertEqual(read_mean_recalls.keys(), mean_recalls.keys())
self.assertAllEqual(read_mean_recalls['hard'], mean_recalls['hard'])
self.assertAllEqual(read_mean_recalls['medium'], mean_recalls['medium'])
def testReadMetricsWithRepeatedProtocolFails(self):
# Define inputs.
input_path = os.path.join(FLAGS.test_tmpdir, 'metrics.txt')
with tf.io.gfile.GFile(input_path, 'w') as f:
f.write('hard\n'
' mAP=70.0\n'
' mP@k[1 5] [ 100. 80.]\n'
' mR@k[1 5] [ 50. 80.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [ 100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n'
'medium\n'
' mAP=90.0\n'
' mP@k[1 5] [ 100. 100.]\n'
' mR@k[1 5] [ 50. 100.]\n')
# Run tested functions.
with self.assertRaisesRegex(ValueError, 'Malformed input'):
dataset.ReadMetricsFile(input_path)
if __name__ == '__main__':
tf.test.main()
|
the-stack_106_18739
|
__author__ = 'sibirrer'
import numpy as np
import MultiLens.Utils.constants as const
def make_grid(numPix, deltapix):
"""
returns x, y position information in two 1d arrays
"""
a = np.arange(numPix)
matrix = np.dstack(np.meshgrid(a, a)).reshape(-1, 2)
x_grid = (matrix[:, 0] - numPix/2.)*deltapix
y_grid = (matrix[:, 1] - numPix/2.)*deltapix
return x_grid*const.arcsec, y_grid*const.arcsec
def array2image(array):
"""
    reshapes the information contained in a 1d array of length n**2 into an n*n 2d array
    :param array: image values
    :type array: array of size n**2
    :returns: 2d array
    :raises: ValueError
"""
n=int(np.sqrt(len(array)))
if n**2 != len(array):
raise ValueError("lenght of input array given as %s is not square of integer number!" %(len(array)))
image = array.reshape(n, n)
return image
def image2array(image):
"""
    flattens the information contained in a 2d array of shape (n, m) into a 1d array of length n*m
    :param image: image values
    :type image: array of shape (n, m)
    :returns: 1d array
"""
nx, ny = image.shape # find the size of the array
imgh = np.reshape(image, nx*ny) # change the shape to be 1d
return imgh
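# Minimal self-check, illustrative only (not part of the original module):
if __name__ == '__main__':
    _x, _y = make_grid(numPix=4, deltapix=1.0)
    assert _x.shape == (16,) and _y.shape == (16,)
    _img = array2image(np.arange(16))
    assert image2array(_img).shape == (16,)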
|
the-stack_106_18740
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import OrderedDict
from functools import partial
from classytags.utils import flatten_context
from django.contrib.sites.models import Site
from django.template import Context
from django.utils.functional import cached_property
from django.utils.safestring import mark_safe
from cms.cache.placeholder import get_placeholder_cache, set_placeholder_cache
from cms.toolbar.utils import (
get_placeholder_toolbar_js,
get_plugin_toolbar_js,
get_toolbar_from_request,
)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import iterload_objects
from cms.utils.permissions import has_plugin_permission
from cms.utils.placeholder import get_toolbar_plugin_struct, restore_sekizai_context
from cms.utils.plugins import get_plugin_restrictions
def _get_page_ancestors(page):
"""
Returns a generator which yields the ancestors for page.
"""
if not page.parent_id:
        return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
# This is done to fetch one parent at a time vs using the tree
# to get all descendants.
# The parents have already been loaded by the placeholder pre-loading.
yield page.parent
for ancestor in _get_page_ancestors(page.parent):
yield ancestor
def _unpack_plugins(parent_plugin):
found_plugins = []
for plugin in parent_plugin.child_plugin_instances or []:
found_plugins.append(plugin)
if plugin.child_plugin_instances:
found_plugins.extend(_unpack_plugins(plugin))
return found_plugins
class RenderedPlaceholder(object):
__slots__ = (
'language',
'site_id',
'cached',
'editable',
'placeholder',
'has_content',
)
def __init__(self, placeholder, language, site_id, cached=False,
editable=False, has_content=False):
self.language = language
self.site_id = site_id
self.cached = cached
self.editable = editable
self.placeholder = placeholder
self.has_content = has_content
def __eq__(self, other):
# The same placeholder rendered with different
# parameters is considered the same.
# This behavior is compatible with previous djangoCMS releases.
return self.placeholder == other.placeholder
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.placeholder)
class BaseRenderer(object):
placeholder_edit_template = ''
def __init__(self, request):
self.request = request
self.request_language = get_language_from_request(self.request)
self._cached_templates = {}
self._cached_plugin_classes = {}
self._placeholders_content_cache = {}
self._placeholders_by_page_cache = {}
self._rendered_placeholders = OrderedDict()
self._rendered_static_placeholders = OrderedDict()
self._rendered_plugins_by_placeholder = {}
@cached_property
def current_page(self):
return self.request.current_page
@cached_property
def toolbar(self):
return get_toolbar_from_request(self.request)
@cached_property
def templates(self):
return self.toolbar.templates
@cached_property
def plugin_pool(self):
import cms.plugin_pool
return cms.plugin_pool.plugin_pool
@cached_property
def site_id(self):
site = Site.objects.get_current(self.request)
return site.pk
def get_placeholder_plugin_menu(self, placeholder, page=None):
registered_plugins = self.plugin_pool.registered_plugins
can_add_plugin = partial(has_plugin_permission, user=self.request.user, permission_type='add')
plugins = [plugin for plugin in registered_plugins if can_add_plugin(plugin_type=plugin.value)]
plugin_menu = get_toolbar_plugin_struct(
plugins=plugins,
slot=placeholder.slot,
page=page,
)
plugin_menu_template = self.templates.placeholder_plugin_menu_template
return plugin_menu_template.render({'plugin_menu': plugin_menu})
def get_placeholder_toolbar_js(self, placeholder, language, page=None):
plugins = self.plugin_pool.get_all_plugins(placeholder.slot, page) # original
plugin_types = [cls.__name__ for cls in plugins]
allowed_plugins = plugin_types + self.plugin_pool.get_system_plugins()
placeholder_toolbar_js = get_placeholder_toolbar_js(
placeholder=placeholder,
request_language=self.request_language,
render_language=language,
allowed_plugins=allowed_plugins,
)
return placeholder_toolbar_js
def get_plugin_toolbar_js(self, plugin, page=None):
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(plugin.placeholder_id, {})
child_classes, parent_classes = get_plugin_restrictions(
plugin=plugin,
page=page,
restrictions_cache=placeholder_cache,
)
content = get_plugin_toolbar_js(
plugin,
children=child_classes,
parents=parent_classes,
request_language=self.request_language,
)
return content
def get_plugin_class(self, plugin):
plugin_type = plugin.plugin_type
if not plugin_type in self._cached_plugin_classes:
self._cached_plugin_classes[plugin_type] = self.plugin_pool.get_plugin(plugin_type)
return self._cached_plugin_classes[plugin_type]
def get_plugins_to_render(self, placeholder, language, template):
from cms.utils.plugins import get_plugins
plugins = get_plugins(
request=self.request,
placeholder=placeholder,
template=template,
lang=language,
)
return plugins
def get_rendered_plugins_cache(self, placeholder):
blank = {
'plugins': [],
'plugin_parents': {},
'plugin_children': {},
}
return self._rendered_plugins_by_placeholder.get(placeholder.pk, blank)
def get_rendered_placeholders(self):
rendered = list(self._rendered_placeholders.values())
return [r.placeholder for r in rendered]
def get_rendered_editable_placeholders(self):
rendered = list(self._rendered_placeholders.values())
return [r.placeholder for r in rendered if r.editable]
def get_rendered_static_placeholders(self):
return list(self._rendered_static_placeholders.values())
class ContentRenderer(BaseRenderer):
plugin_edit_template = (
'<template class="cms-plugin '
'cms-plugin-start cms-plugin-{pk}"></template>{content}'
'<template class="cms-plugin cms-plugin-end cms-plugin-{pk}"></template>'
)
placeholder_edit_template = (
'{content} '
'<div class="cms-placeholder cms-placeholder-{placeholder_id}"></div> '
'<script data-cms>{plugin_js}\n{placeholder_js}</script>'
)
def __init__(self, request):
super(ContentRenderer, self).__init__(request)
self._placeholders_are_editable = bool(self.toolbar.edit_mode_active)
def placeholder_cache_is_enabled(self):
if not get_cms_setting('PLACEHOLDER_CACHE'):
return False
if self.request.user.is_staff:
return False
return not self._placeholders_are_editable
def render_placeholder(self, placeholder, context, language=None, page=None,
editable=False, use_cache=False, nodelist=None, width=None):
from sekizai.helpers import Watcher
language = language or self.request_language
editable = editable and self._placeholders_are_editable
if use_cache and not editable and placeholder.cache_placeholder:
use_cache = self.placeholder_cache_is_enabled()
else:
use_cache = False
if use_cache:
cached_value = self._get_cached_placeholder_content(
placeholder=placeholder,
language=language,
)
else:
cached_value = None
if cached_value is not None:
# User has opted to use the cache
# and there is something in the cache
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
context.push()
width = width or placeholder.default_width
template = page.get_template() if page else None
if width:
context['width'] = width
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
for key, value in placeholder.get_extra_context(template).items():
if key not in context:
context[key] = value
if use_cache:
watcher = Watcher(context)
plugin_content = self.render_plugins(
placeholder,
language=language,
context=context,
editable=editable,
template=template,
)
placeholder_content = ''.join(plugin_content)
if not placeholder_content and nodelist:
# should be nodelist from a template
placeholder_content = nodelist.render(context)
if use_cache:
content = {
'content': placeholder_content,
'sekizai': watcher.get_changes(),
}
set_placeholder_cache(
placeholder,
lang=language,
site_id=self.site_id,
content=content,
request=self.request,
)
rendered_placeholder = RenderedPlaceholder(
placeholder=placeholder,
language=language,
site_id=self.site_id,
cached=use_cache,
editable=editable,
has_content=bool(placeholder_content),
)
if placeholder.pk not in self._rendered_placeholders:
# First time this placeholder is rendered
if not self.toolbar._cache_disabled:
# The toolbar middleware needs to know if the response
# is to be cached.
# Set the _cache_disabled flag to the value of cache_placeholder
# only if the flag is False (meaning cache is enabled).
self.toolbar._cache_disabled = not use_cache
self._rendered_placeholders[placeholder.pk] = rendered_placeholder
if editable:
data = self.get_editable_placeholder_context(placeholder, language, page=page)
data['content'] = placeholder_content
placeholder_content = self.placeholder_edit_template.format(**data)
context.pop()
return mark_safe(placeholder_content)
def get_editable_placeholder_context(self, placeholder, language, page=None):
placeholder_cache = self.get_rendered_plugins_cache(placeholder)
placeholder_toolbar_js = self.get_placeholder_toolbar_js(
placeholder,
language=language,
page=page,
)
plugin_toolbar_js_bits = (self.get_plugin_toolbar_js(plugin, page=page)
for plugin in placeholder_cache['plugins'])
context = {
'plugin_js': ''.join(plugin_toolbar_js_bits),
'placeholder_js': placeholder_toolbar_js,
'placeholder_id': placeholder.pk,
}
return context
def render_page_placeholder(self, slot, context, inherit,
page=None, nodelist=None, editable=True):
if not self.current_page:
# This method should only be used when rendering a cms page.
return ''
current_page = page or self.current_page
placeholder_cache = self._placeholders_by_page_cache
if current_page.pk not in placeholder_cache:
# Instead of loading plugins for this one placeholder
# try and load them for all placeholders on the page.
self._preload_placeholders_for_page(current_page)
try:
placeholder = placeholder_cache[current_page.pk][slot]
except KeyError:
content = ''
placeholder = None
else:
content = self.render_placeholder(
placeholder,
context=context,
page=current_page,
editable=editable,
use_cache=True,
nodelist=None,
)
should_inherit = (
inherit
and not content and current_page.parent_id
# The placeholder cache is primed when the first placeholder
# is loaded. If the current page's parent is not in there,
# it means its cache was never primed as it wasn't necessary.
and current_page.parent_id in placeholder_cache
# don't display inherited plugins in edit mode, so that the user doesn't
# mistakenly edit/delete them. This is a fix for issue #1303. See the discussion
# there for possible enhancements
and not self.toolbar.edit_mode_active
)
if should_inherit:
# nodelist is set to None to avoid rendering the nodes inside
# a {% placeholder or %} block tag.
content = self.render_page_placeholder(
slot,
context,
inherit=True,
page=current_page.parent,
nodelist=None,
editable=False,
)
if placeholder and (editable and self._placeholders_are_editable):
# In edit mode, the contents of the placeholder are mixed with our
# internal toolbar markup, so the content variable will always be True.
# Use the rendered placeholder has_content flag instead.
has_content = self._rendered_placeholders[placeholder.pk].has_content
else:
# User is not in edit mode or the placeholder doesn't exist.
# Either way, we can trust the content variable.
has_content = bool(content)
if not has_content and nodelist:
return content + nodelist.render(context)
return content
def render_static_placeholder(self, static_placeholder, context, nodelist=None):
user = self.request.user
if self.toolbar.edit_mode_active and user.has_perm('cms.edit_static_placeholder'):
placeholder = static_placeholder.draft
editable = True
use_cache = False
else:
placeholder = static_placeholder.public
editable = False
use_cache = True
# I really don't like these impromptu flags...
placeholder.is_static = True
content = self.render_placeholder(
placeholder,
context=context,
editable=editable,
use_cache=use_cache,
nodelist=nodelist,
)
if static_placeholder.pk not in self._rendered_static_placeholders:
# First time this static placeholder is rendered
self._rendered_static_placeholders[static_placeholder.pk] = static_placeholder
return content
def render_plugin(self, instance, context, placeholder=None, editable=False):
if not placeholder:
placeholder = instance.placeholder
instance, plugin = instance.get_plugin_instance()
if not instance or not plugin.render_plugin:
return ''
# we'd better pass a flat dict to template.render
# as plugin.render can return pretty much any kind of context / dictionary
# we'd better flatten it and force to a Context object
# flattening the context means that template must be an engine-specific template object
# which is guaranteed by get_cached_template if the template returned by
# plugin._get_render_template is either a string or an engine-specific template object
context = PluginContext(context, instance, placeholder)
context = plugin.render(context, instance, placeholder.slot)
context = flatten_context(context)
template = plugin._get_render_template(context, instance, placeholder)
template = self.templates.get_cached_template(template)
content = template.render(context)
for processor in iterload_objects(get_cms_setting('PLUGIN_PROCESSORS')):
content = processor(instance, placeholder, content, context)
if editable:
content = self.plugin_edit_template.format(pk=instance.pk, content=content)
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(placeholder.pk, {})
placeholder_cache.setdefault('plugins', []).append(instance)
return mark_safe(content)
def render_plugins(self, placeholder, language, context, editable=False, template=None):
plugins = self.get_plugins_to_render(
placeholder=placeholder,
template=template,
language=language,
)
for plugin in plugins:
plugin._placeholder_cache = placeholder
yield self.render_plugin(plugin, context, placeholder, editable)
def _get_cached_placeholder_content(self, placeholder, language):
"""
Returns a dictionary mapping placeholder content and sekizai data.
Returns None if no cache is present.
"""
# Placeholders can be rendered multiple times under different sites
# it's important to have a per-site "cache".
site_cache = self._placeholders_content_cache.setdefault(self.site_id, {})
# Placeholders can be rendered multiple times under different languages
# it's important to have a per-language "cache".
language_cache = site_cache.setdefault(language, {})
if placeholder.pk not in language_cache:
cached_value = get_placeholder_cache(
placeholder,
lang=language,
site_id=self.site_id,
request=self.request,
)
            if cached_value is not None:
# None means nothing in the cache
# Anything else is a valid value
language_cache[placeholder.pk] = cached_value
return language_cache.get(placeholder.pk)
def _preload_placeholders_for_page(self, page, slots=None, inherit=False):
"""
Populates the internal plugin cache of each placeholder
in the given page if the placeholder has not been
previously cached.
"""
from cms.utils.plugins import assign_plugins
if slots:
placeholders = page.get_placeholders().filter(slot__in=slots)
else:
# Creates any placeholders missing on the page
placeholders = page.rescan_placeholders().values()
if inherit:
# When the inherit flag is True,
# assume all placeholders found are inherited and thus prefetch them.
slots_w_inheritance = [pl.slot for pl in placeholders]
elif not self.toolbar.edit_mode_active:
# Scan through the page template to find all placeholders
# that have inheritance turned on.
slots_w_inheritance = [pl.slot for pl in page.get_declared_placeholders() if pl.inherit]
else:
# Inheritance is turned off on edit-mode
slots_w_inheritance = []
if self.placeholder_cache_is_enabled():
_cached_content = self._get_cached_placeholder_content
# Only prefetch plugins if the placeholder
# has not been cached.
placeholders_to_fetch = [
placeholder for placeholder in placeholders
                if _cached_content(placeholder, self.request_language) is None]
else:
# cache is disabled, prefetch plugins for all
# placeholders in the page.
placeholders_to_fetch = placeholders
if placeholders_to_fetch:
assign_plugins(
request=self.request,
placeholders=placeholders_to_fetch,
template=page.get_template(),
lang=self.request_language,
is_fallback=inherit,
)
# Inherit only placeholders that have no plugins
# or are not cached.
placeholders_to_inherit = [
pl.slot for pl in placeholders
if not getattr(pl, '_plugins_cache', None) and pl.slot in slots_w_inheritance
]
if placeholders_to_inherit and page.parent_id:
self._preload_placeholders_for_page(
page=page.parent,
slots=placeholders_to_inherit,
inherit=True,
)
# Internal cache mapping placeholder slots
# to placeholder instances.
page_placeholder_cache = {}
for placeholder in placeholders:
# Save a query when the placeholder toolbar is rendered.
placeholder.page = page
page_placeholder_cache[placeholder.slot] = placeholder
self._placeholders_by_page_cache[page.pk] = page_placeholder_cache
class StructureRenderer(BaseRenderer):
placeholder_edit_template = (
"""
<script data-cms id="cms-plugin-child-classes-{placeholder_id}" type="text/cms-template">
{plugin_menu_js}
</script>
<script data-cms>{plugin_js}\n{placeholder_js}</script>
"""
)
def get_plugins_to_render(self, *args, **kwargs):
plugins = super(StructureRenderer, self).get_plugins_to_render(*args, **kwargs)
for plugin in plugins:
yield plugin
if not plugin.child_plugin_instances:
continue
for plugin in _unpack_plugins(plugin):
yield plugin
def render_placeholder(self, placeholder, language, page=None):
rendered_plugins = self.render_plugins(placeholder, language=language, page=page)
plugin_js_output = ''.join(rendered_plugins)
placeholder_toolbar_js = self.get_placeholder_toolbar_js(
placeholder,
language=language,
page=page,
)
rendered_placeholder = RenderedPlaceholder(
placeholder=placeholder,
language=language,
site_id=self.site_id,
cached=False,
editable=True,
)
if placeholder.pk not in self._rendered_placeholders:
self._rendered_placeholders[placeholder.pk] = rendered_placeholder
placeholder_structure_is = self.placeholder_edit_template.format(
placeholder_id=placeholder.pk,
plugin_js=plugin_js_output,
plugin_menu_js=self.get_placeholder_plugin_menu(placeholder, page=page),
placeholder_js=placeholder_toolbar_js,
)
return mark_safe(placeholder_structure_is)
def render_page_placeholder(self, page, placeholder, language=None):
return self.render_placeholder(placeholder, language=language, page=page)
def render_static_placeholder(self, static_placeholder, language=None):
user = self.request.user
if not user.has_perm('cms.edit_static_placeholder'):
return ''
language = language or self.request_language
placeholder = static_placeholder.draft
# I really don't like these impromptu flags...
placeholder.is_static = True
content = self.render_placeholder(placeholder, language=language)
if static_placeholder.pk not in self._rendered_static_placeholders:
# First time this static placeholder is rendered
self._rendered_static_placeholders[static_placeholder.pk] = static_placeholder
return content
def render_plugin(self, instance, page=None):
placeholder_cache = self._rendered_plugins_by_placeholder.setdefault(instance.placeholder_id, {})
placeholder_cache.setdefault('plugins', []).append(instance)
return self.get_plugin_toolbar_js(instance, page=page)
def render_plugins(self, placeholder, language, page=None):
template = page.get_template() if page else None
plugins = self.get_plugins_to_render(placeholder, language, template)
for plugin in plugins:
plugin._placeholder_cache = placeholder
yield self.render_plugin(plugin, page=page)
class LegacyRenderer(ContentRenderer):
placeholder_edit_template = (
"""
{content}
<div class="cms-placeholder cms-placeholder-{placeholder_id}"></div>
<script data-cms id="cms-plugin-child-classes-{placeholder_id}" type="text/cms-template">
{plugin_menu_js}
</script>
<script data-cms>{plugin_js}\n{placeholder_js}</script>
"""
)
def get_editable_placeholder_context(self, placeholder, language, page=None):
context = super(LegacyRenderer, self).get_editable_placeholder_context(
placeholder=placeholder,
language=language,
page=page,
)
context['plugin_menu_js'] = self.get_placeholder_plugin_menu(placeholder, page=page)
return context
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict_, instance, placeholder, processors=None, current_app=None):
dict_ = flatten_context(dict_)
super(PluginContext, self).__init__(dict_)
if not processors:
processors = []
for processor in iterload_objects(get_cms_setting('PLUGIN_CONTEXT_PROCESSORS')):
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
|
the-stack_106_18742
|
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$',
views.AppointmentList.as_view(),
name='appointment_list',
),
url(r'^csv/$',
views.CSVAppointmentList.as_view(),
name='csv_appointment_list',
),
)
|
the-stack_106_18743
|
import numpy as np
import datetime as dt
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
last_year = dt.datetime(2017, 8, 23) - dt.timedelta(days=365)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"<h1>Hawaii Data</h1><br>"
f"<h2>Choose from the Available Routes:</h2><br/> "
f"Precipitation returns a list of precipitation values the year ranging 2016-08-23 to 2017-08-23.<br /> <br />"
f"<li>/api/v1.0/precipitation</li><br/><br />"
f"Stations returns a list of all the stations and information about that station. <br /><br />"
f"<li>/api/v1.0/stations</li><br /> <br />"
f"Returns temperature observations for the year ranging 2016-08-23 to 2017-08-23."
f"<li>/api/v1.0/tobs</li><br /><br />"
f"Type in a date in the format YYYY-MM-DD between 2016-08-23 to 2017-08-23<br />"
f"to find the Max, Min, and Average Temperature on that day."
f"<li>/api/v1.0/<start></li><br /><br />"
f"Type in a start date and end date in the format YYYY-MM-DD between 2016-08-23 to 2017-08-23<br>"
f"to find the Max, Min, and Average Temperatures for days in that range."
f"<li>/api/v1.0/<start>/<end></li><br />"
)
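# Example requests against the routes listed above (a sketch; the host and
# port assume a local default Flask run and may differ in your setup):
#   curl http://127.0.0.1:5000/api/v1.0/precipitation
#   curl http://127.0.0.1:5000/api/v1.0/2017-01-01/2017-01-07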
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all precipitation values"""
# Query date and precipitation values for the past year
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date > last_year).order_by(Measurement.date).all()
# create a list for the results
prcp_info = []
for prcp in results:
prcp_data = {}
prcp_data["Date"] = prcp.date
prcp_data["Temp"] = prcp.prcp
prcp_info.append(prcp_data)
return jsonify(prcp_info)
@app.route("/api/v1.0/station")
def station():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all stations """
results_2 = session.query(Station).all()
station_info = []
for station in results_2:
station_data = {}
station_data["Name"] = station.station
station_data["Latitude"] = station.latitude
station_data["Longitude"] = station.longitude
station_data["Elevation"] = station.elevation
station_info.append(station_data)
return jsonify(station_info)
@app.route("/api/v1.0/tobs")
def tobs():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all tobs values"""
results_3 = session.query(Measurement.station, Measurement.date, Measurement.tobs).\
group_by(Measurement.date).filter(Measurement.date > last_year).order_by(Measurement.station).all()
tobs_results = []
for tobs in results_3:
tobs_data = {}
tobs_data["Station"] = tobs.station
tobs_data["Date"] = tobs.date
tobs_data["TOBS"] = tobs.tobs
tobs_results.append(tobs_data)
return jsonify(tobs_results)
@app.route("/api/v1.0/<start>")
def start_date(start=None):
# Create our session (link) from Python to the DB
session = Session(engine)
results_4 = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
filter(Measurement.date >= start).all()
temp_stats = []
for Tmin, Tmax, Tavg in results_4:
temp_data = {}
temp_data["Minum Temp"] = Tmin
temp_data["Maximum Temp"] = Tmax
temp_data["Average Temp"] = Tavg
temp_stats.append(temp_data)
return jsonify(temp_stats)
@app.route("/api/v1.0/<start>/<end>")
def start_end_date(start=None, end=None):
# Create our session (link) from Python to the DB
session = Session(engine)
results_5 = session.query(func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)).\
filter(Measurement.date >= start).filter(Measurement.date <= end).all()
temp_stats = []
for Tmin, Tmax, Tavg in results_5:
temp_data = {}
temp_data["Minum Temp"] = Tmin
temp_data["Maximum Temp"] = Tmax
temp_data["Average Temp"] = Tavg
temp_stats.append(temp_data)
return jsonify(temp_stats)
if __name__ == '__main__':
app.run(debug=True)
|
the-stack_106_18745
|
# -*- coding: utf-8 -*-
# Copyright (C) 2012, Almar Klein, Ant1, Marius van Voorden
#
# This code is subject to the (new) BSD license:
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module images2gif
Provides functionality for reading and writing animated GIF images.
Use writeGif to write a series of numpy arrays or PIL images as an
animated GIF. Use readGif to read an animated gif as a series of numpy
arrays.
Note that since July 2004, all patents on the LZW compression algorithm have
expired. Therefore the GIF format may now be used freely.
Acknowledgements
----------------
Many thanks to Ant1 for:
* noting the use of "palette=PIL.Image.ADAPTIVE", which significantly
improves the results.
* the modifications to save each image with its own palette, or optionally
the global palette (if it's the same).
Many thanks to Marius van Voorden for porting the NeuQuant quantization
algorithm of Anthony Dekker to Python (See the NeuQuant class for its
license).
Many thanks to Alex Robinson for implementing the concept of subrectangles,
which (depending on image content) can give a very significant reduction in
file size.
This code is based on gifmaker (in the scripts folder of the source
distribution of PIL)
Useful links
-------------
* http://tronche.com/computer-graphics/gif/
* http://en.wikipedia.org/wiki/Graphics_Interchange_Format
* http://www.w3.org/Graphics/GIF/spec-gif89a.txt
"""
# todo: This module should be part of imageio (or at least based on)
import os
import time
try:
import Image
PIL = Image
from PIL.GifImagePlugin import getheader, getdata
except ImportError:
PIL = None
try:
import numpy as np
except ImportError:
np = None
def get_cKDTree():
try:
from scipy.spatial import cKDTree
except ImportError:
cKDTree = None
return cKDTree
# getheader gives a 87a header and a color palette (two elements in a list).
# getdata()[0] gives the Image Descriptor up to (including) "LZW min code size".
# getdata()[1:] is the image data itself in chunks of 256 bytes (well
# technically the first byte says how many bytes follow, after which that
# amount (max 255) follows).
def checkImages(images):
""" checkImages(images)
Check numpy images and correct intensity range etc.
The same for all movie formats.
"""
# Init results
images2 = []
for im in images:
if PIL and isinstance(im, PIL.Image):
# We assume PIL images are all right as-is
images2.append(im)
elif np and isinstance(im, np.ndarray):
# Check and convert dtype
if im.dtype == np.uint8:
images2.append(im) # Ok
elif im.dtype in [np.float32, np.float64]:
im = im.copy()
im[im<0] = 0
im[im>1] = 1
im *= 255
images2.append( im.astype(np.uint8) )
else:
im = im.astype(np.uint8)
images2.append(im)
# Check size
if im.ndim == 2:
pass # ok
elif im.ndim == 3:
if im.shape[2] not in [3,4]:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('This array can not represent an image.')
else:
raise ValueError('Invalid image type: ' + str(type(im)))
# Done
return images2
def intToBin(i):
""" Integer to two bytes """
# divide in two parts (bytes)
i1 = i % 256
i2 = int( i/256)
# make string (little endian)
return chr(i1) + chr(i2)
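# Worked example: intToBin(300) -> '\x2c\x01', since 300 = 0x012C and the two
# bytes are written least-significant first (little endian).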
class GifWriter:
""" GifWriter()
Class that contains methods for helping write the animated GIF file.
"""
def getheaderAnim(self, im):
""" getheaderAnim(im)
Get animation header. To replace PILs getheader()[0]
"""
bb = "GIF89a"
bb += intToBin(im.size[0])
bb += intToBin(im.size[1])
bb += "\x87\x00\x00"
return bb
def getImageDescriptor(self, im, xy=None):
""" getImageDescriptor(im, xy=None)
Used for the local color table properties per image.
Otherwise global color table applies to all frames irrespective of
whether additional colors come into play that require a redefined
palette. Still a maximum of 256 colors per frame, obviously.
Written by Ant1 on 2010-08-22
Modified by Alex Robinson in January 2011 to implement subrectangles.
"""
# Default use full image and place at upper left
if xy is None:
xy = (0,0)
# Image separator,
bb = '\x2C'
# Image position and size
bb += intToBin( xy[0] ) # Left position
bb += intToBin( xy[1] ) # Top position
bb += intToBin( im.size[0] ) # image width
bb += intToBin( im.size[1] ) # image height
# packed field: local color table flag1, interlace0, sorted table0,
# reserved00, lct size111=7=2^(7+1)=256.
bb += '\x87'
# LZW minimum size code now comes later, at the beginning of the [image data] blocks
return bb
def getAppExt(self, loops=float('inf')):
""" getAppExt(loops=float('inf'))
Application extension. This part specifies the amount of loops.
If loops is 0 or inf, it goes on infinitely.
"""
if loops==0 or loops==float('inf'):
loops = 2**16-1
#bb = "" # application extension should not be used
# (the extension interprets zero loops
# to mean an infinite number of loops)
# Mmm, does not seem to work
if True:
bb = "\x21\xFF\x0B" # application extension
bb += "NETSCAPE2.0"
bb += "\x03\x01"
bb += intToBin(loops)
bb += '\x00' # end
return bb
def getGraphicsControlExt(self, duration=0.1, dispose=2):
""" getGraphicsControlExt(duration=0.1, dispose=2)
Graphics Control Extension. A sort of header at the start of
each image. Specifies duration and transparency.
Dispose
-------
* 0 - No disposal specified.
* 1 - Do not dispose. The graphic is to be left in place.
* 2 - Restore to background color. The area used by the graphic
must be restored to the background color.
* 3 - Restore to previous. The decoder is required to restore the
area overwritten by the graphic with what was there prior to
rendering the graphic.
* 4-7 -To be defined.
"""
bb = '\x21\xF9\x04'
bb += chr((dispose & 3) << 2) # low bit 1 == transparency,
# 2nd bit 1 == user input , next 3 bits, the low two of which are used,
# are dispose.
bb += intToBin( int(duration*100) ) # in 100th of seconds
bb += '\x00' # no transparent color
bb += '\x00' # end
return bb
def handleSubRectangles(self, images, subRectangles):
""" handleSubRectangles(images)
Handle the sub-rectangle stuff. If the rectangles are given by the
user, the values are checked. Otherwise the subrectangles are
calculated automatically.
"""
if isinstance(subRectangles, (tuple,list)):
# xy given directly
# Check xy
xy = subRectangles
if xy is None:
xy = (0,0)
if hasattr(xy, '__len__'):
if len(xy) == len(images):
xy = [xxyy for xxyy in xy]
else:
raise ValueError("len(xy) doesn't match amount of images.")
else:
xy = [xy for im in images]
xy[0] = (0,0)
else:
# Calculate xy using some basic image processing
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to use auto-subRectangles.")
# First make numpy arrays if required
for i in range(len(images)):
im = images[i]
if isinstance(im, Image.Image):
tmp = im.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
images[i] = a
# Determine the sub rectangles
images, xy = self.getSubRectangles(images)
# Done
return images, xy
def getSubRectangles(self, ims):
""" getSubRectangles(ims)
Calculate the minimal rectangles that need updating each frame.
Returns a two-element tuple containing the cropped images and a
list of x-y positions.
Calculating the subrectangles takes extra time, obviously. However,
if the image sizes were reduced, the actual writing of the GIF
goes faster. In some cases applying this method produces a GIF faster.
"""
# Check image count
if len(ims) < 2:
return ims, [(0,0) for i in ims]
# We need numpy
if np is None:
raise RuntimeError("Need Numpy to calculate sub-rectangles. ")
# Prepare
ims2 = [ims[0]]
xy = [(0,0)]
t0 = time.time()
# Iterate over images
prev = ims[0]
for im in ims[1:]:
# Get difference, sum over colors
diff = np.abs(im-prev)
if diff.ndim==3:
diff = diff.sum(2)
# Get begin and end for both dimensions
X = np.argwhere(diff.sum(0))
Y = np.argwhere(diff.sum(1))
# Get rect coordinates
if X.size and Y.size:
x0, x1 = X[0], X[-1]+1
y0, y1 = Y[0], Y[-1]+1
else: # No change ... make it minimal
x0, x1 = 0, 2
y0, y1 = 0, 2
# Cut out and store
im2 = im[y0:y1,x0:x1]
prev = im
ims2.append(im2)
xy.append((x0,y0))
# Done
#print('%1.2f seconds to determine subrectangles of %i images' %
# (time.time()-t0, len(ims2)) )
return ims2, xy
def convertImagesToPIL(self, images, dither, nq=0):
""" convertImagesToPIL(images, nq=0)
Convert images to Paletted PIL images, which can then be
written to a single animated GIF.
"""
# Convert to PIL images
images2 = []
for im in images:
if isinstance(im, Image.Image):
images2.append(im)
elif np and isinstance(im, np.ndarray):
if im.ndim==3 and im.shape[2]==3:
im = Image.fromarray(im,'RGB')
elif im.ndim==3 and im.shape[2]==4:
im = Image.fromarray(im[:,:,:3],'RGB')
elif im.ndim==2:
im = Image.fromarray(im,'L')
images2.append(im)
# Convert to paletted PIL images
images, images2 = images2, []
if nq >= 1:
# NeuQuant algorithm
for im in images:
im = im.convert("RGBA") # NQ assumes RGBA
nqInstance = NeuQuant(im, int(nq)) # Learn colors from image
if dither:
im = im.convert("RGB").quantize(palette=nqInstance.paletteImage())
else:
im = nqInstance.quantize(im) # Use to quantize the image itself
images2.append(im)
else:
# Adaptive PIL algorithm
AD = Image.ADAPTIVE
for im in images:
im = im.convert('P', palette=AD, dither=dither)
images2.append(im)
# Done
return images2
def writeGifToFile(self, fp, images, durations, loops, xys, disposes):
""" writeGifToFile(fp, images, durations, loops, xys, disposes)
Given a set of images writes the bytes to the specified stream.
"""
# Obtain palette for all images and count each occurrence
palettes, occur = [], []
for im in images:
palettes.append( getheader(im)[1] )
for palette in palettes:
occur.append( palettes.count( palette ) )
# Select most-used palette as the global one (or first in case no max)
globalPalette = palettes[ occur.index(max(occur)) ]
# Init
frames = 0
firstFrame = True
for im, palette in zip(images, palettes):
if firstFrame:
# Write header
# Gather info
header = self.getheaderAnim(im)
appext = self.getAppExt(loops)
# Write
fp.write(header)
fp.write(globalPalette)
fp.write(appext)
# Next frame is not the first
firstFrame = False
if True:
# Write palette and image data
# Gather info
data = getdata(im)
imdes, data = data[0], data[1:]
graphext = self.getGraphicsControlExt(durations[frames],
disposes[frames])
# Make image descriptor suitable for using 256 local color palette
lid = self.getImageDescriptor(im, xys[frames])
# Write local header
if (palette != globalPalette) or (disposes[frames] != 2):
# Use local color palette
fp.write(graphext)
fp.write(lid) # write suitable image descriptor
fp.write(palette) # write local color table
fp.write('\x08') # LZW minimum size code
else:
# Use global color palette
fp.write(graphext)
fp.write(imdes) # write suitable image descriptor
# Write image data
for d in data:
fp.write(d)
# Prepare for next round
frames = frames + 1
fp.write(";") # end gif
return frames
## Exposed functions
def writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None):
""" writeGif(filename, images, duration=0.1, repeat=True, dither=False,
nq=0, subRectangles=True, dispose=None)
Write an animated gif from the specified images.
Parameters
----------
filename : string
The name of the file to write the image to.
images : list
Should be a list consisting of PIL images or numpy arrays.
The latter should be between 0 and 255 for integer types, and
between 0 and 1 for float types.
duration : scalar or list of scalars
The duration for all frames, or (if a list) for each frame.
repeat : bool or integer
The amount of loops. If True, loops infinitely.
dither : bool
Whether to apply dithering
nq : integer
If nonzero, applies the NeuQuant quantization algorithm to create
the color palette. This algorithm is superior, but slower than
the standard PIL algorithm. The value of nq is the quality
parameter. 1 represents the best quality. 10 is in general a
good tradeoff between quality and speed. When using this option,
better results are usually obtained when subRectangles is False.
subRectangles : False, True, or a list of 2-element tuples
Whether to use sub-rectangles. If True, the minimal rectangle that
is required to update each frame is automatically detected. This
can give significant reductions in file size, particularly if only
a part of the image changes. One can also give a list of x-y
coordinates if you want to do the cropping yourself. The default
is True.
dispose : int
How to dispose each frame. 1 means that each frame is to be left
in place. 2 means the background color should be restored after
each frame. 3 means the decoder should restore the previous frame.
If subRectangles==False, the default is 2, otherwise it is 1.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to write animated gif files.")
# Check images
images = checkImages(images)
# Instantiate writer object
gifWriter = GifWriter()
# Check loops
if repeat is False:
loops = 1
elif repeat is True:
loops = 0 # zero means infinite
else:
loops = int(repeat)
# Check duration
if hasattr(duration, '__len__'):
if len(duration) == len(images):
duration = [d for d in duration]
else:
raise ValueError("len(duration) doesn't match amount of images.")
else:
duration = [duration for im in images]
# Check subrectangles
if subRectangles:
images, xy = gifWriter.handleSubRectangles(images, subRectangles)
defaultDispose = 1 # Leave image in place
else:
# Normal mode
xy = [(0,0) for im in images]
defaultDispose = 2 # Restore to background color.
# Check dispose
if dispose is None:
dispose = defaultDispose
if hasattr(dispose, '__len__'):
if len(dispose) != len(images):
raise ValueError("len(xy) doesn't match amount of images.")
else:
dispose = [dispose for im in images]
# Make images in a format that we can write easy
images = gifWriter.convertImagesToPIL(images, dither, nq)
# Write
fp = open(filename, 'wb')
try:
gifWriter.writeGifToFile(fp, images, duration, loops, xy, dispose)
finally:
fp.close()
def readGif(filename, asNumpy=True):
""" readGif(filename, asNumpy=True)
Read images from an animated GIF file. Returns a list of numpy
arrays, or, if asNumpy is false, a list of PIL images.
"""
# Check PIL
if PIL is None:
raise RuntimeError("Need PIL to read animated gif files.")
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy to read animated gif files.")
# Check whether it exists
if not os.path.isfile(filename):
raise IOError('File not found: '+str(filename))
# Load file using PIL
pilIm = PIL.open(filename)
pilIm.seek(0)
# Read all images inside
images = []
try:
while True:
# Get image as numpy array
tmp = pilIm.convert() # Make without palette
a = np.asarray(tmp)
if len(a.shape)==0:
raise MemoryError("Too little memory to convert PIL image to array")
# Store, and next
images.append(a)
pilIm.seek(pilIm.tell()+1)
except EOFError:
pass
# Convert to normal PIL images if needed
if not asNumpy:
images2 = images
images = []
for im in images2:
images.append( PIL.fromarray(im) )
# Done
return images
class NeuQuant:
""" NeuQuant(image, samplefac=10, colors=256)
samplefac should be an integer of 1 or higher; 1 gives
the highest quality but the slowest performance.
With a value of 10, one tenth of all pixels are used during
training. This value seems a nice tradeoff between speed
and quality.
colors is the number of colors to reduce the image to. This
should preferably be a power of two.
See also:
http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
License of the NeuQuant Neural-Net Quantization Algorithm
---------------------------------------------------------
Copyright (c) 1994 Anthony Dekker
Ported to python by Marius van Voorden in 2010
NEUQUANT Neural-Net quantization algorithm by Anthony Dekker, 1994.
See "Kohonen neural networks for optimal colour quantization"
in "network: Computation in Neural Systems" Vol. 5 (1994) pp 351-367.
for a discussion of the algorithm.
See also http://members.ozemail.com.au/~dekker/NEUQUANT.HTML
Any party obtaining a copy of these files from the author, directly or
indirectly, is granted, free of charge, a full and unrestricted irrevocable,
world-wide, paid up, royalty-free, nonexclusive right and license to deal
in this software and documentation files (the "Software"), including without
limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons who receive
copies from any such party to do so, with the only requirement being
that this copyright notice remain intact.
"""
NCYCLES = None # Number of learning cycles
NETSIZE = None # Number of colours used
SPECIALS = None # Number of reserved colours used
BGCOLOR = None # Reserved background colour
CUTNETSIZE = None
MAXNETPOS = None
INITRAD = None # For 256 colours, radius starts at 32
RADIUSBIASSHIFT = None
RADIUSBIAS = None
INITBIASRADIUS = None
RADIUSDEC = None # Factor of 1/30 each cycle
ALPHABIASSHIFT = None
INITALPHA = None # biased by 10 bits
GAMMA = None
BETA = None
BETAGAMMA = None
network = None # The network itself
colormap = None # The network itself
netindex = None # For network lookup - really 256
bias = None # Bias and freq arrays for learning
freq = None
pimage = None
# Four primes near 500 - assume no image has a length so large
# that it is divisible by all four primes
PRIME1 = 499
PRIME2 = 491
PRIME3 = 487
PRIME4 = 503
MAXPRIME = PRIME4
pixels = None
samplefac = None
a_s = None
def setconstants(self, samplefac, colors):
self.NCYCLES = 100 # Number of learning cycles
self.NETSIZE = colors # Number of colours used
self.SPECIALS = 3 # Number of reserved colours used
self.BGCOLOR = self.SPECIALS-1 # Reserved background colour
self.CUTNETSIZE = self.NETSIZE - self.SPECIALS
self.MAXNETPOS = self.NETSIZE - 1
self.INITRAD = self.NETSIZE/8 # For 256 colours, radius starts at 32
self.RADIUSBIASSHIFT = 6
self.RADIUSBIAS = 1 << self.RADIUSBIASSHIFT
self.INITBIASRADIUS = self.INITRAD * self.RADIUSBIAS
self.RADIUSDEC = 30 # Factor of 1/30 each cycle
self.ALPHABIASSHIFT = 10 # Alpha starts at 1
self.INITALPHA = 1 << self.ALPHABIASSHIFT # biased by 10 bits
self.GAMMA = 1024.0
self.BETA = 1.0/1024.0
self.BETAGAMMA = self.BETA * self.GAMMA
self.network = np.empty((self.NETSIZE, 3), dtype='float64') # The network itself
self.colormap = np.empty((self.NETSIZE, 4), dtype='int32') # The network itself
self.netindex = np.empty(256, dtype='int32') # For network lookup - really 256
self.bias = np.empty(self.NETSIZE, dtype='float64') # Bias and freq arrays for learning
self.freq = np.empty(self.NETSIZE, dtype='float64')
self.pixels = None
self.samplefac = samplefac
self.a_s = {}
def __init__(self, image, samplefac=10, colors=256):
# Check Numpy
if np is None:
raise RuntimeError("Need Numpy for the NeuQuant algorithm.")
# Check image
if image.size[0] * image.size[1] < NeuQuant.MAXPRIME:
raise IOError("Image is too small")
if image.mode != "RGBA":
raise IOError("Image mode should be RGBA.")
# Initialize
self.setconstants(samplefac, colors)
self.pixels = np.fromstring(image.tostring(), np.uint32)
self.setUpArrays()
self.learn()
self.fix()
self.inxbuild()
def writeColourMap(self, rgb, outstream):
for i in range(self.NETSIZE):
bb = self.colormap[i,0];
gg = self.colormap[i,1];
rr = self.colormap[i,2];
outstream.write(rr if rgb else bb)
outstream.write(gg)
outstream.write(bb if rgb else rr)
return self.NETSIZE
def setUpArrays(self):
self.network[0,0] = 0.0 # Black
self.network[0,1] = 0.0
self.network[0,2] = 0.0
self.network[1,0] = 255.0 # White
self.network[1,1] = 255.0
self.network[1,2] = 255.0
# RESERVED self.BGCOLOR # Background
for i in range(self.SPECIALS):
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
for i in range(self.SPECIALS, self.NETSIZE):
p = self.network[i]
p[:] = (255.0 * (i-self.SPECIALS)) / self.CUTNETSIZE
self.freq[i] = 1.0 / self.NETSIZE
self.bias[i] = 0.0
# Omitted: setPixels
def altersingle(self, alpha, i, b, g, r):
"""Move neuron i towards biased (b,g,r) by factor alpha"""
n = self.network[i] # Alter hit neuron
n[0] -= (alpha*(n[0] - b))
n[1] -= (alpha*(n[1] - g))
n[2] -= (alpha*(n[2] - r))
def geta(self, alpha, rad):
try:
return self.a_s[(alpha, rad)]
except KeyError:
length = rad*2-1
mid = length/2
q = np.array(list(range(mid-1,-1,-1))+list(range(-1,mid)))
a = alpha*(rad*rad - q*q)/(rad*rad)
a[mid] = 0
self.a_s[(alpha, rad)] = a
return a
def alterneigh(self, alpha, rad, i, b, g, r):
if i-rad >= self.SPECIALS-1:
lo = i-rad
start = 0
else:
lo = self.SPECIALS-1
start = (self.SPECIALS-1 - (i-rad))
if i+rad <= self.NETSIZE:
hi = i+rad
end = rad*2-1
else:
hi = self.NETSIZE
end = (self.NETSIZE - (i+rad))
a = self.geta(alpha, rad)[start:end]
p = self.network[lo+1:hi]
p -= np.transpose(np.transpose(p - np.array([b, g, r])) * a)
#def contest(self, b, g, r):
# """ Search for biased BGR values
# Finds closest neuron (min dist) and updates self.freq
# finds best neuron (min dist-self.bias) and returns position
# for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
# self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
#
# i, j = self.SPECIALS, self.NETSIZE
# dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
# bestpos = i + np.argmin(dists)
# biasdists = dists - self.bias[i:j]
# bestbiaspos = i + np.argmin(biasdists)
# self.freq[i:j] -= self.BETA * self.freq[i:j]
# self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
# self.freq[bestpos] += self.BETA
# self.bias[bestpos] -= self.BETAGAMMA
# return bestbiaspos
def contest(self, b, g, r):
""" Search for biased BGR values
Finds closest neuron (min dist) and updates self.freq
finds best neuron (min dist-self.bias) and returns position
for frequently chosen neurons, self.freq[i] is high and self.bias[i] is negative
self.bias[i] = self.GAMMA*((1/self.NETSIZE)-self.freq[i])"""
i, j = self.SPECIALS, self.NETSIZE
dists = abs(self.network[i:j] - np.array([b,g,r])).sum(1)
bestpos = i + np.argmin(dists)
biasdists = dists - self.bias[i:j]
bestbiaspos = i + np.argmin(biasdists)
self.freq[i:j] *= (1-self.BETA)
self.bias[i:j] += self.BETAGAMMA * self.freq[i:j]
self.freq[bestpos] += self.BETA
self.bias[bestpos] -= self.BETAGAMMA
return bestbiaspos
def specialFind(self, b, g, r):
for i in range(self.SPECIALS):
n = self.network[i]
if n[0] == b and n[1] == g and n[2] == r:
return i
return -1
def learn(self):
biasRadius = self.INITBIASRADIUS
alphadec = 30 + ((self.samplefac-1)/3)
lengthcount = self.pixels.size
samplepixels = lengthcount / self.samplefac
delta = samplepixels / self.NCYCLES
alpha = self.INITALPHA
i = 0;
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
print("Beginning 1D learning: samplepixels = %1.2f rad = %i" %
(samplepixels, rad) )
step = 0
pos = 0
if lengthcount%NeuQuant.PRIME1 != 0:
step = NeuQuant.PRIME1
elif lengthcount%NeuQuant.PRIME2 != 0:
step = NeuQuant.PRIME2
elif lengthcount%NeuQuant.PRIME3 != 0:
step = NeuQuant.PRIME3
else:
step = NeuQuant.PRIME4
i = 0
printed_string = ''
while i < samplepixels:
if i%100 == 99:
tmp = '\b'*len(printed_string)
printed_string = str((i+1)*100/samplepixels)+"%\n"
print(tmp + printed_string)
p = self.pixels[pos]
r = (p >> 16) & 0xff
g = (p >> 8) & 0xff
b = (p ) & 0xff
if i == 0: # Remember background colour
self.network[self.BGCOLOR] = [b, g, r]
j = self.specialFind(b, g, r)
if j < 0:
j = self.contest(b, g, r)
if j >= self.SPECIALS: # Don't learn for specials
a = (1.0 * alpha) / self.INITALPHA
self.altersingle(a, j, b, g, r)
if rad > 0:
self.alterneigh(a, rad, j, b, g, r)
pos = (pos+step)%lengthcount
i += 1
if i%delta == 0:
alpha -= alpha / alphadec
biasRadius -= biasRadius / self.RADIUSDEC
rad = biasRadius >> self.RADIUSBIASSHIFT
if rad <= 1:
rad = 0
finalAlpha = (1.0*alpha)/self.INITALPHA
print("Finished 1D learning: final alpha = %1.2f!" % finalAlpha)
def fix(self):
for i in range(self.NETSIZE):
for j in range(3):
x = int(0.5 + self.network[i,j])
x = max(0, x)
x = min(255, x)
self.colormap[i,j] = x
self.colormap[i,3] = i
def inxbuild(self):
previouscol = 0
startpos = 0
for i in range(self.NETSIZE):
p = self.colormap[i]
q = None
smallpos = i
smallval = p[1] # Index on g
# Find smallest in i..self.NETSIZE-1
for j in range(i+1, self.NETSIZE):
q = self.colormap[j]
if q[1] < smallval: # Index on g
smallpos = j
smallval = q[1] # Index on g
q = self.colormap[smallpos]
# Swap p (i) and q (smallpos) entries
if i != smallpos:
p[:],q[:] = q, p.copy()
# smallval entry is now in position i
if smallval != previouscol:
self.netindex[previouscol] = (startpos+i) >> 1
for j in range(previouscol+1, smallval):
self.netindex[j] = i
previouscol = smallval
startpos = i
self.netindex[previouscol] = (startpos+self.MAXNETPOS) >> 1
for j in range(previouscol+1, 256): # Really 256
self.netindex[j] = self.MAXNETPOS
def paletteImage(self):
""" PIL weird interface for making a paletted image: create an image which
already has the palette, and use that in Image.quantize. This function
returns this palette image. """
if self.pimage is None:
palette = []
for i in range(self.NETSIZE):
palette.extend(self.colormap[i][:3])
palette.extend([0]*(256-self.NETSIZE)*3)
# a palette image to use for quant
self.pimage = Image.new("P", (1, 1), 0)
self.pimage.putpalette(palette)
return self.pimage
def quantize(self, image):
""" Use a kdtree to quickly find the closest palette colors for the pixels """
if get_cKDTree():
return self.quantize_with_scipy(image)
else:
print('Scipy not available, falling back to slower version.')
return self.quantize_without_scipy(image)
def quantize_with_scipy(self, image):
w,h = image.size
px = np.asarray(image).copy()
px2 = px[:,:,:3].reshape((w*h,3))
cKDTree = get_cKDTree()
kdtree = cKDTree(self.colormap[:,:3],leafsize=10)
result = kdtree.query(px2)
colorindex = result[1]
print("Distance: %1.2f" % (result[0].sum()/(w*h)) )
px2[:] = self.colormap[colorindex,:3]
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def quantize_without_scipy(self, image):
"""" This function can be used if no scipy is availabe.
It's 7 times slower though.
"""
w,h = image.size
px = np.asarray(image).copy()
memo = {}
for j in range(w):
for i in range(h):
key = (px[i,j,0],px[i,j,1],px[i,j,2])
try:
val = memo[key]
except KeyError:
val = self.convert(*key)
memo[key] = val
px[i,j,0],px[i,j,1],px[i,j,2] = val
return Image.fromarray(px).convert("RGB").quantize(palette=self.paletteImage())
def convert(self, *color):
i = self.inxsearch(*color)
return self.colormap[i,:3]
def inxsearch(self, r, g, b):
"""Search for BGR values 0..255 and return colour index"""
dists = (self.colormap[:,:3] - np.array([r,g,b]))
a= np.argmin((dists*dists).sum(1))
return a
if __name__ == '__main__':
im = np.zeros((200,200), dtype=np.uint8)
im[10:30,:] = 100
im[:,80:120] = 255
im[-50:-40,:] = 50
images = [im*1.0, im*0.8, im*0.6, im*0.4, im*0]
writeGif('lala3.gif',images, duration=0.5, dither=0)
|
the-stack_106_18746
|
from typing import *
import hail as hl
from hail.expr.expressions import *
from hail.expr.types import *
from hail.matrixtable import MatrixTable
from hail.table import Table
from hail.typecheck import *
from hail.utils import Interval, Struct, new_temp_file
from hail.utils.misc import plural
from hail.utils.java import Env, joption, info
from hail.ir import *
@typecheck(i=Expression,
j=Expression,
keep=bool,
tie_breaker=nullable(func_spec(2, expr_numeric)),
keyed=bool)
def maximal_independent_set(i, j, keep=True, tie_breaker=None, keyed=True) -> Table:
"""Return a table containing the vertices in a near
`maximal independent set <https://en.wikipedia.org/wiki/Maximal_independent_set>`_
of an undirected graph whose edges are given by a two-column table.
Examples
--------
Run PC-relate and compute pairs of closely related individuals:
>>> pc_rel = hl.pc_relate(dataset.GT, 0.001, k=2, statistics='kin')
>>> pairs = pc_rel.filter(pc_rel['kin'] > 0.125)
Starting from the above pairs, prune individuals from a dataset until no
close relationships remain:
>>> related_samples_to_remove = hl.maximal_independent_set(pairs.i, pairs.j, False)
>>> result = dataset.filter_cols(
... hl.is_defined(related_samples_to_remove[dataset.col_key]), keep=False)
Starting from the above pairs, prune individuals from a dataset until no
close relationships remain, preferring to keep cases over controls:
>>> samples = dataset.cols()
>>> pairs_with_case = pairs.key_by(
... i=hl.struct(id=pairs.i, is_case=samples[pairs.i].is_case),
... j=hl.struct(id=pairs.j, is_case=samples[pairs.j].is_case))
>>> def tie_breaker(l, r):
... return hl.cond(l.is_case & ~r.is_case, -1,
... hl.cond(~l.is_case & r.is_case, 1, 0))
>>> related_samples_to_remove = hl.maximal_independent_set(
... pairs_with_case.i, pairs_with_case.j, False, tie_breaker)
>>> result = dataset.filter_cols(hl.is_defined(
... related_samples_to_remove.key_by(
... s = related_samples_to_remove.node.id.s)[dataset.col_key]), keep=False)
Notes
-----
The vertex set of the graph is implicitly all the values realized by `i`
and `j` on the rows of this table. Each row of the table corresponds to an
undirected edge between the vertices given by evaluating `i` and `j` on
that row. An undirected edge may appear multiple times in the table and
will not affect the output. Vertices with self-edges are removed as they
are not independent of themselves.
The expressions for `i` and `j` must have the same type.
The value of `keep` determines whether the vertices returned are those
in the maximal independent set, or those in the complement of this set.
This is useful if you need to filter a table without removing vertices that
don't appear in the graph at all.
This method implements a greedy algorithm which iteratively removes a
vertex of highest degree until the graph contains no edges. The greedy
algorithm always returns an independent set, but the set may not always
be perfectly maximal.
`tie_breaker` is a Python function taking two arguments---say `l` and
`r`---each of which is an :class:`Expression` of the same type as `i` and
`j`. `tie_breaker` returns a :class:`NumericExpression`, which defines an
ordering on nodes. A pair of nodes can be ordered in one of three ways, and
`tie_breaker` must encode the relationship as follows:
- if ``l < r`` then ``tie_breaker`` evaluates to some negative integer
- if ``l == r`` then ``tie_breaker`` evaluates to 0
- if ``l > r`` then ``tie_breaker`` evaluates to some positive integer
For example, the usual ordering on the integers is defined by: ``l - r``.
The `tie_breaker` function must satisfy the following property:
``tie_breaker(l, r) == -tie_breaker(r, l)``.
When multiple nodes have the same degree, this algorithm will order the
nodes according to ``tie_breaker`` and remove the *largest* node.
If `keyed` is ``False``, then a node may appear twice in the resulting
table.
Parameters
----------
i : :class:`.Expression`
Expression to compute one endpoint of an edge.
j : :class:`.Expression`
Expression to compute another endpoint of an edge.
keep : :obj:`bool`
If ``True``, return vertices in set. If ``False``, return vertices removed.
tie_breaker : function
Function used to order nodes with equal degree.
keyed : :obj:`bool`
If ``True``, key the resulting table by the `node` field; this requires
a sort.
Returns
-------
:class:`.Table`
Table with the set of independent vertices. The table schema is one row
field `node` which has the same type as input expressions `i` and `j`.
"""
if i.dtype != j.dtype:
raise ValueError("'maximal_independent_set' expects arguments `i` and `j` to have same type. "
"Found {} and {}.".format(i.dtype, j.dtype))
source = i._indices.source
if not isinstance(source, Table):
raise ValueError("'maximal_independent_set' expects an expression of 'Table'. Found {}".format(
"expression of '{}'".format(
source.__class__) if source is not None else 'scalar expression'))
if i._indices.source != j._indices.source:
raise ValueError(
"'maximal_independent_set' expects arguments `i` and `j` to be expressions of the same Table. "
"Found\n{}\n{}".format(i, j))
node_t = i.dtype
if tie_breaker:
wrapped_node_t = ttuple(node_t)
l = construct_variable('l', wrapped_node_t)
r = construct_variable('r', wrapped_node_t)
tie_breaker_expr = hl.float64(tie_breaker(l[0], r[0]))
t, _ = source._process_joins(i, j, tie_breaker_expr)
tie_breaker_str = str(tie_breaker_expr._ir)
else:
t, _ = source._process_joins(i, j)
tie_breaker_str = None
edges = t.select(__i=i, __j=j).key_by().select('__i', '__j')
edges_path = new_temp_file()
edges.write(edges_path)
edges = hl.read_table(edges_path)
mis_nodes = construct_expr(JavaIR(Env.hail().utils.Graph.pyMaximalIndependentSet(
Env.spark_backend('maximal_independent_set')._to_java_ir(edges.collect(_localize=False)._ir),
node_t._parsable_string(),
joption(tie_breaker_str))),
hl.tset(node_t))
nodes = edges.select(node = [edges.__i, edges.__j])
nodes = nodes.explode(nodes.node)
nodes = nodes.annotate_globals(mis_nodes=mis_nodes)
nodes = nodes.filter(nodes.mis_nodes.contains(nodes.node), keep)
nodes = nodes.select_globals()
if keyed:
return nodes.key_by('node').distinct()
return nodes
def require_col_key_str(dataset: MatrixTable, method: str):
if not len(dataset.col_key) == 1 or dataset[next(iter(dataset.col_key))].dtype != hl.tstr:
raise ValueError(f"Method '{method}' requires column key to be one field of type 'str', found "
f"{list(str(x.dtype) for x in dataset.col_key.values())}")
def require_table_key_variant(ht, method):
if (list(ht.key) != ['locus', 'alleles'] or
not isinstance(ht['locus'].dtype, tlocus) or
not ht['alleles'].dtype == tarray(tstr)):
raise ValueError("Method '{}' requires key to be two fields 'locus' (type 'locus<any>') and "
"'alleles' (type 'array<str>')\n"
" Found:{}".format(method, ''.join(
"\n '{}': {}".format(k, str(ht[k].dtype)) for k in ht.key)))
def require_row_key_variant(dataset, method):
if isinstance(dataset, Table):
key = dataset.key
else:
assert isinstance(dataset, MatrixTable)
key = dataset.row_key
if (list(key) != ['locus', 'alleles'] or
not isinstance(dataset['locus'].dtype, tlocus) or
not dataset['alleles'].dtype == tarray(tstr)):
raise ValueError("Method '{}' requires row key to be two fields 'locus' (type 'locus<any>') and "
"'alleles' (type 'array<str>')\n"
" Found:{}".format(method, ''.join(
"\n '{}': {}".format(k, str(dataset[k].dtype)) for k in key)))
def require_row_key_variant_w_struct_locus(dataset, method):
if (list(dataset.row_key) != ['locus', 'alleles'] or
not dataset['alleles'].dtype == tarray(tstr) or
(not isinstance(dataset['locus'].dtype, tlocus) and
dataset['locus'].dtype != hl.dtype('struct{contig: str, position: int32}'))):
raise ValueError("Method '{}' requires row key to be two fields 'locus'"
" (type 'locus<any>' or 'struct{{contig: str, position: int32}}') and "
"'alleles' (type 'array<str>')\n"
" Found:{}".format(method, ''.join(
"\n '{}': {}".format(k, str(dataset[k].dtype)) for k in dataset.row_key)))
def require_first_key_field_locus(dataset, method):
if isinstance(dataset, Table):
key = dataset.key
else:
assert isinstance(dataset, MatrixTable)
key = dataset.row_key
if (len(key) == 0 or
not isinstance(key[0].dtype, tlocus)):
raise ValueError("Method '{}' requires first key field of type 'locus<any>'.\n"
" Found:{}".format(method, ''.join(
"\n '{}': {}".format(k, str(dataset[k].dtype)) for k in key)))
@typecheck(table=Table, method=str)
def require_key(table, method):
if len(table.key) == 0:
raise ValueError("Method '{}' requires a non-empty key".format(method))
@typecheck(dataset=MatrixTable, method=str)
def require_biallelic(dataset, method) -> MatrixTable:
require_row_key_variant(dataset, method)
return dataset._select_rows(method,
hl.case()
.when(dataset.alleles.length() == 2, dataset._rvrow)
.or_error(f"'{method}' expects biallelic variants ('alleles' field of length 2), found " +
hl.str(dataset.locus) + ", " + hl.str(dataset.alleles)))
@typecheck(dataset=MatrixTable, name=str)
def rename_duplicates(dataset, name='unique_id') -> MatrixTable:
"""Rename duplicate column keys.
.. include:: ../_templates/req_tstring.rst
Examples
--------
>>> renamed = hl.rename_duplicates(dataset).cols()
>>> duplicate_samples = (renamed.filter(renamed.s != renamed.unique_id)
... .select()
... .collect())
Notes
-----
This method produces a new column field from the string column key by
appending a unique suffix ``_N`` as necessary. For example, if the column
key "NA12878" appears three times in the dataset, the first will produce
"NA12878", the second will produce "NA12878_1", and the third will produce
"NA12878_2". The name of this new field is parameterized by `name`.
Parameters
----------
dataset : :class:`.MatrixTable`
Dataset.
name : :obj:`str`
Name of new field.
Returns
-------
:class:`.MatrixTable`
"""
require_col_key_str(dataset, 'rename_duplicates')
ids = dataset.col_key[0].collect()
uniques = set()
mapping = []
new_ids = []
fmt = lambda s, i: '{}_{}'.format(s, i)
for s in ids:
s_ = s
i = 0
while s_ in uniques:
i += 1
s_ = fmt(s, i)
if s_ != s:
mapping.append((s, s_))
uniques.add(s_)
new_ids.append(s_)
if mapping:
info(f'Renamed {len(mapping)} duplicate {plural("sample ID", len(mapping))}. Mangled IDs as follows:' +
''.join(f'\n "{pre}" => "{post}"' for pre, post in mapping))
else:
info('No duplicate sample IDs found.')
uid = Env.get_uid()
return dataset.annotate_cols(**{name: hl.literal(new_ids)[hl.int(hl.scan.count())]})
@typecheck(ds=oneof(Table, MatrixTable),
intervals=expr_array(expr_interval(expr_any)),
keep=bool)
def filter_intervals(ds, intervals, keep=True) -> Union[Table, MatrixTable]:
"""Filter rows with a list of intervals.
Examples
--------
Filter to loci falling within one interval:
>>> ds_result = hl.filter_intervals(dataset, [hl.parse_locus_interval('17:38449840-38530994')])
Remove all loci within list of intervals:
>>> intervals = [hl.parse_locus_interval(x) for x in ['1:50M-75M', '2:START-400000', '3-22']]
>>> ds_result = hl.filter_intervals(dataset, intervals, keep=False)
Notes
-----
Based on the `keep` argument, this method will either restrict to points
in the supplied interval ranges, or remove all rows in those ranges.
When ``keep=True``, partitions that don't overlap any supplied interval
will not be loaded at all. This enables :func:`.filter_intervals` to be
used for reasonably low-latency queries of small ranges of the dataset, even
on large datasets.
Parameters
----------
ds : :class:`.MatrixTable` or :class:`.Table`
Dataset to filter.
intervals : :class:`.ArrayExpression` of type :py:data:`.tinterval`
Intervals to filter on. The point type of the interval must
be a prefix of the key or equal to the first field of the key.
keep : :obj:`bool`
If ``True``, keep only rows that fall within any interval in `intervals`.
If ``False``, keep only rows that fall outside all intervals in
`intervals`.
Returns
-------
:class:`.MatrixTable` or :class:`.Table`
"""
if isinstance(ds, MatrixTable):
k_type = ds.row_key.dtype
else:
assert isinstance(ds, Table)
k_type = ds.key.dtype
point_type = intervals.dtype.element_type.point_type
def is_struct_prefix(partial, full):
if list(partial) != list(full)[:len(partial)]:
return False
for k, v in partial.items():
if full[k] != v:
return False
return True
if point_type == k_type[0]:
needs_wrapper = True
k_name = k_type.fields[0]
point_type = hl.tstruct(**{k_name: k_type[k_name]})
elif isinstance(point_type, tstruct) and is_struct_prefix(point_type, k_type):
needs_wrapper = False
else:
raise TypeError(
"The point type is incompatible with key type of the dataset ('{}', '{}')".format(repr(point_type),
repr(k_type)))
def wrap_input(interval):
if interval is None:
raise TypeError("'filter_intervals' does not allow missing values in 'intervals'.")
elif needs_wrapper:
return Interval(Struct(**{k_name: interval.start}),
Struct(**{k_name: interval.end}),
interval.includes_start,
interval.includes_end)
else:
return interval
intervals = hl.eval(intervals)
intervals = [wrap_input(i) for i in intervals]
if isinstance(ds, MatrixTable):
return MatrixTable(MatrixFilterIntervals(ds._mir, intervals, point_type, keep))
else:
return Table(TableFilterIntervals(ds._tir, intervals, point_type, keep))
@typecheck(mt=MatrixTable, bp_window_size=int)
def window_by_locus(mt: MatrixTable, bp_window_size: int) -> MatrixTable:
"""Collect arrays of row and entry values from preceding loci.
.. include:: ../_templates/req_tlocus.rst
.. include:: ../_templates/experimental.rst
Examples
--------
>>> ds_result = hl.window_by_locus(ds, 3)
Notes
-----
This method groups each row (variant) with the previous rows in a window of
`bp_window_size` base pairs, putting the row values from the previous
variants into `prev_rows` (row field of type ``array<struct>``) and entry
values from those variants into `prev_entries` (entry field of type
``array<struct>``).
The `bp_window_size` argument is inclusive; if `bp_window_size` is 2 and the
loci are
.. code-block:: text
1:100
1:100
1:102
1:102
1:103
2:100
2:101
then the size of `prev_rows` is 0, 1, 2, 3, 2, 0, and 1, respectively (and
same for the size of prev_entries).
Parameters
----------
mt : :class:`.MatrixTable`
Input dataset.
bp_window_size : :obj:`int`
Base pairs to include in the backwards window (inclusive).
Returns
-------
:class:`.MatrixTable`
"""
require_first_key_field_locus(mt, 'window_by_locus')
return MatrixTable(hl.ir.MatrixToMatrixApply(mt._mir, {'name': 'WindowByLocus', 'basePairs': bp_window_size}))
|
the-stack_106_18749
|
import datetime
import typing
from warnings import warn
import discord
from discord.ext import commands
from discord.utils import snowflake_time
from . import error, http, model
from .dpy_overrides import ComponentMessage
class InteractionContext:
"""
Base context for interactions.\n
In some ways similar with discord.ext.commands.Context.
.. warning::
Do not manually init this model.
:ivar message: Message that invoked the slash command.
:ivar interaction_id: Interaction ID of the command message.
:ivar bot: discord.py client.
:ivar _http: :class:`.http.SlashCommandRequest` of the client.
:ivar _logger: Logger instance.
:ivar data: The raw data of the interaction.
:ivar values: The values sent with the interaction. Currently for selects.
:ivar deferred: Whether the command is currently deferred (loading state)
:ivar _deferred_hidden: Internal var to check that state stays the same
:ivar responded: Whether you have responded with a message to the interaction.
:ivar guild_id: Guild ID of the command message. If the command was invoked in DM, then it is ``None``
:ivar author_id: User ID representing author of the command message.
:ivar channel_id: Channel ID representing channel of the command message.
:ivar author: User or Member instance of the command invoke.
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self._token = _json["token"]
self.message = None # Should be set later.
self.interaction_id = _json["id"]
self._http = _http
self.bot = _discord
self._logger = logger
self.deferred = False
self.responded = False
self.data = _json["data"]
self.values = _json["data"]["values"] if "values" in _json["data"] else None
self._deferred_hidden = False # To check if the patch to the deferred response matches
self.guild_id = int(_json["guild_id"]) if "guild_id" in _json.keys() else None
self.author_id = int(
_json["member"]["user"]["id"] if "member" in _json.keys() else _json["user"]["id"]
)
self.channel_id = int(_json["channel_id"])
if self.guild:
self.author = discord.Member(
data=_json["member"], state=self.bot._connection, guild=self.guild
)
elif self.guild_id:
self.author = discord.User(data=_json["member"]["user"], state=self.bot._connection)
else:
self.author = discord.User(data=_json["user"], state=self.bot._connection)
self.created_at: datetime.datetime = snowflake_time(int(self.interaction_id))
@property
def _deffered_hidden(self):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
return self._deferred_hidden
@_deffered_hidden.setter
def _deffered_hidden(self, value):
warn(
"`_deffered_hidden` as been renamed to `_deferred_hidden`.",
DeprecationWarning,
stacklevel=2,
)
self._deferred_hidden = value
@property
def deffered(self):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
return self.deferred
@deffered.setter
def deffered(self, value):
warn("`deffered` as been renamed to `deferred`.", DeprecationWarning, stacklevel=2)
self.deferred = value
@property
def guild(self) -> typing.Optional[discord.Guild]:
"""
Guild instance of the command invoke. If the command was invoked in DM, then it is ``None``
:return: Optional[discord.Guild]
"""
return self.bot.get_guild(self.guild_id) if self.guild_id else None
@property
def channel(self) -> typing.Optional[typing.Union[discord.TextChannel, discord.DMChannel]]:
"""
Channel instance of the command invoke.
:return: Optional[Union[discord.abc.GuildChannel, discord.abc.PrivateChannel]]
"""
return self.bot.get_channel(self.channel_id)
async def defer(self, hidden: bool = False):
"""
'Defers' the response, showing a loading state to the user
:param hidden: Whether the deferred response should be ephemeral. Default ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 5}
if hidden:
base["data"] = {"flags": 64}
self._deferred_hidden = True
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = True
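    # Typical flow inside a command callback (a sketch; 'ctx' stands for the
    # InteractionContext instance the library passes in):
    #   await ctx.defer(hidden=True)
    #   ...do the slow work...
    #   await ctx.send("Done!", hidden=True)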
async def send(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
"""
Sends response of the interaction.
.. warning::
- Since Release 1.0.9, this has changed completely. If you are migrating from an older version, please make sure to fix the usage.
- You can't use both ``embed`` and ``embeds`` at the same time; the same applies to ``file`` and ``files``.
- If you send files in the initial response, this will defer if it hasn't already been deferred, and then PATCH the deferred response with the message.
:param content: Content of the response.
:type content: str
:param embed: Embed of the response.
:type embed: discord.Embed
:param embeds: Embeds of the response. Maximum 10.
:type embeds: List[discord.Embed]
:param tts: Whether to speak message using tts. Default ``False``.
:type tts: bool
:param file: File to send.
:type file: discord.File
:param files: Files to send.
:type files: List[discord.File]
:param allowed_mentions: AllowedMentions of the message.
:type allowed_mentions: discord.AllowedMentions
:param hidden: Whether the message is hidden, which means the message content will only be seen by the author.
:type hidden: bool
:param delete_after: If provided, the number of seconds to wait in the background before deleting the message we just sent. If the deletion fails, then it is silently ignored.
:type delete_after: float
:param components: Message components in the response. The top level must be made of ActionRows.
:type components: List[dict]
:return: Union[discord.Message, dict]
"""
if embed and embeds:
raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
if embed:
embeds = [embed]
if embeds:
if not isinstance(embeds, list):
raise error.IncorrectFormat("Provide a list of embeds.")
elif len(embeds) > 10:
raise error.IncorrectFormat("Do not provide more than 10 embeds.")
if file and files:
raise error.IncorrectFormat("You can't use both `file` and `files`!")
if file:
files = [file]
if delete_after and hidden:
raise error.IncorrectFormat("You can't delete a hidden message!")
if components and not all(comp.get("type") == 1 for comp in components):
raise error.IncorrectFormat(
"The top level of the components list must be made of ActionRows!"
)
if allowed_mentions is not None:
if self.bot.allowed_mentions is not None:
allowed_mentions = self.bot.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
else:
if self.bot.allowed_mentions is not None:
allowed_mentions = self.bot.allowed_mentions.to_dict()
else:
allowed_mentions = {}
base = {
"content": content,
"tts": tts,
"embeds": [x.to_dict() for x in embeds] if embeds else [],
"allowed_mentions": allowed_mentions,
"components": components or [],
}
if hidden:
base["flags"] = 64
initial_message = False
if not self.responded:
initial_message = True
if files and not self.deferred:
await self.defer(hidden=hidden)
if self.deferred:
if self._deferred_hidden != hidden:
self._logger.warning(
"Deferred response might not be what you set it to! (hidden / visible) "
"This is because it was deferred in a different state."
)
resp = await self._http.edit(base, self._token, files=files)
self.deferred = False
else:
json_data = {"type": 4, "data": base}
await self._http.post_initial_response(json_data, self.interaction_id, self._token)
if not hidden:
resp = await self._http.edit({}, self._token)
else:
resp = {}
self.responded = True
else:
resp = await self._http.post_followup(base, self._token, files=files)
if files:
for file in files:
file.close()
if not hidden:
smsg = model.SlashMessage(
state=self.bot._connection,
data=resp,
channel=self.channel or discord.Object(id=self.channel_id),
_http=self._http,
interaction_token=self._token,
)
if delete_after:
self.bot.loop.create_task(smsg.delete(delay=delete_after))
if initial_message:
self.message = smsg
return smsg
else:
return resp
class SlashContext(InteractionContext):
"""
Context of a slash command. Has all attributes from :class:`InteractionContext`, plus the slash-command-specific ones below.
:ivar name: Name of the command.
:ivar args: List of processed arguments invoked with the command.
:ivar kwargs: Dictionary of processed arguments invoked with the command.
:ivar subcommand_name: Subcommand of the command.
:ivar subcommand_group: Subcommand group of the command.
:ivar command_id: ID of the command.
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self.name = self.command = self.invoked_with = _json["data"]["name"]
self.args = []
self.kwargs = {}
self.subcommand_name = self.invoked_subcommand = self.subcommand_passed = None
self.subcommand_group = self.invoked_subcommand_group = self.subcommand_group_passed = None
self.command_id = _json["data"]["id"]
super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
class ComponentContext(InteractionContext):
"""
Context of a component interaction. Has all attributes from :class:`InteractionContext`, plus the component-specific ones below.
:ivar custom_id: The custom ID of the component (has alias component_id).
:ivar component_type: The type of the component.
:ivar component: Component data retrieved from the message. Not available if the origin message was ephemeral.
:ivar origin_message: The origin message of the component. Not available if the origin message was ephemeral.
:ivar origin_message_id: The ID of the origin message.
:ivar selected_options: The options selected (only for selects)
"""
def __init__(
self,
_http: http.SlashCommandRequest,
_json: dict,
_discord: typing.Union[discord.Client, commands.Bot],
logger,
):
self.user = _json["member"]["user"]
self.channelid = int(_json["channel_id"], 10)
self.custom_id = self.component_id = _json["data"]["custom_id"]
self.component_type = _json["data"]["component_type"]
super().__init__(_http=_http, _json=_json, _discord=_discord, logger=logger)
self.origin_message = None
self.origin_message_id = int(_json["message"]["id"]) if "message" in _json.keys() else None
self.component = None
self._deferred_edit_origin = False
if self.origin_message_id and (_json["message"]["flags"] & 64) != 64:
self.origin_message = ComponentMessage(
state=self.bot._connection, channel=self.channel, data=_json["message"]
)
self.component = self.origin_message.get_component(self.custom_id)
self.selected_options = None
if self.component_type == 3:
self.selected_options = _json["data"].get("values", [])
async def defer(self, hidden: bool = False, edit_origin: bool = False):
"""
        'Defers' the response, showing a loading state to the user.
        :param hidden: Whether the deferred response should be ephemeral. Defaults to ``False``.
        :param edit_origin: Whether the type is editing the origin message. If ``False``, the deferred response will be for a follow-up message. Defaults to ``False``.
"""
if self.deferred or self.responded:
raise error.AlreadyResponded("You have already responded to this command!")
base = {"type": 6 if edit_origin else 5}
if hidden:
if edit_origin:
raise error.IncorrectFormat(
"'hidden' and 'edit_origin' flags are mutually exclusive"
)
base["data"] = {"flags": 64}
self._deferred_hidden = True
self._deferred_edit_origin = edit_origin
await self._http.post_initial_response(base, self.interaction_id, self._token)
self.deferred = True
async def send(
self,
content: str = "",
*,
embed: discord.Embed = None,
embeds: typing.List[discord.Embed] = None,
tts: bool = False,
file: discord.File = None,
files: typing.List[discord.File] = None,
allowed_mentions: discord.AllowedMentions = None,
hidden: bool = False,
delete_after: float = None,
components: typing.List[dict] = None,
) -> model.SlashMessage:
if self.deferred and self._deferred_edit_origin:
self._logger.warning(
"Deferred response might not be what you set it to! (edit origin / send response message) "
"This is because it was deferred with different response type."
)
return await super().send(
content,
embed=embed,
embeds=embeds,
tts=tts,
file=file,
files=files,
allowed_mentions=allowed_mentions,
hidden=hidden,
delete_after=delete_after,
components=components,
)
async def edit_origin(self, **fields):
"""
Edits the origin message of the component.
Refer to :meth:`discord.Message.edit` and :meth:`InteractionContext.send` for fields.
"""
_resp = {}
try:
content = fields["content"]
except KeyError:
pass
else:
if content is not None:
content = str(content)
_resp["content"] = content
try:
components = fields["components"]
except KeyError:
pass
else:
if components is None:
_resp["components"] = []
else:
_resp["components"] = components
try:
embeds = fields["embeds"]
except KeyError:
            # embeds not provided; nothing to change
pass
else:
if not isinstance(embeds, list):
raise error.IncorrectFormat("Provide a list of embeds.")
if len(embeds) > 10:
raise error.IncorrectFormat("Do not provide more than 10 embeds.")
_resp["embeds"] = [e.to_dict() for e in embeds]
try:
embed = fields["embed"]
except KeyError:
pass
else:
if "embeds" in _resp:
raise error.IncorrectFormat("You can't use both `embed` and `embeds`!")
if embed is None:
_resp["embeds"] = []
else:
_resp["embeds"] = [embed.to_dict()]
file = fields.get("file")
files = fields.get("files")
if files is not None and file is not None:
raise error.IncorrectFormat("You can't use both `file` and `files`!")
if file:
files = [file]
allowed_mentions = fields.get("allowed_mentions")
if allowed_mentions is not None:
if self.bot.allowed_mentions is not None:
_resp["allowed_mentions"] = self.bot.allowed_mentions.merge(
allowed_mentions
).to_dict()
else:
_resp["allowed_mentions"] = allowed_mentions.to_dict()
else:
if self.bot.allowed_mentions is not None:
_resp["allowed_mentions"] = self.bot.allowed_mentions.to_dict()
else:
_resp["allowed_mentions"] = {}
if not self.responded:
if files and not self.deferred:
await self.defer(edit_origin=True)
if self.deferred:
if not self._deferred_edit_origin:
self._logger.warning(
"Deferred response might not be what you set it to! (edit origin / send response message) "
"This is because it was deferred with different response type."
)
_json = await self._http.edit(_resp, self._token, files=files)
self.deferred = False
else: # noqa: F841
json_data = {"type": 7, "data": _resp}
_json = await self._http.post_initial_response( # noqa: F841
json_data, self.interaction_id, self._token
)
self.responded = True
else:
raise error.IncorrectFormat("Already responded")
if files:
for file in files:
file.close()
# Commented out for now as sometimes (or at least, when not deferred) _json is an empty string?
# self.origin_message = ComponentMessage(state=self.bot._connection, channel=self.channel,
# data=_json)
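# Illustrative usage sketch (not part of the original module): inside a component
# callback coroutine that receives a ComponentContext, the origin message could be
# acknowledged and then edited in place, e.g.:
#
#   async def on_button(ctx: ComponentContext):
#       # acknowledge with an "edit origin" deferral, then update the message
#       await ctx.defer(edit_origin=True)
#       await ctx.edit_origin(content="Button clicked!", components=[])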
|
the-stack_106_18751
|
#!/usr/bin/env python3
# Copyright 2019 Johannes von Oswald
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @title :train.py
# @author :jvo
# @contact :[email protected]
# @created :07/10/2019
# @version :1.0
# @python_version :3.6.8
"""
Continual learning of MNIST VAE with hypernetworks
---------------------------------------------------
An implementation of a simple fully-connected MNIST VAE realized through
a hypernetwork, i.e., a hypernetwork that produces the weights of the decoder.
"""
# Do not delete the following import for all executable scripts!
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
import utils.hnet_regularizer as hreg
import utils.optim_step as opstep
from mnist import train_utils
from mnist.plotting import _viz_init, _viz_training, _plotImages
from mnist.replay import train_args_replay
from mnist.replay import train_utils_replay
from mnist.replay.train_gan import train_gan_one_t
def test(enc, dec, d_hnet, device, config, writer, train_iter=None,
condition=None):
""" Test the MNIST VAE - here we only sample from a fixed noise to compare
images qualitatively. One should also keep track of the reconstruction
error of e.g. a test set.
Args:
(....): See docstring of function :func:`train`.
train_iter: The current training iteration.
condition: Condition (class/task) we are currently training.
"""
if train_iter is None:
print('### Final test run ...')
train_iter = config.n_iter
else:
print('# Testing network before running training step %d ...' % \
train_iter)
# if no condition is given, we iterate over all (trained) embeddings
if condition is None:
condition = config.num_embeddings - 1
# eval all nets
enc.eval()
dec.eval()
if d_hnet is not None:
d_hnet.eval()
with torch.no_grad():
# iterate over all conditions
for m in range(condition + 1):
# Get pre training saved noise
z = config.test_z[m]
reconstructions = sample(dec, d_hnet, config, m, device, z=z)
if config.show_plots:
fig_real = _plotImages(reconstructions, config)
writer.add_figure('test_cond_' + str(m) +
'_sampled_after_' + str(condition), fig_real,
global_step=train_iter)
if train_iter == config.n_iter:
writer.add_figure('test_cond_final_' + str(m) +
'_sampled_after_' + str(condition), fig_real,
global_step=train_iter)
    # TODO: write test reconstruction error
def sample(dec, d_hnet, config, condition, device, z=None, bs=None):
"""Sample from the decoder. Given a certain condition (the task id),
    we sample from the decoder model a batch of replay data. The input of the
    decoder will be a noise vector (optionally with a specific mean) and/or an
    additional task-specific input.
Args:
(....): See docstring of function :func:`train`.
        condition: Condition (class/task) we want to sample from. Not to be
            confused with the additional option of feeding a task-specific
            condition to the replay model.
Returns:
Batch of replay data from the decoder, given a certain
condition / task id.
"""
if z is None:
# get the prior mean
if config.conditional_prior:
cur_prior = config.priors[condition]
else:
cur_prior = torch.zeros((config.batch_size,
config.latent_dim)).to(device)
# sample normal gaussian and build noise vector
eps = torch.randn_like(cur_prior)
z = cur_prior + eps
# get condition if given
if config.conditional_replay:
z = torch.cat([z, config.vae_conds[condition]], dim=1)
    # cut for replay: when we need the X_fake from all previous tasks, the given
    # batch_size has to be split such that batch_size(X_fake) == batch_size(X_real)
if bs is not None:
z = z[:bs, :]
# get weights from hnet
if d_hnet is not None:
weights_d = d_hnet.forward(condition)
else:
weights_d = None
samples = dec.forward(z, weights_d)
return torch.sigmoid(samples)
def init_plotting_embedding(dhandlers, d_hnet, writer, config):
""" This is a helper function to get lists to plot embedding histories.
Args:
(....): See docstring of function :func:`train`.
Returns:
List of lists for embedding plots during training.
"""
# initial visualization and setting up training viz
if config.show_plots:
_, dec_embs = _viz_init(dhandlers, None, d_hnet, writer, config)
if d_hnet is not None:
dec_embs_history = []
if (not config.no_cuda):
dec_embs_history.append(d_hnet.get_task_emb(0).
cpu().detach().numpy())
else:
dec_embs_history.append(d_hnet.get_task_emb(0).
detach().numpy())
else:
dec_embs_history = None
return [None, dec_embs, None, dec_embs_history]
else:
return [None, None, None, None]
def reparameterize(mu, logvar):
"""Reparameterize encoder output for vae loss. Code from
https://github.com/pytorch/examples/blob/master/vae/main.py#L48
Args:
mu: Output of encoder parameterising the mean of the Gaussian.
        logvar: Output of the encoder that gets transformed into the
            variance to be used for the reparameterization trick below.
Returns:
Sample from the Gaussian through the reparameterization trick.
"""
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
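# Illustrative sketch (not part of the original module): with zero mean and zero
# log-variance the trick reduces to drawing from a unit Gaussian while keeping the
# sample differentiable w.r.t. mu and logvar. Shapes below are assumptions for
# intuition only.
#
#   mu = torch.zeros(4, 2)
#   logvar = torch.zeros(4, 2)      # std = exp(0.5 * 0) = 1
#   z = reparameterize(mu, logvar)  # z ~ N(0, I), gradients flow through mu/logvar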
def compute_kld(mu, logvar, config, t):
"""Compute the kullback-leibler divergence between normal gaussian around
zero or mu_prior and a gaussian with parameters mu, logvar.
Args:
mu: Outputs of the encoder, mean of the VAE latent Gaussian.
logvar: Outputs of the encoder, logvar of the VAE latent Gaussian.
config: Command-line arguments.
t: task id.
Returns:
        KLD between the Gaussian parameterized by the encoder and the prior.
"""
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
# add prior matching loss
if config.conditional_prior:
cur_prior = config.priors[t]
else:
cur_prior = 0
kld = -0.5 * torch.sum(1 + logvar - (mu - cur_prior).pow(2) - \
logvar.exp(), dim=1)
# average kl by input dim (to compare to related work, see
# https://github.com/GMvandeVen/continual-learning/blob/master/train.py)
kld = torch.mean(kld) / config.input_dim
return kld
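# Sanity-check sketch (not part of the original module): when the encoder matches
# the prior exactly (mu == cur_prior and logvar == 0, i.e. unit variance), every
# per-dimension term above is 1 + 0 - 0 - 1 = 0, so compute_kld evaluates to 0 as
# expected.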
def train_vae_one_t(dhandler, enc, dec, d_hnet, device, config, writer,
embd_list, t):
""" Train the conditional MNIST VAE for one task.
In this function the main training logic for this replay model is
    implemented. After setting the optimizers for the encoder/decoder and its
    hypernetwork if applicable, a standard variational autoencoder training
scheme is implemented. To prevent the decoder (its hypernetwork) from
forgetting, we add our hypernetwork regularisation term for all tasks
seen before ``t`` to the vae loss.
Args:
(....): See docstring of function :func:`train`.
embd_list: Helper list of lists for embedding plotting.
t: Task id that will be trained.
"""
# set to training mode
enc.train()
dec.train()
if d_hnet is not None:
d_hnet.train()
# reset data handler
print("Training VAE on data handler: ", t)
# get lists for plotting embeddings
enc_embs, dec_embs, enc_embs_history, dec_embs_history = embd_list[:]
# set training_iterations if epochs are set
if config.epochs == -1:
training_iterations = config.n_iter
else:
assert (config.epochs > 0)
training_iterations = config.epochs * \
int(np.ceil(dhandler.num_train_samples / config.batch_size))
# Here we adjust the number of training iterations when we train our replay
# method to replay every single class in a task given that condition.
# We need to adjust the training iterations such that we train every
# class in the task only a portion of the time we are given for the
# whole task:
# Training_time_per_class = training_time_per_task / num_class_per_task
# This is important to compare to related work, as they set the training
# time per task which we now have to split up.
if config.single_class_replay:
training_iterations = int(training_iterations / config.out_dim)
# if we want to start training the new task with the weights of the previous
# task we have to set the start embedding for the new task to the embedding
# of the previous task.
if config.embedding_reset == "old_embedding" and t > 0:
if d_hnet is not None:
last_emb = d_hnet.get_task_embs()[t - 1].detach().clone()
d_hnet.get_task_embs()[t].data = last_emb
# Compute targets for the hnet before training.
if t > 0:
if config.rp_beta > 0 and d_hnet is not None:
targets_D = hreg.get_current_targets(t, d_hnet)
else:
targets_D = None
############
# OPTIMIZERS
############
# encoder optimizer
e_paras = enc.parameters()
eoptimizer = optim.Adam(e_paras, lr=config.enc_lr,
betas=(0.9, 0.999))
# decoder optimizer (hnet or weights directly)
if d_hnet is not None:
d_paras = list(d_hnet.theta)
if not config.dont_train_rp_embeddings:
# Set the embedding optimizer only for the current task embedding.
# Note that we could here continue training the old embeddings.
d_emb_optimizer = optim.Adam([d_hnet.get_task_emb(t)],
lr=config.dec_lr_emb, betas=(0.9, 0.999))
else:
d_emb_optimizer = None
else:
d_emb_optimizer = None
d_paras = dec.parameters()
doptimizer = optim.Adam(d_paras, lr=config.dec_lr,
betas=(0.9, 0.999))
calc_reg = config.rp_beta > 0 and t > 0 and d_hnet is not None
###########
# TRAINING
###########
for i in range(training_iterations):
### Test network.
# We test the network before we run the training iteration.
# That way, we can see the initial performance of the untrained net.
if i % config.val_iter == 0:
test(enc, dec, d_hnet, device, config, writer, i, t)
enc.train()
dec.train()
if d_hnet is not None:
d_hnet.train()
if i % 100 == 0:
print('Training iteration: %d.' % i)
# Some code for plotting.
# We want to visualize the hnet embedding trajectories.
if config.show_plots:
if d_hnet is not None:
if (not config.no_cuda):
dec_embs_history.append(d_hnet.get_task_emb(t).
clone().detach().cpu().numpy())
else:
dec_embs_history.append(d_hnet.get_task_emb(t).
clone().detach().numpy())
#######
# DATA
#######
real_batch = dhandler.next_train_batch(config.batch_size)
X_real = dhandler.input_to_torch_tensor(real_batch[0], device,
mode='train')
# set gradients again to zero
eoptimizer.zero_grad()
doptimizer.zero_grad()
if d_emb_optimizer is not None:
d_emb_optimizer.zero_grad()
############################
# KLD + RECONSTRUCTION
############################
# feed data through encoder
mu_var = enc.forward(X_real)
mu = mu_var[:, 0: config.latent_dim]
logvar = mu_var[:, config.latent_dim:2 * config.latent_dim]
# compute KLD
kld = compute_kld(mu, logvar, config, t)
# sample from encoder gaussian distribution
dec_input = reparameterize(mu, logvar)
reconstructions = sample(dec, d_hnet, config, t, device, z=dec_input)
# average reconstruction error like this to compare to related work, see
# https://github.com/GMvandeVen/continual-learning/blob/master/train.py
x_rec_loss = F.binary_cross_entropy(reconstructions,
X_real, reduction='none')
x_rec_loss = torch.mean(x_rec_loss, dim=1)
x_rec_loss = torch.mean(x_rec_loss)
loss = x_rec_loss + kld
######################################################
# HYPERNET REGULARISATION - CONTINUAL LEARNING METHOD
######################################################
loss.backward(retain_graph=calc_reg, create_graph=calc_reg and \
config.backprop_dt)
# compute hypernet loss and fix embedding -> change current embs
if calc_reg:
if config.no_lookahead:
dTheta = None
else:
dTheta = opstep.calc_delta_theta(doptimizer,
config.use_sgd_change, lr=config.dec_lr,
detach_dt=not config.backprop_dt)
dloss_reg = config.rp_beta * hreg.calc_fix_target_reg(d_hnet, t,
targets=targets_D,
mnet=dec, dTheta=dTheta, dTembs=None)
dloss_reg.backward()
else:
dloss_reg = 0
# compute gradients for generator and take gradient step
doptimizer.step()
eoptimizer.step()
if d_hnet is not None and not config.dont_train_rp_embeddings:
d_emb_optimizer.step()
# Visualization of current progress in tensorboard
if (i % config.plot_update_steps == 0 and i > 0 and config.show_plots):
if dec_embs_history is not None:
dec_embedding_cut = np.asarray(dec_embs_history[2:])
else:
dec_embedding_cut = None
if enc_embs_history is not None:
enc_embedding_cut = np.asarray(enc_embs_history[2:])
else:
enc_embedding_cut = None
_viz_training(X_real, reconstructions, enc_embs,
dec_embs, enc_embedding_cut, dec_embedding_cut,
writer, i, config, title="train_cond_" + str(t))
# track some training statistics
writer.add_scalar('train/kld_%d' % (t), kld, i)
writer.add_scalar('train/reconstruction_%d' % (t), x_rec_loss, i)
writer.add_scalar('train/all_loss_%d' % (t), loss + dloss_reg, i)
if config.rp_beta > 0:
writer.add_scalar('train/d_hnet_loss_reg_%d' % (t), dloss_reg, i)
test(enc, dec, d_hnet, device, config, writer, config.n_iter, t)
def train(dhandlers, enc, dec, d_hnet, device, config, writer):
""" Train replay model in continual fashion on MNIST dataset.
This is a helper function that loops over the range of tasks and
iteratively starts training the replay model on new tasks.
Args:
dhandlers: The dataset handlers.
enc: The model of the encoder network.
        dec: The model of the decoder network.
        d_hnet: The model of the decoder hypernetwork.
        device: Torch device (cpu or gpu).
config: The command line arguments.
writer: The tensorboard summary writer.
"""
print('Training the MNIST replay model ...')
# get embedding lists for plotting
embd_list = init_plotting_embedding(dhandlers, d_hnet, writer, config)
# train the replay model task by task
for t in range(config.num_embeddings):
if config.replay_method == 'gan':
train_gan_one_t(dhandlers[t], enc, dec, d_hnet, device,
config, writer, embd_list, t)
else:
train_vae_one_t(dhandlers[t], enc, dec, d_hnet, device,
config, writer, embd_list, t)
def run(config, train_system=True, only_train_replay=False, train_tandem=True):
""" Method to start training MNIST replay model.
Depending on the configurations, here we control the creation and
training of the different replay modules with their corresponding
hypernetworks.
Args:
config: The command line arguments.
train_system: (optional) Set to false if we want this function
only to create config, networks and data_handlers for future
training. See :func:`mnist.train_splitMNIST.run` for a use case.
only_train_replay: (optional) If this script will only be used to
train a replay model. Normally, we use this script in tandem
with an additional classifier that uses this replay model to
replay old tasks data.
train_tandem: (optional) If we will use this script to train in
tandem i.e. in an alternating fashion with a classifier.
Returns:
(tuple): Tuple containing:
(....): See docstring of function :func:`train`.
"""
# if we want to train a classifier on single classes then we need a single
# class replay method. This need not be the case otherwise i.e. we can
# have a single class replay method but train our classifier on the
    # replay data (built out of multiple replayed conditions) and the current
# data at once.
# single class replay only implemented for splitMNIST
if config.single_class_replay:
assert (config.experiment == "splitMNIST")
if config.num_tasks > 100 and config.cl_scenario != 1:
print("Attention: Replay model not tested for num tasks > 100")
### Setup environment
device, writer = train_utils._setup_environment(config)
### Create tasks for split MNIST
if config.single_class_replay:
steps = 1
else:
steps = 2
### Create tasks for split MNIST
if train_system == False and config.upper_bound == False:
dhandlers = None
else:
dhandlers = train_utils._generate_tasks(config, steps)
### Generate networks.
if train_system == False:
enc, dec, d_hnet = None, None, None
else:
if config.rp_beta > 0:
create_rp_hnet = True
else:
create_rp_hnet = False
enc, dec, d_hnet = train_utils_replay.generate_replay_networks(config,
dhandlers, device, create_rp_hnet,
only_train_replay=only_train_replay)
    ### Generate task priors for latent space.
priors = []
test_z = []
vae_conds = []
### Save some noise vectors for testing
for t in range(config.num_embeddings):
# if conditional prior create some task priors and save them
if config.conditional_prior:
mu = torch.zeros((config.latent_dim)).to(device)
nn.init.normal_(mu, mean=0, std=1.)
mu = torch.stack([mu] * config.batch_size)
mu.requires_grad = False
priors.append(mu)
else:
mu = torch.zeros((config.batch_size,
config.latent_dim)).to(device)
priors.append(None)
### Generate sampler for latent space.
eps = torch.randn_like(mu)
sample = mu + eps
sample.requires_grad = False
test_z.append(sample)
        # if vae has some conditional input, then either save one-hot encodings
# or some conditions from a gaussian
if config.conditional_replay:
vae_c = torch.zeros((config.conditional_dim)).to(device)
if not config.not_conditional_hot_enc:
vae_c[t] = 1
else:
nn.init.normal_(vae_c, mean=0, std=1.)
vae_c = torch.stack([vae_c] * config.batch_size)
vae_c.requires_grad = False
vae_conds.append(vae_c)
config.test_z = test_z
config.priors = priors
config.vae_conds = vae_conds
if not train_tandem:
### Train the network.
train(dhandlers, enc, dec, d_hnet, device, config, writer)
### Test network.
test(enc, dec, d_hnet, device, config, writer)
return dec, d_hnet, enc, dhandlers, device, writer, config
if __name__ == '__main__':
### Get command line arguments.
config = train_args_replay.parse_rp_cmd_arguments(mode='perm')
### run the scripts
dec, d_hnet, enc, dhandlers, device, writer, config = \
run(config, only_train_replay=True, train_tandem=False)
writer.close()
print('Program finished successfully.')
|
the-stack_106_18753
|
#!/usr/bin/env python
import os
import numpy as np
from selfdrive.can.parser import CANParser
from cereal import car
from common.realtime import sec_since_boot
RADAR_MSGS = range(0x500, 0x540)
def _create_radar_can_parser():
dbc_f = 'ford_fusion_2018_adas.dbc'
msg_n = len(RADAR_MSGS)
signals = list(zip(['X_Rel'] * msg_n + ['Angle'] * msg_n + ['V_Rel'] * msg_n,
RADAR_MSGS * 3,
[0] * msg_n + [0] * msg_n + [0] * msg_n))
checks = list(zip(RADAR_MSGS, [20]*msg_n))
return CANParser(os.path.splitext(dbc_f)[0], signals, checks, 1)
class RadarInterface(object):
def __init__(self, CP):
# radar
self.pts = {}
self.validCnt = {key: 0 for key in RADAR_MSGS}
self.track_id = 0
self.delay = 0.0 # Delay of radar
# Nidec
self.rcp = _create_radar_can_parser()
self.trigger_msg = 0x53f
self.updated_messages = set()
def update(self, can_strings):
tm = int(sec_since_boot() * 1e9)
vls = self.rcp.update_strings(tm, can_strings)
self.updated_messages.update(vls)
if self.trigger_msg not in self.updated_messages:
return None
ret = car.RadarData.new_message()
errors = []
if not self.rcp.can_valid:
errors.append("canError")
ret.errors = errors
for ii in self.updated_messages:
cpt = self.rcp.vl[ii]
if cpt['X_Rel'] > 0.00001:
self.validCnt[ii] = 0 # reset counter
if cpt['X_Rel'] > 0.00001:
self.validCnt[ii] += 1
else:
self.validCnt[ii] = max(self.validCnt[ii] -1, 0)
#print ii, self.validCnt[ii], cpt['VALID'], cpt['X_Rel'], cpt['Angle']
# radar point only valid if there have been enough valid measurements
if self.validCnt[ii] > 0:
if ii not in self.pts:
self.pts[ii] = car.RadarData.RadarPoint.new_message()
self.pts[ii].trackId = self.track_id
self.track_id += 1
self.pts[ii].dRel = cpt['X_Rel'] # from front of car
self.pts[ii].yRel = cpt['X_Rel'] * cpt['Angle'] * np.pi / 180. # in car frame's y axis, left is positive
self.pts[ii].vRel = cpt['V_Rel']
self.pts[ii].aRel = float('nan')
self.pts[ii].yvRel = float('nan')
self.pts[ii].measured = True
else:
if ii in self.pts:
del self.pts[ii]
ret.points = self.pts.values()
self.updated_messages.clear()
return ret
|
the-stack_106_18756
|
#!/usr/bin/env python
from ncclient import manager
import sys
from lxml import etree
# Add parent directory to path to allow importing common vars
sys.path.append("..") # noqa
from device_info import sbx_n9kv_ao as device # noqa
# Loopback Info - Change the details for your interface
prefix = "10.99.99.0/24"
# create a main() method
def main():
"""
    Main method that removes a prefix from BGP
"""
add_prefix = """ <config>
<System xmlns="http://cisco.com/ns/yang/cisco-nx-os-device">
<bgp-items>
<inst-items>
<dom-items>
<Dom-list>
<name>default</name>
<af-items>
<DomAf-list>
<type>ipv4-ucast</type>
<prefix-items>
<AdvPrefix-list operation="remove">
<addr>{}</addr>
</AdvPrefix-list>
</prefix-items>
</DomAf-list>
</af-items>
</Dom-list>
</dom-items>
</inst-items>
</bgp-items>
</System>
</config>""".format(prefix)
with manager.connect(host = device["address"],
port = device["netconf_port"],
username = device["username"],
password = device["password"],
hostkey_verify = False) as m:
        # Remove the prefix from BGP
        print("\nNow removing prefix {} from device {}...\n".format(prefix, device["address"]))
        netconf_response = m.edit_config(target='running', config=remove_prefix)
# Parse the XML response
print(netconf_response)
if __name__ == '__main__':
sys.exit(main())
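# Illustrative follow-up sketch (not part of the original script): the running
# configuration could be fetched inside the same ncclient session to confirm the
# prefix was removed, e.g.:
#   running = m.get_config(source='running')
#   print(running)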
|
the-stack_106_18757
|
"""Main ecoshard module."""
import datetime
import hashlib
import logging
import json
import os
import re
import requests
import shutil
import subprocess
import sys
import time
import urllib.request
import zipfile
from osgeo import gdal
import numpy
from .geoprocessing import geoprocessing
import scipy.stats
LOGGER = logging.getLogger(__name__)
gdal.SetCacheMax(2**26)
COG_TUPLE = ('COG', (
'TILED=YES', 'BIGTIFF=YES', 'COMPRESS=LZW',
'BLOCKXSIZE=256', 'BLOCKYSIZE=256'))
def hash_file(
base_path, target_token_path=None, target_dir=None, rename=False,
hash_algorithm='md5', hash_length=None, force=False):
"""Ecoshard file by hashing it and appending hash to filename.
An EcoShard is the hashing of a file and the rename to the following
format: [base name]_[hashalg]_[hash][base extension]. If the base path
already is in this format a ValueError is raised unless `force` is True.
Args:
base_path (str): path to base file.
target_token_path (str): if not None, this file is created and written
with the timestamp at which the ecoshard was completed. This is
useful for TaskGraph to note a file being created without a priori
knowing the filename.
target_dir (str): if present, the ecoshard is created in this
directory. This value must be None if `rename` is True.
rename (bool): if True, `base_path` is renamed to the ecoshard rather
than a new file being created.
hash_algorithm (str): a hash function id that exists in
hashlib.algorithms_available.
force (bool): if True and the base_path already is in ecoshard format
the operation proceeds including the possibility that the
base_path ecoshard file name is renamed to a new hash.
hash_length (int): if not None, truncate length of hash to this
many characters.
Returns:
None.
"""
if target_dir and rename:
raise ValueError(
"`target_dir` is defined, but rename is True, either set "
"`target_dir` to None, or rename to False.")
if target_dir and not os.path.isdir(target_dir):
        LOGGER.warning(
            'target directory %s does not exist, creating it now', target_dir)
os.makedirs(target_dir, exist_ok=True)
base_filename = os.path.basename(base_path)
prefix, extension = os.path.splitext(base_filename)
match_result = re.match(
        '(.+)_(%s)_([0-9a-f]+)%s' % (
'|'.join(hashlib.algorithms_available), extension), base_filename)
if match_result:
if not force:
raise ValueError(
'%s seems to already be an ecoshard with algorithm %s and '
'hash %s. Set `force=True` to overwrite.' % (
base_path, match_result.group(2), match_result.group(3)))
else:
LOGGER.warning(
'%s is already in ecoshard format, but overriding because '
'`force` is True.', base_path)
prefix = match_result.group(1)
LOGGER.debug('calculating hash for %s', base_path)
hash_val = calculate_hash(base_path, hash_algorithm)
if hash_length is not None:
hash_val = hash_val[:hash_length]
if target_dir is None:
target_dir = os.path.dirname(base_path)
ecoshard_path = os.path.join(target_dir, '%s_%s_%s%s' % (
prefix, hash_algorithm, hash_val, extension))
if rename:
LOGGER.info('renaming %s to %s', base_path, ecoshard_path)
os.rename(base_path, ecoshard_path)
else:
LOGGER.info('copying %s to %s', base_path, ecoshard_path)
shutil.copyfile(base_path, ecoshard_path)
if target_token_path:
with open(target_token_path, 'w') as target_token_file:
target_token_file.write(str(datetime.datetime.now()))
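# Illustrative usage sketch (not part of the original module); the path below is
# hypothetical:
#   hash_file('data/dem.tif', rename=False, hash_algorithm='md5')
#   # -> copies the file to data/dem_md5_<hexdigest>.tif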
def build_overviews(
base_raster_path, target_token_path=None,
interpolation_method='near', overview_type='internal',
rebuild_if_exists=False):
"""Build embedded overviews on raster.
Args:
base_raster_path (str): base raster file, must be a GDAL writable
raster type.
target_token_path (str): if not None, this file is created and written
with a timestamp when overviews are successfully completed. This
file is useful for a library like `taskgraph` that needs to see
a file to know if an operation is successful.
interpolation_method (str): one of 'average', 'average_magphase',
'bilinear', 'cubic', 'cubicspline', 'gauss', 'lanczos', 'mode',
'near', or 'none'.
overview_type (str): 'internal' or 'external'
rebuild_if_exists (bool): If True overviews will be rebuilt even if
they already exist, otherwise just pass them over.
Returns:
None.
"""
raster_open_mode = gdal.OF_RASTER
if overview_type == 'internal':
raster_open_mode |= gdal.GA_Update
elif overview_type == 'external':
gdal.SetConfigOption('COMPRESS_OVERVIEW', 'LZW')
else:
raise ValueError('invalid value for overview_type: %s' % overview_type)
raster = gdal.OpenEx(base_raster_path, raster_open_mode)
if not raster:
raise ValueError(
'could not open %s as a GDAL raster' % base_raster_path)
band = raster.GetRasterBand(1)
overview_count = band.GetOverviewCount()
if overview_count == 0 or rebuild_if_exists:
        # either no overviews, or we are rebuilding them
min_dimension = min(raster.RasterXSize, raster.RasterYSize)
overview_levels = []
current_level = 2
while True:
if min_dimension // current_level == 0:
break
overview_levels.append(current_level)
current_level *= 2
LOGGER.info(
'building overviews for %s at the following levels %s' % (
base_raster_path, overview_levels))
raster.BuildOverviews(
interpolation_method, overview_levels,
callback=_make_logger_callback(
'build overview for ' + os.path.basename(base_raster_path) +
'%.2f/1.0 complete'))
else:
        LOGGER.warning(
            'overviews already exist, set rebuild_if_exists=True to rebuild '
'them anyway')
if target_token_path:
with open(target_token_path, 'w') as token_file:
token_file.write(str(datetime.datetime.now()))
def validate(base_ecoshard_path):
"""Validate ecoshard path, through its filename.
If `base_ecoshard_path` matches an EcoShard pattern, and the hash matches
the actual hash, return True. Otherwise raise a ValueError.
Args:
base_ecoshard_path (str): path to an ecosharded file.
Returns:
True if `base_ecoshard_path` matches .*_[hashalg]_[hash][extension]
and hashalg(base_ecoshard_path) = hash. Otherwise raise a ValueError.
"""
base_filename = os.path.basename(base_ecoshard_path)
prefix, extension = os.path.splitext(base_filename)
match_result = re.match(
'.+_([^_]+)_([0-9a-f]+)%s' % extension, base_filename)
if not match_result:
raise ValueError("%s does not match an ecoshard" % base_filename)
hash_algorithm, hash_value = match_result.groups()
calculated_hash = calculate_hash(
base_ecoshard_path, hash_algorithm)
if calculated_hash != match_result.group(2):
raise ValueError(
'hash does not match, calculated %s and expected %s '
'on %s' % (calculated_hash, hash_value, base_filename))
# if we got here the hash matched the calculated hash
return True
def calculate_hash(file_path, hash_algorithm, buf_size=2**20):
"""Return a hex digest of `file_path`.
Args:
file_path (string): path to file to hash.
hash_algorithm (string): a hash function id that exists in
hashlib.algorithms_available.
buf_size (int): number of bytes to read from `file_path` at a time
for digesting.
Returns:
a hex digest with hash algorithm `hash_algorithm` of the binary
contents of `file_path`.
"""
hash_func = hashlib.new(hash_algorithm)
with open(file_path, 'rb') as f:
binary_data = f.read(buf_size)
while binary_data:
hash_func.update(binary_data)
binary_data = f.read(buf_size)
    # return the hex digest of the file contents (no CRC32 is computed here)
return hash_func.hexdigest()
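# Illustrative usage sketch (not part of the original module); the path is
# hypothetical:
#   digest = calculate_hash('data/dem.tif', 'md5')
#   print(digest)  # hex string such as 'd41d8cd98f00b204e9800998ecf8427e'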
def _make_logger_callback(message):
"""Build a timed logger callback that prints ``message`` replaced.
Args:
message (string): a string that expects 2 placement %% variables,
first for % complete from ``df_complete``, second from
``p_progress_arg[0]``.
Returns:
Function with signature:
logger_callback(df_complete, psz_message, p_progress_arg)
"""
def logger_callback(df_complete, _, p_progress_arg):
"""Argument names come from the GDAL API for callbacks."""
try:
current_time = time.time()
if ((current_time - logger_callback.last_time) > 5.0 or
(df_complete == 1.0 and
logger_callback.total_time >= 5.0)):
# In some multiprocess applications I was encountering a
# ``p_progress_arg`` of None. This is unexpected and I suspect
# was an issue for some kind of GDAL race condition. So I'm
# guarding against it here and reporting an appropriate log
# if it occurs.
if p_progress_arg:
LOGGER.info(message, df_complete * 100, p_progress_arg[0])
else:
LOGGER.info(
'p_progress_arg is None df_complete: %s, message: %s',
df_complete, message)
logger_callback.last_time = current_time
logger_callback.total_time += current_time
except AttributeError:
logger_callback.last_time = time.time()
logger_callback.total_time = 0.0
return logger_callback
def compress_raster(
base_raster_path, target_compressed_path, compression_algorithm='LZW',
compression_predictor=None):
"""Compress base raster to target.
Args:
base_raster_path (str): the original GIS raster file, presumably
uncompressed.
target_compressed_path (str): the desired output raster path with the
defined compression algorithm applied to it.
compression_algorithm (str): a valid GDAL compression algorithm eg
'LZW', 'DEFLATE', and others defined in GDAL.
compression_predictor (int): if defined uses the predictor in whatever
compression algorithm is used. In most cases this only applies to
LZW or DEFLATE.
Returns:
None.
"""
gtiff_driver = gdal.GetDriverByName('GTiff')
base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)
LOGGER.info('compress %s to %s' % (
base_raster_path, target_compressed_path))
compressed_raster = gtiff_driver.CreateCopy(
target_compressed_path, base_raster, options=(
'TILED=YES', 'BIGTIFF=YES', 'COMPRESS=%s' % compression_algorithm,
'BLOCKXSIZE=256', 'BLOCKYSIZE=256'),
callback=geoprocessing._make_logger_callback(
f"copying {target_compressed_path} %.1f%% complete %s"))
del compressed_raster
def download_url(url, target_path, skip_if_target_exists=False):
"""Download `url` to `target_path`.
Args:
url (str): url path to a file.
target_path (str): desired output target path.
skip_if_target_exists (bool): if True will not download a file if the
path already exists on disk.
Returns:
None.
"""
try:
if skip_if_target_exists and os.path.exists(target_path):
return
with open(target_path, 'wb') as target_file:
last_download_size = 0
start_time = time.time()
with urllib.request.urlopen(url) as url_stream:
meta = url_stream.info()
file_size = int(meta["Content-Length"])
LOGGER.info(
"Downloading: %s Bytes: %s" % (target_path, file_size))
downloaded_so_far = 0
block_size = 2**20
last_log_time = time.time()
while True:
data_buffer = url_stream.read(block_size)
if not data_buffer:
break
downloaded_so_far += len(data_buffer)
target_file.write(data_buffer)
time_since_last_log = time.time() - last_log_time
if time_since_last_log > 5.0:
download_rate = (
(downloaded_so_far - last_download_size)/2**20) / (
float(time_since_last_log))
last_download_size = downloaded_so_far
status = r"%10dMB [%3.2f%% @ %5.2fMB/s]" % (
downloaded_so_far/2**20, downloaded_so_far * 100. /
file_size, download_rate)
LOGGER.info(status)
last_log_time = time.time()
total_time = time.time() - start_time
final_download_rate = downloaded_so_far/2**20 / float(total_time)
status = r"%10dMB [%3.2f%% @ %5.2fMB/s]" % (
downloaded_so_far/2**20, downloaded_so_far * 100. /
file_size, final_download_rate)
LOGGER.info(status)
target_file.flush()
os.fsync(target_file.fileno())
except Exception:
LOGGER.exception(f'unable to download {url}')
raise
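# Illustrative usage sketch (not part of the original module); URL and target are
# hypothetical:
#   download_url('https://example.com/dem.tif', 'dem.tif',
#                skip_if_target_exists=True)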
def download_and_unzip(url, target_dir, target_token_path=None):
"""Download `url` to `target_dir` and touch `target_token_path`.
Args:
url (str): url to file to download
target_dir (str): path to a local directory to download and unzip the
file to. The contents will be unzipped into the same directory as
the zipfile.
target_token_path (str): If not None, a path a file to touch when
the unzip is complete. This parameter is added to work well with
the ecoshard library that expects a file to be created after
an operation is complete. It may be complicated to list the files
that are unzipped, so instead this file is created and contains
the timestamp of when this function completed.
Returns:
None.
"""
zipfile_path = os.path.join(target_dir, os.path.basename(url))
LOGGER.info('download %s, to: %s', url, zipfile_path)
download_url(url, zipfile_path)
LOGGER.info('unzip %s', zipfile_path)
with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:
zip_ref.extractall(target_dir)
if target_token_path:
with open(target_token_path, 'w') as touchfile:
touchfile.write(f'unzipped {zipfile_path}')
LOGGER.info('download an unzip for %s complete', zipfile_path)
def copy_to_bucket(base_path, target_gs_path, target_token_path=None):
"""Copy base to a Google Bucket path.
This requires that "gsutil" is installed on the host machine and the
client has write access to whatever gs path is written.
Args:
base_path (str): path to base file.
target_gs_path (str): a well formated google bucket string of the
format "gs://[bucket][path][file]"
target_token_path (str): file that is written if this operation
completes successfully, contents are the timestamp of the
creation time.
Returns:
None.
"""
subprocess.run(
'gsutil cp -n %s %s' % (base_path, target_gs_path), shell=True,
check=True)
if target_token_path:
with open(target_token_path, 'w') as token_file:
token_file.write(str(datetime.datetime.now()))
def convolve_layer(
base_raster_path, integer_factor, method, target_raster_path):
"""Convolve a raster to a lower size.
Args:
base_raster_path (str): base raster.
integer_factor (int): integer number of pixels to aggregate by.
i.e. 2 -- makes 2x2 into a 1x1, 3-- 3x3 to a 1x1.
method (str): one of 'max', 'min', 'sum', 'average', 'mode'.
target_raster_path (str): based off of `base_raster_path` with size
reduced by `integer_factor`.
Return:
None.
"""
base_raster_info = geoprocessing.get_raster_info(base_raster_path)
    n_cols, n_rows = numpy.ceil(base_raster_info['raster_size']).astype(int)
n_cols_reduced = int(numpy.ceil(n_cols / integer_factor))
n_rows_reduced = int(numpy.ceil(n_rows / integer_factor))
nodata = base_raster_info['nodata'][0]
geoprocessing.new_raster_from_base(
base_raster_path, target_raster_path, base_raster_info['datatype'],
[nodata], n_rows=n_rows_reduced, n_cols=n_cols_reduced)
base_raster = gdal.OpenEx(base_raster_path, gdal.OF_RASTER)
base_band = base_raster.GetRasterBand(1)
base_geotransform = base_raster.GetGeoTransform()
target_raster = gdal.OpenEx(
target_raster_path, gdal.OF_RASTER | gdal.GA_Update)
target_geotransform = [
base_geotransform[0],
base_geotransform[1]*integer_factor,
base_geotransform[2]*integer_factor,
base_geotransform[3],
base_geotransform[4]*integer_factor,
base_geotransform[5]*integer_factor]
target_raster.SetGeoTransform(target_geotransform)
target_band = target_raster.GetRasterBand(1)
block = base_band.GetBlockSize()
cols_per_block = min(
n_cols, max(1, block[0] // integer_factor) * integer_factor * 10)
rows_per_block = min(
n_rows, max(1, block[1] // integer_factor) * integer_factor * 10)
n_col_blocks = int(numpy.ceil(n_cols / float(cols_per_block)))
n_row_blocks = int(numpy.ceil(n_rows / float(rows_per_block)))
for row_block_index in range(n_row_blocks):
row_offset = row_block_index * rows_per_block
row_block_width = n_rows - row_offset
LOGGER.info('step %d of %d', row_block_index+1, n_row_blocks)
if row_block_width > rows_per_block:
row_block_width = rows_per_block
for col_block_index in range(n_col_blocks):
col_offset = col_block_index * cols_per_block
col_block_width = n_cols - col_offset
if col_block_width > cols_per_block:
col_block_width = cols_per_block
offset_dict = {
'xoff': int(col_offset),
'yoff': int(row_offset),
'win_xsize': int(col_block_width),
'win_ysize': int(row_block_width),
}
target_offset_x = offset_dict['xoff'] // integer_factor
target_offset_y = offset_dict['yoff'] // integer_factor
block_data = base_band.ReadAsArray(**offset_dict)
rw = int(numpy.ceil(
col_block_width / integer_factor) * integer_factor)
rh = int(numpy.ceil(
row_block_width / integer_factor) * integer_factor)
w_pad = rw - col_block_width
h_pad = rh - row_block_width
j = rw // integer_factor
k = rh // integer_factor
if method == 'max':
block_data_pad = numpy.pad(
block_data, ((0, h_pad), (0, w_pad)), mode='edge')
reduced_block_data = block_data_pad.reshape(
k, integer_factor, j, integer_factor).max(axis=(-1, -3))
elif method == 'min':
block_data_pad = numpy.pad(
block_data, ((0, h_pad), (0, w_pad)), mode='edge')
reduced_block_data = block_data_pad.reshape(
k, integer_factor, j, integer_factor).min(axis=(-1, -3))
elif method == 'mode':
block_data_pad = numpy.pad(
block_data, ((0, h_pad), (0, w_pad)), mode='edge')
reduced_block_data = scipy.stats.mode(
block_data_pad.reshape(
k, integer_factor, j, integer_factor).swapaxes(
1, 2).reshape(k, j, integer_factor**2),
axis=2).mode.reshape(k, j)
elif method == 'average':
block_data_pad = numpy.pad(
block_data, ((0, h_pad), (0, w_pad)), mode='edge')
block_data_pad_copy = block_data_pad.copy()
# set any nodata to 0 so we don't average it strangely
block_data_pad[numpy.isclose(block_data_pad, nodata)] = 0.0
# straight average
reduced_block_data = block_data_pad.reshape(
k, integer_factor, j, integer_factor).mean(
axis=(-1, -3))
# this one is used to restore any nodata areas because they'll
# still be nodata when it's done
min_block_data = block_data_pad_copy.reshape(
k, integer_factor, j, integer_factor).min(
axis=(-1, -3))
reduced_block_data[
numpy.isclose(min_block_data, nodata)] = nodata
elif method == 'sum':
block_data_pad = numpy.pad(
block_data, ((0, h_pad), (0, w_pad)), mode='edge')
nodata_mask = numpy.isclose(block_data_pad, nodata)
block_data_pad_copy = block_data_pad.copy()
# set any nodata to 0 so we don't sum it strangely
block_data_pad[nodata_mask] = 0.0
# straight sum
reduced_block_data = block_data_pad.reshape(
k, integer_factor, j, integer_factor).sum(
axis=(-1, -3))
# this one is used to restore any nodata areas because they'll
# still be nodata when it's done
max_block_data = block_data_pad_copy.reshape(
k, integer_factor, j, integer_factor).max(
axis=(-1, -3))
reduced_block_data[
numpy.isclose(max_block_data, nodata)] = nodata
else:
raise ValueError("unknown method: %s" % method)
target_band.WriteArray(
reduced_block_data, xoff=target_offset_x, yoff=target_offset_y)
continue
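# Illustrative usage sketch (not part of the original module); paths are
# hypothetical: aggregate every 2x2 pixel block of the input raster into a single
# output pixel by summation.
#   convolve_layer('pop_count.tif', 2, 'sum', 'pop_count_2x_sum.tif')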
def search(
host_port, api_key, bounding_box, description, datetime, asset_id,
catalog_list):
"""Search EcoServer.
Args:
host_port (str): `host:port` string pair to identify server to post
publish request to.
api_key (str): an api key that as write access to the catalog on the
server.
bounding_box (list): a float list of xmin,ymin,xmax,ymax to indicate
the search area in lng/lat coordinates.
description (str): description to partially search for
datetime (str): utc range or open range to search for times like
'2020-04-20 04:20:17.866142/2020-04-20 19:49:17.866142, '
'../2020-04-20 19:49:17.866142', or
'2020-04-20 04:20:17.866142/..'
asset_id (str): to search for a substring match on ids in the catalog
catalog_list (str): comma separated string of catalogs to search ex:
'salo,nasa,joe'
Returns:
None
"""
post_url = f'http://{host_port}/api/v1/search'
if bounding_box:
bounding_box_str = ','.join([str(val) for val in bounding_box])
else:
bounding_box_str = None
LOGGER.debug('search posting to here: %s' % post_url)
search_response = requests.post(
post_url,
params={'api_key': api_key},
json=json.dumps({
'bounding_box': bounding_box_str,
'description': description,
'datetime': datetime,
'asset_id': asset_id,
'catalog_list': catalog_list
}))
if not search_response:
LOGGER.error(f'response from server: {search_response.text}')
raise RuntimeError(search_response.text)
response_dict = search_response.json()
LOGGER.debug(response_dict)
for index, feature in enumerate(response_dict['features']):
LOGGER.info(
f"{index}: {feature['id']}, "
f"bbox: {feature['bbox']}, "
f"utc_datetime: {feature['utc_datetime']}, "
f"description: {feature['description']}")
def process_worker(file_path, args):
"""Do the ecoshard process commands to the given file path."""
working_file_path = file_path
LOGGER.info('processing %s', file_path)
if args.cog:
# create copy with COG
cog_driver = gdal.GetDriverByName('COG')
base_raster = gdal.OpenEx(file_path, gdal.OF_RASTER)
cog_file_path = os.path.join(
f'cog_{os.path.basename(file_path)}')
LOGGER.info(f'convert {file_path} to COG {cog_file_path}')
cog_raster = cog_driver.CreateCopy(
cog_file_path, base_raster, options=(
'COMPRESS=LZW', 'NUM_THREADS=ALL_CPUS', 'BIGTIFF=YES'),
callback=geoprocessing._make_logger_callback(
f"COGing {cog_file_path} %.1f%% complete %s"))
del cog_raster
return
if args.reduce_factor:
method = args.reduce_factor[1]
valid_methods = ["max", "min", "sum", "average", "mode"]
if method not in valid_methods:
LOGGER.error(
'--reduce_method must be one of %s' % valid_methods)
sys.exit(-1)
convolve_layer(
file_path, int(args.reduce_factor[0]),
args.reduce_factor[1],
args.reduce_factor[2])
return
if args.strip_hash:
working_file_path = _remove_hash_from_filename(
file_path, args.strip_hash)
os.rename(file_path, working_file_path)
else:
working_file_path = file_path
if args.ndv is not None:
raster_info = geoprocessing.get_raster_info(working_file_path)
current_nodata = raster_info['nodata'][0]
if current_nodata is not None and not args.force:
error_message = (
f'--ndv flag is passed but {working_file_path} already has a '
f'nodata value of {raster_info["nodata"]}, pass --force flag '
f'to override this.')
LOGGER.error(error_message)
return error_message
basename = os.path.basename(working_file_path)
target_file_path = f'ndv_{args.ndv}_{basename}'
LOGGER.info(
f"replacing nodata value of {raster_info['nodata']} with "
f"{args.ndv} on {working_file_path}")
geoprocessing.raster_calculator(
[(working_file_path, 1), (current_nodata, 'raw'),
(args.ndv, 'raw')], _reclass_op, target_file_path,
raster_info['datatype'], args.ndv)
working_file_path = target_file_path
if args.compress:
prefix, suffix = os.path.splitext(working_file_path)
compressed_filename = '%s_compressed%s' % (prefix, suffix)
compress_raster(
working_file_path, compressed_filename,
compression_algorithm='DEFLATE')
working_file_path = compressed_filename
if args.buildoverviews:
build_overviews(
working_file_path, interpolation_method=args.interpolation_method)
if args.validate:
try:
is_valid = validate(working_file_path)
if is_valid:
LOGGER.info('VALID ECOSHARD: %s', working_file_path)
else:
LOGGER.error(
'got a False, but no ValueError on validate? '
                    'that should not be possible')
except ValueError:
            error_message = 'INVALID ECOSHARD: %s' % working_file_path
LOGGER.error(error_message)
return error_message
elif args.hash_file:
hash_file(
working_file_path, rename=args.rename, hash_algorithm=args.hashalg,
hash_length=args.hash_length,
force=args.force)
def _reclass_op(data_array, current_nodata, target_nodata):
"""Replace data array non-finte and current nodata to target."""
result = numpy.copy(data_array)
replace_block = ~numpy.isfinite(result)
if current_nodata is not None:
replace_block |= data_array == current_nodata
result[replace_block] = target_nodata
return result
def _remove_hash_from_filename(file_path, hash_id):
"""Returns new filename without hash.
Assumes filename is of the form [prefix]_[hash_id]_[hash_value].ext
Args:
file_path (str): any filename
hash_id (str): the value of the hash id ex md5
Returns:
[prefix].ext
"""
prefix, suffix = os.path.splitext(file_path)
file_match = re.match(
f'(.*?)_{hash_id}_.*$', prefix)
if file_match:
rename_file_path = f'{file_match.group(1)}{suffix}'
return rename_file_path
else:
raise ValueError(
f"could not find a hash matching '{hash_id}'' in the filename "
f"'{file_path}'")
|
the-stack_106_18758
|
import json
from typing import Mapping, Optional
from base64 import b64encode, b64decode
from . import Handler, AuthenticatedHandler
from malon_lp.crypto.dh import KeyExchange, get_client_id
from malon_lp.crypto.sym import SymmetricCipher
KeyRespository = Mapping[str, bytes]
class DummyAuthenticationHandler(Handler):
def __init__(self, handler: AuthenticatedHandler):
self._handler = handler
def handle_msg(self, msg: bytes, client_id: Optional[str] = None) -> bytes:
"""
        Decodes the payload content and delegates the message to the AuthenticatedHandler.
"""
base_msg = json.loads(msg.decode('utf-8'))
if base_msg.get('client_msg') is not None:
payload = self._handler.handle_auth_msg(
b64decode(base_msg['client_msg']['payload']),
base_msg['client_msg']['client_id']
)
server_msg = {'payload': b64encode(payload).decode('utf-8')}
base_msg = {'server_msg': server_msg}
return json.dumps(base_msg).encode('utf-8')
else:
raise RuntimeError('Unexpected message type')
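# Illustrative sketch of the message envelope implied by the parsing above (an
# assumption drawn from this handler, not a documented wire format):
#
#   inbound:  {"client_msg": {"client_id": "<id>", "payload": "<base64 bytes>"}}
#   outbound: {"server_msg": {"payload": "<base64 bytes>"}}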
|
the-stack_106_18759
|
import numpy as np
import warnings
from anndata import AnnData
from scipy.sparse import issparse, csr_matrix, lil_matrix, diags
from tqdm import tqdm
from .utils_moments import estimation
from .utils import get_mapper, elem_prod, inverse_norm
from .connectivity import mnn, normalize_knn_graph, umap_conn_indices_dist_embedding
from ..preprocessing.utils import get_layer_keys, allowed_X_layer_names, pca
# ---------------------------------------------------------------------------------------------------
# use for calculating moments for stochastic model:
def moments(adata,
genes=None,
group=None,
use_gaussian_kernel=False,
normalize=True,
use_mnn=False,
layers="all",
n_pca_components=30,
n_neighbors=30,
):
"""Calculate kNN based first and second moments (including uncentered covariance) for
different layers of data.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
genes: `np.array` (default: `None`)
        The one-dimensional numpy array of the genes on which you want to perform pca analysis (if adata.obsm['X'] is not
        available). The `X` keyname (instead of `X_pca`) was used to enable you to use a different set of genes for flexible
        connectivity graph construction. If `None`, by default it will select genes based on the `use_for_pca` key in .var
        attributes if it exists, otherwise it will use all genes stored in adata.X.
group: `str` or None (default: `None`)
The column key/name that identifies the grouping information (for example, clusters that correspond to
different cell types or different time points) of cells. This will be used to compute kNN graph for each
group (i.e cell-type/time-point). This is important, for example, we don't want cells from different labeling
time points to be mixed when performing the kNN graph for calculating the moments.
    use_gaussian_kernel: `bool` (default: `False`)
        Whether to normalize the kNN graph via a Gaussian kernel.
normalize: `bool` (default: `True`)
Whether to normalize the connectivity matrix so that each row sums up to 1. When `use_gaussian_kernel` is False,
        this will be reset to False because we will already normalize the connectivity matrix by dividing
        each row by the total number of connections.
use_mnn: `bool` (default: `False`)
Whether to use mutual kNN across different layers as for the moment calculation.
    layers: `str` or a list of str (default: `'all'`)
The layers that will be used for calculating the moments.
n_pca_components: `int` (default: `30`)
The number of pca components to use for constructing nearest neighbor graph and calculating 1/2-st moments.
n_neighbors: `int` (default: `30`)
The number of neighbors for constructing nearest neighbor graph used to calculate 1/2-st moments.
Returns
-------
adata: :class:`~anndata.AnnData`
        An updated AnnData object with calculated first/second moments (including uncentered covariance) included.
"""
mapper = get_mapper()
only_splicing, only_labeling, splicing_and_labeling = allowed_X_layer_names()
if genes is None and 'use_for_pca' in adata.var.keys(): genes = adata.var_names[adata.var.use_for_pca]
if use_mnn:
if "mnn" not in adata.uns.keys():
adata = mnn(
adata,
n_pca_components=n_pca_components,
layers="all",
use_pca_fit=True,
save_all_to_adata=False,
)
conn = adata.uns["mnn"]
else:
if 'X' not in adata.obsm.keys():
if not any([i.startswith('X_') for i in adata.layers.keys()]):
from ..preprocessing.preprocess import recipe_monocle
genes_to_use = adata.var_names[genes] if genes.dtype == 'bool' else genes
adata = recipe_monocle(adata, genes_to_use=genes_to_use, n_pca_components=n_pca_components)
adata.obsm["X"] = adata.obsm["X_pca"]
else:
CM = adata.X if genes is None else adata[:, genes].X
cm_genesums = CM.sum(axis=0)
valid_ind = np.logical_and(np.isfinite(cm_genesums), cm_genesums != 0)
valid_ind = np.array(valid_ind).flatten()
CM = CM[:, valid_ind]
adata, fit, _ = pca(adata, CM, n_pca_components=n_pca_components)
adata.uns["explained_variance_ratio_"] = fit.explained_variance_ratio_[1:]
X = adata.obsm["X"][:, :n_pca_components]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
if group is None:
kNN, knn_indices, knn_dists, _ = umap_conn_indices_dist_embedding(
X, n_neighbors=np.min((n_neighbors, adata.n_obs - 1)), return_mapper=False
)
if use_gaussian_kernel and not use_mnn:
conn = gaussian_kernel(X, knn_indices, sigma=10, k=None, dists=knn_dists)
else:
conn = normalize_knn_graph(kNN > 0)
normalize = False
else:
if group not in adata.obs.keys():
raise Exception(f'the group {group} provided is not a column name in .obs attribute.')
conn = csr_matrix((adata.n_obs, adata.n_obs))
cells_group = adata.obs[group]
uniq_grp = np.unique(cells_group)
for cur_grp in uniq_grp:
cur_cells = cells_group == cur_grp
cur_X = X[cur_cells, :]
cur_kNN, cur_knn_indices, cur_knn_dists, _ = umap_conn_indices_dist_embedding(
cur_X, n_neighbors=np.min((n_neighbors, sum(cur_cells) - 1)), return_mapper=False
)
if use_gaussian_kernel and not use_mnn:
cur_conn = gaussian_kernel(cur_X, cur_knn_indices, sigma=10, k=None, dists=cur_knn_dists)
else:
cur_conn = normalize_knn_graph(cur_kNN > 0)
cur_cells_ = np.where(cur_cells)[0]
conn[cur_cells_[:, None], cur_cells_] = cur_conn
layers = get_layer_keys(adata, layers, False, False)
layers = [
layer
for layer in layers
if layer.startswith("X_")
and (not layer.endswith("matrix") and not layer.endswith("ambiguous"))
]
layers.sort(
reverse=True
) # ensure we get M_us, M_tn, etc (instead of M_su or M_nt).
for i, layer in enumerate(layers):
layer_x = adata.layers[layer].copy()
layer_x_group = np.where([layer in x for x in
[only_splicing, only_labeling, splicing_and_labeling]])[0][0]
layer_x = inverse_norm(adata, layer_x)
if mapper[layer] not in adata.layers.keys():
adata.layers[mapper[layer]], conn = (
calc_1nd_moment(layer_x, conn, normalize_W=normalize)
if use_gaussian_kernel
else (conn.dot(layer_x), conn)
)
for layer2 in layers[i:]:
layer_y = adata.layers[layer2].copy()
layer_y_group = np.where([layer2 in x for x in
[only_splicing, only_labeling, splicing_and_labeling]])[0][0]
            # don't calculate 2nd moments among uu, ul, su, sl -
# they should be time-dependent moments and
# those calculations are model specific
if (layer_x_group != layer_y_group) or layer_x_group == 2:
continue
layer_y = inverse_norm(adata, layer_y)
if mapper[layer2] not in adata.layers.keys():
adata.layers[mapper[layer2]], conn = (
calc_1nd_moment(layer_y, conn, normalize_W=normalize)
if use_gaussian_kernel
else (conn.dot(layer_y), conn)
)
adata.layers["M_" + layer[2] + layer2[2]] = calc_2nd_moment(
layer_x, layer_y, conn, normalize_W=normalize, mX=None, mY=None
)
if (
"X_protein" in adata.obsm.keys()
): # may need to update with mnn or just use knn from protein layer itself.
adata.obsm[mapper["X_protein"]] = conn.dot(adata.obsm["X_protein"])
adata.obsp['moments_con'] = conn
return adata
def time_moment(adata,
tkey,
has_splicing,
has_labeling=True,
t_label_keys=None,
):
"""Calculate time based first and second moments (including uncentered covariance) for
different layers of data.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object.
tkey: `str` or None (default: None)
The column key for the time label of cells in .obs. Used for either the "ss" or "kinetic"
model with labeled data.
has_splicing: `bool`
Whether the data has splicing information.
has_labeling: `bool` (default: True)
Whether the data has labeling information.
t_label_keys: `str`, `list` or None (default: None)
The column key(s) for the labeling time label of cells in .obs. Used for either "ss" or "kinetic" model.
Not used for now and `tkey` is implicitly assumed as `t_label_key` (however, `tkey` should just be the time
of the experiment).
Returns
-------
adata: :class:`~anndata.AnnData`
An updated AnnData object with calculated first/second moments (including uncentered covariance) for
each time point for each layer included.
"""
if has_labeling:
if has_splicing:
layers = ['uu', 'ul', 'su', 'sl']
else:
layers = ['new', 'total']
else:
layers = ['unspliced', 'spliced']
time = adata.obs[tkey]
m, v, _ = prepare_data_deterministic(adata, adata.var.index, time, layers,
use_total_layers=True, log=False)
adata.uns['time_moments'] = {'time': time}
adata.varm['m_t'] = m
adata.varm['v_t'] = v
return adata
# ---------------------------------------------------------------------------------------------------
# use for kinetic assumption
def get_layer_pair(layer):
pair = {'new': "total", 'total': "new",
'X_new': "X_total", "X_total": 'X_new',
'M_t': 'M_n', "M_n": 'M_t'}
return pair[layer] if layer in pair.keys() else None
def get_layer_group(layer):
group = {'uu': "ul", 'ul': "uu", 'su': "sl", "sl": 'su',
'X_uu': "X_ul", 'X_ul': "X_uu", 'X_su': "X_sl", "X_sl": 'X_su',
'M_uu': "M_ul", 'M_ul': "M_uu", 'M_su': "M_sl", "M_sl": 'M_su',
}
return group[layer] if layer in group.keys() else None
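# For example (illustrative, derived from the lookup tables above):
# get_layer_pair('X_new') returns 'X_total', get_layer_pair('M_t') returns 'M_n',
# and get_layer_group('X_ul') returns 'X_uu'; layers not listed return None.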
def prepare_data_deterministic(adata, genes, time, layers,
use_total_layers=True,
total_layers=['X_ul', 'X_sl', 'X_uu', 'X_su'],
log=False):
from ..preprocessing.utils import sz_util, normalize_util
if use_total_layers:
if 'total_Size_Factor' not in adata.obs.keys():
# total_layers = ["uu", "ul", "su", "sl"] if 'uu' in adata.layers.keys() else ['total']
sfs, _ = sz_util(adata, '_total_', round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=total_layers)
else:
sfs = adata.obs.total_Size_Factor
sfs_x, sfs_y = sfs[:, None], sfs[:, None]
m = [None] * len(layers)
v = [None] * len(layers)
raw = [None] * len(layers)
for i, layer in enumerate(layers):
if layer in ['X_total', 'total', 'M_t']:
if (layer == 'X_total' and adata.uns['pp_norm_method'] is None) or layer == 'M_t':
x_layer = adata[:, genes].layers[layer]
x_layer = x_layer - adata[:, genes].layers[get_layer_pair(layer)]
else:
x_layer = adata.layers[layer]
group_pair_x_layer_ = get_layer_group(get_layer_pair(layer))
pair_x_layer, group_x_layer, group_pair_x_layer = adata.layers[get_layer_pair(layer)], \
adata.layers[get_layer_group(layer)], \
None if group_pair_x_layer_ is None else \
adata.layers[group_pair_x_layer_]
if layer.startswith('X_'):
x_layer, pair_x_layer, group_x_layer, group_pair_x_layer = inverse_norm(adata, x_layer), \
inverse_norm(adata, pair_x_layer), \
inverse_norm(adata, group_x_layer), \
0 if group_pair_x_layer_ is None else \
inverse_norm(adata, group_pair_x_layer)
if not use_total_layers:
sfs_x, _ = sz_util(adata, layer, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=x_layer+group_x_layer)
sfs_y, _ = sz_util(adata, get_layer_pair(layer), round_exprs=False,
method="median", locfunc=np.nanmean, total_layers=None,
CM=pair_x_layer + group_pair_x_layer)
sfs_x, sfs_y = sfs_x[:, None], sfs_y[:, None]
x_layer = normalize_util(x_layer[:, adata.var_names.isin(genes)], sfs_x, relative_expr=True,
pseudo_expr=0, norm_method=None)
y_layer = normalize_util(pair_x_layer[:, adata.var_names.isin(genes)], sfs_y, relative_expr=True,
pseudo_expr=0, norm_method=None)
x_layer = x_layer - y_layer
else:
if (layer == 'X_new' and adata.uns['pp_norm_method'] is None) or layer == 'M_n':
x_layer = adata[:, genes].layers[layer]
else:
x_layer = adata.layers[layer]
if layer.startswith('X_'):
x_layer = inverse_norm(adata, x_layer)
if not use_total_layers:
sfs, _ = sz_util(adata, layer, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=x_layer)
x_layer = normalize_util(x_layer[:, adata.var_names.isin(genes)], szfactors=sfs[:, None],
relative_expr=True, pseudo_expr=0, norm_method=None)
if log:
if issparse(x_layer):
x_layer.data = np.log1p(x_layer.data)
else:
x_layer = np.log1p(x_layer)
m[i], v[i], _ = calc_12_mom_labeling(x_layer.T, time)
raw[i] = x_layer
return m, v, raw # each list element corresponds to a layer
def prepare_data_has_splicing(adata, genes, time, layer_u, layer_s,
use_total_layers=True,
total_layers=['X_ul', 'X_sl', 'X_uu', 'X_su'],
return_cov=True):
"""Prepare data when assumption is kinetic and data has splicing"""
from ..preprocessing.utils import sz_util, normalize_util
res = [0] * len(genes)
raw = [0] * len(genes)
U, S = adata[:, genes].layers[layer_u] if layer_u == 'M_ul' else None, \
adata[:, genes].layers[layer_s] if layer_s == 'M_sl' else None
layer_ul_data, layer_sl_data = adata.layers[layer_u], adata.layers[layer_s]
layer_uu_data, layer_su_data = adata.layers[total_layers[2]], adata.layers[total_layers[3]]
layer_ul_data, layer_sl_data = layer_ul_data if layer_u == 'M_ul' else inverse_norm(adata, layer_ul_data), \
layer_sl_data if layer_s == 'M_sl' else inverse_norm(adata, layer_sl_data)
layer_uu_data, layer_su_data = layer_uu_data if total_layers[2] == 'M_uu' else inverse_norm(adata, layer_uu_data), \
layer_su_data if total_layers[3] == 'M_su' else inverse_norm(adata, layer_su_data)
if use_total_layers:
if 'total_Size_Factor' not in adata.obs.keys():
sfs, _ = sz_util(adata, '_total_', round_exprs=False, method="median", locfunc=np.nanmean,
total_layers=total_layers, CM=layer_ul_data + layer_sl_data + layer_uu_data + layer_su_data)
sfs_u, sfs_s = sfs[:, None], sfs[:, None]
else:
sfs = adata.obs.total_Size_Factor
sfs_u, sfs_s = sfs[:, None], sfs[:, None]
else:
sfs_u, _ = sz_util(adata, layer_u, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=layer_ul_data + layer_uu_data)
sfs_s, _ = sz_util(adata, layer_s, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=layer_sl_data + layer_su_data)
sfs_u, sfs_s = sfs_u[:, None], sfs_s[:, None]
if U is None: U = normalize_util(layer_ul_data[:, adata.var_names.isin(genes)], sfs_u, relative_expr=True,
pseudo_expr=0, norm_method=None)
if S is None: S = normalize_util(layer_sl_data[:, adata.var_names.isin(genes)], sfs_s, relative_expr=True,
pseudo_expr=0, norm_method=None)
for i, g in enumerate(genes):
u = U[:, i]
s = S[:, i]
ut = strat_mom(u, time, np.mean)
st = strat_mom(s, time, np.mean)
uut = strat_mom(elem_prod(u, u), time, np.mean)
ust = strat_mom(elem_prod(u, s), time, np.mean)
sst = strat_mom(elem_prod(s, s), time, np.mean)
x = np.vstack([ut, st, uut, sst, ust]) if return_cov else np.vstack([ut, st, uut, sst])
res[i] = x
raw[i] = np.vstack((u, s))
return res, raw
def prepare_data_no_splicing(adata, genes, time, layer,
use_total_layers=True,
total_layer='X_total',
return_old=False):
"""Prepare data when assumption is kinetic and data has no splicing"""
from ..preprocessing.utils import sz_util, normalize_util
res = [0] * len(genes)
raw = [0] * len(genes)
U, T = adata[:, genes].layers[layer] if layer == 'M_n' else None, \
adata[:, genes].layers[total_layer] if total_layer == 'M_t' else None
layer_data = adata.layers[layer]
total_layer_data = adata.layers[total_layer]
layer_data, total_layer_data = layer_data if layer == 'M_n' else inverse_norm(adata, layer_data), \
total_layer_data if total_layer == 'M_t' else inverse_norm(adata, total_layer_data)
if use_total_layers:
if 'total_Size_Factor' not in adata.obs.keys():
sfs, _ = sz_util(adata, '_total_', round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=total_layer, CM=total_layer_data)
else:
sfs = adata.obs.total_Size_Factor
sfs, tot_sfs = sfs[:, None], sfs[:, None]
else:
sfs, _ = sz_util(adata, layer, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=layer_data)
tot_sfs, _ = sz_util(adata, layer, round_exprs=False, method="median",
locfunc=np.nanmean, total_layers=None, CM=total_layer_data)
sfs, tot_sfs = sfs[:, None], tot_sfs[:, None]
if U is None: U = normalize_util(layer_data[:, adata.var_names.isin(genes)], sfs, relative_expr=True, pseudo_expr=0,
norm_method=None)
if T is None: T = normalize_util(total_layer_data[:, adata.var_names.isin(genes)], tot_sfs, relative_expr=True,
pseudo_expr=0, norm_method=None)
for i, g in enumerate(genes):
u, t = U[:, i], T[:, i]
ut = strat_mom(u, time, np.mean)
uut = strat_mom(elem_prod(u, u), time, np.mean)
res[i] = np.vstack([ut, uut])
raw[i] = np.vstack([u, t - u]) if return_old else u
return res, raw
def prepare_data_mix_has_splicing(adata, genes, time, layer_u='X_uu', layer_s='X_su',
layer_ul='X_ul', layer_sl='X_sl', use_total_layers=True,
total_layers=['X_ul', 'X_sl', 'X_uu', 'X_su'], mix_model_indices=None):
"""Prepare data for mixture modeling when assumption is kinetic and data has splicing.
Note that the mix_model_indices is indexed on 10 total species, which can be used to specify
the data required for different mixture models.
"""
from ..preprocessing.utils import sz_util, normalize_util
res = [0] * len(genes)
raw = [0] * len(genes)
U, S = adata[:, genes].layers[layer_u] if layer_u == 'M_uu' else None, \
adata[:, genes].layers[layer_s] if layer_s == 'M_su' else None
Ul, Sl = adata[:, genes].layers[layer_ul] if layer_ul == 'M_ul' else None, \
adata[:, genes].layers[layer_sl] if layer_sl == 'M_sl' else None
layer_u_data, layer_s_data = adata.layers[layer_u], adata.layers[layer_s]
layer_ul_data, layer_sl_data = adata.layers[layer_ul], adata.layers[layer_sl]
layer_u_data, layer_s_data = layer_u_data if layer_u == 'M_uu' else inverse_norm(adata, layer_u_data), \
layer_s_data if layer_s == 'M_su' else inverse_norm(adata, layer_s_data)
layer_ul_data, layer_sl_data = layer_ul_data if layer_ul == 'M_ul' else inverse_norm(adata, layer_ul_data), \
layer_sl_data if layer_sl == 'M_sl' else inverse_norm(adata, layer_sl_data)
if use_total_layers:
if 'total_Size_Factor' not in adata.obs.keys():
sfs, _ = sz_util(adata, '_total_', False, "median", np.nanmean,
total_layers=total_layers, CM=layer_u_data + layer_s_data + layer_ul_data + layer_sl_data)
sfs_u, sfs_s = sfs[:, None], sfs[:, None]
else:
sfs = adata.obs.total_Size_Factor
sfs_u, sfs_s = sfs[:, None], sfs[:, None]
else:
sfs_u, _ = sz_util(adata, layer_u, False, "median", np.nanmean, total_layers=None,
CM=layer_u_data + layer_ul_data)
sfs_s, _ = sz_util(adata, layer_s, False, "median", np.nanmean, total_layers=None,
CM=layer_s_data + layer_sl_data)
sfs_u, sfs_s = sfs_u[:, None], sfs_s[:, None]
if U is None: U = normalize_util(layer_u_data[:, adata.var_names.isin(genes)], sfs_u, relative_expr=True,
pseudo_expr=0, norm_method=None)
if S is None: S = normalize_util(layer_s_data[:, adata.var_names.isin(genes)], sfs_s, relative_expr=True,
pseudo_expr=0, norm_method=None)
if Ul is None: Ul = normalize_util(layer_ul_data[:, adata.var_names.isin(genes)], sfs_u, relative_expr=True,
pseudo_expr=0, norm_method=None)
if Sl is None: Sl = normalize_util(layer_sl_data[:, adata.var_names.isin(genes)], sfs_s, relative_expr=True,
pseudo_expr=0, norm_method=None)
for i, g in enumerate(genes):
ul = Ul[:, i]
sl = Sl[:, i]
ult = strat_mom(ul, time, np.mean)
slt = strat_mom(sl, time, np.mean)
ul_ult = strat_mom(elem_prod(ul, ul), time, np.mean)
ul_slt = strat_mom(elem_prod(ul, sl), time, np.mean)
sl_slt = strat_mom(elem_prod(sl, sl), time, np.mean)
u = U[:, i]
s = S[:, i]
ut = strat_mom(u, time, np.mean)
st = strat_mom(s, time, np.mean)
uut = strat_mom(elem_prod(u, u), time, np.mean)
ust = strat_mom(elem_prod(u, s), time, np.mean)
sst = strat_mom(elem_prod(s, s), time, np.mean)
x = np.vstack([ult, slt, ul_ult, sl_slt, ul_slt, ut, st, uut, sst, ust])
if mix_model_indices is not None:
x = x[mix_model_indices]
res[i] = x
raw[i] = np.vstack((ul, sl, u, s))
return res, raw
def prepare_data_mix_no_splicing(adata, genes, time, layer_n, layer_t, use_total_layers=True,
total_layer='X_total', mix_model_indices=None):
"""Prepare data for mixture modeling when assumption is kinetic and data has NO splicing.
Note that the mix_model_indices is indexed on 4 total species, which can be used to specify
the data required for different mixture models.
"""
from ..preprocessing.utils import sz_util, normalize_util
res = [0] * len(genes)
raw = [0] * len(genes)
N, T = adata[:, genes].layers[layer_n] if layer_n == 'M_n' else None, \
adata[:, genes].layers[layer_t] if layer_t == 'M_t' else None
layer_n_data = adata.layers[layer_n]
layer_t_data = adata.layers[layer_t]
layer_n_data, layer_t_data = layer_n_data if layer_n == 'M_n' else inverse_norm(adata, layer_n_data), \
layer_t_data if layer_t == 'M_t' else inverse_norm(adata, layer_t_data)
if use_total_layers:
if 'total_Size_Factor' not in adata.obs.keys():
sfs, _ = sz_util(adata, total_layer, False, "median", np.nanmean, total_layers='total', CM=layer_t_data)
sfs_n, sfs_t = sfs[:, None], sfs[:, None]
else:
sfs = adata.obs.total_Size_Factor
sfs_n, sfs_t = sfs[:, None], sfs[:, None]
else:
sfs_n, _ = sz_util(adata, layer_n, False, "median", np.nanmean, total_layers=None, CM=layer_n_data)
sfs_t, _ = sz_util(adata, layer_t, False, "median", np.nanmean, total_layers=None, CM=layer_t_data)
sfs_n, sfs_t = sfs_n[:, None], sfs_t[:, None]
if N is None: N = normalize_util(layer_n_data[:, adata.var_names.isin(genes)], sfs_n, relative_expr=True,
pseudo_expr=0, norm_method=None)
if T is None: T = normalize_util(layer_t_data[:, adata.var_names.isin(genes)], sfs_t, relative_expr=True,
pseudo_expr=0, norm_method=None)
for i, g in enumerate(genes):
n = N[:, i]
nt = strat_mom(n, time, np.mean)
nnt = strat_mom(elem_prod(n, n), time, np.mean)
o = T[:, i] - n
ot = strat_mom(o, time, np.mean)
oot = strat_mom(elem_prod(o, o), time, np.mean)
x = np.vstack([nt, nnt, ot, oot])
if mix_model_indices is not None:
x = x[mix_model_indices]
res[i] = x
raw[i] = np.vstack((n, o))
return res, raw
# ---------------------------------------------------------------------------------------------------
# moment related:
def stratify(arr, strata):
s = np.unique(strata)
return [arr[strata == s[i]] for i in range(len(s))]
def strat_mom(arr, strata, fcn_mom):
arr = arr.A if issparse(arr) else arr
x = stratify(arr, strata)
return np.array([fcn_mom(y) for y in x])
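# Hedged sketch (illustrative, not part of the original module): stratify() splits an
# array by the unique values of `strata`, and strat_mom() then applies a moment
# function within each stratum.
def _demo_strat_mom():
    import numpy as np
    arr = np.array([1.0, 2.0, 3.0, 4.0])
    strata = np.array([0, 0, 1, 1])  # e.g. two labeling time points
    # per-time-point means -> array([1.5, 3.5])
    return strat_mom(arr, strata, np.mean)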
def calc_mom_all_genes(T, adata, fcn_mom):
ng = adata.var.shape[0]
nT = len(np.unique(T))
Mn = np.zeros((ng, nT))
Mo = np.zeros((ng, nT))
Mt = np.zeros((ng, nT))
Mr = np.zeros((ng, nT))
for g in tqdm(range(ng), desc="calculating 1/2 moments"):
L = np.array(adata[:, g].layers["X_new"], dtype=float)
U = np.array(adata[:, g].layers["X_total"], dtype=float) - L
rho = L / (L + U + 0.01)
Mn[g] = strat_mom(L, T, fcn_mom)
Mo[g] = strat_mom(U, T, fcn_mom)
Mt[g] = strat_mom(L + U, T, fcn_mom)
Mr[g] = strat_mom(rho, T, fcn_mom)
return Mn, Mo, Mt, Mr
def _calc_1nd_moment(X, W, normalize_W=True):
"""deprecated"""
if normalize_W:
d = np.sum(W, 1)
W = np.diag(1 / d) @ W
return W @ X
def _calc_2nd_moment(X, Y, W, normalize_W=True, center=False, mX=None, mY=None):
"""deprecated"""
if normalize_W:
d = np.sum(W, 1)
W = np.diag(1 / d) @ W
XY = np.multiply(W @ Y, X)
if center:
mX = calc_1nd_moment(X, W, False) if mX is None else mX
mY = calc_1nd_moment(Y, W, False) if mY is None else mY
XY = XY - np.multiply(mX, mY)
return XY
def gaussian_kernel(X, nbr_idx, sigma, k=None, dists=None):
n = X.shape[0]
if dists is None:
dists = []
for i in range(n):
d = X[nbr_idx[i][:k]] - X[i]
dists.append(np.sum(elem_prod(d, d), 1).flatten())
W = lil_matrix((n, n))
s2_inv = 1 / (2 * sigma ** 2)
for i in range(n):
W[i, nbr_idx[i][:k]] = np.exp(-s2_inv * dists[i][:k] ** 2)
return csr_matrix(W)
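# Hedged sketch (illustrative): build a sparse Gaussian affinity matrix from
# precomputed kNN indices/distances on a toy 2-D dataset, mirroring how
# gaussian_kernel is called with umap-derived neighbors above.
def _demo_gaussian_kernel():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [5.0, 5.0]])
    d2 = ((X[:, None, :] - X[None, :, :]) ** 2).sum(-1)       # pairwise squared distances
    nbr_idx = np.argsort(d2, axis=1)[:, :2]                    # 2 nearest points (self included)
    dists = np.sqrt(np.take_along_axis(d2, nbr_idx, axis=1))   # euclidean distances
    W = gaussian_kernel(X, nbr_idx, sigma=1.0, k=2, dists=dists)
    return W  # csr_matrix of shape (4, 4)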
def calc_12_mom_labeling(data, t, calculate_2_mom=True):
t_uniq = np.unique(t)
m = np.zeros((data.shape[0], len(t_uniq)))
if calculate_2_mom: v = np.zeros((data.shape[0], len(t_uniq)))
for i in range(data.shape[0]):
data_ = (
np.array(data[i].A.flatten(), dtype=float)
if issparse(data)
else np.array(data[i], dtype=float)
) # consider using the `adata.obs_vector`, `adata.var_vector` methods or accessing the array directly.
m[i] = strat_mom(data_, t, np.nanmean)
if calculate_2_mom: v[i] = strat_mom(data_, t, np.nanvar)
return (m, v, t_uniq) if calculate_2_mom else (m, t_uniq)
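# Hedged sketch (illustrative): per-gene, per-time-point first and second moments
# from a small dense genes-by-cells matrix.
def _demo_calc_12_mom_labeling():
    import numpy as np
    data = np.array([[1.0, 3.0, 2.0, 6.0]])  # one gene, four cells
    t = np.array([0, 0, 1, 1])               # labeling time of each cell
    m, v, t_uniq = calc_12_mom_labeling(data, t)
    # m -> [[2., 4.]] (means), v -> [[1., 4.]] (variances), t_uniq -> [0, 1]
    return m, v, t_uniq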
def calc_1nd_moment(X, W, normalize_W=True):
if normalize_W:
if type(W) == np.ndarray:
d = np.sum(W, 1).flatten()
else:
d = np.sum(W, 1).A.flatten()
W = diags(1 / d) @ W if issparse(W) else np.diag(1 / d) @ W
return W @ X, W
else:
return W @ X
def calc_2nd_moment(X, Y, W, normalize_W=True, center=False, mX=None, mY=None):
if normalize_W:
if type(W) == np.ndarray:
d = np.sum(W, 1).flatten()
else:
d = W.sum(1).A.flatten()
W = diags(1 / d) @ W if issparse(W) else np.diag(1 / d) @ W
XY = W @ elem_prod(Y, X)
if center:
mX = calc_1nd_moment(X, W, False) if mX is None else mX
mY = calc_1nd_moment(Y, W, False) if mY is None else mY
XY = XY - elem_prod(mX, mY)
return XY
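# Hedged sketch (illustrative): smooth a tiny expression matrix with a
# row-normalized connectivity graph and compute the matching uncentered
# second moment with the same weights.
def _demo_calc_moments():
    import numpy as np
    W = np.array([[1.0, 1.0, 0.0],
                  [1.0, 1.0, 1.0],
                  [0.0, 1.0, 1.0]])      # symmetric kNN-style connectivity
    X = np.array([[1.0], [2.0], [4.0]])  # 3 cells x 1 gene
    mX, W_norm = calc_1nd_moment(X, W, normalize_W=True)    # neighborhood average
    m2 = calc_2nd_moment(X, X, W_norm, normalize_W=False)   # E[X * X] under the same graph
    return mX, m2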
# ---------------------------------------------------------------------------------------------------
# old moment estimation code
class MomData(AnnData):
"""deprecated"""
def __init__(self, adata, time_key="Time", has_nan=False):
# self.data = adata
self.__dict__ = adata.__dict__
# calculate first and second moments from data
self.times = np.array(self.obs[time_key].values, dtype=float)
self.uniq_times = np.unique(self.times)
nT = self.get_n_times()
ng = self.get_n_genes()
self.M = np.zeros((ng, nT)) # first moments (data)
self.V = np.zeros((ng, nT)) # second moments (data)
for g in tqdm(range(ng), desc="calculating 1/2 moments"):
tmp = self[:, g].layers["new"]
L = (
np.array(tmp.A, dtype=float)
if issparse(tmp)
else np.array(tmp, dtype=float)
) # consider using the `adata.obs_vector`, `adata.var_vector` methods or accessing the array directly.
if has_nan:
self.M[g] = strat_mom(L, self.times, np.nanmean)
self.V[g] = strat_mom(L, self.times, np.nanvar)
else:
self.M[g] = strat_mom(L, self.times, np.mean)
self.V[g] = strat_mom(L, self.times, np.var)
def get_n_genes(self):
return self.var.shape[0]
def get_n_cell(self):
return self.obs.shape[0]
def get_n_times(self):
return len(self.uniq_times)
class Estimation:
"""deprecated"""
def __init__(
self,
adata,
adata_u=None,
time_key="Time",
normalize=True,
param_ranges=None,
has_nan=False,
):
# initialize Estimation
self.data = MomData(adata, time_key, has_nan)
self.data_u = (
MomData(adata_u, time_key, has_nan) if adata_u is not None else None
)
if param_ranges is None:
param_ranges = {
"a": [0, 10],
"b": [0, 10],
"alpha_a": [10, 1000],
"alpha_i": [0, 10],
"beta": [0, 10],
"gamma": [0, 10],
}
self.normalize = normalize
self.param_ranges = param_ranges
self.n_params = len(param_ranges)
def param_array2dict(self, parr):
if parr.ndim == 1:
return {
"a": parr[0],
"b": parr[1],
"alpha_a": parr[2],
"alpha_i": parr[3],
"beta": parr[4],
"gamma": parr[5],
}
else:
return {
"a": parr[:, 0],
"b": parr[:, 1],
"alpha_a": parr[:, 2],
"alpha_i": parr[:, 3],
"beta": parr[:, 4],
"gamma": parr[:, 5],
}
def fit_gene(self, gene_no, n_p0=10):
estm = estimation(list(self.param_ranges.values()))
if self.data_u is None:
m = self.data.M[gene_no, :].T
v = self.data.V[gene_no, :].T
x_data = np.vstack((m, v))
popt, cost = estm.fit_lsq(
self.data.uniq_times,
x_data,
p0=None,
n_p0=n_p0,
normalize=self.normalize,
experiment_type="nosplice",
)
else:
mu = self.data_u.M[gene_no, :].T
ms = self.data.M[gene_no, :].T
vu = self.data_u.V[gene_no, :].T
vs = self.data.V[gene_no, :].T
x_data = np.vstack((mu, ms, vu, vs))
popt, cost = estm.fit_lsq(
self.data.uniq_times,
x_data,
p0=None,
n_p0=n_p0,
normalize=self.normalize,
experiment_type=None,
)
return popt, cost
def fit(self, n_p0=10):
ng = self.data.get_n_genes()
params = np.zeros((ng, self.n_params))
costs = np.zeros(ng)
for i in tqdm(range(ng), desc="fitting genes"):
params[i], costs[i] = self.fit_gene(i, n_p0)
return params, costs
# ---------------------------------------------------------------------------------------------------
# use for kinetic assumption with full data, deprecated
def moment_model(adata, subset_adata, _group, cur_grp, log_unnormalized, tkey):
"""deprecated"""
# a few hard-coded steps to set up data for the moment mode:
if "uu" in subset_adata.layers.keys() or "X_uu" in subset_adata.layers.keys():
if log_unnormalized and "X_uu" not in subset_adata.layers.keys():
if issparse(subset_adata.layers["uu"]):
(
subset_adata.layers["uu"].data,
subset_adata.layers["ul"].data,
subset_adata.layers["su"].data,
subset_adata.layers["sl"].data,
) = (
np.log(subset_adata.layers["uu"].data + 1),
np.log(subset_adata.layers["ul"].data + 1),
np.log(subset_adata.layers["su"].data + 1),
np.log(subset_adata.layers["sl"].data + 1),
)
else:
(
subset_adata.layers["uu"],
subset_adata.layers["ul"],
subset_adata.layers["su"],
subset_adata.layers["sl"],
) = (
np.log(subset_adata.layers["uu"] + 1),
np.log(subset_adata.layers["ul"] + 1),
np.log(subset_adata.layers["su"] + 1),
np.log(subset_adata.layers["sl"] + 1),
)
subset_adata_u, subset_adata_s = subset_adata.copy(), subset_adata.copy()
del (
subset_adata_u.layers["su"],
subset_adata_u.layers["sl"],
subset_adata_s.layers["uu"],
subset_adata_s.layers["ul"],
)
(
subset_adata_u.layers["new"],
subset_adata_u.layers["old"],
subset_adata_s.layers["new"],
subset_adata_s.layers["old"],
) = (
subset_adata_u.layers.pop("ul"),
subset_adata_u.layers.pop("uu"),
subset_adata_s.layers.pop("sl"),
subset_adata_s.layers.pop("su"),
)
Moment, Moment_ = MomData(subset_adata_s, tkey), MomData(subset_adata_u, tkey)
if cur_grp == _group[0]:
t_ind = 0
g_len, t_len = len(_group), len(np.unique(adata.obs[tkey]))
(
adata.uns["M_sl"],
adata.uns["V_sl"],
adata.uns["M_ul"],
adata.uns["V_ul"],
) = (
np.zeros((Moment.M.shape[0], g_len * t_len)),
np.zeros((Moment.M.shape[0], g_len * t_len)),
np.zeros((Moment.M.shape[0], g_len * t_len)),
np.zeros((Moment.M.shape[0], g_len * t_len)),
)
(
adata.uns["M_sl"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
adata.uns["V_sl"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
adata.uns["M_ul"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
adata.uns["V_ul"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
) = (Moment.M, Moment.V, Moment_.M, Moment_.V)
del Moment_
Est = Estimation(
Moment, adata_u=subset_adata_u, time_key=tkey, normalize=True
) # # data is already normalized
else:
if log_unnormalized and "X_total" not in subset_adata.layers.keys():
if issparse(subset_adata.layers["total"]):
subset_adata.layers["new"].data, subset_adata.layers["total"].data = (
np.log(subset_adata.layers["new"].data + 1),
np.log(subset_adata.layers["total"].data + 1),
)
else:
subset_adata.layers["total"], subset_adata.layers["total"] = (
np.log(subset_adata.layers["new"] + 1),
np.log(subset_adata.layers["total"] + 1),
)
Moment = MomData(subset_adata, tkey)
if cur_grp == _group[0]:
t_ind = 0
g_len, t_len = len(_group), len(np.unique(adata.obs[tkey]))
adata.uns["M"], adata.uns["V"] = (
np.zeros((adata.shape[1], g_len * t_len)),
np.zeros((adata.shape[1], g_len * t_len)),
)
(
adata.uns["M"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
adata.uns["V"][:, (t_len * t_ind) : (t_len * (t_ind + 1))],
) = (Moment.M, Moment.V)
Est = Estimation(
Moment, time_key=tkey, normalize=True
) # # data is already normalized
return adata, Est, t_ind
|
the-stack_106_18760
|
import sys
import os
import string
def calc(species, m, n):
gens = [ [0 for x in range(0, n)] for y in range(0, m) ]
for i in range(1, m):
gen = gens[m - i - 1]
genSub = gens[m - i]
subs = species[m - i]
for j in range(0, n):
s = subs[j]
sIndex = ord(s[1]) - 65
parentIndex = ord(s[0]) - 65
if gen[parentIndex] < genSub[sIndex] + 1:
gen[parentIndex] = genSub[sIndex] + 1
return gens
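# Hedged sketch (not part of the original script): a tiny worked example of calc().
# The input format is inferred from how calc() reads it: each entry of row r (r >= 1)
# is a two-letter code, s[0] naming the parent's column in row r - 1 and s[1] the
# individual's own column in row r; calc() returns, per individual, the length of the
# longest chain of descendants below it.
def _demo_calc():
    species = [
        ['AA', 'BB'],  # row 0: contents never read, only its gens row gets filled
        ['AB', 'BA'],  # row 1: 'AB' -> parent col 0, own col 1; 'BA' -> parent col 1, own col 0
    ]
    gens = calc(species, 2, 2)
    assert gens == [[1, 1], [0, 0]]
    return gens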
def analysis(gens, m, n):
hist = []
age_map = {}
for i in range(0, m):
gen = gens[i]
for j in range(0, n):
age = gen[j]
if age not in age_map:
record = {'age': age, 'count': 0}
age_map[age] = record
hist.append(record)
record = age_map[age]
record['count'] = record['count'] + 1
hist.sort(key=lambda x: x['age'])
return hist
def outputHist(hist, m, n, fname):
g = lambda x: '{:>2d} {:d}'.format(x['age'], x['count'])
ss = '\r\n'.join(map(g, hist))
fpath = os.path.abspath(fname)
with open(fpath, 'w') as f:
f.write(ss)
def outputGens(gens, m, n, fname):
g = lambda x: '{:>3d}'.format(x)
h = lambda x: ' '.join(map(g, x))
ss = '\r\n'.join(map(h, gens))
fpath = os.path.abspath(fname)
with open(fpath, 'w') as f:
f.write(ss)
def input(fname):
species = []
fpath = os.path.abspath(fname)
with open(fpath, 'r') as f:
for line in f.readlines():
individuals = [x for x in line.strip().split(' ') if x != '']
species.append(individuals)
return species
def main(argv):
fin = argv[0] if len(argv) > 0 else 'species.data'
fhist = argv[1] if len(argv) > 1 else 'hist.data'
fgens = argv[2] if len(argv) > 2 else 'gens.data'
species = input(fin)
m = len(species)
n = len(species[0])
gens = calc(species, m, n)
hist = analysis(gens, m, n)
outputHist(hist, m, n, fhist)
outputGens(gens, m, n, fgens)
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_106_18762
|
import unittest
from gluestring.gluegun import Gluegun
class TestGlueit(unittest.TestCase):
def test_basic(self):
# pup_string = "I Love {{pups}} more than {{octopus}}."
# animal_dictionary = {
# "pups" : "🐶🐶🐶",
# "kittens":"🐱🐱🐱",
# "fishes":"🐠🐠🐠",
# "octopus":"🐙🐙🐙"
# }
# pet_string = Gluegun(animal_dictionary)
# pet_string.glue_it(pup_string)
# pet_string = resolve_string(pup_string, animal_dictionary)
pet_gluegun = Gluegun({
"pups": "🐶🐶🐶",
"kittens": "🐱🐱🐱",
"fishes": "🐠🐠🐠",
"octopus": "🐙🐙🐙"
})
result = pet_gluegun.glue_it("I Love {{pups}} more than {{octopus}}.")
self.assertEqual(result, 'I Love 🐶🐶🐶 more than 🐙🐙🐙.')
# python3 -m unittest tests.test_readme_examples
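# Hedged addition (not in the original file): also allow running this test module
# directly with `python <this file>`.
if __name__ == '__main__':
    unittest.main()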
|
the-stack_106_18763
|
# -*- coding: utf-8 -*-
#
# colour-checker-detection documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 5 14:31:53 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
import colour_checker_detection as package
basename = re.sub(r'_(\w)', lambda x: x.group(1).upper(),
package.__name__.title())
autosummary_generate = True
autodoc_mock_imports = ['colour', 'scipy', 'scipy.ndimage.filters']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
'sphinx.ext.autosummary', 'sphinx.ext.napoleon', 'sphinx.ext.mathjax',
'sphinxcontrib.bibtex'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = package.__application_name__
copyright = package.__copyright__.replace('Copyright (C)', '')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{0}.{1}'.format(package.__major_version__,
package.__minor_version__)
# The full version, including alpha/beta/rc tags.
release = package.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as 'system message' paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# '<project> v<release> documentation'.
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/Logo_Small_001.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, 'Created using Sphinx' is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, '(C) Copyright ...' is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g., '.xhtml').
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = '{0}Doc'.format(basename)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize':
'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize':
'10pt',
# Additional stuff for the LaTeX preamble.
'preamble':
"""
\\usepackage{charter}
\\usepackage[defaultsans]{lato}
\\usepackage{inconsolata}
""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', '{0}.tex'.format(basename),
u'{0} Documentation'.format(package.__application_name__),
package.__author__, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = '_static/Logo_Medium_001.png'
# For 'manual' documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', basename,
u'{0} Documentation'.format(package.__application_name__),
[package.__author__], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', basename,
u'{0} Documentation'.format(package.__application_name__),
package.__author__, package.__application_name__, basename,
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the 'Top' node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = package.__application_name__
epub_author = package.__author__
epub_publisher = package.__author__
epub_copyright = package.__copyright__.replace('Copyright (C)', '')
# The basename for the epub file. It defaults to the project name.
# epub_basename = basename
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
autoclass_content = 'both'
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}
def _autodoc_process_docstring(app, what, name, obj, options, lines):
"""
Process the docstrings to remove the *# noqa* *flake8* pragma.
"""
for i, line in enumerate(lines):
lines[i] = line.replace('# noqa', '')
def setup(app):
app.add_stylesheet('custom.css')
app.connect('autodoc-process-docstring', _autodoc_process_docstring)
|
the-stack_106_18764
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return F.relu(x, inplace=True)
class AntonyCnn(nn.Module):
def __init__(self, num_classes=5):
super(AntonyCnn, self).__init__()
self.features = nn.Sequential(
BasicConv2d(3, 32, kernel_size=11, padding=5, stride=2), # 112x112
nn.MaxPool2d(3, padding=1, stride=2), # 56x56
BasicConv2d(32, 64, kernel_size=5, padding=2, stride=1), # 56x56
nn.MaxPool2d(3, padding=1, stride=2), # 28x28
BasicConv2d(64, 96, kernel_size=3, padding=1, stride=1), # -> 28x28
nn.MaxPool2d(3, padding=1, stride=2), # -> 14x14
BasicConv2d(96, 128, kernel_size=3, padding=1, stride=1), # -> 14x14
nn.Dropout2d(p=0.2),
nn.MaxPool2d(3, padding=1, stride=2), # -> 7x7
)
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(7*7*128, 1024),
nn.Linear(1024, num_classes)
)
def forward(self, x):
x = self.features(x)
# x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def densenet121_model(num_class, use_pretrained = False):
model = models.densenet121(pretrained = use_pretrained)
in_features = model.classifier.in_features
model.classifier = torch.nn.Linear(in_features, num_class)
return model
def densenet161_model(num_class, use_pretrained = False):
model = models.densenet161(pretrained = use_pretrained)
in_features = model.classifier.in_features
model.classifier = torch.nn.Linear(in_features, num_class)
return model
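# Hedged usage sketch (not part of the original module): AntonyCnn expects
# 3 x 224 x 224 inputs so that the feature stack ends at 128 x 7 x 7, matching
# the first Linear layer; the densenet helpers simply swap in a new classifier head.
if __name__ == "__main__":
    model = AntonyCnn(num_classes=5)
    dummy = torch.randn(2, 3, 224, 224)  # batch of 2 RGB images
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 5])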
|
the-stack_106_18765
|
"""
Zappa core library. You may also want to look at `cli.py` and `util.py`.
"""
##
# Imports
##
import getpass
import glob
import hashlib
import json
import logging
import os
import random
import re
import shutil
import string
import subprocess
import tarfile
import tempfile
import time
import uuid
import zipfile
from builtins import bytes, int
from distutils.dir_util import copy_tree
from io import open
import requests
from setuptools import find_packages
import boto3
import botocore
import troposphere
import troposphere.apigateway
from botocore.exceptions import ClientError
from tqdm import tqdm
from .utilities import (add_event_source, conflicts_with_a_neighbouring_module,
contains_python_files_or_subdirs, copytree,
get_topic_name, get_venv_from_python_version,
human_size, remove_event_source)
##
# Logging Config
##
logging.basicConfig(format='%(levelname)s:%(message)s')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
##
# Policies And Template Mappings
##
ASSUME_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"apigateway.amazonaws.com",
"lambda.amazonaws.com",
"events.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}"""
ATTACH_POLICY = """{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:*"
],
"Resource": "arn:aws:logs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"lambda:InvokeFunction"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"xray:PutTraceSegments",
"xray:PutTelemetryRecords"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"ec2:AttachNetworkInterface",
"ec2:CreateNetworkInterface",
"ec2:DeleteNetworkInterface",
"ec2:DescribeInstances",
"ec2:DescribeNetworkInterfaces",
"ec2:DetachNetworkInterface",
"ec2:ModifyNetworkInterfaceAttribute",
"ec2:ResetNetworkInterfaceAttribute"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"s3:*"
],
"Resource": "arn:aws:s3:::*"
},
{
"Effect": "Allow",
"Action": [
"kinesis:*"
],
"Resource": "arn:aws:kinesis:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sns:*"
],
"Resource": "arn:aws:sns:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"sqs:*"
],
"Resource": "arn:aws:sqs:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"dynamodb:*"
],
"Resource": "arn:aws:dynamodb:*:*:*"
},
{
"Effect": "Allow",
"Action": [
"route53:*"
],
"Resource": "*"
}
]
}"""
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#apigateway_region
API_GATEWAY_REGIONS = ['us-east-1', 'us-east-2',
'us-west-1', 'us-west-2',
'eu-central-1',
'eu-north-1',
'eu-west-1', 'eu-west-2', 'eu-west-3',
'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3',
'ap-southeast-1', 'ap-southeast-2',
'ap-east-1',
'ap-south-1',
'ca-central-1',
'cn-north-1',
'cn-northwest-1',
'sa-east-1',
'us-gov-east-1', 'us-gov-west-1']
# Latest list: https://docs.aws.amazon.com/general/latest/gr/rande.html#lambda_region
LAMBDA_REGIONS = ['us-east-1', 'us-east-2',
'us-west-1', 'us-west-2',
'eu-central-1',
'eu-north-1',
'eu-west-1', 'eu-west-2', 'eu-west-3',
'ap-northeast-1', 'ap-northeast-2', 'ap-northeast-3',
'ap-southeast-1', 'ap-southeast-2',
'ap-east-1',
'ap-south-1',
'ca-central-1',
'cn-north-1',
'cn-northwest-1',
'sa-east-1',
'us-gov-east-1',
'us-gov-west-1']
# We never need to include these.
# Related: https://github.com/Miserlou/Zappa/pull/56
# Related: https://github.com/Miserlou/Zappa/pull/581
ZIP_EXCLUDES = [
'*.exe', '*.DS_Store', '*.Python', '*.git', '.git/*', '*.zip', '*.tar.gz',
'*.hg', 'pip', 'docutils*', 'setuputils*', '__pycache__/*'
]
# When using ALB as an event source for Lambdas, we need to create an alias
# to ensure that, on zappa update, the ALB doesn't lose permissions to access
# the Lambda.
# See: https://github.com/Miserlou/Zappa/pull/1730
ALB_LAMBDA_ALIAS = 'current-alb-version'
##
# Classes
##
class Zappa:
"""
Zappa!
Makes it easy to run Python web applications on AWS Lambda/API Gateway.
"""
##
# Configurables
##
http_methods = ['ANY']
role_name = "ZappaLambdaExecution"
extra_permissions = None
assume_policy = ASSUME_POLICY
attach_policy = ATTACH_POLICY
apigateway_policy = None
cloudwatch_log_levels = ['OFF', 'ERROR', 'INFO']
xray_tracing = False
##
# Credentials
##
boto_session = None
credentials_arn = None
def __init__(self,
boto_session=None,
profile_name=None,
aws_region=None,
load_credentials=True,
desired_role_name=None,
desired_role_arn=None,
runtime='python3.6', # Detected at runtime in CLI
tags=(),
endpoint_urls={},
xray_tracing=False
):
"""
Instantiate this new Zappa instance, loading any custom credentials if necessary.
"""
# Set aws_region to None to use the system's region instead
if aws_region is None:
# https://github.com/Miserlou/Zappa/issues/413
self.aws_region = boto3.Session().region_name
logger.debug("Set region from boto: %s", self.aws_region)
else:
self.aws_region = aws_region
if desired_role_name:
self.role_name = desired_role_name
if desired_role_arn:
self.credentials_arn = desired_role_arn
self.runtime = runtime
if self.runtime == 'python3.6':
self.manylinux_suffix_start = 'cp36m'
elif self.runtime == 'python3.7':
self.manylinux_suffix_start = 'cp37m'
else:
# The 'm' has been dropped in python 3.8+ since builds with and without pymalloc are ABI compatible
# See https://github.com/pypa/manylinux for a more detailed explanation
self.manylinux_suffix_start = 'cp38'
# AWS Lambda supports manylinux1/2010 and manylinux2014
manylinux_suffixes = ("2014", "2010", "1")
self.manylinux_wheel_file_match = re.compile(f'^.*{self.manylinux_suffix_start}-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$')
self.manylinux_wheel_abi3_file_match = re.compile(f'^.*cp3.-abi3-manylinux({"|".join(manylinux_suffixes)})_x86_64.whl$')
self.endpoint_urls = endpoint_urls
self.xray_tracing = xray_tracing
# Some common invocations, such as DB migrations,
# can take longer than the default.
# Note that this is set to 300s, but if connected to
# APIGW, Lambda will max out at 30s.
# Related: https://github.com/Miserlou/Zappa/issues/205
long_config_dict = {
'region_name': aws_region,
'connect_timeout': 5,
'read_timeout': 300
}
long_config = botocore.client.Config(**long_config_dict)
if load_credentials:
self.load_credentials(boto_session, profile_name)
# Initialize clients
self.s3_client = self.boto_client('s3')
self.lambda_client = self.boto_client('lambda', config=long_config)
self.elbv2_client = self.boto_client('elbv2')
self.events_client = self.boto_client('events')
self.apigateway_client = self.boto_client('apigateway')
# AWS ACM certificates need to be created from us-east-1 to be used by API gateway
east_config = botocore.client.Config(region_name='us-east-1')
self.acm_client = self.boto_client('acm', config=east_config)
self.logs_client = self.boto_client('logs')
self.iam_client = self.boto_client('iam')
self.iam = self.boto_resource('iam')
self.cloudwatch = self.boto_client('cloudwatch')
self.route53 = self.boto_client('route53')
self.sns_client = self.boto_client('sns')
self.cf_client = self.boto_client('cloudformation')
self.dynamodb_client = self.boto_client('dynamodb')
self.cognito_client = self.boto_client('cognito-idp')
self.sts_client = self.boto_client('sts')
self.tags = tags
self.cf_template = troposphere.Template()
self.cf_api_resources = []
self.cf_parameters = {}
def configure_boto_session_method_kwargs(self, service, kw):
"""Allow for custom endpoint urls for non-AWS (testing and bootleg cloud) deployments"""
if service in self.endpoint_urls and 'endpoint_url' not in kw:
kw['endpoint_url'] = self.endpoint_urls[service]
return kw
def boto_client(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto clients"""
return self.boto_session.client(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs))
def boto_resource(self, service, *args, **kwargs):
"""A wrapper to apply configuration options to boto resources"""
return self.boto_session.resource(service, *args, **self.configure_boto_session_method_kwargs(service, kwargs))
def cache_param(self, value):
'''Returns a troposphere Ref to a value cached as a parameter.'''
if value not in self.cf_parameters:
keyname = chr(ord('A') + len(self.cf_parameters))
param = self.cf_template.add_parameter(troposphere.Parameter(
keyname, Type="String", Default=value, tags=self.tags
))
self.cf_parameters[value] = param
return troposphere.Ref(self.cf_parameters[value])
##
# Packaging
##
def copy_editable_packages(self, egg_links, temp_package_path):
""" """
for egg_link in egg_links:
with open(egg_link, 'rb') as df:
egg_path = df.read().decode('utf-8').splitlines()[0].strip()
pkgs = set([x.split(".")[0] for x in find_packages(egg_path, exclude=['test', 'tests'])])
for pkg in pkgs:
copytree(os.path.join(egg_path, pkg), os.path.join(temp_package_path, pkg), metadata=False, symlinks=False)
if temp_package_path:
# now remove any egg-links as they will cause issues if they still exist
for link in glob.glob(os.path.join(temp_package_path, "*.egg-link")):
os.remove(link)
def get_deps_list(self, pkg_name, installed_distros=None):
"""
For a given package, returns a list of required packages. Recursive.
"""
# https://github.com/Miserlou/Zappa/issues/1478. Using `pkg_resources`
# instead of `pip` is the recommended approach. The usage is nearly
# identical.
import pkg_resources
deps = []
if not installed_distros:
installed_distros = pkg_resources.WorkingSet()
for package in installed_distros:
if package.project_name.lower() == pkg_name.lower():
deps = [(package.project_name, package.version)]
for req in package.requires():
deps += self.get_deps_list(pkg_name=req.project_name, installed_distros=installed_distros)
return list(set(deps)) # de-dupe before returning
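# Hedged example (package names are illustrative placeholders): get_deps_list('requests')
# would return de-duplicated (name, version) pairs for 'requests' itself plus its
# transitive requirements as installed in the active environment, e.g.
# [('requests', '<version>'), ('urllib3', '<version>'), ...].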
def create_handler_venv(self):
"""
Takes the installed zappa and brings it into a fresh virtualenv-like folder. All dependencies are then downloaded.
"""
import subprocess
# We will need the current venv to pull Zappa from
current_venv = self.get_current_venv()
# Make a new folder for the handler packages
ve_path = os.path.join(os.getcwd(), 'handler_venv')
if os.sys.platform == 'win32':
current_site_packages_dir = os.path.join(current_venv, 'Lib', 'site-packages')
venv_site_packages_dir = os.path.join(ve_path, 'Lib', 'site-packages')
else:
current_site_packages_dir = os.path.join(current_venv, 'lib', get_venv_from_python_version(), 'site-packages')
venv_site_packages_dir = os.path.join(ve_path, 'lib', get_venv_from_python_version(), 'site-packages')
if not os.path.isdir(venv_site_packages_dir):
os.makedirs(venv_site_packages_dir)
# Copy zappa* to the new virtualenv
zappa_things = [z for z in os.listdir(current_site_packages_dir) if z.lower()[:5] == 'zappa']
for z in zappa_things:
copytree(os.path.join(current_site_packages_dir, z), os.path.join(venv_site_packages_dir, z))
# Use pip to download zappa's dependencies. Copying from current venv causes issues with things like PyYAML that installs as yaml
zappa_deps = self.get_deps_list('zappa')
pkg_list = ['{0!s}=={1!s}'.format(dep, version) for dep, version in zappa_deps]
# Need to manually add setuptools
pkg_list.append('setuptools')
command = ["pip", "install", "--quiet", "--target", venv_site_packages_dir] + pkg_list
# This is the recommended method for installing packages if you don't
# want to depend on `setuptools`
# https://github.com/pypa/pip/issues/5240#issuecomment-381662679
pip_process = subprocess.Popen(command, stdout=subprocess.PIPE)
# Using communicate() to avoid deadlocks
pip_process.communicate()
pip_return_code = pip_process.returncode
if pip_return_code:
raise EnvironmentError("Pypi lookup failed")
return ve_path
# staticmethod as per https://github.com/Miserlou/Zappa/issues/780
@staticmethod
def get_current_venv():
"""
Returns the path to the current virtualenv
"""
if 'VIRTUAL_ENV' in os.environ:
venv = os.environ['VIRTUAL_ENV']
elif os.path.exists('.python-version'): # pragma: no cover
try:
subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT)
except OSError:
print("This directory seems to have pyenv's local venv, "
"but pyenv executable was not found.")
with open('.python-version', 'r') as f:
# minor fix in how .python-version is read
# Related: https://github.com/Miserlou/Zappa/issues/921
env_name = f.readline().strip()
bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
venv = bin_path[:bin_path.rfind(env_name)] + env_name
else: # pragma: no cover
return None
return venv
def create_lambda_zip( self,
prefix='lambda_package',
handler_file=None,
slim_handler=False,
minify=True,
exclude=None,
exclude_glob=None,
use_precompiled_packages=True,
include=None,
venv=None,
output=None,
disable_progress=False,
archive_format='zip'
):
"""
Create a Lambda-ready zip file of the current virtualenvironment and working directory.
Returns path to that file.
"""
# Validate archive_format
if archive_format not in ['zip', 'tarball']:
raise KeyError("The archive format to create a lambda package must be zip or tarball")
# Pip is a weird package.
# Calling this function in some environments without this can cause.. funkiness.
import pip
if not venv:
venv = self.get_current_venv()
build_time = str(int(time.time()))
cwd = os.getcwd()
if not output:
if archive_format == 'zip':
archive_fname = prefix + '-' + build_time + '.zip'
elif archive_format == 'tarball':
archive_fname = prefix + '-' + build_time + '.tar.gz'
else:
archive_fname = output
archive_path = os.path.join(cwd, archive_fname)
# Files that should be excluded from the zip
if exclude is None:
exclude = list()
if exclude_glob is None:
exclude_glob = list()
# Exclude the zip itself
exclude.append(archive_path)
# Make sure that 'concurrent' is always forbidden.
# https://github.com/Miserlou/Zappa/issues/827
if 'concurrent' not in exclude:
exclude.append('concurrent')
def splitpath(path):
parts = []
(path, tail) = os.path.split(path)
while path and tail:
parts.append(tail)
(path, tail) = os.path.split(path)
parts.append(os.path.join(path, tail))
return list(map(os.path.normpath, parts))[::-1]
split_venv = splitpath(venv)
split_cwd = splitpath(cwd)
# Ideally this should be avoided automatically,
# but this serves as an okay stop-gap measure.
if split_venv[-1] == split_cwd[-1]: # pragma: no cover
print(
"Warning! Your project and virtualenv have the same name! You may want "
"to re-create your venv with a new name, or explicitly define a "
"'project_name', as this may cause errors."
)
# First, do the project..
temp_project_path = tempfile.mkdtemp(prefix='zappa-project')
if not slim_handler:
# Slim handler does not take the project files.
if minify:
# Related: https://github.com/Miserlou/Zappa/issues/744
excludes = ZIP_EXCLUDES + exclude + [split_venv[-1]]
copytree(cwd, temp_project_path, metadata=False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
else:
copytree(cwd, temp_project_path, metadata=False, symlinks=False)
for glob_path in exclude_glob:
for path in glob.glob(os.path.join(temp_project_path, glob_path)):
try:
os.remove(path)
except OSError: # is a directory
shutil.rmtree(path)
# If a handler_file is supplied, copy that to the root of the package,
# because that's where AWS Lambda looks for it. It can't be inside a package.
if handler_file:
filename = handler_file.split(os.sep)[-1]
shutil.copy(handler_file, os.path.join(temp_project_path, filename))
# Create and populate package ID file and write to temp project path
package_info = {}
package_info['uuid'] = str(uuid.uuid4())
package_info['build_time'] = build_time
package_info['build_platform'] = os.sys.platform
package_info['build_user'] = getpass.getuser()
# TODO: Add git head and info?
# Ex, from @scoates:
# def _get_git_branch():
# chdir(DIR)
# out = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
# lambci_branch = environ.get('LAMBCI_BRANCH', None)
# if out == "HEAD" and lambci_branch:
# out += " lambci:{}".format(lambci_branch)
# return out
# def _get_git_hash():
# chdir(DIR)
# return check_output(['git', 'rev-parse', 'HEAD']).strip()
# def _get_uname():
# return check_output(['uname', '-a']).strip()
# def _get_user():
# return check_output(['whoami']).strip()
# def set_id_info(zappa_cli):
# build_info = {
# 'branch': _get_git_branch(),
# 'hash': _get_git_hash(),
# 'build_uname': _get_uname(),
# 'build_user': _get_user(),
# 'build_time': datetime.datetime.utcnow().isoformat(),
# }
# with open(path.join(DIR, 'id_info.json'), 'w') as f:
# json.dump(build_info, f)
# return True
package_id_file = open(os.path.join(temp_project_path, 'package_info.json'), 'w')
dumped = json.dumps(package_info, indent=4)
try:
package_id_file.write(dumped)
except TypeError: # This is a Python 2/3 issue. TODO: Make pretty!
package_id_file.write(str(dumped))
package_id_file.close()
# Then, do the site-packages..
egg_links = []
temp_package_path = tempfile.mkdtemp(prefix='zappa-packages')
if os.sys.platform == 'win32':
site_packages = os.path.join(venv, 'Lib', 'site-packages')
else:
site_packages = os.path.join(venv, 'lib', get_venv_from_python_version(), 'site-packages')
egg_links.extend(glob.glob(os.path.join(site_packages, '*.egg-link')))
if minify:
excludes = ZIP_EXCLUDES + exclude
copytree(site_packages, temp_package_path, metadata=False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
else:
copytree(site_packages, temp_package_path, metadata=False, symlinks=False)
# We may have 64-bit specific packages too.
site_packages_64 = os.path.join(venv, 'lib64', get_venv_from_python_version(), 'site-packages')
if os.path.exists(site_packages_64):
egg_links.extend(glob.glob(os.path.join(site_packages_64, '*.egg-link')))
if minify:
excludes = ZIP_EXCLUDES + exclude
copytree(site_packages_64, temp_package_path, metadata = False, symlinks=False, ignore=shutil.ignore_patterns(*excludes))
else:
copytree(site_packages_64, temp_package_path, metadata = False, symlinks=False)
if egg_links:
self.copy_editable_packages(egg_links, temp_package_path)
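# Overlay the copied site-packages onto the project files so the final
# archive contains both the application code and its dependencies.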
copy_tree(temp_package_path, temp_project_path, update=True)
# Then the pre-compiled packages..
if use_precompiled_packages:
print("Downloading and installing dependencies..")
installed_packages = self.get_installed_packages(site_packages, site_packages_64)
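# For each installed package, swap in a precompiled manylinux wheel when one
# is available, so C extensions match Lambda's Linux environment rather than
# the local machine.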
try:
for installed_package_name, installed_package_version in installed_packages.items():
cached_wheel_path = self.get_cached_manylinux_wheel(installed_package_name, installed_package_version, disable_progress)
if cached_wheel_path:
# Prefer the manylinux wheel from PyPI over the locally built package..
# Related: https://github.com/Miserlou/Zappa/issues/398
shutil.rmtree(os.path.join(temp_project_path, installed_package_name), ignore_errors=True)
with zipfile.ZipFile(cached_wheel_path) as zfile:
zfile.extractall(temp_project_path)
except Exception as e:
print(e)
# XXX - What should we do here?
# Cleanup
for glob_path in exclude_glob:
for path in glob.glob(os.path.join(temp_project_path, glob_path)):
try:
os.remove(path)
except OSError: # is a directory
shutil.rmtree(path)
# Then archive it all up..
if archive_format == 'zip':
print("Packaging project as zip.")
try:
compression_method = zipfile.ZIP_DEFLATED
except ImportError: # pragma: no cover
compression_method = zipfile.ZIP_STORED
archivef = zipfile.ZipFile(archive_path, 'w', compression_method)
elif archive_format == 'tarball':
print("Packaging project as gzipped tarball.")
archivef = tarfile.open(archive_path, 'w|gz')
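# Walk the temp project directory and add every file to the archive,
# normalising permissions and skipping redundant source/bytecode files.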
for root, dirs, files in os.walk(temp_project_path):
for filename in files:
# Skip .pyc files for Django migrations
# https://github.com/Miserlou/Zappa/issues/436
# https://github.com/Miserlou/Zappa/issues/464
if filename[-4:] == '.pyc' and root[-10:] == 'migrations':
continue
# If there is a .pyc file in this package,
# we can skip the python source code as we'll just
# use the compiled bytecode anyway..
if filename[-3:] == '.py' and root[-10:] != 'migrations':
abs_filname = os.path.join(root, filename)
abs_pyc_filename = abs_filname + 'c'
if os.path.isfile(abs_pyc_filename):
# but only if the pyc is older than the py,
# otherwise we'll deploy outdated code!
py_time = os.stat(abs_filname).st_mtime
pyc_time = os.stat(abs_pyc_filename).st_mtime
if pyc_time > py_time:
continue
# Make sure that the files are all correctly chmodded
# Related: https://github.com/Miserlou/Zappa/issues/484
# Related: https://github.com/Miserlou/Zappa/issues/682
os.chmod(os.path.join(root, filename), 0o755)
if archive_format == 'zip':
# Actually put the file into the proper place in the zip
# Related: https://github.com/Miserlou/Zappa/pull/716
zipi = zipfile.ZipInfo(os.path.join(root.replace(temp_project_path, '').lstrip(os.sep), filename))
zipi.create_system = 3
zipi.external_attr = 0o755 << int(16) # Is this P2/P3 functional?
with open(os.path.join(root, filename), 'rb') as f:
archivef.writestr(zipi, f.read(), compression_method)
elif archive_format == 'tarball':
tarinfo = tarfile.TarInfo(os.path.join(root.replace(temp_project_path, '').lstrip(os.sep), filename))
tarinfo.mode = 0o755
stat = os.stat(os.path.join(root, filename))
tarinfo.mtime = stat.st_mtime
tarinfo.size = stat.st_size
with open(os.path.join(root, filename), 'rb') as f:
archivef.addfile(tarinfo, f)
# Create python init file if it does not exist
# Only do that if there are sub folders or python files and it does not conflict with a neighbouring module
# Related: https://github.com/Miserlou/Zappa/issues/766
if not contains_python_files_or_subdirs(root):
# if the directory does not contain any .py file at any level, we can skip the rest
dirs[:] = [d for d in dirs if d != root]
else:
if '__init__.py' not in files and not conflicts_with_a_neighbouring_module(root):
tmp_init = os.path.join(temp_project_path, '__init__.py')
open(tmp_init, 'a').close()
os.chmod(tmp_init, 0o755)
arcname = os.path.join(root.replace(temp_project_path, ''),
os.path.join(root.replace(temp_project_path, ''), '__init__.py'))
if archive_format == 'zip':
archivef.write(tmp_init, arcname)
elif archive_format == 'tarball':
archivef.add(tmp_init, arcname)
# And, we're done!
archivef.close()
# Trash the temp directory
shutil.rmtree(temp_project_path)
shutil.rmtree(temp_package_path)
if os.path.isdir(venv) and slim_handler:
# Remove the temporary handler venv folder
shutil.rmtree(venv)
return archive_fname
@staticmethod
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {package.project_name.lower(): package.version for package in
pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower() in [site_packages.lower(), site_packages_64.lower()]}
return installed_packages
@staticmethod
def download_url_with_progress(url, stream, disable_progress):
"""
Downloads a given url in chunks and writes to the provided stream (can be any io stream).
Displays the progress bar for the download.
"""
resp = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 2)), stream=True)
resp.raw.decode_content = True
progress = tqdm(unit="B", unit_scale=True, total=int(resp.headers.get('Content-Length', 0)), disable=disable_progress)
for chunk in resp.iter_content(chunk_size=1024):
if chunk:
progress.update(len(chunk))
stream.write(chunk)
progress.close()
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False):
"""
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
"""
cached_wheels_dir = os.path.join(tempfile.gettempdir(), 'cached_wheels')
if not os.path.isdir(cached_wheels_dir):
os.makedirs(cached_wheels_dir)
else:
# Check if we already have a cached copy
wheel_file = f'{package_name}-{package_version}-*_x86_64.whl'
wheel_path = os.path.join(cached_wheels_dir, wheel_file)
for pathname in glob.iglob(wheel_path):
if re.match(self.manylinux_wheel_file_match, pathname) or re.match(self.manylinux_wheel_abi3_file_match, pathname):
print(f" - {package_name}=={package_version}: Using locally cached manylinux wheel")
return pathname
# The file is not cached, download it.
wheel_url, filename = self.get_manylinux_wheel_url(package_name, package_version)
if not wheel_url:
return None
wheel_path = os.path.join(cached_wheels_dir, filename)
print(f" - {package_name}=={package_version}: Downloading")
with open(wheel_path, 'wb') as f:
self.download_url_with_progress(wheel_url, f, disable_progress)
if not zipfile.is_zipfile(wheel_path):
return None
return wheel_path
def get_manylinux_wheel_url(self, package_name, package_version):
"""
For a given package name and version, returns the manylinux wheel's
download URL and filename, else returns None, None.
Related: https://github.com/Miserlou/Zappa/issues/398
Examples here: https://gist.github.com/perrygeo/9545f94eaddec18a65fd7b56880adbae
This function downloads metadata JSON of `package_name` from Pypi
and examines if the package has a manylinux wheel. This function
also caches the JSON file so that we don't have to poll Pypi
every time.
"""
cached_pypi_info_dir = os.path.join(tempfile.gettempdir(), 'cached_pypi_info')
if not os.path.isdir(cached_pypi_info_dir):
os.makedirs(cached_pypi_info_dir)
# Even though the metadata is for the package, we save it in a
# filename that includes the package's version. This helps in
# invalidating the cached file if the user moves to a different
# version of the package.
# Related: https://github.com/Miserlou/Zappa/issues/899
json_file = '{0!s}-{1!s}.json'.format(package_name, package_version)
json_file_path = os.path.join(cached_pypi_info_dir, json_file)
if os.path.exists(json_file_path):
with open(json_file_path, 'rb') as metafile:
data = json.load(metafile)
else:
url = 'https://pypi.python.org/pypi/{}/json'.format(package_name)
try:
res = requests.get(url, timeout=float(os.environ.get('PIP_TIMEOUT', 1.5)))
data = res.json()
except Exception as e: # pragma: no cover
return None, None
with open(json_file_path, 'wb') as metafile:
jsondata = json.dumps(data)
metafile.write(bytes(jsondata, "utf-8"))
if package_version not in data['releases']:
return None, None
for f in data['releases'][package_version]:
if re.match(self.manylinux_wheel_file_match, f['filename']):
return f['url'], f['filename']
elif re.match(self.manylinux_wheel_abi3_file_match, f['filename']):
return f['url'], f['filename']
return None, None
##
# S3
##
def upload_to_s3(self, source_path, bucket_name, disable_progress=False):
r"""
Given a file, upload it to S3.
Credentials should be stored in environment variables or ~/.aws/credentials (%USERPROFILE%\.aws\credentials on Windows).
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError:
# This is a really stupid S3 quirk. Technically, us-east-1 doesn't take a
# LocationConstraint; it's the old "US Standard" region.
# More here: https://github.com/boto/boto3/issues/125
if self.aws_region == 'us-east-1':
self.s3_client.create_bucket(
Bucket=bucket_name,
)
else:
self.s3_client.create_bucket(
Bucket=bucket_name,
CreateBucketConfiguration={'LocationConstraint': self.aws_region},
)
if self.tags:
tags = {
'TagSet': [{'Key': key, 'Value': self.tags[key]} for key in self.tags.keys()]
}
self.s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tags)
if not os.path.isfile(source_path) or os.stat(source_path).st_size == 0:
print("Problem with source file {}".format(source_path))
return False
dest_path = os.path.split(source_path)[1]
try:
source_size = os.stat(source_path).st_size
print("Uploading {0} ({1})..".format(dest_path, human_size(source_size)))
progress = tqdm(total=float(os.path.getsize(source_path)), unit_scale=True, unit='B', disable=disable_progress)
# Attempt to upload to S3 using the S3 meta client with the progress bar.
# If we're unable to do that, try one more time using a session client,
# which cannot use the progress bar.
# Related: https://github.com/boto/boto3/issues/611
try:
self.s3_client.upload_file(
source_path, bucket_name, dest_path,
Callback=progress.update
)
except Exception as e: # pragma: no cover
self.s3_client.upload_file(source_path, bucket_name, dest_path)
progress.close()
except (KeyboardInterrupt, SystemExit): # pragma: no cover
raise
except Exception as e: # pragma: no cover
print(e)
return False
return True
def copy_on_s3(self, src_file_name, dst_file_name, bucket_name):
"""
Copies src file to destination within a bucket.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return False
copy_src = {
"Bucket": bucket_name,
"Key": src_file_name
}
try:
self.s3_client.copy(
CopySource=copy_src,
Bucket=bucket_name,
Key=dst_file_name
)
return True
except botocore.exceptions.ClientError: # pragma: no cover
return False
def remove_from_s3(self, file_name, bucket_name):
"""
Given a file name and a bucket, remove it from S3.
There's no reason to keep the file hosted on S3 once it's been made into a Lambda function, so we can delete it from S3.
Returns True on success, False on failure.
"""
try:
self.s3_client.head_bucket(Bucket=bucket_name)
except botocore.exceptions.ClientError as e: # pragma: no cover
# If a client error is thrown, then check that it was a 404 error.
# If it was a 404 error, then the bucket does not exist.
error_code = int(e.response['Error']['Code'])
if error_code == 404:
return False
try:
self.s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError): # pragma: no cover
return False
##
# Lambda
##
def create_lambda_function( self,
bucket=None,
function_name=None,
handler=None,
s3_key=None,
description='Zappa Deployment',
timeout=30,
memory_size=512,
publish=True,
vpc_config=None,
dead_letter_config=None,
runtime='python3.6',
aws_environment_variables=None,
aws_kms_key_arn=None,
xray_tracing=False,
local_zip=None,
use_alb=False,
layers=None,
concurrency=None,
):
"""
Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, register that Lambda function.
"""
if not vpc_config:
vpc_config = {}
if not dead_letter_config:
dead_letter_config = {}
if not self.credentials_arn:
self.get_credentials_arn()
if not aws_environment_variables:
aws_environment_variables = {}
if not aws_kms_key_arn:
aws_kms_key_arn = ''
if not layers:
layers = []
kwargs = dict(
FunctionName=function_name,
Runtime=runtime,
Role=self.credentials_arn,
Handler=handler,
Description=description,
Timeout=timeout,
MemorySize=memory_size,
Publish=publish,
VpcConfig=vpc_config,
DeadLetterConfig=dead_letter_config,
Environment={'Variables': aws_environment_variables},
KMSKeyArn=aws_kms_key_arn,
TracingConfig={
'Mode': 'Active' if self.xray_tracing else 'PassThrough'
},
Layers=layers
)
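# The function code is supplied either inline (local zip bytes) or as a
# reference to an object in S3.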
if local_zip:
kwargs['Code'] = {
'ZipFile': local_zip
}
else:
kwargs['Code'] = {
'S3Bucket': bucket,
'S3Key': s3_key
}
response = self.lambda_client.create_function(**kwargs)
resource_arn = response['FunctionArn']
version = response['Version']
# If we're using an ALB, let's create an alias mapped to the newly
# created function. This allows clean, no downtime association when
# using application load balancers as an event source.
# See: https://github.com/Miserlou/Zappa/pull/1730
# https://github.com/Miserlou/Zappa/issues/1823
if use_alb:
self.lambda_client.create_alias(
FunctionName=resource_arn,
FunctionVersion=version,
Name=ALB_LAMBDA_ALIAS,
)
if self.tags:
self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
if concurrency is not None:
self.lambda_client.put_function_concurrency(
FunctionName=resource_arn,
ReservedConcurrentExecutions=concurrency,
)
return resource_arn
def update_lambda_function(self, bucket, function_name, s3_key=None, publish=True, local_zip=None, num_revisions=None, concurrency=None):
"""
Given a bucket and key (or a local path) of a valid Lambda-zip, a function name and a handler, update that Lambda function's code.
Optionally, delete previous versions if they exceed the optional limit.
"""
print("Updating Lambda function code..")
kwargs = dict(
FunctionName=function_name,
Publish=publish
)
if local_zip:
kwargs['ZipFile'] = local_zip
else:
kwargs['S3Bucket'] = bucket
kwargs['S3Key'] = s3_key
response = self.lambda_client.update_function_code(**kwargs)
resource_arn = response['FunctionArn']
version = response['Version']
# If the lambda has an ALB alias, let's update the alias
# to point to the newest version of the function. We have to use a GET
# here, as there's no HEAD-esque call to retrieve metadata about a
# function alias.
# Related: https://github.com/Miserlou/Zappa/pull/1730
# https://github.com/Miserlou/Zappa/issues/1823
try:
response = self.lambda_client.get_alias(
FunctionName=function_name,
Name=ALB_LAMBDA_ALIAS,
)
alias_exists = True
except botocore.exceptions.ClientError as e: # pragma: no cover
if "ResourceNotFoundException" not in e.response["Error"]["Code"]:
raise e
alias_exists = False
if alias_exists:
self.lambda_client.update_alias(
FunctionName=function_name,
FunctionVersion=version,
Name=ALB_LAMBDA_ALIAS,
)
if concurrency is not None:
self.lambda_client.put_function_concurrency(
FunctionName=function_name,
ReservedConcurrentExecutions=concurrency,
)
else:
self.lambda_client.delete_function_concurrency(
FunctionName=function_name
)
if num_revisions:
# Find the existing revision IDs for the given function
# Related: https://github.com/Miserlou/Zappa/issues/1402
versions_in_lambda = []
versions = self.lambda_client.list_versions_by_function(FunctionName=function_name)
for version in versions['Versions']:
versions_in_lambda.append(version['Version'])
while 'NextMarker' in versions:
versions = self.lambda_client.list_versions_by_function(FunctionName=function_name,Marker=versions['NextMarker'])
for version in versions['Versions']:
versions_in_lambda.append(version['Version'])
versions_in_lambda.remove('$LATEST')
# Delete older revisions if their number exceeds the specified limit
for version in versions_in_lambda[::-1][num_revisions:]:
self.lambda_client.delete_function(FunctionName=function_name,Qualifier=version)
return resource_arn
def update_lambda_configuration( self,
lambda_arn,
function_name,
handler,
description='Zappa Deployment',
timeout=30,
memory_size=512,
publish=True,
vpc_config=None,
runtime='python3.6',
aws_environment_variables=None,
aws_kms_key_arn=None,
layers=None
):
"""
Given an existing function ARN, update the configuration variables.
"""
print("Updating Lambda function configuration..")
if not vpc_config:
vpc_config = {}
if not self.credentials_arn:
self.get_credentials_arn()
if not aws_kms_key_arn:
aws_kms_key_arn = ''
if not aws_environment_variables:
aws_environment_variables = {}
if not layers:
layers = []
# Check if there are any remote aws lambda env vars so they don't get trashed.
# https://github.com/Miserlou/Zappa/issues/987, Related: https://github.com/Miserlou/Zappa/issues/765
lambda_aws_config = self.lambda_client.get_function_configuration(FunctionName=function_name)
if "Environment" in lambda_aws_config:
lambda_aws_environment_variables = lambda_aws_config["Environment"].get("Variables", {})
# Append keys that are remote but not in settings file
for key, value in lambda_aws_environment_variables.items():
if key not in aws_environment_variables:
aws_environment_variables[key] = value
response = self.lambda_client.update_function_configuration(
FunctionName=function_name,
Runtime=runtime,
Role=self.credentials_arn,
Handler=handler,
Description=description,
Timeout=timeout,
MemorySize=memory_size,
VpcConfig=vpc_config,
Environment={'Variables': aws_environment_variables},
KMSKeyArn=aws_kms_key_arn,
TracingConfig={
'Mode': 'Active' if self.xray_tracing else 'PassThrough'
},
Layers=layers
)
resource_arn = response['FunctionArn']
if self.tags:
self.lambda_client.tag_resource(Resource=resource_arn, Tags=self.tags)
return resource_arn
def invoke_lambda_function( self,
function_name,
payload,
invocation_type='Event',
log_type='Tail',
client_context=None,
qualifier=None
):
"""
Directly invoke a named Lambda function with a payload.
Returns the response.
"""
return self.lambda_client.invoke(
FunctionName=function_name,
InvocationType=invocation_type,
LogType=log_type,
Payload=payload
)
def rollback_lambda_function_version(self, function_name, versions_back=1, publish=True):
"""
Roll back the lambda function code 'versions_back' number of revisions.
Returns the Function ARN.
"""
response = self.lambda_client.list_versions_by_function(FunctionName=function_name)
# Take into account $LATEST
if len(response['Versions']) < versions_back + 1:
print("We do not have {} revisions. Aborting".format(str(versions_back)))
return False
revisions = [int(revision['Version']) for revision in response['Versions'] if revision['Version'] != '$LATEST']
revisions.sort(reverse=True)
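# Download the zip of the requested older version from Lambda and re-publish
# it as the latest code.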
response = self.lambda_client.get_function(FunctionName='function:{}:{}'.format(function_name, revisions[versions_back]))
response = requests.get(response['Code']['Location'])
if response.status_code != 200:
print("Failed to get version {} of {} code".format(versions_back, function_name))
return False
response = self.lambda_client.update_function_code(FunctionName=function_name, ZipFile=response.content, Publish=publish) # pragma: no cover
return response['FunctionArn']
def get_lambda_function(self, function_name):
"""
Returns the lambda function ARN, given a name
This requires the "lambda:GetFunction" role.
"""
response = self.lambda_client.get_function(
FunctionName=function_name)
return response['Configuration']['FunctionArn']
def get_lambda_function_versions(self, function_name):
"""
Simply returns the versions available for a Lambda function, given a function name.
"""
try:
response = self.lambda_client.list_versions_by_function(
FunctionName=function_name
)
return response.get('Versions', [])
except Exception:
return []
def delete_lambda_function(self, function_name):
"""
Given a function name, delete it from AWS Lambda.
Returns the response.
"""
print("Deleting Lambda function..")
return self.lambda_client.delete_function(
FunctionName=function_name,
)
##
# Application load balancer
##
def deploy_lambda_alb( self,
lambda_arn,
lambda_name,
alb_vpc_config,
timeout
):
"""
The `zappa deploy` functionality for ALB infrastructure.
"""
if not alb_vpc_config:
raise EnvironmentError('When creating an ALB, alb_vpc_config must be filled out in zappa_settings.')
if 'SubnetIds' not in alb_vpc_config:
raise EnvironmentError('When creating an ALB, you must supply two subnets in different availability zones.')
if 'SecurityGroupIds' not in alb_vpc_config:
alb_vpc_config["SecurityGroupIds"] = []
if not alb_vpc_config.get('CertificateArn'):
raise EnvironmentError('When creating an ALB, you must supply a CertificateArn for the HTTPS listener.')
# Related: https://github.com/Miserlou/Zappa/issues/1856
if 'Scheme' not in alb_vpc_config:
alb_vpc_config["Scheme"] = "internet-facing"
print("Deploying ALB infrastructure...")
# Create load balancer
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_load_balancer
kwargs = dict(
Name=lambda_name,
Subnets=alb_vpc_config["SubnetIds"],
SecurityGroups=alb_vpc_config["SecurityGroupIds"],
Scheme=alb_vpc_config["Scheme"],
# TODO: Tags might be a useful means of keeping track of zappa-generated assets.
#Tags=[],
Type="application",
# TODO: can be ipv4 or dualstack (for ipv4 and ipv6); ipv4 is required for the internal Scheme.
IpAddressType="ipv4"
)
response = self.elbv2_client.create_load_balancer(**kwargs)
if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) != 1:
raise EnvironmentError("Failure to create application load balancer. Response was in unexpected format. Response was: {}".format(repr(response)))
if response["LoadBalancers"][0]['State']['Code'] == 'failed':
raise EnvironmentError("Failure to create application load balancer. Response reported a failed state: {}".format(response["LoadBalancers"][0]['State']['Reason']))
load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
load_balancer_dns = response["LoadBalancers"][0]["DNSName"]
load_balancer_vpc = response["LoadBalancers"][0]["VpcId"]
waiter = self.elbv2_client.get_waiter('load_balancer_available')
print('Waiting for load balancer [{}] to become active..'.format(load_balancer_arn))
waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
# Match the lambda timeout on the load balancer.
self.elbv2_client.modify_load_balancer_attributes(
LoadBalancerArn=load_balancer_arn,
Attributes=[{
'Key': 'idle_timeout.timeout_seconds',
'Value': str(timeout)
}]
)
# Create/associate target group.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_target_group
kwargs = dict(
Name=lambda_name,
TargetType="lambda",
# TODO: Add options for health checks
)
response = self.elbv2_client.create_target_group(**kwargs)
if not(response["TargetGroups"]) or len(response["TargetGroups"]) != 1:
raise EnvironmentError("Failure to create application load balancer target group. Response was in unexpected format. Response was: {}".format(repr(response)))
target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
# Enable multi-value headers by default.
response = self.elbv2_client.modify_target_group_attributes(
TargetGroupArn=target_group_arn,
Attributes=[
{
'Key': 'lambda.multi_value_headers.enabled',
'Value': 'true'
},
]
)
# Allow execute permissions from target group to lambda.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.add_permission
kwargs = dict(
Action="lambda:InvokeFunction",
FunctionName="{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS),
Principal="elasticloadbalancing.amazonaws.com",
SourceArn=target_group_arn,
StatementId=lambda_name
)
response = self.lambda_client.add_permission(**kwargs)
# Register target group to lambda association.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.register_targets
kwargs = dict(
TargetGroupArn=target_group_arn,
Targets=[{"Id": "{}:{}".format(lambda_arn, ALB_LAMBDA_ALIAS)}]
)
response = self.elbv2_client.register_targets(**kwargs)
# Bind listener to load balancer with default rule to target group.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.create_listener
kwargs = dict(
# TODO: Listeners support custom ssl certificates (Certificates). For now we leave this default.
Certificates=[{"CertificateArn": alb_vpc_config['CertificateArn']}],
DefaultActions=[{
"Type": "forward",
"TargetGroupArn": target_group_arn,
}],
LoadBalancerArn=load_balancer_arn,
Protocol="HTTPS",
# TODO: Add option for custom ports
Port=443,
# TODO: Listeners support custom ssl security policy (SslPolicy). For now we leave this default.
)
response = self.elbv2_client.create_listener(**kwargs)
print("ALB created with DNS: {}".format(load_balancer_dns))
print("Note it may take several minutes for load balancer to become available.")
def undeploy_lambda_alb(self, lambda_name):
"""
The `zappa undeploy` functionality for ALB infrastructure.
"""
print("Undeploying ALB infrastructure...")
# Locate and delete alb/lambda permissions
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.remove_permission
self.lambda_client.remove_permission(
FunctionName=lambda_name,
StatementId=lambda_name
)
except botocore.exceptions.ClientError as e: # pragma: no cover
if "ResourceNotFoundException" in e.response["Error"]["Code"]:
pass
else:
raise e
# Locate and delete load balancer
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_load_balancers
response = self.elbv2_client.describe_load_balancers(
Names=[lambda_name]
)
if not(response["LoadBalancers"]) or len(response["LoadBalancers"]) > 1:
raise EnvironmentError("Failure to locate/delete ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
load_balancer_arn = response["LoadBalancers"][0]["LoadBalancerArn"]
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_listeners
response = self.elbv2_client.describe_listeners(LoadBalancerArn=load_balancer_arn)
if not(response["Listeners"]):
print('No listeners found.')
elif len(response["Listeners"]) > 1:
raise EnvironmentError("Failure to locate/delete listener for ALB named [{}]. Response was: {}".format(lambda_name, repr(response)))
else:
listener_arn = response["Listeners"][0]["ListenerArn"]
# Remove the listener. This explicit deletion of the listener seems necessary to avoid ResourceInUseExceptions when deleting target groups.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_listener
response = self.elbv2_client.delete_listener(ListenerArn=listener_arn)
# Remove the load balancer and wait for completion
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_load_balancer
response = self.elbv2_client.delete_load_balancer(LoadBalancerArn=load_balancer_arn)
waiter = self.elbv2_client.get_waiter('load_balancers_deleted')
print('Waiting for load balancer [{}] to be deleted..'.format(lambda_name))
waiter.wait(LoadBalancerArns=[load_balancer_arn], WaiterConfig={"Delay": 3})
except botocore.exceptions.ClientError as e: # pragma: no cover
print(e.response["Error"]["Code"])
if "LoadBalancerNotFound" in e.response["Error"]["Code"]:
pass
else:
raise e
# Locate and delete target group
try:
# Locate the lambda ARN
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/lambda.html#Lambda.Client.get_function
response = self.lambda_client.get_function(FunctionName=lambda_name)
lambda_arn = response["Configuration"]["FunctionArn"]
# Locate the target group ARN
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.describe_target_groups
response = self.elbv2_client.describe_target_groups(Names=[lambda_name])
if not(response["TargetGroups"]) or len(response["TargetGroups"]) > 1:
raise EnvironmentError("Failure to locate/delete ALB target group named [{}]. Response was: {}".format(lambda_name, repr(response)))
target_group_arn = response["TargetGroups"][0]["TargetGroupArn"]
# Deregister targets and wait for completion
self.elbv2_client.deregister_targets(
TargetGroupArn=target_group_arn,
Targets=[{"Id": lambda_arn}]
)
waiter = self.elbv2_client.get_waiter('target_deregistered')
print('Waiting for target [{}] to be deregistered...'.format(lambda_name))
waiter.wait(
TargetGroupArn=target_group_arn,
Targets=[{"Id": lambda_arn}],
WaiterConfig={"Delay": 3}
)
# Remove the target group
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/elbv2.html#ElasticLoadBalancingv2.Client.delete_target_group
self.elbv2_client.delete_target_group(TargetGroupArn=target_group_arn)
except botocore.exceptions.ClientError as e: # pragma: no cover
print(e.response["Error"]["Code"])
if "TargetGroupNotFound" in e.response["Error"]["Code"]:
pass
else:
raise e
##
# API Gateway
##
def create_api_gateway_routes( self,
lambda_arn,
api_name=None,
api_key_required=False,
authorization_type='NONE',
authorizer=None,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Create the API Gateway for this Zappa deployment.
Returns the new RestAPI CF resource.
"""
restapi = troposphere.apigateway.RestApi('Api')
restapi.Name = api_name or lambda_arn.split(':')[-1]
if not description:
description = 'Created automatically by Zappa.'
restapi.Description = description
endpoint_configuration = [] if endpoint_configuration is None else endpoint_configuration
if self.boto_session.region_name == "us-gov-west-1":
endpoint_configuration.append("REGIONAL")
if endpoint_configuration:
endpoint = troposphere.apigateway.EndpointConfiguration()
endpoint.Types = list(set(endpoint_configuration))
restapi.EndpointConfiguration = endpoint
if self.apigateway_policy:
restapi.Policy = json.loads(self.apigateway_policy)
self.cf_template.add_resource(restapi)
root_id = troposphere.GetAtt(restapi, 'RootResourceId')
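# Build the API Gateway -> Lambda invocation URI; GovCloud regions live in
# the 'aws-us-gov' partition.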
invocation_prefix = "aws" if self.boto_session.region_name != "us-gov-west-1" else "aws-us-gov"
invocations_uri = 'arn:' + invocation_prefix + ':apigateway:' + self.boto_session.region_name + ':lambda:path/2015-03-31/functions/' + lambda_arn + '/invocations'
##
# The Resources
##
authorizer_resource = None
if authorizer:
authorizer_lambda_arn = authorizer.get('arn', lambda_arn)
lambda_uri = 'arn:{invocation_prefix}:apigateway:{region_name}:lambda:path/2015-03-31/functions/{lambda_arn}/invocations'.format(
invocation_prefix=invocation_prefix,
region_name=self.boto_session.region_name,
lambda_arn=authorizer_lambda_arn
)
authorizer_resource = self.create_authorizer(
restapi, lambda_uri, authorizer
)
self.create_and_setup_methods( restapi,
root_id,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
0
)
if cors_options:
self.create_and_setup_cors( restapi,
root_id,
invocations_uri,
0,
cors_options
)
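# The greedy {proxy+} resource below forwards every sub-path to the same
# Lambda integration.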
resource = troposphere.apigateway.Resource('ResourceAnyPathSlashed')
self.cf_api_resources.append(resource.title)
resource.RestApiId = troposphere.Ref(restapi)
resource.ParentId = root_id
resource.PathPart = "{proxy+}"
self.cf_template.add_resource(resource)
self.create_and_setup_methods( restapi,
resource,
api_key_required,
invocations_uri,
authorization_type,
authorizer_resource,
1
) # pragma: no cover
if cors_options:
self.create_and_setup_cors( restapi,
resource,
invocations_uri,
1,
cors_options
) # pragma: no cover
return restapi
def create_authorizer(self, restapi, uri, authorizer):
"""
Create Authorizer for API gateway
"""
authorizer_type = authorizer.get("type", "TOKEN").upper()
identity_validation_expression = authorizer.get('validation_expression', None)
authorizer_resource = troposphere.apigateway.Authorizer("Authorizer")
authorizer_resource.RestApiId = troposphere.Ref(restapi)
authorizer_resource.Name = authorizer.get("name", "ZappaAuthorizer")
authorizer_resource.Type = authorizer_type
authorizer_resource.AuthorizerUri = uri
authorizer_resource.IdentitySource = "method.request.header.%s" % authorizer.get('token_header', 'Authorization')
if identity_validation_expression:
authorizer_resource.IdentityValidationExpression = identity_validation_expression
if authorizer_type == 'TOKEN':
if not self.credentials_arn:
self.get_credentials_arn()
authorizer_resource.AuthorizerResultTtlInSeconds = authorizer.get('result_ttl', 300)
authorizer_resource.AuthorizerCredentials = self.credentials_arn
if authorizer_type == 'COGNITO_USER_POOLS':
authorizer_resource.ProviderARNs = authorizer.get('provider_arns')
self.cf_api_resources.append(authorizer_resource.title)
self.cf_template.add_resource(authorizer_resource)
return authorizer_resource
def create_and_setup_methods(
self,
restapi,
resource,
api_key_required,
uri,
authorization_type,
authorizer_resource,
depth
):
"""
Set up the methods, integration responses and method responses for a given API Gateway resource.
"""
for method_name in self.http_methods:
method = troposphere.apigateway.Method(method_name + str(depth))
method.RestApiId = troposphere.Ref(restapi)
if type(resource) is troposphere.apigateway.Resource:
method.ResourceId = troposphere.Ref(resource)
else:
method.ResourceId = resource
method.HttpMethod = method_name.upper()
method.AuthorizationType = authorization_type
if authorizer_resource:
method.AuthorizerId = troposphere.Ref(authorizer_resource)
method.ApiKeyRequired = api_key_required
method.MethodResponses = []
self.cf_template.add_resource(method)
self.cf_api_resources.append(method.title)
if not self.credentials_arn:
self.get_credentials_arn()
credentials = self.credentials_arn # This must be a Role ARN
integration = troposphere.apigateway.Integration()
integration.CacheKeyParameters = []
integration.CacheNamespace = 'none'
integration.Credentials = credentials
integration.IntegrationHttpMethod = 'POST'
integration.IntegrationResponses = []
integration.PassthroughBehavior = 'NEVER'
integration.Type = 'AWS_PROXY'
integration.Uri = uri
method.Integration = integration
def create_and_setup_cors(self, restapi, resource, uri, depth, config):
"""
Set up the CORS OPTIONS method, its integration response and method response for a given API Gateway resource.
"""
if config is True:
config = {}
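# CORS preflight requests are answered by a MOCK OPTIONS method that simply
# returns the configured Access-Control-* headers.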
method_name = "OPTIONS"
method = troposphere.apigateway.Method(method_name + str(depth))
method.RestApiId = troposphere.Ref(restapi)
if type(resource) is troposphere.apigateway.Resource:
method.ResourceId = troposphere.Ref(resource)
else:
method.ResourceId = resource
method.HttpMethod = method_name.upper()
method.AuthorizationType = "NONE"
method_response = troposphere.apigateway.MethodResponse()
method_response.ResponseModels = {
"application/json": "Empty"
}
response_headers = {
"Access-Control-Allow-Headers": "'%s'" % ",".join(config.get(
"allowed_headers", ["Content-Type", "X-Amz-Date",
"Authorization", "X-Api-Key",
"X-Amz-Security-Token"])),
"Access-Control-Allow-Methods": "'%s'" % ",".join(config.get(
"allowed_methods", ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"])),
"Access-Control-Allow-Origin": "'%s'" % config.get(
"allowed_origin", "*")
}
method_response.ResponseParameters = {
"method.response.header.%s" % key: True for key in response_headers
}
method_response.StatusCode = "200"
method.MethodResponses = [
method_response
]
self.cf_template.add_resource(method)
self.cf_api_resources.append(method.title)
integration = troposphere.apigateway.Integration()
integration.Type = 'MOCK'
integration.PassthroughBehavior = 'NEVER'
integration.RequestTemplates = {
"application/json": "{\"statusCode\": 200}"
}
integration_response = troposphere.apigateway.IntegrationResponse()
integration_response.ResponseParameters = {
"method.response.header.%s" % key: value for key, value in response_headers.items()
}
integration_response.ResponseTemplates = {
"application/json": ""
}
integration_response.StatusCode = "200"
integration.IntegrationResponses = [
integration_response
]
integration.Uri = uri
method.Integration = integration
def deploy_api_gateway( self,
api_id,
stage_name,
stage_description="",
description="",
cache_cluster_enabled=False,
cache_cluster_size='0.5',
variables=None,
cloudwatch_log_level='OFF',
cloudwatch_data_trace=False,
cloudwatch_metrics_enabled=False,
cache_cluster_ttl=300,
cache_cluster_encrypted=False
):
"""
Deploy the API Gateway!
Return the deployed API URL.
"""
print("Deploying API Gateway..")
self.apigateway_client.create_deployment(
restApiId=api_id,
stageName=stage_name,
stageDescription=stage_description,
description=description,
cacheClusterEnabled=cache_cluster_enabled,
cacheClusterSize=cache_cluster_size,
variables=variables or {}
)
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
self.apigateway_client.update_stage(
restApiId=api_id,
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
self.get_patch_op('caching/ttlInSeconds', str(cache_cluster_ttl)),
self.get_patch_op('caching/dataEncrypted', cache_cluster_encrypted)
]
)
return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name)
def add_binary_support(self, api_id, cors=False):
"""
Add binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" not in response or "*/*" not in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': "add",
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# Fix for issues 699 and 1035: CORS and binary support don't work together,
# so go through each resource and update the contentHandling type.
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": "CONVERT_TO_TEXT"
}
]
)
def remove_binary_support(self, api_id, cors=False):
"""
Remove binary support
"""
response = self.apigateway_client.get_rest_api(
restApiId=api_id
)
if "binaryMediaTypes" in response and "*/*" in response["binaryMediaTypes"]:
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'remove',
'path': '/binaryMediaTypes/*~1*'
}
]
)
if cors:
# go through each resource and change the contentHandling type
response = self.apigateway_client.get_resources(restApiId=api_id)
resource_ids = [
item['id'] for item in response['items']
if 'OPTIONS' in item.get('resourceMethods', {})
]
for resource_id in resource_ids:
self.apigateway_client.update_integration(
restApiId=api_id,
resourceId=resource_id,
httpMethod='OPTIONS',
patchOperations=[
{
"op": "replace",
"path": "/contentHandling",
"value": ""
}
]
)
def add_api_compression(self, api_id, min_compression_size):
"""
Add Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
'value': str(min_compression_size)
}
]
)
def remove_api_compression(self, api_id):
"""
Remove Rest API compression
"""
self.apigateway_client.update_rest_api(
restApiId=api_id,
patchOperations=[
{
'op': 'replace',
'path': '/minimumCompressionSize',
}
]
)
def get_api_keys(self, api_id, stage_name):
"""
Generator that iterates over the API keys associated with an api_id and a stage_name.
"""
response = self.apigateway_client.get_api_keys(limit=500)
stage_key = '{}/{}'.format(api_id, stage_name)
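# API Gateway reports stage associations as '<api_id>/<stage_name>' strings.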
for api_key in response.get('items'):
if stage_key in api_key.get('stageKeys'):
yield api_key.get('id')
def create_api_key(self, api_id, stage_name):
"""
Create a new API key and link it with an api_id and a stage_name.
"""
response = self.apigateway_client.create_api_key(
name='{}_{}'.format(stage_name, api_id),
description='Api Key for {}'.format(api_id),
enabled=True,
stageKeys=[
{
'restApiId': '{}'.format(api_id),
'stageName': '{}'.format(stage_name)
},
]
)
print('Created a new x-api-key: {}'.format(response['id']))
def remove_api_key(self, api_id, stage_name):
"""
Remove a generated API key for api_id and stage_name
"""
response = self.apigateway_client.get_api_keys(
limit=1,
nameQuery='{}_{}'.format(stage_name, api_id)
)
for api_key in response.get('items'):
self.apigateway_client.delete_api_key(
apiKey="{}".format(api_key['id'])
)
def add_api_stage_to_api_key(self, api_key, api_id, stage_name):
"""
Add api stage to Api key
"""
self.apigateway_client.update_api_key(
apiKey=api_key,
patchOperations=[
{
'op': 'add',
'path': '/stages',
'value': '{}/{}'.format(api_id, stage_name)
}
]
)
def get_patch_op(self, keypath, value, op='replace'):
"""
Return an object that describes a configuration change on the given stage.
The setting will be applied to all available HTTP methods.
"""
if isinstance(value, bool):
value = str(value).lower()
return {'op': op, 'path': '/*/*/{}'.format(keypath), 'value': value}
def get_rest_apis(self, project_name):
"""
Generator that iterates over every available API matching the given project name.
"""
all_apis = self.apigateway_client.get_rest_apis(
limit=500
)
for api in all_apis['items']:
if api['name'] != project_name:
continue
yield api
def undeploy_api_gateway(self, lambda_name, domain_name=None, base_path=None):
"""
Delete a deployed REST API Gateway.
"""
print("Deleting API Gateway..")
api_id = self.get_api_id(lambda_name)
if domain_name:
# XXX - Remove Route53 smartly here?
# XXX - This doesn't raise, but doesn't work either.
try:
self.apigateway_client.delete_base_path_mapping(
domainName=domain_name,
basePath='(none)' if base_path is None else base_path
)
except Exception as e:
# We may not have actually set up the domain.
pass
was_deleted = self.delete_stack(lambda_name, wait=True)
if not was_deleted:
# try erasing it with the older method
for api in self.get_rest_apis(lambda_name):
self.apigateway_client.delete_rest_api(
restApiId=api['id']
)
def update_stage_config( self,
project_name,
stage_name,
cloudwatch_log_level,
cloudwatch_data_trace,
cloudwatch_metrics_enabled
):
"""
Update CloudWatch metrics configuration.
"""
if cloudwatch_log_level not in self.cloudwatch_log_levels:
cloudwatch_log_level = 'OFF'
for api in self.get_rest_apis(project_name):
self.apigateway_client.update_stage(
restApiId=api['id'],
stageName=stage_name,
patchOperations=[
self.get_patch_op('logging/loglevel', cloudwatch_log_level),
self.get_patch_op('logging/dataTrace', cloudwatch_data_trace),
self.get_patch_op('metrics/enabled', cloudwatch_metrics_enabled),
]
)
def update_cognito(self, lambda_name, user_pool, lambda_configs, lambda_arn):
LambdaConfig = {}
for config in lambda_configs:
LambdaConfig[config] = lambda_arn
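# update_user_pool replaces the pool's configuration wholesale, so the
# existing settings are read back and re-submitted with only the Lambda
# triggers swapped out.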
description = self.cognito_client.describe_user_pool(UserPoolId=user_pool)
description_kwargs = {}
for key, value in description['UserPool'].items():
if key in ('UserPoolId', 'Policies', 'AutoVerifiedAttributes', 'SmsVerificationMessage',
'EmailVerificationMessage', 'EmailVerificationSubject', 'VerificationMessageTemplate',
'SmsAuthenticationMessage', 'MfaConfiguration', 'DeviceConfiguration',
'EmailConfiguration', 'SmsConfiguration', 'UserPoolTags',
'AdminCreateUserConfig'):
description_kwargs[key] = value
elif key == 'LambdaConfig':
for lckey, lcvalue in value.items():
if lckey in LambdaConfig:
value[lckey] = LambdaConfig[lckey]
print("value", value)
description_kwargs[key] = value
if 'LambdaConfig' not in description_kwargs:
description_kwargs['LambdaConfig'] = LambdaConfig
if 'TemporaryPasswordValidityDays' in description_kwargs['Policies']['PasswordPolicy']:
description_kwargs['AdminCreateUserConfig'].pop(
'UnusedAccountValidityDays', None)
if 'UnusedAccountValidityDays' in description_kwargs['AdminCreateUserConfig']:
description_kwargs['Policies']['PasswordPolicy']\
['TemporaryPasswordValidityDays'] = description_kwargs['AdminCreateUserConfig'].pop(
'UnusedAccountValidityDays', None)
result = self.cognito_client.update_user_pool(UserPoolId=user_pool, **description_kwargs)
if result['ResponseMetadata']['HTTPStatusCode'] != 200:
print("Cognito: Failed to update user pool", result)
# Now we need to add a policy to the IAM that allows cognito access
result = self.create_event_permission(lambda_name,
'cognito-idp.amazonaws.com',
'arn:aws:cognito-idp:{}:{}:userpool/{}'.
format(self.aws_region,
self.sts_client.get_caller_identity().get('Account'),
user_pool)
)
if result['ResponseMetadata']['HTTPStatusCode'] != 201:
print("Cognito: Failed to update lambda permission", result)
def delete_stack(self, name, wait=False):
"""
Delete the CF stack managed by Zappa.
"""
try:
stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
except: # pragma: no cover
print('No Zappa stack named {0}'.format(name))
return False
tags = {x['Key']:x['Value'] for x in stack['Tags']}
if tags.get('ZappaProject') == name:
self.cf_client.delete_stack(StackName=name)
if wait:
waiter = self.cf_client.get_waiter('stack_delete_complete')
print('Waiting for stack {0} to be deleted..'.format(name))
waiter.wait(StackName=name)
return True
else:
print('ZappaProject tag not found on {0}, doing nothing'.format(name))
return False
def create_stack_template( self,
lambda_arn,
lambda_name,
api_key_required,
iam_authorization,
authorizer,
cors_options=None,
description=None,
endpoint_configuration=None
):
"""
Build the entire CF stack.
Just used for the API Gateway, but could be expanded in the future.
"""
auth_type = "NONE"
if iam_authorization and authorizer:
logger.warn("Both IAM Authorization and Authorizer are specified, this is not possible. "
"Setting Auth method to IAM Authorization")
authorizer = None
auth_type = "AWS_IAM"
elif iam_authorization:
auth_type = "AWS_IAM"
elif authorizer:
auth_type = authorizer.get("type", "CUSTOM")
# build a fresh template
self.cf_template = troposphere.Template()
self.cf_template.add_description('Automatically generated with Zappa')
self.cf_api_resources = []
self.cf_parameters = {}
restapi = self.create_api_gateway_routes(
lambda_arn,
api_name=lambda_name,
api_key_required=api_key_required,
authorization_type=auth_type,
authorizer=authorizer,
cors_options=cors_options,
description=description,
endpoint_configuration=endpoint_configuration
)
return self.cf_template
def update_stack(self, name, working_bucket, wait=False, update_only=False, disable_progress=False):
"""
Update or create the CF stack managed by Zappa.
"""
capabilities = []
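# Write the generated CloudFormation template to a timestamped JSON file and
# stage it in S3 for the create/update call.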
template = name + '-template-' + str(int(time.time())) + '.json'
with open(template, 'wb') as out:
out.write(bytes(self.cf_template.to_json(indent=None, separators=(',',':')), "utf-8"))
self.upload_to_s3(template, working_bucket, disable_progress=disable_progress)
if self.boto_session.region_name == "us-gov-west-1":
url = 'https://s3-us-gov-west-1.amazonaws.com/{0}/{1}'.format(working_bucket, template)
else:
url = 'https://s3.amazonaws.com/{0}/{1}'.format(working_bucket, template)
tags = [{'Key': key, 'Value': self.tags[key]}
for key in self.tags.keys()
if key != 'ZappaProject']
tags.append({'Key':'ZappaProject','Value':name})
update = True
try:
self.cf_client.describe_stacks(StackName=name)
except botocore.client.ClientError:
update = False
if update_only and not update:
print('CloudFormation stack missing, re-deploy to enable updates')
return
if not update:
self.cf_client.create_stack(StackName=name,
Capabilities=capabilities,
TemplateURL=url,
Tags=tags)
print('Waiting for stack {0} to create (this can take a bit)..'.format(name))
else:
try:
self.cf_client.update_stack(StackName=name,
Capabilities=capabilities,
TemplateURL=url,
Tags=tags)
print('Waiting for stack {0} to update..'.format(name))
except botocore.client.ClientError as e:
if e.response['Error']['Message'] == 'No updates are to be performed.':
wait = False
else:
raise
if wait:
total_resources = len(self.cf_template.resources)
current_resources = 0
sr = self.cf_client.get_paginator('list_stack_resources')
progress = tqdm(total=total_resources, unit='res', disable=disable_progress)
while True:
time.sleep(3)
result = self.cf_client.describe_stacks(StackName=name)
if not result['Stacks']:
continue # might need to wait a bit
if result['Stacks'][0]['StackStatus'] in ['CREATE_COMPLETE', 'UPDATE_COMPLETE']:
break
# Something has gone wrong.
# Is raising enough? Should we also remove the Lambda function?
if result['Stacks'][0]['StackStatus'] in [
'DELETE_COMPLETE',
'DELETE_IN_PROGRESS',
'ROLLBACK_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS',
'UPDATE_ROLLBACK_COMPLETE'
]:
raise EnvironmentError("Stack creation failed. "
"Please check your CloudFormation console. "
"You may also need to `undeploy`.")
count = 0
for result in sr.paginate(StackName=name):
done = (1 for x in result['StackResourceSummaries']
if 'COMPLETE' in x['ResourceStatus'])
count += sum(done)
if count:
# We can end up in a situation where we have more resources being created
# than anticipated.
if (count - current_resources) > 0:
progress.update(count - current_resources)
current_resources = count
progress.close()
try:
os.remove(template)
except OSError:
pass
self.remove_from_s3(template, working_bucket)
def stack_outputs(self, name):
"""
Given a name, describe the CloudFormation stack and return a dict of the
stack Outputs, else return an empty dict.
"""
try:
stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
return {x['OutputKey']: x['OutputValue'] for x in stack['Outputs']}
except botocore.client.ClientError:
return {}
def get_api_url(self, lambda_name, stage_name):
"""
Given a lambda_name and stage_name, return a valid API URL.
"""
api_id = self.get_api_id(lambda_name)
if api_id:
return "https://{}.execute-api.{}.amazonaws.com/{}".format(api_id, self.boto_session.region_name, stage_name)
else:
return None
def get_api_id(self, lambda_name):
"""
Given a lambda_name, return the API id.
"""
try:
response = self.cf_client.describe_stack_resource(StackName=lambda_name,
LogicalResourceId='Api')
return response['StackResourceDetail'].get('PhysicalResourceId', None)
except: # pragma: no cover
try:
# Try the old method (project was probably made on an older, non CF version)
response = self.apigateway_client.get_rest_apis(limit=500)
for item in response['items']:
if item['name'] == lambda_name:
return item['id']
logger.exception('Could not get API ID.')
return None
except: # pragma: no cover
# We don't even have an API deployed. That's okay!
return None
def create_domain_name(self,
domain_name,
certificate_name,
certificate_body=None,
certificate_private_key=None,
certificate_chain=None,
certificate_arn=None,
lambda_name=None,
stage=None,
base_path=None):
"""
Creates the API GW domain and returns the resulting DNS name.
"""
# This is a Let's Encrypt or custom certificate
if not certificate_arn:
agw_response = self.apigateway_client.create_domain_name(
domainName=domain_name,
certificateName=certificate_name,
certificateBody=certificate_body,
certificatePrivateKey=certificate_private_key,
certificateChain=certificate_chain
)
# This is an AWS ACM-hosted Certificate
else:
agw_response = self.apigateway_client.create_domain_name(
domainName=domain_name,
certificateName=certificate_name,
certificateArn=certificate_arn
)
api_id = self.get_api_id(lambda_name)
if not api_id:
raise LookupError("No API URL to certify found - did you deploy?")
self.apigateway_client.create_base_path_mapping(
domainName=domain_name,
basePath='' if base_path is None else base_path,
restApiId=api_id,
stage=stage
)
return agw_response['distributionDomainName']
def update_route53_records(self, domain_name, dns_name):
"""
Updates Route53 Records following GW domain creation
"""
zone_id = self.get_hosted_zone_id_for_domain(domain_name)
is_apex = self.route53.get_hosted_zone(Id=zone_id)['HostedZone']['Name'][:-1] == domain_name
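# Zone apex records cannot be CNAMEs, so the apex gets an ALIAS A record
# pointing at CloudFront; subdomains get a plain CNAME.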
if is_apex:
record_set = {
'Name': domain_name,
'Type': 'A',
'AliasTarget': {
'HostedZoneId': 'Z2FDTNDATAQYW2', # This is a magic value that means "CloudFront"
'DNSName': dns_name,
'EvaluateTargetHealth': False
}
}
else:
record_set = {
'Name': domain_name,
'Type': 'CNAME',
'ResourceRecords': [
{
'Value': dns_name
}
],
'TTL': 60
}
# Related: https://github.com/boto/boto3/issues/157
# and: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
# and policy: https://spin.atomicobject.com/2016/04/28/route-53-hosted-zone-managment/
# pure_zone_id = zone_id.split('/hostedzone/')[1]
# XXX: ClientError: An error occurred (InvalidChangeBatch) when calling the ChangeResourceRecordSets operation:
# Tried to create an alias that targets d1awfeji80d0k2.cloudfront.net., type A in zone Z1XWOQP59BYF6Z,
# but the alias target name does not lie within the target zone
response = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch={
'Changes': [
{
'Action': 'UPSERT',
'ResourceRecordSet': record_set
}
]
}
)
return response
def update_domain_name(self,
domain_name,
certificate_name=None,
certificate_body=None,
certificate_private_key=None,
certificate_chain=None,
certificate_arn=None,
lambda_name=None,
stage=None,
route53=True,
base_path=None):
"""
This updates the certificate information for an existing domain,
with arguments similar to boto's update_domain_name API Gateway call.
It returns the resulting new domain information, including the new certificate's ARN
if one was created during this process.
Previously, this method involved downtime that could take up to 40 minutes
because the API Gateway API only allowed this by deleting the domain and then re-creating it.
Related issues: https://github.com/Miserlou/Zappa/issues/590
https://github.com/Miserlou/Zappa/issues/588
https://github.com/Miserlou/Zappa/pull/458
https://github.com/Miserlou/Zappa/issues/882
https://github.com/Miserlou/Zappa/pull/883
"""
print("Updating domain name!")
certificate_name = certificate_name + str(time.time())
api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
if not certificate_arn\
and certificate_body and certificate_private_key and certificate_chain:
acm_certificate = self.acm_client.import_certificate(Certificate=certificate_body,
PrivateKey=certificate_private_key,
CertificateChain=certificate_chain)
certificate_arn = acm_certificate['CertificateArn']
self.update_domain_base_path_mapping(domain_name, lambda_name, stage, base_path)
return self.apigateway_client.update_domain_name(domainName=domain_name,
patchOperations=[
{"op" : "replace",
"path" : "/certificateName",
"value" : certificate_name},
{"op" : "replace",
"path" : "/certificateArn",
"value" : certificate_arn}
])
def update_domain_base_path_mapping(self, domain_name, lambda_name, stage, base_path):
"""
Update domain base path mapping on API Gateway if it was changed
"""
api_id = self.get_api_id(lambda_name)
if not api_id:
print("Warning! Can't update base path mapping!")
return
base_path_mappings = self.apigateway_client.get_base_path_mappings(domainName=domain_name)
found = False
for base_path_mapping in base_path_mappings.get('items', []):
if base_path_mapping['restApiId'] == api_id and base_path_mapping['stage'] == stage:
found = True
if base_path_mapping['basePath'] != base_path:
self.apigateway_client.update_base_path_mapping(domainName=domain_name,
basePath=base_path_mapping['basePath'],
patchOperations=[
{"op" : "replace",
"path" : "/basePath",
"value" : '' if base_path is None else base_path}
])
if not found:
self.apigateway_client.create_base_path_mapping(
domainName=domain_name,
basePath='' if base_path is None else base_path,
restApiId=api_id,
stage=stage
)
def get_all_zones(self):
"""Same behaviour of list_host_zones, but transparently handling pagination."""
zones = {'HostedZones': []}
new_zones = self.route53.list_hosted_zones(MaxItems='100')
while new_zones['IsTruncated']:
zones['HostedZones'] += new_zones['HostedZones']
new_zones = self.route53.list_hosted_zones(Marker=new_zones['NextMarker'], MaxItems='100')
zones['HostedZones'] += new_zones['HostedZones']
return zones
def get_domain_name(self, domain_name, route53=True):
"""
Scan our hosted zones for the record of a given name.
Returns the record entry, else None.
"""
# Make sure api gateway domain is present
try:
self.apigateway_client.get_domain_name(domainName=domain_name)
except Exception:
return None
if not route53:
return True
try:
zones = self.get_all_zones()
for zone in zones['HostedZones']:
records = self.route53.list_resource_record_sets(HostedZoneId=zone['Id'])
for record in records['ResourceRecordSets']:
if record['Type'] in ('CNAME', 'A') and record['Name'][:-1] == domain_name:
return record
except Exception as e:
return None
##
# Old, automatic logic.
# If re-introduced, should be moved to a new function.
# Related ticket: https://github.com/Miserlou/Zappa/pull/458
##
# We may be in a position where Route53 doesn't have a domain, but the API Gateway does.
# We need to delete this before we can create the new Route53.
# try:
# api_gateway_domain = self.apigateway_client.get_domain_name(domainName=domain_name)
# self.apigateway_client.delete_domain_name(domainName=domain_name)
# except Exception:
# pass
return None
##
# IAM
##
def get_credentials_arn(self):
"""
Given our role name, get and set the credentials_arn.
"""
role = self.iam.Role(self.role_name)
self.credentials_arn = role.arn
return role, self.credentials_arn
def create_iam_roles(self):
"""
        Create and define the IAM roles and policies necessary for Zappa.
If the IAM role already exists, it will be updated if necessary.
"""
attach_policy_obj = json.loads(self.attach_policy)
assume_policy_obj = json.loads(self.assume_policy)
if self.extra_permissions:
for permission in self.extra_permissions:
attach_policy_obj['Statement'].append(dict(permission))
self.attach_policy = json.dumps(attach_policy_obj)
updated = False
# Create the role if needed
try:
role, credentials_arn = self.get_credentials_arn()
except botocore.client.ClientError:
print("Creating " + self.role_name + " IAM Role..")
role = self.iam.create_role(
RoleName=self.role_name,
AssumeRolePolicyDocument=self.assume_policy
)
self.credentials_arn = role.arn
updated = True
# create or update the role's policies if needed
policy = self.iam.RolePolicy(self.role_name, 'zappa-permissions')
try:
if policy.policy_document != attach_policy_obj:
print("Updating zappa-permissions policy on " + self.role_name + " IAM Role.")
policy.put(PolicyDocument=self.attach_policy)
updated = True
except botocore.client.ClientError:
print("Creating zappa-permissions policy on " + self.role_name + " IAM Role.")
policy.put(PolicyDocument=self.attach_policy)
updated = True
if role.assume_role_policy_document != assume_policy_obj and \
set(role.assume_role_policy_document['Statement'][0]['Principal']['Service']) != set(assume_policy_obj['Statement'][0]['Principal']['Service']):
print("Updating assume role policy on " + self.role_name + " IAM Role.")
self.iam_client.update_assume_role_policy(
RoleName=self.role_name,
PolicyDocument=self.assume_policy
)
updated = True
return self.credentials_arn, updated
def _clear_policy(self, lambda_name):
"""
Remove obsolete policy statements to prevent policy from bloating over the limit after repeated updates.
"""
logger.debug('Clear policy.')
try:
policy_response = self.lambda_client.get_policy(
FunctionName=lambda_name
)
if policy_response['ResponseMetadata']['HTTPStatusCode'] == 200:
statement = json.loads(policy_response['Policy'])['Statement']
for s in statement:
delete_response = self.lambda_client.remove_permission(
FunctionName=lambda_name,
StatementId=s['Sid']
)
if delete_response['ResponseMetadata']['HTTPStatusCode'] != 204:
logger.error('Failed to delete an obsolete policy statement: {}'.format(policy_response))
else:
logger.debug('Failed to load Lambda function policy: {}'.format(policy_response))
except ClientError as e:
if e.args[0].find('ResourceNotFoundException') > -1:
logger.debug('No policy found, must be first run.')
else:
logger.error('Unexpected client error {}'.format(e.args[0]))
##
# CloudWatch Events
##
def create_event_permission(self, lambda_name, principal, source_arn):
"""
Create permissions to link to an event.
Related: http://docs.aws.amazon.com/lambda/latest/dg/with-s3-example-configure-event-source.html
"""
logger.debug('Adding new permission to invoke Lambda function: {}'.format(lambda_name))
permission_response = self.lambda_client.add_permission(
FunctionName=lambda_name,
StatementId=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)),
Action='lambda:InvokeFunction',
Principal=principal,
SourceArn=source_arn,
)
if permission_response['ResponseMetadata']['HTTPStatusCode'] != 201:
print('Problem creating permission to invoke Lambda function')
return None # XXX: Raise?
return permission_response
def schedule_events(self, lambda_arn, lambda_name, events, update_policy=True, default=True):
"""
Given a Lambda ARN, name and a list of events, schedule this as CloudWatch Events.
        'events' is a list of dictionaries, where the dict must contain the string
        of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
Expressions can be in rate or cron format:
http://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
"""
        # The stream sources - DynamoDB, Kinesis and SQS - work differently from the other services (pull vs. push)
        # and do not require event permissions. They do require additional permissions on the Lambda roles though.
# http://docs.aws.amazon.com/lambda/latest/dg/lambda-api-permissions-ref.html
pull_services = ['dynamodb', 'kinesis', 'sqs']
# XXX: Not available in Lambda yet.
# We probably want to execute the latest code.
# if default:
# lambda_arn = lambda_arn + ":$LATEST"
self.unschedule_events(lambda_name=lambda_name, lambda_arn=lambda_arn, events=events,
excluded_source_services=pull_services, clear_policy=update_policy)
for event in events:
function = event['function']
expression = event.get('expression', None) # single expression
            expressions = event.get('expressions', None) # multiple expressions
kwargs = event.get('kwargs', {}) # optional dict of keyword arguments for the event
event_source = event.get('event_source', None)
description = event.get('description', function)
# - If 'cron' or 'rate' in expression, use ScheduleExpression
# - Else, use EventPattern
# - ex https://github.com/awslabs/aws-lambda-ddns-function
if not self.credentials_arn:
self.get_credentials_arn()
if expression:
expressions = [expression] # same code for single and multiple expression
if expressions:
for index, expression in enumerate(expressions):
name = self.get_scheduled_event_name(event, function, lambda_name, index)
# if it's possible that we truncated name, generate a unique, shortened name
# https://github.com/Miserlou/Zappa/issues/970
if len(name) >= 64:
rule_name = self.get_hashed_rule_name(event, function, lambda_name)
else:
rule_name = name
rule_response = self.events_client.put_rule(
Name=rule_name,
ScheduleExpression=expression,
State='ENABLED',
Description=description,
RoleArn=self.credentials_arn
)
if 'RuleArn' in rule_response:
logger.debug('Rule created. ARN {}'.format(rule_response['RuleArn']))
# Specific permissions are necessary for any trigger to work.
if update_policy:
self.create_event_permission(lambda_name, 'events.amazonaws.com', rule_response['RuleArn'])
# Overwriting the input, supply the original values and add kwargs
input_template = '{"time": <time>, ' \
'"detail-type": <detail-type>, ' \
'"source": <source>,' \
'"account": <account>, ' \
'"region": <region>,' \
'"detail": <detail>, ' \
'"version": <version>,' \
'"resources": <resources>,' \
'"id": <id>,' \
'"kwargs": %s' \
'}' % json.dumps(kwargs)
# Create the CloudWatch event ARN for this function.
# https://github.com/Miserlou/Zappa/issues/359
target_response = self.events_client.put_targets(
Rule=rule_name,
Targets=[
{
'Id': 'Id' + ''.join(random.choice(string.digits) for _ in range(12)),
'Arn': lambda_arn,
'InputTransformer': {
'InputPathsMap': {
'time': '$.time',
'detail-type': '$.detail-type',
'source': '$.source',
'account': '$.account',
'region': '$.region',
'detail': '$.detail',
'version': '$.version',
'resources': '$.resources',
'id': '$.id'
},
'InputTemplate': input_template
}
}
]
)
if target_response['ResponseMetadata']['HTTPStatusCode'] == 200:
print("Scheduled {} with expression {}!".format(rule_name, expression))
else:
print("Problem scheduling {} with expression {}.".format(rule_name, expression))
elif event_source:
service = self.service_from_arn(event_source['arn'])
if service not in pull_services:
svc = ','.join(event['event_source']['events'])
if update_policy:
self.create_event_permission(
lambda_name,
service + '.amazonaws.com',
event['event_source']['arn']
)
else:
svc = service
rule_response = add_event_source(
event_source,
lambda_arn,
function,
self.boto_session
)
if rule_response == 'successful':
print("Created {} event schedule for {}!".format(svc, function))
elif rule_response == 'failed':
print("Problem creating {} event schedule for {}!".format(svc, function))
elif rule_response == 'exists':
print("{} event schedule for {} already exists - Nothing to do here.".format(svc, function))
elif rule_response == 'dryrun':
print("Dryrun for creating {} event schedule for {}!!".format(svc, function))
else:
print("Could not create event {} - Please define either an expression or an event source".format(name))
@staticmethod
def get_scheduled_event_name(event, function, lambda_name, index=0):
name = event.get('name', function)
if name != function:
# a custom event name has been provided, make sure function name is included as postfix,
# otherwise zappa's handler won't be able to locate the function.
name = '{}-{}'.format(name, function)
if index:
# to ensure unique cloudwatch rule names in the case of multiple expressions
# prefix all entries bar the first with the index
# Related: https://github.com/Miserlou/Zappa/pull/1051
name = '{}-{}'.format(index, name)
# prefix scheduled event names with lambda name. So we can look them up later via the prefix.
return Zappa.get_event_name(lambda_name, name)
@staticmethod
def get_event_name(lambda_name, name):
"""
Returns an AWS-valid Lambda event name.
"""
return '{prefix:.{width}}-{postfix}'.format(prefix=lambda_name, width=max(0, 63 - len(name)), postfix=name)[:64]
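    # Illustrative sketch (added; the names below are hypothetical): with short inputs
    # the prefix survives intact, e.g. get_event_name('my-app-dev', 'hourly-task')
    # -> 'my-app-dev-hourly-task'. With a very long lambda_name the prefix is clipped
    # so that prefix + '-' + name never exceeds the 64-character limit.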
@staticmethod
def get_hashed_rule_name(event, function, lambda_name):
"""
Returns an AWS-valid CloudWatch rule name using a digest of the event name, lambda name, and function.
This allows support for rule names that may be longer than the 64 char limit.
"""
event_name = event.get('name', function)
name_hash = hashlib.sha1('{}-{}'.format(lambda_name, event_name).encode('UTF-8')).hexdigest()
return Zappa.get_event_name(name_hash, function)
def delete_rule(self, rule_name):
"""
Delete a CWE rule.
This deletes them, but they will still show up in the AWS console.
Annoying.
"""
logger.debug('Deleting existing rule {}'.format(rule_name))
# All targets must be removed before
# we can actually delete the rule.
try:
targets = self.events_client.list_targets_by_rule(Rule=rule_name)
except botocore.exceptions.ClientError as e:
# This avoids misbehavior if low permissions, related: https://github.com/Miserlou/Zappa/issues/286
error_code = e.response['Error']['Code']
if error_code == 'AccessDeniedException':
raise
else:
logger.debug('No target found for this rule: {} {}'.format(rule_name, e.args[0]))
return
if 'Targets' in targets and targets['Targets']:
self.events_client.remove_targets(Rule=rule_name, Ids=[x['Id'] for x in targets['Targets']])
else: # pragma: no cover
logger.debug('No target to delete')
# Delete our rule.
self.events_client.delete_rule(Name=rule_name)
def get_event_rule_names_for_lambda(self, lambda_arn):
"""
Get all of the rule names associated with a lambda function.
"""
response = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn)
rule_names = response['RuleNames']
# Iterate when the results are paginated
while 'NextToken' in response:
response = self.events_client.list_rule_names_by_target(TargetArn=lambda_arn,
NextToken=response['NextToken'])
rule_names.extend(response['RuleNames'])
return rule_names
def get_event_rules_for_lambda(self, lambda_arn):
"""
Get all of the rule details associated with this function.
"""
rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
return [self.events_client.describe_rule(Name=r) for r in rule_names]
    def unschedule_events(self, events, lambda_arn=None, lambda_name=None, excluded_source_services=None,
                          clear_policy=True):
        """
        Given a list of events, unschedule these CloudWatch Events.
        'events' is a list of dictionaries, where the dict must contain the string
        of a 'function' and the string of the event 'expression', and an optional 'name' and 'description'.
        """
        excluded_source_services = excluded_source_services or []
        if clear_policy:
            self._clear_policy(lambda_name)
rule_names = self.get_event_rule_names_for_lambda(lambda_arn=lambda_arn)
for rule_name in rule_names:
self.delete_rule(rule_name)
print('Unscheduled ' + rule_name + '.')
non_cwe = [e for e in events if 'event_source' in e]
for event in non_cwe:
# TODO: This WILL miss non CW events that have been deployed but changed names. Figure out a way to remove
# them no matter what.
# These are non CWE event sources.
function = event['function']
name = event.get('name', function)
event_source = event.get('event_source', function)
service = self.service_from_arn(event_source['arn'])
            # DynamoDB and Kinesis streams take quite a while to set up after they are created and do not need to be
# re-scheduled when a new Lambda function is deployed. Therefore, they should not be removed during zappa
# update or zappa schedule.
if service not in excluded_source_services:
remove_event_source(
event_source,
lambda_arn,
function,
self.boto_session
)
print("Removed event {}{}.".format(
name,
" ({})".format(str(event_source['events'])) if 'events' in event_source else '')
)
    ##
# Async / SNS
##
def create_async_sns_topic(self, lambda_name, lambda_arn, update_policy=True):
"""
Create the SNS-based async topic.
"""
topic_name = get_topic_name(lambda_name)
# Create SNS topic
topic_arn = self.sns_client.create_topic(
Name=topic_name)['TopicArn']
# Create subscription
self.sns_client.subscribe(
TopicArn=topic_arn,
Protocol='lambda',
Endpoint=lambda_arn
)
# Add Lambda permission for SNS to invoke function
self.create_event_permission(
lambda_name=lambda_name,
principal='sns.amazonaws.com',
source_arn=topic_arn
)
        # Add rule for SNS topic as an event source
add_event_source(
event_source={
"arn": topic_arn,
"events": ["sns:Publish"]
},
lambda_arn=lambda_arn,
target_function="zappa.asynchronous.route_task",
boto_session=self.boto_session
)
return topic_arn
def remove_async_sns_topic(self, lambda_name):
"""
Remove the async SNS topic.
"""
topic_name = get_topic_name(lambda_name)
removed_arns = []
for sub in self.sns_client.list_subscriptions()['Subscriptions']:
if topic_name in sub['TopicArn']:
self.sns_client.delete_topic(TopicArn=sub['TopicArn'])
removed_arns.append(sub['TopicArn'])
return removed_arns
    ##
# Async / DynamoDB
##
def _set_async_dynamodb_table_ttl(self, table_name):
self.dynamodb_client.update_time_to_live(
TableName=table_name,
TimeToLiveSpecification={
'Enabled': True,
'AttributeName': 'ttl'
}
)
def create_async_dynamodb_table(self, table_name, read_capacity, write_capacity):
"""
Create the DynamoDB table for async task return values
"""
try:
dynamodb_table = self.dynamodb_client.describe_table(TableName=table_name)
return False, dynamodb_table
# catch this exception (triggered if the table doesn't exist)
except botocore.exceptions.ClientError:
dynamodb_table = self.dynamodb_client.create_table(
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
TableName=table_name,
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
ProvisionedThroughput = {
'ReadCapacityUnits': read_capacity,
'WriteCapacityUnits': write_capacity
}
)
if dynamodb_table:
try:
self._set_async_dynamodb_table_ttl(table_name)
except botocore.exceptions.ClientError:
# this fails because the operation is async, so retry
time.sleep(10)
self._set_async_dynamodb_table_ttl(table_name)
return True, dynamodb_table
def remove_async_dynamodb_table(self, table_name):
"""
Remove the DynamoDB Table used for async return values
"""
self.dynamodb_client.delete_table(TableName=table_name)
##
# CloudWatch Logging
##
def fetch_logs(self, lambda_name, filter_pattern='', limit=10000, start_time=0):
"""
Fetch the CloudWatch logs for a given Lambda name.
"""
log_name = '/aws/lambda/' + lambda_name
streams = self.logs_client.describe_log_streams(
logGroupName=log_name,
descending=True,
orderBy='LastEventTime'
)
all_streams = streams['logStreams']
all_names = [stream['logStreamName'] for stream in all_streams]
        events = []
        response = {}
        # Amazon uses millisecond epoch for some reason.
        # Thanks, Jeff.
        # Convert once, before the loop, so paginated calls don't multiply start_time again.
        start_time = start_time * 1000
        end_time = int(time.time()) * 1000
        while not response or 'nextToken' in response:
            extra_args = {}
            if 'nextToken' in response:
                extra_args['nextToken'] = response['nextToken']
response = self.logs_client.filter_log_events(
logGroupName=log_name,
logStreamNames=all_names,
startTime=start_time,
endTime=end_time,
filterPattern=filter_pattern,
limit=limit,
interleaved=True, # Does this actually improve performance?
**extra_args
)
if response and 'events' in response:
events += response['events']
return sorted(events, key=lambda k: k['timestamp'])
def remove_log_group(self, group_name):
"""
        Remove the CloudWatch log group with the given name.
"""
print("Removing log group: {}".format(group_name))
try:
self.logs_client.delete_log_group(logGroupName=group_name)
except botocore.exceptions.ClientError as e:
print("Couldn't remove '{}' because of: {}".format(group_name, e))
def remove_lambda_function_logs(self, lambda_function_name):
"""
Remove all logs that are assigned to a given lambda function id.
"""
self.remove_log_group('/aws/lambda/{}'.format(lambda_function_name))
def remove_api_gateway_logs(self, project_name):
"""
        Remove all logs that are assigned to a given rest api id.
"""
for rest_api in self.get_rest_apis(project_name):
for stage in self.apigateway_client.get_stages(restApiId=rest_api['id'])['item']:
self.remove_log_group('API-Gateway-Execution-Logs_{}/{}'.format(rest_api['id'], stage['stageName']))
##
# Route53 Domain Name Entries
##
def get_hosted_zone_id_for_domain(self, domain):
"""
Get the Hosted Zone ID for a given domain.
"""
all_zones = self.get_all_zones()
return self.get_best_match_zone(all_zones, domain)
@staticmethod
def get_best_match_zone(all_zones, domain):
"""Return zone id which name is closer matched with domain name."""
# Related: https://github.com/Miserlou/Zappa/issues/459
public_zones = [zone for zone in all_zones['HostedZones'] if not zone['Config']['PrivateZone']]
zones = {zone['Name'][:-1]: zone['Id'] for zone in public_zones if zone['Name'][:-1] in domain}
if zones:
            best_match = max(zones.keys(), key=lambda a: len(a)) # get longest key -- best match.
            return zones[best_match]
else:
return None
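    # Illustrative note (added; zone names are hypothetical): for the domain
    # 'api.example.com' with public zones 'example.com.' and 'api.example.com.',
    # both zone names (minus the trailing dot) are substrings of the domain, so the
    # longest one, 'api.example.com', is picked as the best match.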
def set_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Set DNS challenge TXT.
"""
print("Setting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch('UPSERT', domain, txt_challenge)
)
return resp
def remove_dns_challenge_txt(self, zone_id, domain, txt_challenge):
"""
Remove DNS challenge TXT.
"""
print("Deleting DNS challenge..")
resp = self.route53.change_resource_record_sets(
HostedZoneId=zone_id,
ChangeBatch=self.get_dns_challenge_change_batch('DELETE', domain, txt_challenge)
)
return resp
@staticmethod
def get_dns_challenge_change_batch(action, domain, txt_challenge):
"""
Given action, domain and challenge, return a change batch to use with
route53 call.
:param action: DELETE | UPSERT
:param domain: domain name
:param txt_challenge: challenge
:return: change set for a given action, domain and TXT challenge.
"""
return {
'Changes': [{
'Action': action,
'ResourceRecordSet': {
'Name': '_acme-challenge.{0}'.format(domain),
'Type': 'TXT',
'TTL': 60,
'ResourceRecords': [{
'Value': '"{0}"'.format(txt_challenge)
}]
}
}]
}
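    # Illustrative note (added; values are hypothetical): calling
    # get_dns_challenge_change_batch('UPSERT', 'example.com', 'abc123') yields a change
    # batch that upserts a TXT record named '_acme-challenge.example.com' with the
    # quoted value '"abc123"' and a 60-second TTL.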
##
# Utility
##
def shell(self):
"""
Spawn a PDB shell.
"""
import pdb
pdb.set_trace()
def load_credentials(self, boto_session=None, profile_name=None):
"""
Load AWS credentials.
An optional boto_session can be provided, but that's usually for testing.
An optional profile_name can be provided for config files that have multiple sets
of credentials.
"""
# Automatically load credentials from config or environment
if not boto_session:
# If provided, use the supplied profile name.
if profile_name:
self.boto_session = boto3.Session(profile_name=profile_name, region_name=self.aws_region)
elif os.environ.get('AWS_ACCESS_KEY_ID') and os.environ.get('AWS_SECRET_ACCESS_KEY'):
region_name = os.environ.get('AWS_DEFAULT_REGION') or self.aws_region
session_kw = {
"aws_access_key_id": os.environ.get('AWS_ACCESS_KEY_ID'),
"aws_secret_access_key": os.environ.get('AWS_SECRET_ACCESS_KEY'),
"region_name": region_name,
}
# If we're executing in a role, AWS_SESSION_TOKEN will be present, too.
if os.environ.get("AWS_SESSION_TOKEN"):
session_kw["aws_session_token"] = os.environ.get("AWS_SESSION_TOKEN")
self.boto_session = boto3.Session(**session_kw)
else:
self.boto_session = boto3.Session(region_name=self.aws_region)
logger.debug("Loaded boto session from config: %s", boto_session)
else:
logger.debug("Using provided boto session: %s", boto_session)
self.boto_session = boto_session
# use provided session's region in case it differs
self.aws_region = self.boto_session.region_name
if self.boto_session.region_name not in LAMBDA_REGIONS:
print("Warning! AWS Lambda may not be available in this AWS Region!")
if self.boto_session.region_name not in API_GATEWAY_REGIONS:
print("Warning! AWS API Gateway may not be available in this AWS Region!")
@staticmethod
def service_from_arn(arn):
return arn.split(':')[2]
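    # Illustrative note (added; the ARN is hypothetical): for
    # 'arn:aws:sqs:us-east-1:123456789012:my-queue', splitting on ':' gives
    # ['arn', 'aws', 'sqs', ...], so service_from_arn returns 'sqs'.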
|
the-stack_106_18767
|
from datetime import datetime, date
import re
import format.mla as mla
import format.cmos as cmos
import scraper.main as scraper
def get_authors(soup):
author_tag = soup.find("meta", {"name": "author"})
if not author_tag:
author_tag = soup.find("meta", {"property": "author"})
if not author_tag:
return None
tag_content = author_tag['content']
if "By " in author_tag['content']:
tag_content = author_tag['content'].replace("By ", "")
if ' and ' in tag_content and ', ' not in tag_content:
authors = tag_content.split(' and ')
elif ' and ' in tag_content and ', ' in tag_content:
authors = re.split(", | and ", tag_content)
elif ' and ' not in tag_content and ', ' in tag_content:
authors = tag_content.split(', ')
else:
        authors = [tag_content]  # use the cleaned content so a leading "By " is not re-added
return authors
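# Illustrative note (added; the byline is hypothetical): a tag content of
# "By Jane Doe, John Smith and Alex Roe" first has "By " stripped, then is split on
# ", " and " and ", yielding ['Jane Doe', 'John Smith', 'Alex Roe'].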
def get_title(soup):
header = soup.find("meta", {'property': "og:title"})
if header:
title = header["content"]
else:
title = None
return title
# Extracts the container (site/publication name) from the <title> tag; overlaps with get_publisher().
def get_container(soup):
header = soup.find("title")
return header.string.split(" - ")[-1]
def get_contributors(soup):
    #TODO: people who illustrate, photograph, translate...
contributors = []
contributors_raw = soup.find_all("span", {"itemprop": "copyrightHolder"})
for contributor_raw in contributors_raw:
children = contributor_raw.find_all("span")
if children is not None:
for child in children:
if "Credit" not in child.string:
                    contributors.append(' '.join(re.split(r"\W", child.string)[:2]))
return list(set(contributors))
def get_publisher(soup):
#TODO: eventually find a replicable way of scraping the website.
container_tag = soup.find("meta", {'property': "og:site_name"})
if container_tag:
container = container_tag["content"]
else:
container = None
return container
def get_pubdate(soup):
if soup.find("meta", {'property': "og:updated_time"}):
date_tag = soup.find("meta", {'property': "og:updated_time"})
if 'T' in date_tag["content"]:
publication_date = date_tag["content"]
time_str = publication_date.split('T')[0]
else:
time_str = date_tag["content"]
formatted_date = datetime.strptime(time_str, "%Y-%m-%d")
else:
date_tag = soup.find("time")
if date_tag:
try:
publication_date = date_tag["datetime"]
time_str = publication_date.split('T')[0]
formatted_date = datetime.strptime(time_str, "%Y-%m-%d")
            except Exception:
formatted_date = date.today()
else:
formatted_date = date.today()
return formatted_date
def get_location(link):
if "?" in link:
formatted_link_step_1 = link.split("?")[0]
else:
formatted_link_step_1 = link
if "http://" in formatted_link_step_1:
formatted_link_step_2 = formatted_link_step_1.split("http://")[1]
elif "https://" in formatted_link_step_1:
formatted_link_step_2 = formatted_link_step_1.split("https://")[1]
else:
formatted_link_step_2 = formatted_link_step_1
return formatted_link_step_2
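# Illustrative note (added; the URL is hypothetical): get_location(
# 'https://example.com/news/story.html?utm_source=x') strips the query string and the
# scheme, returning 'example.com/news/story.html'.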
def get_accessdate():
return date.today()
def create_citation_journalism(link, style="mla"):
soup = scraper.create_soup(link)
if soup:
citation_raw = {
"authors" : get_authors(soup),
"title" : get_title(soup),
"container" : get_container(soup),
"contributors" : get_contributors(soup),
"publisher" : get_publisher(soup),
"pubdate" : get_pubdate(soup),
"location" : get_location(link),
"accessdate" : get_accessdate()
}
if style == "mla":
return mla.mla_citation(citation_raw, htmlify=True)
elif style == "cmos":
return cmos.cmos_citation(citation_raw, htmlify=True)
else:
return None
else:
return None
|
the-stack_106_18769
|
import pytest
from dagster import (
DagsterTypeCheckError,
DependencyDefinition,
Field,
InputDefinition,
Int,
OutputDefinition,
ModeDefinition,
PipelineDefinition,
lambda_solid,
resource,
solid,
)
from dagster.utils.test import execute_solid_within_pipeline
from dagster.core.errors import DagsterExecutionStepExecutionError
def test_single_solid_in_isolation():
@lambda_solid
def solid_one():
return 1
pipeline_def = PipelineDefinition(solid_defs=[solid_one])
result = execute_solid_within_pipeline(pipeline_def, 'solid_one')
assert result.success
assert result.output_value() == 1
def test_single_solid_with_single():
@lambda_solid
def solid_one():
return 1
@lambda_solid(input_defs=[InputDefinition(name='num')])
def add_one_solid(num):
return num + 1
pipeline_def = PipelineDefinition(
solid_defs=[solid_one, add_one_solid],
dependencies={'add_one_solid': {'num': DependencyDefinition('solid_one')}},
)
result = execute_solid_within_pipeline(pipeline_def, 'add_one_solid', inputs={'num': 2})
assert result.success
assert result.output_value() == 3
def test_single_solid_with_multiple_inputs():
@lambda_solid
def solid_one():
return 1
@lambda_solid(input_defs=[InputDefinition(name='num_one'), InputDefinition('num_two')])
def add_solid(num_one, num_two):
return num_one + num_two
pipeline_def = PipelineDefinition(
solid_defs=[solid_one, add_solid],
dependencies={
'add_solid': {
'num_one': DependencyDefinition('solid_one'),
'num_two': DependencyDefinition('solid_one'),
}
},
)
result = execute_solid_within_pipeline(
pipeline_def,
'add_solid',
inputs={'num_one': 2, 'num_two': 3},
environment_dict={'loggers': {'console': {'config': {'log_level': 'DEBUG'}}}},
)
assert result.success
assert result.output_value() == 5
def test_single_solid_with_config():
ran = {}
@solid(config_field=Field(Int))
def check_config_for_two(context):
assert context.solid_config == 2
ran['check_config_for_two'] = True
pipeline_def = PipelineDefinition(solid_defs=[check_config_for_two])
result = execute_solid_within_pipeline(
pipeline_def,
'check_config_for_two',
environment_dict={'solids': {'check_config_for_two': {'config': 2}}},
)
assert result.success
assert ran['check_config_for_two']
def test_single_solid_with_context_config():
@resource(config_field=Field(Int, is_optional=True, default_value=2))
def num_resource(init_context):
return init_context.resource_config
ran = {'count': 0}
@solid
def check_context_config_for_two(context):
assert context.resources.num == 2
ran['count'] += 1
pipeline_def = PipelineDefinition(
solid_defs=[check_context_config_for_two],
mode_defs=[ModeDefinition(resource_defs={'num': num_resource})],
)
result = execute_solid_within_pipeline(
pipeline_def,
'check_context_config_for_two',
environment_dict={'resources': {'num': {'config': 2}}},
)
assert result.success
assert ran['count'] == 1
result = execute_solid_within_pipeline(pipeline_def, 'check_context_config_for_two')
assert result.success
assert ran['count'] == 2
def test_single_solid_error():
class SomeError(Exception):
pass
@lambda_solid
def throw_error():
raise SomeError()
pipeline_def = PipelineDefinition(solid_defs=[throw_error])
with pytest.raises(DagsterExecutionStepExecutionError) as e_info:
execute_solid_within_pipeline(pipeline_def, 'throw_error')
assert isinstance(e_info.value.__cause__, SomeError)
def test_single_solid_type_checking_output_error():
@lambda_solid(output_def=OutputDefinition(Int))
def return_string():
return 'ksjdfkjd'
pipeline_def = PipelineDefinition(solid_defs=[return_string])
with pytest.raises(DagsterTypeCheckError):
execute_solid_within_pipeline(pipeline_def, 'return_string')
def test_failing_solid_in_isolation():
class ThisException(Exception):
pass
@lambda_solid
def throw_an_error():
raise ThisException('nope')
pipeline_def = PipelineDefinition(solid_defs=[throw_an_error])
with pytest.raises(DagsterExecutionStepExecutionError) as e_info:
execute_solid_within_pipeline(pipeline_def, 'throw_an_error')
assert isinstance(e_info.value.__cause__, ThisException)
|
the-stack_106_18770
|
###########################################################################
# Created by: Hang Zhang
# Email: [email protected]
# Copyright (c) 2017
###########################################################################
import os, sys
BASE_DIR = os.path.dirname(os.path.dirname(os.getcwd()))
sys.path.append(BASE_DIR)
import yaml
import argparse
import numpy as np
from addict import Dict
import torch
import torch.nn as nn
from torch.utils import data
from tensorboardX import SummaryWriter
import torchvision.transforms as transform
from torch.nn.parallel.scatter_gather import gather
import encoding.utils as utils
from encoding.nn import SegmentationLosses, SyncBatchNorm
from encoding.parallel import DataParallelModel, DataParallelCriterion
from encoding.datasets import get_dataset
from encoding.models import get_segmentation_model
CONFIG_PATH = './results/config.yaml'
SMY_PATH = os.path.dirname(CONFIG_PATH)
GPUS = [0, 1]
# model settings
parser = argparse.ArgumentParser(description='model specification')
parser.add_argument('--mmf_att', type=str, default=None, help='Attention type to fuse rgb and dep')
settings = parser.parse_args()
print(settings)
class Trainer():
def __init__(self, args):
self.args = args
# data transforms
input_transform = transform.Compose([
transform.ToTensor(), # convert RGB [0,255] to FloatTensor in range [0, 1]
transform.Normalize([.485, .456, .406], [.229, .224, .225])]) # mean and std based on imageNet
dep_transform = transform.Compose([
transform.ToTensor(),
transform.Normalize(mean=[0.2798], std=[0.1387]) # mean and std for depth
])
# dataset
data_kwargs = {'transform': input_transform, 'dep_transform': dep_transform,
'base_size': args.base_size, 'crop_size': args.crop_size}
trainset = get_dataset(args.dataset, split=args.train_split, mode='train', **data_kwargs)
testset = get_dataset(args.dataset, split='val', mode='val', **data_kwargs)
# dataloader
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
self.trainloader = data.DataLoader(trainset, batch_size=args.batch_size, drop_last=True, shuffle=True, **kwargs)
self.valloader = data.DataLoader(testset, batch_size=args.batch_size, drop_last=False, shuffle=False, **kwargs)
self.nclass = trainset.num_class
# model and params
model = get_segmentation_model(args.model, dataset=args.dataset, backbone=args.backbone, pretrained=True,
root='../../encoding/models/pretrain',
mmf_att=settings.mmf_att)
print(model)
# optimizer using different LR
base_ids = list(map(id, model.base.parameters()))
base_dep_ids = list(map(id, model.dep_base.parameters()))
base_params = filter(lambda p: id(p) in base_ids + base_dep_ids, model.parameters())
other_params = filter(lambda p: id(p) not in base_ids + base_dep_ids, model.parameters())
self.optimizer = torch.optim.SGD([{'params': base_params, 'lr': args.lr},
{'params': other_params, 'lr': args.lr * 10}],
lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# criterions
self.criterion = SegmentationLosses(se_loss=args.se_loss,
aux=args.aux,
nclass=self.nclass,
se_weight=args.se_weight,
aux_weight=args.aux_weight)
# lr scheduler
self.scheduler = utils.LR_Scheduler_Head(args.lr_scheduler, args.lr, args.epochs,
iters_per_epoch=len(self.trainloader), warmup_epochs=5)
self.best_pred = 0.0
# using cuda
self.device = torch.device("cuda:0" if args.cuda else "cpu")
if args.cuda:
if torch.cuda.device_count() > 1:
print("Let's use", torch.cuda.device_count(), "GPUs!") # [30,xxx]->[10,...],[10,...],[10,...] on 3 GPUs
model = nn.DataParallel(model, device_ids=GPUS)
self.model = model.to(self.device)
# for writing summary
path = "/".join(("{}-{}".format(*i) for i in settings.__dict__.items()))
self.writer = SummaryWriter(os.path.join(SMY_PATH, path))
# resuming checkpoint
if args.resume is not None and args.resume != 'None':
if not os.path.isfile(args.resume):
raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
if args.cuda:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
if not args.ft:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.best_pred = checkpoint['best_pred']
print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
# clear start epoch if fine-tuning
if args.ft:
args.start_epoch = 0
def training(self, epoch):
train_loss = 0.0
self.model.train()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.trainloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
self.scheduler(self.optimizer, i, epoch, self.best_pred)
self.optimizer.zero_grad()
outputs = self.model(image, dep)
loss = self.criterion(outputs, target)
loss.backward()
self.optimizer.step()
correct, labeled = utils.batch_pix_accuracy(outputs.data, target)
inter, union = utils.batch_intersection_union(outputs.data, target, self.nclass)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
train_loss += loss.item()
if (i+1) % 50 == 0:
print('epoch {}, step {}, loss {}'.format(epoch + 1, i + 1, train_loss / 50))
self.writer.add_scalar('train_loss', train_loss / 50, epoch * len(self.trainloader) + i)
train_loss = 0.0
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
print('epoch {}, pixel Acc {}, mean IOU {}'.format(epoch + 1, pixAcc, mIOU))
self.writer.add_scalar("mean_iou/train", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/train", pixAcc, epoch)
def train_n_evaluate(self):
for epoch in range(self.args.epochs):
# run on one epoch
print("\n===============train epoch {}/{} ==========================\n".format(epoch, self.args.epochs))
# one full pass over the train set
self.training(epoch)
# evaluate for one epoch on the validation set
print('\n===============start testing, training epoch {}\n'.format(epoch))
pixAcc, mIOU, loss = self.validation(epoch)
print('evaluation pixel acc {}, mean IOU {}, loss {}'.format(pixAcc, mIOU, loss))
# save the best model
is_best = False
new_pred = (pixAcc + mIOU) / 2
if new_pred > self.best_pred:
is_best = True
self.best_pred = new_pred
path = 'runs/' + "/".join(("{}-{}".format(*i) for i in settings.__dict__.items()))
utils.save_checkpoint({'epoch': epoch + 1,
'state_dict': self.model.module.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred}, self.args, is_best, path=path)
def validation(self, epoch):
# Fast test during the training
def eval_batch(model, image, dep, target):
# model, image, target already moved to gpus
pred = model(image, dep)
loss = self.criterion(pred, target)
correct, labeled = utils.batch_pix_accuracy(pred.data, target)
inter, union = utils.batch_intersection_union(pred.data, target, self.nclass)
return correct, labeled, inter, union, loss
self.model.eval()
total_inter, total_union, total_correct, total_label, total_loss = 0, 0, 0, 0, 0
for i, (image, dep, target) in enumerate(self.valloader):
image, dep, target = image.to(self.device), dep.to(self.device), target.to(self.device)
with torch.no_grad():
correct, labeled, inter, union, loss = eval_batch(self.model, image, dep, target)
total_correct += correct
total_label += labeled
total_inter += inter
total_union += union
total_loss += loss.item()
pixAcc = 1.0 * total_correct / (np.spacing(1) + total_label)
IOU = 1.0 * total_inter / (np.spacing(1) + total_union)
mIOU = IOU.mean()
if i % 40 == 0:
print('eval mean IOU {}'.format(mIOU))
loss = total_loss / len(self.valloader)
self.writer.add_scalar("mean_iou/val", mIOU, epoch)
self.writer.add_scalar("pixel accuracy/val", pixAcc, epoch)
return pixAcc, mIOU, loss
if __name__ == "__main__":
print("-------mark program start----------")
# configuration
args = Dict(yaml.safe_load(open(CONFIG_PATH)))
args.cuda = (args.use_cuda and torch.cuda.is_available())
args.resume = None if args.resume=='None' else args.resume
torch.manual_seed(args.seed)
trainer = Trainer(args)
# import pdb; pdb.set_trace()
print('Starting Epoch:', trainer.args.start_epoch)
print('Total Epoches:', trainer.args.epochs)
trainer.train_n_evaluate()
|
the-stack_106_18771
|
import os
import sys
import builtins
import difflib
import inspect
import pydoc
import keyword
import re
import string
import test.support
import time
import unittest
import xml.etree
import textwrap
from io import StringIO
from collections import namedtuple
from test.script_helper import assert_python_ok
from test.support import (
TESTFN, rmtree, check_impl_detail,
reap_children, reap_threads, captured_output, captured_stdout, unlink
)
from test import pydoc_mod
try:
import threading
except ImportError:
threading = None
# Just in case sys.modules["test"] has the optional attribute __loader__.
if hasattr(pydoc_mod, "__loader__"):
del pydoc_mod.__loader__
expected_text_pattern = """
NAME
test.pydoc_mod - This is a test module for test_pydoc
%s
CLASSES
builtins.object
A
B
\x20\x20\x20\x20
class A(builtins.object)
| Hello and goodbye
|\x20\x20
| Methods defined here:
|\x20\x20
| __init__()
| Wow, I have no function!
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
\x20\x20\x20\x20
class B(builtins.object)
| Data descriptors defined here:
|\x20\x20
| __dict__
| dictionary for instance variables (if defined)
|\x20\x20
| __weakref__
| list of weak references to the object (if defined)
|\x20\x20
| ----------------------------------------------------------------------
| Data and other attributes defined here:
|\x20\x20
| NO_MEANING = 'eggs'
FUNCTIONS
doc_func()
This function solves all of the world's problems:
hunger
lack of Python
war
\x20\x20\x20\x20
nodoc_func()
DATA
__xyz__ = 'X, Y and Z'
VERSION
1.2.3.4
AUTHOR
Benjamin Peterson
CREDITS
Nobody
FILE
%s
""".strip()
if check_impl_detail(pypy=True):
# pydoc_mod.__builtins__ is always a module on PyPy (but a dict on
# CPython), hence an extra 'Modules' section
module_section = """
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#aa55cc">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Modules</strong></big></font></td></tr>
<tr><td bgcolor="#aa55cc"><tt> </tt></td><td> </td>
<td width="100%%"><table width="100%%" summary="list"><tr><td width="25%%" valign=top><a href="builtins.html">builtins</a><br>
</td><td width="25%%" valign=top></td><td width="25%%" valign=top></td><td width="25%%" valign=top></td></tr></table></td></tr></table><p>
"""
else:
module_section = ""
expected_html_pattern = ("""
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="#7799ee">
<td valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"> <br><big><big><strong><a href="test.html"><font color="#ffffff">test</font></a>.pydoc_mod</strong></big></big> (version 1.2.3.4)</font></td
><td align=right valign=bottom
><font color="#ffffff" face="helvetica, arial"><a href=".">index</a><br><a href="file:%s">%s</a>%s</font></td></tr></table>
<p><tt>This is a test module for test_pydoc</tt></p>
<p>""" + module_section + """\
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ee77aa">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Classes</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ee77aa"><tt> </tt></td><td> </td>
<td width="100%%"><dl>
<dt><font face="helvetica, arial"><a href="builtins.html#object">builtins.object</a>
</font></dt><dd>
<dl>
<dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#A">A</a>
</font></dt><dt><font face="helvetica, arial"><a href="test.pydoc_mod.html#B">B</a>
</font></dt></dl>
</dd>
</dl>
<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="A">class <strong>A</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr bgcolor="#ffc8d8"><td rowspan=2><tt> </tt></td>
<td colspan=2><tt>Hello and goodbye<br> </tt></td></tr>
<tr><td> </td>
<td width="100%%">Methods defined here:<br>
<dl><dt><a name="A-__init__"><strong>__init__</strong></a>()</dt><dd><tt>Wow, I have no function!</tt></dd></dl>
<hr>
Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
</td></tr></table> <p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#ffc8d8">
<td colspan=3 valign=bottom> <br>
<font color="#000000" face="helvetica, arial"><a name="B">class <strong>B</strong></a>(<a href="builtins.html#object">builtins.object</a>)</font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#ffc8d8"><tt> </tt></td><td> </td>
<td width="100%%">Data descriptors defined here:<br>
<dl><dt><strong>__dict__</strong></dt>
<dd><tt>dictionary for instance variables (if defined)</tt></dd>
</dl>
<dl><dt><strong>__weakref__</strong></dt>
<dd><tt>list of weak references to the object (if defined)</tt></dd>
</dl>
<hr>
Data and other attributes defined here:<br>
<dl><dt><strong>NO_MEANING</strong> = 'eggs'</dl>
</td></tr></table></td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#eeaa77">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Functions</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#eeaa77"><tt> </tt></td><td> </td>
<td width="100%%"><dl><dt><a name="-doc_func"><strong>doc_func</strong></a>()</dt><dd><tt>This function solves all of the world's problems:<br>
hunger<br>
lack of Python<br>
war</tt></dd></dl>
<dl><dt><a name="-nodoc_func"><strong>nodoc_func</strong></a>()</dt></dl>
</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#55aa55">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Data</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#55aa55"><tt> </tt></td><td> </td>
<td width="100%%"><strong>__xyz__</strong> = 'X, Y and Z'</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Author</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Benjamin Peterson</td></tr></table><p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="#7799ee">
<td colspan=3 valign=bottom> <br>
<font color="#ffffff" face="helvetica, arial"><big><strong>Credits</strong></big></font></td></tr>
\x20\x20\x20\x20
<tr><td bgcolor="#7799ee"><tt> </tt></td><td> </td>
<td width="100%%">Nobody</td></tr></table>
""").strip() # ' <- emacs turd
# output pattern for missing module
missing_pattern = "no Python documentation found for '%s'"
# output pattern for module with bad imports
badimport_pattern = "problem in %s - ImportError: No module named %s"
def run_pydoc(module_name, *args, **env):
"""
Runs pydoc on the specified module. Returns the stripped
output of pydoc.
"""
args = args + (module_name,)
# do not write bytecode files to avoid caching errors
rc, out, err = assert_python_ok('-B', pydoc.__file__, *args, **env)
return out.strip()
def get_pydoc_html(module):
"Returns pydoc generated output as html"
doc = pydoc.HTMLDoc()
output = doc.docmodule(module)
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "<br><a href=\"" + loc + "\">Module Docs</a>"
return output.strip(), loc
def get_pydoc_text(module):
"Returns pydoc generated output as text"
doc = pydoc.TextDoc()
loc = doc.getdocloc(pydoc_mod) or ""
if loc:
loc = "\nMODULE DOCS\n " + loc + "\n"
output = doc.docmodule(module)
# clean up the extra text formatting that pydoc performs
patt = re.compile('\b.')
output = patt.sub('', output)
return output.strip(), loc
def print_diffs(text1, text2):
"Prints unified diffs for two texts"
# XXX now obsolete, use unittest built-in support
lines1 = text1.splitlines(True)
lines2 = text2.splitlines(True)
diffs = difflib.unified_diff(lines1, lines2, n=0, fromfile='expected',
tofile='got')
print('\n' + ''.join(diffs))
def get_html_title(text):
# Bit of hack, but good enough for test purposes
header, _, _ = text.partition("</head>")
_, _, title = header.partition("<title>")
title, _, _ = title.partition("</title>")
return title
class PydocDocTest(unittest.TestCase):
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_html_doc(self):
result, doc_loc = get_pydoc_html(pydoc_mod)
mod_file = inspect.getabsfile(pydoc_mod)
if sys.platform == 'win32':
import nturl2path
mod_url = nturl2path.pathname2url(mod_file)
else:
mod_url = mod_file
expected_html = expected_html_pattern % (mod_url, mod_file, doc_loc)
if result != expected_html:
print_diffs(expected_html, result)
self.fail("outputs are not equal, see diff above")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_text_doc(self):
result, doc_loc = get_pydoc_text(pydoc_mod)
expected_text = expected_text_pattern % \
(doc_loc, inspect.getabsfile(pydoc_mod))
if result != expected_text:
print_diffs(expected_text, result)
self.fail("outputs are not equal, see diff above")
def test_issue8225(self):
# Test issue8225 to ensure no doc link appears for xml.etree
result, doc_loc = get_pydoc_text(xml.etree)
self.assertEqual(doc_loc, "", "MODULE DOCS incorrectly includes a link")
def test_not_here(self):
missing_module = "test.i_am_not_here"
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module
self.assertEqual(expected, result,
"documentation for missing module found")
def test_input_strip(self):
missing_module = " test.i_am_not_here "
result = str(run_pydoc(missing_module), 'ascii')
expected = missing_pattern % missing_module.strip()
self.assertEqual(expected, result)
def test_stripid(self):
# test with strings, other implementations might have different repr()
stripid = pydoc.stripid
# strip the id
self.assertEqual(stripid('<function stripid at 0x88dcee4>'),
'<function stripid>')
self.assertEqual(stripid('<function stripid at 0x01F65390>'),
'<function stripid>')
# nothing to strip, return the same text
self.assertEqual(stripid('42'), '42')
self.assertEqual(stripid("<type 'exceptions.Exception'>"),
"<type 'exceptions.Exception'>")
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
def test_help_output_redirect(self):
# issue 940286, if output is set in Helper, then all output from
# Helper.help should be redirected
old_pattern = expected_text_pattern
getpager_old = pydoc.getpager
getpager_new = lambda: (lambda x: x)
self.maxDiff = None
buf = StringIO()
helper = pydoc.Helper(output=buf)
unused, doc_loc = get_pydoc_text(pydoc_mod)
module = "test.pydoc_mod"
help_header = """
Help on module test.pydoc_mod in test:
""".lstrip()
help_header = textwrap.dedent(help_header)
expected_help_pattern = help_header + expected_text_pattern
pydoc.getpager = getpager_new
try:
with captured_output('stdout') as output, \
captured_output('stderr') as err:
helper.help(module)
result = buf.getvalue().strip()
expected_text = expected_help_pattern % \
(doc_loc, inspect.getabsfile(pydoc_mod))
self.assertEqual('', output.getvalue())
self.assertEqual('', err.getvalue())
self.assertEqual(expected_text, result)
finally:
pydoc.getpager = getpager_old
def test_namedtuple_public_underscore(self):
NT = namedtuple('NT', ['abc', 'def'], rename=True)
with captured_stdout() as help_io:
help(NT)
helptext = help_io.getvalue()
self.assertIn('_1', helptext)
self.assertIn('_replace', helptext)
self.assertIn('_asdict', helptext)
def test_synopsis(self):
self.addCleanup(unlink, TESTFN)
for encoding in ('ISO-8859-1', 'UTF-8'):
with open(TESTFN, 'w', encoding=encoding) as script:
if encoding != 'UTF-8':
print('#coding: {}'.format(encoding), file=script)
print('"""line 1: h\xe9', file=script)
print('line 2: hi"""', file=script)
synopsis = pydoc.synopsis(TESTFN, {})
self.assertEqual(synopsis, 'line 1: h\xe9')
class PydocImportTest(unittest.TestCase):
def setUp(self):
self.test_dir = os.mkdir(TESTFN)
self.addCleanup(rmtree, TESTFN)
def test_badimport(self):
# This tests the fix for issue 5230, where if pydoc found the module
# but the module had an internal import error pydoc would report no doc
# found.
modname = 'testmod_xyzzy'
testpairs = (
('i_am_not_here', 'i_am_not_here'),
('test.i_am_not_here_either', 'test.i_am_not_here_either'),
('test.i_am_not_here.neither_am_i', 'test.i_am_not_here'),
('i_am_not_here.{}'.format(modname), 'i_am_not_here'),
('test.{}'.format(modname), 'test.{}'.format(modname)),
)
sourcefn = os.path.join(TESTFN, modname) + os.extsep + "py"
for importstring, expectedinmsg in testpairs:
with open(sourcefn, 'w') as f:
f.write("import {}\n".format(importstring))
result = run_pydoc(modname, PYTHONPATH=TESTFN).decode("ascii")
expected = badimport_pattern % (modname, expectedinmsg)
self.assertEqual(expected, result)
def test_apropos_with_bad_package(self):
# Issue 7425 - pydoc -k failed when bad package on path
pkgdir = os.path.join(TESTFN, "syntaxerr")
os.mkdir(pkgdir)
badsyntax = os.path.join(pkgdir, "__init__") + os.extsep + "py"
with open(badsyntax, 'w') as f:
f.write("invalid python syntax = $1\n")
result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
self.assertEqual(b'', result)
def test_apropos_with_unreadable_dir(self):
# Issue 7367 - pydoc -k failed when unreadable dir on path
self.unreadable_dir = os.path.join(TESTFN, "unreadable")
os.mkdir(self.unreadable_dir, 0)
self.addCleanup(os.rmdir, self.unreadable_dir)
# Note, on Windows the directory appears to be still
# readable so this is not really testing the issue there
result = run_pydoc('zqwykjv', '-k', PYTHONPATH=TESTFN)
self.assertEqual(b'', result)
class TestDescriptions(unittest.TestCase):
def test_module(self):
# Check that pydocfodder module can be described
from test import pydocfodder
doc = pydoc.render_doc(pydocfodder)
self.assertIn("pydocfodder", doc)
def test_class(self):
class C: "New-style class"
c = C()
self.assertEqual(pydoc.describe(C), 'class C')
self.assertEqual(pydoc.describe(c), 'C')
expected = 'C in module %s object' % __name__
self.assertIn(expected, pydoc.render_doc(c))
def test_builtin(self):
for name in ('str', 'str.translate', 'builtins.str',
'builtins.str.translate'):
# test low-level function
self.assertIsNotNone(pydoc.locate(name))
# test high-level function
try:
pydoc.render_doc(name)
except ImportError:
                self.fail('finding the doc of {!r} failed'.format(name))
for name in ('notbuiltins', 'strrr', 'strr.translate',
'str.trrrranslate', 'builtins.strrr',
'builtins.str.trrranslate'):
self.assertIsNone(pydoc.locate(name))
self.assertRaises(ImportError, pydoc.render_doc, name)
@unittest.skipUnless(threading, 'Threading required for this test.')
class PydocServerTest(unittest.TestCase):
"""Tests for pydoc._start_server"""
def test_server(self):
# Minimal test that starts the server, then stops it.
def my_url_handler(url, content_type):
text = 'the URL sent was: (%s, %s)' % (url, content_type)
return text
serverthread = pydoc._start_server(my_url_handler, port=0)
starttime = time.time()
timeout = 1 #seconds
while serverthread.serving:
time.sleep(.01)
if serverthread.serving and time.time() - starttime > timeout:
serverthread.stop()
break
self.assertEqual(serverthread.error, None)
class PydocUrlHandlerTest(unittest.TestCase):
"""Tests for pydoc._url_handler"""
def test_content_type_err(self):
f = pydoc._url_handler
self.assertRaises(TypeError, f, 'A', '')
self.assertRaises(TypeError, f, 'B', 'foobar')
def test_url_requests(self):
# Test for the correct title in the html pages returned.
# This tests the different parts of the URL handler without
# getting too picky about the exact html.
requests = [
("", "Pydoc: Index of Modules"),
("get?key=", "Pydoc: Index of Modules"),
("index", "Pydoc: Index of Modules"),
("topics", "Pydoc: Topics"),
("keywords", "Pydoc: Keywords"),
("pydoc", "Pydoc: module pydoc"),
("get?key=pydoc", "Pydoc: module pydoc"),
("search?key=pydoc", "Pydoc: Search Results"),
("topic?key=def", "Pydoc: KEYWORD def"),
("topic?key=STRINGS", "Pydoc: TOPIC STRINGS"),
("foobar", "Pydoc: Error - foobar"),
("getfile?key=foobar", "Pydoc: Error - getfile?key=foobar"),
]
for url, title in requests:
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title)
path = string.__file__
title = "Pydoc: getfile " + path
url = "getfile?key=" + path
text = pydoc._url_handler(url, "text/html")
result = get_html_title(text)
self.assertEqual(result, title)
class TestHelper(unittest.TestCase):
def test_keywords(self):
self.assertEqual(sorted(pydoc.Helper.keywords),
sorted(keyword.kwlist))
@reap_threads
def test_main():
try:
test.support.run_unittest(PydocDocTest,
PydocImportTest,
TestDescriptions,
PydocServerTest,
PydocUrlHandlerTest,
TestHelper,
)
finally:
reap_children()
if __name__ == "__main__":
test_main()
|
the-stack_106_18776
|
import ctypes
def check_fd(fd):
""" Validate that a fd parameter looks like a file descriptor.
Raises an exception if the parameter is invalid.
:param fd: File descriptor to check.
"""
if not isinstance(fd, int):
raise TypeError('fd must be an integer, but was {}'.format(fd.__class__.__name__))
if fd < 0:
raise ValueError('fd cannot be negative')
def check_request(request):
""" Validate a ioctl request parameter.
Raises an exception if the parameter is invalid.
:param request: Ioctl request to check.
"""
if not isinstance(request, int) and not isinstance(request, long):
raise TypeError('request must be an integer, but was {}'.format(request.__class__.__name__))
if request < 0:
raise ValueError('request cannot be negative')
def check_ctypes_datatype(datatype):
valid_datatypes = (
ctypes._SimpleCData,
ctypes.Union,
ctypes.BigEndianStructure,
ctypes.LittleEndianStructure,
ctypes.Structure,
)
for t in valid_datatypes:
if issubclass(datatype, t):
return
raise TypeError('datatype must be a ctypes data type, but was {}'.format(datatype.__name__))
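# Illustrative usage sketch (added; the fd, request code and structure below are
# hypothetical, intended to show how the validators are meant to run before an ioctl):
#
#   class ExampleArg(ctypes.Structure):
#       _fields_ = [('value', ctypes.c_int)]
#
#   check_fd(3)                        # passes: non-negative int
#   check_request(0x5401)              # passes: non-negative int
#   check_ctypes_datatype(ExampleArg)  # passes: ctypes.Structure subclass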
|
the-stack_106_18778
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/MedicationAdministration
Release: DSTU2
Version: 1.0.2
Revision: 7202
"""
from typing import Any, Dict
from typing import List as ListType
from pydantic import Field, root_validator
from . import fhirtypes
from .backboneelement import BackboneElement
from .domainresource import DomainResource
class MedicationAdministration(DomainResource):
""" Administration of medication to a patient.
Describes the event of a patient consuming or otherwise being administered
a medication. This may be as simple as swallowing a tablet or it may be a
long running infusion. Related resources tie this event to the authorizing
prescription, and the specific encounter between patient and health care
practitioner.
"""
resource_type = Field("MedicationAdministration", const=True)
status: fhirtypes.Code = Field(
None,
alias="status",
title="Type `Code`.",
description="in-progress | on-hold | completed | entered-in-error | stopped.",
)
wasNotGiven: fhirtypes.Boolean = Field(
None,
alias="wasNotGiven",
title="Type `Boolean`.",
description="True if medication not administered.",
)
note: fhirtypes.String = Field(
None,
alias="note",
title="Type `String`.",
description="Information about the administration.",
)
patient: fhirtypes.ReferenceType = Field(
None,
alias="patient",
title="Type `Reference` referencing `Patient` (represented as `dict` in JSON).",
description="Who received medication.",
)
practitioner: fhirtypes.ReferenceType = Field(
None,
alias="practitioner",
title=(
"Type `Reference` referencing `Practitioner, "
"Patient, RelatedPerson` (represented as `dict` in JSON)."
),
description="Who administered substance.",
)
prescription: fhirtypes.ReferenceType = Field(
None,
alias="prescription",
title="Type `Reference` referencing `MedicationOrder` (represented as `dict` in JSON).",
description="Order administration performed against.",
)
encounter: fhirtypes.ReferenceType = Field(
None,
alias="encounter",
title="Type `Reference` referencing `Encounter` (represented as `dict` in JSON).",
description="Encounter administered as part of.",
)
dosage: fhirtypes.MedicationAdministrationDosageType = Field(
None,
alias="dosage",
title="Type `MedicationAdministrationDosage` (represented as `dict` in JSON).",
description="Details of how medication was taken.",
)
effectiveTimeDateTime: fhirtypes.DateTime = Field(
None,
alias="effectiveTimeDateTime",
title="Type `DateTime`.",
description="Start and end time of administration.",
one_of_many="effective", # Choice of Data Types. i.e reason[x]
one_of_many_required=False,
)
effectiveTimePeriod: fhirtypes.PeriodType = Field(
None,
alias="effectiveTimePeriod",
title="Type `Period` (represented as `dict` in JSON).",
description="Start and end time of administration.",
one_of_many="effective", # Choice of Data Types. i.e reason[x]
one_of_many_required=False,
)
medicationReference: fhirtypes.ReferenceType = Field(
None,
alias="medicationReference",
title="Type `Reference` referencing `Medication` (represented as `dict` in JSON).",
description="What was administered.",
one_of_many="medication", # Choice of Data Types. i.e medication[x]
one_of_many_required=False,
)
medicationCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="medicationCodeableConcept",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="What was administered.",
one_of_many="medication", # Choice of Data Types. i.e medication[x]
one_of_many_required=False,
)
device: ListType[fhirtypes.ReferenceType] = Field(
None,
alias="device",
title="List of `Reference` items referencing `Device` (represented as `dict` in JSON).",
description="Device used to administer.",
)
identifier: ListType[fhirtypes.IdentifierType] = Field(
None,
alias="identifier",
title="List of `Identifier` items (represented as `dict` in JSON).",
description="External identifier.",
)
reasonGiven: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonGiven",
title="List of `CodeableConcept` items (represented as `dict` in JSON).",
description="Reason administration performed.",
)
reasonNotGiven: ListType[fhirtypes.CodeableConceptType] = Field(
None,
alias="reasonNotGiven",
title="List of `CodeableConcept` items (represented as `dict` in JSON).",
description="Reason administration not performed.",
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"effective": ["effectiveTimeDateTime", "effectiveTimePeriod"],
"medication": ["medicationReference", "medicationCodeableConcept"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
class MedicationAdministrationDosage(BackboneElement):
""" Details of how medication was taken.
Describes the medication dosage information details e.g. dose, rate, site,
route, etc.
"""
resource_type = Field("MedicationAdministrationDosage", const=True)
method: fhirtypes.CodeableConceptType = Field(
None,
alias="method",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="How drug was administered.",
)
quantity: fhirtypes.QuantityType = Field(
None,
alias="quantity",
title="Type `Quantity` (represented as `dict` in JSON).",
description="Amount administered in one dose.",
)
rateRange: fhirtypes.RangeType = Field(
None,
alias="rateRange",
title="Type `Range` (represented as `dict` in JSON).",
description="Dose quantity per unit of time.",
one_of_many="rate", # Choice of Data Types. i.e reason[x]
one_of_many_required=False,
)
rateRatio: fhirtypes.RatioType = Field(
None,
alias="rateRatio",
title="Type `Ratio` (represented as `dict` in JSON).",
description="Dose quantity per unit of time.",
one_of_many="rate", # Choice of Data Types. i.e reason[x]
one_of_many_required=False,
)
route: fhirtypes.CodeableConceptType = Field(
None,
alias="route",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Path of substance into body.",
)
siteCodeableConcept: fhirtypes.CodeableConceptType = Field(
None,
alias="siteCodeableConcept",
title="Type `CodeableConcept` (represented as `dict` in JSON).",
description="Body site administered to.",
one_of_many="site", # Choice of Data Types. i.e reason[x]
one_of_many_required=False,
)
siteReference: fhirtypes.ReferenceType = Field(
None,
alias="siteReference",
title="Type `Reference` referencing `BodySite` (represented as `dict` in JSON).",
description="Body site administered to.",
one_of_many="site", # Choice of Data Types. i.e site[x]
one_of_many_required=False,
)
text: fhirtypes.String = Field(
None, alias="text", title="Type `String`.", description="Dosage Instructions."
)
@root_validator(pre=True)
def validate_one_of_many(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""https://www.hl7.org/fhir/formats.html#choice
A few elements have a choice of more than one data type for their content.
All such elements have a name that takes the form nnn[x].
The "nnn" part of the name is constant, and the "[x]" is replaced with
the title-cased name of the type that is actually used.
The table view shows each of these names explicitly.
Elements that have a choice of data type cannot repeat - they must have a
maximum cardinality of 1. When constructing an instance of an element with a
choice of types, the authoring system must create a single element with a
data type chosen from among the list of permitted data types.
"""
one_of_many_fields = {
"rate": ["rateRatio", "rateRange"],
"site": ["siteCodeableConcept", "siteReference"],
}
for prefix, fields in one_of_many_fields.items():
assert cls.__fields__[fields[0]].field_info.extra["one_of_many"] == prefix
required = (
cls.__fields__[fields[0]].field_info.extra["one_of_many_required"]
is True
)
found = False
for field in fields:
if field in values and values[field] is not None:
if found is True:
raise ValueError(
"Any of one field value is expected from "
f"this list {fields}, but got multiple!"
)
else:
found = True
if required is True and found is False:
raise ValueError(f"Expect any of field value from this list {fields}.")
return values
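# Illustrative usage sketch (not part of the original module). It assumes that, as is
# usual for these pydantic models, complex fields accept plain dicts; all field values
# below are made up for demonstration only.
if __name__ == "__main__":
    # Exactly one of the medication[x] choice fields may be supplied.
    admin = MedicationAdministration(
        status="completed",
        medicationCodeableConcept={"text": "example medication"},
    )
    print(admin.status, admin.medicationCodeableConcept)
    try:
        MedicationAdministration(
            status="completed",
            medicationReference={"reference": "Medication/example"},
            medicationCodeableConcept={"text": "example medication"},
        )
    except ValueError as exc:
        # validate_one_of_many (pre root_validator) rejects more than one choice field.
        print("rejected as expected:", exc)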
|
the-stack_106_18779
|
# coding=utf-8
# Copyright 2021 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Interface for a learner that uses BenchmarkReaderDataSource to get data."""
# TODO(lamblinp): Update variable names to be more consistent
# - target, class_idx, label
# - support, query
# TODO(lamblinp): Simplify the logic around performing evaluation on the
# `TRAIN_SPLIT` by, for instance, recording which data is episodic, and which
# split it is coming from (independently from how it is used).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import os
import re
from absl import logging
import gin.tf
from meta_dataset import distribute_utils
from meta_dataset import learners
from meta_dataset.data import dataset_spec as dataset_spec_lib
from meta_dataset.data import learning_spec
from meta_dataset.data import pipeline
from meta_dataset.data import providers
from meta_dataset.data import read_episodes
from meta_dataset.learners import experimental as experimental_learners
from meta_dataset.models import functional_backbones
import numpy as np
import six
from six.moves import range
from six.moves import zip
import tensorflow.compat.v1 as tf
# Enable tf.data optimizations, which are applied to the input data pipeline.
# It may be helpful to disable them when investigating regressions due to
# changes in tf.data (see b/121130181 for instance), but they seem to be helpful
# (or at least not detrimental) in general.
ENABLE_DATA_OPTIMIZATIONS = True
DATASETS_WITH_EXAMPLE_SPLITS = ()
TF_DATA_OPTIONS = tf.data.Options()
if not ENABLE_DATA_OPTIMIZATIONS:
# The Options object can be used to control which static or dynamic
# optimizations to apply.
TF_DATA_OPTIONS.experimental_optimization.apply_default_optimizations = False
# Objective labels for hyperparameter optimization.
ACC_MEAN_FORMAT_STRING = '%s_acc/mean'
ACC_CI95_FORMAT_STRING = '%s_acc/ci95'
# TODO(eringrant): Use `learning_spec.Split.TRAIN`, `learning_spec.Split.VALID`,
# and `learning_spec.Split.TEST` instead of string constants, and replace all
# remaining string redefinitions.
TRAIN_SPLIT = 'train'
VALID_SPLIT = 'valid'
TEST_SPLIT = 'test'
FLAGS = tf.flags.FLAGS
class UnexpectedSplitError(ValueError):
def __init__(self,
unexpected_split,
expected_splits=(TRAIN_SPLIT, TEST_SPLIT, VALID_SPLIT)):
super(UnexpectedSplitError,
self).__init__('Split must be one of {}, but received `{}`. '.format(
expected_splits, unexpected_split))
@gin.configurable('benchmark')
def get_datasets_and_restrictions(train_datasets='',
eval_datasets='',
use_dumped_episodes=False,
restrict_classes=None,
restrict_num_per_class=None):
"""Gets the list of dataset names and possible restrictions on their classes.
Args:
train_datasets: A string of comma-separated dataset names for training.
eval_datasets: A string of comma-separated dataset names for evaluation.
use_dumped_episodes: bool, if True `eval_datasets` are prefixed with
`dumped` to trigger evaluation on dumped episodes instead of on the fly
sampling.
restrict_classes: If provided, a dict that maps dataset names to a dict that
specifies for each of `TRAIN_SPLIT`, `VALID_SPLIT` and `TEST_SPLIT` the
number of classes to restrict to. This can lead to some classes of a
particular split of a particular dataset never participating in episode
creation.
restrict_num_per_class: If provided, a dict that maps dataset names to a
dict that specifies for each of `meta_dataset.trainer.TRAIN_SPLIT`,
`meta_dataset.trainer.VALID_SPLIT` and `meta_dataset.trainer.TEST_SPLIT`
the number of examples per class to restrict to. For datasets / splits
that are not specified, no restriction is applied.
Returns:
Two lists of dataset names and two possibly empty dictionaries.
"""
if restrict_classes is None:
restrict_classes = {}
if restrict_num_per_class is None:
restrict_num_per_class = {}
train_datasets = [d.strip() for d in train_datasets.split(',')]
eval_datasets = [d.strip() for d in eval_datasets.split(',')]
if use_dumped_episodes:
eval_datasets = ['dumped_%s' % ds for ds in eval_datasets]
return train_datasets, eval_datasets, restrict_classes, restrict_num_per_class
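def _example_get_datasets_and_restrictions():
  """Illustrative sketch only (not part of the original module).

  Shows how the comma-separated dataset strings are parsed; the dataset names
  used here are placeholders chosen for demonstration.
  """
  train, evals, classes, num_per_class = get_datasets_and_restrictions(
      train_datasets='ilsvrc_2012, omniglot',
      eval_datasets='cu_birds',
      use_dumped_episodes=True)
  # train == ['ilsvrc_2012', 'omniglot'], evals == ['dumped_cu_birds'], and the
  # two restriction dicts default to {}.
  return train, evals, classes, num_per_class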
def apply_dataset_options(dataset):
"""Apply the module-wide set of dataset options to dataset.
In particular, this is used to enable or disable tf.data optimizations.
This applies to the whole pipeline, so we can just set it at the end.
Args:
dataset: a tf.data.Dataset object.
Returns:
A tf.data.Dataset object with options applied.
"""
return dataset.with_options(TF_DATA_OPTIONS)
def compute_class_proportions(unique_class_ids, shots, dataset_spec):
"""Computes the proportion of the total number of examples appearing as shots.
Args:
unique_class_ids: A 1D int Tensor of unique class IDs.
shots: A 1D Tensor of the number of shots for each class in
`unique_class_ids`.
    dataset_spec: A DatasetSpecification that contains information about the
class labels in `unique_class_ids`.
Returns:
A 1D Tensor with the proportion of examples appearing as shots per class in
`unique_class_ids`, normalized by the total number of examples for each
class in the dataset according to `dataset_spec`.
"""
# Get the total number of examples of each class in the dataset.
num_dataset_classes = len(dataset_spec.images_per_class)
num_images_per_class = [
dataset_spec.get_total_images_per_class(class_id)
for class_id in range(num_dataset_classes)
]
# Make sure that `unique_class_ids` are valid indices of
# `num_images_per_class`. This is important since `tf.gather` will fail
# silently and return zeros otherwise.
num_classes = tf.shape(num_images_per_class)[0]
check_valid_inds_op = tf.assert_less(unique_class_ids, num_classes)
with tf.control_dependencies([check_valid_inds_op]):
# Get the total number of examples of each class that is in the episode.
num_images_per_class = tf.gather(num_images_per_class,
unique_class_ids) # [?, ]
# Get the proportions of examples of each class that appear in the episode.
class_props = tf.truediv(shots, num_images_per_class)
return class_props
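def _example_class_proportions():
  """Illustrative sketch only (not part of the original module).

  Mirrors what compute_class_proportions returns, using plain Python numbers
  for a made-up dataset where class 0 has 100 images and class 3 has 40.
  """
  images_per_class = {0: 100, 3: 40}
  shots = {0: 5, 3: 10}
  # Proportion of each class' images that appear as support-set shots.
  return {cid: shots[cid] / images_per_class[cid] for cid in images_per_class}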
def get_split_enum(split):
"""Returns the Enum value corresponding to the given split.
Args:
split: A string, one of TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT.
Raises:
UnexpectedSplitError: split not TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT.
"""
# Get the int representing the chosen split.
if split == TRAIN_SPLIT:
split_enum = learning_spec.Split.TRAIN
elif split == VALID_SPLIT:
split_enum = learning_spec.Split.VALID
elif split == TEST_SPLIT:
split_enum = learning_spec.Split.TEST
else:
raise UnexpectedSplitError(split)
return split_enum
def restore_or_log_informative_error(saver, sess, checkpoint_to_restore):
"""Attempt to restore from `checkpoint_to_restore` in `sess` using `saver`."""
try:
saver.restore(sess, checkpoint_to_restore)
except tf.errors.NotFoundError as e:
logging.error('Tried to restore from checkpoint %s but failed.',
checkpoint_to_restore)
raise e
else:
logging.info('Restored from checkpoint %s.', checkpoint_to_restore)
# TODO(eringrant): Split the current `Trainer` class into `Trainer` and
# `Evaluator` classes to partition the constructor arguments into meaningful
# groups.
# TODO(eringrant): Refactor the current `Trainer` class to more transparently
# deal with operations per split, since the present logic surrounding the
# `eval_finegrainedness_split` is confusing.
# TODO(eringrant): Better organize `Trainer` Gin configurations, which are
# currently set in many configuration files.
@gin.configurable
class Trainer(object):
"""A Trainer for training a Learner on data provided by ReaderDataSource."""
def __init__(
self,
num_updates,
batch_size,
num_eval_episodes,
checkpoint_every,
validate_every,
log_every,
train_learner_class,
eval_learner_class,
is_training,
checkpoint_to_restore,
learning_rate,
decay_learning_rate,
decay_every,
decay_rate,
normalized_gradient_descent,
experiment_name,
pretrained_source,
train_dataset_list,
eval_dataset_list,
restrict_classes,
restrict_num_per_class,
checkpoint_dir,
summary_dir,
records_root_dir,
eval_finegrainedness,
eval_finegrainedness_split,
eval_imbalance_dataset,
omit_from_saving_and_reloading,
eval_split,
train_episode_config,
eval_episode_config,
data_config,
distribute,
enable_tf_optimizations):
# pyformat: disable
"""Initializes a Trainer.
Args:
num_updates: An integer, the number of training updates.
batch_size: An integer, the size of batches for non-episodic models.
num_eval_episodes: An integer, the number of episodes for evaluation.
checkpoint_every: An integer, the number of episodes between consecutive
checkpoints.
validate_every: An integer, the number of episodes between consecutive
validations.
log_every: An integer, the number of episodes between consecutive logging.
train_learner_class: A Learner to be used for meta-training.
eval_learner_class: A Learner to be used for meta-validation or
meta-testing.
is_training: Bool, whether or not to train or just evaluate.
checkpoint_to_restore: A string, the path to a checkpoint from which to
restore variables.
learning_rate: A float, the meta-learning learning rate.
decay_learning_rate: A boolean, whether to decay the learning rate.
decay_every: An integer, the learning rate is decayed for every multiple
of this value.
decay_rate: A float, the decay to apply to the learning rate.
normalized_gradient_descent: A boolean, whether to use normalized
gradient descent in addition to ADAM; improves stability for
crosstransformers.
experiment_name: A string, a name for the experiment.
pretrained_source: A string, the pretraining setup to use.
train_dataset_list: A list of names of datasets to train on. This can be
any subset of the supported datasets.
eval_dataset_list: A list of names of datasets to evaluate on either for
validation during train or for final test evaluation, depending on the
nature of the experiment, as dictated by `is_training'.
restrict_classes: A dict that maps dataset names to a dict that specifies
for each of TRAIN_SPLIT, VALID_SPLIT and TEST_SPLIT the number of
classes to restrict to. This can lead to some classes of a particular
split of a particular dataset never participating in episode creation.
restrict_num_per_class: A dict that maps dataset names to a dict that
specifies for each of TRAIN_SPLIT, VALID_SPLIT and TEST_SPLIT the number
of examples per class to restrict to. For datasets / splits that are not
mentioned, no restriction is applied. If restrict_num_per_class is the
empty dict, no restriction is applied to any split of any dataset.
checkpoint_dir: A string, the path to the checkpoint directory, or None if
no checkpointing should occur.
summary_dir: A string, the path to the checkpoint directory, or None if no
summaries should be saved.
records_root_dir: A string, the path to the dataset records directory.
eval_finegrainedness: Whether to perform binary ImageNet evaluation for
assessing the performance on fine- vs coarse- grained tasks.
eval_finegrainedness_split: The subgraph of ImageNet to perform the
aforementioned analysis on. Notably, if this is TRAIN_SPLIT, we need to
        ensure that the training data is used episodically, even if the given
model is the baseline model which usually uses batches for training.
eval_imbalance_dataset: A dataset on which to perform evaluation for
assessing how class imbalance affects performance in binary episodes. By
default it is empty and no imbalance analysis is performed.
omit_from_saving_and_reloading: A list of strings that specifies
substrings of variable names that should not be reloaded.
eval_split: One of the constants TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT
or None, according to the split whose results we want to
use for the analysis.
train_episode_config: An instance of EpisodeDescriptionConfig (in
data/config.py). This is a config for setting the ways and shots of
training episodes or the parameters for sampling them, if variable.
eval_episode_config: An instance of EpisodeDescriptionConfig. Analogous to
train_episode_config but used for eval episodes (validation or testing).
data_config: A DataConfig, the data configuration.
distribute: (Experimental) use tf.distribute to distribute computation
across multiple GPUs using a MirroredStrategy. This has been tested
with CrossTransformers, and may work with other episodic learners.
It will split Episodes into multiple EpisodePieces before passing them
to the learners, where each EpisodePiece contains a portion of the
original episode's query and support-set images. Batch learners are
not currently supported.
enable_tf_optimizations: Enable TensorFlow optimizations. It can add a
few minutes to the first calls to session.run(), but decrease memory
usage.
Raises:
RuntimeError: If requested to meta-learn the initialization of the linear
layer weights but they are unexpectedly omitted from saving/restoring.
UnexpectedSplitError: If split configuration is not as expected.
"""
# pyformat: enable
self.num_updates = num_updates
self.batch_size = batch_size
self.num_eval_episodes = num_eval_episodes
self.checkpoint_every = checkpoint_every
self.validate_every = validate_every
self.log_every = log_every
self.checkpoint_to_restore = checkpoint_to_restore
self.learning_rate = learning_rate
self.decay_learning_rate = decay_learning_rate
self.decay_every = decay_every
self.decay_rate = decay_rate
self.experiment_name = experiment_name
self.pretrained_source = pretrained_source
self.train_learner_class = train_learner_class
self.eval_learner_class = eval_learner_class
self.is_training = is_training
self.train_dataset_list = train_dataset_list
self.eval_dataset_list = eval_dataset_list
self.normalized_gradient_descent = normalized_gradient_descent
self.enable_tf_optimizations = enable_tf_optimizations
    # Currently only a single dataset is supported when we read from fixed
    # datasets like VTAB or dumped episodes.
    # Check whether we evaluate on VTAB.
if (len(self.eval_dataset_list) == 1 and
self.eval_dataset_list[0].startswith('vtab')):
self._fixed_eval = 'vtab'
elif (len(self.eval_dataset_list) == 1 and
self.eval_dataset_list[0].startswith('dumped')):
self._fixed_eval = 'dumped'
else:
self._fixed_eval = None
self.restrict_classes = restrict_classes
self.restrict_num_per_class = restrict_num_per_class
self.checkpoint_dir = checkpoint_dir
self.summary_dir = summary_dir
self.records_root_dir = records_root_dir
self.eval_finegrainedness = eval_finegrainedness
self.eval_finegrainedness_split = eval_finegrainedness_split
self.eval_imbalance_dataset = eval_imbalance_dataset
self.omit_from_saving_and_reloading = omit_from_saving_and_reloading
self.data_initializeable_iterators = []
if eval_finegrainedness:
# The fine- vs coarse- grained evaluation may potentially be performed on
# the training graph as it exhibits greater variety in this aspect.
self.eval_split = eval_finegrainedness_split
elif eval_split:
if eval_split not in (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT):
raise UnexpectedSplitError(eval_split)
self.eval_split = eval_split
elif is_training:
self.eval_split = VALID_SPLIT
else:
self.eval_split = TEST_SPLIT
if eval_finegrainedness or eval_imbalance_dataset:
# We restrict this analysis to the binary classification setting.
logging.info(
'Forcing the number of %s classes to be 2, since '
'the finegrainedness analysis is applied on binary '
'classification tasks only.', eval_finegrainedness_split)
if eval_finegrainedness and eval_finegrainedness_split == TRAIN_SPLIT:
train_episode_config.num_ways = 2
else:
eval_episode_config.num_ways = 2
self.num_classes_train = train_episode_config.num_ways
self.num_classes_eval = eval_episode_config.num_ways
self.num_support_train = train_episode_config.num_support
self.num_query_train = train_episode_config.num_query
self.num_support_eval = eval_episode_config.num_support
self.num_query_eval = eval_episode_config.num_query
self.train_episode_config = train_episode_config
self.eval_episode_config = eval_episode_config
self.data_config = data_config
# TODO(eringrant): Adapt these image-specific expectations to feature
# inputs.
self.image_shape = [data_config.image_height] * 2 + [3]
# Create the benchmark specification.
self.benchmark_spec = self.get_benchmark_specification()
# Which splits to support depends on whether we are in the meta-training
# phase or not. If we are, we need the train split, and the valid one for
# early-stopping. If not, we only need the test split.
self.required_splits = [TRAIN_SPLIT] if self.is_training else []
self.required_splits += [self.eval_split]
# Get the training, validation and testing specifications.
# Each is either an EpisodeSpecification or a BatchSpecification.
self.split_episode_or_batch_specs = dict(
zip(self.required_splits,
map(self.get_batch_or_episodic_specification,
self.required_splits)))
# Get the next data (episode or batch) for the different splits.
self.next_data = dict(
zip(self.required_splits, map(self.build_data, self.required_splits)))
self.distribute = distribute
if self.distribute:
self.strategy = tf.distribute.MirroredStrategy()
else:
self.strategy = None
# Create the global step to pass to the learners.
global_step = tf.train.get_or_create_global_step()
if len(self.required_splits) > 1:
if issubclass(self.train_learner_class,
experimental_learners.ExperimentalLearner):
assert issubclass(
self.eval_learner_class, experimental_learners.ExperimentalLearner
), ('If the `Learner` for the train split is an `ExperimentalLearner`,'
' the `Learner` for the evaluation split must be as well, since '
'otherwise parameters cannot be shared.')
else:
assert not issubclass(
self.eval_learner_class, experimental_learners.ExperimentalLearner
), ('If the `Learner` for the evaluation split is not an '
'`ExperimentalLearner`, the `Learner` for the train split must not'
' be either, since otherwise parameters cannot be shared.')
# Initialize the learners.
self.learners = {}
for split in self.required_splits:
if split == TRAIN_SPLIT:
# The learner for the training split should only be in training mode if
# the evaluation split is not the training split.
learner_is_training = self.eval_split != TRAIN_SPLIT
learner_class = self.train_learner_class
tied_learner = None
else:
learner_is_training = False
learner_class = self.eval_learner_class
# Share parameters between the training and evaluation `Learner`s.
tied_learner = (
self.learners[TRAIN_SPLIT]
if TRAIN_SPLIT in self.required_splits else None)
learner = self.create_learner(
is_training=learner_is_training,
learner_class=learner_class,
split=get_split_enum(split),
tied_learner=tied_learner)
if (isinstance(learner, learners.MAMLLearner) and
not learner.zero_fc_layer and not learner.proto_maml_fc_layer_init):
if 'linear_classifier' in FLAGS.omit_from_saving_and_reloading:
raise ValueError('The linear layer is requested to be meta-learned '
'since both `MAMLLearner.zero_fc_layer` and '
'`MAMLLearner.proto_maml_fc_layer_init` are False, '
                           'but the `linear_classifier` tag is found in '
'FLAGS.omit_from_saving_and_reloading so they will '
'not be properly restored. Please exclude these '
'weights from omit_from_saving_and_reloading for '
'this setting to work as expected.')
self.learners[split] = learner
# Build the data-dependent functions (run_fn returns prediction,
# un-regularized loss, accuracy, and episode statistics), the iterators
# producing data (data_fn), and regularizer, for each learner.
run_fn, data_fn, regularizer_fn = zip(
*[self.build_learner(split) for split in self.required_splits])
self.run_fns = dict(zip(self.required_splits, run_fn))
self.data_fns = dict(zip(self.required_splits, data_fn))
self.regularizer_fns = dict(zip(self.required_splits, regularizer_fn))
# Get an optimizer and the operation for meta-training.
self.train_op = None
if self.is_training:
learning_rate = self.learning_rate
if self.decay_learning_rate:
learning_rate = tf.train.exponential_decay(
self.learning_rate,
global_step,
decay_steps=self.decay_every,
decay_rate=self.decay_rate,
staircase=True)
tf.summary.scalar('learning_rate', learning_rate)
if self.distribute:
self.optimizer = tf.keras.optimizers.Adam(learning_rate)
else:
self.optimizer = tf.train.AdamOptimizer(learning_rate)
self.run_fns[TRAIN_SPLIT] = self.get_run_fn_with_train_op(
self.run_fns[TRAIN_SPLIT], self.regularizer_fns[TRAIN_SPLIT],
global_step)
self.predictions = {}
self.losses = {}
self.accuracies = {}
self.episode_info = {}
for split in self.required_splits:
if self.distribute:
with self.strategy.scope():
output = self.strategy.experimental_run(self.run_fns[split],
self.data_fns[split])
if self.strategy.num_replicas_in_sync > 1:
output['predictions'] = tf.concat(
output['predictions'].values, axis=0)
output['loss'] = tf.concat(output['loss'].values, axis=0)
output['accuracy'] = tf.concat(output['accuracy'].values, axis=0)
if split == TRAIN_SPLIT and self.is_training:
output['train_op'] = tf.group(output['train_op'].values)
# The computed episode_info should be identical for all replicas.
episode_info = {}
for key, val in output['episode_info'].items():
if val is not None:
# This control_dependencies is required or the call to
# session.run() gets deadlocked.
with tf.control_dependencies(val.values):
episode_info[key] = tf.identity(val.values[0])
else:
episode_info[key] = None
output['episode_info'] = episode_info
else:
data_tensors = tf.data.make_one_shot_iterator(
self.data_fns[split]()).get_next()
output = self.run_fns[split](data_tensors)
loss = tf.reduce_mean(output['loss'])
loss += self.regularizer_fns[split]()
self.losses[split] = loss
self.accuracies[split] = tf.reduce_mean(output['accuracy'])
self.predictions[split] = output['predictions']
self.episode_info[split] = output['episode_info']
if split == TRAIN_SPLIT and self.is_training:
self.train_op = output['train_op']
if self.checkpoint_dir is not None:
if not tf.io.gfile.exists(self.checkpoint_dir):
tf.io.gfile.makedirs(self.checkpoint_dir)
# Meaningless values so that logging works even if called before evaluation.
self.valid_acc = np.nan
self.valid_ci = np.nan
self.initialize_session()
self.initialize_saver()
self.create_summary_writer()
def build_learner(self, split):
"""Return predictions, losses and accuracies for the learner on split.
Args:
split: A `learning_spec.Split` that identifies the data split for which
the learner is to be built.
Returns:
run_fn: a function which, when called on data, will run the network
forward pass in split mode `split` and return:
predictions: A `tf.Tensor`; the predictions of the learner on `split`.
losses: A `tf.Tensor`; the losses of the learner on `split`.
accuracies: A `tf.Tensor`; the accuracies of the learner on `split`.
episode_info: A map of string to `tf.Tensor`, which has statistics
about the input data.
data_fn: a function which returns a `tf.Dataset` which can be used to
provide input to run_fn
regularizer: a `tf.Tensor` which computes a data-independent regularizer
(e.g. weight decay) that is to be applied at every iteration.
"""
learner = self.learners[split]
# Build the learner and its variables outside the name scope.
learner.build()
with tf.name_scope(split):
data_src = self.next_data[split]
if self.distribute:
with self.strategy.scope():
# We need to split both support and query sets across GPUs, and
# tf.data doesn't make this straightforward, as there are few
# functions for splitting up datasets. We use unbatch to accomplish
# this, but unbatch splits the first dimension and creates one
# example for every possible index along that dimension.
#
# Therefore, the strategy is to first compute the chunk boundaries
# for the support and query sets (chunk_bounds), pad the last
# chunks to match the earlier chunks along the first axis,
# and then stack all chunks along a new first axis.
# unbatch() then correctly splits the episode across gpus. We then
# trim the padding from later chunks (trim_extra). The result is a
# single dataset with chunks for each GPU interleaved with one
# another. We use shard() to split this single dataset into one
# dataset per gpu.
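          #
          # As a concrete (hypothetical) example with 4 replicas and a support
          # set of 10 images: num_per_gpu = ceil(10 / 4) = 3, so the chunks
          # cover indices [0:3], [3:6], [6:9] and [9:10]; the last chunk is
          # padded with 2 dummy rows so that all chunks stack to a leading
          # shape of [4, 3, ...]. unbatch() then yields one chunk per replica,
          # which shard() routes to its GPU.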
def chunk_bounds(x, num_gpu, idx):
num_per_gpu = tf.cast(
tf.ceil(
tf.cast(tf.shape(x)[0], tf.float32) /
tf.cast(num_gpu, tf.float32)), tf.int32)
lb = tf.minimum(idx * num_per_gpu, tf.shape(x)[0])
ub = tf.minimum((idx + 1) * num_per_gpu, tf.shape(x)[0])
return lb, ub, tf.minimum((idx + 1) * num_per_gpu - ub, num_per_gpu)
def chunk_array(arr):
"""Deterministically chunk the arrays across devices."""
num_gpu = self.strategy.num_replicas_in_sync
chunks = []
num = []
for idx in range(num_gpu):
lb, ub, num_extra = chunk_bounds(arr, num_gpu, idx)
pad = tf.tile(arr[0:1], [num_extra] + [1] *
(len(arr.shape) - 1)) * 0 - 1
chunks.append(tf.concat([arr[lb:ub], pad], axis=0))
num.append(ub - lb)
return tf.stack(chunks), tf.stack(num)
def chunk_episode(episode):
way_tiled = episode.way + tf.zeros(
[self.strategy.num_replicas_in_sync], dtype=tf.int32)
return ((way_tiled, chunk_array(episode.support_images)[0],
chunk_array(episode.support_labels)[0]) +
chunk_array(episode.support_class_ids) +
(chunk_array(episode.query_images)[0],
chunk_array(episode.query_labels)[0]) +
chunk_array(episode.query_class_ids))
def trim_extra(way, support_images, support_labels, support_class_ids,
support_num, query_images, query_labels,
query_class_ids, query_num):
return providers.EpisodePiece(support_images[:support_num],
query_images[:query_num],
support_labels[:support_num],
query_labels[:query_num],
support_class_ids[:support_num],
query_class_ids[:query_num], way)
chunked_data = data_src.map(chunk_episode).unbatch().map(trim_extra)
def input_fn(input_context):
return chunked_data.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
data = self.strategy.make_input_fn_iterator(input_fn)
# Return a function that computes the regularizers, to defer their
# computation till after the computational graph is instantiated
# (otherwise the graph will be empty and no regularizers found).
regularizer = self.learners[split].compute_regularizer
self.data_initializeable_iterators.append(data)
else:
data = lambda: data_src
regularizer = self.learners[split].compute_regularizer
def run(data_local):
"""Run the forward pass of the model."""
predictions_dist = self.learners[split].forward_pass(data_local)
loss_dist = self.learners[split].compute_loss(
predictions=predictions_dist,
onehot_labels=data_local.onehot_labels)
accuracy_dist = self.learners[split].compute_accuracy(
predictions=predictions_dist,
onehot_labels=data_local.onehot_labels)
episode_info = self.get_episode_info(data_local)
return {
'predictions': predictions_dist,
'loss': loss_dist,
'accuracy': accuracy_dist,
'episode_info': episode_info,
}
return run, data, regularizer
def get_episode_info(self, data):
"""Sets the Tensors for the info about the learner's next episode."""
res = {}
# The batch trainer receives episodes only for the valid and test splits.
# Therefore for the train split there is no defined way and shots.
if isinstance(data, providers.Batch):
(way_, shots_, class_props_, class_ids_, query_targets_) = [None] * 5
else:
way_ = data.way
shots_ = data.support_shots
class_ids_ = data.unique_class_ids
class_props_ = None
if self.eval_imbalance_dataset:
class_props_ = compute_class_proportions(
class_ids_, shots_, self.eval_imbalance_dataset_spec)
query_targets_ = data.query_labels
res['way'] = way_
res['shots'] = shots_
res['class_props'] = class_props_
res['class_ids'] = class_ids_
res['query_targets'] = query_targets_
return res
def create_summary_writer(self):
"""Create summaries and writer."""
# Add summaries for the losses / accuracies of the different learners.
standard_summaries = []
for split in self.required_splits:
with tf.name_scope(split):
loss_summary = tf.summary.scalar('loss', self.losses[split])
acc_summary = tf.summary.scalar('acc',
tf.reduce_mean(self.accuracies[split]))
standard_summaries.append(loss_summary)
standard_summaries.append(acc_summary)
# Add summaries for the way / shot / logits / targets of the learner.
evaluation_summaries = self.add_eval_summaries()
# All summaries.
self.standard_summaries = tf.summary.merge(standard_summaries)
self.evaluation_summaries = tf.summary.merge(evaluation_summaries)
# Get a writer.
self.summary_writer = None
if self.summary_dir is not None:
self.summary_writer = tf.summary.FileWriter(self.summary_dir)
if not tf.io.gfile.exists(self.summary_dir):
tf.io.gfile.makedirs(self.summary_dir)
def create_learner(self,
is_training,
learner_class,
split,
tied_learner=None):
"""Instantiates `learner_class`, tying weights to `tied_learner`."""
if issubclass(learner_class, learners.BatchLearner):
logit_dim = self._get_logit_dim(
split, is_batch_learner=True, is_training=is_training)
elif issubclass(learner_class, learners.EpisodicLearner):
logit_dim = self._get_logit_dim(
split, is_batch_learner=False, is_training=is_training)
else:
raise ValueError('The specified `learner_class` should be a subclass of '
'`learners.BatchLearner` or `learners.EpisodicLearner`, '
'but received {}.'.format(learner_class))
if (issubclass(learner_class, experimental_learners.ExperimentalLearner) and
tied_learner is not None):
return learner_class(
is_training=is_training,
logit_dim=logit_dim,
input_shape=self.image_shape,
embedding_fn=tied_learner.embedding_fn,
)
else:
return learner_class(
is_training=is_training,
logit_dim=logit_dim,
input_shape=self.image_shape,
)
def get_benchmark_specification(self, records_root_dir=None):
"""Returns a BenchmarkSpecification.
Args:
records_root_dir: Optional. If provided, a list or string that sets the
directory in which a child directory will be searched for each dataset
to locate that dataset's records and dataset specification. If it's a
string, that path will be used for all datasets. If it's a list, its
length must be the same as the number of datasets, in order to specify a
different such directory for each. If None, self.records_root_dir will
be used for all datasets.
Raises:
RuntimeError: Incorrect file_pattern detected in a dataset specification.
"""
(data_spec_list, has_dag_ontology, has_bilevel_ontology,
splits_to_contribute) = [], [], [], []
seen_datasets = set()
eval_dataset_list = self.eval_dataset_list
# No need to read specs when specs not available and not needed.
if self._fixed_eval:
eval_dataset_list = []
if self.is_training:
benchmark_datasets = self.train_dataset_list + eval_dataset_list
else:
benchmark_datasets = eval_dataset_list
if isinstance(records_root_dir, list):
if len(records_root_dir) != len(benchmark_datasets):
        raise ValueError('The given records_root_dir is a list whose length is '
                         'not the same as the number of benchmark datasets. '
                         'Found datasets {} (for the {} phase) but '
                         'len(records_root_dir) is {}. Expected their lengths '
                         'to match or records_root_dir to be a string.'.format(
                             benchmark_datasets,
                             'training' if self.is_training else 'evaluation',
                             len(records_root_dir)))
records_roots_for_datasets = records_root_dir
elif isinstance(records_root_dir, six.text_type):
records_roots_for_datasets = [records_root_dir] * len(benchmark_datasets)
elif records_root_dir is None:
records_roots_for_datasets = [self.records_root_dir
] * len(benchmark_datasets)
for dataset_name, dataset_records_root in zip(benchmark_datasets,
records_roots_for_datasets):
# Might be seeing a dataset for a second time if it belongs to both the
# train and eval dataset lists.
if dataset_name in seen_datasets:
continue
dataset_records_path = os.path.join(dataset_records_root, dataset_name)
data_spec = dataset_spec_lib.load_dataset_spec(dataset_records_path)
# Only ImageNet has a DAG ontology.
has_dag = (dataset_name.startswith('ilsvrc_2012'))
# Only Omniglot has a bi-level ontology.
is_bilevel = (dataset_name == 'omniglot')
# The meta-splits that this dataset will contribute data to.
if not self.is_training:
# If we're meta-testing, all datasets contribute only to meta-test.
splits = {self.eval_split}
else:
splits = set()
if dataset_name in self.train_dataset_list:
splits.add(TRAIN_SPLIT)
if dataset_name in self.eval_dataset_list:
splits.add(VALID_SPLIT)
# By default, all classes of each split will eventually be used for
# episode creation. But it might be that for some datasets, it is
# requested to restrict the available number of classes of some splits.
restricted_classes_per_split = {}
if dataset_name in self.restrict_classes:
classes_per_split = self.restrict_classes[dataset_name]
for split, num_classes in classes_per_split.items():
          # The option to restrict classes is not supported in conjunction with
          # non-uniform (bilevel or hierarchical) class sampling.
episode_descr_config = (
self.train_episode_config
if split == TRAIN_SPLIT else self.eval_episode_config)
if has_dag and not episode_descr_config.ignore_dag_ontology:
raise ValueError('Restrictions on the class set of a dataset with '
'a DAG ontology are not supported when '
'ignore_dag_ontology is False.')
if is_bilevel and not episode_descr_config.ignore_bilevel_ontology:
raise ValueError('Restrictions on the class set of a dataset with '
'a bilevel ontology are not supported when '
'ignore_bilevel_ontology is False.')
restricted_classes_per_split[get_split_enum(split)] = num_classes
# Initialize the DatasetSpecificaton to account for this restriction.
data_spec.initialize(restricted_classes_per_split)
# Log the applied restrictions.
logging.info('Restrictions for dataset %s:', dataset_name)
for split in list(splits):
num_classes = data_spec.get_classes(get_split_enum(split))
logging.info('\t split %s is restricted to %d classes', split,
num_classes)
# Add this dataset to the benchmark.
logging.info('Adding dataset %s', data_spec.name)
data_spec_list.append(data_spec)
has_dag_ontology.append(has_dag)
has_bilevel_ontology.append(is_bilevel)
splits_to_contribute.append(splits)
# Book-keeping.
seen_datasets.add(dataset_name)
if self.eval_imbalance_dataset:
self.eval_imbalance_dataset_spec = data_spec
assert len(data_spec_list) == 1, ('Imbalance analysis is only '
'supported on one dataset at a time.')
benchmark_spec = dataset_spec_lib.BenchmarkSpecification(
'benchmark', self.image_shape, data_spec_list, has_dag_ontology,
has_bilevel_ontology, splits_to_contribute)
# Logging of which datasets will be used for the different meta-splits.
splits_to_datasets = collections.defaultdict(list)
for dataset_spec, splits_to_contribute in zip(data_spec_list,
splits_to_contribute):
for split in splits_to_contribute:
splits_to_datasets[split].append(dataset_spec.name)
for split, datasets in splits_to_datasets.items():
logging.info('Episodes for split %s will be created from %s', split,
datasets)
return benchmark_spec
def initialize_session(self):
"""Initializes a tf.Session."""
if self.enable_tf_optimizations:
self.sess = tf.Session()
else:
session_config = tf.ConfigProto()
rewrite_options = session_config.graph_options.rewrite_options
rewrite_options.disable_model_pruning = True
rewrite_options.constant_folding = rewrite_options.OFF
rewrite_options.arithmetic_optimization = rewrite_options.OFF
rewrite_options.remapping = rewrite_options.OFF
rewrite_options.shape_optimization = rewrite_options.OFF
rewrite_options.dependency_optimization = rewrite_options.OFF
rewrite_options.function_optimization = rewrite_options.OFF
rewrite_options.layout_optimizer = rewrite_options.OFF
rewrite_options.loop_optimization = rewrite_options.OFF
rewrite_options.memory_optimization = rewrite_options.NO_MEM_OPT
self.sess = tf.Session(config=session_config)
# Restore or initialize the variables.
self.sess.run(tf.global_variables_initializer())
self.sess.run(tf.local_variables_initializer())
for it in self.data_initializeable_iterators:
self.sess.run(it.initialize())
def initialize_saver(self):
"""Initializes a tf.train.Saver and possibly restores parameters."""
# TODO(eringrant): Implement saving and restoring for
# `ExperimentalLearner`s.
    # We omit from saving and restoring any variable whose name contains as a
    # substring anything in the list `self.omit_from_saving_and_reloading`.
# For example, those that track iterator state.
logging.info(
'Omitting from saving / restoring any variable that '
'contains any of the following substrings: %s',
self.omit_from_saving_and_reloading)
def is_not_requested_to_omit(variable_name):
return all([
substring not in variable_name
for substring in self.omit_from_saving_and_reloading
])
# TODO(doersch): the replica_ variables are created by the keras
# distributed optimizer, and are copies of the optimizer (e.g. Adam)
# variables. There's probably a smarter way to avoid saving them.
var_list = list([
var for var in tf.global_variables()
if is_not_requested_to_omit(var.name) and 'replica_' not in var.name
])
if var_list:
self.saver = tf.train.Saver(var_list=var_list, max_to_keep=1200)
else:
self.saver = None
logging.info('Variables not being saved since no variables left after '
'filtering.')
if self.checkpoint_to_restore:
if not self.saver:
raise ValueError(
'Checkpoint not restored, since there is no Saver created. This is '
'likely due to no parameters being available. If you intend to run '
'parameterless training, set `checkpoint_to_restore` to None.')
if self.is_training:
# To handle pre-emption, we continue from the latest checkpoint if
# checkpoints already exist in the checkpoint directory.
latest_checkpoint = None
if self.checkpoint_dir is not None:
latest_checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir)
if latest_checkpoint is not None:
if not self.saver:
raise ValueError(
'Checkpoint not restored, since there is no Saver created. This '
'is likely due to no parameters being available. ')
restore_or_log_informative_error(self.saver, self.sess,
latest_checkpoint)
elif self.checkpoint_to_restore:
logging.info('No training checkpoints found.')
# For training episodic models from a checkpoint, we restore the
# backbone weights but omit other (e.g., optimizer) parameters.
backbone_vars_to_reload = [
var for var in tf.global_variables()
if functional_backbones.is_backbone_variable(
var.name, only_if=is_not_requested_to_omit)
]
backbone_saver = tf.train.Saver(
var_list=backbone_vars_to_reload, max_to_keep=1)
restore_or_log_informative_error(backbone_saver, self.sess,
self.checkpoint_to_restore)
logging.info(
'Restored only vars %s from provided `checkpoint_to_restore`: %s',
[var.name for var in backbone_vars_to_reload],
self.checkpoint_to_restore)
else:
logging.info(
'No checkpoints found; training from random initialization.')
elif self.checkpoint_to_restore is not None:
# For evaluation, we restore more than the backbone (embedding function)
# variables from the provided checkpoint, so we use `self.saver`.
restore_or_log_informative_error(self.saver, self.sess,
self.checkpoint_to_restore)
else:
logging.info(
'No checkpoints found; evaluating with a random initialization.')
def get_batch_or_episodic_specification(self, split):
if split == TRAIN_SPLIT:
return self._create_train_specification()
else:
return self._create_eval_specification(split)
def _create_train_specification(self):
"""Returns a `BatchSpecification` or `EpisodeSpecification` for training."""
if (issubclass(self.train_learner_class, learners.EpisodicLearner) or
self.eval_split == TRAIN_SPLIT):
return learning_spec.EpisodeSpecification(learning_spec.Split.TRAIN,
self.num_classes_train,
self.num_support_train,
self.num_query_train)
elif issubclass(self.train_learner_class, learners.BatchLearner):
return learning_spec.BatchSpecification(learning_spec.Split.TRAIN,
self.batch_size)
else:
raise ValueError('The specified `learner_class` should be a subclass of '
'`learners.BatchLearner` or `learners.EpisodicLearner`, '
'but received {}.'.format(self.train_learner_class))
def _create_eval_specification(self, split=TEST_SPLIT):
"""Create an `EpisodeSpecification` for episodic evaluation.
Args:
split: The split from which to generate the `EpisodeSpecification`.
Returns:
An `EpisodeSpecification`.
Raises:
ValueError: Invalid `split`.
"""
if split not in (VALID_SPLIT, TEST_SPLIT):
raise UnexpectedSplitError(
split, expected_splits=(VALID_SPLIT, TEST_SPLIT))
split_enum = get_split_enum(split)
return learning_spec.EpisodeSpecification(split_enum, self.num_classes_eval,
self.num_support_eval,
self.num_query_eval)
def _restrict_dataset_list_for_split(self, split, splits_to_contribute,
dataset_list):
"""Returns the restricted dataset_list for the given split.
Args:
split: A string, either TRAIN_SPLIT, VALID_SPLIT or TEST_SPLIT.
splits_to_contribute: A list whose length is the number of datasets in the
benchmark. Each element is a set of strings corresponding to the splits
that the respective dataset will contribute to.
dataset_list: A list which has one element per selected dataset (same
length as splits_to_contribute), e.g. this can be one of the lists
dataset_spec_list, has_dag_ontology, has_bilevel_ontology of the
BenchmarkSpecification.
"""
updated_list = []
for dataset_num, dataset_splits in enumerate(splits_to_contribute):
if split in dataset_splits:
updated_list.append(dataset_list[dataset_num])
return updated_list
def get_num_to_take(self, dataset_name, split):
"""Return the number of examples to restrict to for a dataset/split pair."""
num_to_take = -1 # By default, no restriction.
if dataset_name in self.restrict_num_per_class:
dataset_restrict_num_per_class = self.restrict_num_per_class[dataset_name]
if split in dataset_restrict_num_per_class:
num_to_take = dataset_restrict_num_per_class[split]
return num_to_take
def build_data(self, split):
"""Builds a `tf.Dataset` of episodes or batches from `split`."""
learner_class = (
self.train_learner_class
if split == TRAIN_SPLIT else self.eval_learner_class)
if (issubclass(learner_class, learners.BatchLearner) and
split != self.eval_split):
return self._build_batch(split)
elif (issubclass(learner_class, learners.EpisodicLearner) or
split == self.eval_split):
if self._fixed_eval == 'vtab':
return self._build_vtab_episode()
elif self._fixed_eval == 'dumped':
return self._build_dumped_episode(split)
else:
return self._build_episode(split)
else:
raise ValueError('The `Learner` for `split` should be a subclass of '
'`learners.BatchLearner` or `learners.EpisodicLearner`, '
'but received {}.'.format(learner_class))
def _build_vtab_episode(self):
"""Build a `tf.Dataset` of vtab episodes."""
# There is only one dataset.
# ['vtab_cifar'] -> 'cifar'
dataset_name = re.search('^vtab_(.+)', self.eval_dataset_list[0]).group(1)
# Replace back comma.
dataset_name = dataset_name.replace(':', ',')
return_vals = read_episodes.read_vtab_as_episode(
dataset_name,
self.data_config.image_height,
query_size_limit=self.data_config.vtab_query_size_limit)
support_ds, query_ds, n_eval, n_classes = return_vals
self.vtab_test_classes = n_classes
logging.info('Using VTAB episode for eval. Dataset: %s, n_eval: %d',
dataset_name, n_eval)
episodes = tf.data.Dataset.zip((support_ds.repeat(), query_ds.repeat()))
self.num_eval_episodes = n_eval
def create_episode_struct(support_data, query_data):
return providers.Episode(
support_images=support_data['image'],
query_images=query_data['image'],
support_labels=support_data['label'],
query_labels=query_data['label'],
support_class_ids=support_data['label'],
query_class_ids=query_data['label'])
return episodes.map(create_episode_struct)
def _build_dumped_episode(self, split):
"""Builds a `tf.Dataset` of episodes through reading dumped episodes."""
dataset_name = re.search('^dumped_(.+)', self.eval_dataset_list[0]).group(1)
folder_path = os.path.join(self.data_config.eval_dumped_episodes_dir,
'valid' if split == VALID_SPLIT else 'test',
dataset_name)
dumped_episode_ds, n_eval = read_episodes.read_episodes_from_records(
folder_path)
# If we request less than the available number of episodes, we use that
# number instead.
n_eval = min(self.num_eval_episodes, n_eval)
logging.info('Using dumped episode for eval. Dataset: %s, n_eval: %d',
dataset_name, n_eval)
self.num_eval_episodes = n_eval
map_fn = functools.partial(
pipeline.process_dumped_episode,
image_size=self.data_config.image_height)
dataset = dumped_episode_ds.map(map_fn)
# Overlap episode processing and training.
data_pipeline = dataset.prefetch(1)
data_pipeline = apply_dataset_options(data_pipeline)
def create_episode_struct(support_images, support_labels, support_class_ids,
query_images, query_labels, query_class_ids):
return providers.Episode(
support_images=support_images,
query_images=query_images,
support_labels=support_labels,
query_labels=query_labels,
support_class_ids=support_class_ids,
query_class_ids=query_class_ids)
return data_pipeline.map(create_episode_struct)
def _build_episode(self, split):
"""Builds a `tf.Dataset` containing Episodes for "split".
Args:
split: A string, either TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT.
Returns:
An `tf.Dataset` with Episodes.
Raises:
UnexpectedSplitError: If split not as expected for this episode build.
"""
shuffle_buffer_size = self.data_config.shuffle_buffer_size
read_buffer_size_bytes = self.data_config.read_buffer_size_bytes
num_prefetch = self.data_config.num_prefetch
(_, image_shape, dataset_spec_list, has_dag_ontology, has_bilevel_ontology,
splits_to_contribute) = self.benchmark_spec
# Choose only the datasets that are chosen to contribute to the given split.
dataset_spec_list = self._restrict_dataset_list_for_split(
split, splits_to_contribute, dataset_spec_list)
has_dag_ontology = self._restrict_dataset_list_for_split(
split, splits_to_contribute, has_dag_ontology)
has_bilevel_ontology = self._restrict_dataset_list_for_split(
split, splits_to_contribute, has_bilevel_ontology)
episode_spec = self.split_episode_or_batch_specs[split]
dataset_split = episode_spec[0]
# TODO(lamblinp): Support non-square shapes if necessary. For now, all
# images are resized to square, even if it changes the aspect ratio.
image_size = image_shape[0]
if image_shape[1] != image_size:
raise ValueError(
'Expected a square image shape, not {}'.format(image_shape))
if split == TRAIN_SPLIT:
episode_descr_config = self.train_episode_config
elif split in (VALID_SPLIT, TEST_SPLIT):
episode_descr_config = self.eval_episode_config
else:
raise UnexpectedSplitError(split)
# Decide how many examples per class to restrict to for each dataset for the
# given split (by default there is no restriction).
num_per_class = [] # A list whose length is the number of datasets.
for dataset_spec in dataset_spec_list:
num_per_class.append(self.get_num_to_take(dataset_spec.name, split))
if split == TRAIN_SPLIT:
# The learner for the training split should only be in training mode if
# the evaluation split is not the training split.
gin_scope_name = ('train'
if self.eval_split != TRAIN_SPLIT else 'evaluation')
else:
gin_scope_name = 'evaluation'
ignore_hierarchy_prob = episode_descr_config.ignore_hierarchy_probability
simclr_episode_fraction = episode_descr_config.simclr_episode_fraction
if simclr_episode_fraction > 0:
assert not self.enable_tf_optimizations, (
'Must set enable_tf_optimizations=False or SimCLR will fail; see '
'https://github.com/tensorflow/tensorflow/issues/22145')
# TODO(lamblinp): pass specs directly to the pipeline builder.
# TODO(lamblinp): move the special case directly in make_..._pipeline
if len(dataset_spec_list) == 1:
use_dag_ontology = has_dag_ontology[0]
if self.eval_finegrainedness or self.eval_imbalance_dataset:
use_dag_ontology = False
with gin.config_scope(gin_scope_name):
data_pipeline = pipeline.make_one_source_episode_pipeline(
dataset_spec_list[0],
use_dag_ontology=use_dag_ontology,
use_bilevel_ontology=has_bilevel_ontology[0],
split=dataset_split,
episode_descr_config=episode_descr_config,
shuffle_buffer_size=shuffle_buffer_size,
read_buffer_size_bytes=read_buffer_size_bytes,
num_prefetch=num_prefetch,
image_size=image_size,
num_to_take=num_per_class[0],
simclr_episode_fraction=simclr_episode_fraction,
ignore_hierarchy_probability=ignore_hierarchy_prob)
else:
if ignore_hierarchy_prob > 0.0:
raise ValueError(
'ignore_hierarchy_probability not supported with multisource pipelines'
)
with gin.config_scope(gin_scope_name):
data_pipeline = pipeline.make_multisource_episode_pipeline(
dataset_spec_list,
use_dag_ontology_list=has_dag_ontology,
use_bilevel_ontology_list=has_bilevel_ontology,
split=dataset_split,
episode_descr_config=episode_descr_config,
shuffle_buffer_size=shuffle_buffer_size,
read_buffer_size_bytes=read_buffer_size_bytes,
num_prefetch=num_prefetch,
image_size=image_size,
num_to_take=num_per_class,
simclr_episode_fraction=simclr_episode_fraction)
data_pipeline = apply_dataset_options(data_pipeline)
def create_episode_struct(support_images, support_labels, support_class_ids,
query_images, query_labels, query_class_ids):
return providers.Episode(
support_images=support_images,
query_images=query_images,
support_labels=support_labels,
query_labels=query_labels,
support_class_ids=support_class_ids,
query_class_ids=query_class_ids)
return data_pipeline.map(lambda x, y: x).map(create_episode_struct)
def _build_batch(self, split):
"""Builds a `tf.Dataset` of Batch objects containing data for "split".
Args:
split: A string, either TRAIN_SPLIT, VALID_SPLIT, or TEST_SPLIT.
Returns:
A `tf.Dataset` containing Batches
"""
shuffle_buffer_size = self.data_config.shuffle_buffer_size
read_buffer_size_bytes = self.data_config.read_buffer_size_bytes
num_prefetch = self.data_config.num_prefetch
(_, image_shape, dataset_spec_list, _, _,
splits_to_contribute) = self.benchmark_spec
# Choose only the datasets that are chosen to contribute to the given split.
dataset_spec_list = self._restrict_dataset_list_for_split(
split, splits_to_contribute, dataset_spec_list)
# Decide how many examples per class to restrict to for each dataset for the
# given split (by default there is no restriction).
num_per_class = [] # A list whose length is the number of datasets.
for dataset_spec in dataset_spec_list:
num_per_class.append(self.get_num_to_take(dataset_spec.name, split))
dataset_split, batch_size = self.split_episode_or_batch_specs[split]
for dataset_spec in dataset_spec_list:
if dataset_spec.name in DATASETS_WITH_EXAMPLE_SPLITS:
raise ValueError(
'Batch pipeline is used only at meta-train time, and does not '
'handle datasets with example splits, which should only be used '
'at meta-test (evaluation) time.')
# TODO(lamblinp): pass specs directly to the pipeline builder.
# TODO(lamblinp): move the special case directly in make_..._pipeline
if len(dataset_spec_list) == 1:
data_pipeline = pipeline.make_one_source_batch_pipeline(
dataset_spec_list[0],
split=dataset_split,
batch_size=batch_size,
shuffle_buffer_size=shuffle_buffer_size,
read_buffer_size_bytes=read_buffer_size_bytes,
num_prefetch=num_prefetch,
image_size=image_shape[0],
num_to_take=num_per_class[0])
else:
data_pipeline = pipeline.make_multisource_batch_pipeline(
dataset_spec_list,
split=dataset_split,
batch_size=batch_size,
shuffle_buffer_size=shuffle_buffer_size,
read_buffer_size_bytes=read_buffer_size_bytes,
num_prefetch=num_prefetch,
image_size=image_shape[0],
num_to_take=num_per_class)
data_pipeline = apply_dataset_options(data_pipeline)
def create_batch_structure(data, dataset_index):
(images, class_ids) = data
# The number of available classes for each dataset
all_n_classes = [
len(dataset_spec.get_classes(get_split_enum(split)))
for dataset_spec in dataset_spec_list
]
if len(dataset_spec_list) == 1:
n_classes = all_n_classes[0]
elif gin.query_parameter('BatchSplitReaderGetReader.add_dataset_offset'):
# The total number of classes is the sum for all datasets
n_classes = sum(all_n_classes)
else:
# The number of classes is the one of the current dataset
n_classes = tf.convert_to_tensor(all_n_classes)[dataset_index]
return providers.Batch(
images=images, labels=class_ids, n_classes=n_classes)
return data_pipeline.map(create_batch_structure)
def get_run_fn_with_train_op(self, run_fn, regularizer_fn, global_step):
"""Returns the operation that performs a training update."""
def run_fn_with_train_op(data):
"""Run and train the model."""
res = run_fn(data)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
loss = distribute_utils.aggregate(res['loss'])
# note: every worker computes the same loss. This is because the
# reduce_mean needs to be computed globally.
loss = tf.reduce_mean(loss)
loss += regularizer_fn()
replica_ctx = tf.distribute.get_replica_context()
if replica_ctx:
loss /= replica_ctx.num_replicas_in_sync
# TODO(doersch): there's probably a better way to do EMA updates. EMAs
# are MirroredVariables, which means the assign needs to happen on every
# replica. I think what's happening is that the update from replica 1
# is getting run on every replica for a different copy of the mirrored
# variable. Only running the assign ops from the first replica doesn't
# work.
with tf.control_dependencies([tf.group(update_ops)]):
if self.normalized_gradient_descent:
opt_vars = tf.trainable_variables()
if self.distribute:
grads = self.optimizer.get_gradients(loss, opt_vars)
grads_and_vars = list(zip(grads, opt_vars))
else:
grads_and_vars = self.optimizer.compute_gradients(loss, opt_vars)
global_norm = 0
# We reverse the order of grads_and_vars because they're computed in
# reverse order; this way, the network can begin aggregating the
# global norm before the backwards pass is complete.
for g, v in grads_and_vars[::-1]:
if replica_ctx:
g = replica_ctx.all_reduce('sum', g)
sumsq = tf.reduce_sum(tf.square(g))
global_norm += sumsq
nrm = tf.sqrt(tf.maximum(global_norm, 1e-5))
grads_and_vars2 = []
for g, v in grads_and_vars:
grads_and_vars2.append((g / nrm, v))
train_op = self.optimizer.apply_gradients(grads_and_vars2)
with tf.control_dependencies([train_op]):
train_op = tf.assign(global_step, global_step + 1)
else:
train_op = self.optimizer.minimize(
loss, global_step=global_step, var_list=tf.trainable_variables())
res['train_op'] = train_op
return res
return run_fn_with_train_op
def get_updated_global_step(self):
with tf.control_dependencies([self.train_op]):
global_step = tf.identity(tf.train.get_global_step())
return global_step
def train(self):
"""The training loop."""
global_step = self.sess.run(tf.train.get_global_step())
logging.info('Starting training from global_step: %d', global_step)
updated_global_step = self.get_updated_global_step()
should_save = self.checkpoint_dir is not None
if should_save and global_step == 0:
# Save the initialization weights.
save_path = self.saver.save(
self.sess, os.path.join(self.checkpoint_dir, 'model_0.ckpt'))
logging.info('Model initialization saved: %s', save_path)
# Compute the initial validation performance before starting the training,
# unless train() has already been called on this object.
if np.isnan([self.valid_acc, self.valid_ci]).any():
self.maybe_evaluate(global_step)
while global_step < self.num_updates:
# Perform the next update.
(_, train_loss, train_acc, global_step) = self.sess.run([
self.train_op, self.losses[TRAIN_SPLIT], self.accuracies[TRAIN_SPLIT],
updated_global_step
])
train_acc = np.mean(train_acc)
# Maybe validate, depending on the global step's value.
self.maybe_evaluate(global_step)
# Log training progress.
if not global_step % self.log_every:
message = (
'Update %d. Train loss: %f, Train accuracy: %f, '
'Valid accuracy %f +/- %f.\n' %
(global_step, train_loss, train_acc, self.valid_acc, self.valid_ci))
logging.info(message)
# Update summaries.
if self.summary_writer:
summaries = self.sess.run(self.standard_summaries)
self.summary_writer.add_summary(summaries, global_step)
if should_save and global_step % self.checkpoint_every == 0:
save_path = self.saver.save(
self.sess,
os.path.join(self.checkpoint_dir, 'model_%d.ckpt' % global_step))
logging.info('Model checkpoint saved: %s', save_path)
def maybe_evaluate(self, global_step):
"""Maybe perform evaluation, depending on the value of global_step."""
if not global_step % self.validate_every:
# Get the validation accuracy and confidence interval.
(valid_acc, valid_ci, valid_acc_summary,
valid_ci_summary) = self.evaluate(
VALID_SPLIT, step=global_step)
# Validation summaries are updated every time validation happens which is
# every validate_every steps instead of log_every steps.
if self.summary_writer:
self.summary_writer.add_summary(valid_acc_summary, global_step)
self.summary_writer.add_summary(valid_ci_summary, global_step)
self.valid_acc = valid_acc
self.valid_ci = valid_ci
# TODO(evcu) Improve this so that if the eval_only loads a global_step, it is
# used at logging instead of value 0.
def evaluate(self, split, step=0):
"""Returns performance metrics across num_eval_trials episodes / batches."""
num_eval_trials = self.num_eval_episodes
logging.info('Performing evaluation of the %s split using %d episodes...',
split, num_eval_trials)
accuracies = []
total_samples = 0
for eval_trial_num in range(num_eval_trials):
# Following is used to normalize accuracies.
acc, summaries = self.sess.run(
[self.accuracies[split], self.evaluation_summaries])
# Write complete summaries during evaluation, but not training.
# Otherwise, validation summaries become too big.
if not self.is_training and self.summary_writer:
self.summary_writer.add_summary(summaries, eval_trial_num)
if self._fixed_eval == 'vtab':
accuracies.append(np.sum(acc))
total_samples += np.size(acc)
continue
accuracies.append(np.mean(acc))
total_samples += 1
logging.info('Finished evaluation.')
mean_acc = np.sum(accuracies) / total_samples
    ci_acc = np.std(accuracies) * 1.96 / np.sqrt(len(accuracies))  # 95% confidence interval
# VTAB evaluation has 1 episode.
if self._fixed_eval == 'vtab':
ci_acc = 0
if not self.is_training:
# Logging during training is handled by self.train() instead.
logging.info('Accuracy on the meta-%s split: %f, +/- %f.\n', split,
mean_acc, ci_acc)
with tf.name_scope('trainer_metrics'):
with tf.name_scope(split):
mean_acc_summary = tf.Summary()
mean_acc_summary.value.add(tag='mean acc', simple_value=mean_acc)
ci_acc_summary = tf.Summary()
ci_acc_summary.value.add(tag='acc CI', simple_value=ci_acc)
return mean_acc, ci_acc, mean_acc_summary, ci_acc_summary
def add_eval_summaries(self):
"""Returns summaries of way / shot / classes/ logits / targets."""
evaluation_summaries = [
tf.summary.scalar('global_step', tf.train.get_global_step())
]
for split in self.required_splits:
evaluation_summaries.extend(self._add_eval_summaries_split(split))
return evaluation_summaries
def _add_eval_summaries_split(self, split):
"""Returns split's summaries of way / shot / classes / logits / targets."""
split_eval_summaries = []
episode_info = copy.copy(self.episode_info[split])
episode_info['query_logits'] = self.predictions[split]
summary_labels = [
'way', 'shots', 'class_ids', 'query_logits', 'query_targets'
]
if self.eval_imbalance_dataset:
summary_labels += ['class_props']
for label in summary_labels:
if episode_info[label] is not None:
if episode_info[label].shape:
summary_fn = tf.summary.tensor_summary
else:
summary_fn = tf.summary.scalar
summary = summary_fn('%s_%s' % (split, label), episode_info[label])
split_eval_summaries.append(summary)
return split_eval_summaries
def _get_logit_dim(self, split, is_batch_learner, is_training):
"""Returns the total number of logits needed.
Args:
split: string, one of TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT.
is_batch_learner: bool, if True the logit count is obtained from dataset
spec. If False, `max_ways` is used for episodic dataset.
is_training: bool, used to decide number of logits.
Returns:
int, total number of logits needed.
"""
if self._fixed_eval == 'vtab':
return self.vtab_test_classes
if is_batch_learner:
# Get the total number of classes in this split, across all datasets
# contributing to this split.
total_classes = 0
for dataset_spec, dataset_splits in zip(
self.benchmark_spec.dataset_spec_list,
self.benchmark_spec.splits_to_contribute):
if any(
get_split_enum(ds_split) == split for ds_split in dataset_splits):
total_classes += len(dataset_spec.get_classes(split))
else:
total_classes = (
self.train_episode_config.max_ways
if is_training else self.eval_episode_config.max_ways)
return total_classes
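# Illustrative sketch (not part of the original trainer): the normalized
# gradient descent branch in get_run_fn_with_train_op rescales every gradient
# by the global L2 norm across all variables before applying the update. A
# minimal NumPy version of that rescaling, assuming plain SGD and made-up
# gradients, looks like this:
def _normalized_sgd_step_sketch(params, grads, lr=0.1, eps=1e-5):
  """Scale all gradients by the global L2 norm, then take one SGD step."""
  import numpy as _np
  global_sq = sum(float(_np.sum(_np.square(g))) for g in grads)
  nrm = _np.sqrt(max(global_sq, eps))
  return [p - lr * (g / nrm) for p, g in zip(params, grads)]
# Example: _normalized_sgd_step_sketch([np.ones(3)], [np.full(3, 4.0)])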
|
the-stack_106_18780
|
from dataframe import data
def test_data_types():
headers = ['Breed', 'Color', 'DogName', 'ExpYear', 'LicenseType', 'OwnerZip', 'ValidDate']
# Only 'ExpYear' has the 'int64' type. All others have 'object' type.
    headers_int64 = {headers[3]}
    headers_object = set(headers) - headers_int64
    # Check that the data has the expected data types
    data_headers_object = set(data.select_dtypes(include=['object']).columns.values)
    data_headers_int64 = set(data.select_dtypes(include=['int64']).columns.values)
    assert data_headers_object == headers_object
    assert data_headers_int64 == headers_int64
def test_data_rows():
assert len(data) > 3
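# Illustrative sketch (not one of the project's tests): how select_dtypes
# splits columns by dtype on a small hand-built DataFrame. The column names
# and values below are made up for the example.
def _dtype_demo():
    import pandas as pd
    df = pd.DataFrame({"DogName": ["Rex", "Bella"], "ExpYear": [2020, 2021]})
    df = df.astype({"ExpYear": "int64"})
    object_cols = set(df.select_dtypes(include=["object"]).columns)
    int_cols = set(df.select_dtypes(include=["int64"]).columns)
    assert object_cols == {"DogName"}
    assert int_cols == {"ExpYear"}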
|
the-stack_106_18781
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from six import iteritems
from six.moves.urllib.parse import urljoin, urlsplit, urlunsplit
import requests
import requests_kerberos
from requests.exceptions import Timeout, HTTPError, InvalidURL, ConnectionError, SSLError
from datadog_checks.base import AgentCheck, is_affirmative
KERBEROS_STRATEGIES = {
'required': requests_kerberos.REQUIRED,
'optional': requests_kerberos.OPTIONAL,
'disabled': requests_kerberos.DISABLED,
}
# Default settings
DEFAULT_RM_URI = 'http://localhost:8088'
DEFAULT_TIMEOUT = 5
DEFAULT_CLUSTER_NAME = 'default_cluster'
DEFAULT_COLLECT_APP_METRICS = True
MAX_DETAILED_QUEUES = 100
# Path to retrieve cluster metrics
YARN_CLUSTER_METRICS_PATH = '/ws/v1/cluster/metrics'
# Path to retrieve YARN APPS
YARN_APPS_PATH = '/ws/v1/cluster/apps'
# Path to retrieve node statistics
YARN_NODES_PATH = '/ws/v1/cluster/nodes'
# Path to retrieve queue statistics
YARN_SCHEDULER_PATH = '/ws/v1/cluster/scheduler'
# Metric types
GAUGE = 'gauge'
INCREMENT = 'increment'
# Name of the service check
SERVICE_CHECK_NAME = 'yarn.can_connect'
# Application states to collect
YARN_APPLICATION_STATES = 'RUNNING'
# Cluster metrics identifier
YARN_CLUSTER_METRICS_ELEMENT = 'clusterMetrics'
# Cluster metrics for YARN
YARN_CLUSTER_METRICS = {
'appsSubmitted': ('yarn.metrics.apps_submitted', GAUGE),
'appsCompleted': ('yarn.metrics.apps_completed', GAUGE),
'appsPending': ('yarn.metrics.apps_pending', GAUGE),
'appsRunning': ('yarn.metrics.apps_running', GAUGE),
'appsFailed': ('yarn.metrics.apps_failed', GAUGE),
'appsKilled': ('yarn.metrics.apps_killed', GAUGE),
'reservedMB': ('yarn.metrics.reserved_mb', GAUGE),
'availableMB': ('yarn.metrics.available_mb', GAUGE),
'allocatedMB': ('yarn.metrics.allocated_mb', GAUGE),
'totalMB': ('yarn.metrics.total_mb', GAUGE),
'reservedVirtualCores': ('yarn.metrics.reserved_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.metrics.available_virtual_cores', GAUGE),
'allocatedVirtualCores': ('yarn.metrics.allocated_virtual_cores', GAUGE),
'totalVirtualCores': ('yarn.metrics.total_virtual_cores', GAUGE),
'containersAllocated': ('yarn.metrics.containers_allocated', GAUGE),
'containersReserved': ('yarn.metrics.containers_reserved', GAUGE),
'containersPending': ('yarn.metrics.containers_pending', GAUGE),
'totalNodes': ('yarn.metrics.total_nodes', GAUGE),
'activeNodes': ('yarn.metrics.active_nodes', GAUGE),
'lostNodes': ('yarn.metrics.lost_nodes', GAUGE),
'unhealthyNodes': ('yarn.metrics.unhealthy_nodes', GAUGE),
'decommissionedNodes': ('yarn.metrics.decommissioned_nodes', GAUGE),
'rebootedNodes': ('yarn.metrics.rebooted_nodes', GAUGE),
}
# Application metrics for YARN
YARN_APP_METRICS = {
'progress': ('yarn.apps.progress', INCREMENT),
'startedTime': ('yarn.apps.started_time', INCREMENT),
'finishedTime': ('yarn.apps.finished_time', INCREMENT),
'elapsedTime': ('yarn.apps.elapsed_time', INCREMENT),
'allocatedMB': ('yarn.apps.allocated_mb', INCREMENT),
'allocatedVCores': ('yarn.apps.allocated_vcores', INCREMENT),
'runningContainers': ('yarn.apps.running_containers', INCREMENT),
'memorySeconds': ('yarn.apps.memory_seconds', INCREMENT),
'vcoreSeconds': ('yarn.apps.vcore_seconds', INCREMENT),
}
# Node metrics for YARN
YARN_NODE_METRICS = {
'lastHealthUpdate': ('yarn.node.last_health_update', GAUGE),
'usedMemoryMB': ('yarn.node.used_memory_mb', GAUGE),
'availMemoryMB': ('yarn.node.avail_memory_mb', GAUGE),
'usedVirtualCores': ('yarn.node.used_virtual_cores', GAUGE),
'availableVirtualCores': ('yarn.node.available_virtual_cores', GAUGE),
'numContainers': ('yarn.node.num_containers', GAUGE),
}
# Root queue metrics for YARN
YARN_ROOT_QUEUE_METRICS = {
'maxCapacity': ('yarn.queue.root.max_capacity', GAUGE),
'usedCapacity': ('yarn.queue.root.used_capacity', GAUGE),
'capacity': ('yarn.queue.root.capacity', GAUGE),
}
# Queue metrics for YARN
YARN_QUEUE_METRICS = {
'numPendingApplications': ('yarn.queue.num_pending_applications', GAUGE),
'userAMResourceLimit.memory': ('yarn.queue.user_am_resource_limit.memory', GAUGE),
'userAMResourceLimit.vCores': ('yarn.queue.user_am_resource_limit.vcores', GAUGE),
'absoluteCapacity': ('yarn.queue.absolute_capacity', GAUGE),
'userLimitFactor': ('yarn.queue.user_limit_factor', GAUGE),
'userLimit': ('yarn.queue.user_limit', GAUGE),
'numApplications': ('yarn.queue.num_applications', GAUGE),
'usedAMResource.memory': ('yarn.queue.used_am_resource.memory', GAUGE),
'usedAMResource.vCores': ('yarn.queue.used_am_resource.vcores', GAUGE),
'absoluteUsedCapacity': ('yarn.queue.absolute_used_capacity', GAUGE),
'resourcesUsed.memory': ('yarn.queue.resources_used.memory', GAUGE),
'resourcesUsed.vCores': ('yarn.queue.resources_used.vcores', GAUGE),
'AMResourceLimit.vCores': ('yarn.queue.am_resource_limit.vcores', GAUGE),
'AMResourceLimit.memory': ('yarn.queue.am_resource_limit.memory', GAUGE),
'capacity': ('yarn.queue.capacity', GAUGE),
'numActiveApplications': ('yarn.queue.num_active_applications', GAUGE),
'absoluteMaxCapacity': ('yarn.queue.absolute_max_capacity', GAUGE),
'usedCapacity': ('yarn.queue.used_capacity', GAUGE),
'numContainers': ('yarn.queue.num_containers', GAUGE),
'maxCapacity': ('yarn.queue.max_capacity', GAUGE),
'maxApplications': ('yarn.queue.max_applications', GAUGE),
'maxApplicationsPerUser': ('yarn.queue.max_applications_per_user', GAUGE),
}
class YarnCheck(AgentCheck):
"""
    Extract statistics from YARN's ResourceManager REST API
"""
_ALLOWED_APPLICATION_TAGS = ['applicationTags', 'applicationType', 'name', 'queue', 'user']
def check(self, instance):
# Get properties from conf file
rm_address = instance.get('resourcemanager_uri', DEFAULT_RM_URI)
app_tags = instance.get('application_tags', {})
queue_blacklist = instance.get('queue_blacklist', [])
if type(app_tags) is not dict:
self.log.error("application_tags is incorrect: {} is not a dictionary".format(app_tags))
app_tags = {}
filtered_app_tags = {}
for dd_prefix, yarn_key in iteritems(app_tags):
if yarn_key in self._ALLOWED_APPLICATION_TAGS:
filtered_app_tags[dd_prefix] = yarn_key
app_tags = filtered_app_tags
# Collected by default
app_tags['app_name'] = 'name'
# Get additional tags from the conf file
custom_tags = instance.get('tags', [])
tags = list(set(custom_tags))
# Get the cluster name from the conf file
cluster_name = instance.get('cluster_name')
if cluster_name is None:
self.warning(
"The cluster_name must be specified in the instance configuration, "
"defaulting to '{}'".format(DEFAULT_CLUSTER_NAME)
)
cluster_name = DEFAULT_CLUSTER_NAME
tags.append('cluster_name:{}'.format(cluster_name))
# Get metrics from the Resource Manager
self._yarn_cluster_metrics(rm_address, instance, tags)
if is_affirmative(instance.get('collect_app_metrics', DEFAULT_COLLECT_APP_METRICS)):
self._yarn_app_metrics(rm_address, instance, app_tags, tags)
self._yarn_node_metrics(rm_address, instance, tags)
self._yarn_scheduler_metrics(rm_address, instance, tags, queue_blacklist)
def _yarn_cluster_metrics(self, rm_address, instance, addl_tags):
"""
Get metrics related to YARN cluster
"""
metrics_json = self._rest_request_to_json(rm_address, instance, YARN_CLUSTER_METRICS_PATH, addl_tags)
if metrics_json:
yarn_metrics = metrics_json[YARN_CLUSTER_METRICS_ELEMENT]
if yarn_metrics is not None:
self._set_yarn_metrics_from_json(addl_tags, yarn_metrics, YARN_CLUSTER_METRICS)
def _yarn_app_metrics(self, rm_address, instance, app_tags, addl_tags):
"""
Get metrics for running applications
"""
metrics_json = self._rest_request_to_json(
rm_address, instance, YARN_APPS_PATH, addl_tags, states=YARN_APPLICATION_STATES
)
if metrics_json and metrics_json['apps'] is not None and metrics_json['apps']['app'] is not None:
for app_json in metrics_json['apps']['app']:
tags = []
for dd_tag, yarn_key in iteritems(app_tags):
try:
val = app_json[yarn_key]
if val:
tags.append('{tag}:{value}'.format(tag=dd_tag, value=val))
except KeyError:
self.log.error("Invalid value {} for application_tag".format(yarn_key))
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, app_json, YARN_APP_METRICS)
def _yarn_node_metrics(self, rm_address, instance, addl_tags):
"""
Get metrics related to YARN nodes
"""
metrics_json = self._rest_request_to_json(rm_address, instance, YARN_NODES_PATH, addl_tags)
if metrics_json and metrics_json['nodes'] is not None and metrics_json['nodes']['node'] is not None:
for node_json in metrics_json['nodes']['node']:
node_id = node_json['id']
tags = ['node_id:{}'.format(str(node_id))]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, node_json, YARN_NODE_METRICS)
def _yarn_scheduler_metrics(self, rm_address, instance, addl_tags, queue_blacklist):
"""
Get metrics from YARN scheduler
"""
metrics_json = self._rest_request_to_json(rm_address, instance, YARN_SCHEDULER_PATH, addl_tags)
try:
metrics_json = metrics_json['scheduler']['schedulerInfo']
if metrics_json['type'] == 'capacityScheduler':
self._yarn_capacity_scheduler_metrics(metrics_json, addl_tags, queue_blacklist)
except KeyError:
pass
def _yarn_capacity_scheduler_metrics(self, metrics_json, addl_tags, queue_blacklist):
"""
        Get metrics from the YARN scheduler if its type is capacityScheduler
"""
tags = ['queue_name:{}'.format(metrics_json['queueName'])]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, metrics_json, YARN_ROOT_QUEUE_METRICS)
if metrics_json['queues'] is not None and metrics_json['queues']['queue'] is not None:
queues_count = 0
for queue_json in metrics_json['queues']['queue']:
queue_name = queue_json['queueName']
if queue_name in queue_blacklist:
self.log.debug('Queue "{}" is blacklisted. Ignoring it'.format(queue_name))
continue
queues_count += 1
if queues_count > MAX_DETAILED_QUEUES:
self.warning(
"Found more than 100 queues, will only send metrics on first 100 queues. "
"Please filter the queues with the check's `queue_blacklist` parameter"
)
break
tags = ['queue_name:{}'.format(str(queue_name))]
tags.extend(addl_tags)
self._set_yarn_metrics_from_json(tags, queue_json, YARN_QUEUE_METRICS)
def _set_yarn_metrics_from_json(self, tags, metrics_json, yarn_metrics):
"""
Parse the JSON response and set the metrics
"""
for dict_path, metric in iteritems(yarn_metrics):
metric_name, metric_type = metric
metric_value = self._get_value_from_json(dict_path, metrics_json)
if metric_value is not None:
self._set_metric(metric_name, metric_type, metric_value, tags)
def _get_value_from_json(self, dict_path, metrics_json):
"""
Get a value from a dictionary under N keys, represented as str("key1.key2...key{n}")
"""
for key in dict_path.split('.'):
if key in metrics_json:
metrics_json = metrics_json.get(key)
else:
return None
return metrics_json
def _set_metric(self, metric_name, metric_type, value, tags=None, device_name=None):
"""
Set a metric
"""
if metric_type == GAUGE:
self.gauge(metric_name, value, tags=tags, device_name=device_name)
elif metric_type == INCREMENT:
self.increment(metric_name, value, tags=tags, device_name=device_name)
else:
self.log.error('Metric type "{}" unknown'.format(metric_type))
def _rest_request_to_json(self, url, instance, object_path, tags, *args, **kwargs):
"""
Query the given URL and return the JSON response
"""
service_check_tags = ['url:{}'.format(self._get_url_base(url))] + tags
service_check_tags = list(set(service_check_tags))
if object_path:
url = self._join_url_dir(url, object_path)
# Add args to the url
if args:
for directory in args:
url = self._join_url_dir(url, directory)
auth = None
        # Authenticate our connection to the ResourceManager endpoint if required
kerberos = instance.get('kerberos')
username = instance.get('username')
password = instance.get('password')
if username is not None and password is not None:
auth = (username, password)
elif kerberos is not None:
if kerberos not in KERBEROS_STRATEGIES:
raise Exception('Invalid Kerberos strategy `{}`'.format(kerberos))
auth = requests_kerberos.HTTPKerberosAuth(
mutual_authentication=KERBEROS_STRATEGIES[kerberos],
delegate=is_affirmative(instance.get('kerberos_delegate', False)),
force_preemptive=is_affirmative(instance.get('kerberos_force_initiate', False)),
hostname_override=instance.get('kerberos_hostname'),
principal=instance.get('kerberos_principal')
)
ssl_verify = is_affirmative(instance.get('ssl_verify', True))
old_keytab_path = None
if 'kerberos_keytab' in instance:
old_keytab_path = os.getenv('KRB5_CLIENT_KTNAME')
os.environ['KRB5_CLIENT_KTNAME'] = instance['kerberos_keytab']
self.log.debug('Attempting to connect to "{}"'.format(url))
# Add kwargs as arguments
if kwargs:
query = '&'.join(['{}={}'.format(key, value) for key, value in iteritems(kwargs)])
url = urljoin(url, '?' + query)
try:
response = requests.get(url, auth=auth, verify=ssl_verify, timeout=self.default_integration_http_timeout)
response.raise_for_status()
response_json = response.json()
except Timeout as e:
self.service_check(
SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request timeout: {}, {}".format(url, e),
)
raise
except (HTTPError, InvalidURL, ConnectionError, SSLError) as e:
self.service_check(
SERVICE_CHECK_NAME,
AgentCheck.CRITICAL,
tags=service_check_tags,
message="Request failed: {}, {}".format(url, e),
)
raise
except ValueError as e:
self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags, message=str(e))
raise
else:
self.service_check(
SERVICE_CHECK_NAME,
AgentCheck.OK,
tags=service_check_tags,
message="Connection to {} was successful".format(url),
)
return response_json
finally:
if old_keytab_path is not None:
os.environ['KRB5_CLIENT_KTNAME'] = old_keytab_path
def _join_url_dir(self, url, *args):
"""
Join a URL with multiple directories
"""
for path in args:
url = url.rstrip('/') + '/'
url = urljoin(url, path.lstrip('/'))
return url
def _get_url_base(self, url):
"""
Return the base of a URL
"""
s = urlsplit(url)
return urlunsplit([s.scheme, s.netloc, '', '', ''])
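# Illustrative sketch (not part of the shipped check): _get_value_from_json
# resolves dotted paths such as 'resourcesUsed.memory' against nested JSON.
# A standalone equivalent, exercised on a made-up queue payload:
def _dotted_lookup_demo(dict_path, payload):
    for key in dict_path.split('.'):
        if isinstance(payload, dict) and key in payload:
            payload = payload[key]
        else:
            return None
    return payload

if __name__ == '__main__':
    sample_queue = {'resourcesUsed': {'memory': 2048, 'vCores': 4}}
    print(_dotted_lookup_demo('resourcesUsed.memory', sample_queue))   # 2048
    print(_dotted_lookup_demo('resourcesUsed.missing', sample_queue))  # None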
|
the-stack_106_18782
|
import json
from django.contrib import admin
from django.db import models
from .models import RequestData, RegtseData, RegdevData, RegtseDataTest, RegdevDataTest, RequestDataTest
from crudapp.models import Atm, AtmImage, AtmModel, AtmModelFunction
class RequestDataAdmin(admin.ModelAdmin):
list_display=('identity','table', 'endpoint',)
readonly_fields = ('endpoint','scheme','method','request', 'headers','identity','table')
list_filter = ('endpoint', )
exclude = ('request',)
def jsondata(self, obj):
data = json.loads(obj.request)
return data
def identity(self, obj):
data = json.loads(obj.request)
value = "None"
if 'data' in data:
if 'MERCHANT' in data["data"][0]:
value = data["data"][0]["MERCHANT"]
elif 'TERMINAL_ID' in data["data"][0]:
value = data["data"][0]["TERMINAL_ID"]
else:
value = "None"
return value
def table(self, obj):
data = json.loads(obj.request)
value = ""
if data and 'data' in data and 'TABLE' in data["data"][0]:
value = data["data"][0]["TABLE"]
return value
# class RequestDataTestAdmin(admin.ModelAdmin):
# list_display=('identity','table', 'endpoint','createdAt',)
# readonly_fields = ('endpoint','scheme','method','createdAt','request', 'headers','identity','table')
# list_filter = ('endpoint', 'createdAt',)
# exclude = ('request',)
# def jsondata(self, obj):
# data = json.loads(obj.request)
# return data
# def identity(self, obj):
# data = json.loads(obj.request)
# value = "None"
# if 'data' in data:
# if 'MERCHANT' in data["data"][0]:
# value = data["data"][0]["MERCHANT"]
# elif 'TERMINAL_ID' in data["data"][0]:
# value = data["data"][0]["TERMINAL_ID"]
# else:
# value = "None"
# return value
# def table(self, obj):
# data = json.loads(obj.request)
# value = ""
# if data and 'data' in data and 'TABLE' in data["data"][0]:
# value = data["data"][0]["TABLE"]
# return value
admin.site.register(RequestData, RequestDataAdmin)
#admin.site.register(RequestDataTest)#, RequestDataTestAdmin)
admin.site.register(RegdevData)
admin.site.register(RegdevDataTest)
admin.site.register(RegtseData)
admin.site.register(RegtseDataTest)
|
the-stack_106_18787
|
import threading
import PyLidar3
import matplotlib.pyplot as plt
import math
import time
def draw():
global is_plot
while is_plot:
plt.figure(1)
plt.cla()
plt.ylim(-4000,4000)
plt.xlim(-4000,4000)
plt.scatter(x,y,c='r',s=8)
plt.pause(0.001)
plt.close("all")
is_plot = True
x=[]
y=[]
for _ in range(360):
x.append(0)
y.append(0)
port = 'COM13' # input("Enter port name which lidar is connected:") #windows
Obj = PyLidar3.YdLidarX4(port) #PyLidar3.your_version_of_lidar(port,chunk_size)
threading.Thread(target=draw).start()
if(Obj.Connect()):
print(Obj.GetDeviceInfo())
gen = Obj.StartScanning()
t = time.time() # start time
while (time.time() - t) < 30: #scan for 30 seconds
data = next(gen)
for angle in range(0,360):
if(data[angle]>1000):
x[angle] = data[angle] * math.cos(math.radians(angle))
y[angle] = data[angle] * math.sin(math.radians(angle))
is_plot = False
Obj.StopScanning()
Obj.Disconnect()
else:
print("Error connecting to device")
|
the-stack_106_18789
|
import time
import GloVeFastDistances
searchEngine=GloVeFastDistances.GloVeFastDistances("/path/to/glovefile")
while(True):
input1 = input()
if input1 in searchEngine.wordDictionary:
word=searchEngine.wordDictionary[input1]
embeddings=searchEngine.embeddings[word]
start1 = time.time()
searchEngine.getSimilarWord(embeddings)
end = time.time()
print("Closest words are")
for i in range(10):
print(searchEngine.inverseWordDictionary[searchEngine.pos[i]])
print((end-start1)*1000)
print("BREAK")
|
the-stack_106_18790
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
W = dace.symbol('W')
@dace.program
def prog(A, stats):
@dace.map(_[0:W])
def compute(i):
inp << A[i]
sum >> stats(1, lambda x, y: x + y)[0]
ssq >> stats(1, lambda x, y: x + y)[1]
sum = inp
ssq = inp * inp
def test():
W.set(120)
A = dace.ndarray([W])
stats = dace.ndarray([2])
A[:] = np.random.normal(3.0, 5.0, W.get())
stats[:] = 0.0
prog(A, stats, W=W)
mean = stats[0] / W.get()
variance = stats[1] / W.get() - mean * mean
print("Mean: %f, Variance: %f" % (mean, variance))
diff_mean = abs(mean - np.mean(A))
print("Difference (mean):", diff_mean)
diff_var = abs(variance - np.var(A))
print("Difference (variance):", diff_var)
assert diff_mean <= 1e-5 and diff_var <= 1e-4
if __name__ == "__main__":
test()
|
the-stack_106_18793
|
from unittest import TestCase
from preprocessor.InteractionTypePrefixer import InteractionTypePrefixer
class TestInteractionTypePrefixer(TestCase):
def test_transform(self):
# Arrange
data = ["This is sample entity1 entity1", "entity1", "entity2", "phosphorylation"]
expected = ["QUERYphosphorylation This is sample entity1 entity1", "entity1", "entity2", "phosphorylation"]
sut = InteractionTypePrefixer(col_to_transform=0, prefixer_col_index=3)
# Act
actual = sut(data)
# Assert
self.assertSequenceEqual(expected, actual)
|
the-stack_106_18795
|
''' Dictionary
A dictionary is a collection of key:value pairs.
The values can be changed (dictionaries are mutable).
Each value is stored under a unique key.
A dictionary can also be created with the dict() constructor (see mygreens below).
Built-in dictionary methods
Method      Description
get()       Returns the value of a specific key.
update()    Inserts a specified key:value pair.
clear()     Removes all key:value pairs.
'''
mycar = {
"brand": "Range Rover Sports",
"model": "HSE",
"year": 2017
}
print(mycar) # {'brand': 'Range Rover Sports', 'model': 'HSE', 'year': 2017}
mygreens = dict(fruit="green apples", vegetables="kale")
print(mygreens) # {'fruit': 'green apples', 'vegetables': 'kale'}
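# Short demo of the built-in methods listed in the docstring above.
print(mycar.get("model"))         # HSE
print(mycar.get("color", "n/a"))  # n/a (default returned for a missing key)
mycar.update({"color": "black"})  # insert a new key:value pair
print(mycar)                      # now also contains 'color': 'black'
mygreens.clear()                  # remove all key:value pairs
print(mygreens)                   # {}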
|
the-stack_106_18796
|
import torch
from torch.nn import functional as F
from torch.nn.modules.loss import _WeightedLoss
class SoftmaxCrossEntropyWithLogits(_WeightedLoss):
def __init__(self, weight=None):
super(SoftmaxCrossEntropyWithLogits, self).__init__(weight=None)
self.weight = weight
def forward(self, input, target):
logits_scaled = torch.log(F.softmax(input, dim=-1) + 0.00001)
if self.weight is not None:
loss = -((target * logits_scaled) * self.weight).sum(dim=-1)
else:
loss = -(target * logits_scaled).sum(dim=-1)
return loss.mean()
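# Illustrative usage (not part of the original module): the loss above expects
# raw logits and a soft (probability-distribution) target per sample. Shapes
# and values below are made up for the example.
if __name__ == "__main__":
    batch, n_classes = 4, 3
    logits = torch.randn(batch, n_classes)
    soft_targets = F.softmax(torch.randn(batch, n_classes), dim=-1)
    criterion = SoftmaxCrossEntropyWithLogits()
    print(criterion(logits, soft_targets))  # scalar mean cross-entropy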
|
the-stack_106_18797
|
# coding:utf-8
from django import forms
from django.conf import settings
from django.contrib.admin.widgets import AdminTextareaWidget
from django.template.loader import render_to_string
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
# import settings as USettings
from .commands import *
# Normalize the given file path. The expected input format is 'abc', with no leading or trailing path separators.
# If the path argument is a callable it is executed; otherwise it may contain a datetime format string,
# used to generate renamed files such as file20121208.bmp.
def calc_path(OutputPath, instance=None):
if callable(OutputPath):
try:
OutputPath = OutputPath(instance)
except:
OutputPath = ""
else:
try:
import datetime
OutputPath = datetime.datetime.now().strftime(OutputPath)
except:
pass
return OutputPath
# width=600, height=300, toolbars="full", imagePath="", filePath="", upload_settings={},
# settings={},command=None,event_handler=None
class UEditorWidget(forms.Textarea):
def __init__(self, attrs=None):
params = attrs.copy()
width = params.pop("width")
height = params.pop("height")
toolbars = params.pop("toolbars", "full")
imagePath = params.pop("imagePath", "")
filePath = params.pop("filePath", "")
upload_settings = params.pop("upload_settings", {})
settings = params.pop("settings", {})
command = params.pop("command", None)
event_handler = params.pop("event_handler", None)
        # Extension commands
self.command = command
self.event_handler = event_handler
        # Upload paths
self.upload_settings = upload_settings.copy()
self.upload_settings.update({
"imagePathFormat": imagePath,
"filePathFormat": filePath
})
        # Keep a copy of the original upload settings
self._upload_settings = self.upload_settings.copy()
self.recalc_path(None)
self.ueditor_settings = {
'toolbars': toolbars,
'initialFrameWidth': width,
'initialFrameHeight': height
}
        # The following handles the toolbar settings, converting mode names such as "normal" and "mini" into toolbar configuration values
try:
if isinstance(toolbars, str):
if toolbars == "full":
del self.ueditor_settings['toolbars']
else:
self.ueditor_settings[
"toolbars"] = USettings.TOOLBARS_SETTINGS[toolbars]
except:
pass
self.ueditor_settings.update(settings)
super(UEditorWidget, self).__init__(attrs)
    # def recalc_path(self, model_inst):
    #     """Compute the upload paths; each entry may be a callable."""
# try:
# uSettings = self.upload_settings
# if self._upload_settings.get("filePathFormat", None):
# uSettings['filePathFormat'] = calc_path(
# self._upload_settings['filePathFormat'], model_inst)
# if self._upload_settings.get("imagePathFormat", None):
# uSettings['imagePathFormat'] = calc_path(
# self._upload_settings['imagePathFormat'], model_inst)
# if self._upload_settings.get("scrawlPathFormat", None):
# uSettings['scrawlPathFormat'] = calc_path(
# self._upload_settings['scrawlPathFormat'], model_inst)
# if self._upload_settings.get("videoPathFormat", None):
# uSettings['videoPathFormat'] = calc_path(
# self._upload_settings['videoPathFormat'], model_inst),
# if self._upload_settings.get("snapscreenPathFormat", None):
# uSettings['snapscreenPathFormat'] = calc_path(
# self._upload_settings['snapscreenPathFormat'], model_inst)
# if self._upload_settings.get("catcherPathFormat", None):
# uSettings['catcherPathFormat'] = calc_path(
# self._upload_settings['catcherPathFormat'], model_inst)
# if self._upload_settings.get("imageManagerListPath", None):
# uSettings['imageManagerListPath'] = calc_path(
# self._upload_settings['imageManagerListPath'], model_inst)
# if self._upload_settings.get("fileManagerListPath", None):
# uSettings['fileManagerListPath'] = calc_path(
# self._upload_settings['fileManagerListPath'], model_inst)
    # # Default values: when the scrawl, screenshot, remote-capture or image-manager paths are unspecified, they all fall back to imagePath
# if uSettings['imagePathFormat'] != "":
# uSettings['scrawlPathFormat'] = uSettings['scrawlPathFormat'] if self._upload_settings.get(
# "scrawlPathFormat", None) else uSettings['imagePathFormat']
# uSettings['videoPathFormat'] = uSettings['videoPathFormat'] if self._upload_settings.get(
# "videoPathFormat", None) else uSettings['imagePathFormat']
# uSettings['snapscreenPathFormat'] = uSettings['snapscreenPathFormat'] if self._upload_settings.get(
# "snapscreenPathFormat", None) else uSettings['imagePathFormat']
# uSettings['catcherPathFormat'] = uSettings['catcherPathFormat'] if self._upload_settings.get(
# "catcherPathFormat", None) else uSettings['imagePathFormat']
# uSettings['imageManagerListPath'] = uSettings['imageManagerListPath'] if self._upload_settings.get(
# "imageManagerListPath", None) else uSettings['imagePathFormat']
# if uSettings['filePathFormat'] != "":
# uSettings['fileManagerListPath'] = uSettings['fileManagerListPath'] if self._upload_settings.get(
# "fileManagerListPath", None) else uSettings['filePathFormat']
# except:
# pass
    def recalc_path(self, model_inst):
        """Compute the upload paths; each entry may be a callable."""
try:
uSettings = self.upload_settings
if "filePathFormat" in self._upload_settings:
uSettings['filePathFormat'] = calc_path(
self._upload_settings['filePathFormat'], model_inst)
if "imagePathFormat" in self._upload_settings:
uSettings['imagePathFormat'] = calc_path(
self._upload_settings['imagePathFormat'], model_inst)
if "scrawlPathFormat" in self._upload_settings:
uSettings['scrawlPathFormat'] = calc_path(
self._upload_settings['scrawlPathFormat'], model_inst)
if "videoPathFormat" in self._upload_settings:
uSettings['videoPathFormat'] = calc_path(
self._upload_settings['videoPathFormat'], model_inst),
if "snapscreenPathFormat" in self._upload_settings:
uSettings['snapscreenPathFormat'] = calc_path(
self._upload_settings['snapscreenPathFormat'], model_inst)
if "catcherPathFormat" in self._upload_settings:
uSettings['catcherPathFormat'] = calc_path(
self._upload_settings['catcherPathFormat'], model_inst)
if "imageManagerListPath" in self._upload_settings:
uSettings['imageManagerListPath'] = calc_path(
self._upload_settings['imageManagerListPath'], model_inst)
if "fileManagerListPath" in self._upload_settings:
uSettings['fileManagerListPath'] = calc_path(
self._upload_settings['fileManagerListPath'], model_inst)
            # Default values: when the scrawl, screenshot, remote-capture or image-manager paths are not specified, they all default to imagePath
if uSettings['imagePathFormat'] != "":
uSettings['scrawlPathFormat'] = uSettings[
'scrawlPathFormat'] if "scrawlPathFormat" in self._upload_settings else uSettings['imagePathFormat']
uSettings['videoPathFormat'] = uSettings[
'videoPathFormat'] if "videoPathFormat" in self._upload_settings else uSettings['imagePathFormat']
uSettings['snapscreenPathFormat'] = uSettings[
'snapscreenPathFormat'] if "snapscreenPathFormat" in self._upload_settings else uSettings[
'imagePathFormat']
uSettings['catcherPathFormat'] = uSettings[
'catcherPathFormat'] if "catcherPathFormat" in self._upload_settings else uSettings[
'imagePathFormat']
uSettings['imageManagerListPath'] = uSettings[
'imageManagerListPath'] if "imageManagerListPath" in self._upload_settings else uSettings[
'imagePathFormat']
if uSettings['filePathFormat'] != "":
uSettings['fileManagerListPath'] = uSettings[
'fileManagerListPath'] if "fileManagerListPath" in self._upload_settings else uSettings[
'imagePathFormat']
except:
pass
def render(self, name, value, attrs=None):
if value is None:
value = ''
        # Parameters passed to the template
editor_id = "id_%s" % name.replace("-", "_")
uSettings = {
"name": name,
"id": editor_id,
"value": value
}
        if self.command is not None:
            cmdjs = ""
            if isinstance(self.command, list):
                for cmd in self.command:
                    cmdjs = cmdjs + cmd.render(editor_id)
            else:
                cmdjs = self.command.render(editor_id)
            uSettings["commands"] = cmdjs
uSettings["settings"] = self.ueditor_settings.copy()
uSettings["settings"].update({
"serverUrl": "/ueditor/controller/?%s" % urlencode(self._upload_settings)
})
# 生成事件侦听
if self.event_handler:
uSettings["bindEvents"] = self.event_handler.render(editor_id)
context = {
'UEditor': uSettings,
'STATIC_URL': settings.STATIC_URL,
'STATIC_ROOT': settings.STATIC_ROOT,
'MEDIA_URL': settings.MEDIA_URL,
'MEDIA_ROOT': settings.MEDIA_ROOT
}
return mark_safe(render_to_string('ueditor.html', context))
class Media:
js = ("ueditor/ueditor.config.js",
"ueditor/ueditor.all.min.js")
class AdminUEditorWidget(AdminTextareaWidget, UEditorWidget):
def __init__(self, **kwargs):
super(AdminUEditorWidget, self).__init__(**kwargs)
|
the-stack_106_18798
|
import numpy as np
from skimage.transform import AffineTransform, warp
from skimage.util import pad, img_as_ubyte
from typing import List
from dataset.interpolate.InterpolateDatasetLoader import InterpolateDatasetLoader
from dataset.loader.DatasetLoader import DatasetLoader
from dataset.interpolate.InterpolateSubdataset import InterpolateSubdataset
class CreateTransformedInterpolateData(InterpolateDatasetLoader):
def __init__(
self,
baseDatasetLoader: DatasetLoader,
padding,
defaultRotationFactor,
defaultShearFactor,
defaultLog2StretchFactor,
rotationFactors,
shearFactors,
log2StretchFactors):
# Factors have (left, right, centre [optional], outside [optional]) layout
self.__baseDatasetLoader = baseDatasetLoader
self.__padding = padding
self.__rotationFactors = rotationFactors
self.__defaultRotationFactor = defaultRotationFactor
self.__shearFactors = shearFactors
self.__defaultShearFactor = defaultShearFactor
self.__log2StretchFactors = log2StretchFactors
self.__defaultLog2StretchFactor = defaultLog2StretchFactor
def loadInterpolationData(self) -> List[InterpolateSubdataset]:
_, _, (xTest, yTest) = self.__baseDatasetLoader.loadData()
xTestPadded = self.__pad(xTest)
yTestLabelled = self.__label(yTest)
return [f(xTestPadded, yTestLabelled) for f in [self.__rotate, self.__shear, self.__stretch]]
def __pad(self, X: np.ndarray):
XPadded = [pad(x, self.__padding, 'constant') for x in X]
return np.array(XPadded)
def __label(self, Y: np.ndarray):
YLabelled = [
(index,) + tuple(y) + (self.__defaultRotationFactor, self.__defaultShearFactor, self.__defaultLog2StretchFactor, self.__defaultLog2StretchFactor)
for index, y in enumerate(Y)
]
return np.array(YLabelled)
def __rotate(self, X, Y) -> InterpolateSubdataset:
return self.__transform(X, Y, "ROTATION", self.__rotationFactors, lambda f: AffineTransform(rotation=f, shear=self.__defaultShearFactor, scale=(2**self.__defaultLog2StretchFactor, 2**self.__defaultLog2StretchFactor)), (1,))
def __shear(self, X, Y) -> InterpolateSubdataset:
return self.__transform(X, Y, "SHEAR", self.__shearFactors, lambda f: AffineTransform(shear=f, rotation=self.__defaultRotationFactor, scale=(2**self.__defaultLog2StretchFactor, 2**self.__defaultLog2StretchFactor)), (2,))
def __stretch(self, X, Y) -> InterpolateSubdataset:
return self.__transform(X, Y, "STRETCH", self.__log2StretchFactors, lambda f: AffineTransform(scale=(2**f, 2**f), rotation=self.__defaultRotationFactor, shear=self.__defaultShearFactor), (3, 4))
def __transform(self, X, Y, interpolationFactorName, factors, transformFromFactor, yIndexOffsets):
imageToOriginTransform = self.__imageToOriginTransform()
imageFromOriginTransform = self.__imageFromOriginTransform()
yLength = self.__baseDatasetLoader.dataPointShape()[1][0]
def performTransform(factor):
inverseTransform = (imageToOriginTransform + (transformFromFactor(factor) + imageFromOriginTransform)).inverse
XTransformed = np.array([img_as_ubyte(warp(x, inverseTransform)) for x in X])
YTransformed = np.array(Y)
for y in YTransformed:
for yIndexOffset in yIndexOffsets:
y[yLength + yIndexOffset] = factor
return XTransformed, YTransformed
transformed = map(performTransform, factors)
return InterpolateSubdataset(interpolationFactorName, *transformed)
def __imageToOriginTransform(self):
centre = self.__getCentre()
minusCentre = tuple(map(lambda x: -x, centre))
return AffineTransform(translation=minusCentre)
def __imageFromOriginTransform(self):
centre = self.__getCentre()
return AffineTransform(translation=centre)
def __getCentre(self):
imageSize = self.dataPointShape()[0][0:2]
imageCentre = map(lambda x: x / 2 - 0.5, imageSize)
return tuple(imageCentre)
def dataPointShape(self):
oldXShape, oldYShape = self.__baseDatasetLoader.dataPointShape()
paddingExtra = tuple(map(sum, self.__padding))
newXShape = (oldXShape[0] + paddingExtra[0], oldXShape[1] + paddingExtra[1]) + oldXShape[2:]
newYShape = (oldYShape[0] + 5,)
return newXShape, newYShape
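# Illustrative sketch (independent of the class above): composing
# "translate to origin -> transform -> translate back" is what makes the
# rotation/shear/stretch act about the image centre. A standalone example on a
# dummy 28x28 image (the size is an assumption made for the demo):
if __name__ == "__main__":
    demo_image = np.zeros((28, 28))
    demo_image[10:18, 10:18] = 1.0
    centre = np.array(demo_image.shape[:2]) / 2 - 0.5
    to_origin = AffineTransform(translation=-centre)
    rotate = AffineTransform(rotation=np.pi / 6)
    from_origin = AffineTransform(translation=centre)
    rotated = warp(demo_image, (to_origin + (rotate + from_origin)).inverse)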
|
the-stack_106_18801
|
import warnings
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import ridge_regression
from sklearn.utils.validation import check_is_fitted
from pysindy.optimizers import BaseOptimizer
class STLSQ(BaseOptimizer):
"""Sequentially thresholded least squares algorithm.
Attempts to minimize the objective function
:math:`\\|y - Xw\\|^2_2 + \\alpha \\|w\\|^2_2`
by iteratively performing least squares and masking out
elements of the weight that are below a given threshold.
Parameters
----------
threshold : float, optional (default 0.1)
Minimum magnitude for a coefficient in the weight vector.
Coefficients with magnitude below the threshold are set
to zero.
alpha : float, optional (default 0.05)
Optional L2 (ridge) regularization on the weight vector.
max_iter : int, optional (default 20)
Maximum iterations of the optimization algorithm.
ridge_kw : dict, optional
Optional keyword arguments to pass to the ridge regression.
fit_intercept : boolean, optional (default False)
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations.
normalize : boolean, optional (default False)
This parameter is ignored when fit_intercept is set to False. If True,
the regressors X will be normalized before regression by subtracting
the mean and dividing by the l2-norm.
copy_X : boolean, optional (default True)
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
ind_ : array, shape (n_features,) or (n_targets, n_features)
Array of 0s and 1s indicating which coefficients of the
weight vector have not been masked out.
history_ : list
History of ``coef_``. ``history_[k]`` contains the values of
``coef_`` at iteration k of sequentially thresholded least-squares.
Examples
--------
>>> import numpy as np
>>> from scipy.integrate import odeint
>>> from pysindy import SINDy
>>> from pysindy.optimizers import STLSQ
>>> lorenz = lambda z,t : [10*(z[1] - z[0]),
>>> z[0]*(28 - z[2]) - z[1],
>>> z[0]*z[1] - 8/3*z[2]]
>>> t = np.arange(0,2,.002)
>>> x = odeint(lorenz, [-8,8,27], t)
>>> opt = STLSQ(threshold=.1, alpha=.5)
>>> model = SINDy(optimizer=opt)
>>> model.fit(x, t=t[1]-t[0])
>>> model.print()
x0' = -9.999 1 + 9.999 x0
x1' = 27.984 1 + -0.996 x0 + -1.000 1 x1
x2' = -2.666 x1 + 1.000 1 x0
"""
def __init__(
self,
threshold=0.1,
alpha=0.05,
max_iter=20,
ridge_kw=None,
normalize=False,
fit_intercept=False,
copy_X=True,
):
super(STLSQ, self).__init__(
max_iter=max_iter,
normalize=normalize,
fit_intercept=fit_intercept,
copy_X=copy_X,
)
if threshold < 0:
raise ValueError("threshold cannot be negative")
if alpha < 0:
raise ValueError("alpha cannot be negative")
self.threshold = threshold
self.alpha = alpha
self.ridge_kw = ridge_kw
def _sparse_coefficients(self, dim, ind, coef, threshold):
"""Perform thresholding of the weight vector(s)
"""
c = np.zeros(dim)
c[ind] = coef
big_ind = np.abs(c) >= threshold
c[~big_ind] = 0
return c, big_ind
def _regress(self, x, y):
"""Perform the ridge regression
"""
kw = self.ridge_kw or {}
coef = ridge_regression(x, y, self.alpha, **kw)
self.iters += 1
return coef
def _no_change(self):
"""Check if the coefficient mask has changed after thresholding
"""
this_coef = self.history_[-1].flatten()
if len(self.history_) > 1:
last_coef = self.history_[-2].flatten()
else:
last_coef = np.zeros_like(this_coef)
return all(bool(i) == bool(j) for i, j in zip(this_coef, last_coef))
def _reduce(self, x, y):
"""Iterates the thresholding. Assumes an initial guess is saved in
self.coef_ and self.ind_
"""
ind = self.ind_
n_samples, n_features = x.shape
n_targets = y.shape[1]
n_features_selected = np.sum(ind)
for _ in range(self.max_iter):
if np.count_nonzero(ind) == 0:
warnings.warn(
"Sparsity parameter is too big ({}) and eliminated all "
"coefficients".format(self.threshold)
)
coef = np.zeros((n_targets, n_features))
break
coef = np.zeros((n_targets, n_features))
for i in range(n_targets):
if np.count_nonzero(ind[i]) == 0:
warnings.warn(
"Sparsity parameter is too big ({}) and eliminated all "
"coefficients".format(self.threshold)
)
continue
coef_i = self._regress(x[:, ind[i]], y[:, i])
coef_i, ind_i = self._sparse_coefficients(
n_features, ind[i], coef_i, self.threshold
)
coef[i] = coef_i
ind[i] = ind_i
self.history_.append(coef)
if np.sum(ind) == n_features_selected or self._no_change():
# could not (further) select important features
break
else:
warnings.warn(
"STLSQ._reduce did not converge after {} iterations.".format(
self.max_iter
),
ConvergenceWarning,
)
try:
coef
except NameError:
coef = self.coef_
warnings.warn(
"STLSQ._reduce has no iterations left to determine coef",
ConvergenceWarning,
)
self.coef_ = coef
self.ind_ = ind
@property
def complexity(self):
check_is_fitted(self)
return np.count_nonzero(self.coef_) + np.count_nonzero(
[abs(self.intercept_) >= self.threshold]
)
|
the-stack_106_18805
|
N, K = map(int, input().split())
L = [[] for i in range(N + 1)]
for i in range(N - 1):
a, b = map(int, input().split())
L[a].append(b)
L[b].append(a)
class LCA_doubling:
"""
    parent: doubling (binary lifting) table
    depth: depth of each node in the original tree
"""
def __init__(self, g, root): #g: graph
def dfs(root):
n = len(g)
parent = self.parent[0]
q = [root]
while q:
v = q.pop()
for c in g[v]:
if c != parent[v]:
self.depth[c] = self.depth[v] + 1
parent[c] = v
q.append(c)
def doubling_make_table(N, logN, Table):
for i in range(1, logN):
for j, Tiij in enumerate(Table[i - 1]):
if Tiij != -1:
Table[i][j] = Table[i - 1][Tiij]
N = len(g)
self.logN = len(bin(N))
self.parent = [[-1] * N for _ in range(self.logN)]
        self.depth = [0] * (N)  # depth of each node
        dfs(root)  # compute depths and parents, treating `root` as the tree root
        doubling_make_table(N, self.logN, self.parent)  # build the doubling table
    def getLCA(self, u, v):  # returns the LCA of u and v
        if self.depth[u] > self.depth[v]: u, v = v, u  # make v the deeper node
dd = self.depth[v] - self.depth[u]
for k in range(self.logN - 1, -1, -1):
if (dd >> k) & 1: v = self.parent[k][v]
if u == v: return u
for k in range(self.logN - 1, -1, -1):
if self.parent[k][u] != self.parent[k][v]:
u, v = self.parent[k][u], self.parent[k][v]
return self.parent[0][u]
    def getdepth(self, u):  # returns the depth of u
return self.depth[u]
root = 1
LCA = LCA_doubling(L, root)
#print(LCA.parent)
#print(LCA.depth[1])
for k in range(K):
a, b = map(int, input().split())
B = LCA.getLCA(a, b)
#print(B)
cnt = LCA.depth[a] - (LCA.depth[B]) + LCA.depth[b] - (LCA.depth[B])
if cnt % 2 == 1:
print("Road")
else:
print("Town")
|
the-stack_106_18807
|
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities
=========
**Module name:** :mod:`strawberryfields.utils`
.. currentmodule:: strawberryfields.utils
This module defines and implements several utility functions and language extensions that complement
StrawberryFields.
Classical processing functions
------------------------------
These functions provide common mathematical operations that may be required for
classical processing of measured modes input to other gates. They may be used
as follows:
.. code-block:: python
MeasureX | q[0]
Xgate(scale(q[0], sqrt(0.5))) | q[1]
Available classical processing functions include:
.. autosummary::
neg
mag
phase
scale
shift
scale_shift
power
If more advanced classical processing is required, custom classical processing
functions can be created using the :func:`strawberryfields.convert` decorator.
NumPy state functions
---------------------
These functions allow the calculation of various quantum states in either the Fock
basis (a one-dimensional array indexed by Fock state) or the Gaussian basis (returning
a vector of means and covariance matrix). These state calculations are NOT done in the
simulators, but rather in NumPy.
These are useful for generating states for use in calculating the fidelity of simulations.
.. autosummary::
squeezed_cov
vacuum_state
coherent_state
squeezed_state
displaced_squeezed_state
fock_state
cat_state
Random functions
----------------
These functions generate random numbers and matrices corresponding to various
quantum states and operations.
.. autosummary::
randnc
random_covariance
random_symplectic
random_interferometer
Decorators
----------
The :class:`~.strawberryfields.utils.operation` decorator allows functions containing quantum operations
acting on a qumode to be used as an operation itself within a :class:`.Program` context.
.. autosummary::
operation
Program functions
-----------------
These functions act on :class:`.Program` instances, returning
or extracting information from the quantum circuit.
For example, these might be used as follows:
.. code-block:: python
prog = sf.Program(2)
with prog.context as q:
BSgate(0.543, 0.123) | (q[0], q[1])
U = extract_unitary(prog, cutoff_dim=10)
In this example, ``U`` is a unitary array representing the quantum circuit `prog`
in the Fock basis (here, a single beamsplitter).
.. autosummary::
is_unitary
is_channel
extract_unitary
extract_channel
Code details
~~~~~~~~~~~~
"""
import collections
import copy
from inspect import signature
try:
import tensorflow as tf
except (ImportError, ModuleNotFoundError):
tf_available = False
import numpy as np
from numpy.random import randn
from numpy.polynomial.hermite import hermval
import scipy as sp
from scipy.special import factorial as fac
import strawberryfields as sf
from .program import _convert, Command
from .ops import Gate, Channel, Ket
# pylint: disable=abstract-method,ungrouped-imports,
# ------------------------------------------------------------------------
# RegRef convert functions |
# ------------------------------------------------------------------------
@_convert
def neg(x):
r"""Negates a measured value.
Args:
x (RegRef): mode that has been previously measured
"""
return -x
@_convert
def mag(x):
r"""Returns the magnitude :math:`|z|` of a measured value.
Args:
x (RegRef): mode that has been previously measured
"""
return np.abs(x)
@_convert
def phase(x):
r"""Returns the phase :math:`\phi` of a measured value :math:`z=re^{i\phi}`.
Args:
x (RegRef): mode that has been previously measured
"""
return np.angle(x)
def scale(x, a):
r"""Scales the measured value by factor ``a``.
Args:
x (RegRef): mode that has been previously measured
a (float): scaling factor
"""
@_convert
def rrt(x):
"""RegRefTransform function"""
return a*x
return rrt(x)
def shift(x, b):
r"""Shifts the measured value by factor ``b``.
Args:
x (RegRef): mode that has been previously measured
b (float): shifting factor
"""
@_convert
def rrt(x):
"""RegRefTransform function"""
return b+x
return rrt(x)
def scale_shift(x, a, b):
r"""Scales the measured value by factor ``a`` then shifts the result by ``b``.
.. math:: u' = au + b
Args:
x (RegRef): mode that has been previously measured
a (float): scaling factor
b (float): shifting factor
"""
@_convert
def rrt(x):
"""RegRefTransform function"""
return a*x + b
return rrt(x)
def power(x, a):
r"""Raises the measured value to power ``a``.
Args:
x (RegRef): mode that has been previously measured
a (float): the exponent of x. Note that a can be
negative and fractional.
"""
if a < 0:
tmp = float(a)
else:
tmp = a
@_convert
def rrt(x):
"""RegRefTransform function"""
return np.power(x, tmp)
return rrt(x)
# ------------------------------------------------------------------------
# State functions - Fock basis and Gaussian basis |
# ------------------------------------------------------------------------
def squeezed_cov(r, phi, hbar=2):
r"""Returns the squeezed covariance matrix of a squeezed state
Args:
        r (float): the squeezing magnitude
        phi (float): the squeezing phase :math:`\phi`
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
        array: the covariance matrix of the squeezed state
"""
cov = np.array([[np.exp(-2*r), 0],
[0, np.exp(2*r)]]) * hbar/2
R = np.array([[np.cos(phi/2), -np.sin(phi/2)],
[np.sin(phi/2), np.cos(phi/2)]])
return np.dot(np.dot(R, cov), R.T)
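# Illustrative sketch (a hypothetical helper, not part of the original module; parameter
# values are arbitrary): for phi = 0 the rotation is the identity, so squeezed_cov should
# reduce to hbar/2 * diag(exp(-2r), exp(2r)).
def _example_squeezed_cov(r=0.5, hbar=2):
    """Check squeezed_cov against its phi = 0 limit."""
    cov = squeezed_cov(r, 0, hbar)
    expected = (hbar / 2) * np.diag([np.exp(-2 * r), np.exp(2 * r)])
    return np.allclose(cov, expected)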
def vacuum_state(basis='fock', fock_dim=5, hbar=2.):
r""" Returns the vacuum state
Args:
basis (str): if 'fock', calculates the initial state
in the Fock basis. If 'gaussian', returns the
vector of means and the covariance matrix.
fock_dim (int): the size of the truncated Fock basis if
using the Fock basis representation.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
array: the vacuum state
"""
if basis == 'fock':
state = np.zeros((fock_dim))
state[0] = 1.
elif basis == 'gaussian':
means = np.zeros((2))
cov = np.identity(2) * hbar/2
state = [means, cov]
return state
def coherent_state(a, basis='fock', fock_dim=5, hbar=2.):
r""" Returns the coherent state
This can be returned either in the Fock basis,
.. math::
|\alpha\rangle = e^{-|\alpha|^2/2} \sum_{n=0}^\infty
\frac{\alpha^n}{\sqrt{n!}}|n\rangle
or as a Gaussian:
.. math::
\mu = (\text{Re}(\alpha),\text{Im}(\alpha)),~~~\sigma = I
where :math:`\alpha` is the displacement.
Args:
a (complex) : the displacement
basis (str): if 'fock', calculates the initial state
in the Fock basis. If 'gaussian', returns the
vector of means and the covariance matrix.
fock_dim (int): the size of the truncated Fock basis if
using the Fock basis representation.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
array: the coherent state
"""
if basis == 'fock':
state = np.array([
np.exp(-0.5*np.abs(a)**2)*a**n/np.sqrt(fac(n))
for n in range(fock_dim)])
elif basis == 'gaussian':
means = np.array([a.real, a.imag]) * np.sqrt(2*hbar)
cov = np.identity(2) * hbar/2
state = [means, cov]
return state
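# Illustrative sketch (a hypothetical helper with arbitrary parameter values): builds the
# same coherent state in both bases. The Fock-basis norm approaches 1 once fock_dim is
# large compared to |alpha|^2.
def _example_coherent_state_bases(alpha=0.5 + 0.3j, fock_dim=20, hbar=2.0):
    """Compare the Fock and Gaussian outputs of coherent_state."""
    ket = coherent_state(alpha, basis='fock', fock_dim=fock_dim)
    means, cov = coherent_state(alpha, basis='gaussian', hbar=hbar)
    norm = np.vdot(ket, ket).real  # close to 1 for a sufficiently large cutoff
    return norm, means, cov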
def squeezed_state(r, p, basis='fock', fock_dim=5, hbar=2.):
r""" Returns the squeezed state
This can be returned either in the Fock basis,
.. math::
|z\rangle = \frac{1}{\sqrt{\cosh(r)}}\sum_{n=0}^\infty
\frac{\sqrt{(2n)!}}{2^n n!}(-e^{i\phi}\tanh(r))^n|2n\rangle
or as a Gaussian:
.. math:: \mu = (0,0)
.. math::
:nowrap:
\begin{align*}
\sigma = R(\phi/2)\begin{bmatrix}e^{-2r} & 0 \\0 & e^{2r} \\\end{bmatrix}R(\phi/2)^T
\end{align*}
where :math:`z = re^{i\phi}` is the squeezing factor.
Args:
        r (float): the squeezing magnitude
p (float): the squeezing phase :math:`\phi`
basis (str): if 'fock', calculates the initial state
in the Fock basis. If 'gaussian', returns the
vector of means and the covariance matrix.
fock_dim (int): the size of the truncated Fock basis if
using the Fock basis representation.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
array: the squeezed state
"""
phi = p
if basis == 'fock':
def ket(n):
"""Squeezed state kets"""
return (np.sqrt(fac(2*n))/(2**n*fac(n))) * (-np.exp(1j*phi)*np.tanh(r))**n
state = np.array([ket(n//2) if n %
2 == 0 else 0. for n in range(fock_dim)])
state *= np.sqrt(1/np.cosh(r))
elif basis == 'gaussian':
means = np.zeros((2))
state = [means, squeezed_cov(r, phi, hbar)]
return state
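# Illustrative sketch (a hypothetical helper with arbitrary parameters): a squeezed vacuum
# has support only on even Fock states, and its Gaussian form reuses squeezed_cov above.
def _example_squeezed_state(r=0.3, phi=0.7, fock_dim=10, hbar=2.0):
    """Check the parity structure and the Gaussian form of squeezed_state."""
    ket = squeezed_state(r, phi, basis='fock', fock_dim=fock_dim)
    means, cov = squeezed_state(r, phi, basis='gaussian', hbar=hbar)
    return np.allclose(ket[1::2], 0), np.allclose(cov, squeezed_cov(r, phi, hbar))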
def displaced_squeezed_state(a, r, phi, basis='fock', fock_dim=5, hbar=2.):
r""" Returns the squeezed coherent state
This can be returned either in the Fock basis,
.. math::
|\alpha,z\rangle = e^{-\frac{1}{2}|\alpha|^2-\frac{1}{2}{\alpha^*}^2 e^{i\phi}\tanh{(r)}}
\sum_{n=0}^\infty\frac{\left[\frac{1}{2}e^{i\phi}\tanh(r)\right]^{n/2}}{\sqrt{n!\cosh(r)}}
H_n\left[ \frac{\alpha\cosh(r)+\alpha^*e^{i\phi}\sinh(r)}{\sqrt{e^{i\phi}\sinh(2r)}} \right]|n\rangle
where :math:`H_n(x)` is the Hermite polynomial, or as a Gaussian:
.. math:: \mu = (\text{Re}(\alpha),\text{Im}(\alpha))
.. math::
:nowrap:
\begin{align*}
\sigma = R(\phi/2)\begin{bmatrix}e^{-2r} & 0 \\0 & e^{2r} \\\end{bmatrix}R(\phi/2)^T
\end{align*}
where :math:`z = re^{i\phi}` is the squeezing factor
and :math:`\alpha` is the displacement.
Args:
a (complex): the displacement
        r (float): the squeezing magnitude
phi (float): the squeezing phase :math:`\phi`
basis (str): if 'fock', calculates the initial state
in the Fock basis. If 'gaussian', returns the
vector of means and the covariance matrix.
fock_dim (int): the size of the truncated Fock basis if
using the Fock basis representation.
hbar (float): (default 2) the value of :math:`\hbar` in the commutation
relation :math:`[\x,\p]=i\hbar`.
Returns:
array: the squeezed coherent state
"""
# pylint: disable=too-many-arguments
if basis == 'fock':
if r != 0:
phase_factor = np.exp(1j*phi)
ch = np.cosh(r)
sh = np.sinh(r)
th = np.tanh(r)
gamma = a*ch+np.conj(a)*phase_factor*sh
N = np.exp(-0.5*np.abs(a)**2-0.5*np.conj(a)**2*phase_factor*th)
coeff = np.diag(
[(0.5*phase_factor*th)**(n/2)/np.sqrt(fac(n)*ch)
for n in range(fock_dim)]
)
vec = [hermval(gamma/np.sqrt(phase_factor*np.sinh(2*r)), row)
for row in coeff]
state = N*np.array(vec)
else:
state = coherent_state(
a, basis='fock', fock_dim=fock_dim) # pragma: no cover
elif basis == 'gaussian':
means = np.array([a.real, a.imag]) * np.sqrt(2*hbar)
state = [means, squeezed_cov(r, phi, hbar)]
return state
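# Illustrative sketch (a hypothetical helper with arbitrary parameters): in the Gaussian
# basis the displaced squeezed state combines the coherent-state means with the squeezed
# covariance returned by squeezed_cov.
def _example_displaced_squeezed_state(a=0.4 + 0.2j, r=0.25, phi=1.1, hbar=2.0):
    """Check the Gaussian form of displaced_squeezed_state."""
    means, cov = displaced_squeezed_state(a, r, phi, basis='gaussian', hbar=hbar)
    coh_means, _ = coherent_state(a, basis='gaussian', hbar=hbar)
    return np.allclose(means, coh_means), np.allclose(cov, squeezed_cov(r, phi, hbar))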
# ------------------------------------------------------------------------
# State functions - Fock basis only |
# ------------------------------------------------------------------------
def fock_state(n, fock_dim=5):
r""" Returns the Fock state
Args:
n (int): the occupation number
fock_dim (int): the size of the truncated Fock basis
Returns:
array: the Fock state
"""
ket = np.zeros((fock_dim))
ket[n] = 1.
return ket
def cat_state(a, p=0, fock_dim=5):
r""" Returns the cat state
.. math::
|cat\rangle = \frac{1}{\sqrt{2(1+e^{-2|\alpha|^2}\cos(\phi))}}
\left(|\alpha\rangle +e^{i\phi}|-\alpha\rangle\right)
with the even cat state given for :math:`\phi=0`, and the odd
cat state given for :math:`\phi=\pi`.
Args:
a (complex): the displacement
p (float): parity, where :math:`\phi=p\pi`. ``p=0`` corresponds to an even
cat state, and ``p=1`` an odd cat state.
fock_dim (int): the size of the truncated Fock basis
Returns:
array: the cat state
"""
# p=0 if even, p=pi if odd
phi = np.pi*p
# normalisation constant
temp = np.exp(-0.5 * np.abs(a)**2)
N = temp / np.sqrt(2*(1 + np.cos(phi) * temp**4))
# coherent states
k = np.arange(fock_dim)
c1 = (a**k) / np.sqrt(fac(k))
c2 = ((-a)**k) / np.sqrt(fac(k))
# add them up with a relative phase
ket = (c1 + np.exp(1j*phi) * c2) * N
return ket
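# Illustrative sketch (a hypothetical helper with an arbitrary amplitude): the even cat
# state (p=0) has no odd Fock components, while the odd cat state (p=1) has no even ones.
def _example_cat_state_parity(a=1.0, fock_dim=12):
    """Check the parity structure of cat_state."""
    even_cat = cat_state(a, p=0, fock_dim=fock_dim)
    odd_cat = cat_state(a, p=1, fock_dim=fock_dim)
    return np.allclose(even_cat[1::2], 0), np.allclose(odd_cat[0::2], 0)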
# ------------------------------------------------------------------------
# Random numbers and matrices |
# ------------------------------------------------------------------------
def randnc(*arg):
"""Normally distributed array of random complex numbers."""
return randn(*arg) + 1j*randn(*arg)
def random_covariance(N, hbar=2, pure=False):
r"""Random covariance matrix.
Args:
N (int): number of modes
hbar (float): the value of :math:`\hbar` to use in the definition
of the quadrature operators :math:`\x` and :math:`\p`
pure (bool): if True, a random covariance matrix corresponding
to a pure state is returned
Returns:
array: random :math:`2N\times 2N` covariance matrix
"""
S = random_symplectic(N)
if pure:
return (hbar/2) * S @ S.T
nbar = 2*np.abs(np.random.random(N)) + 1
Vth = (hbar/2) * np.diag(np.concatenate([nbar, nbar]))
return S @ Vth @ S.T
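# Illustrative sketch (a hypothetical helper): a pure Gaussian covariance matrix returned
# by random_covariance is symmetric and has determinant (hbar/2)**(2N), since symplectic
# matrices have unit determinant.
def _example_random_covariance(N=3, hbar=2):
    """Check basic properties of random_covariance with pure=True."""
    V = random_covariance(N, hbar=hbar, pure=True)
    return np.allclose(V, V.T), np.isclose(np.linalg.det(V), (hbar / 2) ** (2 * N))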
def random_symplectic(N, passive=False):
r"""Random symplectic matrix representing a Gaussian transformation.
The squeezing parameters :math:`r` for active transformations are randomly
sampled from the standard normal distribution, while passive transformations
are randomly sampled from the Haar measure.
Args:
N (int): number of modes
passive (bool): if True, returns a passive Gaussian transformation (i.e.,
one that preserves photon number). If False (default), returns an active
transformation.
Returns:
array: random :math:`2N\times 2N` symplectic matrix
"""
U = random_interferometer(N)
O = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
if passive:
return O
U = random_interferometer(N)
P = np.vstack([np.hstack([U.real, -U.imag]), np.hstack([U.imag, U.real])])
r = np.abs(randnc(N))
Sq = np.diag(np.concatenate([np.exp(-r), np.exp(r)]))
return O @ Sq @ P
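# Illustrative sketch (a hypothetical helper): a symplectic matrix preserves the symplectic
# form Omega = [[0, I], [-I, 0]] in the xxpp ordering used here, i.e. S @ Omega @ S.T == Omega.
def _example_random_symplectic(N=3):
    """Check that random_symplectic preserves the symplectic form."""
    S = random_symplectic(N)
    I = np.identity(N)
    Omega = np.block([[np.zeros((N, N)), I], [-I, np.zeros((N, N))]])
    return np.allclose(S @ Omega @ S.T, Omega)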
def random_interferometer(N):
r"""Random unitary matrix representing an interferometer.
For more details, see :cite:`mezzadri2006`.
Args:
N (int): number of modes
Returns:
array: random :math:`N\times N` unitary distributed with the Haar measure
"""
z = randnc(N, N)/np.sqrt(2.0)
q, r = sp.linalg.qr(z)
    d = np.diagonal(r)
ph = d/np.abs(d)
U = np.multiply(q, ph, q)
return U
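# Illustrative sketch (a hypothetical helper): the sampled interferometer is unitary up to
# numerical precision.
def _example_random_interferometer(N=4):
    """Check that random_interferometer returns a unitary matrix."""
    U = random_interferometer(N)
    return np.allclose(U @ np.conj(U).T, np.identity(N))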
# ------------------------------------------------------------------------
# Decorators |
# ------------------------------------------------------------------------
class operation:
"""Groups a sequence of gates into a single operation to be used
within a Program context.
For example:
.. code-block:: python
@sf.operation(3)
def custom_operation(v1, v2, q):
CZgate(v1) | (q[0], q[1])
Vgate(v2) | q[2]
    Here, the ``operation`` decorator must receive an argument
detailing the number of subsystems the resulting custom
operation acts on.
The function it acts on can contain arbitrary
Python and Blackbird code that may normally be placed within a
Program context. Note that it must always accept the register
``q`` it acts on as the *last* argument of the function.
Once defined, it can be used like any other quantum operation:
.. code-block:: python
prog = sf.Program(3)
with prog.context as q:
            custom_operation(0.5719, 2.0603) | (q[0], q[1], q[2])
Note that here, we do not pass the register ``q`` directly
to the function - instead, it is defined on the right hand side
of the ``|`` operation, like all other Blackbird code.
Args:
ns (int): number of subsystems required by the operation
"""
def __init__(self, ns):
self.ns = ns
self.func = None
self.args = None
def __or__(self, reg):
"""Apply the operation to a part of a quantum register.
Redirects the execution flow to the wrapped function.
Args:
reg (RegRef, Sequence[RegRef]): subsystem(s) the operation is acting on
Returns:
list[RegRef]: subsystem list as RegRefs
"""
if (not reg) or (not self.ns):
raise ValueError("Wrong number of subsystems")
reg_len = 1
        if isinstance(reg, collections.abc.Sized):
reg_len = len(reg)
if reg_len != self.ns:
raise ValueError("Wrong number of subsystems")
return self._call_function(reg)
def _call_function(self, reg):
"""Executes the wrapped function and passes the quantum registers.
Args:
reg (RegRef, Sequence[RegRef]): subsystem(s) the operation is acting on
Returns:
list[RegRef]: subsystem list as RegRefs
"""
func_sig = signature(self.func)
num_params = len(func_sig.parameters)
if num_params == 0:
raise ValueError(
"Operation must receive the qumode register as an argument.")
if num_params != len(self.args) + 1:
raise ValueError("Mismatch in the number of arguments")
# pass parameters and subsystems to the function
if num_params == 1:
self.func(reg)
else:
self.func(*self.args, reg)
return reg
def __call__(self, func):
self.func = func
def f_proxy(*args):
"""
Proxy for function execution. Function will actually execute in __or__
"""
self.args = args
return self
return f_proxy
#=================================================
# Program functions
#=================================================
def is_unitary(prog):
"""True iff all the operations in the program are unitary.
Args:
prog (Program): quantum program
Returns:
bool: True iff all operations in the program are of type :class:`strawberryfields.ops.Gate`
"""
return all(isinstance(cmd.op, Gate) for cmd in prog.circuit)
def is_channel(prog):
"""True iff all the operations in the program can be represented as quantum channels.
Args:
prog (Program): quantum program
Returns:
        bool: True iff all operations in the program are of type :class:`strawberryfields.ops.Gate` or :class:`strawberryfields.ops.Channel`
"""
# FIXME isn't a preparation also a quantum channel?
return all(isinstance(cmd.op, (Channel, Gate)) for cmd in prog.circuit)
def _vectorize(tensor):
"""Given a tensor with 4N indices of dimension :math:`D` each, it returns the vectorized
tensor with 4 indices of dimension :math:`D^N` each. This is the inverse of the procedure
given by :func:`_unvectorize`.
Caution: this private method is intended to be used only for Choi and Liouville operators.
For example, :math:`N=2`,
::
0 --|‾‾‾‾|-- 1
2 --| |-- 3
4 --| |-- 5
6 --|____|-- 7
goes to
::
(0,2) --|‾‾‾‾|-- (1,3)
(4,6) --|____|-- (5,7)
Args:
tensor (array): a tensor with :math:`4N` indices of dimension :math:`D` each
Returns:
array: a tensor with 4 indices of dimension :math:`D^N` each
Raises:
ValueError: if the input tensor's dimensions are not all equal or if the number
of its indices is not a multiple of 4
"""
dims = tensor.ndim
if dims % 4 != 0:
raise ValueError('Tensor must have a number of indices that is a multiple of 4, but it has {dims} indices'.format(dims=dims))
shape = tensor.shape
if len(set(shape)) != 1:
raise ValueError('Tensor indices must have all the same dimension, but tensor has shape {shape}'.format(shape=shape))
transposed = np.einsum(tensor, [int(n) for n in np.arange(dims).reshape((2, dims//2)).T.reshape([-1])])
vectorized = np.reshape(transposed, [shape[0]**(dims//4)]*4)
transposed_back = np.einsum('abcd -> acbd', vectorized)
return transposed_back
def _unvectorize(tensor, num_subsystems):
"""Given a tensor with 4 indices, each of dimension :math:`D^N`, return the unvectorized
tensor with 4N indices of dimension D each. This is the inverse of the procedure
given by :func:`_vectorize`.
Caution: this private method is intended to be used only for Choi and Liouville operators.
Args:
tensor (array): a tensor with :math:`4` indices of dimension :math:`D^N`
Returns:
array: a tensor with :math:`4N` indices of dimension :math:`D` each
Raises:
ValueError: if the input tensor's dimensions are not all equal or if the number
of its indices is not 4
"""
dims = tensor.ndim
if dims != 4:
raise ValueError('tensor must have 4 indices, but it has {dims} indices'.format(dims=dims))
shape = tensor.shape
if len(set(shape)) != 1:
raise ValueError('tensor indices must have all the same dimension, but tensor has shape {shape}'.format(shape=shape))
transposed = np.einsum('abcd -> acbd', tensor)
    unvectorized = np.reshape(transposed, [int(round(shape[0]**(1/num_subsystems)))]*(4*num_subsystems))
transposed_back = np.einsum(unvectorized, [int(n) for n in np.arange(4*num_subsystems).reshape((2*num_subsystems, 2)).T.reshape([-1])])
return transposed_back
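# Illustrative sketch (a hypothetical helper; cutoff and mode number are arbitrary small
# values): numerical check of the round trip described in the docstrings above, i.e. that
# _unvectorize inverts _vectorize.
def _example_vectorize_round_trip(cutoff_dim=2, num_modes=2, seed=42):
    """Check that _unvectorize undoes _vectorize on a random tensor."""
    rng = np.random.RandomState(seed)
    tensor = rng.rand(*([cutoff_dim] * (4 * num_modes)))
    vectorized = _vectorize(tensor)                  # 4 indices of dimension cutoff_dim**num_modes
    recovered = _unvectorize(vectorized, num_modes)  # back to 4 * num_modes indices
    return np.allclose(tensor, recovered)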
def _interleaved_identities(n: int, cutoff_dim: int):
r"""Maximally entangled state of `n` modes.
Returns the tensor :math:`\sum_{abc\ldots} \ket{abc\ldots}\bra{abc\ldots}`
representing an unnormalized, maximally entangled state of `n` subsystems.
Args:
n (int): number of subsystems
cutoff_dim (int): Fock basis truncation dimension
Returns:
array: unnormalized maximally entangled state, shape == (cutoff_dim,) * (2*n)
"""
I = np.identity(cutoff_dim)
temp = I
for _ in range(1, n):
temp = np.tensordot(temp, I, axes=0)
# use einsum to permute the indices such that |a><a|*|b><b|*|c><c|*... becomes |abc...><abc...|
    sublist = [int(i) for i in np.arange(2*n).reshape((2, n)).T.reshape([-1])]
return np.einsum(temp, sublist)
def _program_in_CJ_rep(prog, cutoff_dim: int):
"""Convert a Program object to Choi-Jamiolkowski representation.
Doubles the number of modes of a Program object and prepends to its circuit
the preparation of the maximally entangled ket state.
The core idea is that when we apply any quantum channel (e.g. a unitary gate)
to the density matrix of the maximally entangled state, we obtain the Choi matrix
of the channel as the result.
If the channel is unitary, applying it on the maximally entangled ket yields
the corresponding unitary matrix, reshaped.
Args:
prog (Program): quantum program
cutoff_dim (int): the Fock basis truncation
Returns:
Program: modified program
"""
prog = copy.deepcopy(prog)
N = prog.init_num_subsystems
prog._add_subsystems(N) # pylint: disable=protected-access
prog.init_num_subsystems = 2*N
I = _interleaved_identities(N, cutoff_dim)
# prepend the circuit with the I ket preparation
prog.circuit.insert(0, Command(Ket(I), list(prog.reg_refs.values())))
return prog
def extract_unitary(prog, cutoff_dim: int, vectorize_modes: bool = False, backend: str = 'fock'):
r"""Numerical array representation of a unitary quantum circuit.
Note that the circuit must only include operations of the :class:`strawberryfields.ops.Gate` class.
* If ``vectorize_modes=True``, it returns a matrix.
* If ``vectorize_modes=False``, it returns an operator with :math:`2N` indices,
where N is the number of modes that the Program is created with. Adjacent
indices correspond to output-input pairs of the same mode.
Example:
This shows the Hong-Ou-Mandel effect by extracting the unitary of a 50/50 beamsplitter, and then
computing the output given by one photon at each input (notice the order of the indices: :math:`[out_1, in_1, out_2, in_2,\dots]`).
The result tells us that the two photons always emerge together from a random output port and never one per port.
>>> prog = sf.Program(num_subsystems=2)
>>> with prog.context as q:
>>> BSgate(np.pi/4) | q
>>> U = extract_unitary(prog, cutoff_dim=3)
>>> print(abs(U[:,1,:,1])**2)
[[0. 0. 0.5]
[0. 0. 0. ]
     [0.5 0. 0. ]]
Args:
prog (Program): quantum program
cutoff_dim (int): dimension of each index
vectorize_modes (bool): if True, reshape input and output modes in order to return a matrix
backend (str): the backend to build the unitary; ``'fock'`` (default) and ``'tf'`` are supported
Returns:
array, tf.Tensor: numerical array of the unitary circuit
as a NumPy ndarray (``'fock'`` backend) or as a TensorFlow Tensor (``'tf'`` backend)
Raises:
TypeError: if the operations used to construct the circuit are not all unitary
"""
if not is_unitary(prog):
raise TypeError("The circuit definition contains elements that are not of type Gate")
if backend not in ('fock', 'tf'):
raise ValueError("Only 'fock' and 'tf' backends are supported")
N = prog.init_num_subsystems
# extract the unitary matrix by running a modified version of the Program
p = _program_in_CJ_rep(prog, cutoff_dim)
eng = sf.LocalEngine(backend, backend_options={'cutoff_dim': cutoff_dim, 'pure': True})
result = eng.run(p).state.ket()
if vectorize_modes:
if backend == 'fock':
reshape = np.reshape
else:
reshape = tf.reshape
return reshape(result, [cutoff_dim**N, cutoff_dim**N])
    # here we rearrange the indices to go back to the order [out1, in1, out2, in2, ...]
if backend == 'fock':
tp = np.transpose
else:
tp = tf.transpose
return tp(result, [int(n) for n in np.arange(2*N).reshape((2, N)).T.reshape([-1])])
def extract_channel(prog, cutoff_dim: int, representation: str = 'choi', vectorize_modes: bool = False):
r"""Numerical array representation of the channel corresponding to a quantum circuit.
The representation choices include the Choi state representation, the Liouville representation, and
the Kraus representation.
.. note:: Channel extraction can currently only be performed using the ``'fock'`` backend.
**Tensor shapes**
* If ``vectorize_modes=True``:
- ``representation='choi'`` and ``representation='liouville'`` return an array
with 4 indices
- ``representation='kraus'`` returns an array of Kraus operators in matrix form
* If ``vectorize_modes=False``:
- ``representation='choi'`` and ``representation='liouville'`` return an array
with :math:`4N` indices
- ``representation='kraus'`` returns an array of Kraus operators with :math:`2N` indices each,
where :math:`N` is the number of modes that the Program is created with
Note that the Kraus representation automatically returns only the non-zero Kraus operators.
One can reduce the number of operators by discarding Kraus operators with small norm (thus approximating the channel).
**Choi representation**
Mathematically, the Choi representation of a channel is a bipartite state :math:`\Lambda_{AB}`
which contains a complete description of the channel. The way we use it to compute the action
of the channel :math:`\mathcal{C}` on an input state :math:`\mathcal{\rho}` is as follows:
.. math::
\mathcal{C}(\rho) = \mathrm{Tr}[(\rho_A^T\otimes\mathbb{1}_B)\Lambda_{AB}]
The indices of the non-vectorized Choi operator match exactly those of the state, so that the action
of the channel can be computed as (e.g., for one mode or for ``vectorize_modes=True``):
>>> rho_out = np.einsum('ab,abcd', rho_in, choi)
Notice that this respects the transpose operation.
For two modes:
>>> rho_out = np.einsum('abcd,abcdefgh', rho_in, choi)
Combining consecutive channels (in the order :math:`1,2,3,\dots`) is also straightforward with the Choi operator:
>>> choi_combined = np.einsum('abcd,cdef,efgh', choi_1, choi_2, choi_3)
**Liouville operator**
The Liouville operator is a partial transpose of the Choi operator, such that the first half of
consecutive index pairs are the output-input right modes (i.e., acting on the "bra" part of the state)
and the second half are the output-input left modes (i.e., acting on the "ket" part of the state).
Therefore, the action of the Liouville operator (e.g., for one mode or for ``vectorize_modes=True``) is
.. math::
\mathcal{C}(\rho) = \mathrm{unvec}[\mathcal{L}\mathrm{vec}(\rho)]
where vec() and unvec() are the operations that stack the columns of a matrix to form
a vector and vice versa.
In code:
>>> rho_out = np.einsum('abcd,bd->ca', liouville, rho_in)
Notice that the state contracts with the second index of each pair and that we output the ket
on the left (``c``) and the bra on the right (``a``).
For two modes we have:
>>> rho_out = np.einsum('abcdefgh,fbhd->eagc', liouville, rho_in)
The Liouville representation has the property that if the channel is unitary, the operator is separable.
On the other hand, even if the channel were the identity, the Choi operator would correspond to a maximally entangled state.
The choi and liouville operators in matrix form (i.e., with two indices) can be found as follows, where
``D`` is the dimension of each vectorized index (i.e., for :math:`N` modes, ``D=cutoff_dim**N``):
>>> choi_matrix = liouville.reshape(D**2, D**2).T
>>> liouville_matrix = choi.reshape(D**2, D**2).T
**Kraus representation**
The Kraus representation is perhaps the most well known:
.. math::
\mathcal{C}(\rho) = \sum_k A_k\rho A_k^\dagger
So to define a channel in the Kraus representation one needs to supply a list of Kraus operators :math:`\{A_k\}`.
In fact, the result of ``extract_channel`` in the Kraus representation is a rank-3 tensor, where the first
index is the one indexing the list of operators.
Adjacent indices of each Kraus operator correspond to output-input pairs of the same mode, so the action
of the channel can be written as (here for one mode or for ``vectorize_modes=True``):
>>> rho_out = np.einsum('abc,cd,aed->be', kraus, rho_in, np.conj(kraus))
Notice the transpose on the third index string (``aed`` rather than ``ade``), as the last operator should be the
conjugate transpose of the first, and we cannot just do ``np.conj(kraus).T`` because ``kraus`` has 3 indices and we
just need to transpose the last two.
Example:
Here we show that the Choi operator of the identity channel is proportional to
a maximally entangled Bell :math:`\ket{\phi^+}` state:
>>> prog = sf.Program(num_subsystems=1)
>>> C = extract_channel(prog, cutoff_dim=2, representation='choi')
>>> print(abs(C).reshape((4,4)))
[[1. 0. 0. 1.]
[0. 0. 0. 0.]
[0. 0. 0. 0.]
[1. 0. 0. 1.]]
Args:
prog (Program): program containing the circuit
cutoff_dim (int): dimension of each index
representation (str): choice between ``'choi'``, ``'liouville'`` or ``'kraus'``
vectorize_modes (bool): if True, reshapes the result into rank-4 tensor,
otherwise it returns a rank-4N tensor, where N is the number of modes
Returns:
array: channel, according to the specified options
Raises:
TypeError: if the gates used to construct the circuit are not all unitary or channels
"""
if not is_channel(prog):
raise TypeError("The circuit definition contains elements that are neither of type Gate nor of type Channel")
N = prog.init_num_subsystems
p = _program_in_CJ_rep(prog, cutoff_dim)
eng = sf.LocalEngine('fock', backend_options={'cutoff_dim': cutoff_dim, 'pure': True})
choi = eng.run(p).state.dm()
choi = np.einsum('abcd->cdab', _vectorize(choi))
if representation.lower() == 'choi':
result = choi
if not vectorize_modes:
result = _unvectorize(result, N)
elif representation.lower() == 'liouville':
result = np.einsum('abcd -> dbca', choi)
if not vectorize_modes:
result = _unvectorize(result, N)
elif representation.lower() == 'kraus':
# The liouville operator is the sum of a bipartite product of kraus matrices, so if we vectorize them we obtain
# a matrix whose eigenvectors are proportional to the vectorized kraus operators
vectorized_liouville = np.einsum('abcd -> cadb', choi).reshape([cutoff_dim**(2*N), cutoff_dim**(2*N)])
eigvals, eigvecs = np.linalg.eig(vectorized_liouville)
# We keep only those eigenvectors that correspond to non-zero eigenvalues
eigvecs = eigvecs[:, ~np.isclose(abs(eigvals), 0)]
eigvals = eigvals[~np.isclose(abs(eigvals), 0)]
# We rescale the eigenvectors with the sqrt of the eigenvalues (the other sqrt would rescale the right eigenvectors)
rescaled_eigenvectors = np.einsum('b,ab->ab', np.sqrt(eigvals), eigvecs)
# Finally we reshape the eigenvectors to form matrices, i.e., the Kraus operators and we make the first index
# be the one that indexes the list of Kraus operators.
result = np.einsum('abc->cab', rescaled_eigenvectors.reshape([cutoff_dim**N, cutoff_dim**N, -1]))
if not vectorize_modes:
result = np.einsum(np.reshape(result, [-1]+[cutoff_dim]*(2*N)), range(1+2*N), [0]+[2*n+1 for n in range(N)]+[2*n+2 for n in range(N)])
else:
raise ValueError('representation {} not supported'.format(representation))
return result
|
the-stack_106_18808
|
from random import choice
import home
from home import fund, lp
from time import sleep
lista = ['pedra', 'papel', 'tesoura']
while True:
home.titulo('JO KEN PO', 4, '-')
a = choice(lista)
while True:
try:
b = int(input('Escolhe uma opção\n[1] pedra\n[2] papel\n[3] tesoura\nR: '))
        except ValueError:
print('\033[7m Opção Inválida! \033[m')
continue
else:
if b == 1 or b == 2 or b == 3:
break
else:
print('\033[7m Opção Inválida! \033[m')
continue
sleep(1)
print('\033[31mJO\033[m')
sleep(1)
print('\033[35mKEN\033[m')
sleep(1)
print('\033[34mPO\033[m')
print(f'Você escolheu {lista[b-1]}')
print('x')
print(f'A maquina escolheu {a}')
if b == 1:
if a == 'pedra':
print(f'{fund(3)}Vocês empataram!{lp}')
elif a == 'papel':
print(f'{fund(1)}Você Perdeu!{lp}')
elif a == 'tesoura':
print(f'{fund(2)}Você Ganhou!{lp}')
elif b == 2:
if a == 'pedra':
print(f'{fund(2)}Você Ganhou!{lp}')
elif a == 'papel':
print(f'{fund(3)}Vocês empataram!{lp}')
elif a == 'tesoura':
print(f'{fund(1)}Você Perdeu!{lp}')
elif b == 3:
if a == 'pedra':
print(f'{fund(1)}Você Perdeu!{lp}')
elif a == 'papel':
print(f'{fund(2)}Você Ganhou!{lp}')
elif a == 'tesoura':
print(f'{fund(3)}Vocês empataram!{lp}')
while True:
cont = str(input('Quer continuar?[S][N]: ')).strip().upper()[0]
if cont in 'SN':
break
else:
            print('\033[7m Resposta Inválida. Tente novamente! \033[m')
if cont in 'N':
break
|
the-stack_106_18809
|
from __future__ import division
import argparse
import multiprocessing
import chainer
from chainer.datasets import TransformDataset
from chainer import iterators
from chainer.links import Classifier
from chainer.optimizer import WeightDecay
from chainer.optimizers import CorrectedMomentumSGD
from chainer import training
from chainer.training import extensions
from chainercv.datasets import directory_parsing_label_names
from chainercv.datasets import DirectoryParsingLabelDataset
from chainercv.transforms import center_crop
from chainercv.transforms import random_flip
from chainercv.transforms import random_sized_crop
from chainercv.transforms import resize
from chainercv.transforms import scale
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.links.model.resnet import Bottleneck
from chainercv.links import ResNet101
from chainercv.links import ResNet152
from chainercv.links import ResNet50
import chainermn
class TrainTransform(object):
def __init__(self, mean):
self.mean = mean
def __call__(self, in_data):
img, label = in_data
img = random_sized_crop(img)
img = resize(img, (224, 224))
img = random_flip(img, x_random=True)
img -= self.mean
return img, label
class ValTransform(object):
def __init__(self, mean):
self.mean = mean
def __call__(self, in_data):
img, label = in_data
img = scale(img, 256)
img = center_crop(img, (224, 224))
img -= self.mean
return img, label
def main():
model_cfgs = {
'resnet50': {'class': ResNet50, 'score_layer_name': 'fc6',
'kwargs': {'arch': 'fb'}},
'resnet101': {'class': ResNet101, 'score_layer_name': 'fc6',
'kwargs': {'arch': 'fb'}},
'resnet152': {'class': ResNet152, 'score_layer_name': 'fc6',
'kwargs': {'arch': 'fb'}}
}
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('train', help='Path to root of the train dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--model',
'-m', choices=model_cfgs.keys(), default='resnet50',
help='Convnet models')
parser.add_argument('--communicator', type=str,
default='hierarchical', help='Type of communicator')
parser.add_argument('--loaderjob', type=int, default=4)
parser.add_argument('--batchsize', type=int, default=32,
help='Batch size for each worker')
parser.add_argument('--lr', type=float)
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--weight_decay', type=float, default=0.0001)
parser.add_argument('--out', type=str, default='result')
parser.add_argument('--epoch', type=int, default=90)
args = parser.parse_args()
# This fixes a crash caused by a bug with multiprocessing and MPI.
multiprocessing.set_start_method('forkserver')
p = multiprocessing.Process()
p.start()
p.join()
comm = chainermn.create_communicator(args.communicator)
device = comm.intra_rank
if args.lr is not None:
lr = args.lr
else:
lr = 0.1 * (args.batchsize * comm.size) / 256
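        # e.g. --batchsize 32 on 8 workers: lr = 0.1 * (32 * 8) / 256 = 0.1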
if comm.rank == 0:
print('lr={}: lr is selected based on the linear '
'scaling rule'.format(lr))
label_names = directory_parsing_label_names(args.train)
model_cfg = model_cfgs[args.model]
extractor = model_cfg['class'](
n_class=len(label_names), **model_cfg['kwargs'])
extractor.pick = model_cfg['score_layer_name']
model = Classifier(extractor)
# Following https://arxiv.org/pdf/1706.02677.pdf,
# the gamma of the last BN of each resblock is initialized by zeros.
for l in model.links():
if isinstance(l, Bottleneck):
l.conv3.bn.gamma.data[:] = 0
if comm.rank == 0:
train_data = DirectoryParsingLabelDataset(args.train)
val_data = DirectoryParsingLabelDataset(args.val)
train_data = TransformDataset(
train_data, TrainTransform(extractor.mean))
val_data = TransformDataset(val_data, ValTransform(extractor.mean))
print('finished loading dataset')
else:
train_data, val_data = None, None
train_data = chainermn.scatter_dataset(train_data, comm, shuffle=True)
val_data = chainermn.scatter_dataset(val_data, comm, shuffle=True)
train_iter = chainer.iterators.MultiprocessIterator(
train_data, args.batchsize, n_processes=args.loaderjob)
val_iter = iterators.MultiprocessIterator(
val_data, args.batchsize,
repeat=False, shuffle=False, n_processes=args.loaderjob)
optimizer = chainermn.create_multi_node_optimizer(
CorrectedMomentumSGD(lr=lr, momentum=args.momentum), comm)
optimizer.setup(model)
for param in model.params():
if param.name not in ('beta', 'gamma'):
param.update_rule.add_hook(WeightDecay(args.weight_decay))
if device >= 0:
chainer.cuda.get_device(device).use()
model.to_gpu()
updater = chainer.training.StandardUpdater(
train_iter, optimizer, device=device)
trainer = training.Trainer(
updater, (args.epoch, 'epoch'), out=args.out)
@make_shift('lr')
def warmup_and_exponential_shift(trainer):
epoch = trainer.updater.epoch_detail
warmup_epoch = 5
if epoch < warmup_epoch:
if lr > 0.1:
warmup_rate = 0.1 / lr
rate = warmup_rate \
+ (1 - warmup_rate) * epoch / warmup_epoch
else:
rate = 1
elif epoch < 30:
rate = 1
elif epoch < 60:
rate = 0.1
elif epoch < 80:
rate = 0.01
else:
rate = 0.001
return rate * lr
trainer.extend(warmup_and_exponential_shift)
evaluator = chainermn.create_multi_node_evaluator(
extensions.Evaluator(val_iter, model, device=device), comm)
trainer.extend(evaluator, trigger=(1, 'epoch'))
log_interval = 0.1, 'epoch'
print_interval = 0.1, 'epoch'
if comm.rank == 0:
trainer.extend(chainer.training.extensions.observe_lr(),
trigger=log_interval)
trainer.extend(
extensions.snapshot_object(
extractor, 'snapshot_model_{.updater.epoch}.npz'),
trigger=(args.epoch, 'epoch'))
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.PrintReport(
['iteration', 'epoch', 'elapsed_time', 'lr',
'main/loss', 'validation/main/loss',
'main/accuracy', 'validation/main/accuracy']
), trigger=print_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.run()
if __name__ == '__main__':
main()
|
the-stack_106_18810
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import threading
import time
from typing import Callable
import pytest
from mock import MagicMock
import intelliflow.api_ext as flow
from intelliflow.api_ext import *
from intelliflow.core.application.application import Application
from intelliflow.core.platform import development as development_module
from intelliflow.core.platform.compute_targets.email import EMAIL
from intelliflow.core.platform.compute_targets.slack import Slack
from intelliflow.core.platform.constructs import ConstructPermission
from intelliflow.core.platform.definitions.compute import (
ComputeFailedSessionState,
ComputeFailedSessionStateType,
ComputeResourceDesc,
ComputeResponse,
ComputeSessionDesc,
ComputeSessionState,
ComputeSuccessfulResponse,
ComputeSuccessfulResponseType,
)
from intelliflow.core.signal_processing import Slot
from intelliflow.core.signal_processing.signal import *
from intelliflow.mixins.aws.test import AWSTestBase
from intelliflow.utils.test.hook import GenericComputeDescriptorHookVerifier, GenericRoutingHookImpl, OnExecBeginHookImpl
from intelliflow.utils.test.inlined_compute import NOOPCompute
class TestAWSApplicationExecutionHooks(AWSTestBase):
def _create_test_application(self, id_or_app: Union[str, Application]):
if isinstance(id_or_app, str):
id = id_or_app
app = AWSApplication(id, region=self.region)
else:
app = id_or_app
id = app.id
ducsi_data = app.marshal_external_data(
GlueTable("booker", "d_unified_cust_shipment_items", partition_keys=["region_id", "ship_day"]),
"DEXML_DUCSI",
{"region_id": {"type": DimensionType.LONG, "ship_day": {"format": "%Y-%m-%d", "type": DimensionType.DATETIME}}},
{"1": {"*": {"timezone": "PST"}}},
SignalIntegrityProtocol("FILE_CHECK", {"file": ["SNAPSHOT"]}),
)
# add a dimensionless table (important corner-case)
ship_options = app.marshal_external_data(
GlueTable(
"dexbi",
"d_ship_option",
partition_keys=[],
),
"ship_options",
{},
{},
SignalIntegrityProtocol("FILE_CHECK", {"file": ["DELTA", "SNAPSHOT"]}),
)
return app
def _test_all_application_hooks(self, hook_generator: Callable):
from test.intelliflow.core.application.test_aws_application_execution_control import TestAWSApplicationExecutionControl
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = self._create_test_application("exec-hooks")
ducsi_data = app.get_data("DEXML_DUCSI", context=Application.QueryContext.DEV_CONTEXT)[0]
ship_options = app.get_data("ship_options", context=Application.QueryContext.DEV_CONTEXT)[0]
email_obj = EMAIL(sender="[email protected]", recipient_list=["[email protected]"])
on_exec_begin_hook = hook_generator()
on_exec_skipped_hook = hook_generator()
on_compute_success_hook = hook_generator()
on_compute_failure_hook = hook_generator()
on_compute_retry_hook = hook_generator()
on_success_hook = hook_generator()
on_failure_hook = hook_generator()
exec_checkpoints = [
RouteCheckpoint(checkpoint_in_secs=10, slot=hook_generator()),
RouteCheckpoint(checkpoint_in_secs=20, slot=hook_generator()),
]
repeat_ducsi = app.create_data(
id="REPEAT_DUCSI",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets="output=DEXML_DUCSI.limit(100)",
execution_hook=RouteExecutionHook(
on_exec_begin=on_exec_begin_hook,
on_exec_skipped=on_exec_skipped_hook,
on_compute_success=on_compute_success_hook,
on_compute_failure=on_compute_failure_hook,
on_compute_retry=on_compute_retry_hook,
on_success=on_success_hook,
on_failure=on_failure_hook,
checkpoints=exec_checkpoints,
),
)
on_exec_skipped_hook_2 = hook_generator()
on_pending_node_created_hook = hook_generator()
on_expiration_hook = hook_generator()
pending_node_checkpoints = [RouteCheckpoint(checkpoint_in_secs=10, slot=hook_generator())]
# we will be using this second node for Pending Node checks mostly
app.create_data(
id="DUCSI_WITH_SO",
inputs={"DEXML_DUCSI": ducsi_data["*"][:-2], "SHIP_OPTIONS": ship_options},
compute_targets="output=DEXML_DUCSI.limit(100).join(SHIP_OPTIONS, DEXML_DUCSI.customer_ship_option == SHIP_OPTIONS.ship_option)",
execution_hook=RouteExecutionHook(on_exec_skipped=on_exec_skipped_hook_2),
pending_node_hook=RoutePendingNodeHook(
on_pending_node_created=on_pending_node_created_hook, on_expiration=on_expiration_hook, checkpoints=pending_node_checkpoints
),
pending_node_expiration_ttl_in_secs=20,
)
# SERIALIZATION: inject serialize/deserialize sequence for enhanced serialization coverage
json_str = app.dev_context.to_json()
dev_context = CoreData.from_json(json_str)
app._dev_context = dev_context
#
app.activate()
# 1- Inject DUCSI event to trigger execution on the first node/route and create a pending node on the second.
# mock batch_compute response
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "REPEAT_DUCSI"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
assert session_desc.session_id == "job_id"
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ducsi_data[1]["2020-12-25"],
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert len(app.get_active_routes()) == 1
# check if the first exec hook has been hit and done with its own logic
assert on_exec_begin_hook.verify(app)
assert not on_exec_skipped_hook.verify(app)
assert not on_compute_failure_hook.verify(app)
assert not on_compute_success_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
assert not on_success_hook.verify(app)
assert not on_failure_hook.verify(app)
# check the pending node hooks registered on the second route.
assert on_pending_node_created_hook.verify(app)
assert not on_exec_skipped_hook_2.verify(app)
assert not on_expiration_hook.verify(app)
# emulate runtime Processor behaviour, to check the routes otherwise checkpoints won't be checked.
# reason: in unit-tests the Processor does not 'run' in the background. So the following call is somewhat like
# a 'next cycle/tick',
app.update_active_routes_status()
assert not any([c.slot.verify(app) for c in exec_checkpoints])
assert not any([c.slot.verify(app) for c in pending_node_checkpoints])
time.sleep(10)
# next-cycle again
app.update_active_routes_status()
# execution passed the checkpoint 10 secs
assert exec_checkpoints[0].slot.verify(app)
# pending node passed the checkpoint 10 secs
assert pending_node_checkpoints[0].slot.verify(app)
# now the second internal data node (route) in the system actually waits for its second input dependency
# 'ship_options'. Previous process call with ducsi has created a pending node in it as well. a signal for
# 'ship_options' will complete that pending node and cause a trigger.
ship_options = app.get_data("ship_options", context=Application.QueryContext.DEV_CONTEXT)[0]
# mock again
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "DUCSI_WITH_SO"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id2")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ship_options,
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert len(app.get_active_routes()) == 2
# check idempotency
app.process(ducsi_data[1]["2020-12-25"], with_activated_processor=False)
# now we can check the skipped hook due to idempotency related call above
assert on_exec_skipped_hook.verify(app)
# no effect (still the same count on the mock objects)
app.process(ship_options, with_activated_processor=False)
assert on_exec_skipped_hook_2.verify(app)
# initiate another trigger on 'REPEAT_DUCSI' with a different partition (12-26)
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
assert materialized_output.alias == "REPEAT_DUCSI"
return TestAWSApplicationExecutionControl.create_batch_compute_response(ComputeSuccessfulResponseType.PROCESSING, "job_id3")
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING, session_desc)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(ducsi_data[1]["2020-12-26"], with_activated_processor=False)
# finish first job (from 12-25 on both routes), since Processor is not running in the background now
# we will have to use related app API to force update RoutingTable status.
# only active record remaining should be the most recent one (12-26):
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
            raise RuntimeError(
                "This should not be called since we are not supposed to yield a new "
                "active record at this point in this test"
            )
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
if session_desc.session_id in ["job_id"]: # first active record
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.COMPLETED, session_desc
)
else:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.PROCESSING, session_desc
)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
assert on_compute_success_hook.verify(app)
assert not on_compute_failure_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
assert on_success_hook.verify(app)
assert not on_failure_hook.verify(app)
# we now have only one active record (active batch compute session) and a pending node, move 15 secs to:
# - cause expiration on the only job of the second route
time.sleep(20)
app.update_active_routes_status()
assert on_expiration_hook.verify(app)
# move 10 more to:
# - cause second checkpoint to be called on the first route (due to second execution)
time.sleep(25)
app.update_active_routes_status()
assert exec_checkpoints[1].slot.verify(app)
# finish the third job (12-26) of the first route with FAILURE
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
if session_desc.session_id in ["job_id3"]: # third active record
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.APP_INTERNAL, session_desc
)
else:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(
ComputeSessionStateType.PROCESSING, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
assert on_compute_failure_hook.verify(app)
assert on_failure_hook.verify(app)
assert not on_compute_retry_hook.verify(app)
self.patch_aws_stop()
def test_all_application_hooks_generic(self):
self._test_all_application_hooks(lambda: GenericRoutingHookImpl())
def test_all_application_hooks_with_EMAIL(self):
email_obj = EMAIL(sender="[email protected]", recipient_list=["[email protected]"])
self._test_all_application_hooks(lambda: GenericComputeDescriptorHookVerifier(email_obj.action()))
def test_all_application_hooks_with_slack(self):
slack_obj = Slack(recipient_list=["https://hooks.slack.com/workflows/1/"], message="test message")
self._test_all_application_hooks(lambda: GenericComputeDescriptorHookVerifier(slack_obj.action()))
def test_application_hooks_generate_right_permissions(self):
"""Test system provided compute targets' compatibility and runtime permission contribution as hooks"""
self.patch_aws_start(glue_catalog_has_all_tables=True)
self.app = AWSApplication(app_name="sys-hooks", region=self.region)
email_obj = EMAIL(sender="[email protected]", recipient_list=["[email protected]"])
self.app.create_data(
id="dummy_node_EMAIL_as_pending_trigger_hook",
compute_targets=[NOOPCompute],
pending_node_hook=RoutePendingNodeHook(on_pending_node_created=email_obj.action()),
)
# Test permissions applied to runtime / exec role as well
# keep reference of actual policy updater method so that we can retore it at the end.
real_put_inlined_policy = development_module.put_inlined_policy
def put_inlined_policy(
role_name: str, policy_name: str, action_resource_pairs: Set[ConstructPermission], base_session: "boto3.Session"
) -> None:
if "IntelliFlowExeRole" in role_name:
# check EMAIL resource in runtime permission resources (SES ARN, etc)
assert any([email_obj.sender in resource for perm in action_resource_pairs for resource in perm.resource])
development_module.put_inlined_policy = MagicMock(side_effect=put_inlined_policy)
# above mock / callback should be called during the activation
self.app.activate()
# just make sure that it was called actually (otherwise there is no point in this test :)
assert development_module.put_inlined_policy.call_count > 0
# restore
development_module.put_inlined_policy = real_put_inlined_policy
self.patch_aws_stop()
# Test permissions applied to runtime / exec role as well
def test_application_retry_hook(self):
from test.intelliflow.core.application.test_aws_application_execution_control import TestAWSApplicationExecutionControl
self.patch_aws_start(glue_catalog_has_all_tables=True)
app = self._create_test_application("exec-hooks")
ducsi_data = app.get_data("DEXML_DUCSI", context=Application.QueryContext.DEV_CONTEXT)[0]
on_failure_hook = GenericRoutingHookImpl()
on_compute_retry_hook = GenericRoutingHookImpl()
on_failure_hook2 = GenericRoutingHookImpl()
on_compute_retry_hook2 = GenericRoutingHookImpl()
app.create_data(
id="REPEAT_DUCSI",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets=[GlueBatchCompute(code="output=DEXML_DUCSI.limit(100)", retry_count=1)],
execution_hook=RouteExecutionHook(on_compute_retry=on_compute_retry_hook, on_failure=on_failure_hook),
)
app.create_data(
id="REPEAT_DUCSI2",
inputs={
"DEXML_DUCSI": ducsi_data,
},
compute_targets=[GlueBatchCompute(code="output=DEXML_DUCSI.limit(100)", retry_count=0)],
execution_hook=RouteExecutionHook(on_compute_retry=on_compute_retry_hook2, on_failure=on_failure_hook2),
)
app.activate()
# 1- Inject DUCSI event to trigger execution on the nodes/routes
# mock batch_compute response
def compute(
materialized_inputs: List[Signal],
slot: Slot,
materialized_output: Signal,
execution_ctx_id: str,
retry_session_desc: Optional[ComputeSessionDesc] = None,
) -> ComputeResponse:
# both of the nodes will have a new compute session
return TestAWSApplicationExecutionControl.create_batch_compute_response(
ComputeSuccessfulResponseType.PROCESSING, f"job_id-{materialized_output.alias}"
)
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_session_state(ComputeSessionStateType.PROCESSING)
app.platform.batch_compute.compute = MagicMock(side_effect=compute)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.process(
ducsi_data[1]["2020-12-25"],
# make it SYNC (use the local processor instance in sync mode)
with_activated_processor=False,
)
assert not on_compute_retry_hook.verify(app)
assert not on_compute_retry_hook2.verify(app)
assert not on_failure_hook.verify(app)
assert not on_failure_hook2.verify(app)
# now make sure that during the periodical check both of the nodes fails in a transient way.
# this causes implicit retries and yields brand new sessions, however this should not count towards retry limit.
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.TRANSIENT, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
# emulate runtime Processor behaviour, to check the routes otherwise checkpoints won't be checked.
# reason: in unit-tests the Processor does not 'run' in the background. So the following call is somewhat like
# a 'next cycle/tick',
app.update_active_routes_status()
assert not on_compute_retry_hook.verify(app)
assert not on_compute_retry_hook2.verify(app)
assert not on_failure_hook.verify(app)
assert not on_failure_hook2.verify(app)
# now emulate a job failure:
# only the node with retry > 0 should be retried
def get_session_state(session_desc: ComputeSessionDesc, active_compute_record: "RoutingTable.ComputeRecord") -> ComputeSessionState:
return TestAWSApplicationExecutionControl.create_batch_compute_failed_session_state(
ComputeFailedSessionStateType.APP_INTERNAL, session_desc
)
app.platform.batch_compute.get_session_state = MagicMock(side_effect=get_session_state)
app.update_active_routes_status()
# retried!
assert on_compute_retry_hook.verify(app)
assert not on_failure_hook.verify(app)
# will never be retried since retry_count is 0.
# this should actually be failed and terminated.
assert not on_compute_retry_hook2.verify(app)
assert on_failure_hook2.verify(app)
# now during the second check max_retry_count of 1 must be hit and the compute must fail.
app.update_active_routes_status()
assert on_failure_hook.verify(app)
self.patch_aws_stop()
|
the-stack_106_18813
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_namespace_patch import ResourceNamespacePatch
class SBNamespaceUpdateParameters(ResourceNamespacePatch):
"""Description of a namespace resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Resource location
:type location: str
:param tags: Resource tags
:type tags: dict[str, str]
    :param sku: Properties of Sku
:type sku: ~azure.mgmt.servicebus.models.SBSku
:ivar provisioning_state: Provisioning state of the namespace.
:vartype provisioning_state: str
:ivar created_at: The time the namespace was created.
:vartype created_at: datetime
:ivar updated_at: The time the namespace was updated.
:vartype updated_at: datetime
:ivar service_bus_endpoint: Endpoint you can use to perform Service Bus
operations.
:vartype service_bus_endpoint: str
:ivar metric_id: Identifier for Azure Insights metrics
:vartype metric_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
'service_bus_endpoint': {'readonly': True},
'metric_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'sku': {'key': 'sku', 'type': 'SBSku'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'},
'updated_at': {'key': 'properties.updatedAt', 'type': 'iso-8601'},
'service_bus_endpoint': {'key': 'properties.serviceBusEndpoint', 'type': 'str'},
'metric_id': {'key': 'properties.metricId', 'type': 'str'},
}
def __init__(self, *, location: str=None, tags=None, sku=None, **kwargs) -> None:
super(SBNamespaceUpdateParameters, self).__init__(location=location, tags=tags, **kwargs)
self.sku = sku
self.provisioning_state = None
self.created_at = None
self.updated_at = None
self.service_bus_endpoint = None
self.metric_id = None
|
the-stack_106_18814
|
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools for working with the host system"""
# Copyright 2012 Canonical Ltd.
#
# Authors:
# Nick Moffitt <[email protected]>
# Matthew Wedgwood <[email protected]>
import os
import re
import pwd
import glob
import grp
import random
import string
import subprocess
import hashlib
import functools
import itertools
import six
from contextlib import contextmanager
from collections import OrderedDict
from .hookenv import log
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
__platform__ = get_platform()
if __platform__ == "ubuntu":
from charmhelpers.core.host_factory.ubuntu import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import (
service_available,
add_new_group,
lsb_release,
cmp_pkgrevno,
) # flake8: noqa -- ignore F401 for this import
def service_start(service_name):
"""Start a system service"""
return service('start', service_name)
def service_stop(service_name):
"""Stop a system service"""
return service('stop', service_name)
def service_restart(service_name):
"""Restart a system service"""
return service('restart', service_name)
def service_reload(service_name, restart_on_failure=False):
"""Reload a system service, optionally falling back to restart if
reload fails"""
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
return service_result
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
"""Pause a system service.
Stop it, and prevent it from starting again at boot."""
stopped = True
if service_running(service_name):
stopped = service_stop(service_name)
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('disable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
with open(override_path, 'w') as fh:
fh.write("manual\n")
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "disable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
return stopped
def service_resume(service_name, init_dir="/etc/init",
initd_dir="/etc/init.d"):
"""Resume a system service.
Reenable starting again at boot. Start the service"""
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
sysv_file = os.path.join(initd_dir, service_name)
if init_is_systemd():
service('enable', service_name)
elif os.path.exists(upstart_file):
override_path = os.path.join(
init_dir, '{}.override'.format(service_name))
if os.path.exists(override_path):
os.unlink(override_path)
elif os.path.exists(sysv_file):
subprocess.check_call(["update-rc.d", service_name, "enable"])
else:
raise ValueError(
"Unable to detect {0} as SystemD, Upstart {1} or"
" SysV {2}".format(
service_name, upstart_file, sysv_file))
started = service_running(service_name)
if not started:
started = service_start(service_name)
return started
def service(action, service_name):
"""Control a system service"""
if init_is_systemd():
cmd = ['systemctl', action, service_name]
else:
cmd = ['service', service_name, action]
return subprocess.call(cmd) == 0
_UPSTART_CONF = "/etc/init/{}.conf"
_INIT_D_CONF = "/etc/init.d/{}"
def service_running(service_name):
"""Determine whether a system service is running"""
if init_is_systemd():
return service('is-active', service_name)
else:
if os.path.exists(_UPSTART_CONF.format(service_name)):
try:
output = subprocess.check_output(
['status', service_name],
stderr=subprocess.STDOUT).decode('UTF-8')
except subprocess.CalledProcessError:
return False
else:
# This works for upstart scripts where the 'service' command
# returns a consistent string to represent running
# 'start/running'
if ("start/running" in output or
"is running" in output or
"up and running" in output):
return True
elif os.path.exists(_INIT_D_CONF.format(service_name)):
# Check System V scripts init script return codes
return service('status', service_name)
return False
SYSTEMD_SYSTEM = '/run/systemd/system'
def init_is_systemd():
"""Return True if the host system uses systemd, False otherwise."""
return os.path.isdir(SYSTEMD_SYSTEM)
def adduser(username, password=None, shell='/bin/bash',
system_user=False, primary_group=None,
secondary_groups=None, uid=None, home_dir=None):
"""Add a user to the system.
Will log but otherwise succeed if the user already exists.
:param str username: Username to create
:param str password: Password for user; if ``None``, create a system user
:param str shell: The default shell for the user
:param bool system_user: Whether to create a login or system user
:param str primary_group: Primary group for user; defaults to username
:param list secondary_groups: Optional list of additional groups
:param int uid: UID for user being created
:param str home_dir: Home directory for user
:returns: The password database entry struct, as returned by `pwd.getpwnam`
"""
try:
user_info = pwd.getpwnam(username)
log('user {0} already exists!'.format(username))
if uid:
user_info = pwd.getpwuid(int(uid))
log('user with uid {0} already exists!'.format(uid))
except KeyError:
log('creating user {0}'.format(username))
cmd = ['useradd']
if uid:
cmd.extend(['--uid', str(uid)])
if home_dir:
cmd.extend(['--home', str(home_dir)])
if system_user or password is None:
cmd.append('--system')
else:
cmd.extend([
'--create-home',
'--shell', shell,
'--password', password,
])
if not primary_group:
try:
grp.getgrnam(username)
primary_group = username # avoid "group exists" error
except KeyError:
pass
if primary_group:
cmd.extend(['-g', primary_group])
if secondary_groups:
cmd.extend(['-G', ','.join(secondary_groups)])
cmd.append(username)
subprocess.check_call(cmd)
user_info = pwd.getpwnam(username)
return user_info
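# Illustrative sketch only: a minimal example of the adduser() helper above,
# creating a system account for a hypothetical service. Names are placeholders.
def _example_create_service_user():
    return adduser('myservice', system_user=True, shell='/usr/sbin/nologin')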
def user_exists(username):
"""Check if a user exists"""
try:
pwd.getpwnam(username)
user_exists = True
except KeyError:
user_exists = False
return user_exists
def uid_exists(uid):
"""Check if a uid exists"""
try:
pwd.getpwuid(uid)
uid_exists = True
except KeyError:
uid_exists = False
return uid_exists
def group_exists(groupname):
"""Check if a group exists"""
try:
grp.getgrnam(groupname)
group_exists = True
except KeyError:
group_exists = False
return group_exists
def gid_exists(gid):
"""Check if a gid exists"""
try:
grp.getgrgid(gid)
gid_exists = True
except KeyError:
gid_exists = False
return gid_exists
def add_group(group_name, system_group=False, gid=None):
"""Add a group to the system
Will log but otherwise succeed if the group already exists.
:param str group_name: group to create
:param bool system_group: Create system group
:param int gid: GID for user being created
:returns: The password database entry struct, as returned by `grp.getgrnam`
"""
try:
group_info = grp.getgrnam(group_name)
log('group {0} already exists!'.format(group_name))
if gid:
group_info = grp.getgrgid(gid)
log('group with gid {0} already exists!'.format(gid))
except KeyError:
log('creating group {0}'.format(group_name))
add_new_group(group_name, system_group, gid)
group_info = grp.getgrnam(group_name)
return group_info
def add_user_to_group(username, group):
"""Add a user to a group"""
cmd = ['gpasswd', '-a', username, group]
log("Adding user {} to group {}".format(username, group))
subprocess.check_call(cmd)
def rsync(from_path, to_path, flags='-r', options=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
cmd = ['/usr/bin/rsync', flags]
cmd.extend(options)
cmd.append(from_path)
cmd.append(to_path)
log(" ".join(cmd))
return subprocess.check_output(cmd).decode('UTF-8').strip()
def symlink(source, destination):
"""Create a symbolic link"""
log("Symlinking {} as {}".format(source, destination))
cmd = [
'ln',
'-sf',
source,
destination,
]
subprocess.check_call(cmd)
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
"""Create a directory"""
log("Making dir {} {}:{} {:o}".format(path, owner, group,
perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
realpath = os.path.abspath(path)
path_exists = os.path.exists(realpath)
if path_exists and force:
if not os.path.isdir(realpath):
log("Removing non-directory file {} prior to mkdir()".format(path))
os.unlink(realpath)
os.makedirs(realpath, perms)
elif not path_exists:
os.makedirs(realpath, perms)
os.chown(realpath, uid, gid)
os.chmod(realpath, perms)
def write_file(path, content, owner='root', group='root', perms=0o444):
"""Create or overwrite a file with the contents of a byte string."""
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
target.write(content)
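# Illustrative sketch combining mkdir() and write_file() above to drop a
# root-owned configuration file. The paths and contents are hypothetical.
def _example_write_config():
    mkdir('/etc/myapp', owner='root', group='root', perms=0o755)
    write_file('/etc/myapp/myapp.conf', b'enabled=true\n', perms=0o644)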
def fstab_remove(mp):
"""Remove the given mountpoint entry from /etc/fstab"""
return Fstab.remove_by_mountpoint(mp)
def fstab_add(dev, mp, fs, options=None):
"""Adds the given device entry to the /etc/fstab file"""
return Fstab.add(dev, mp, fs, options=options)
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
cmd_args.extend(['-o', options])
cmd_args.extend([device, mountpoint])
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
if persist:
return fstab_add(device, mountpoint, filesystem, options=options)
return True
def umount(mountpoint, persist=False):
"""Unmount a filesystem"""
cmd_args = ['umount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
if persist:
return fstab_remove(mountpoint)
return True
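# Illustrative sketch of the mount helpers above: mount a hypothetical device,
# persist it to /etc/fstab, then unmount it and drop the fstab entry again.
def _example_mount_cycle():
    if mount('/dev/sdb1', '/mnt/data', filesystem='ext4', persist=True):
        umount('/mnt/data', persist=True)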
def mounts():
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
with open('/proc/mounts') as f:
# [['/mount/point','/dev/path'],[...]]
system_mounts = [m[1::-1] for m in [l.strip().split()
for l in f.readlines()]]
return system_mounts
def fstab_mount(mountpoint):
"""Mount filesystem using fstab"""
cmd_args = ['mount', mountpoint]
try:
subprocess.check_output(cmd_args)
except subprocess.CalledProcessError as e:
        log('Error mounting {}\n{}'.format(mountpoint, e.output))
return False
return True
def file_hash(path, hash_type='md5'):
"""Generate a hash checksum of the contents of 'path' or None if not found.
    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
"""
if os.path.exists(path):
h = getattr(hashlib, hash_type)()
with open(path, 'rb') as source:
h.update(source.read())
return h.hexdigest()
else:
return None
def path_hash(path):
"""Generate a hash checksum of all files matching 'path'. Standard
wildcards like '*' and '?' are supported, see documentation for the 'glob'
module for more information.
:return: dict: A { filename: hash } dictionary for all matched files.
Empty if none found.
"""
return {
filename: file_hash(filename)
for filename in glob.iglob(path)
}
def check_hash(path, checksum, hash_type='md5'):
"""Validate a file using a cryptographic checksum.
:param str checksum: Value of the checksum used to validate the file.
:param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
such as md5, sha1, sha256, sha512, etc.
:raises ChecksumError: If the file fails the checksum
"""
actual_checksum = file_hash(path, hash_type)
if checksum != actual_checksum:
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
class ChecksumError(ValueError):
"""A class derived from Value error to indicate the checksum failed."""
pass
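# Illustrative sketch: validating a downloaded file with check_hash() above.
# The path is hypothetical; the checksum shown is the md5 of an empty file.
def _example_validate_download():
    expected = 'd41d8cd98f00b204e9800998ecf8427e'
    try:
        check_hash('/tmp/download.tar.gz', expected, hash_type='md5')
    except ChecksumError as err:
        log('checksum mismatch: {}'.format(err))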
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
"""Restart services based on configuration files changing
    This function is used as a decorator, for example::
@restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ],
'/etc/apache/sites-enabled/*': [ 'apache2' ]
})
def config_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. The apache2 service would be
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
    @param restart_map: {path_file_name: [service_name, ...]}
@param stopstart: DEFAULT false; whether to stop, start OR restart
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function
"""
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
restart_functions=None):
"""Helper function to perform the restart_on_change function.
This is provided for decorators to restart services if files described
in the restart_map have changed after an invocation of lambda_f().
@param lambda_f: function to call.
@param restart_map: {file: [service, ...]}
@param stopstart: whether to stop, start or restart a service
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result of lambda_f()
"""
if restart_functions is None:
restart_functions = {}
checksums = {path: path_hash(path) for path in restart_map}
r = lambda_f()
# create a list of lists of the services to restart
restarts = [restart_map[path]
for path in restart_map
if path_hash(path) != checksums[path]]
# create a flat list of ordered services without duplicates from lists
services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
if services_list:
actions = ('stop', 'start') if stopstart else ('restart',)
for service_name in services_list:
if service_name in restart_functions:
restart_functions[service_name](service_name)
else:
for action in actions:
service(action, service_name)
return r
def pwgen(length=None):
"""Generate a random pasword."""
if length is None:
# A random length is ok to use a weak PRNG
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
# Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
# actual password
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
def is_phy_iface(interface):
"""Returns True if interface is not virtual, otherwise False."""
if interface:
sys_net = '/sys/class/net'
if os.path.isdir(sys_net):
for iface in glob.glob(os.path.join(sys_net, '*')):
if '/virtual/' in os.path.realpath(iface):
continue
if interface == os.path.basename(iface):
return True
return False
def get_bond_master(interface):
"""Returns bond master if interface is bond slave otherwise None.
NOTE: the provided interface is expected to be physical
"""
if interface:
iface_path = '/sys/class/net/%s' % (interface)
if os.path.exists(iface_path):
if '/virtual/' in os.path.realpath(iface_path):
return None
master = os.path.join(iface_path, 'master')
if os.path.exists(master):
master = os.path.realpath(master)
# make sure it is a bond master
if os.path.exists(os.path.join(master, 'bonding')):
return os.path.basename(master)
return None
def list_nics(nic_type=None):
"""Return a list of nics of given type(s)"""
if isinstance(nic_type, six.string_types):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
if nic_type:
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).decode('UTF-8')
ip_output = ip_output.split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
matched = re.search('.*: (' + int_type +
r'[0-9]+\.[0-9]+)@.*', line)
if matched:
iface = matched.groups()[0]
else:
iface = line.split()[1].replace(":", "")
if iface not in interfaces:
interfaces.append(iface)
else:
cmd = ['ip', 'a']
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
ip_output = (line.strip() for line in ip_output if line)
        key = re.compile(r'^[0-9]+:\s+(.+):')
for line in ip_output:
matched = re.search(key, line)
if matched:
iface = matched.group(1)
iface = iface.partition("@")[0]
if iface not in interfaces:
interfaces.append(iface)
return interfaces
def set_nic_mtu(nic, mtu):
"""Set the Maximum Transmission Unit (MTU) on a network interface."""
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)
def get_nic_mtu(nic):
"""Return the Maximum Transmission Unit (MTU) for a network interface."""
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu
def get_nic_hwaddr(nic):
"""Return the Media Access Control (MAC) for a network interface."""
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).decode('UTF-8')
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
@contextmanager
def chdir(directory):
"""Change the current working directory to a different directory for a code
block and return the previous directory after the block exits. Useful to
    run commands from a specified directory.
:param str directory: The directory path to change to for this context.
"""
cur = os.getcwd()
try:
yield os.chdir(directory)
finally:
os.chdir(cur)
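# Illustrative sketch of the chdir() context manager above: run a command from
# a specific directory and return to the previous one afterwards. The path is
# hypothetical.
def _example_run_in_dir():
    with chdir('/srv/myapp'):
        subprocess.check_call(['ls', '-l'])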
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
"""Recursively change user and group ownership of files and directories
in given path. Doesn't chown path itself by default, only its children.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
:param bool follow_links: Also Chown links if True
:param bool chowntopdir: Also chown path itself if True
"""
uid = pwd.getpwnam(owner).pw_uid
gid = grp.getgrnam(group).gr_gid
if follow_links:
chown = os.chown
else:
chown = os.lchown
if chowntopdir:
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
if not broken_symlink:
chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for name in dirs + files:
full = os.path.join(root, name)
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
if not broken_symlink:
chown(full, uid, gid)
def lchownr(path, owner, group):
"""Recursively change user and group ownership of files and directories
in a given path, not following symbolic links. See the documentation for
'os.lchown' for more information.
:param str path: The string path to start changing ownership.
:param str owner: The owner string to use when looking up the uid.
:param str group: The group string to use when looking up the gid.
"""
chownr(path, owner, group, follow_links=False)
def get_total_ram():
"""The total amount of system RAM in bytes.
This is what is reported by the OS, and may be overcommitted when
there are multiple containers hosted on the same machine.
"""
with open('/proc/meminfo', 'r') as f:
for line in f.readlines():
if line:
key, value, unit = line.split()
if key == 'MemTotal:':
assert unit == 'kB', 'Unknown unit'
return int(value) * 1024 # Classic, not KiB.
raise NotImplementedError()
UPSTART_CONTAINER_TYPE = '/run/container_type'
def is_container():
"""Determine whether unit is running in a container
@return: boolean indicating if unit is in a container
"""
if init_is_systemd():
# Detect using systemd-detect-virt
return subprocess.call(['systemd-detect-virt',
'--container']) == 0
else:
# Detect using upstart container file marker
return os.path.exists(UPSTART_CONTAINER_TYPE)
|
the-stack_106_18818
|
import asyncio
import json
import re
import ssl
import sys
from time import sleep
import pytest
pytest.importorskip("bokeh")
from bokeh.server.server import BokehTornado
from tlz import first
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
import dask
from dask.core import flatten
from distributed.utils import tokey, format_dashboard_link
from distributed.client import wait
from distributed.metrics import time
from distributed.utils_test import gen_cluster, inc, dec, slowinc, div, get_cert
from distributed.dashboard.components.worker import Counters
from distributed.dashboard.scheduler import applications
from distributed.dashboard.components.scheduler import (
SystemMonitor,
Occupancy,
StealingTimeSeries,
StealingEvents,
Events,
TaskStream,
TaskProgress,
CurrentLoad,
ProcessingHistogram,
NBytesHistogram,
WorkerTable,
TaskGraph,
ProfileServer,
MemoryByKey,
AggregateAction,
ComputePerKey,
)
from distributed.dashboard import scheduler
scheduler.PROFILING = False
@gen_cluster(client=True, scheduler_kwargs={"dashboard": True})
async def test_simple(c, s, a, b):
port = s.http_server.port
future = c.submit(sleep, 1)
await asyncio.sleep(0.1)
http_client = AsyncHTTPClient()
for suffix in applications:
response = await http_client.fetch("http://localhost:%d%s" % (port, suffix))
body = response.body.decode()
assert "bokeh" in body.lower()
assert not re.search("href=./", body) # no absolute links
response = await http_client.fetch(
"http://localhost:%d/individual-plots.json" % port
)
response = json.loads(response.body.decode())
assert response
@gen_cluster(client=True, worker_kwargs={"dashboard": True})
async def test_basic(c, s, a, b):
for component in [TaskStream, SystemMonitor, Occupancy, StealingTimeSeries]:
ss = component(s)
ss.update()
data = ss.source.data
assert len(first(data.values()))
if component is Occupancy:
assert all("127.0.0.1" in addr for addr in data["escaped_worker"])
@gen_cluster(client=True)
async def test_counters(c, s, a, b):
pytest.importorskip("crick")
while "tick-duration" not in s.digests:
await asyncio.sleep(0.01)
ss = Counters(s)
ss.update()
await asyncio.sleep(0.1)
ss.update()
start = time()
while not len(ss.digest_sources["tick-duration"][0].data["x"]):
await asyncio.sleep(1)
assert time() < start + 5
@gen_cluster(client=True)
async def test_stealing_events(c, s, a, b):
se = StealingEvents(s)
futures = c.map(
slowinc, range(100), delay=0.1, workers=a.address, allow_other_workers=True
)
while not b.task_state: # will steal soon
await asyncio.sleep(0.01)
se.update()
assert len(first(se.source.data.values()))
@gen_cluster(client=True)
async def test_events(c, s, a, b):
e = Events(s, "all")
futures = c.map(
slowinc, range(100), delay=0.1, workers=a.address, allow_other_workers=True
)
while not b.task_state:
await asyncio.sleep(0.01)
e.update()
d = dict(e.source.data)
assert sum(a == "add-worker" for a in d["action"]) == 2
@gen_cluster(client=True)
async def test_task_stream(c, s, a, b):
ts = TaskStream(s)
futures = c.map(slowinc, range(10), delay=0.001)
await wait(futures)
ts.update()
d = dict(ts.source.data)
assert all(len(L) == 10 for L in d.values())
assert min(d["start"]) == 0 # zero based
ts.update()
d = dict(ts.source.data)
assert all(len(L) == 10 for L in d.values())
total = c.submit(sum, futures)
await wait(total)
ts.update()
d = dict(ts.source.data)
assert len(set(map(len, d.values()))) == 1
@gen_cluster(client=True)
async def test_task_stream_n_rectangles(c, s, a, b):
ts = TaskStream(s, n_rectangles=10)
futures = c.map(slowinc, range(10), delay=0.001)
await wait(futures)
ts.update()
assert len(ts.source.data["start"]) == 10
@gen_cluster(client=True)
async def test_task_stream_second_plugin(c, s, a, b):
ts = TaskStream(s, n_rectangles=10, clear_interval=10)
ts.update()
futures = c.map(inc, range(10))
await wait(futures)
ts.update()
ts2 = TaskStream(s, n_rectangles=5, clear_interval=10)
ts2.update()
@gen_cluster(client=True)
async def test_task_stream_clear_interval(c, s, a, b):
ts = TaskStream(s, clear_interval=200)
await wait(c.map(inc, range(10)))
ts.update()
await asyncio.sleep(0.010)
await wait(c.map(dec, range(10)))
ts.update()
assert len(set(map(len, ts.source.data.values()))) == 1
assert ts.source.data["name"].count("inc") == 10
assert ts.source.data["name"].count("dec") == 10
await asyncio.sleep(0.300)
await wait(c.map(inc, range(10, 20)))
ts.update()
assert len(set(map(len, ts.source.data.values()))) == 1
assert ts.source.data["name"].count("inc") == 10
assert ts.source.data["name"].count("dec") == 0
@gen_cluster(client=True)
async def test_TaskProgress(c, s, a, b):
tp = TaskProgress(s)
futures = c.map(slowinc, range(10), delay=0.001)
await wait(futures)
tp.update()
d = dict(tp.source.data)
assert all(len(L) == 1 for L in d.values())
assert d["name"] == ["slowinc"]
futures2 = c.map(dec, range(5))
await wait(futures2)
tp.update()
d = dict(tp.source.data)
assert all(len(L) == 2 for L in d.values())
assert d["name"] == ["slowinc", "dec"]
del futures, futures2
while s.tasks:
await asyncio.sleep(0.01)
tp.update()
assert not tp.source.data["all"]
@gen_cluster(client=True)
async def test_TaskProgress_empty(c, s, a, b):
tp = TaskProgress(s)
tp.update()
futures = [c.submit(inc, i, key="f-" + "a" * i) for i in range(20)]
await wait(futures)
tp.update()
del futures
while s.tasks:
await asyncio.sleep(0.01)
tp.update()
assert not any(len(v) for v in tp.source.data.values())
@gen_cluster(client=True)
async def test_CurrentLoad(c, s, a, b):
cl = CurrentLoad(s)
futures = c.map(slowinc, range(10), delay=0.001)
await wait(futures)
cl.update()
d = dict(cl.source.data)
assert all(len(L) == 2 for L in d.values())
assert all(d["nbytes"])
assert cl.cpu_figure.x_range.end == 200
@gen_cluster(client=True)
async def test_ProcessingHistogram(c, s, a, b):
ph = ProcessingHistogram(s)
ph.update()
assert (ph.source.data["top"] != 0).sum() == 1
futures = c.map(slowinc, range(10), delay=0.050)
while not s.tasks:
await asyncio.sleep(0.01)
ph.update()
assert ph.source.data["right"][-1] > 2
@gen_cluster(client=True)
async def test_NBytesHistogram(c, s, a, b):
nh = NBytesHistogram(s)
nh.update()
assert (nh.source.data["top"] != 0).sum() == 1
futures = c.map(inc, range(10))
await wait(futures)
nh.update()
assert nh.source.data["right"][-1] > 5 * 20
@gen_cluster(client=True)
async def test_WorkerTable(c, s, a, b):
wt = WorkerTable(s)
wt.update()
assert all(wt.source.data.values())
assert all(
not v or isinstance(v, (str, int, float))
for L in wt.source.data.values()
for v in L
), {type(v).__name__ for L in wt.source.data.values() for v in L}
assert all(len(v) == 3 for v in wt.source.data.values())
assert wt.source.data["name"][0] == "Total (2)"
nthreads = wt.source.data["nthreads"]
assert all(nthreads)
assert nthreads[0] == nthreads[1] + nthreads[2]
@gen_cluster(client=True)
async def test_WorkerTable_custom_metrics(c, s, a, b):
def metric_port(worker):
return worker.port
def metric_address(worker):
return worker.address
metrics = {"metric_port": metric_port, "metric_address": metric_address}
for w in [a, b]:
for name, func in metrics.items():
w.metrics[name] = func
await asyncio.gather(a.heartbeat(), b.heartbeat())
for w in [a, b]:
assert s.workers[w.address].metrics["metric_port"] == w.port
assert s.workers[w.address].metrics["metric_address"] == w.address
wt = WorkerTable(s)
wt.update()
data = wt.source.data
for name in metrics:
assert name in data
assert all(data.values())
assert all(len(v) == 3 for v in data.values())
my_index = data["address"].index(a.address), data["address"].index(b.address)
assert [data["metric_port"][i] for i in my_index] == [a.port, b.port]
assert [data["metric_address"][i] for i in my_index] == [a.address, b.address]
@gen_cluster(client=True)
async def test_WorkerTable_different_metrics(c, s, a, b):
def metric_port(worker):
return worker.port
a.metrics["metric_a"] = metric_port
b.metrics["metric_b"] = metric_port
await asyncio.gather(a.heartbeat(), b.heartbeat())
assert s.workers[a.address].metrics["metric_a"] == a.port
assert s.workers[b.address].metrics["metric_b"] == b.port
wt = WorkerTable(s)
wt.update()
data = wt.source.data
assert "metric_a" in data
assert "metric_b" in data
assert all(data.values())
assert all(len(v) == 3 for v in data.values())
my_index = data["address"].index(a.address), data["address"].index(b.address)
assert [data["metric_a"][i] for i in my_index] == [a.port, None]
assert [data["metric_b"][i] for i in my_index] == [None, b.port]
@gen_cluster(client=True)
async def test_WorkerTable_metrics_with_different_metric_2(c, s, a, b):
def metric_port(worker):
return worker.port
a.metrics["metric_a"] = metric_port
await asyncio.gather(a.heartbeat(), b.heartbeat())
wt = WorkerTable(s)
wt.update()
data = wt.source.data
assert "metric_a" in data
assert all(data.values())
assert all(len(v) == 3 for v in data.values())
my_index = data["address"].index(a.address), data["address"].index(b.address)
assert [data["metric_a"][i] for i in my_index] == [a.port, None]
@gen_cluster(client=True, worker_kwargs={"metrics": {"my_port": lambda w: w.port}})
async def test_WorkerTable_add_and_remove_metrics(c, s, a, b):
def metric_port(worker):
return worker.port
a.metrics["metric_a"] = metric_port
b.metrics["metric_b"] = metric_port
await asyncio.gather(a.heartbeat(), b.heartbeat())
assert s.workers[a.address].metrics["metric_a"] == a.port
assert s.workers[b.address].metrics["metric_b"] == b.port
wt = WorkerTable(s)
wt.update()
assert "metric_a" in wt.source.data
assert "metric_b" in wt.source.data
# Remove 'metric_b' from worker b
del b.metrics["metric_b"]
await asyncio.gather(a.heartbeat(), b.heartbeat())
wt = WorkerTable(s)
wt.update()
assert "metric_a" in wt.source.data
del a.metrics["metric_a"]
await asyncio.gather(a.heartbeat(), b.heartbeat())
wt = WorkerTable(s)
wt.update()
assert "metric_a" not in wt.source.data
@gen_cluster(client=True)
async def test_WorkerTable_custom_metric_overlap_with_core_metric(c, s, a, b):
def metric(worker):
return -999
a.metrics["executing"] = metric
a.metrics["cpu"] = metric
a.metrics["metric"] = metric
await asyncio.gather(a.heartbeat(), b.heartbeat())
assert s.workers[a.address].metrics["executing"] != -999
assert s.workers[a.address].metrics["cpu"] != -999
assert s.workers[a.address].metrics["metric"] == -999
@gen_cluster(client=True, worker_kwargs={"memory_limit": 0})
async def test_WorkerTable_with_memory_limit_as_0(c, s, a, b):
wt = WorkerTable(s)
wt.update()
assert all(wt.source.data.values())
assert wt.source.data["name"][0] == "Total (2)"
assert wt.source.data["memory_limit"][0] == 0
assert wt.source.data["memory_percent"][0] == ""
@gen_cluster(client=True)
async def test_TaskGraph(c, s, a, b):
gp = TaskGraph(s)
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
gp.update()
assert set(map(len, gp.node_source.data.values())) == {6}
assert set(map(len, gp.edge_source.data.values())) == {5}
json.dumps(gp.edge_source.data)
json.dumps(gp.node_source.data)
da = pytest.importorskip("dask.array")
x = da.random.random((20, 20), chunks=(10, 10)).persist()
y = (x + x.T) - x.mean(axis=0)
y = y.persist()
await wait(y)
gp.update()
gp.update()
await c.compute((x + y).sum())
gp.update()
future = c.submit(inc, 10)
future2 = c.submit(inc, future)
await wait(future2)
key = future.key
del future, future2
while key in s.tasks:
await asyncio.sleep(0.01)
assert "memory" in gp.node_source.data["state"]
gp.update()
gp.update()
assert not all(x == "False" for x in gp.edge_source.data["visible"])
@gen_cluster(client=True)
async def test_TaskGraph_clear(c, s, a, b):
gp = TaskGraph(s)
futures = c.map(inc, range(5))
total = c.submit(sum, futures)
await total
gp.update()
del total, futures
while s.tasks:
await asyncio.sleep(0.01)
gp.update()
gp.update()
start = time()
while any(gp.node_source.data.values()) or any(gp.edge_source.data.values()):
await asyncio.sleep(0.1)
gp.update()
assert time() < start + 5
@gen_cluster(
client=True, config={"distributed.dashboard.graph-max-items": 2,},
)
async def test_TaskGraph_limit(c, s, a, b):
gp = TaskGraph(s)
def func(x):
return x
f1 = c.submit(func, 1)
await wait(f1)
gp.update()
assert len(gp.node_source.data["x"]) == 1
f2 = c.submit(func, 2)
await wait(f2)
gp.update()
assert len(gp.node_source.data["x"]) == 2
f3 = c.submit(func, 3)
await wait(f3)
gp.update()
assert len(gp.node_source.data["x"]) == 2
@gen_cluster(client=True, timeout=30)
async def test_TaskGraph_complex(c, s, a, b):
da = pytest.importorskip("dask.array")
gp = TaskGraph(s)
x = da.random.random((2000, 2000), chunks=(1000, 1000))
y = ((x + x.T) - x.mean(axis=0)).persist()
await wait(y)
gp.update()
assert len(gp.layout.index) == len(gp.node_source.data["x"])
assert len(gp.layout.index) == len(s.tasks)
z = (x - y).sum().persist()
await wait(z)
gp.update()
assert len(gp.layout.index) == len(gp.node_source.data["x"])
assert len(gp.layout.index) == len(s.tasks)
del z
await asyncio.sleep(0.2)
gp.update()
assert len(gp.layout.index) == sum(
v == "True" for v in gp.node_source.data["visible"]
)
assert len(gp.layout.index) == len(s.tasks)
assert max(gp.layout.index.values()) < len(gp.node_source.data["visible"])
assert gp.layout.next_index == len(gp.node_source.data["visible"])
gp.update()
assert set(gp.layout.index.values()) == set(range(len(gp.layout.index)))
visible = gp.node_source.data["visible"]
keys = list(map(tokey, flatten(y.__dask_keys__())))
assert all(visible[gp.layout.index[key]] == "True" for key in keys)
@gen_cluster(client=True)
async def test_TaskGraph_order(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(div, 1, 0)
await wait(y)
gp = TaskGraph(s)
gp.update()
assert gp.node_source.data["state"][gp.layout.index[y.key]] == "erred"
@gen_cluster(
client=True,
config={
"distributed.worker.profile.interval": "10ms",
"distributed.worker.profile.cycle": "50ms",
},
)
async def test_profile_server(c, s, a, b):
ptp = ProfileServer(s)
start = time()
await asyncio.sleep(0.100)
while len(ptp.ts_source.data["time"]) < 2:
await asyncio.sleep(0.100)
ptp.trigger_update()
assert time() < start + 2
@gen_cluster(
client=True, scheduler_kwargs={"dashboard": True},
)
async def test_root_redirect(c, s, a, b):
http_client = AsyncHTTPClient()
response = await http_client.fetch("http://localhost:%d/" % s.http_server.port)
assert response.code == 200
assert "/status" in response.effective_url
@gen_cluster(
client=True,
scheduler_kwargs={"dashboard": True},
worker_kwargs={"dashboard": True},
timeout=180,
)
async def test_proxy_to_workers(c, s, a, b):
try:
import jupyter_server_proxy # noqa: F401
proxy_exists = True
except ImportError:
proxy_exists = False
dashboard_port = s.http_server.port
http_client = AsyncHTTPClient()
response = await http_client.fetch("http://localhost:%d/" % dashboard_port)
assert response.code == 200
assert "/status" in response.effective_url
for w in [a, b]:
host = w.ip
port = w.http_server.port
proxy_url = "http://localhost:%d/proxy/%s/%s/status" % (
dashboard_port,
port,
host,
)
direct_url = "http://localhost:%s/status" % port
http_client = AsyncHTTPClient()
response_proxy = await http_client.fetch(proxy_url)
response_direct = await http_client.fetch(direct_url)
assert response_proxy.code == 200
if proxy_exists:
assert b"Crossfilter" in response_proxy.body
else:
assert b"python -m pip install jupyter-server-proxy" in response_proxy.body
assert response_direct.code == 200
assert b"Crossfilter" in response_direct.body
@gen_cluster(
client=True,
scheduler_kwargs={"dashboard": True},
config={
"distributed.scheduler.dashboard.tasks.task-stream-length": 10,
"distributed.scheduler.dashboard.status.task-stream-length": 10,
},
)
async def test_lots_of_tasks(c, s, a, b):
import tlz as toolz
ts = TaskStream(s)
ts.update()
futures = c.map(toolz.identity, range(100))
await wait(futures)
tsp = [p for p in s.plugins if "taskstream" in type(p).__name__.lower()][0]
assert len(tsp.buffer) == 10
ts.update()
assert len(ts.source.data["start"]) == 10
assert "identity" in str(ts.source.data)
futures = c.map(lambda x: x, range(100), pure=False)
await wait(futures)
ts.update()
assert "lambda" in str(ts.source.data)
@gen_cluster(
client=True,
scheduler_kwargs={"dashboard": True},
config={
"distributed.scheduler.dashboard.tls.key": get_cert("tls-key.pem"),
"distributed.scheduler.dashboard.tls.cert": get_cert("tls-cert.pem"),
"distributed.scheduler.dashboard.tls.ca-file": get_cert("tls-ca-cert.pem"),
},
)
async def test_https_support(c, s, a, b):
port = s.http_server.port
assert (
format_dashboard_link("localhost", port) == "https://localhost:%d/status" % port
)
ctx = ssl.create_default_context()
ctx.load_verify_locations(get_cert("tls-ca-cert.pem"))
http_client = AsyncHTTPClient()
response = await http_client.fetch(
"https://localhost:%d/individual-plots.json" % port, ssl_options=ctx
)
response = json.loads(response.body.decode())
for suffix in [
"system",
"counters",
"workers",
"status",
"tasks",
"stealing",
"graph",
] + [url.strip("/") for url in response.values()]:
req = HTTPRequest(
url="https://localhost:%d/%s" % (port, suffix), ssl_options=ctx
)
response = await http_client.fetch(req)
assert response.code < 300
body = response.body.decode()
assert not re.search("href=./", body) # no absolute links
@gen_cluster(client=True, scheduler_kwargs={"dashboard": True})
async def test_memory_by_key(c, s, a, b):
mbk = MemoryByKey(s)
da = pytest.importorskip("dask.array")
x = (da.random.random((20, 20), chunks=(10, 10)) + 1).persist(optimize_graph=False)
await x
y = await dask.delayed(inc)(1).persist()
mbk.update()
assert mbk.source.data["name"] == ["add", "inc"]
assert mbk.source.data["nbytes"] == [x.nbytes, sys.getsizeof(1)]
@gen_cluster(client=True, scheduler_kwargs={"dashboard": True})
async def test_aggregate_action(c, s, a, b):
mbk = AggregateAction(s)
da = pytest.importorskip("dask.array")
x = (da.ones((20, 20), chunks=(10, 10)) + 1).persist(optimize_graph=False)
await x
y = await dask.delayed(inc)(1).persist()
z = (x + x.T) - x.mean(axis=0)
await c.compute(z.sum())
mbk.update()
http_client = AsyncHTTPClient()
response = await http_client.fetch(
"http://localhost:%d/individual-aggregate-time-per-action" % s.http_server.port
)
assert response.code == 200
assert ("transfer") in mbk.action_source.data["names"]
assert ("compute") in mbk.action_source.data["names"]
@gen_cluster(client=True, scheduler_kwargs={"dashboard": True})
async def test_compute_per_key(c, s, a, b):
mbk = ComputePerKey(s)
da = pytest.importorskip("dask.array")
x = (da.ones((20, 20), chunks=(10, 10)) + 1).persist(optimize_graph=False)
await x
y = await dask.delayed(inc)(1).persist()
z = (x + x.T) - x.mean(axis=0)
await c.compute(z.sum())
mbk.update()
http_client = AsyncHTTPClient()
response = await http_client.fetch(
"http://localhost:%d/individual-compute-time-per-key" % s.http_server.port
)
assert response.code == 200
assert ("sum-aggregate") in mbk.compute_source.data["names"]
assert ("add") in mbk.compute_source.data["names"]
assert "angles" in mbk.compute_source.data.keys()
@gen_cluster(scheduler_kwargs={"http_prefix": "foo-bar", "dashboard": True})
async def test_prefix_bokeh(s, a, b):
prefix = "foo-bar"
http_client = AsyncHTTPClient()
response = await http_client.fetch(
f"http://localhost:{s.http_server.port}/{prefix}/status"
)
assert response.code == 200
assert (
f'<script type="text/javascript" src="/{prefix}/static/'
in response.body.decode()
)
bokeh_app = s.http_application.applications[0]
assert isinstance(bokeh_app, BokehTornado)
assert bokeh_app.prefix == f"/{prefix}"
|
the-stack_106_18819
|
import os
import cv2
import numpy as np
from server.services.errors import Errors, PortalError
from server.services.hashing import get_hash
from server.models.abstract.BaseModel import BaseModel
class DarknetModel(BaseModel):
def _load_label_map_(self):
labels = (
open(os.path.join(self._directory_, self._labelsname_))
.read()
.strip()
.split("\n")
)
self._label_map_ = {
str(label_index): {"id": label_index, "name": label_name}
for label_index, label_name in enumerate(labels)
}
def register(self):
self._labelsname_ = self._weightsname_ = self._configname_ = ""
labels = weights = configs = 0
for file in os.listdir(self._directory_):
if file.endswith(".names"):
self._labelsname_ = os.path.join(self._directory_, file)
labels += 1
if file.endswith(".weights"):
self._weightsname_ = os.path.join(self._directory_, file)
weights += 1
if file.endswith(".cfg"):
self._configname_ = os.path.join(self._directory_, file)
configs += 1
if self._labelsname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"class label file .names is not found in given directory.",
)
if labels > 1:
raise PortalError(
Errors.OVERLOADED, "multiple class label files found."
)
if self._weightsname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"weights file .weights is not found in given directory",
)
if weights > 1:
raise PortalError(
                Errors.OVERLOADED, "multiple weights files found."
)
if self._configname_ == "":
raise PortalError(
Errors.INVALIDFILEPATH,
"config file .cfg is not found in given directory.",
)
if configs > 1:
raise PortalError(
Errors.OVERLOADED, "multiple config files found."
)
with open(self._configname_, "r") as conf:
heightcheck = False
widthcheck = False
for line in conf:
if heightcheck and widthcheck:
break
if "height" in line:
self._height_ = int(
line.replace("=", "").replace("height", "").strip()
)
heightcheck = True
if "width" in line:
self._width_ = int(
line.replace("=", "").replace("width", "").strip()
)
widthcheck = True
self._load_label_map_()
self._key_ = get_hash(self._directory_)
return self._key_, self
def load(self):
loaded_model = cv2.dnn.readNetFromDarknet(
self._configname_, self._weightsname_
)
self._model_ = loaded_model
def predict(self, image_array):
try:
model = self._model_
(H, W) = image_array.shape[:2]
ln = model.getLayerNames()
ln = [ln[i[0] - 1] for i in model.getUnconnectedOutLayers()]
blob = cv2.dnn.blobFromImage(
image_array,
1 / 255.0,
(self._height_, self._width_),
swapRB=True,
crop=False,
)
model.setInput(blob)
layerOutputs = model.forward(ln)
boxes = []
confidences = []
classIDs = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
box = detection[0:4]
(centerX, centerY, width, height) = box
xmin = centerX - (width / 2)
ymin = centerY - (height / 2)
xmax = xmin + width
ymax = ymin + height
boxes.append([ymin, xmin, ymax, xmax])
confidences.append(float(confidence))
classIDs.append(classID)
detections = {}
detections["detection_masks"] = None
detections["detection_boxes"] = np.squeeze(np.array(boxes))
detections["detection_scores"] = np.squeeze(np.array(confidences))
detections["detection_classes"] = np.squeeze(np.array(classIDs))
return detections
except Exception as e:
raise PortalError(Errors.FAILEDPREDICTION, str(e))
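# Illustrative usage sketch (not part of the Portal code above). It assumes the
# BaseModel constructor accepts the directory holding the .names/.cfg/.weights
# files, which is not shown in this file; paths are hypothetical.
def _example_darknet_inference(model_dir='/models/yolo', image_path='dog.jpg'):
    key, model = DarknetModel(model_dir).register()  # assumed constructor
    model.load()
    image = cv2.imread(image_path)
    return model.predict(image)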
|
the-stack_106_18820
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Code to load instruction words into a simulator'''
import struct
from typing import List, Optional, Tuple, Type
from .err_bits import ILLEGAL_INSN
from .isa import DecodeError, OTBNInsn
from .insn import INSN_CLASSES
from .state import OTBNState
# A tuple as returned by get_insn_masks: an element (m0, m1, cls) means "if a
# word has all the bits in m0 clear and all the bits in m1 set, then you should
# decode it with the given class".
_MaskTuple = Tuple[int, int, Type[OTBNInsn]]
class IllegalInsn(OTBNInsn):
'''A catch-all subclass of Instruction for bad data
This handles anything that doesn't decode correctly. Doing so for OTBN is
much easier than if we wanted to support compressed-mode (RV32IC), because
we don't need to worry about whether we have 16 or 32 bits of rubbish.
Note that we declare this with an opcode of zero. Note that this implies
the bottom two bits are 0, which would imply a compressed instruction, so
we know this doesn't match any real instruction.
'''
def __init__(self, pc: int, raw: int, msg: str) -> None:
super().__init__(raw, {})
self.msg = msg
# Override the memoized disassembly for the instruction, avoiding us
# disassembling the underlying DummyInsn.
self._disasm = (pc, '?? 0x{:08x}'.format(raw))
def execute(self, state: OTBNState) -> None:
state.stop_at_end_of_cycle(ILLEGAL_INSN)
MASK_TUPLES = None # type: Optional[List[_MaskTuple]]
def get_insn_masks() -> List[_MaskTuple]:
'''Generate a list of zeros/ones masks for known instructions
The result is memoized.
'''
global MASK_TUPLES
if MASK_TUPLES is None:
tuples = []
for cls in INSN_CLASSES:
# cls is the class for some OTBNInsn: an object that represents a
# decoded instruction. It has a class variable called "insn", which is
# the subclass of insn_yaml.Insn that represents that instruction
# (without operand values).
insn = cls.insn
if insn.encoding is None:
continue
m0, m1 = insn.encoding.get_masks()
# Encoding.get_masks sets bits that are 'x', so we have to do a
# difference operation too.
tuples.append((m0 & ~m1, m1 & ~m0, cls))
MASK_TUPLES = tuples
return MASK_TUPLES
def _decode_word(pc: int, word: int) -> OTBNInsn:
found_cls = None
for m0, m1, cls in get_insn_masks():
# If any bit is set that should be zero or if any bit is clear that
# should be one, ignore this instruction.
if word & m0 or (~ word) & m1:
continue
found_cls = cls
break
if found_cls is None:
return IllegalInsn(pc, word, 'No legal decoding')
# Decode the instruction. We know that we have an encoding (we checked in
# get_insn_masks).
assert cls.insn.encoding is not None
enc_vals = cls.insn.encoding.extract_operands(word)
# Make sense of these encoded values as "operand values" (doing any
# shifting, sign interpretation etc.)
op_vals = cls.insn.enc_vals_to_op_vals(pc, enc_vals)
# Catch any decode errors raised by the instruction constructor. This lets
# us generate errors if an instruction encoding has extra constraints that
# can't be captured by the logic in the Encoding class.
try:
return cls(word, op_vals)
except DecodeError as err:
return IllegalInsn(pc, word, str(err))
def decode_bytes(base_addr: int, data: bytes) -> List[OTBNInsn]:
'''Decode instruction bytes as instructions'''
assert len(data) & 3 == 0
return [_decode_word(base_addr + 4 * offset, int_val[0])
for offset, int_val in enumerate(struct.iter_unpack('<I', data))]
def decode_file(base_addr: int, path: str) -> List[OTBNInsn]:
with open(path, 'rb') as handle:
return decode_bytes(base_addr, handle.read())
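# Illustrative sketch: decoding a single 4-byte word with the helpers above. A
# zero word has opcode zero, so (per the IllegalInsn docstring) it should come
# back as an IllegalInsn rather than a real instruction.
def _example_decode_zero_word() -> List[OTBNInsn]:
    return decode_bytes(0, struct.pack('<I', 0))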
|
the-stack_106_18821
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('forum', '0005_auto_20160402_1336'),
]
operations = [
migrations.CreateModel(
name='ComentarioPregunta',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('creado_en', models.DateTimeField(auto_now_add=True)),
('modificado_en', models.DateTimeField(auto_now=True)),
('comentario', models.TextField(blank=True, max_length=3000)),
('comentador', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Comentarios',
'ordering': ('-creado_en',),
'verbose_name': 'Comentario',
},
),
migrations.CreateModel(
name='ComentarioRespuesta',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('creado_en', models.DateTimeField(auto_now_add=True)),
('modificado_en', models.DateTimeField(auto_now=True)),
('comentario', models.TextField(blank=True, max_length=3000)),
('comentador', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Comentarios',
'ordering': ('-creado_en',),
'verbose_name': 'Comentario',
},
),
migrations.RemoveField(
model_name='comentario',
name='comentador',
),
migrations.RemoveField(
model_name='pregunta',
name='comentarios',
),
migrations.RemoveField(
model_name='respuesta',
name='comentarios',
),
migrations.DeleteModel(
name='Comentario',
),
migrations.AddField(
model_name='comentariorespuesta',
name='respuesta',
field=models.ForeignKey(to='forum.Respuesta'),
),
migrations.AddField(
model_name='comentariopregunta',
name='pregunta',
field=models.ForeignKey(to='forum.Pregunta'),
),
]
|
the-stack_106_18822
|
from flask import Blueprint, render_template, redirect, url_for, flash
from solarvibes import db
from solarvibes.site.forms import EmailForm, EmailAndTextForm, ContactUsForm # Wesite Forms
from solarvibes.site.models import NewsletterTable, AgrimoduleFBTable, PlatformFBTable, WorkWithUsTable, ContactUsTable
site = Blueprint(
'site',
__name__,
template_folder="templates",
static_folder="static",
)
@site.route('/', methods=['GET', 'POST'])
def index():
form = EmailForm()
if form.validate_on_submit():
email = form.email.data
agrimodulefb = NewsletterTable(email=email)
db.session.add(agrimodulefb)
db.session.commit()
form = None
        flash('Thanks. We will keep you updated!')
return redirect(url_for('site.index'))
return render_template('site/index.html', form=form)
@site.route('/agrimodule', methods=['GET', 'POST'])
def agrimodule():
form = EmailAndTextForm()
if form.validate_on_submit():
email = form.email.data
msg = form.msg.data
agrimodulefb = AgrimoduleFBTable(email=email, msg=msg)
db.session.add(agrimodulefb)
db.session.commit()
form = None
        flash('Thanks. We will definitely give it a lot of thought!')
return redirect(url_for('site.agrimodule'))
return render_template('site/agrimodule.html', form=form)
@site.route('/platform', methods=['GET', 'POST'])
def platform():
form = EmailAndTextForm()
if form.validate_on_submit():
email = form.email.data
msg = form.msg.data
platformfb = PlatformFBTable(email=email, msg=msg)
db.session.add(platformfb)
db.session.commit()
form = None
flash('Thanks. Your feedback is valuable to us!')
return redirect(url_for('site.platform'))
return render_template('site/platform.html', form=form)
@site.route('/about', methods=['GET', 'POST'])
def about():
form = EmailAndTextForm()
if form.validate_on_submit():
email = form.email.data
msg = form.msg.data
workwithusus = WorkWithUsTable(email=email, msg=msg)
db.session.add(workwithusus)
db.session.commit()
form = None
flash('Thanks. Our HR department will contact you!')
return redirect(url_for('site.about'))
return render_template('site/about.html', form=form)
@site.route('/contact', methods=['GET', 'POST'])
def contact():
# pre_contact = PreContactUsForm('Carlos','[email protected]','+176-55858585','I would like to get a quotation for my farm 1 hectare located in Berlin')
form = ContactUsForm()
if form.validate_on_submit():
name = form.name.data
email = form.email.data
phone = form.phone.data
msg = form.msg.data
newsletter = ContactUsTable(name=name, email=email, phone=phone, msg=msg)
db.session.add(newsletter)
db.session.commit()
form = None
        flash('Thanks. We will get back to you shortly!')
return redirect(url_for('site.contact'))
return render_template('site/contact.html', form=form)
|
the-stack_106_18823
|
##############################################################################
# STEP 1:
# CREATING dewey_classification DATABASE
# FROM file create_dewey_classification.sql
# STEP 2:
# FORMATING DATA AND FILES
# FROM file Dewey_decimal_classification_FR.txt
# STEP 3:
# POPULATING dewey_classification DATABASE
# FROM file populate_dewey_classification_db.py
##############################################################################
import mysql.connector
from mysql.connector import Error
import re
import os
import psutil
import time
from os import environ
from populate_dewey_classification_db import populate_dewey_classification_db
##############################################################################
def create_dewey_classification():
try:
user = environ.get('MYSQL_USER')
password = environ.get('MYSQL_PASSWORD')
# connect to dewey_classification database
connect = mysql.connector.connect(
user = user,
password = password,
host = 'localhost',
database = 'dewey_classification')
except Error as err:
print("MySQL Error message: {}".format(err.msg))
else:
cursor = connect.cursor()
cursor.execute("select database();")
db = cursor.fetchone()
# print("Info: Connected to MySQL database", db)
create_dewey_classification_db(
'create_dewey_classification_db.sql', connect, cursor)
cursor.close()
connect.close()
# print("Info: MySQL sever connection is closed")
##############################################################################
def create_dewey_classification_db(filename, connect, cursor):
# reading file create_dewey_classification_db.sql ...
with open(filename, 'r') as file:
mysql_file = file.read()
print('Info: Reading sql file / ', memory_use())
# ... and separate each query
mysql_queries = mysql_file.split(';')
# format queries
for k in range(len(mysql_queries)):
mysql_queries[k] = mysql_queries[k] + ";"
del mysql_queries[-1]
for query in mysql_queries:
# execute all queries except SELECT queries at the end of the file
# wich are used for debug and verification
if query.find('SELECT') == -1:
try:
cursor.execute(query)
connect.commit()
except Error as err:
print("MySQL Error message: {}".format(err.msg))
##############################################################################
def formate_data_files():
    # writing a temporary file where all blank lines
# from source file Dewey_decimal_classification_FR.txt are removed
with open("Dewey_decimal_classification_FR.txt", 'r') as file:
print('Info: reading data from source file / ', memory_use())
for line in file:
if line == '\n':
pass # do nothing
else:
with open("temp_file.txt", "a+") as temp:
temp.write(line)
# create lists of classes, divisions, sections and subsections
with open("temp_file.txt", 'r') as file:
category = ''
class_number_list = []
class_name_list = []
division_number_list = []
division_name_list = []
section_number_list = []
section_name_list = []
subsection_number_list = []
subsection_name_list = []
i, j, k, l = 0, 0, 0, 0
for line in file:
            # using regular expressions to verify if line contains
# class, division, section or subsection number
class_number = re.match("^[0-9]00$", line)
division_number = re.match("^[0-9][0-9]0$", line)
section_number = re.match("^[0-9][0-9][1-9]$", line)
            subsection_number = re.match(r"^[0-9]+\.[0-9 ]{,30}$", line)
if line == '\n':
pass
elif bool(class_number):
i = 0
category = 'class'
class_number_list.append(line.strip('\n'))
elif bool(division_number):
j = 0
category = 'division'
division_number_list.append(line.strip('\n'))
elif bool(section_number):
k = 0
category = 'section'
section_number_list.append(line.strip('\n'))
elif bool(subsection_number):
l = 0
category = 'subsection'
subsection_number_list.append(line.strip('\n'))
else:
# populate category lists
# and concatenate separated text lines
if category == 'class':
if i < 1:
class_name_list.append(line.strip('\n'))
i += 1
else:
class_name_list[-1] += " " + line.strip('\n')
elif category == 'division':
if j < 1:
division_name_list.append(line.strip('\n'))
j += 1
else:
division_name_list[-1] += " " + line.strip('\n')
elif category == 'section':
if k < 1:
section_name_list.append(line.strip('\n'))
k += 1
else:
section_name_list[-1] += " " + line.strip('\n')
elif category == 'subsection':
if l < 1:
subsection_name_list.append(line.strip('\n'))
l += 1
else:
subsection_name_list[-1] += " " + line.strip('\n')
os.remove("temp_file.txt")
print('Info: data lists created / ', memory_use())
# creating files with classes, divisions, sections and subsections data
with open("dewey_classification_class.txt", "w+") as file:
file.write("Class\n\n")
for k in range(len(class_number_list)):
# writing class number
file.write(class_number_list[k] + ':')
# formating and writing class name text
class_name_list[k] = class_name_list[k].replace(" ", " ")
class_name_list[k] = class_name_list[k].replace(" ,", ",")
class_name_list[k] = class_name_list[k].replace(" ;", ";")
class_name_list[k] = class_name_list[k].replace(" )", ")")
class_name_list[k] = class_name_list[k].replace("\"", "\'")
file.write(class_name_list[k] + '\n' + '\n')
with open("dewey_classification_division.txt", "w+") as file:
file.write("Division\n\n")
for k in range(len(division_number_list)):
# writing division number
file.write(division_number_list[k] + ':')
# formating and writing division name text
division_name_list[k] = division_name_list[k].replace(" ", " ")
division_name_list[k] = division_name_list[k].replace(" ,", ",")
division_name_list[k] = division_name_list[k].replace(" ;", ";")
division_name_list[k] = division_name_list[k].replace(" )", ")")
division_name_list[k] = division_name_list[k].replace("\"", "\'")
file.write(division_name_list[k] + '\n' + '\n')
with open("dewey_classification_section.txt", "w+") as file:
file.write("Section\n\n")
for k in range(len(section_number_list)):
# writing section number
file.write(section_number_list[k] + ':')
# formating and writing section name text
section_name_list[k] = section_name_list[k].replace(" ", " ")
section_name_list[k] = section_name_list[k].replace(" ,", ",")
section_name_list[k] = section_name_list[k].replace(" ;", ";")
section_name_list[k] = section_name_list[k].replace(" )", ")")
section_name_list[k] = section_name_list[k].replace("\"", "\'")
file.write(section_name_list[k] + '\n' + '\n')
with open("dewey_classification_subsection.txt", "w+") as file:
file.write("Subsection\n\n")
for k in range(len(subsection_number_list)):
# writing subsection number
file.write(subsection_number_list[k] + ':')
# formatting and writing subsection name text
subsection_name_list[k] = subsection_name_list[k].replace("  ", " ")
subsection_name_list[k] = subsection_name_list[k].replace(" ,", ",")
subsection_name_list[k] = subsection_name_list[k].replace(" ;", ";")
subsection_name_list[k] = subsection_name_list[k].replace(" )", ")")
subsection_name_list[k] = subsection_name_list[k].replace("\"", "\'")
file.write(subsection_name_list[k] + '\n' + '\n')
##############################################################################
def memory_use():
process = psutil.Process(os.getpid())
mem_use = process.memory_info()
# rss is reported in bytes; divide by 1,000 to express it in kilobytes (Ko)
return f'Memory use: {mem_use.rss // 1000} Ko'
##############################################################################
if __name__ == '__main__':
start_time = time.time()
print('1: CREATING dewey_classification DATABASE ...')
create_dewey_classification()
step_time1 = time.time()
print('Step 1 running time: ',
'{:.3f}'.format(step_time1 - start_time),
'sec', '\n')
print('2: FORMATTING DATA AND FILES ...')
formate_data_files()
step_time2 = time.time()
print('Step 2 running time: ',
'{:.3f}'.format(step_time2 - step_time1),
'sec', '\n')
print('3: POPULATING dewey_classification DATABASE')
populate_dewey_classification_db() # < populate_dewey_classification_db.py
step_time3 = time.time()
print('Step 3 running time: ',
'{:.3f}'.format(step_time3 - step_time2),
'sec', '\n')
print('JOB DONE / Total running time: ',
'{:.3f}'.format(time.time() - start_time),
'sec', '\n')
|
the-stack_106_18828
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Score Invoke and Query"""
import leveldb
import logging
import sqlite3
import unittest
import testcase.unittest.test_util as test_util
from loopchain import configure as conf
from loopchain.blockchain import BlockChain, BlockStatus, Block
from loopchain.blockchain import ScoreBase
from loopchain.utils import loggers
from testcase.unittest.mock_peer import set_mock
loggers.set_preset_type(loggers.PresetType.develop)
loggers.update_preset()
class TestScore(unittest.TestCase):
chain = None
test_block_db = 'test_chain_code_block'
score = None
__peer_id = 'aaa'
@classmethod
def setUpClass(cls):
"""
Create the blockchain and store blocks in the DB
"""
cls.__peer_auth = test_util.create_default_peer_auth()
set_mock(cls)
# create the BlockChain
test_db = leveldb.LevelDB('./' + cls.test_block_db, create_if_missing=True)
cls.assertIsNotNone(test_db, "Failed to create the test DB")
cls.chain = BlockChain(test_db)
cls.score = cls.SampleScore()
def setUp(self):
test_util.print_testname(self._testMethodName)
@classmethod
def tearDownClass(cls):
"""
Remove the blockchain DB created for this test
"""
leveldb.DestroyDB(cls.test_block_db)
def generate_block(self):
"""임시 블럭 생성하는 메소드
:return: 임시 블럭
"""
block = Block(channel_name=conf.LOOPCHAIN_DEFAULT_CHANNEL)
for x in range(10):
tx = test_util.create_basic_tx(self.__peer_id, self.__peer_auth)
block.put_transaction(tx)
block.generate_block(self.chain.last_block)
return block
class SampleScore(ScoreBase):
""" 체인코드 샘플
체인코드의 샘플이므로 invoke 시에 블럭의 tx를 그냥 저장하는 역활만 합니다.
"""
def __init__(self):
ScoreBase.__init__(self)
self.sample_db = sqlite3.connect('sample_score', check_same_thread=False)
self.cursor = self.sample_db.cursor()
self.cursor.execute("CREATE TABLE IF NOT EXISTS BLOCK_TX(Tx_Data text, Tx_hash text, Block_hash text)")
self.cursor.execute("DELETE FROM BLOCK_TX")
def invoke(self, tx, block):
block_tx_list = []
block_hash = block.block_hash
tx_data = str(tx.get_data(), 'utf-8')
tx_hash = tx.tx_hash
block_tx_list.append((tx_data, tx_hash, block_hash))
self.cursor.executemany("INSERT INTO BLOCK_TX VALUES(?, ?, ?)", block_tx_list)
self.sample_db.commit()
def query(self, **kwargs):
f = kwargs.get('function')
if f == 'block_data':
block_hash = kwargs.get('block_hash')
return self.cursor.execute('SELECT * FROM BLOCK_TX WHERE Block_hash = ?', [block_hash])
else:
return None
def info(self):
return None
def test_invoke_and_query(self):
"""
Run the Score against the generated blockchain, fetch the block data
from the chaincode via query, and verify the block.
"""
for x in range(10):
block = self.generate_block()
block.block_status = BlockStatus.confirmed
self.chain.add_block(block)
block_data = self.score.query(function='block_data', block_hash=self.chain.last_block.block_hash)
logging.debug("query response: " + str(block_data))
logging.debug("MK ROOT : %s", self.chain.last_block.merkle_tree_root_hash)
for row in block_data:
self.assertEqual(row[2], self.chain.last_block.block_hash)
block_index = self.chain.last_block.find_transaction_index(row[1])
logging.debug(block_index)
logging.debug(self.chain.last_block.mk_merkle_proof(block_index))
if __name__ == '__main__':
unittest.main()
|
the-stack_106_18831
|
import os
import stat
import warnings
from collections import OrderedDict
from collections.abc import Mapping
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from pkg_resources import parse_version
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.handlers import Checkpoint, DiskSaver, EarlyStopping, ModelCheckpoint, global_step_from_engine
from ignite.handlers.checkpoint import BaseSaveHandler
_PREFIX = "PREFIX"
class DummyModel(nn.Module):
def __init__(self):
super(DummyModel, self).__init__()
self.net = nn.Linear(1, 1)
def forward(self, x):
return self.net(x)
class DummyPretrainedModel(nn.Module):
def __init__(self):
super(DummyPretrainedModel, self).__init__()
self.features = nn.Linear(4, 2, bias=False)
self.fc = nn.Linear(2, 1)
def forward(self, x):
x = self.features(x)
x = self.fc(x)
return x
def test_checkpoint_wrong_input():
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint(12, lambda x: x, "prefix")
with pytest.raises(TypeError, match=r"Argument `to_save` should be a dictionary"):
Checkpoint([12], lambda x: x, "prefix")
model = DummyModel()
to_save = {"model": model}
with pytest.raises(
TypeError, match=r"Argument `save_handler` should be a string or callable or inherit from BaseSaveHandler"
):
Checkpoint(to_save, 12, "prefix")
with pytest.raises(TypeError, match=r"global_step_transform should be a function."):
Checkpoint(to_save, lambda x: x, score_function=lambda e: 123, score_name="acc", global_step_transform=123)
with pytest.raises(ValueError, match=r"Cannot have key 'checkpointer' if `include_self` is True"):
Checkpoint({"checkpointer": model}, lambda x: x, include_self=True)
class ImmutableMapping(Mapping):
def __getitem__(self, key):
return to_save[key]
def __iter__(self):
return iter(to_save)
def __len__(self):
return len(to_save)
with pytest.raises(TypeError, match="If `include_self` is True, then `to_save` must be mutable"):
Checkpoint(ImmutableMapping(), lambda x: x, include_self=True)
def test_save_handler_as_str(dirname):
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=dirname)
assert isinstance(checkpointer.save_handler, DiskSaver)
def test_checkpoint_score_function_wrong_output():
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, lambda x: x, score_function=lambda e: {"1": 1}, score_name="acc")
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
with pytest.raises(ValueError, match=r"Output of score_function should be a number"):
checkpointer(trainer)
def test_checkpoint_default():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, f"{name}_0.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
save_handler.assert_called_with(obj, f"{name}_1234.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.pt")
assert checkpointer.last_checkpoint == f"{name}_1234.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_include_self_state_dict():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, include_self=True)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
fname = f"{name}_0.pt"
obj["checkpointer"] = OrderedDict([("saved", [(0, fname)])])
metadata = {"basename": name, "score_name": None, "priority": 0}
save_handler.assert_called_with(obj, fname, metadata)
# Swap object, state should be maintained
checkpointer2 = Checkpoint(to_save, save_handler=save_handler, include_self=True)
checkpointer2.load_state_dict(checkpointer.state_dict())
assert checkpointer2.last_checkpoint == fname
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer2(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 1234
# This delete only happens if state was restored correctly.
save_handler.remove.assert_called_with(f"{name}_0.pt")
fname = f"{name}_1234.pt"
obj["checkpointer"] = OrderedDict([("saved", [(1234, fname)])])
save_handler.assert_called_with(obj, fname, metadata)
assert save_handler.remove.call_count == 1
assert checkpointer2.last_checkpoint == fname
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_dp():
model = DummyModel()
dp_model = nn.DataParallel(model)
to_save = {"model": dp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
def test_checkpoint_with_global_step_transform():
def _test(filename_prefix, to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
global_step_transform=lambda e, _: e.state.epoch,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=2, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
if len(filename_prefix) > 0:
filename_prefix += "_"
metadata = {"basename": f"{filename_prefix}{name}", "score_name": None, "priority": 2}
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_2.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{filename_prefix}{name}_12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{filename_prefix}{name}_2.pt")
assert checkpointer.last_checkpoint == f"{filename_prefix}{name}_12.pt"
for prefix in ["", "dummytask"]:
model = DummyModel()
to_save = {"model": model}
_test(prefix, to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(prefix, to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_score_function():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, score_function=lambda e: e.state.score)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = 0.78
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_0.7800.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_score_name_only():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
def test_checkpoint_with_score_name_and_function():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name="loss", score_function=lambda e: e.state.score
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1, score=-0.77)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "loss", "priority": -0.77}
save_handler.assert_called_with(obj, f"{name}_loss=-0.7700.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
trainer.state.score = -0.76
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = -0.76
save_handler.assert_called_with(obj, f"{name}_loss=-0.7600.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_loss=-0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_loss=-0.7600.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
def test_checkpoint_with_int_score():
def _test(to_save, obj, name, score_name=None):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save, save_handler=save_handler, score_name=score_name, score_function=lambda e: e.state.epoch
)
if score_name is None:
score_name = ""
else:
score_name += "="
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=1)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": score_name[:-1] if len(score_name) > 0 else None, "priority": 1}
save_handler.assert_called_with(obj, f"{name}_{score_name}1.pt", metadata)
trainer.state.epoch = 12
trainer.state.iteration = 1234
checkpointer(trainer)
assert save_handler.call_count == 2
metadata["priority"] = 12
save_handler.assert_called_with(obj, f"{name}_{score_name}12.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_{score_name}1.pt")
assert checkpointer.last_checkpoint == f"{name}_{score_name}12.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
_test(to_save, model.state_dict(), "model", "epoch")
model = DummyModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
to_save = {"model": model, "optimizer": optimizer}
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint")
_test(to_save, {"model": model.state_dict(), "optimizer": optimizer.state_dict()}, "checkpoint", "epoch")
def test_checkpoint_with_score_function_and_trainer_epoch():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": None, "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_0.7800.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
def test_checkpoint_with_score_name_and_function_and_trainer_epoch():
def _test(to_save, obj, name):
save_handler = MagicMock(spec=BaseSaveHandler)
trainer = Engine(lambda e, b: None)
evaluator = Engine(lambda e, b: None)
trainer.state = State(epoch=11, iteration=1)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
global_step_transform=lambda _1, _2: trainer.state.epoch,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
evaluator.state = State(epoch=1, iteration=1000, metrics={"val_acc": 0.77})
checkpointer(evaluator)
assert save_handler.call_count == 1
metadata = {"basename": name, "score_name": "val_acc", "priority": 0.77}
save_handler.assert_called_with(obj, f"{name}_11_val_acc=0.7700.pt", metadata)
trainer.state.epoch = 12
evaluator.state.metrics["val_acc"] = 0.78
checkpointer(evaluator)
assert save_handler.call_count == 2
metadata["priority"] = 0.78
save_handler.assert_called_with(obj, f"{name}_12_val_acc=0.7800.pt", metadata)
assert save_handler.remove.call_count == 1
save_handler.remove.assert_called_with(f"{name}_11_val_acc=0.7700.pt")
assert checkpointer.last_checkpoint == f"{name}_12_val_acc=0.7800.pt"
model = DummyModel()
to_save = {"model": model}
_test(to_save, model.state_dict(), "model")
def test_checkpoint_last_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=1, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_9.pt"
def test_checkpoint_last_checkpoint_on_score():
save_handler = MagicMock(spec=BaseSaveHandler)
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
n_saved=None,
score_name="val_acc",
score_function=lambda e: e.state.metrics["val_acc"],
)
trainer = Engine(lambda e, b: None)
val_acc = 0.0
for i in range(10):
val_acc = i * 0.1
trainer.state = State(epoch=1, iteration=i, metrics={"val_acc": val_acc})
checkpointer(trainer)
assert save_handler.call_count == 10
assert checkpointer.last_checkpoint == "model_val_acc=0.9000.pt"
def test_checkpoint_save_handler_callable():
def save_handler(c, f):
assert f == "model_12.pt"
to_save = {"model": DummyModel()}
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=1, iteration=12)
checkpointer(trainer)
def test_model_checkpoint_args_validation(dirname):
existing = os.path.join(dirname, "existing_dir")
nonempty = os.path.join(dirname, "nonempty")
os.makedirs(existing)
os.makedirs(nonempty)
with open(os.path.join(nonempty, f"{_PREFIX}_name_0.pt"), "w"):
pass
with pytest.raises(ValueError, match=r"with extension '.pt' are already present "):
ModelCheckpoint(nonempty, _PREFIX)
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
ModelCheckpoint(os.path.join(dirname, "non_existing_dir"), _PREFIX, create_dir=False)
with pytest.raises(TypeError, match=r"global_step_transform should be a function"):
ModelCheckpoint(existing, _PREFIX, create_dir=False, global_step_transform=1234)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
assert h.last_checkpoint is None
with pytest.raises(RuntimeError, match=r"No objects to checkpoint found."):
h(None, [])
def test_model_checkpoint_simple_recovery(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
def test_model_checkpoint_simple_recovery_from_existing_non_empty(dirname):
def _test(ext, require_empty):
previous_fname = os.path.join(dirname, f"{_PREFIX}_obj_{1}{ext}")
with open(previous_fname, "w") as f:
f.write("test")
h = ModelCheckpoint(dirname, _PREFIX, create_dir=True, require_empty=require_empty)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
fname = h.last_checkpoint
ext = ".pt"
assert isinstance(fname, str)
assert os.path.join(dirname, f"{_PREFIX}_model_{1}{ext}") == fname
assert os.path.exists(fname)
assert os.path.exists(previous_fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.state_dict()
os.remove(fname)
_test(".txt", require_empty=True)
_test(".pt", require_empty=False)
def test_model_checkpoint_invalid_save_handler(dirname):
h = ModelCheckpoint(dirname, _PREFIX)
to_save = {"model": DummyModel()}
# Redefine save_handler
h.save_handler = lambda x, y: None
h(Engine(lambda x, y: None), to_save)
with pytest.raises(
RuntimeError, match=rf"Unable to save checkpoint, save_handler should be DiskSaver, got {type(h.save_handler)}."
):
h.last_checkpoint
def test_disk_saver_atomic(dirname):
model = DummyModel()
to_save_serializable = {"model": model}
to_save_non_serializable = {"model": lambda x: x}
def _test_existance(atomic, _to_save, expected):
saver = DiskSaver(dirname, atomic=atomic, create_dir=False, require_empty=False)
fname = "test.pt"
try:
with warnings.catch_warnings():
# Ignore torch/serialization.py:292: UserWarning: Couldn't retrieve source code for container of type
# DummyModel. It won't be checked for correctness upon loading.
warnings.simplefilter("ignore", category=UserWarning)
saver(_to_save, fname)
except Exception:
pass
fp = os.path.join(saver.dirname, fname)
assert os.path.exists(fp) == expected
if expected:
# related to https://github.com/pytorch/ignite/issues/1876
mode = stat.filemode(os.stat(fp).st_mode)
assert [mode[1], mode[4], mode[7]] == ["r", "r", "r"], mode
if expected:
saver.remove(fname)
_test_existance(atomic=False, _to_save=to_save_serializable, expected=True)
_test_existance(atomic=False, _to_save=to_save_non_serializable, expected=True)
_test_existance(atomic=True, _to_save=to_save_serializable, expected=True)
_test_existance(atomic=True, _to_save=to_save_non_serializable, expected=False)
@pytest.mark.skipif(
parse_version(torch.__version__) < parse_version("1.4.0"), reason="Zipfile serialization was introduced in 1.4.0"
)
def test_disk_saver_zipfile_serialization_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, _use_new_zipfile_serialization=False)
fname = "test.pt"
saver(to_save, fname)
fp = os.path.join(saver.dirname, fname)
assert os.path.exists(fp)
saver.remove(fname)
def test_disk_saver_unknown_keyword(dirname):
model = DummyModel()
to_save = {"model": model}
saver = DiskSaver(dirname, create_dir=False, unknown_keyword="")
fname = "test.pt"
with pytest.raises(TypeError, match=r"got an unexpected keyword argument 'unknown_keyword'"):
saver(to_save, fname)
def test_last_k(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
h(engine, to_save)
for i in range(1, 9):
engine.state.iteration = i
h(engine, to_save)
expected = [f"{_PREFIX}_model_{i}.pt" for i in [7, 8]]
assert sorted(os.listdir(dirname)) == expected, f"{sorted(os.listdir(dirname))} vs {expected}"
def test_disabled_n_saved(dirname):
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=None)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
num_iters = 100
for i in range(num_iters):
engine.state.iteration = i
h(engine, to_save)
saved_files = sorted(os.listdir(dirname))
assert len(saved_files) == num_iters, f"{saved_files}"
expected = sorted([f"{_PREFIX}_model_{i}.pt" for i in range(num_iters)])
assert saved_files == expected, f"{saved_files} vs {expected}"
def test_best_k(dirname):
scores = iter([1.2, -2.0, 3.1, -4.0])
def score_function(_):
return next(scores)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
h(engine, to_save)
expected = [f"{_PREFIX}_model_{i:.4f}.pt" for i in [1.2, 3.1]]
assert sorted(os.listdir(dirname)) == expected
def test_best_k_with_suffix(dirname):
scores = [0.3456789, 0.1234, 0.4567, 0.134567]
scores_iter = iter(scores)
def score_function(engine):
return next(scores_iter)
h = ModelCheckpoint(
dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function, score_name="val_loss"
)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(4):
engine.state.epoch += 1
h(engine, to_save)
expected = [f"{_PREFIX}_model_val_loss={scores[e - 1]:.4}.pt" for e in [1, 3]]
assert sorted(os.listdir(dirname)) == expected
def test_removes_each_score_at_most_once(dirname):
scores = [0, 1, 1, 2, 3]
scores_iter = iter(scores)
def score_function(_):
return next(scores_iter)
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2, score_function=score_function)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
model = DummyModel()
to_save = {"model": model}
for _ in range(len(scores)):
h(engine, to_save)
# If a score had been removed more than once, the code above would have raised a
# FileNotFoundError. So this test only checks the absence of such a failure,
# without further assertions.
def test_with_engine(dirname):
def update_fn(_1, _2):
pass
name = "model"
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=4)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [3 * 2, 4 * 2]])
assert sorted(os.listdir(dirname)) == expected
def test_with_state_dict(dirname):
def update_fn(_1, _2):
pass
engine = Engine(update_fn)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1, 2], max_epochs=4)
saved_model = os.path.join(dirname, os.listdir(dirname)[0])
load_model = torch.load(saved_model)
assert not isinstance(load_model, DummyModel)
assert isinstance(load_model, dict)
model_state_dict = model.state_dict()
loaded_model_state_dict = load_model
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.numpy() == loaded_model_value.numpy()
def test_valid_state_dict_save(dirname):
model = DummyModel()
h = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=0)
to_save = {"name": 42}
with pytest.raises(TypeError, match=r"should have `state_dict` method"):
h(engine, to_save)
to_save = {"name": model}
try:
h(engine, to_save)
except ValueError:
pytest.fail("Unexpected ValueError")
def _test_save_model_optimizer_lr_scheduler_with_state_dict(device, dirname, on_zero_rank=False):
torch.manual_seed(23)
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
# Below code raises: RuntimeError: torch_xla/csrc/tensor_impl.cpp:144 : XLA tensors do not have storage
# Probably related to https://github.com/pytorch/xla/issues/2576
# loss = y.pow(2.0).sum()
loss = y.sum()
print(loss.device, y.device, x.device)
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
engine = Engine(update_fn)
if (not on_zero_rank) or (on_zero_rank and idist.get_rank() == 0):
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=True, n_saved=1)
engine.add_event_handler(
Events.EPOCH_COMPLETED, handler, {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
)
engine.run([0, 1, 2], max_epochs=4)
idist.barrier()
saved_objects = sorted(os.listdir(dirname))
# saved object is ['PREFIX_checkpoint_3.pt', ]
saved_checkpoint = os.path.join(dirname, saved_objects[0])
if idist.has_xla_support:
device = "cpu"
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["model", "optimizer", "lr_scheduler"]:
assert f in loaded_obj
loaded_model_state_dict = loaded_obj["model"]
loaded_optimizer_state_dict = loaded_obj["optimizer"]
loaded_lr_scheduler_state_dict = loaded_obj["lr_scheduler"]
assert isinstance(loaded_model_state_dict, dict)
assert isinstance(loaded_optimizer_state_dict, dict)
assert isinstance(loaded_lr_scheduler_state_dict, dict)
# Specifically move device to CPU first
model_state_dict = model.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
lr_scheduler_state_dict = lr_scheduler.state_dict()
for key in lr_scheduler_state_dict.keys():
assert key in loaded_lr_scheduler_state_dict
lr_scheduler_value = lr_scheduler_state_dict[key]
loaded_lr_scheduler_value = loaded_lr_scheduler_state_dict[key]
assert lr_scheduler_value == loaded_lr_scheduler_value
def test_save_model_optimizer_lr_scheduler_with_state_dict(dirname):
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", dirname)
def _test_save_model_optimizer_lr_scheduler_with_validation(device, dirname, on_zero_rank=False):
torch.manual_seed(23)
def _build_objects(acc_list):
model = DummyModel().to(device)
optim = torch.optim.SGD(model.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
def update_fn(engine, batch):
x = torch.rand((4, 1)).to(device)
optim.zero_grad()
y = model(x)
loss = y.pow(2.0).sum()
loss.backward()
if idist.has_xla_support:
import torch_xla.core.xla_model as xm
xm.optimizer_step(optim, barrier=True)
else:
optim.step()
lr_scheduler.step()
trainer = Engine(update_fn)
evaluator = Engine(lambda e, b: None)
acc_iter = iter(acc_list)
@evaluator.on(Events.EPOCH_COMPLETED)
def setup_result():
evaluator.state.metrics["accuracy"] = next(acc_iter)
@trainer.on(Events.EPOCH_COMPLETED)
def run_eval():
evaluator.run([0, 1, 2])
def score_function(engine):
return engine.state.metrics["accuracy"]
save_handler = DiskSaver(dirname, create_dir=True, require_empty=False)
early_stop = EarlyStopping(score_function=score_function, patience=2, trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, early_stop)
checkpointer = Checkpoint(
{
"trainer": trainer,
"model": model,
"optim": optim,
"lr_scheduler": lr_scheduler,
"early_stop": early_stop,
},
save_handler,
include_self=True,
global_step_transform=global_step_from_engine(trainer),
)
evaluator.add_event_handler(Events.COMPLETED, checkpointer)
return trainer, evaluator, model, optim, lr_scheduler, early_stop, checkpointer
trainer, evaluator, model, optim, scheduler, early, checkpointer = _build_objects([0.2, 0.3, 0.2])
trainer.run([0, 1, 2], max_epochs=3)
saved_objects = sorted(os.listdir(dirname))
saved_checkpoint = os.path.join(dirname, saved_objects[0])
loaded_obj = torch.load(saved_checkpoint, map_location=device)
for f in ["trainer", "model", "optim", "lr_scheduler", "early_stop", "checkpointer"]:
assert f in loaded_obj
trainer2, evaluator2, model2, optim2, scheduler2, early2, checkpointer2 = _build_objects([0.1, 0.1, 0.1])
Checkpoint.load_objects(
{
"trainer": trainer2,
"model": model2,
"optim": optim2,
"lr_scheduler": scheduler2,
"early_stop": early2,
"checkpointer": checkpointer2,
},
loaded_obj,
)
assert checkpointer2.last_checkpoint == checkpointer.last_checkpoint
model_state_dict = model.cpu().state_dict()
loaded_model_state_dict = model2.cpu().state_dict()
for key in model_state_dict.keys():
assert key in loaded_model_state_dict
model_value = model_state_dict[key]
loaded_model_value = loaded_model_state_dict[key]
assert model_value.cpu().numpy() == loaded_model_value.cpu().numpy()
optim_state_dict = optim.state_dict()
loaded_optimizer_state_dict = optim2.state_dict()
# "params" contains tensor IDs, which are different
del optim_state_dict["param_groups"][0]["params"]
del loaded_optimizer_state_dict["param_groups"][0]["params"]
for key in optim_state_dict.keys():
assert key in loaded_optimizer_state_dict
optim_value = optim_state_dict[key]
loaded_optim_value = loaded_optimizer_state_dict[key]
if idist.get_rank() == 0:
assert optim_value == loaded_optim_value
def _check_state_dict(original, loaded):
original_state_dict = original.state_dict()
loaded_state_dict = loaded.state_dict()
for key in original_state_dict.keys():
assert key in loaded_state_dict
original_value = original_state_dict[key]
loaded_value = loaded_state_dict[key]
assert original_value == loaded_value
_check_state_dict(trainer, trainer2)
_check_state_dict(scheduler, scheduler2)
_check_state_dict(early, early2)
_check_state_dict(checkpointer, checkpointer2)
trainer2.run([0, 1, 2], max_epochs=6)
# early stopping should have triggered
assert trainer2.state.epoch == 4
# If Checkpoint's state was restored correctly, it should continue to respect n_saved
# and delete old checkpoints, and have the correct last_checkpoint.
assert os.listdir(dirname) == ["checkpoint_4.pt"]
assert checkpointer2.last_checkpoint == "checkpoint_4.pt"
def test_save_model_optimizer_lr_scheduler_with_validation(dirname):
_test_save_model_optimizer_lr_scheduler_with_validation("cpu", dirname)
def test_checkpoint_load_objects():
with pytest.raises(TypeError, match=r"Argument checkpoint should be a string or a dictionary"):
Checkpoint.load_objects({}, [])
with pytest.raises(TypeError, match=r"should have `load_state_dict` method"):
Checkpoint.load_objects({"a": None}, {"a": None})
model = DummyModel()
to_load = {"model": model, "another_model": model}
with pytest.raises(ValueError, match=r"from `to_load` is not found in the checkpoint"):
Checkpoint.load_objects(to_load, {})
model = DummyModel()
to_load = {"model": model}
model2 = DummyModel()
chkpt = {"model": model2.state_dict()}
Checkpoint.load_objects(to_load, chkpt)
assert model.state_dict() == model2.state_dict()
def test_checkpoint_load_objects_from_saved_file(dirname):
def _get_single_obj_to_save():
model = DummyModel()
to_save = {"model": model}
return to_save
def _get_multiple_objs_to_save():
model = DummyModel()
optim = torch.optim.SGD(model.parameters(), lr=0.001)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optim, gamma=0.5)
to_save = {"model": model, "optimizer": optim, "lr_scheduler": lr_scheduler}
return to_save
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
# case: load from filepath
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
Checkpoint.load_objects(to_save, fname)
os.remove(fname)
# case: multiple objects
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
os.remove(fname)
# case: saved multiple objects, loaded single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_multiple_objs_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
to_load = {"model": to_save["model"]}
Checkpoint.load_objects(to_load, loaded_objects)
os.remove(fname)
# case: single object
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
to_save = _get_single_obj_to_save()
handler(trainer, to_save)
fname = handler.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
Checkpoint.load_objects(to_save, loaded_objects)
os.remove(fname)
def test_load_checkpoint_with_different_num_classes(dirname):
model = DummyPretrainedModel()
to_save_single_object = {"model": model}
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=1)
handler(trainer, to_save_single_object)
fname = handler.last_checkpoint
loaded_checkpoint = torch.load(fname)
to_load_single_object = {"pretrained_features": model.features}
with pytest.raises(RuntimeError):
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=UserWarning)
Checkpoint.load_objects(to_load_single_object, loaded_checkpoint, strict=False, blah="blah")
loaded_weights = to_load_single_object["pretrained_features"].state_dict()["weight"]
assert torch.all(model.state_dict()["features.weight"].eq(loaded_weights))
def test_disksaver_wrong_input(dirname):
with pytest.raises(ValueError, match=r"Directory path '\S+' is not found"):
DiskSaver("/tmp/non-existing-folder", create_dir=False)
def _test(ext):
previous_fname = os.path.join(dirname, f"{_PREFIX}_obj_{1}{ext}")
with open(previous_fname, "w") as f:
f.write("test")
with pytest.raises(ValueError, match=r"with extension '.pt' are already present"):
DiskSaver(dirname, require_empty=True)
_test(".pt")
def _test_checkpoint_with_ddp(device):
torch.manual_seed(0)
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
to_save = {"model": ddp_model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
assert save_handler.call_count == 1
metadata = {"basename": "model", "score_name": None, "priority": 0}
save_handler.assert_called_with(model.state_dict(), "model_0.pt", metadata)
def _test_checkpoint_load_objects_ddp(device):
model = DummyModel().to(device)
device_ids = None if "cpu" in device.type else [device]
ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=device_ids)
opt = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
# single object:
to_load = {"model": ddp_model}
checkpoint = ddp_model.module.state_dict()
Checkpoint.load_objects(to_load, checkpoint)
# multiple objects:
to_load = {"model": ddp_model, "opt": opt}
checkpoint = {"model": ddp_model.module.state_dict(), "opt": opt.state_dict()}
Checkpoint.load_objects(to_load, checkpoint)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo, get_rank_zero_dirname):
device = idist.device()
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "2"), on_zero_rank=True)
_test_checkpoint_with_ddp(device)
_test_checkpoint_load_objects_ddp(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl, get_rank_zero_dirname):
device = idist.device()
dirname = get_rank_zero_dirname()
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict("cpu", os.path.join(dirname, "2"), on_zero_rank=True)
_test_checkpoint_with_ddp(device=device)
_test_checkpoint_load_objects_ddp(device=device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor, get_rank_zero_dirname):
device = torch.device("cpu" if not torch.cuda.is_available() else "cuda")
nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
dirname = get_rank_zero_dirname()
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
(device, os.path.join(dirname, "1")),
np=nproc,
do_init=True,
)
gloo_hvd_executor(
_test_save_model_optimizer_lr_scheduler_with_state_dict,
("cpu", os.path.join(dirname, "2"), True),
np=nproc,
do_init=True,
)
def _test_tpu_saves_to_cpu(device, dirname):
torch.manual_seed(0)
h = ModelCheckpoint(dirname, _PREFIX)
engine = Engine(lambda e, b: None)
engine.state = State(epoch=0, iteration=1)
model = DummyModel().to(device)
to_save = {"model": model}
h(engine, to_save)
idist.barrier()
fname = h.last_checkpoint
assert isinstance(fname, str)
assert os.path.join(dirname, _PREFIX) in fname
assert os.path.exists(fname)
loaded_objects = torch.load(fname)
assert loaded_objects == model.cpu().state_dict()
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla(dirname):
assert "xla" in idist.device().type
_test_tpu_saves_to_cpu(idist.device(), os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(idist.device(), os.path.join(dirname, "2"))
def _test_tpu_saves_to_cpu_nprocs(index, dirname):
device = idist.device()
_test_tpu_saves_to_cpu(device, os.path.join(dirname, "1"))
_test_save_model_optimizer_lr_scheduler_with_state_dict(device, os.path.join(dirname, "2"))
import time
# hack to make all processes sync properly:
time.sleep(1)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Not on TPU device")
def test_distrib_single_device_xla_nprocs(xmp_executor, dirname):
n = int(os.environ["NUM_TPU_WORKERS"])
xmp_executor(_test_tpu_saves_to_cpu_nprocs, args=(dirname,), nprocs=n)
def test_checkpoint_filename_pattern():
def _test(
to_save,
filename_prefix="",
score_function=None,
score_name=None,
global_step_transform=None,
filename_pattern=None,
):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(
to_save,
save_handler=save_handler,
filename_prefix=filename_prefix,
score_function=score_function,
score_name=score_name,
global_step_transform=global_step_transform,
filename_pattern=filename_pattern,
)
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=12, iteration=203, score=0.9999)
checkpointer(trainer)
return checkpointer.last_checkpoint
model = DummyModel()
to_save = {"model": model}
assert _test(to_save) == "model_203.pt"
assert _test(to_save, "best") == "best_model_203.pt"
assert _test(to_save, score_function=lambda e: e.state.score) == "model_0.9999.pt"
res = _test(to_save, score_function=lambda e: e.state.score, global_step_transform=lambda e, _: e.state.epoch)
assert res == "model_12_0.9999.pt"
assert _test(to_save, score_function=lambda e: e.state.score, score_name="acc") == "model_acc=0.9999.pt"
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
)
assert res == "model_12_acc=0.9999.pt"
assert _test(to_save, "best", score_function=lambda e: e.state.score) == "best_model_0.9999.pt"
res = _test(
to_save, "best", score_function=lambda e: e.state.score, global_step_transform=lambda e, _: e.state.epoch
)
assert res == "best_model_12_0.9999.pt"
res = _test(to_save, "best", score_function=lambda e: e.state.score, score_name="acc")
assert res == "best_model_acc=0.9999.pt"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
)
assert res == "best_model_12_acc=0.9999.pt"
pattern = "{name}.{ext}"
assert _test(to_save, filename_pattern=pattern) == "model.pt"
pattern = "chk-{name}--{global_step}.{ext}"
assert _test(to_save, filename_pattern=pattern) == "chk-model--203.pt"
pattern = "chk-{filename_prefix}--{name}--{global_step}.{ext}"
assert _test(to_save, "best", filename_pattern=pattern) == "chk-best--model--203.pt"
pattern = "chk-{name}--{score}.{ext}"
assert _test(to_save, score_function=lambda e: e.state.score, filename_pattern=pattern) == "chk-model--0.9999.pt"
pattern = "{global_step}-{name}-{score}.chk.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
)
assert res == "12-model-0.9999.chk.pt"
pattern = "chk-{name}--{score_name}--{score}.{ext}"
res = _test(to_save, score_function=lambda e: e.state.score, score_name="acc", filename_pattern=pattern)
assert res == "chk-model--acc--0.9999.pt"
pattern = "chk-{name}-{global_step}-{score_name}-{score}.{ext}"
res = _test(
to_save,
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
)
assert res == "chk-model-12-acc-0.9999.pt"
pattern = "{filename_prefix}-{name}-{score}.chk"
res = _test(to_save, "best", score_function=lambda e: e.state.score, filename_pattern=pattern)
assert res == "best-model-0.9999.chk"
pattern = "resnet-{filename_prefix}-{name}-{global_step}-{score}.chk"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
)
assert res == "resnet-best-model-12-0.9999.chk"
pattern = "{filename_prefix}-{name}-{score_name}-{score}.chk"
res = _test(to_save, "best", score_function=lambda e: e.state.score, score_name="acc", filename_pattern=pattern)
assert res == "best-model-acc-0.9999.chk"
pattern = "{global_step}-{filename_prefix}-{name}-{score_name}-{score}"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
)
assert res == "12-best-model-acc-0.9999"
pattern = "SAVE:{name}-{score_name}-{score}.pth"
res = _test(
to_save,
"best",
score_function=lambda e: e.state.score,
score_name="acc",
global_step_transform=lambda e, _: e.state.epoch,
filename_pattern=pattern,
)
assert res == "SAVE:model-acc-0.9999.pth"
pattern = "{global_step}-chk-{filename_prefix}-{name}-{score_name}-{score}.{ext}"
assert _test(to_save, filename_pattern=pattern) == "203-chk--model-None-None.pt"
with pytest.raises(KeyError, match=r"random_key"):
pattern = "SAVE:{random_key}.{ext}"
_test(to_save, filename_pattern=pattern)
def test_setup_filename_pattern():
# default filename pattern
assert Checkpoint.setup_filename_pattern() == "{filename_prefix}_{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False) == "{name}_{global_step}_{score_name}={score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, False, False) == "{name}_{global_step}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False) == "{name}_{global_step}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, False, False) == "{name}_{score}.{ext}"
assert Checkpoint.setup_filename_pattern(False, True, True, False) == "{name}_{score_name}={score}.{ext}"
with pytest.raises(ValueError, match=r"At least one of with_score and with_global_step should be True."):
Checkpoint.setup_filename_pattern(False, False, False, False)
with pytest.raises(ValueError, match=r"If with_score_name is True, with_score should be also True"):
Checkpoint.setup_filename_pattern(True, False, True, True)
def _setup_checkpoint():
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=0)
checkpointer(trainer)
trainer.state.iteration = 10
checkpointer(trainer)
trainer.state.iteration = 20
checkpointer(trainer)
assert save_handler.call_count == 3
return checkpointer
def test_checkpoint_state_dict():
checkpointer = _setup_checkpoint()
sd = checkpointer.state_dict()
assert "saved" in sd
assert isinstance(sd["saved"], list) and len(sd["saved"]) == len(checkpointer._saved)
for saved_item, true_item in zip(sd["saved"], checkpointer._saved):
assert saved_item[0] == true_item.priority
assert saved_item[1] == true_item.filename
def test_checkpoint_load_state_dict():
true_checkpointer = _setup_checkpoint()
save_handler = MagicMock(spec=BaseSaveHandler)
model = DummyModel()
to_save = {"model": model}
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=None)
sd = {"saved": [(0, "model_0.pt"), (10, "model_10.pt"), (20, "model_20.pt")]}
checkpointer.load_state_dict(sd)
assert checkpointer._saved == true_checkpointer._saved
def test_checkpoint_fixed_filename():
model = DummyModel()
to_save = {"model": model}
def _test(n_saved):
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=n_saved, filename_pattern="{name}.{ext}")
trainer = Engine(lambda e, b: None)
for i in range(10):
trainer.state = State(epoch=i, iteration=i)
checkpointer(trainer)
assert save_handler.call_count == i + 1
metadata = {"basename": "model", "score_name": None, "priority": i}
save_handler.assert_called_with(model.state_dict(), "model.pt", metadata)
_test(None)
_test(1)
_test(3)
def test_checkpoint_reset():
model = DummyModel()
to_save = {"model": model}
save_handler = MagicMock(spec=BaseSaveHandler)
checkpointer = Checkpoint(to_save, save_handler=save_handler, n_saved=2)
assert checkpointer.last_checkpoint is None
trainer = Engine(lambda e, b: None)
trainer.state = State(epoch=0, iteration=123)
checkpointer(trainer)
trainer.state.iteration = 234
checkpointer(trainer)
assert save_handler.call_count == 2
assert checkpointer.last_checkpoint == "model_234.pt"
assert len(checkpointer._saved) == 2
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_123.pt", "model_234.pt"])
checkpointer.reset()
assert len(checkpointer._saved) == 0
trainer.state.iteration = 124
checkpointer(trainer)
assert save_handler.call_count == 3
assert checkpointer.last_checkpoint == "model_124.pt"
assert len(checkpointer._saved) == 1
assert sorted([item.filename for item in checkpointer._saved]) == sorted(["model_124.pt"])
def test_checkpoint_reset_with_engine(dirname):
name = "model"
engine = Engine(lambda e, b: None)
handler = ModelCheckpoint(dirname, _PREFIX, create_dir=False, n_saved=2)
model = DummyModel()
to_save = {"model": model}
engine.add_event_handler(Events.EPOCH_COMPLETED, handler, to_save)
engine.run([0, 1], max_epochs=10)
expected = sorted([f"{_PREFIX}_{name}_{i}.pt" for i in [9 * 2, 10 * 2]])
assert sorted(os.listdir(dirname)) == expected
assert "PREFIX_model_20.pt" in handler.last_checkpoint
handler.reset()
engine.state.max_epochs = None
engine.run([0, 1], max_epochs=2)
expected += [f"{_PREFIX}_{name}_{i}.pt" for i in [1 * 2, 2 * 2]]
assert sorted(os.listdir(dirname)) == sorted(expected)
assert "PREFIX_model_4.pt" in handler.last_checkpoint
def test_greater_or_equal():
scores = iter([1, 2, 2, 2])
def score_function(_):
return next(scores)
class Saver:
def __init__(self):
self.counter = 0
def __call__(self, c, f, m):
if self.counter == 0:
assert f == "model_1.pt"
else:
assert f == "model_2.pt"
self.counter += 1
handler = Saver()
checkpointer = Checkpoint(
to_save={"model": DummyModel()},
save_handler=handler,
score_function=score_function,
n_saved=2,
greater_or_equal=True,
)
trainer = Engine(lambda e, b: None)
for _ in range(4):
checkpointer(trainer)
assert handler.counter == 4
def test_get_default_score_fn():
with pytest.raises(ValueError, match=r"Argument score_sign should be 1 or -1"):
Checkpoint.get_default_score_fn("acc", 2.0)
engine = Engine(lambda e, b: None)
engine.state.metrics["acc"] = 0.9
engine.state.metrics["loss"] = 0.123
score_fn = Checkpoint.get_default_score_fn("acc")
score = score_fn(engine)
assert score == 0.9
score_fn = Checkpoint.get_default_score_fn("loss", -1)
score = score_fn(engine)
assert score == -0.123
|
the-stack_106_18832
|
from time import time
from django.test.client import Client, FakePayload
from django.conf import global_settings
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.core.handlers.wsgi import WSGIHandler
from djangobench.utils import run_comparison_benchmark
class RequestFactory(Client):
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
This class re-uses the django.test.client.Client interface, docs here:
http://www.djangoproject.com/documentation/testing/#the-test-client
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
Author: Simon (http://djangosnippets.org/users/simon/)
djangosnippet URL: (http://djangosnippets.org/snippets/963/)
"""
def request(self, **request):
"""
Similar to parent class, but returns the request object as soon as it
has created it.
"""
environ = {
'HTTP_COOKIE': self.cookies,
'PATH_INFO': '/',
'QUERY_STRING': '',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.input': FakePayload(''),
}
environ.update(self.defaults)
environ.update(request)
return WSGIRequest(environ)
def setup():
global req_factory, handler_default_middleware, handler_no_middleware
req_factory = RequestFactory()
settings.MIDDLEWARE_CLASSES = global_settings.MIDDLEWARE_CLASSES
handler_default_middleware = WSGIHandler()
handler_default_middleware.load_middleware()
settings.MIDDLEWARE_CLASSES = []
handler_no_middleware = WSGIHandler()
handler_no_middleware.load_middleware()
def benchmark_request(middleware_classes):
settings.MIDDLEWARE_CLASSES = middleware_classes
req_factory = RequestFactory()
handler = WSGIHandler()
handler.load_middleware()
handler.get_response(req_factory.get('/'))
def benchmark_default_middleware():
global req_factory, handler_default_middleware
handler_default_middleware.get_response(req_factory.get('/'))
def benchmark_no_middleware():
global req_factory, handler_no_middleware
handler_no_middleware.get_response(req_factory.get('/'))
run_comparison_benchmark(
benchmark_default_middleware,
benchmark_no_middleware,
setup = setup,
syncdb = False,
meta = {
'description': 'Request/response overhead added by the default middleware.',
}
)
|
the-stack_106_18833
|
from collections import namedtuple
from datetime import datetime
from typing import List
import requests
Success = namedtuple("Success", ["RowAffected"])
class GorseException(BaseException):
def __init__(self, status_code: int, message: str):
self.status_code = status_code
self.message = message
class Gorse:
def __init__(self, entry_point):
self.entry_point = entry_point
def insert_feedback(
self, feedback_type: str, user_id: str, item_id: str
) -> Success:
r = requests.post(
self.entry_point + "/api/feedback",
json=[
{
"FeedbackType": feedback_type,
"UserId": user_id,
"ItemId": item_id,
"Timestamp": datetime.now().isoformat(),
}
],
)
if r.status_code == 200:
return r.json()
raise GorseException(r.status_code, r.text)
def get_recommend(self, user_id: str, n: int = 1) -> List[str]:
r = requests.get(self.entry_point + "/api/recommend/%s?n=%d" % (user_id, n))
if r.status_code == 200:
return r.json()
raise GorseException(r.status_code, r.text)
def insert_feedbacks(self, feedbacks) -> Success:
r = requests.post(self.entry_point + "/api/feedback", json=feedbacks)
if r.status_code == 200:
return r.json()
raise GorseException(r.status_code, r.text)
def insert_item(self, item) -> List[str]:
r = requests.post(self.entry_point + "/api/item", json=item)
if r.status_code == 200:
return r.json()
raise GorseException(r.status_code, r.text)
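# Minimal usage sketch (not part of the original client). The entry point, item payload,
# user id and feedback type below are placeholder values; a real deployment would point
# at its own Gorse server and use its configured feedback types.
if __name__ == "__main__":
    client = Gorse("http://127.0.0.1:8087")
    try:
        client.insert_item({"ItemId": "item-1", "Timestamp": datetime.now().isoformat()})
        client.insert_feedback("read", "user-1", "item-1")
        print(client.get_recommend("user-1", n=5))
    except GorseException as e:
        print("Gorse request failed: %d %s" % (e.status_code, e.message))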
|
the-stack_106_18835
|
"""Class to handle S-parameters."""
import numpy as np
from numpy.linalg import inv
from numpy import diag, sqrt, identity, matmul, iscomplex
import matplotlib.pyplot as plt
from copy import deepcopy as copy
from rftools.parameters import *
class Network(object):
"""Class to handle S-parameters.
Args:
filename (str): Touchstone file to load
Keyword Args:
comment (str): comment to describe this specific instance
"""
def __init__(self, filename=None, **kwargs):
self.comment = kwargs.get('comment', '')
if filename is not None:
self.f, self.s, self.z0, self.ports = _read_touchstone(filename)
else:
self.f = kwargs.get('f', None)
self.s = kwargs.get('s', None)
self.z0 = kwargs.get('z0', None)
self.ports = kwargs.get('ports', ())
### Dunder ###
def __str__(self):
msg = '<Network: {} ports, {} points, {}>'
return msg.format(self.count_ports(), self.count_points(), self.ports)
def __repr__(self):
return self.__str__()
def __mul__(self, network2):
"""Cascade S-parameter matrices.
Args:
network2 (rftools.network.Network): second network
Returns:
rftools.network.Network: cascaded network
"""
network1 = self.copy()
assert isinstance(network2, Network), \
"Only Networks can be multiplied together."
assert np.all(network1.f == network2.f), \
"Frequencies must match."
assert network1._is_2port(), \
"First network must have 2 ports."
assert network2._is_1port() or network2._is_2port(), \
"Second network must have 1 or 2 ports."
if network2._is_1port():
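# A 1-port load is promoted to a padded 2-port (with tiny off-diagonal terms) so the
# same ABCD cascade below can be reused; only port 1 of the result is meaningful.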
_sparam2 = np.ones((2, 2, np.alen(self.f)), dtype=complex) * 1e-15
_sparam2[0, 0, :] = network2.s[0, 0]
_z02 = np.ones((2, np.alen(self.f)), dtype=complex) * network2.z0[0]
abcd1 = s_to_abcd(network1.s, network1.z0)
abcd2 = s_to_abcd(_sparam2, _z02)
abcd = np.empty_like(abcd1)
for i in range(abcd1.shape[2]):
abcd[:,:,i] = np.dot(abcd1[:,:,i], abcd2[:,:,i])
z01 = network1.z0[0]
z02 = _z02[1]
z0 = np.vstack((z01, z02))
s = abcd_to_sparam(abcd, z0)
result = Network()
result.f = network1.f
result.s = np.zeros((1, 1, np.alen(self.f)), dtype=complex)
result.s[0, 0, :] = s[0, 0]
result.z0 = np.zeros((1, np.alen(self.f)), dtype=float)
result.z0[0, :] = network1.z0[0]
result.ports = (network1.ports[0],)
return result
if network2._is_2port():
abcd1 = s_to_abcd(network1.s, network1.z0)
abcd2 = s_to_abcd(network2.s, network2.z0)
abcd = np.empty_like(abcd1)
for i in range(abcd1.shape[2]):
abcd[:,:,i] = np.dot(abcd1[:,:,i], abcd2[:,:,i])
z01 = network1.z0[0]
z02 = network2.z0[1]
z0 = np.vstack((z01, z02))
s = abcd_to_sparam(abcd, z0)
result = Network()
result.f = network1.f
result.s = s
result.z0 = z0
result.ports = (network1.ports[0], network2.ports[1])
return result
### Meta ###
def copy(self):
"""Do a deep copy."""
return copy(self)
def count_ports(self):
"""Count number of ports.
Returns:
int: number of ports
"""
return self.s.shape[0]
def count_points(self):
"""Count number of frequency points.
Returns:
int: number of frequency points
"""
return self.s.shape[2]
def check_ports(self):
"""Check S-parameter matrix."""
sn, sm, _ = self.s.shape
assert sn == sm, "S-parameters not square."
nports = sn
assert nports == self.z0.shape[0]
assert len(self.ports) == nports
### Checks ###
def _is_1port(self):
return self.count_ports() == 1
def _is_2port(self):
return self.count_ports() == 2
def _is_normalised(self):
"""Is it normalized to 50 ohms?"""
return np.array_equal(self.z0, np.ones_like(self.z0)*50.)
def is_reciprocal(self):
"""Does S21 equal S12?"""
return np.allclose(self.s[0, 1], self.s[1, 0])
def is_symmetrical(self):
"""Does S11 equal S22?"""
return np.allclose(self.s[0, 0], self.s[1, 1])
def is_lossless(self):
"""Is the matrix lossless?"""
return np.allclose(np.abs(self.s[0, 0])**2 + np.abs(self.s[0, 1])**2, np.ones_like(self.f))
def is_lossless_reciprocal(self):
"""Is the matrix lossless and reciprocal?"""
return self.is_reciprocal() & self.is_lossless()
### Get other parameters ###
def tparam(self):
"""Return T-parameters."""
return s_to_tparam(self.s)
def zparam(self):
"""Return Z-parameters."""
return s_to_zparam(self.s, self.z0)
def sparam_db(self):
"""Return S-parameter matrix in [dB]."""
with np.errstate(divide='ignore'):
s_db = 20*np.log10(np.abs(self.s))
return s_db
### Port impedance ###
def renormalize_ports(self, z0new=None):
# http://qucs.sourceforge.net/tech/node98.html
# print("THIS FUNCTION DOESNT WORK")
_, _, npts = self.s.shape
nports = self.count_ports()
if z0new is None:
z0new = np.ones((len(self.z0), npts)) * 50.
elif isinstance(z0new, float) or isinstance(z0new, int):
z0new = np.ones((len(self.z0), npts)) * z0new
assert not iscomplex(z0new).any(), "Renormalization does not work with complex port impedances."
elif z0new.ndim == 1:
assert not iscomplex(z0new).any(), "Renormalization does not work with complex port impedances."
z0new = np.array([z0new for _ in range(npts)]).T
assert z0new.shape[0] == nports
assert z0new.shape[1] == npts
assert not iscomplex(self.z0).any(), "Renormalization does not work with complex port impedances."
for i in range(npts):
# original s parameter matrix
s = self.s[..., i]
# reference impedance before renomalization
zn_before = self.z0[..., i]
# reference impedance after renormalization
zn = z0new[..., i]
# new s parameters
snew = z_to_sparam(s_to_zparam(s, zn_before), zn)
self.s[:,:,i] = snew
# new port reference impedance
self.z0[:,i] = zn
### Manipulate ports ###
def get_port_number(self, port_name):
"""Get port number from port name.
Args:
port_name (str): port name
Returns:
int: port number
"""
for i in range(len(self.ports)):
if self.ports[i] == port_name:
return i
raise ValueError
def list_ports(self):
"""List all port numbers and port names."""
for i in range(len(self.ports)):
print("{} : {}".format(i, self.ports[i]))
def delete_port(self, port_name):
"""Delete port.
Assumes that the port is matched, i.e., that it can be simply deleted.
Args:
port_name (str): port name
"""
try:
port_num = self.get_port_number(port_name)
mask = np.arange(len(self.ports)) != port_num
s = self.s[mask]
self.s = s[:,mask]
self.z0 = self.z0[mask]
ports = list(self.ports)
ports.remove(port_name)
self.ports = tuple(ports)
except ValueError:
print("The \"{}\" port does not exist.".format(port_name))
def rename_port(self, old_name, new_name):
"""Rename port.
Args:
old_name (str): old port name
new_name (str): new port name
"""
ports = list(self.ports)
new_ports = []
for port in ports:
if port == old_name:
new_ports.append(new_name)
else:
new_ports.append(port)
self.ports = tuple(new_ports)
def flip_ports(self):
"""Flip ports.
Only for two port networks.
"""
npts = self.count_points()
assert self._is_2port()
# Flip s-parameters
for i in range(npts):
mat = np.array([[0., 1.], [1., 0.]])
self.s[:,:,i] = np.dot(np.dot(mat, self.s[:,:,i]), mat)
# Flip Z0
z01 = self.z0[0]
z02 = self.z0[1]
self.z0 = np.vstack((z02, z01))
# Flip port names
self.ports = (self.ports[1], self.ports[0])
### Truncate data ###
def truncate_frequency(self, fmin, fmax):
"""Truncate frequency range.
Args:
fmin (float): lower frequency
fmax (float): upper frequency
"""
mask = (fmin <= self.f) & (self.f <= fmax)
self.f = self.f[mask]
self.s = self.s[:, :, mask]
self.z0 = self.z0[:, mask]
### Get property from given port name ###
def get_s(self, port1, port2):
"""Get specified S-parameters.
Args:
port1: port 1
port2: port 2
Returns:
ndarray: S-parameters
"""
if not isinstance(port1, int) and not isinstance(port2, int):
port1 = self.get_port_number(port1)
port2 = self.get_port_number(port2)
return self.s[port1, port2]
def get_s_db(self, port1, port2, sigma=None):
"""Get specified S-parameters in [dB].
Args:
port1: port 1
port2: port 2
Returns:
ndarray: S-parameters in dB
"""
if not isinstance(port1, int) and not isinstance(port2, int):
port1 = self.get_port_number(port1)
port2 = self.get_port_number(port2)
s_db = self.sparam_db()[port1, port2]
if sigma is not None:
sigma = sigma / (self.f[1] - self.f[0])
s_db = _gauss_conv(s_db, sigma)
return s_db
def get_s_mag(self, port1, port2, sigma=None):
"""Get specified S-parameters (mag).
Args:
port1: port 1
port2: port 2
Returns:
ndarray: S-parameter magnitude
"""
if not isinstance(port1, int) and not isinstance(port2, int):
port1 = self.get_port_number(port1)
port2 = self.get_port_number(port2)
s_mag = np.abs(self.s[port1, port2])
if sigma is not None:
sigma = sigma / (self.f[1] - self.f[0])
s_mag = _gauss_conv(s_mag, sigma)
return s_mag
def get_p_mag(self, port1, port2, sigma=None):
"""Get specified power magnitude.
Args:
port1: port 1
port2: port 2
Returns:
ndarray: power magnitude
"""
if not isinstance(port1, int) and not isinstance(port2, int):
port1 = self.get_port_number(port1)
port2 = self.get_port_number(port2)
s_mag = np.abs(self.s[port1, port2])
if sigma is not None:
sigma = sigma / (self.f[1] - self.f[0])
s_mag = _gauss_conv(s_mag, sigma)
return s_mag**2
def get_gain(self, port1, port2, sigma=None):
"""Get gain.
Args:
port1: port 1
port2: port 2
Keyword Args:
sigma: width of filter (set to None for no filtering)
Returns:
ndarray: gain
"""
return self.get_p_mag(port1, port2, sigma)
def get_z0(self, port):
"""Get characteristic / port impedance.
Args:
port: port
Returns:
ndarray: characteristic / port impedance
"""
if not isinstance(port, int):
port = self.get_port_number(port)
return self.z0[port]
def get_zin(self, port, sigma=None):
"""Get input impedance.
Args:
port: port
Keyword Args:
sigma: width of filter (set to None for no filtering)
Returns:
ndarray: input impedance
"""
if not isinstance(port, int):
port = self.get_port_number(port)
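# input impedance from the reflection coefficient: Zin = Z0 * (1 + S_ii) / (1 - S_ii)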
zin = (1 + self.s[port, port]) / (1 - self.s[port, port]) * self.z0[port]
if sigma is not None:
sigma = sigma / (self.f[1] - self.f[0])
zreal = _gauss_conv(zin.real, sigma)
zimag = _gauss_conv(zin.imag, sigma)
zin = zreal + 1j * zimag
return zin
### Get value at a certain frequency ###
def get_zin_at_f(self, f, port, sigma=None):
"""Get input impedance at specified frequency.
Args:
f: frequency
port: port
Keyword Args:
sigma: width of filter (set to None for no filtering)
Returns:
ndarray: input impedance
"""
if not isinstance(port, int):
port = self.get_port_number(port)
zin = (1 + self.s[port, port]) / (1 - self.s[port, port]) * self.z0[port]
if sigma is not None:
sigma = sigma / (self.f[1] - self.f[0])
zreal = _gauss_conv(zin.real, sigma)
zimag = _gauss_conv(zin.imag, sigma)
zin = zreal + 1j * zimag
return np.interp(f, self.f, zin)
def get_z0_at_f(self, f, port):
"""Get characteristic impedance at specified frequency.
Args:
f: frequency
port: port
Returns:
ndarray: characteristic impedance
"""
if not isinstance(port, int):
port = self.get_port_number(port)
z0 = self.z0[port]
return np.interp(f, self.f, z0)
def get_s_mag_at_f(self, f, port1, port2, sigma=None):
"""Get S-parameter at specified frequency.
Args:
f: frequency
port1: port 1
port2: port 2
Keyword Args:
sigma: width of filter (set to None for no filtering)
Returns:
ndarray: S-parameter magnitude
"""
s_mag = self.get_s_mag(port1, port2, sigma=sigma)
return np.interp(f, self.f, s_mag)
def get_gain_at_f(self, f, port1=None, port2=None, sigma=None):
"""Get gain at specified frequency.
Args:
f: frequency
port1: port 1
port2: port 2
Keyword Args:
sigma: width of filter (set to None for no filtering)
Returns:
ndarray: gain
"""
if port1 is None or port2 is None:
port1 = self.ports[1]
port2 = self.ports[0]
il = self.get_s_mag(port1, port2, sigma=sigma)
return np.interp(f, self.f, il)**2
### Plot network properties ###
def plot_sparam(self, filename=None, ax=None, **params):
"""Plot S-parameters.
Keyword Args:
filename: figure file name
ax: matplotlib axis to use
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
for i, key1 in enumerate(self.ports):
for j, key2 in enumerate(self.ports):
if i == 1:
ax.plot(self.f, self.get_s_db(key1, key2), label="S({},{})".format(key1, key2), ls='--')
else:
ax.plot(self.f, self.get_s_db(key1, key2), label="S({},{})".format(key1, key2))
ax.legend()
ax.set(**params)
ax.set_xlabel('Frequency (GHz)')
ax.set_ylabel('Magnitude (dB)')
ax.set_xlim([self.f.min(), self.f.max()])
if filename is not None:
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
return
else:
return ax
def plot_zin(self, port, filename=None, ax=None, **params):
"""Plot input impedance.
Args:
port: port to plot
Keyword Args:
filename: figure file name
ax: matplotlib axis to use
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
zin = self.get_zin(port)
ax.plot(self.f, zin.real, 'b', label='Real')
ax.plot(self.f, zin.imag, 'r', label='Imaginary')
ax.set_xlabel(r'Frequency (GHz)')
ax.set_ylabel(r'Impedance ($\Omega$)')
ax.legend(title=r'$Z_\mathrm{{in}}$ at {}'.format(port))
ax.set_xlim([self.f.min(), self.f.max()])
ax.set(**params)
if filename is not None:
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
return
else:
return ax
def plot_z0(self, port=None, filename=None, ax=None, **params):
"""Plot port impedance.
Args:
port: port to plot, will plot all if not specified
Keyword Args:
filename: figure file name
ax: matplotlib axis to use
"""
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.get_figure()
if port is not None:
zin = self.get_z0(port)
ax.plot(self.f, zin.real, 'b', label='Real')
ax.plot(self.f, zin.imag, 'r', label='Imaginary')
ax.legend(title=r'$Z_0$ at {}'.format(port))
else:
ports = self.ports
for port in ports:
zin = self.get_z0(port)
l = ax.plot(self.f, zin.real, label='{} real'.format(port))
ax.plot(self.f, zin.imag, label='{} imag'.format(port), c=l[0].get_color(), ls='--')
ax.legend()
ax.set_xlabel(r'Frequency (GHz)')
ax.set_ylabel(r'Port Impedance ($\Omega$)')
ax.set_xlim([self.f.min(), self.f.max()])
ax.set(**params)
if filename is not None:
fig.savefig(filename, bbox_inches='tight')
plt.close(fig)
return
else:
return ax
def plot(self, filename=None, ax=None, **params):
"""Plot S-parameters.
Keyword Args:
filename: figure file name
ax: matplotlib axis to use
"""
return self.plot_sparam(filename, ax=ax, **params)
# Read touchstone ------------------------------------------------------------
def _read_touchstone(filename, max_size=10000, z0=None):
"""Read Touchstone file.
Only tested with Touchstone files from HFSS.
Args:
filename: file to import
Keyword Args:
max_size (int): maximum number of lines to read
z0 (ndarray): characteristic impedance
Returns:
tuple: frequency, S-parameters, characteristic impedance, ports
"""
with open(filename, 'r') as fin:
data = fin.readlines()
# Get header
for i, line in enumerate(data):
if line[0] != '!' and line[0] != '#':
break
header = data[:i]
data = data[i:]
# Strip trailing spaces / newlines
for i in range(len(header)):
header[i] = header[i].rstrip()
for i in range(len(data)):
data[i] = data[i].rstrip()
# READ HEADER ------------------------------------------------------------
# Get port names
ports = []
for line in header:
if 'Port[' in line:
port_name = line.split()[-1]
port_name = port_name.split(':')[0]
ports.append(port_name)
ports = tuple(ports)
nports = len(ports)
# Initialize S-parameters
f = np.zeros(max_size, dtype=float)
s = np.zeros((nports, nports, max_size), dtype=complex)
# READ DATA --------------------------------------------------------------
# Remove carriage returns in data
new_data = []
for line in data:
if line == '' or line[0] != ' ':
new_data.append(line)
elif line[0] == ' ':
new_data[-1] += line
data = new_data
# Read data lines
idx = 0
for line in data:
if len(line) > 0 and line[0] != '!':
dat = [float(entry) for entry in line.split()]
f[idx] = dat[0]
dat = dat[1:]
mag = dat[::2]
arg = dat[1::2]
for i in range(nports):
for j in range(nports):
s[i,j,idx] = mag.pop(0) * np.exp(1j * _deg_to_rad(arg.pop(0)))
idx += 1
# Read other data (z0)
if z0 is None:
idx = 0
z0 = np.zeros((nports, max_size))
for line in data:
if len(line) > 0 and line[0:10] == '! Port Imp':
# TODO: read imaginary component as well!
z0[:, idx] = [float(entry) for entry in line[16:].split()[::2] if _is_float(entry)]
idx += 1
else:
z0 = z0 * np.ones((nports, max_size))
# Format output data
mask = f != 0
f = f[mask]
s = s[:,:,mask]
z0 = z0[:,mask]
# Check matrix dimensions
nports1, nports2, nfreq = s.shape
assert nports1 == nports2, "S-matrix must be square."
assert nfreq == np.alen(f)
assert nfreq == np.alen(z0[0])
assert nports1 == np.alen(z0[:,0])
# If 2-port, exchange s12 and s21 (historical reasons...)
if nports1 == 2:
s[0, 1], s[1, 0] = s[1, 0].copy(), s[0, 1].copy()  # copy so the swap does not go through numpy views
return f, s, z0, ports
# Helper functions -----------------------------------------------------------
def _is_float(entry):
"""Is float?"""
try:
float(entry)
return True
except ValueError:
return False
def _deg_to_rad(deg):
"""Degrees to radians."""
return deg * np.pi / 180
# Filters -------------------------------------------------------------------
def _gauss_conv(x, sigma=10, ext_x=3):
"""Gaussian convolution filter.
Args:
x (ndarray): noisy data
sigma (float): standard deviation of Gaussian curve
ext_x (float): extend Gaussian in each direction by ext_x * sigma
Returns:
ndarray: filtered data
"""
wind = _gauss(sigma, ext_x)
wlen = np.alen(wind)
assert wlen <= np.alen(x), "Window size must be smaller than data size"
assert sigma * ext_x >= 1, \
"Window size must be larger than 1. Increase ext_x."
s = np.r_[x[wlen - 1:0:-1], x, x[-2:-wlen - 1:-1]]
y_out = np.convolve(wind / wind.sum(), s, mode='valid')
y_out = y_out[wlen // 2:-wlen // 2 + 1]
return y_out
def _gauss(sigma, n_sigma=3):
"""Discrete, normalized Gaussian centered on zero. Used for filtering data.
Args:
sigma (float): standard deviation of Gaussian
n_sigma (float): extend x in each direction by n_sigma * sigma
Returns:
ndarray: discrete Gaussian curve
"""
x_range = n_sigma * sigma
x = np.arange(-x_range, x_range + 1e-5, 1, dtype=float)
y = 1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-0.5 * (x / sigma)**2)
return y
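# Quick illustration (not part of the original module): smoothing synthetic noisy data
# with the Gaussian filter above. The signal and sigma value are arbitrary examples;
# sigma is expressed in samples here, whereas the Network methods convert from frequency.
def _demo_gauss_conv():
    x = np.linspace(0, 2 * np.pi, 500)
    noisy = np.sin(x) + 0.1 * np.random.randn(x.size)
    smoothed = _gauss_conv(noisy, sigma=10)
    return noisy, smoothed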
# Main -----------------------------------------------------------------------
if __name__ == "__main__":
data = Network('../workbooks/data/simple-waveguide.s2p')
print(data)
print('No. ports: ', data.count_ports())
print('No. points: ', data.count_points())
print('2 port: ', data._is_2port())
print('Normalised: ', data._is_normalised())
# data.plot_sparam()
data1 = data.copy()
data1.rename_port('1', 'IN')
data1.rename_port('2', 'OUT')
data2 = data.copy()
data2.flip_ports()
data2.rename_port('1', 'IN')
data2.rename_port('2', 'OUT')
# new = data1 * data2
# new.plot_sparam()
# # data.renormalise_ports()
# # data.plot_sparam()
# # print data.z0
plt.show()
|
the-stack_106_18836
|
#Common imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
#Relative Imports
from context import kmodel
from kmodel.kronecker_model import model_loader
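# NOTE: get_mean_std_dev, get_rmse and trial_to_string are used below but not imported
# here; they are assumed to come from a local helper module that is not shown in this file.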
model_dir = '../../data/kronecker_models/'
#%%
def plot_model():
#%%
#%matplotlib qt
model_foot = model_loader('foot_model.pickle')
model_shank = model_loader('shank_model.pickle')
model_dict = {'jointangles_shank_x': model_shank,
'jointangles_foot_x': model_foot}
#Variables
##########################################################################
subject = 'AB10'
joint_angle = 'jointangles_foot_x'
model = model_dict[joint_angle]
trials = ['s0x8i0','s0x8i10','s1x2i0']
#trials = ['s0x8i0']
#trials = list(df.trial.unique())[:]
mean_plots = False
##########################################################################
joint_str = joint_angle.split('_')[1].capitalize()
try:
subject_dict = model.subjects[subject]
except KeyError:
print("Not in subject dict, checking left out dict")
subject_dict = model.one_left_out_subjects[subject]
print("In left out dict")
#Get constants
inter_subject_average_fit = model.inter_subject_average_fit
personalization_map_scaled = model.personalization_map_scaled
bad_personalization_map_scaled = model.personalization_map
gait_fingerprints = subject_dict['gait_coefficients']
bad_gait_fingerprints = subject_dict['gait_coefficients_unscaled']
optimal_fit = subject_dict['optimal_xi']
df = pd.read_parquet(subject_dict['filename'])
points_per_stride = 150
x = np.linspace(0,1+1/points_per_stride,points_per_stride)
fig, ax = plt.subplots(1,len(trials), sharex='all',sharey ='all')
if (len(trials)==1):
ax = [ax]
for i,trial in enumerate(trials):
#Level ground walking
#Get measured data
trial_df = df[df['trial'] == trial]
#Uncomment to get rmse for all trials
#trial_df = df
measured_angles = trial_df[joint_angle]
foot_mean, foot_std_dev= get_mean_std_dev(measured_angles)
#Get regressor rows
if(mean_plots == True):
foot_angle_evaluated = model.evaluate_pandas(trial_df)
else:
measured_angles_total = measured_angles
measured_angles = measured_angles[:150]
foot_angle_evaluated = model.evaluate_pandas(trial_df)[:150]
print(trial)
#Optimal fit
optimal_estimate = foot_angle_evaluated @ optimal_fit
optimal_mean, optimal_std_dev = get_mean_std_dev(optimal_estimate)
optimal_rmse = get_rmse(optimal_estimate,measured_angles)
print("Optimal rmse {}".format(optimal_rmse))
#Intersubject fit
inter_subject_estimate = foot_angle_evaluated @ inter_subject_average_fit
inter_subject_mean, inter_subject_std_dev = get_mean_std_dev(inter_subject_estimate)
inter_subject_rmse = get_rmse(inter_subject_estimate,measured_angles)
print("Inter subject average rmse {}".format(inter_subject_rmse))
#Gait fingerprint fit
gait_fingerprint_estimate = foot_angle_evaluated @ (inter_subject_average_fit + personalization_map_scaled @ gait_fingerprints)
gait_fingerprint_mean, gait_fingerprint_std_dev = get_mean_std_dev(gait_fingerprint_estimate)
gait_fingerprint_rmse = get_rmse(gait_fingerprint_estimate,measured_angles)
print("Gait fingerprint rmse {}".format(gait_fingerprint_rmse))
#Bad gait fingerprint fit
bad_gait_fingerprint_estimate = foot_angle_evaluated @ (inter_subject_average_fit + bad_personalization_map_scaled @ bad_gait_fingerprints)
bad_gait_fingerprint_mean, bad_gait_fingerprint_std_dev = get_mean_std_dev(bad_gait_fingerprint_estimate)
bad_gait_fingerprint_rmse = get_rmse(bad_gait_fingerprint_estimate,measured_angles)
print("Bad gait fingerprint rmse {}".format(bad_gait_fingerprint_rmse))
clrs = cm.get_cmap('tab20').colors
if(mean_plots == True):
#Measured
#Mean plots with width
ax[i].plot(x, foot_mean,label='Measured Foot Angle', c=clrs[0], linestyle = 'solid')
ax[i].fill_between(x, foot_mean-foot_std_dev, foot_mean+foot_std_dev ,alpha=0.3, facecolor=clrs[0])
#Optimal
ax[i].plot(x, optimal_mean,label='Optimal Fit RMSE:{:.2f}'.format(optimal_rmse), c=clrs[1])
ax[i].fill_between(x, optimal_mean-optimal_std_dev, optimal_mean+optimal_std_dev ,alpha=0.3, facecolor=clrs[1])
#Inter subject average
ax[i].plot(x, inter_subject_mean,label='Inter-Subject Average Fit RMSE:{:.2f}'.format(inter_subject_rmse), c=clrs[2])
ax[i].fill_between(x, inter_subject_mean-inter_subject_std_dev, inter_subject_mean+inter_subject_std_dev ,alpha=0.3, facecolor=clrs[2])
#Gait fingerprint
ax[i].plot(x, gait_fingerprint_mean,label='Gait Fingerprint Fit RMSE:{:.2f}'.format(gait_fingerprint_rmse), c=clrs[3])
ax[i].fill_between(x, gait_fingerprint_mean-gait_fingerprint_std_dev, gait_fingerprint_mean+gait_fingerprint_std_dev ,alpha=0.3, facecolor=clrs[3])
#Bad Gait fingerprint
ax[i].plot(x, bad_gait_fingerprint_mean,label='Bad Gait Fingerprint Fit RMSE:{:.2f}'.format(bad_gait_fingerprint_rmse), c=clrs[4])
ax[i].fill_between(x, bad_gait_fingerprint_mean-bad_gait_fingerprint_std_dev, bad_gait_fingerprint_mean+bad_gait_fingerprint_std_dev ,alpha=0.3, facecolor=clrs[4])
else:
line_width = 6
# Individual line plots
stride_data = measured_angles_total.values.reshape(-1,150)
for k in range (0,stride_data.shape[0],3):
if (150 - np.count_nonzero(stride_data[k,:]) > 20):
continue
if k == 0:
ax[i].plot(x, stride_data[k,:],label='Measured Foot Angle', linestyle = 'solid', alpha=0.2, linewidth=5, c='darkgrey')
else:
ax[i].plot(x, stride_data[k,:], linestyle = 'solid', alpha=0.3, linewidth=5, c='darkgrey')
#Inter subject average
ax[i].plot(x, inter_subject_estimate,label='Inter-Subject Average Fit RMSE:{:.2f}'.format(inter_subject_rmse),
linewidth=line_width, c=clrs[6])#, linestyle=(0, (1, 1)), alpha=0.8) #Densely dotted
#Bad Gait fingerprint
# ax[i].plot(x, bad_gait_fingerprint_estimate,label='Bad Gait Fingerprint Fit RMSE:{:.2f}'.format(bad_gait_fingerprint_rmse),
# linewidth=line_width,c=clrs[4],linestyle=(0,(6,1,1,1)), alpha=0.8)
#Optimal fit
ax[i].plot(x, optimal_estimate,label='Optimal Fit RMSE:{:.2f}'.format(optimal_rmse),
linewidth=line_width, c=clrs[2])#, linestyle=(0, (6, 1)), alpha=0.8) #Densely dash dot dotted
#Gait fingerprint
ax[i].plot(x, gait_fingerprint_estimate,label='Gait Fingerprint Fit RMSE:{:.2f}'.format(gait_fingerprint_rmse),
linewidth=line_width, c=clrs[0], linestyle='solid', alpha=0.8)
ax[i].spines["top"].set_visible(False)
ax[i].spines["right"].set_visible(False)
ax[i].title.set_text(trial_to_string(trial,joint_str))
ax[i].legend()
#%%
def plot_cumulative_variance():
pass
#%%
model_foot = model_loader('foot_model.pickle')
model_shank = model_loader('shank_model.pickle')
model_foot_dot = model_loader('foot_dot_model.pickle')
model_shank_dot = model_loader('shank_dot_model.pickle')
clrs = cm.get_cmap('tab20').colors
model = model_foot
pca_values = model.scaled_pca_eigenvalues
pca_values_sum= np.sum(pca_values)
marker_on = [5]
pca_cum_sum = np.cumsum(pca_values)/pca_values_sum
plt.plot(pca_cum_sum[:11], '-o', markevery=marker_on, linewidth=7, markersize=15, mfc = 'r', mec='r',c=clrs[0])
plt.xticks(np.arange(0, 11, 1.0))
plt.show()
|
the-stack_106_18838
|
# -*- coding: utf-8 -*-
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth.core import Auth
from osf.models.files import File, Folder, BaseFileNode
from addons.base import exceptions
from addons.swift.serializer import SwiftSerializer
from addons.swift.utils import container_exists, get_container_names
from addons.swift.provider import SwiftProvider
class SwiftFileNode(BaseFileNode):
_provider = 'swift'
class SwiftFolder(SwiftFileNode, Folder):
pass
class SwiftFile(SwiftFileNode, File):
version_identifier = 'version'
class UserSettings(BaseOAuthUserSettings):
oauth_provider = SwiftProvider
serializer = SwiftSerializer
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = SwiftProvider
serializer = SwiftSerializer
folder_id = models.TextField(blank=True, null=True)
folder_name = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
if self._api is None:
self._api = SwiftProvider(self.external_account)
return self._api
@property
def folder_path(self):
return self.folder_name
@property
def display_name(self):
return u'{0}: {1}'.format(self.config.full_name, self.folder_id)
def set_folder(self, folder_id, auth):
provider = SwiftProvider(self.external_account)
if not container_exists(provider.auth_version,
provider.auth_url, provider.username,
provider.user_domain_name, provider.password,
provider.tenant_name,
provider.project_domain_name, folder_id):
error_message = ('We are having trouble connecting to that container. '
'Try a different one.')
raise exceptions.InvalidFolderError(error_message)
self.folder_id = str(folder_id)
self.folder_name = str(folder_id)
self.save()
self.nodelogger.log(action='bucket_linked', extra={'bucket': str(folder_id)}, save=True)
def get_folders(self, **kwargs):
try:
containers = get_container_names(self)
except Exception:
raise exceptions.InvalidAuthError()
return [
{
'addon': 'swift',
'kind': 'folder',
'id': container,
'name': container,
'path': container,
'urls': {
'folders': ''
}
}
for container in containers
]
@property
def complete(self):
return self.has_auth and self.folder_id is not None
def authorize(self, user_settings, save=False):
self.user_settings = user_settings
self.nodelogger.log(action='node_authorized', save=save)
def clear_settings(self):
self.folder_id = None
self.folder_name = None
def deauthorize(self, auth=None, log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
self.clear_auth() # Also performs a save
if log:
self.nodelogger.log(action='node_deauthorized', save=True)
def delete(self, save=True):
self.deauthorize(log=False)
super(NodeSettings, self).delete(save=save)
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Cannot serialize credentials for OpenStack Swift addon')
provider = SwiftProvider(self.external_account)
return {
'auth_version': provider.auth_version,
'auth_url': provider.auth_url,
'tenant_name': provider.tenant_name,
'user_domain_name': provider.user_domain_name,
'project_domain_name': provider.project_domain_name,
'username': provider.username,
'password': provider.password
}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('Cannot serialize settings for OpenStack Swift addon')
return {
'container': self.folder_id
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file', path=metadata['path'], provider='swift')
self.owner.add_log(
'swift_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'path': metadata['materialized'],
'bucket': self.folder_id,
'urls': {
'view': url,
'download': url + '?action=download'
}
},
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), log=True)
|
the-stack_106_18839
|
# encoding: utf-8
"""
Unit tests for the sumatra.recordstore package
"""
from __future__ import unicode_literals
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import object
import unittest
import os
import sys
import tempfile
import shutil
from datetime import datetime, timedelta
from glob import glob
from sumatra.records import Record
from sumatra.programs import Executable
from sumatra.recordstore import (shelve_store, django_store, http_store,
serialization, get_record_store)
from sumatra.versioncontrol import vcs_list
import sumatra.launch
import sumatra.datastore
import sumatra.parameters
from sumatra.core import component
import json
import urllib.parse
originals = []
django_store1 = None
django_store2 = None
django_dir = None
this_directory = os.path.dirname(__file__)
@component
class MockExecutable(Executable):
name = "a.out"
executable_names = ("/usr/local/bin/a.out",)
file_extensions = []
path = "/usr/local/bin/a.out"
version = "999"
options = "-v"
def __init__(self, *args, **kwargs):
pass
class MockRepository(object):
url = "http://svn.example.com/"
upstream = None
type = "MockRepository"
def __init__(self, *args, **kwargs):
pass
class MockLaunchMode(object):
type = "SerialLaunchMode"
def __getstate__(self):
return {}
class MockDataStore(object):
type = "FileSystemDataStore"
def __init__(self, **parameters):
self.root = parameters.get("root", None) or "/tmp"
def __getstate__(self):
return {'root': self.root}
def copy(self):
return self
class MockDependency(object):
name = "some_module"
path = "/usr/lib/python/some_module.py"
version = "1.0"
diff = ""
module = "python"
source = "http://git.example.com/"
class MockPlatformInformation(object):
architecture_bits = 32
architecture_linkage = ""
machine = ""
network_name = ""
ip_addr = "192.168.0.0"
processor = ""
release = ""
system_name = ""
version = ""
class MockParameterSet(object):
def __init__(self, initialiser):
pass
def as_dict(self):
return {}
def pop(self, k, d):
return None
class MockRecord(object):
def __init__(self, label, timestamp=datetime.now()):
self.label = label
self.timestamp = timestamp
self.reason = "because"
self.duration = 7543.2
self.outcome = None
self.stdout_stderr = "ok"
self.main_file = "test"
self.version = "99863a9dc5f"
self.output_data = []
self.executable = MockExecutable()
self.repository = MockRepository()
self.launch_mode = MockLaunchMode()
self.datastore = MockDataStore()
self.input_datastore = MockDataStore()
self.parameters = MockParameterSet({})
self.tags = set([])
self.dependencies = [MockDependency(), MockDependency()]
self.platforms = [MockPlatformInformation()]
self.diff = ""
self.user = "michaelpälin"
self.input_data = []
self.script_arguments = "arg1 arg2"
self.repeats = None
def __eq__(self, other):
return self.label == other.label and self.duration == other.duration
class MockProject(object):
name = "TestProject"
def setup():
global django_store1, django_store2, django_dir
django_dir = tempfile.mkdtemp(prefix='sumatra-test-')
cwd_before = os.getcwd()
os.chdir(django_dir)
sumatra.launch.MockLaunchMode = MockLaunchMode
sumatra.datastore.MockDataStore = MockDataStore
sumatra.parameters.MockParameterSet = MockParameterSet
if django_store1 is None:
django_store1 = django_store.DjangoRecordStore(db_file="test.db")
if django_store2 is None:
django_store2 = django_store.DjangoRecordStore(db_file="test2.db")
django_store.db_config.configure()
vcs_list.append(sys.modules[__name__])
os.chdir(cwd_before)
def teardown():
global django_store1, django_store2, django_dir
del sumatra.launch.MockLaunchMode
del sumatra.datastore.MockDataStore
del sumatra.parameters.MockParameterSet
shutil.rmtree(django_dir)
vcs_list.remove(sys.modules[__name__])
class BaseTestRecordStore(object):
def setUp(self):
self.dir = tempfile.mkdtemp(prefix='sumatra-test-')
self.cwd_before_test = os.getcwd()
os.chdir(self.dir)
def tearDown(self):
django_store1.delete_all()
django_store2.delete_all()
os.chdir(self.cwd_before_test)
shutil.rmtree(self.dir)
def add_some_records(self):
# records must have a delta timestamp of one second as RecordStores
# might serialize the record using serialization.encode_record
# (like HttpRecordStore). There, the timestamp will be cut off
# milliseconds.
now = datetime.now()
r1 = MockRecord("record1", timestamp=now - timedelta(seconds=2))
r2 = MockRecord("record2", timestamp=now - timedelta(seconds=1))
r3 = MockRecord("record3", timestamp=now)
for r in r1, r2, r3:
#print "saving record %s" % r.label
self.store.save(self.project.name, r)
def add_some_tags(self):
# records must have a delta timestamp of one second as RecordStores
# might serialize the record using serialization.encode_record
# (like HttpRecordStore). There, the timestamp will be cut off
# milliseconds.
now = datetime.now()
r1 = MockRecord("record1", timestamp=now - timedelta(seconds=1))
r3 = MockRecord("record3", timestamp=now)
r1.tags.add("tag1")
r1.tags.add("tag2")
r3.tags.add("tag1")
self.store.save(self.project.name, r1)
self.store.save(self.project.name, r3)
def test_create_record_store_should_not_produce_errors(self):
pass
def test_save_should_not_produce_errors(self):
self.add_some_records()
def test_get(self):
self.add_some_records()
r = self.store.get(self.project.name, "record1")
assert isinstance(r, (MockRecord, Record)), type(r)
assert r.label == "record1", r.label
def test_get_nonexistent_record_raises_KeyError(self):
self.assertRaises(KeyError, self.store.get, self.project.name, "foo")
def test_list_without_tags_should_return_all_records(self):
self.add_some_records()
records = self.store.list(self.project.name)
assert isinstance(records, list), type(records)
self.assertEqual(len(records), 3)
def test_list_for_tags_should_filter_records_appropriately(self):
self.add_some_records()
self.add_some_tags()
records = self.store.list(self.project.name, "tag1")
self.assertEqual(len(records), 2)
def test_labels_without_tags_should_return_all_labels(self):
self.add_some_records()
labels = self.store.labels(self.project.name)
assert isinstance(labels, list), type(labels)
self.assertEqual(len(labels), 3)
self.assertIsInstance(labels[0], str)
def test_labels_for_tags_should_filter_records_appropriately(self):
self.add_some_records()
self.add_some_tags()
labels = self.store.labels(self.project.name, "tag1")
self.assertEqual(len(labels), 2)
self.assertIsInstance(labels[0], str)
def test_delete_removes_record(self):
self.add_some_records()
key = "record1"
self.store.delete(self.project.name, key)
self.assertRaises(KeyError, self.store.get, self.project.name, key)
def test_delete_by_tag(self):
self.add_some_records()
self.assertEqual(len(self.store.list(self.project.name)), 3)
self.add_some_tags()
r = self.store.get(self.project.name, "record1")
self.assertEqual(r.tags, set(['tag1', 'tag2']))
n = self.store.delete_by_tag(self.project.name, "tag1")
self.assertEqual(n, 2)
self.assertEqual(len(self.store.list(self.project.name)), 1)
self.assertRaises(KeyError, self.store.get, self.project.name, "record1")
def test_delete_nonexistent_label(self):
self.add_some_records()
self.assertRaises(Exception, # could be KeyError or DoesNotExist
self.store.delete,
self.project.name,
"notarecord")
# should emit warning but not exception
def test_str(self):
#this test is pointless, just to increase coverage
assert isinstance(str(self.store), str)
def test_most_recent(self):
self.add_some_records()
self.assertEqual(self.store.most_recent(self.project.name), "record3")
self.store.delete(self.project.name, "record3")
self.assertEqual(self.store.most_recent(self.project.name), "record2")
def test_sync_with_shelve_store(self):
self.add_some_records()
other_store = shelve_store.ShelveRecordStore(shelf_name="test_record_store2")
self.assertEqual(len(other_store.list(self.project.name)), 0)
self.store.sync(other_store, self.project.name)
self.assertEqual(sorted(rec.label for rec in self.store.list(self.project.name)),
sorted(rec.label for rec in other_store.list(self.project.name)))
def test_sync_with_django_store(self):
other_store = django_store2
self.add_some_records()
self.assertEqual(len(other_store.list(self.project.name)), 0)
self.store.sync(other_store, self.project.name)
self.assertEqual(sorted(rec.label for rec in self.store.list(self.project.name)),
sorted(rec.label for rec in other_store.list(self.project.name)))
def test_update(self):
self.add_some_records()
self.store.update(self.project.name, "datastore.root", "/new/path/to/store")
updated_value, = set(rec.datastore.root for rec in self.store.list(self.project.name))
self.assertEqual(updated_value, "/new/path/to/store")
def test_clear(self):
self.add_some_records()
self.store.clear()
class TestShelveRecordStore(unittest.TestCase, BaseTestRecordStore):
def setUp(self):
BaseTestRecordStore.setUp(self)
self.store = shelve_store.ShelveRecordStore(shelf_name="test_record_store")
self.project = MockProject()
def tearDown(self):
del self.store # this is necessary when using the dumbdbm module, which otherwise creates files after test
BaseTestRecordStore.tearDown(self)
def test_record_store_is_pickleable(self):
import pickle
self.add_some_records()
s = pickle.dumps(self.store)
del self.store
self.store = pickle.loads(s)
self.assertEqual(self.store._shelf_name, "test_record_store")
class TestDjangoRecordStore(unittest.TestCase, BaseTestRecordStore):
def setUp(self):
BaseTestRecordStore.setUp(self)
self.store = django_store1
self.project = MockProject()
def tearDown(self):
BaseTestRecordStore.tearDown(self)
def test_record_store_is_pickleable(self):
import pickle
self.add_some_records()
s = pickle.dumps(self.store)
del self.store
django_store.db_config.configured = False
django_store.db_config._settings['DATABASES'] = {}
unpickled = pickle.loads(s)
#assert unpickled._shelf_name == "test_record_store"
#assert os.path.exists(unpickled._shelf_name)
class MockResponse(object):
def __init__(self, status):
self.status = status
def check_record(record):
# this is a rather basic test. Should also check the keys of the
# subsidiary dicts, and the types of the values
assert set(record.keys()) == set(["executable", "parameters", "repository",
"tags", "main_file", "label", "platforms",
"reason", "version", "user", "launch_mode",
"timestamp", "duration", "diff",
"datastore", "outcome", "output_data",
"dependencies", "input_data",
"script_arguments", "stdout_stderr",
"input_datastore", "repeats"])
class MockCredentials(object):
credentials = [['domain', 'username', 'password']]
class MockHttp(object):
def __init__(self, *args, **kwargs):
self.records = {}
self.debug = False
self.last_record = None
self.credentials = MockCredentials()
def add_credentials(self, *args, **kwargs):
pass
def request(self, uri, method="GET", body=None, headers=None, **kwargs):
u = urllib.parse.urlparse(uri)
parts = u.path.split("/")[1:-1]
if self.debug:
print("\n<<<<< %s %s %d %s %s %s %s %s" % (uri, u.path, len(parts),
method, body, headers,
u.params, u.query))
if len(parts) == 2: # record uri
if method == "PUT":
record = json.loads(body)
check_record(record)
self.records[parts[1]] = record
content = ""
status = 200
self.last_record = record
elif method == "GET":
label = parts[1]
if label == "last":
content = json.dumps(self.last_record)
else:
content = json.dumps(self.records[label])
status = 200
elif method == "DELETE":
self.records.pop(parts[1])
most_recent = ""
for record in self.records.values():
if record["timestamp"] > most_recent:
most_recent = record["timestamp"]
self.last_record = record
content = ""
status = 204
elif len(parts) == 1: # project uri
if method == "GET":
if u.query:
tags = u.query.split("=")[1].split(",")
records = set([])
for tag in tags:
records = records.union(["%s://%s/%s/%s/" % (
u.scheme, u.netloc, parts[0], path)
for path in self.records.keys() if tag in self.records[path]['tags']])
records = list(records)
else:
records = ["%s://%s/%s/%s/" % (u.scheme, u.netloc, parts[0], path)
for path in self.records.keys()]
content = json.dumps({"records": records, "name": "TestProject", "description": ""})
status = 200
elif method == "PUT":
content = ""
status = 201
elif len(parts) == 3: # tagged records uri
if method == "DELETE":
tag = parts[2]
n = 0
for key, record in list(self.records.items()):
if tag in record["tags"]:
self.records.pop(key)
n += 1
status = 200
content = str(n)
elif len(parts) == 0: # list projects uri
status = 200
content = '[{"id": "TestProject"}]'
if self.debug:
print(">>>>> %s %s" % (status, content))
return MockResponse(status), content
class MockHttpLib(object):
@staticmethod
def Http(*args, **kwargs):
return MockHttp(*args, **kwargs)
class TestHttpRecordStore(unittest.TestCase, BaseTestRecordStore):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
self.real_httplib = http_store.httplib2
http_store.httplib2 = MockHttpLib()
def __del__(self):
http_store.httplib2 = self.real_httplib
def setUp(self):
BaseTestRecordStore.setUp(self)
self.store = http_store.HttpRecordStore("http://127.0.0.1:8000/", "testuser", "z6Ty49HY")
self.project = MockProject()
def tearDown(self):
BaseTestRecordStore.tearDown(self)
def test_record_store_is_pickleable(self):
import pickle
self.add_some_records()
s = pickle.dumps(self.store)
del self.store
unpickled = pickle.loads(s)
def test_process_url(self):
url, username, password = http_store.process_url("http://foo:bar@example.com:8000/path/file.html")
self.assertEqual(username, "foo")
self.assertEqual(password, "bar")
self.assertEqual(url, "http://example.com:8000/path/file.html")
def test_list_projects(self):
self.assertEqual(self.store.list_projects(), [self.project.name])
def test_create_project(self):
self.store.create_project("NewProject", "A new test project", "A long description")
def test_project_info(self):
self.assertEqual(self.store.project_info("TestProject")["name"], "TestProject")
def test_clear(self):
pass # override base class test to avoid UserWarning
class TestSerialization(unittest.TestCase):
maxDiff = None
def test_build_record_v0p4(self):
with open(os.path.join(this_directory, "example_0.4.json")) as fp:
record = serialization.build_record(json.load(fp))
self.assertEqual(record.label, "haggling")
def test_build_record_v0p3(self):
with open(os.path.join(this_directory, "example_0.3.json")) as fp:
record = serialization.build_record(json.load(fp))
self.assertEqual(record.label, "haggling")
def test_build_record_v0p5(self):
with open(os.path.join(this_directory,"example_0.5.json")) as fp:
record = serialization.build_record(json.load(fp))
self.assertEqual(record.label, "haggling")
def test_build_record_v0p6(self):
with open(os.path.join(this_directory, "example_0.6.json")) as fp:
record = serialization.build_record(json.load(fp))
self.assertEqual(record.label, "haggling")
def test_round_trip(self):
with open(os.path.join(this_directory, "example_0.7.json")) as fp:
data_in = json.load(fp)
record = serialization.build_record(data_in)
data_out = json.loads(serialization.encode_record(record, indent=2))
# tags in records are a set, hence have arbitrary order.
self.assertTrue('tags' in data_out)
data_in['tags'] = sorted(data_in['tags'])
data_out['tags'] = sorted(data_out['tags'])
self.assertEqual(data_in, data_out)
def test_encode_project_info(self):
serialization.encode_project_info("foo", "description of foo")
class TestModuleFunctions(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp(prefix='sumatra-test-')
self.cwd_before_test = os.getcwd()
os.chdir(self.dir)
def tearDown(self):
os.chdir(self.cwd_before_test)
shutil.rmtree(self.dir)
def test_get_record_store_http(self):
self.assertIsInstance(get_record_store("http://records.example.com/"),
http_store.HttpRecordStore)
def test_get_record_store_shelve(self):
store = shelve_store.ShelveRecordStore(shelf_name="test_record_store.shelf")
key = "foo".__str__() # string wrapping is necessary for dumbdbm, which fails with unicode in Py2
store.shelf[key] = "bar"
store.shelf.sync()
del store
assert len(glob("test_record_store.shelf*")) > 0
self.assertIsInstance(get_record_store("test_record_store.shelf"),
shelve_store.ShelveRecordStore)
def test_get_record_store_create_shelve(self):
assert len(glob("test_record_store.shelf*")) == 0
self.assertIsInstance(get_record_store("test_record_store.shelf"),
shelve_store.ShelveRecordStore)
if __name__ == '__main__':
setup()
unittest.main()
teardown()
|
the-stack_106_18840
|
# Copyright (c) 2021 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
import contextlib
import os
import pathlib
import tempfile
from typing import AsyncIterator
from later.unittest.backport import async_case
import libtorrent as lt
from tvaf import concurrency
from tvaf import config as config_lib
from tvaf import resume as resume_lib
from tvaf import services
from . import lib
from . import tdummy
class TemporaryDirectoryTestCase(async_case.IsolatedAsyncioTestCase):
async def asyncSetUp(self) -> None:
self.cwd = await concurrency.to_thread(pathlib.Path.cwd)
self.tempdir = await concurrency.to_thread(tempfile.TemporaryDirectory)
await concurrency.to_thread(os.chdir, self.tempdir.name)
self.config = lib.create_isolated_config()
await self.config.write_to_disk(services.CONFIG_PATH)
async def asyncTearDown(self) -> None:
await concurrency.to_thread(os.chdir, self.cwd)
await concurrency.to_thread(self.tempdir.cleanup)
class LifespanTest(TemporaryDirectoryTestCase):
@contextlib.asynccontextmanager
async def start_stop_session(self) -> AsyncIterator[None]:
await services.startup()
yield
await services.shutdown()
async def test_with_config(self) -> None:
self.assertTrue(await concurrency.to_thread(services.CONFIG_PATH.is_file))
async with self.start_stop_session():
pass
async def test_empty_directory(self) -> None:
# this technically breaks isolation (non-isolated config listens on
# default ports and will bootstrap dht, etc), but it must be tested!
await concurrency.to_thread(services.CONFIG_PATH.unlink)
contents = await concurrency.alist(
concurrency.iter_in_thread(pathlib.Path().iterdir())
)
self.assertEqual(contents, [])
async with self.start_stop_session():
pass
async def test_set_config(self) -> None:
async with self.start_stop_session():
self.config["__test_key__"] = "value"
await services.set_config(self.config)
# Test loaded into available config
config = await services.get_config()
self.assertEqual(config["__test_key__"], "value")
# Test written to disk
config = await config_lib.Config.from_disk(services.CONFIG_PATH)
self.assertEqual(config, self.config)
async def test_set_invalid_config(self) -> None:
async with self.start_stop_session():
self.config["torrent_default_storage_mode"] = "invalid"
with self.assertRaises(config_lib.InvalidConfigError):
await services.set_config(self.config)
config = await services.get_config()
self.assertNotEqual(
config.get_str("torrent_default_storage_mode"), "invalid"
)
async def test_save_and_load_resume_data(self) -> None:
async with self.start_stop_session():
session = await services.get_session()
atp = tdummy.DEFAULT.atp()
atp.save_path = self.tempdir.name
session.async_add_torrent(atp)
resume_data = await concurrency.alist(
resume_lib.iter_resume_data_from_disk(services.RESUME_DATA_PATH)
)
self.assertEqual(len(resume_data), 1)
async with self.start_stop_session():
session = await services.get_session()
torrents = await concurrency.to_thread(session.get_torrents)
self.assertEqual(len(torrents), 1)
async def test_process_lock(self) -> None:
async with self.start_stop_session():
with self.assertRaises(AssertionError):
await services.startup()
class TestDefaultATP(TemporaryDirectoryTestCase):
async def asyncSetUp(self) -> None:
await super().asyncSetUp()
await services.startup()
async def asyncTearDown(self) -> None:
await services.shutdown()
await super().asyncTearDown()
async def test_config_defaults(self) -> None:
save_path = str(await concurrency.to_thread(pathlib.Path("download").resolve))
config = await services.get_config()
self.assertEqual(config["torrent_default_save_path"], save_path)
atp = await services.get_default_atp()
self.assertEqual(atp.save_path, save_path)
self.assertEqual(atp.flags, lt.torrent_flags.default_flags)
self.assertEqual(atp.storage_mode, lt.add_torrent_params().storage_mode)
async def test_set_non_defaults(self) -> None:
# Set all non-default configs
config = config_lib.Config(
torrent_default_save_path=self.tempdir.name,
torrent_default_flags_apply_ip_filter=False,
torrent_default_storage_mode="allocate",
)
await services.set_config(config)
atp = await services.get_default_atp()
self.assertEqual(atp.save_path, self.tempdir.name)
self.assertEqual(
atp.flags,
lt.torrent_flags.default_flags & ~lt.torrent_flags.apply_ip_filter,
)
self.assertEqual(atp.storage_mode, lt.storage_mode_t.storage_mode_allocate)
async def test_save_path_loop(self) -> None:
bad_link = pathlib.Path("bad_link")
await concurrency.to_thread(
bad_link.symlink_to, bad_link, target_is_directory=True
)
config = config_lib.Config(torrent_default_save_path=str(bad_link))
with self.assertRaises(config_lib.InvalidConfigError):
await services.set_config(config)
async def test_flags_apply_ip_filter_null(self) -> None:
config = config_lib.Config(torrent_default_flags_apply_ip_filter=None)
with self.assertRaises(config_lib.InvalidConfigError):
await services.set_config(config)
async def test_storage_mode_invalid(self) -> None:
config = config_lib.Config(torrent_default_storage_mode="invalid")
with self.assertRaises(config_lib.InvalidConfigError):
await services.set_config(config)
|
the-stack_106_18841
|
#!/usr/bin/env python3
import localpath
from typing import Iterator, Sequence
from libcpu.opcode_builder import MicroCode
from libcpu.util import ControlSignal
from libcpu.opcodes import opcodes, fetch
from libcpu.ctrl_word import CtrlWord
control = CtrlWord()
def finalize_steps(microcode: MicroCode, flags: int) -> Iterator[Sequence[ControlSignal]]:
# fetch stage
for f_step in fetch:
yield f_step
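# fill the remaining of the 8 control-store slots for this opcode/flag combination with
# the instruction's own micro-steps, yielding an empty step where none is defined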
for s_idx in range(8-len(fetch)):
step = microcode.get_step(s_idx, flags)
if step is not None:
yield step
else:
yield []
def generate_microcode() -> None:
for key, microcode in opcodes.items():
for f in range(16):
process_steps(key, microcode, f)
def process_steps(key: str, microcode: MicroCode, flags: int) -> None:
for step, pins in enumerate(finalize_steps(microcode, flags)):
control.reset()
for pin in pins:
pin.enable()
print ("{0:13} {4:02x} {1:04b} {2} {3:016b} {3:04x}".format(key, flags, step, control.c_word, microcode.opcode))
if __name__ == "__main__":
generate_microcode()
|