# coding=utf-8
# Copyright 2019 The SEED Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for utils."""
import collections
from absl.testing import parameterized
import numpy as np
from seed_rl.common import utils
import tensorflow as tf
class NonDyingEnvsTest(tf.test.TestCase):
def test_basic(self):
nondying_envs_mask, nondying_env_ids = utils.get_non_dying_envs(
envs_needing_reset=tf.constant([], dtype=tf.int32),
reset_mask=tf.constant([False, False]),
env_ids=tf.constant([0, 1]))
self.assertAllEqual(nondying_envs_mask, [True, True])
self.assertAllEqual(nondying_env_ids, [0, 1])
def test_has_resets(self):
nondying_envs_mask, nondying_env_ids = utils.get_non_dying_envs(
envs_needing_reset=tf.constant([6], dtype=tf.int32),
reset_mask=tf.constant([False, True]),
env_ids=tf.constant([5, 6]))
self.assertAllEqual(nondying_envs_mask, [True, True])
self.assertAllEqual(nondying_env_ids, [5, 6])
def test_has_dying_env(self):
nondying_envs_mask, nondying_env_ids = utils.get_non_dying_envs(
envs_needing_reset=tf.constant([6], dtype=tf.int32),
reset_mask=tf.constant([False, False, True, False]),
env_ids=tf.constant([0, 5, 6, 6]))
self.assertAllEqual(nondying_envs_mask, [True, True, True, False])
self.assertAllEqual(nondying_env_ids, [0, 5, 6])
def test_repeated_dying_envs(self):
with self.assertRaises(tf.errors.InvalidArgumentError):
utils.get_non_dying_envs(
envs_needing_reset=tf.constant([6], dtype=tf.int32),
reset_mask=tf.constant([False, False, False, True, True]),
env_ids=tf.constant([0, 5, 6, 6, 6]))
def test_multiple_new_transitions(self):
with self.assertRaises(tf.errors.InvalidArgumentError):
utils.get_non_dying_envs(
          # Such a duplicate happens when a given actor has 2 new transitions
          # with new run IDs (e.g. it restarted twice while the same inference
          # batch is being processed).
envs_needing_reset=tf.constant([1, 1], dtype=tf.int32),
reset_mask=tf.constant([False, True, True, False]),
env_ids=tf.constant([0, 1, 1, 2]))
class UnrollStoreTest(tf.test.TestCase):
def test_duplicate_actor_id(self):
store = utils.UnrollStore(
num_envs=2, unroll_length=3, timestep_specs=tf.TensorSpec([], tf.int32))
with self.assertRaises(tf.errors.InvalidArgumentError):
store.append(
tf.constant([2, 2], dtype=tf.int32),
tf.constant([42, 43], dtype=tf.int32))
def test_full(self):
store = utils.UnrollStore(
num_envs=4, unroll_length=3, timestep_specs=tf.TensorSpec([], tf.int32))
def gen():
yield False, 0, 10
yield False, 2, 30
yield False, 1, 20
yield False, 0, 11
yield False, 2, 31
yield False, 3, 40
yield False, 0, 12
yield False, 2, 32
yield False, 3, 41
yield False, 0, 13 # Unroll: 10, 11, 12, 13
yield False, 1, 21
yield True, 2, 33 # No unroll because of reset
yield False, 0, 14
yield False, 2, 34
yield False, 3, 42
yield False, 0, 15
yield False, 1, 22
yield False, 2, 35
yield False, 0, 16 # Unroll: 13, 14, 15, 16
yield False, 1, 23 # Unroll: 20, 21, 22, 23
yield False, 2, 36 # Unroll: 33, 34, 35, 36
dataset = tf.data.Dataset.from_generator(gen, (tf.bool, tf.int32, tf.int32),
([], [], []))
dataset = dataset.batch(3, drop_remainder=True)
i = iter(dataset)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
self.assertAllEqual(tf.zeros([0, 4]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
self.assertAllEqual(tf.zeros([0, 4]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
self.assertAllEqual(tf.zeros([0, 4]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([0]), completed_ids)
self.assertAllEqual(tf.constant([[10, 11, 12, 13]]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
self.assertAllEqual(tf.zeros([0, 4]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
self.assertAllEqual(tf.zeros([0, 4]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([0, 1, 2]), completed_ids)
self.assertAllEqual(
tf.constant([[13, 14, 15, 16], [20, 21, 22, 23], [33, 34, 35, 36]]),
unrolls)
def test_structure(self):
named_tuple = collections.namedtuple('named_tuple', 'x y')
num_envs = 2
unroll_length = 10
store = utils.UnrollStore(
num_envs=num_envs,
unroll_length=unroll_length,
timestep_specs=named_tuple(
x=tf.TensorSpec([], tf.int32), y=tf.TensorSpec([], tf.int32)))
for _ in range(unroll_length):
completed_ids, unrolls = store.append(
tf.range(num_envs),
named_tuple(
tf.zeros([num_envs], tf.int32), tf.zeros([num_envs], tf.int32)))
self.assertAllEqual(tf.constant(()), completed_ids)
self.assertAllEqual(
named_tuple(
tf.zeros([0, unroll_length + 1]),
tf.zeros([0, unroll_length + 1])), unrolls)
completed_ids, unrolls = store.append(
tf.range(num_envs),
named_tuple(
tf.zeros([num_envs], tf.int32), tf.zeros([num_envs], tf.int32)))
self.assertAllEqual(tf.range(num_envs), completed_ids)
self.assertAllEqual(
named_tuple(
tf.zeros([num_envs, unroll_length + 1]),
tf.zeros([num_envs, unroll_length + 1])), unrolls)
def test_overlap_2(self):
store = utils.UnrollStore(
num_envs=2,
unroll_length=2,
timestep_specs=tf.TensorSpec([], tf.int32),
num_overlapping_steps=2)
def gen():
yield False, 0, 10
yield False, 1, 20
yield False, 0, 11
yield False, 1, 21
yield False, 0, 12 # Unroll: 0, 0, 10, 11, 12
yield True, 1, 22
yield False, 0, 13
yield False, 1, 23
yield False, 0, 14 # Unroll: 10, 11, 12, 13, 14
yield False, 1, 24 # Unroll: 0, 0, 22, 23, 24
yield True, 0, 15
yield False, 1, 25
yield False, 0, 16
yield False, 1, 26 # Unroll: 22, 23, 24, 25, 26
yield False, 0, 17 # Unroll: 0, 0, 15, 16, 17
yield False, 1, 27
dataset = tf.data.Dataset.from_generator(gen, (tf.bool, tf.int32, tf.int32),
([], [], []))
dataset = dataset.batch(2, drop_remainder=True)
i = iter(dataset)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([0]), completed_ids)
self.assertAllEqual(tf.constant([[0, 0, 10, 11, 12]]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([0, 1]), completed_ids)
self.assertAllEqual(
tf.constant([[10, 11, 12, 13, 14], [0, 0, 22, 23, 24]]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.zeros([0]), completed_ids)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([1]), completed_ids)
self.assertAllEqual(tf.constant([[22, 23, 24, 25, 26]]), unrolls)
should_reset, env_ids, values = next(i)
store.reset(env_ids[should_reset])
completed_ids, unrolls = store.append(env_ids, values)
self.assertAllEqual(tf.constant([0]), completed_ids)
self.assertAllEqual(tf.constant([[0, 0, 15, 16, 17]]), unrolls)
class AggregatorTest(tf.test.TestCase):
def test_full(self):
agg = utils.Aggregator(num_envs=4, specs=tf.TensorSpec([], tf.int32))
self.assertAllEqual([0, 0, 0, 0], agg.read([0, 1, 2, 3]))
agg.add([0, 1], tf.convert_to_tensor([42, 43]))
self.assertAllEqual([42, 43], agg.read([0, 1]))
self.assertAllEqual([42, 43, 0, 0], agg.read([0, 1, 2, 3]))
agg.reset([0])
self.assertAllEqual([0, 43, 0, 0], agg.read([0, 1, 2, 3]))
agg.replace([0, 2], tf.convert_to_tensor([1, 2]))
self.assertAllEqual([1, 43, 2, 0], agg.read([0, 1, 2, 3]))
class BatchApplyTest(tf.test.TestCase):
def test_simple(self):
def f(a, b):
return tf.reduce_sum(a, axis=-1), tf.reduce_max(b, axis=-1)
a_sum, b_max = utils.batch_apply(f, (
tf.constant([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
tf.constant([[[8, 9], [10, 11]], [[12, 13], [14, 15]]]),
))
self.assertAllEqual(tf.constant([[1, 5], [9, 13]]), a_sum)
self.assertAllEqual(tf.constant([[9, 11], [13, 15]]), b_max)
class PrioritizedReplayTest(tf.test.TestCase):
def test_simple(self):
rb = utils.PrioritizedReplay(
size=2,
specs=tf.TensorSpec([], tf.int32),
importance_sampling_exponent=.5)
insert_indices = rb.insert(tf.constant([1, 2]), tf.constant([1., 1.]))
self.assertAllEqual([0, 1], insert_indices)
sampled_indices, weights, sampled_values = rb.sample(2, .5)
self.assertAllEqual(sampled_indices + 1, sampled_values)
self.assertAllEqual([1., 1.], weights)
sampled_indices, weights, sampled_values = rb.sample(2, 0)
self.assertAllEqual(sampled_indices + 1, sampled_values)
self.assertAllEqual([1., 1.], weights)
def test_nests(self):
specs = (tf.TensorSpec([], tf.int32), [tf.TensorSpec([2], tf.int64)])
zeros = tf.nest.map_structure(lambda ts: tf.zeros([1] + ts.shape, ts.dtype),
specs)
rb = utils.PrioritizedReplay(
size=2, specs=specs, importance_sampling_exponent=.5)
_ = rb.insert(zeros, tf.constant([1.]))
_, _, sampled_values = rb.sample(1, .5)
tf.nest.map_structure(self.assertAllEqual, zeros, sampled_values)
def test_update_priorities(self):
rb = utils.PrioritizedReplay(
size=2,
specs=tf.TensorSpec([], tf.int32),
importance_sampling_exponent=.5)
insert_indices = rb.insert(tf.constant([1, 2]), tf.constant([1., 1.]))
self.assertAllEqual([0, 1], insert_indices)
rb.update_priorities([0], [100])
sampled_indices, weights, sampled_values = rb.sample(2, .5)
self.assertAllEqual([0, 0], sampled_indices)
self.assertAllEqual([1, 1], sampled_values)
self.assertAllEqual([1., 1.], weights)
def test_initial_priorities(self):
tf.random.set_seed(5)
rb = utils.PrioritizedReplay(
size=2,
specs=tf.TensorSpec([], tf.int32),
importance_sampling_exponent=.5)
rb.insert(tf.constant([1, 2]), tf.constant([0.1, 0.9]))
num_sampled = 1000
_, _, sampled_values = rb.sample(num_sampled, 1)
counted_values = collections.Counter(sampled_values.numpy())
self.assertGreater(counted_values[1], num_sampled * 0.1 * 0.7)
self.assertLess(counted_values[1], num_sampled * 0.1 * 1.3)
def _check_weights(self, sampled_weights, sampled_values, expected_weights):
actual_weights = [None, None]
for w, v in zip(sampled_weights, sampled_values):
if actual_weights[v.numpy()] is None:
actual_weights[v.numpy()] = w
else:
self.assertAllClose(actual_weights[v.numpy()], w,
msg='v={}'.format(v))
self.assertAllClose(actual_weights, expected_weights)
def test_importance_sampling_weights1(self):
tf.random.set_seed(5)
rb = utils.PrioritizedReplay(
size=2,
specs=tf.TensorSpec([], tf.int32),
importance_sampling_exponent=1)
rb.insert(tf.constant([0, 1]), tf.constant([0.3, 0.9]))
_, weights, sampled_values = rb.sample(100, 1)
expected_weights = np.array([
(0.3 + 0.9) / 0.3,
(0.3 + 0.9) / 0.9,
])
expected_weights /= np.max(expected_weights)
self._check_weights(weights, sampled_values, expected_weights)
def test_importance_sampling_weights2(self):
tf.random.set_seed(5)
rb = utils.PrioritizedReplay(
size=2,
specs=tf.TensorSpec([], tf.int32),
importance_sampling_exponent=.3)
rb.insert(tf.constant([0, 1]), tf.constant([0.3, 0.9]))
_, weights, sampled_values = rb.sample(100, .7)
inv_sampling_probs = np.array([((0.3 ** .7 + 0.9 ** .7) / 0.3 ** .7),
((0.3 ** .7 + 0.9 ** .7) / 0.9 ** .7)])
expected_weights = inv_sampling_probs ** .3
expected_weights /= np.max(expected_weights)
self._check_weights(weights, sampled_values, expected_weights)
class HindsightExperienceReplayTest(tf.test.TestCase):
def wrap(self, x, y=None):
unroll = collections.namedtuple('unroll', 'env_outputs')
return unroll(
env_outputs=utils.EnvOutput(
observation={
'achieved_goal': x,
'desired_goal': y if (y is not None) else x
},
done=tf.zeros(x.shape[:-1], tf.bool),
reward=tf.zeros(x.shape[:-1], tf.float32),
abandoned=tf.zeros(x.shape[:-1], tf.bool),
episode_step=tf.ones(x.shape[:-1], tf.int32),))
def compute_reward_fn(self, achieved_goal, desired_goal):
return tf.norm(tf.cast(achieved_goal - desired_goal, tf.float32), axis=-1)
def test_subsampling(self):
rb = utils.HindsightExperienceReplay(
size=2,
specs=self.wrap(tf.TensorSpec([5, 1], tf.int32),
tf.TensorSpec([5, 1], tf.int32)),
importance_sampling_exponent=1,
unroll_length=2,
compute_reward_fn=self.compute_reward_fn,
substitution_probability=0.
)
rb.insert(self.wrap(tf.constant([[[10], [20], [30], [40], [50]]])),
tf.constant([1.]))
samples = rb.sample(1000, 1.)[-1].env_outputs.observation['achieved_goal']
assert samples.shape == (1000, 3, 1)
samples = tf.squeeze(samples, axis=-1)
for i in range(samples.shape[0]):
assert samples[i][0] in [10, 20, 30]
assert samples[i][1] == samples[i][0] + 10
assert samples[i][2] == samples[i][0] + 20
for val in [10, 20, 30]:
assert (samples[:, 0] == val).numpy().any()
def test_goal_substitution(self):
rb = utils.HindsightExperienceReplay(
size=2,
specs=self.wrap(tf.TensorSpec([5, 2], tf.int32),
tf.TensorSpec([5, 2], tf.int32)),
importance_sampling_exponent=1,
unroll_length=4,
compute_reward_fn=self.compute_reward_fn,
substitution_probability=1.
)
rb.insert(self.wrap(
tf.constant([[[10, 10], [20, 20], [30, 30], [40, 40], [50, 50]]]),
tf.constant([[[100, 100], [200, 200], [300, 300], [400, 400],
[500, 500]]]),
),
tf.constant([1.]))
samples = rb.sample(1000, 1.)[-1].env_outputs.observation
for key in ['achieved_goal', 'desired_goal']:
assert samples[key].shape == (1000, 5, 2)
assert (samples[key][..., 0] == samples[key][..., 1]).numpy().all()
samples = tf.nest.map_structure(lambda t: t[..., 0], samples)
diffs = set()
for i in range(samples['achieved_goal'].shape[0]):
assert (samples['achieved_goal'][i] == [10, 20, 30, 40, 50]).numpy().all()
for t in range(5):
goal = samples['desired_goal'][i][t]
assert goal in [10, 20, 30, 40, 50]
goal //= 10
assert goal > t + 1 or t == 4
diffs.add(goal.numpy() - t - 1)
assert len(diffs) == 5
class TPUEncodeTest(tf.test.TestCase, parameterized.TestCase):
def setUp(self):
super(TPUEncodeTest, self).setUp()
self.data = (
# Supported on TPU
tf.random.uniform([128], maxval=100000, dtype=tf.int32),
# Not supported on TPU
tf.cast(
tf.random.uniform([128], maxval=65535, dtype=tf.int32), tf.uint16),
# Not supported on TPU
tf.cast(
tf.random.uniform([64, 84, 84, 4], maxval=256, dtype=tf.int32),
tf.uint8),
# Not supported on TPU
tf.cast(tf.random.uniform([1], maxval=256, dtype=tf.int32), tf.uint8),
# Not supported on TPU
tf.cast(
tf.random.uniform([100, 128, 1, 1, 1], maxval=256, dtype=tf.int32),
tf.uint8),
# Not supported on TPU
tf.cast(
tf.random.uniform([128, 100, 1, 1, 1], maxval=256, dtype=tf.int32),
tf.uint8),
)
def test_simple(self):
encoded = utils.tpu_encode(self.data)
decoded = utils.tpu_decode(encoded)
self.assertEqual(tf.int32, encoded[1].dtype)
self.assertIsInstance(encoded[2], utils.TPUEncodedUInt8)
self.assertEqual(tf.bfloat16, encoded[3].dtype)
self.assertIsInstance(encoded[4], utils.TPUEncodedUInt8)
self.assertIsInstance(encoded[5], utils.TPUEncodedUInt8)
for a, b in zip(decoded, self.data):
self.assertAllEqual(a, b)
def test_dataset(self):
def gen():
yield 0
dataset = tf.data.Dataset.from_generator(gen, tf.int64)
dataset = dataset.map(lambda _: utils.tpu_encode(self.data))
encoded = list(dataset)[0]
decoded = utils.tpu_decode(encoded)
for a, b in zip(decoded, self.data):
self.assertAllEqual(a, b)
@parameterized.parameters((1,), (2,))
def test_strategy(self, num_cores):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
da = tf.tpu.experimental.DeviceAssignment.build(topology,
num_replicas=num_cores)
strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=da)
def dataset_fn(unused_ctx):
def gen():
yield 0
yield 1
dataset = tf.data.Dataset.from_generator(gen, (tf.int64))
return dataset.map(lambda _: utils.tpu_encode(self.data))
dataset = strategy.experimental_distribute_datasets_from_function(
dataset_fn)
encoded = next(iter(dataset))
decoded = strategy.run(
tf.function(lambda args: utils.tpu_decode(args, encoded)), (encoded,))
decoded = tf.nest.map_structure(
lambda t: strategy.experimental_local_results(t)[0], decoded)
for a, b in zip(decoded, self.data):
self.assertAllEqual(a, b)
class SplitStructureTest(tf.test.TestCase):
def test_basic(self):
prefix, suffix = utils.split_structure(
[tf.constant([1, 2, 3]),
tf.constant([[4, 5], [6, 7], [8, 9]])], 1)
self.assertAllEqual(prefix[0], tf.constant([1]))
self.assertAllEqual(prefix[1], tf.constant([[4, 5]]))
self.assertAllEqual(suffix[0], tf.constant([2, 3]))
self.assertAllEqual(suffix[1], tf.constant([[6, 7], [8, 9]]))
def test_zero_length_prefix(self):
prefix, suffix = utils.split_structure(tf.constant([1, 2, 3]), 0)
self.assertAllEqual(prefix, tf.constant([]))
self.assertAllEqual(suffix, tf.constant([1, 2, 3]))
class MakeTimeMajorTest(tf.test.TestCase):
def test_static(self):
x = tf.constant([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
self.assertAllEqual(utils.make_time_major(x),
tf.constant([[[1, 2], [5, 6]], [[3, 4], [7, 8]]]))
def test_dynamic(self):
x, = tf.py_function(lambda: np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),
[], [tf.int32])
self.assertAllEqual(utils.make_time_major(x),
tf.constant([[[1, 2], [5, 6]], [[3, 4], [7, 8]]]))
def test_uint16(self):
x = tf.constant([[1, 2], [3, 4]], tf.uint16)
self.assertAllEqual(utils.make_time_major(x), tf.constant([[1, 3], [2, 4]]))
def test_nest(self):
x = (tf.constant([[1, 2], [3, 4]]), tf.constant([[1], [2]]))
a, b = utils.make_time_major(x)
self.assertAllEqual(a, tf.constant([[1, 3], [2, 4]]))
self.assertAllEqual(b, tf.constant([[1, 2]]))
class MinimizeTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters((1,), (2,))
def test_minimize(self, num_training_tpus):
resolver = tf.distribute.cluster_resolver.TPUClusterResolver('')
strategy = tf.distribute.experimental.TPUStrategy(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
training_da = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=num_training_tpus)
training_strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=training_da)
with strategy.scope():
a = tf.Variable(1., trainable=True)
temp_grad = tf.Variable(
tf.zeros_like(a),
trainable=False,
synchronization=tf.VariableSynchronization.ON_READ)
@tf.function
def compute_gradients():
with tf.GradientTape() as tape:
tape.watch(a)
loss = a * 2
g = tape.gradient(loss, a)
temp_grad.assign(g)
return loss
loss = training_strategy.run(compute_gradients, ())
loss = training_strategy.experimental_local_results(loss)[0]
optimizer = tf.keras.optimizers.SGD(.1)
@tf.function
def apply_gradients(_):
optimizer.apply_gradients([(temp_grad, a)])
strategy.run(apply_gradients, (loss,))
a_values = [v.read_value() for v in strategy.experimental_local_results(a)]
expected_a = 1. - num_training_tpus * .2
self.assertAllClose([expected_a, expected_a], a_values)
class ProgressLoggerTest(tf.test.TestCase):
def test_logger(self):
logger = utils.ProgressLogger()
logger.start()
logger._log()
@tf.function(input_signature=(tf.TensorSpec([], tf.int32, 'value'),))
def log_something(value):
session = logger.log_session()
logger.log(session, 'value_1', value)
logger.log(session, 'value_2', value + 1)
logger.step_end(session)
log_something(tf.constant(10))
logger._log()
self.assertAllEqual(logger.ready_values.read_value(), tf.constant([10, 11]))
log_something(tf.constant(15))
self.assertAllEqual(logger.ready_values.read_value(), tf.constant([15, 16]))
logger._log()
logger.shutdown()
if __name__ == '__main__':
tf.test.main()
#
# test01
#
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from skimage.transform import resize
from skimage import data
from scipy.misc import imresize
import tensorflow as tf
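# NOTE: this script targets the TF 1.x graph-mode API (tf.placeholder, tf.Session)
# and an older SciPy that still ships scipy.misc.imresize (removed in SciPy 1.3).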
from libs import gif
import IPython.display as ipyd
n_iterations = 500
LAYERSIZE=256
NHIDLAYERS=2
tamimg=128
#filenames=["barvert.png", "barvert.png"]
#filenames=["barhoriz.png", "barhoriz.png"]
#filenames=["barhoriz.png", "barvert.png"]
#filenames=["fot1.jpg", "fot1.jpg"]
filenames=["../../fot2.jpg", "fot1.jpg"]
gif_frames=50
plot_step=20
#np.set_printoptions(threshold=np.inf) # display FULL array (infinite)
# (from utils.py)
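# linear(): fully-connected layer h = activation(x @ W + b); returns both the
# layer output and its weight matrix W.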
def linear(x, n_output, name=None, activation=None, reuse=None):
if len(x.get_shape()) != 2:
x = flatten(x, reuse=reuse)
n_input = x.get_shape().as_list()[1]
with tf.variable_scope(name or "fc", reuse=reuse):
W = tf.get_variable(
name='W',
shape=[n_input, n_output],
dtype=tf.float32,
#initializer=tf.contrib.layers.xavier_initializer())
initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
b = tf.get_variable(
name='b',
shape=[n_output],
dtype=tf.float32,
#initializer=tf.constant_initializer(0.0))
initializer=tf.constant_initializer())
#h = tf.nn.bias_add(
# name='h',
# value=tf.matmul(x, W),
# bias=b)
#if activation:
# h = activation(h)
h = tf.matmul(x, W) + b
        if activation is not None:  # this line gave an error: 'Tensor' object is not iterable
h = activation(h)
# return h
return h, W
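# split_image(): flatten an image into supervised pairs: xs holds the (row, col)
# pixel coordinates and ys the corresponding RGB values.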
def split_image(img):
xs = []
ys = []
for row_i in range(img.shape[0]):
for col_i in range(img.shape[1]):
xs.append([row_i, col_i])
ys.append(img[row_i, col_i])
xs = np.array(xs)
ys = np.array(ys)
return xs, ys
#########################################
#
# MAIN
#
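# The experiment: train a small MLP to map normalized (row, col) pixel
# coordinates to RGB values, so the network learns to "paint" the image.
# Both images are resized, flattened into (coordinate, colour) pairs and
# concatenated; the model (1 input layer + NHIDLAYERS hidden ReLU layers of
# LAYERSIZE units, linear 3-unit output) is trained with Adam on a squared-error
# loss over random minibatches. Every few iterations the prediction for the
# first image's coordinates is rendered and collected into a GIF.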
#dja
plotgraph=True
#plt.style.use('ggplot')
plt.style.use('bmh')
#dja
plt.ion()
#plt.show()
#plt.pause(2)
origimg = [plt.imread(fname)[..., :3] for fname in filenames] # fails with .jpg?
#origimg=[plt.imread(filenames[0]), plt.imread(filenames[1])]
print(origimg)
imrsz0=imresize(origimg[0], (tamimg,tamimg))
imrsz1=imresize(origimg[1], (tamimg,tamimg))
#print("imrsz0=",imrsz0)
#print("imrsz1=",imrsz1)
scaledimg = [imrsz0, imrsz1]
#print("scaledimg=",scaledimg)
if plotgraph:
plt.figure(figsize=(5, 5))
plt.imshow(scaledimg[1])
plt.title("(preparing the data)")
plt.show()
plt.pause(1)
#plt.close()
#plt.imsave(fname='session2_batch_reference.png', arr=scaledimg)
#print(scaledimg.shape)
xs0, ys0 = split_image(scaledimg[0])
xs1, ys1 = split_image(scaledimg[1])
#print(xs0.__class__)
xs=np.asarray([xs0, xs1])
ys=np.asarray([ys0, ys1])
#print("xs=",xs)
#print("ys=",ys)
#print(xs.__class__)
#print(xs.shape)
#print(xs.shape)
xs=xs.reshape(xs.shape[0]*xs.shape[1], 2)
ys=ys.reshape(ys.shape[0]*ys.shape[1], 3)
print("xs, ys shape:" , xs.shape, ys.shape)
#print("xs=",xs)
#print("ys=",ys)
#print("============ ys:")
#print(ys)
xs = (xs - np.mean(xs)) / np.std(xs)
#print("norm xs: ", xs)
print("norm. x min/max", np.min(xs), np.max(xs))
assert(np.min(xs) > -3.0 and np.max(xs) < 3.0)
# don't look at the next line
print("y min/max", np.min(ys), np.max(ys))
CLIPVALUE=255
#if np.max(ys)>1.1: # ALREADY NORMALIZED??
# ys = ys / 255.0
#print("norm. y min/max",np.min(ys), np.max(ys))
tf.reset_default_graph()
X = tf.placeholder(tf.float32, shape=[None, 2], name='X')
Y = tf.placeholder(tf.float32, shape=[None, 3], name='Y')
n_neurons = [2, LAYERSIZE, LAYERSIZE, LAYERSIZE, LAYERSIZE, LAYERSIZE, LAYERSIZE, 3]
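# NOTE: n_neurons is informational only; the network below is built from
# LAYERSIZE and NHIDLAYERS.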
print("LAYERSIZE=",LAYERSIZE)
# Input layer
HOUT, W = linear(X, LAYERSIZE, activation=tf.nn.relu, name='Lay1')
# Hidden layers
for i in range(1, NHIDLAYERS+1):
print("creating hidd lay ", i)
HOUT, W = linear(HOUT, LAYERSIZE, activation=tf.nn.relu, name='HidLay'+str(i))
# Output layer
Y_pred, W7 = linear(HOUT, 3, activation=None, name='pred')
assert(X.get_shape().as_list() == [None, 2])
assert(Y_pred.get_shape().as_list() == [None, 3])
assert(Y.get_shape().as_list() == [None, 3])
#errortot = tf.abs(Y_pred - Y)
#errortot = tf.pow(tf.sub(Y_pred, Y), 2)
errortot = (Y_pred - Y) ** 2
assert(errortot.get_shape().as_list() == [None, 3])
print("error.shape: ", errortot.get_shape())
sum_errorred = tf.reduce_sum(errortot, 1)
assert(sum_errorred.get_shape().as_list() == [None])
costtot = tf.reduce_mean(sum_errorred)
assert(costtot.get_shape().as_list() == [])
myoptimizer =tf.train.AdamOptimizer(0.001).minimize(costtot)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
gifimgs = []
costs = []
gif_step = max(n_iterations // gif_frames, 1)
print("gif_step: ", gif_step)
batch_size = int(np.sqrt(len(xs)))
for it_i in range(1, n_iterations+1):
print("iteration: ", it_i, end="", flush=True);
# Get a random sampling of the dataset
idxs = np.random.permutation(range(len(xs)))
###print("idxs=",idxs)
# The number of batches we have to iterate over
n_batches = max(len(idxs) // batch_size, 1)
#print(" n_batches: ", n_batches, end="", flush=True);
# Now iterate over our stochastic minibatches:
for batch_i in range(n_batches):
idxs_i = idxs[batch_i * batch_size: (batch_i + 1) * batch_size]
#print("===============================")
#print("xs feed: ", xs[idxs_i])
#print("ys feed: ", ys[idxs_i])
sess.run(myoptimizer, feed_dict={X: xs[idxs_i], Y: ys[idxs_i]})
#NOTE: careful with the indentation
training_cost = sess.run(costtot, feed_dict={X: xs, Y: ys})
#print(" cost: ", training_cost / n_batches);
print(" cost: ", training_cost);
if (it_i % gif_step) == 0 or (it_i % plot_step) == 0:
idxs_j=range(len(xs)//2)
ys_pred = Y_pred.eval(feed_dict={X: xs[idxs_j]}, session=sess)
        ### why did this work??
###plotimg = np.clip(np.array(ys_pred.reshape(scaledimg[0].shape))*255, 0, CLIPVALUE).astype(np.uint8)
        # FIXED (tested with jpg/png):
plotimg = np.clip(np.array(ys_pred.reshape(scaledimg[0].shape)), 0, CLIPVALUE).astype(np.uint8)
if (it_i % gif_step) == 0:
gifimgs.append(plotimg)
if (it_i % plot_step) == 0:
costs.append(training_cost)
if plotgraph:
plt.imshow(plotimg)
plt.title('Iteration {}'.format(it_i))
plt.show()
plt.pause(1)
#print(ys_pred)
if plotgraph:
# Save the images as a GIF
_ = gif.build_gif(gifimgs, saveto='test01_single.gif', show_gif=False, interval=0.3)
plt.imsave(fname='test01_predicted.png', arr=plotimg)
plt.pause(5)
plt.close()
# eop
import sys
import os
import logging
import tempfile
import shutil
import six
from six.moves import shlex_quote
if six.PY3:
from collections.abc import Sequence
else:
from collections import Sequence
logger = logging.getLogger(__name__)
class ContainerCommandError(Exception):
def __init__(self, command, exit_code, stdout, stderr):
self.command = command
self.exit_code = exit_code
self.stdout = stdout
self.stderr = stderr
def __str__(self):
        if hasattr(self.command, 'read'):
return "Script exited with code {exit_code}: {script}".format(
exit_code=self.exit_code,
script=self.command.name
)
else:
return "Command exited with code {exit_code}: {command}".format(
exit_code=self.exit_code,
command=self.command
)
class Container:
TEMP_VOLUME = '/provisioning'
COMMAND_FILE = 'command.sh'
STDOUT_FILE = 'stdout'
STDERR_FILE = 'stderr'
SHELL = '/bin/sh'
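    # Strategy: each command is written as a shell script into a host temp dir
    # that is bind-mounted into the container at TEMP_VOLUME. The container
    # runs it via `SHELL -c` with stdout/stderr redirected to files in the same
    # mount, which are read back on the host once the container exits.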
def __init__(self, client, image, encoding='utf-8', build_volumes=None,
build_volumes_from=None, **container_configuration):
self.client = client
self.encoding = encoding
self.container_configuration = container_configuration
self.container_configuration['image'] = image
self.container_configuration['user'] = 'root'
self.container_configuration['stdin_open'] = True
self.build_volumes = build_volumes or {}
self.build_volumes_from = build_volumes_from or []
self.id = None
self.temp_dir = None
def __str__(self):
if self.id is not None:
return self.id[:12]
else:
return '<new container>'
def __enter__(self):
self.create()
return self
def __exit__(self, *args, **kwargs):
self.remove()
def create(self):
if self.temp_dir is None:
self.temp_dir = tempfile.mkdtemp()
            os.chmod(self.temp_dir, 0o777)
result = self.client.create_container(
command=[
self.SHELL,
'-c',
'{command_file} >{stdout_file} 2>{stderr_file}'.format(
command_file='/'.join((
self.TEMP_VOLUME,
self.COMMAND_FILE
)),
stdout_file='/'.join((
self.TEMP_VOLUME,
self.STDOUT_FILE
)),
stderr_file='/'.join((
self.TEMP_VOLUME,
self.STDERR_FILE
)),
),
],
**self.container_configuration
)
self.id = result['Id']
logger.info("Created container: {}".format(self))
def remove(self):
if self.id:
self.client.remove_container(
self.id,
v=True,
force=True
)
logger.info("Removed container: {}".format(self))
self.id = None
if self.temp_dir is not None:
shutil.rmtree(self.temp_dir, ignore_errors=True)
self.temp_dir = None
def run(self, command, **additional_configuration):
if hasattr(command, 'read'):
logger.info("Running script.")
exit_code, stdout, stderr = self.execute(
command,
**additional_configuration
)
else:
logger.info("Running: {}".format(
command
if isinstance(command, six.string_types)
else ' '.join(command)
))
exit_code, stdout, stderr = self.execute(
command,
**additional_configuration
)
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if exit_code != 0:
raise ContainerCommandError(command, exit_code, stdout, stderr)
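    # execute() writes the script to the shared temp dir, starts the container
    # (optionally streaming `stdin` over an attach socket), waits for it to
    # exit and returns (exit_code, stdout, stderr) read back from the mount.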
def execute(self, script, stdin=None, **additional_configuration):
assert self.id is not None
assert self.temp_dir is not None
if isinstance(script, six.text_type):
script = script.strip().encode(self.encoding) + b'\n'
elif isinstance(script, six.binary_type):
script = script.strip() + b'\n'
elif isinstance(script, Sequence):
script = six.text_type(' ').join(
map(shlex_quote, map(six.text_type, script))
)
script = script.encode(self.encoding) + b'\n'
elif not hasattr(script, 'read'):
raise TypeError(
"Invalid script type: must be a string, a sequence of strings"
" or a file."
)
command_local_path = os.path.join(self.temp_dir, self.COMMAND_FILE)
        # The script is bytes at this point (or a file object, which should be
        # opened in binary mode), so write the command file in binary mode.
        with open(command_local_path, 'wb') as command_file:
            if hasattr(script, 'read'):
                shutil.copyfileobj(script, command_file)
            else:
                command_file.write(script)
        os.chmod(command_local_path, 0o755)
additional_configuration.setdefault('binds', {})
additional_configuration['binds'].update(self.build_volumes)
additional_configuration['binds'][self.temp_dir] = {
'bind': self.TEMP_VOLUME,
'ro': False,
}
if self.build_volumes_from:
additional_configuration['volumes_from'] = self.build_volumes_from
self.client.start(
self.id,
**additional_configuration
)
if stdin is not None:
socket = self.client.attach_socket(
self.id,
params={
'stdout': 0,
'stderr': 0,
'stdin': 1,
'stream': 1,
}
)
if hasattr(stdin, 'read'):
stdin = stdin.read()
if isinstance(stdin, six.text_type):
stdin = stdin.encode(self.encoding)
socket.sendall(stdin)
socket.close()
exit_code = self.client.wait(self.id)
stdout_local_path = os.path.join(self.temp_dir, self.STDOUT_FILE)
stderr_local_path = os.path.join(self.temp_dir, self.STDERR_FILE)
with open(stdout_local_path, 'r') as stdout_file:
stdout = stdout_file.read()
with open(stderr_local_path, 'r') as stderr_file:
stderr = stderr_file.read()
return exit_code, stdout, stderr
def read_file(self, path):
return self.client.copy(
self.id,
path
)
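# Example usage (a sketch; assumes a docker-py style `client` object):
#
#   with Container(client, image='ubuntu:14.04') as container:
#       container.run(['apt-get', 'update'])
#       contents = container.read_file('/etc/hostname')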
# Very rudimentary test of threading module
import test.support
from test.support import verbose, strip_python_stderr
import random
import re
import sys
_thread = test.support.import_module('_thread')
threading = test.support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
            self.assertTrue(re.match(r'<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
            self.assertTrue(re.match(r'<TestThread\(.*, stopped -?\d+\)>',
                                     repr(t)))
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
_thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256kB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1MB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
raise unittest.SkipTest("cannot import ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = _thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = _thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertTrue(not t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
    def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
raise unittest.SkipTest("cannot import ctypes")
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
        self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + ascii(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
stderr = strip_python_stderr(stderr)
self.assertEqual(stderr, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertFalse('daemon' in repr(t))
t.daemon = True
self.assertTrue('daemon' in repr(t))
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
rc = p.wait()
data = p.stdout.read().decode().replace('\r', '')
p.stdout.close()
self.assertEqual(data, "end of main\nend of thread\n")
self.assertFalse(rc == 2, "interpreter was blocked")
self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
'os2emx'):
raise unittest.SkipTest('due to known OS bugs on ' + sys.platform)
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error")
self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_4_joining_across_fork_in_worker_thread(self):
# There used to be a possible deadlock when forking from a child
# thread. See http://bugs.python.org/issue6643.
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
raise unittest.SkipTest('due to known OS bugs on ' + sys.platform)
# The script takes the following steps:
# - The main thread in the parent process starts a new thread and then
# tries to join it.
# - The join operation acquires the Lock inside the thread's _block
# Condition. (See threading.py:Thread.join().)
# - We stub out the acquire method on the condition to force it to wait
# until the child thread forks. (See LOCK ACQUIRED HERE)
# - The child thread forks. (See LOCK HELD and WORKER THREAD FORKS
# HERE)
# - The main thread of the parent process enters Condition.wait(),
# which releases the lock on the child thread.
# - The child process returns. Without the necessary fix, when the
# main thread of the child process (which used to be the child thread
# in the parent process) attempts to exit, it will try to acquire the
# lock in the Thread._block Condition object and hang, because the
# lock was held across the fork.
script = """if 1:
import os, time, threading
finish_join = False
start_fork = False
def worker():
# Wait until this thread's lock is acquired before forking to
# create the deadlock.
global finish_join
while not start_fork:
time.sleep(0.01)
# LOCK HELD: Main thread holds lock across this call.
childpid = os.fork()
finish_join = True
if childpid != 0:
# Parent process just waits for child.
os.waitpid(childpid, 0)
# Child process should just return.
w = threading.Thread(target=worker)
# Stub out the private condition variable's lock acquire method.
# This acquires the lock and then waits until the child has forked
# before returning, which will release the lock soon after. If
# someone else tries to fix this test case by acquiring this lock
            # before forking instead of resetting it, the test case will
# deadlock when it shouldn't.
condition = w._block
orig_acquire = condition.acquire
call_count_lock = threading.Lock()
call_count = 0
def my_acquire():
global call_count
global start_fork
orig_acquire() # LOCK ACQUIRED HERE
start_fork = True
if call_count == 0:
while not finish_join:
time.sleep(0.01) # WORKER THREAD FORKS HERE
with call_count_lock:
call_count += 1
condition.acquire = my_acquire
w.start()
w.join()
print('end of main')
"""
self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_5_clear_waiter_locks_to_avoid_crash(self):
# Check that a spawned thread that forks doesn't segfault on certain
# platforms, namely OS X. This used to happen if there was a waiter
# lock in the thread's condition variable's waiters list. Even though
# we know the lock will be held across the fork, it is not safe to
# release locks held across forks on all platforms, so releasing the
# waiter lock caused a segfault on OS X. Furthermore, since locks on
# OS X are (as of this writing) implemented with a mutex + condition
# variable instead of a semaphore, while we know that the Python-level
# lock will be acquired, we can't know if the internal mutex will be
# acquired at the time of the fork.
# Skip platforms with known problems forking from a worker thread.
# See http://bugs.python.org/issue3863.
if sys.platform in ('freebsd4', 'freebsd5', 'freebsd6', 'os2emx'):
raise unittest.SkipTest('due to known OS bugs on ' + sys.platform)
script = """if True:
import os, time, threading
start_fork = False
def worker():
# Wait until the main thread has attempted to join this thread
# before continuing.
while not start_fork:
time.sleep(0.01)
childpid = os.fork()
if childpid != 0:
# Parent process just waits for child.
(cpid, rc) = os.waitpid(childpid, 0)
assert cpid == childpid
assert rc == 0
print('end of worker thread')
else:
# Child process should just return.
pass
w = threading.Thread(target=worker)
# Stub out the private condition variable's _release_save method.
# This releases the condition's lock and flips the global that
# causes the worker to fork. At this point, the problematic waiter
# lock has been acquired once by the waiter and has been put onto
# the waiters list.
condition = w._block
orig_release_save = condition._release_save
def my_release_save():
global start_fork
orig_release_save()
# Waiter lock held here, condition lock released.
start_fork = True
condition._release_save = my_release_save
w.start()
w.join()
print('end of main thread')
"""
output = "end of worker thread\nend of main thread\n"
self.assertScriptHasOutput(script, output)
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
    # A Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
def test_main():
test.support.run_unittest(LockTests, PyRLockTests, CRLockTests, EventTests,
ConditionAsRLockTests, ConditionTests,
SemaphoreTests, BoundedSemaphoreTests,
ThreadTests,
ThreadJoinOnShutdown,
ThreadingExceptionTests,
BarrierTests
)
if __name__ == "__main__":
test_main()
import datetime
from dateutil.tz import *
from django.db import models
from django.contrib.auth.models import User
from django.test import TestCase
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.fields import *
from tastypie.resources import ModelResource
from core.models import Note, Subject, MediaBit
from tastypie.utils import aware_datetime, aware_date
class ApiFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ApiField()
self.assertEqual(field_1.instance_name, None)
self.assertEqual(field_1.attribute, None)
self.assertEqual(field_1._default, NOT_PROVIDED)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.value, None)
self.assertEqual(field_1.help_text, '')
field_2 = ApiField(attribute='foo', default=True, null=True, readonly=True, help_text='Foo.')
self.assertEqual(field_2.instance_name, None)
self.assertEqual(field_2.attribute, 'foo')
self.assertEqual(field_2._default, True)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.value, None)
self.assertEqual(field_2.readonly, True)
self.assertEqual(field_2.help_text, 'Foo.')
def test_dehydrated_type(self):
field_1 = ApiField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_has_default(self):
field_1 = ApiField()
self.assertEqual(field_1.has_default(), False)
field_2 = ApiField(default=True)
self.assertEqual(field_2.has_default(), True)
def test_default(self):
field_1 = ApiField()
self.assertEqual(isinstance(field_1.default, NOT_PROVIDED), True)
field_2 = ApiField(default=True)
self.assertEqual(field_2.default, True)
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no attribute or default, we should get ``None``.
field_1 = ApiField()
self.assertEqual(field_1.dehydrate(bundle), None)
# Still no attribute, so we should pick up the default
field_2 = ApiField(default=True)
self.assertEqual(field_2.dehydrate(bundle), True)
# Wrong attribute should yield default.
field_3 = ApiField(attribute='foo', default=True)
self.assertEqual(field_3.dehydrate(bundle), True)
# Wrong attribute should yield null.
field_4 = ApiField(attribute='foo', null=True)
self.assertEqual(field_4.dehydrate(bundle), None)
# Correct attribute.
field_5 = ApiField(attribute='title', default=True)
self.assertEqual(field_5.dehydrate(bundle), u'First Post!')
# Correct callable attribute.
field_6 = ApiField(attribute='what_time_is_it', default=True)
self.assertEqual(field_6.dehydrate(bundle), aware_datetime(2010, 4, 1, 0, 48))
def test_convert(self):
field_1 = ApiField()
self.assertEqual(field_1.convert('foo'), 'foo')
self.assertEqual(field_1.convert(True), True)
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no value, default or nullable, we should get an ``ApiFieldError``.
field_1 = ApiField()
field_1.instance_name = 'api'
self.assertRaises(ApiFieldError, field_1.hydrate, bundle)
# The default.
field_2 = ApiField(default='foo')
field_2.instance_name = 'api'
self.assertEqual(field_2.hydrate(bundle), 'foo')
# The callable default.
def foo():
return 'bar'
field_3 = ApiField(default=foo)
field_3.instance_name = 'api'
self.assertEqual(field_3.hydrate(bundle), 'bar')
# The nullable case.
field_4 = ApiField(null=True)
field_4.instance_name = 'api'
self.assertEqual(field_4.hydrate(bundle), None)
# The readonly case.
field_5 = ApiField(readonly=True)
field_5.instance_name = 'api'
bundle.data['api'] = 'abcdef'
self.assertEqual(field_5.hydrate(bundle), None)
# A real, live attribute!
field_6 = ApiField(attribute='title')
field_6.instance_name = 'api'
bundle.data['api'] = note.title
self.assertEqual(field_6.hydrate(bundle), u'First Post!')
# Make sure it uses attribute when there's no data
field_7 = ApiField(attribute='title')
field_7.instance_name = 'notinbundle'
self.assertEqual(field_7.hydrate(bundle), u'First Post!')
# Make sure it falls back to instance name if there is no attribute
field_8 = ApiField()
field_8.instance_name = 'title'
self.assertEqual(field_8.hydrate(bundle), u'First Post!')
# Attribute & null regression test.
# First, simulate data missing from the bundle & ``null=True``.
field_9 = ApiField(attribute='notinbundle', null=True)
field_9.instance_name = 'notinbundle'
self.assertEqual(field_9.hydrate(bundle), None)
        # Then do something in the bundle also with ``null=True``.
field_10 = ApiField(attribute='title', null=True)
field_10.instance_name = 'title'
self.assertEqual(field_10.hydrate(bundle), u'First Post!')
# The blank case.
field_11 = ApiField(attribute='notinbundle', blank=True)
field_11.instance_name = 'notinbundle'
self.assertEqual(field_11.hydrate(bundle), None)
bundle.data['title'] = note.title
field_12 = ApiField(attribute='title', blank=True)
field_12.instance_name = 'title'
self.assertEqual(field_12.hydrate(bundle), u'First Post!')
class CharFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = CharField()
self.assertEqual(field_1.help_text, 'Unicode string data. Ex: "Hello World"')
field_2 = CharField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = CharField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = CharField(attribute='title', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'First Post!')
field_2 = CharField(default=20)
self.assertEqual(field_2.dehydrate(bundle), u'20')
class FileFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = FileField()
self.assertEqual(field_1.help_text, 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"')
field_2 = FileField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = FileField()
self.assertEqual(field_1.dehydrated_type, 'string')
def test_dehydrate(self):
bit = MediaBit.objects.get(pk=1)
bundle = Bundle(obj=bit)
field_1 = FileField(attribute='image', default=True)
self.assertEqual(field_1.dehydrate(bundle), u'http://localhost:8080/media/lulz/catz.gif')
field_2 = FileField(default='http://media.example.com/img/default_avatar.jpg')
self.assertEqual(field_2.dehydrate(bundle), u'http://media.example.com/img/default_avatar.jpg')
bit = MediaBit.objects.get(pk=1)
bit.image = ''
bundle = Bundle(obj=bit)
field_3 = FileField(attribute='image', default=True)
self.assertEqual(field_3.dehydrate(bundle), None)
bit.image = None
bundle = Bundle(obj=bit)
field_4 = FileField(attribute='image', null=True)
self.assertEqual(field_4.dehydrate(bundle), None)
class IntegerFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = IntegerField()
self.assertEqual(field_1.help_text, 'Integer data. Ex: 2673')
field_2 = IntegerField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = IntegerField()
self.assertEqual(field_1.dehydrated_type, 'integer')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = IntegerField(default=25)
self.assertEqual(field_1.dehydrate(bundle), 25)
field_2 = IntegerField(default='20')
self.assertEqual(field_2.dehydrate(bundle), 20)
field_3 = IntegerField(default=18.5)
self.assertEqual(field_3.dehydrate(bundle), 18)
class FloatFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = FloatField()
self.assertEqual(field_1.help_text, 'Floating point numeric data. Ex: 26.73')
field_2 = FloatField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = FloatField()
self.assertEqual(field_1.dehydrated_type, 'float')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = FloatField(default=20)
self.assertEqual(field_1.dehydrate(bundle), 20.0)
field_2 = IntegerField(default=18.5)
self.assertEqual(field_2.dehydrate(bundle), 18)
class DecimalFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DecimalField()
self.assertEqual(field_1.help_text, 'Fixed precision numeric data. Ex: 26.73')
field_2 = DecimalField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DecimalField()
self.assertEqual(field_1.dehydrated_type, 'decimal')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DecimalField(default='20')
self.assertEqual(field_1.dehydrate(bundle), Decimal('20.0'))
field_2 = DecimalField(default='18.5')
self.assertEqual(field_2.dehydrate(bundle), Decimal('18.5'))
def test_model_resource_correct_association(self):
api_field = ModelResource.api_field_from_django_field(models.DecimalField())
self.assertEqual(api_field, DecimalField)
class ListFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ListField()
self.assertEqual(field_1.help_text, "A list of data. Ex: ['abc', 26.73, 8]")
field_2 = ListField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = ListField()
self.assertEqual(field_1.dehydrated_type, 'list')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = ListField(default=[1, 2, 3])
self.assertEqual(field_1.dehydrate(bundle), [1, 2, 3])
field_2 = ListField(default=['abc'])
self.assertEqual(field_2.dehydrate(bundle), ['abc'])
class DictFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = DictField()
self.assertEqual(field_1.help_text, "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}")
field_2 = DictField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DictField()
self.assertEqual(field_1.dehydrated_type, 'dict')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DictField(default={'price': 12.34, 'name': 'Daniel'})
self.assertEqual(field_1.dehydrate(bundle), {'price': 12.34, 'name': 'Daniel'})
field_2 = DictField(default={'name': 'Daniel'})
self.assertEqual(field_2.dehydrate(bundle), {'name': 'Daniel'})
class BooleanFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = BooleanField()
self.assertEqual(field_1.help_text, 'Boolean data. Ex: True')
field_2 = BooleanField(help_text="Custom.")
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = BooleanField()
self.assertEqual(field_1.dehydrated_type, 'boolean')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = BooleanField(attribute='is_active', default=False)
self.assertEqual(field_1.dehydrate(bundle), True)
field_2 = BooleanField(default=True)
self.assertEqual(field_2.dehydrate(bundle), True)
class TimeFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = TimeField()
self.assertEqual(field_1.help_text, 'A time as string. Ex: "20:05:23"')
field_2 = TimeField(help_text='Custom.')
self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = TimeField()
self.assertEqual(field_1.dehydrated_type, 'time')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = TimeField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = TimeField(default=datetime.time(23, 5, 58))
self.assertEqual(field_2.dehydrate(bundle), datetime.time(23, 5, 58))
field_3 = TimeField(attribute='created_string')
note.created_string = '13:06:00'
self.assertEqual(field_3.dehydrate(bundle), datetime.time(13, 6))
note.created_string = '13:37:44'
self.assertEqual(field_3.dehydrate(bundle), datetime.time(13, 37, 44))
note.created_string = 'hello'
self.assertRaises(ApiFieldError, field_3.dehydrate, bundle)
def test_hydrate(self):
bundle_1 = Bundle(data={'time': '03:49'})
field_1 = TimeField(attribute='created')
field_1.instance_name = 'time'
self.assertEqual(field_1.hydrate(bundle_1), datetime.time(3, 49))
bundle_2 = Bundle()
field_2 = TimeField(default=datetime.time(17, 40))
        field_2.instance_name = 'doesnotmatter'  # Won't find it in the bundle data
self.assertEqual(field_2.hydrate(bundle_2), datetime.time(17, 40))
bundle_3 = Bundle(data={'time': '22:08:11'})
field_3 = TimeField(attribute='created_string')
field_3.instance_name = 'time'
self.assertEqual(field_3.hydrate(bundle_3), datetime.time(22, 8, 11))
bundle_4 = Bundle(data={'time': '07:45'})
field_4 = TimeField(attribute='created')
field_4.instance_name = 'time'
self.assertEqual(field_4.hydrate(bundle_4), datetime.time(7, 45))
bundle_5 = Bundle(data={'time': None})
field_5 = TimeField(attribute='created', null=True)
field_5.instance_name = 'time'
self.assertEqual(field_5.hydrate(bundle_5), None)
class DateFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
        field_1 = DateField()
        self.assertEqual(field_1.help_text, 'A date as a string. Ex: "2010-11-10"')
        field_2 = DateField(help_text="Custom.")
        self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DateField()
self.assertEqual(field_1.dehydrated_type, 'date')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DateField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = DateField(default=datetime.date(2010, 4, 1))
self.assertEqual(field_2.dehydrate(bundle), datetime.date(2010, 4, 1))
note.created_string = '2010-04-02'
field_3 = DateField(attribute='created_string')
self.assertEqual(field_3.dehydrate(bundle), datetime.date(2010, 4, 2))
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle_1 = Bundle(data={
'date': '2010-05-12',
})
field_1 = DateField(attribute='created')
field_1.instance_name = 'date'
self.assertEqual(field_1.hydrate(bundle_1), datetime.date(2010, 5, 12))
bundle_2 = Bundle()
field_2 = DateField(default=datetime.date(2010, 4, 1))
field_2.instance_name = 'date'
self.assertEqual(field_2.hydrate(bundle_2), datetime.date(2010, 4, 1))
bundle_3 = Bundle(data={
'date': 'Wednesday, May 12, 2010',
})
field_3 = DateField(attribute='created_string')
field_3.instance_name = 'date'
self.assertEqual(field_3.hydrate(bundle_3), datetime.date(2010, 5, 12))
bundle_4 = Bundle(data={
'date': '5 Apr 2010',
})
field_4 = DateField(attribute='created')
field_4.instance_name = 'date'
self.assertEqual(field_4.hydrate(bundle_4), datetime.date(2010, 4, 5))
bundle_5 = Bundle(data={
'date': None,
})
field_5 = DateField(attribute='created', null=True)
field_5.instance_name = 'date'
self.assertEqual(field_5.hydrate(bundle_5), None)
class DateTimeFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
        field_1 = DateTimeField()
        self.assertEqual(field_1.help_text, 'A date & time as a string. Ex: "2010-11-10T03:07:43"')
        field_2 = DateTimeField(help_text="Custom.")
        self.assertEqual(field_2.help_text, 'Custom.')
def test_dehydrated_type(self):
field_1 = DateTimeField()
self.assertEqual(field_1.dehydrated_type, 'datetime')
def test_dehydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_1 = DateTimeField(attribute='created')
self.assertEqual(field_1.dehydrate(bundle), aware_datetime(2010, 3, 30, 20, 5))
field_2 = DateTimeField(default=aware_datetime(2010, 4, 1, 1, 7))
self.assertEqual(field_2.dehydrate(bundle), aware_datetime(2010, 4, 1, 1, 7))
note.created_string = '2010-04-02 01:11:00'
field_3 = DateTimeField(attribute='created_string')
self.assertEqual(field_3.dehydrate(bundle), aware_datetime(2010, 4, 2, 1, 11))
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle_1 = Bundle(data={
'datetime': '2010-05-12 10:36:28',
})
field_1 = DateTimeField(attribute='created')
field_1.instance_name = 'datetime'
self.assertEqual(field_1.hydrate(bundle_1), aware_datetime(2010, 5, 12, 10, 36, 28))
bundle_2 = Bundle()
field_2 = DateTimeField(default=aware_datetime(2010, 4, 1, 2, 0))
field_2.instance_name = 'datetime'
self.assertEqual(field_2.hydrate(bundle_2), aware_datetime(2010, 4, 1, 2, 0))
bundle_3 = Bundle(data={
'datetime': 'Tue, 30 Mar 2010 20:05:00 -0500',
})
field_3 = DateTimeField(attribute='created_string')
field_3.instance_name = 'datetime'
self.assertEqual(field_3.hydrate(bundle_3), aware_datetime(2010, 3, 30, 20, 5, tzinfo=tzoffset(None, -18000)))
bundle_4 = Bundle(data={
'datetime': None,
})
field_4 = DateField(attribute='created', null=True)
field_4.instance_name = 'datetime'
self.assertEqual(field_4.hydrate(bundle_4), None)
class UserResource(ModelResource):
class Meta:
resource_name = 'users'
queryset = User.objects.all()
def get_resource_uri(self, bundle):
return '/api/v1/users/%s/' % bundle.obj.id
class ToOneFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
def test_init(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.instance_name, None)
self.assertEqual(issubclass(field_1.to, UserResource), True)
self.assertEqual(field_1.attribute, 'author')
self.assertEqual(field_1.related_name, None)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.full, False)
self.assertEqual(field_1.readonly, False)
self.assertEqual(field_1.help_text, 'A single related resource. Can be either a URI or set of nested resource data.')
field_2 = ToOneField(UserResource, 'author', null=True, help_text="Points to a User.")
self.assertEqual(field_2.instance_name, None)
self.assertEqual(issubclass(field_2.to, UserResource), True)
self.assertEqual(field_2.attribute, 'author')
self.assertEqual(field_2.related_name, None)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.full, False)
self.assertEqual(field_2.readonly, False)
self.assertEqual(field_2.help_text, 'Points to a User.')
field_3 = ToOneField(UserResource, 'author', default=1, null=True, help_text="Points to a User.")
self.assertEqual(field_3.instance_name, None)
self.assertEqual(issubclass(field_3.to, UserResource), True)
self.assertEqual(field_3.attribute, 'author')
self.assertEqual(field_3.related_name, None)
self.assertEqual(field_3.null, True)
self.assertEqual(field_3.default, 1)
self.assertEqual(field_3.full, False)
self.assertEqual(field_3.readonly, False)
self.assertEqual(field_3.help_text, 'Points to a User.')
field_4 = ToOneField(UserResource, 'author', default=1, null=True, readonly=True, help_text="Points to a User.")
self.assertEqual(field_4.instance_name, None)
self.assertEqual(issubclass(field_4.to, UserResource), True)
self.assertEqual(field_4.attribute, 'author')
self.assertEqual(field_4.related_name, None)
self.assertEqual(field_4.null, True)
self.assertEqual(field_4.default, 1)
self.assertEqual(field_4.full, False)
self.assertEqual(field_4.readonly, True)
self.assertEqual(field_4.help_text, 'Points to a User.')
def test_dehydrated_type(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.dehydrated_type, 'related')
def test_has_default(self):
field_1 = ToOneField(UserResource, 'author')
self.assertEqual(field_1.has_default(), False)
field_1 = ToOneField(UserResource, 'author', default=1)
self.assertEqual(field_1.has_default(), True)
def test_default(self):
field_1 = ToOneField(UserResource, 'author')
self.assertTrue(isinstance(field_1.default, NOT_PROVIDED))
field_2 = ToOneField(UserResource, 'author', default=1)
self.assertEqual(field_2.default, 1)
def test_dehydrate(self):
note = Note()
bundle = Bundle(obj=note)
field_1 = ToOneField(UserResource, 'author')
self.assertRaises(ApiFieldError, field_1.dehydrate, bundle)
field_2 = ToOneField(UserResource, 'author', null=True)
self.assertEqual(field_2.dehydrate(bundle), None)
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
field_3 = ToOneField(UserResource, 'author')
self.assertEqual(field_3.dehydrate(bundle), '/api/v1/users/1/')
field_4 = ToOneField(UserResource, 'author', full=True)
user_bundle = field_4.dehydrate(bundle)
self.assertEqual(isinstance(user_bundle, Bundle), True)
self.assertEqual(user_bundle.data['username'], u'johndoe')
self.assertEqual(user_bundle.data['email'], u'[email protected]')
def test_hydrate(self):
note = Note()
bundle = Bundle(obj=note)
# With no value or nullable, we should get an ``ApiFieldError``.
field_1 = ToOneField(UserResource, 'author')
self.assertRaises(ApiFieldError, field_1.hydrate, bundle)
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# The nullable case.
field_2 = ToOneField(UserResource, 'author', null=True)
field_2.instance_name = 'fk'
bundle.data['fk'] = None
self.assertEqual(field_2.hydrate(bundle), None)
# Wrong resource URI.
field_3 = ToOneField(UserResource, 'author')
field_3.instance_name = 'fk'
bundle.data['fk'] = '/api/v1/users/abc/'
self.assertRaises(NotFound, field_3.hydrate, bundle)
# A real, live attribute!
field_4 = ToOneField(UserResource, 'author')
field_4.instance_name = 'fk'
bundle.data['fk'] = '/api/v1/users/1/'
fk_bundle = field_4.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
field_5 = ToOneField(UserResource, 'author')
field_5.instance_name = 'fk'
bundle.data['fk'] = {
'username': u'mistersmith',
'email': u'[email protected]',
'password': u'foobar',
}
fk_bundle = field_5.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
# Regression - Make sure Unicode keys get converted to regular strings
# so that we can **kwargs them.
field_6 = ToOneField(UserResource, 'author')
field_6.instance_name = 'fk'
bundle.data['fk'] = {
u'username': u'mistersmith',
u'email': u'[email protected]',
u'password': u'foobar',
}
fk_bundle = field_6.hydrate(bundle)
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
# Attribute & null regression test.
# First, simulate data missing from the bundle & ``null=True``.
# Use a Note with NO author, so that the lookup for the related
# author fails.
note = Note.objects.create(
title='Biplanes for all!',
slug='biplanes-for-all',
content='Somewhere, east of Manhattan, will lie the mythical land of planes with more one wing...'
)
bundle = Bundle(obj=note)
field_7 = ToOneField(UserResource, 'notinbundle', null=True)
field_7.instance_name = 'notinbundle'
self.assertEqual(field_7.hydrate(bundle), None)
# Then do something in the bundle also with ``null=True``.
field_8 = ToOneField(UserResource, 'author', null=True)
field_8.instance_name = 'author'
fk_bundle = field_8.hydrate(bundle)
self.assertEqual(field_8.hydrate(bundle), None)
# Then use an unsaved object in the bundle also with ``null=True``.
new_note = Note(
title='Biplanes for all!',
slug='biplanes-for-all',
content='Somewhere, east of Manhattan, will lie the mythical land of planes with more one wing...'
)
new_bundle = Bundle(obj=new_note)
field_9 = ToOneField(UserResource, 'author', null=True)
field_9.instance_name = 'author'
        self.assertEqual(field_9.hydrate(new_bundle), None)
# The blank case.
field_10 = ToOneField(UserResource, 'fk', blank=True)
field_10.instance_name = 'fk'
self.assertEqual(field_10.hydrate(bundle), None)
bundle.data['author'] = '/api/v1/users/1/'
field_11 = ToOneField(UserResource, 'author', blank=True)
field_11.instance_name = 'author'
fk_bundle = field_11.hydrate(bundle)
self.assertEqual(fk_bundle.obj.username, 'johndoe')
# The readonly case.
field_12 = ToOneField(UserResource, 'author', readonly=True)
field_12.instance_name = 'author'
self.assertEqual(field_12.hydrate(bundle), None)
# A related object.
field_13 = ToOneField(UserResource, 'author')
field_13.instance_name = 'fk'
bundle.related_obj = User.objects.get(pk=1)
bundle.related_name = 'author'
fk_bundle = field_13.hydrate(bundle)
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
def test_resource_from_uri(self):
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_uri(ur, '/api/v1/users/1/')
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
fk_bundle = field_1.resource_from_uri(ur, '/api/v1/users/1/', related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, None)
self.assertEqual(fk_bundle.related_name, None)
def test_resource_from_data(self):
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_data(ur, {
'username': u'mistersmith',
'email': u'[email protected]',
'password': u'foobar',
})
self.assertEqual(fk_bundle.data['username'], u'mistersmith')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'mistersmith')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
fk_bundle = field_1.resource_from_data(ur, {
'username': u'mistersmith',
'email': u'[email protected]',
'password': u'foobar',
}, related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, 'Foo')
self.assertEqual(fk_bundle.related_name, 'Bar')
def test_resource_from_pk(self):
user = User.objects.get(pk=1)
ur = UserResource()
field_1 = ToOneField(UserResource, 'author')
fk_bundle = field_1.resource_from_pk(ur, user)
self.assertEqual(fk_bundle.data['username'], u'johndoe')
self.assertEqual(fk_bundle.data['email'], u'[email protected]')
self.assertEqual(fk_bundle.obj.username, u'johndoe')
self.assertEqual(fk_bundle.obj.email, u'[email protected]')
fk_bundle = field_1.resource_from_pk(ur, user, related_obj='Foo', related_name='Bar')
self.assertEqual(fk_bundle.related_obj, None)
self.assertEqual(fk_bundle.related_name, None)
def test_traversed_attribute_dehydrate(self):
user = User.objects.get(pk=1)
mediabit = MediaBit(note=Note(author=user))
bundle = Bundle(obj=mediabit)
field_1 = ToOneField(UserResource, 'note__author')
field_1.instance_name = 'fk'
self.assertEqual(field_1.dehydrate(bundle), '/api/v1/users/1/')
field_2 = ToOneField(UserResource, 'fakefield__author')
field_2.instance_name = 'fk'
self.assertRaises(ApiFieldError, field_2.hydrate, bundle)
class SubjectResource(ModelResource):
class Meta:
resource_name = 'subjects'
queryset = Subject.objects.all()
def get_resource_uri(self, bundle):
return '/api/v1/subjects/%s/' % bundle.obj.id
class MediaBitResource(ModelResource):
class Meta:
resource_name = 'mediabits'
queryset = MediaBit.objects.all()
def get_resource_uri(self, bundle):
return '/api/v1/mediabits/%s/' % bundle.obj.id
class ToManyFieldTestCase(TestCase):
fixtures = ['note_testdata.json']
urls = 'core.tests.field_urls'
def setUp(self):
self.note_1 = Note.objects.get(pk=1)
self.note_2 = Note.objects.get(pk=2)
self.note_3 = Note.objects.get(pk=3)
self.subject_1 = Subject.objects.create(
name='News',
url='/news/'
)
self.subject_2 = Subject.objects.create(
name='Photos',
url='/photos/'
)
self.subject_3 = Subject.objects.create(
name='Personal Interest',
url='/news/personal-interest/'
)
self.note_1.subjects.add(self.subject_1)
self.note_1.subjects.add(self.subject_2)
self.note_2.subjects.add(self.subject_1)
self.note_2.subjects.add(self.subject_3)
def test_init(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.instance_name, None)
self.assertEqual(issubclass(field_1.to, SubjectResource), True)
self.assertEqual(field_1.attribute, 'subjects')
self.assertEqual(field_1.related_name, None)
self.assertEqual(field_1.null, False)
self.assertEqual(field_1.full, False)
self.assertEqual(field_1.readonly, False)
self.assertEqual(field_1.help_text, 'Many related resources. Can be either a list of URIs or list of individually nested resource data.')
field_2 = ToManyField(SubjectResource, 'subjects', null=True, help_text='Points to many Subjects.')
self.assertEqual(field_2.instance_name, None)
self.assertEqual(issubclass(field_2.to, SubjectResource), True)
self.assertEqual(field_2.attribute, 'subjects')
self.assertEqual(field_2.related_name, None)
self.assertEqual(field_2.null, True)
self.assertEqual(field_2.full, False)
self.assertEqual(field_2.readonly, False)
self.assertEqual(field_2.help_text, 'Points to many Subjects.')
field_3 = ToManyField(SubjectResource, 'subjects', default=1, null=True, help_text='Points to many Subjects.')
self.assertEqual(field_3.instance_name, None)
self.assertEqual(issubclass(field_3.to, SubjectResource), True)
self.assertEqual(field_3.attribute, 'subjects')
self.assertEqual(field_3.related_name, None)
self.assertEqual(field_3.null, True)
self.assertEqual(field_3.default, 1)
self.assertEqual(field_3.full, False)
self.assertEqual(field_3.readonly, False)
self.assertEqual(field_3.help_text, 'Points to many Subjects.')
field_4 = ToManyField(SubjectResource, 'subjects', default=1, null=True, readonly=True, help_text='Points to many Subjects.')
self.assertEqual(field_4.instance_name, None)
self.assertEqual(issubclass(field_4.to, SubjectResource), True)
self.assertEqual(field_4.attribute, 'subjects')
self.assertEqual(field_4.related_name, None)
self.assertEqual(field_4.null, True)
self.assertEqual(field_4.default, 1)
self.assertEqual(field_4.full, False)
self.assertEqual(field_4.readonly, True)
self.assertEqual(field_4.help_text, 'Points to many Subjects.')
def test_dehydrated_type(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.dehydrated_type, 'related')
def test_has_default(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertEqual(field_1.has_default(), False)
field_2 = ToManyField(SubjectResource, 'subjects', default=1)
self.assertEqual(field_2.has_default(), True)
def test_default(self):
field_1 = ToManyField(SubjectResource, 'subjects')
self.assertTrue(isinstance(field_1.default, NOT_PROVIDED))
field_2 = ToManyField(SubjectResource, 'subjects', default=1)
self.assertEqual(field_2.default, 1)
def test_dehydrate(self):
note = Note()
bundle_1 = Bundle(obj=note)
field_1 = ToManyField(SubjectResource, 'subjects')
field_1.instance_name = 'm2m'
try:
# self.assertRaises isn't cooperating here. Do it the hard way.
field_1.dehydrate(bundle_1)
self.fail()
except ApiFieldError:
pass
field_2 = ToManyField(SubjectResource, 'subjects', null=True)
field_2.instance_name = 'm2m'
self.assertEqual(field_2.dehydrate(bundle_1), [])
field_3 = ToManyField(SubjectResource, 'subjects')
field_3.instance_name = 'm2m'
bundle_3 = Bundle(obj=self.note_1)
self.assertEqual(field_3.dehydrate(bundle_3), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
field_4 = ToManyField(SubjectResource, 'subjects', full=True)
field_4.instance_name = 'm2m'
bundle_4 = Bundle(obj=self.note_1)
subject_bundle_list = field_4.dehydrate(bundle_4)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(isinstance(subject_bundle_list[0], Bundle), True)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
self.assertEqual(isinstance(subject_bundle_list[1], Bundle), True)
self.assertEqual(subject_bundle_list[1].data['name'], u'Photos')
self.assertEqual(subject_bundle_list[1].data['url'], u'/photos/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Photos')
self.assertEqual(subject_bundle_list[1].obj.url, u'/photos/')
field_5 = ToManyField(SubjectResource, 'subjects')
field_5.instance_name = 'm2m'
bundle_5 = Bundle(obj=self.note_2)
self.assertEqual(field_5.dehydrate(bundle_5), ['/api/v1/subjects/1/', '/api/v1/subjects/3/'])
field_6 = ToManyField(SubjectResource, 'subjects')
field_6.instance_name = 'm2m'
bundle_6 = Bundle(obj=self.note_3)
self.assertEqual(field_6.dehydrate(bundle_6), [])
try:
# Regression for missing variable initialization.
field_7 = ToManyField(SubjectResource, None)
field_7.instance_name = 'm2m'
bundle_7 = Bundle(obj=self.note_3)
field_7.dehydrate(bundle_7)
self.fail('ToManyField requires an attribute of some type.')
except ApiFieldError:
pass
def test_dehydrate_with_callable(self):
note = Note()
bundle_1 = Bundle(obj=self.note_2)
field_1 = ToManyField(SubjectResource, attribute=lambda bundle: Subject.objects.filter(notes=bundle.obj, name__startswith='Personal'))
field_1.instance_name = 'm2m'
self.assertEqual(field_1.dehydrate(bundle_1), ['/api/v1/subjects/3/'])
def test_hydrate(self):
note = Note.objects.get(pk=1)
bundle = Bundle(obj=note)
# With no value or nullable, we should get an ``ApiFieldError``.
field_1 = ToManyField(SubjectResource, 'subjects')
field_1.instance_name = 'm2m'
self.assertRaises(ApiFieldError, field_1.hydrate_m2m, bundle)
# The nullable case.
field_2 = ToManyField(SubjectResource, 'subjects', null=True)
field_2.instance_name = 'm2m'
empty_bundle = Bundle()
self.assertEqual(field_2.hydrate_m2m(empty_bundle), [])
field_3 = ToManyField(SubjectResource, 'subjects', null=True)
field_3.instance_name = 'm2m'
bundle_3 = Bundle(data={'m2m': []})
self.assertEqual(field_3.hydrate_m2m(bundle_3), [])
# Wrong resource URI.
field_4 = ToManyField(SubjectResource, 'subjects')
field_4.instance_name = 'm2m'
bundle_4 = Bundle(data={'m2m': ['/api/v1/subjects/abc/']})
self.assertRaises(NotFound, field_4.hydrate_m2m, bundle_4)
# A real, live attribute!
field_5 = ToManyField(SubjectResource, 'subjects')
field_5.instance_name = 'm2m'
bundle_5 = Bundle(data={'m2m': ['/api/v1/subjects/1/']})
subject_bundle_list = field_5.hydrate_m2m(bundle_5)
self.assertEqual(len(subject_bundle_list), 1)
self.assertEqual(subject_bundle_list[0].data['name'], u'News')
self.assertEqual(subject_bundle_list[0].data['url'], u'/news/')
self.assertEqual(subject_bundle_list[0].obj.name, u'News')
self.assertEqual(subject_bundle_list[0].obj.url, u'/news/')
field_6 = ToManyField(SubjectResource, 'subjects')
field_6.instance_name = 'm2m'
bundle_6 = Bundle(data={'m2m': [
{
'name': u'Foo',
'url': u'/foo/',
},
{
'name': u'Bar',
'url': u'/bar/',
},
]})
subject_bundle_list = field_6.hydrate_m2m(bundle_6)
self.assertEqual(len(subject_bundle_list), 2)
self.assertEqual(subject_bundle_list[0].data['name'], u'Foo')
self.assertEqual(subject_bundle_list[0].data['url'], u'/foo/')
self.assertEqual(subject_bundle_list[0].obj.name, u'Foo')
self.assertEqual(subject_bundle_list[0].obj.url, u'/foo/')
self.assertEqual(subject_bundle_list[1].data['name'], u'Bar')
self.assertEqual(subject_bundle_list[1].data['url'], u'/bar/')
self.assertEqual(subject_bundle_list[1].obj.name, u'Bar')
self.assertEqual(subject_bundle_list[1].obj.url, u'/bar/')
# The blank case.
field_7 = ToManyField(SubjectResource, 'fk', blank=True)
field_7.instance_name = 'fk'
self.assertEqual(field_7.hydrate(bundle_6), None)
field_8 = ToManyField(SubjectResource, 'm2m', blank=True)
field_8.instance_name = 'm2m'
subject_bundle_list_2 = field_8.hydrate_m2m(bundle_6)
self.assertEqual(len(subject_bundle_list_2), 2)
self.assertEqual(subject_bundle_list_2[0].data['name'], u'Foo')
self.assertEqual(subject_bundle_list_2[0].data['url'], u'/foo/')
self.assertEqual(subject_bundle_list_2[0].obj.name, u'Foo')
self.assertEqual(subject_bundle_list_2[0].obj.url, u'/foo/')
self.assertEqual(subject_bundle_list_2[1].data['name'], u'Bar')
self.assertEqual(subject_bundle_list_2[1].data['url'], u'/bar/')
self.assertEqual(subject_bundle_list_2[1].obj.name, u'Bar')
self.assertEqual(subject_bundle_list_2[1].obj.url, u'/bar/')
# The readonly case.
field_9 = ToManyField(SubjectResource, 'subjects', readonly=True)
field_9.instance_name = 'm2m'
self.assertEqual(field_9.hydrate(bundle_6), None)
# A related object.
field_10 = ToManyField(MediaBitResource, 'media_bits', related_name='note')
field_10.instance_name = 'mbs'
note_1 = Note.objects.get(pk=1)
bundle_10 = Bundle(obj=note_1, data={'mbs': [
{
'title': 'Foo!',
},
]})
media_bundle_list = field_10.hydrate_m2m(bundle_10)
self.assertEqual(len(media_bundle_list), 1)
self.assertEqual(media_bundle_list[0].obj.title, u'Foo!')
def test_traversed_attribute_dehydrate(self):
mediabit = MediaBit(id=1, note=self.note_1)
bundle = Bundle(obj=mediabit)
field_1 = ToManyField(SubjectResource, 'note__subjects')
field_1.instance_name = 'm2m'
self.assertEqual(field_1.dehydrate(bundle), ['/api/v1/subjects/1/', '/api/v1/subjects/2/'])
field_2 = ToOneField(SubjectResource, 'fakefield__subjects')
field_2.instance_name = 'm2m'
self.assertRaises(ApiFieldError, field_2.hydrate, bundle)
|
|
import uuid
from collections import defaultdict
from datetime import datetime
from functools import partial
from django.db import models, transaction
from django.db.models import Q
import jsonfield
from django_bulk_update.helper import bulk_update as bulk_update_helper
from django_cte import CTEQuerySet
from memoized import memoized
from corehq.apps.domain.models import Domain
from corehq.apps.locations.adjacencylist import AdjListManager, AdjListModel
from corehq.apps.products.models import SQLProduct
from corehq.form_processor.exceptions import CaseNotFound
from corehq.form_processor.interfaces.supply import SupplyInterface
class LocationTypeManager(models.Manager):
def full_hierarchy(self, domain):
"""
Returns a graph of the form
{
'<loc_type_id>: (
loc_type,
{'<child_loc_type_id>': (child_loc_type, [...])}
)
}
"""
hierarchy = {}
def insert_loc_type(loc_type):
"""
Get parent location's hierarchy, insert loc_type into it, and return
hierarchy below loc_type
"""
if not loc_type.parent_type:
lt_hierarchy = hierarchy
else:
lt_hierarchy = insert_loc_type(loc_type.parent_type)
if loc_type.id not in lt_hierarchy:
lt_hierarchy[loc_type.id] = (loc_type, {})
return lt_hierarchy[loc_type.id][1]
for loc_type in self.filter(domain=domain).all():
insert_loc_type(loc_type)
return hierarchy
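    # Illustrative sketch of the returned structure (hypothetical types
    # "state" -> "district" -> "clinic"; the ids are made up):
    #
    #   {
    #       1: (state, {
    #           2: (district, {
    #               3: (clinic, {}),
    #           }),
    #       }),
    #   }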
def by_domain(self, domain):
"""
Sorts location types by hierarchy
"""
ordered_loc_types = []
def step_through_graph(hierarchy):
for _, (loc_type, children) in hierarchy.items():
ordered_loc_types.append(loc_type)
step_through_graph(children)
step_through_graph(self.full_hierarchy(domain))
return ordered_loc_types
StockLevelField = partial(models.DecimalField, max_digits=10, decimal_places=1)
@memoized
def stock_level_config_for_domain(domain, commtrack_enabled):
if not commtrack_enabled:
return None
from corehq.apps.commtrack.models import CommtrackConfig
ct_config = CommtrackConfig.for_domain(domain)
if ct_config is None or not hasattr(ct_config, 'stocklevelsconfig'):
return None
else:
return ct_config.stocklevelsconfig
class LocationType(models.Model):
domain = models.CharField(max_length=255, db_index=True)
name = models.CharField(max_length=255)
code = models.SlugField(db_index=False, null=True)
parent_type = models.ForeignKey('self', null=True, on_delete=models.CASCADE)
administrative = models.BooleanField(default=False)
shares_cases = models.BooleanField(default=False)
view_descendants = models.BooleanField(default=False)
# Sync optimization controls
_expand_from = models.ForeignKey(
'self',
null=True,
related_name='+',
db_column='expand_from',
on_delete=models.CASCADE,
) # levels below this location type that we start expanding from
_expand_from_root = models.BooleanField(default=False, db_column='expand_from_root')
expand_to = models.ForeignKey('self', null=True, related_name='+', on_delete=models.CASCADE) # levels above this type that are synced
include_without_expanding = models.ForeignKey(
'self',
null=True,
related_name='+',
on_delete=models.SET_NULL,
) # include all levels of this type and their ancestors
# If specified, include only the linked types
include_only = models.ManyToManyField('self', symmetrical=False, related_name='included_in')
last_modified = models.DateTimeField(auto_now=True, db_index=True)
has_user = models.BooleanField(default=False)
emergency_level = StockLevelField(default=0.5)
understock_threshold = StockLevelField(default=1.5)
overstock_threshold = StockLevelField(default=3.0)
objects = LocationTypeManager()
class Meta(object):
app_label = 'locations'
unique_together = (
('domain', 'code'),
('domain', 'name'),
)
def __init__(self, *args, **kwargs):
super(LocationType, self).__init__(*args, **kwargs)
self._administrative_old = self.administrative
@property
def expand_from(self):
return self._expand_from
@expand_from.setter
def expand_from(self, value):
if self._expand_from_root is True:
self._expand_from_root = False
self._expand_from = value
@property
def expand_from_root(self):
return self._expand_from_root
@expand_from_root.setter
def expand_from_root(self, value):
if self._expand_from_root is False and value is True:
self._expand_from = None
self._expand_from_root = value
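    # A minimal sketch of the setter interplay above (hypothetical objects):
    # only one expansion starting point can be active at a time.
    #
    #   loc_type.expand_from_root = True    # clears expand_from
    #   loc_type.expand_from = state_type   # flips expand_from_root back to False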
@property
@memoized
def commtrack_enabled(self):
return Domain.get_by_name(self.domain).commtrack_enabled
def _populate_stock_levels(self, config):
self.emergency_level = config.emergency_level
self.understock_threshold = config.understock_threshold
self.overstock_threshold = config.overstock_threshold
def save(self, *args, **kwargs):
if not self.code:
from corehq.apps.commtrack.util import unicode_slug
self.code = unicode_slug(self.name)
if not self.commtrack_enabled:
self.administrative = True
config = stock_level_config_for_domain(self.domain, self.commtrack_enabled)
if config:
self._populate_stock_levels(config)
is_not_first_save = self.pk is not None
super(LocationType, self).save(*args, **kwargs)
if is_not_first_save:
self.sync_administrative_status()
def sync_administrative_status(self, sync_supply_points=True):
from .tasks import sync_administrative_status
if self._administrative_old != self.administrative:
if sync_supply_points:
sync_administrative_status.delay(self)
self._administrative_old = self.administrative
def __str__(self):
return self.name
def __repr__(self):
return "LocationType(domain='{}', name='{}', administrative={})".format(
self.domain,
self.name,
self.administrative,
)
@property
@memoized
def can_have_children(self):
return LocationType.objects.filter(parent_type=self).exists()
@classmethod
def _pre_bulk_save(cls, objects):
if not objects:
return
commtrack_enabled = objects[0].commtrack_enabled
if not commtrack_enabled:
for o in objects:
o.administrative = True
domain = objects[0].domain
stock_config = stock_level_config_for_domain(domain, commtrack_enabled)
if stock_config:
for o in objects:
o._populate_stock_levels(stock_config)
@classmethod
def bulk_create(cls, objects):
# 'objects' is a list of new LocationType objects to be created
if not objects:
return []
cls._pre_bulk_save(objects)
cls.objects.bulk_create(objects)
return list(objects)
@classmethod
def bulk_update(cls, objects):
# 'objects' is a list of existing LocationType objects to be updated
# Note: this is tightly coupled with .bulk_management.NewLocationImporter.bulk_commit()
# so it can't be used on its own
cls._pre_bulk_save(objects)
now = datetime.utcnow()
for o in objects:
o.last_modified = now
# the caller should call 'sync_administrative_status' for individual objects
bulk_update_helper(objects)
@classmethod
def bulk_delete(cls, objects):
# Given a list of existing SQL objects, bulk delete them
if not objects:
return
ids = [o.id for o in objects]
cls.objects.filter(id__in=ids).delete()
class LocationQueriesMixin(object):
def location_ids(self):
return self.values_list('location_id', flat=True)
def accessible_to_user(self, domain, user):
if user.has_permission(domain, 'access_all_locations'):
return self.filter(domain=domain)
assigned_location_ids = user.get_location_ids(domain)
if not assigned_location_ids:
return self.none() # No locations are assigned to this user
return SQLLocation.objects.get_locations_and_children(assigned_location_ids)
def delete(self, *args, **kwargs):
from .document_store import publish_location_saved
for domain, location_id in self.values_list('domain', 'location_id'):
publish_location_saved(domain, location_id, is_deletion=True)
return super(LocationQueriesMixin, self).delete(*args, **kwargs)
class LocationQuerySet(LocationQueriesMixin, CTEQuerySet):
def accessible_to_user(self, domain, user):
ids_query = super(LocationQuerySet, self).accessible_to_user(domain, user)
return self.filter(id__in=ids_query)
class LocationManager(LocationQueriesMixin, AdjListManager):
def get_or_None(self, **kwargs):
try:
return self.get(**kwargs)
except SQLLocation.DoesNotExist:
return None
def get_queryset(self):
return LocationQuerySet(self.model, using=self._db)
def get_from_user_input(self, domain, user_input):
"""
First check by site-code, if that fails, fall back to name.
Note that name lookup may raise MultipleObjectsReturned.
"""
try:
return self.get(domain=domain, site_code=user_input)
except self.model.DoesNotExist:
return self.get(domain=domain, name__iexact=user_input)
def filter_by_user_input(self, domain, user_input):
"""
Returns a queryset based on user input
- Matching happens by name or site-code
- Adding a slash to the input string starts a new search node among descendants
- Matching is partial unless the query node is wrapped in quotes
Refer to TestFilterByUserInput for example usages.
"""
query = None
for part in user_input.split('/'):
query = self.get_queryset_descendants(query) if query is not None else self
if part:
if part.startswith('"') and part.endswith('"'):
query = query.filter(name__iexact=part[1:-1])
else:
part = part.lstrip('"')
query = query.filter(
Q(name__icontains=part) | Q(site_code__icontains=part)
)
return query
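    # Illustrative inputs (hypothetical names and site codes;
    # TestFilterByUserInput holds the authoritative cases):
    #
    #   filter_by_user_input(domain, 'bos')
    #       -> partial match on name or site code (e.g. "Boston")
    #   filter_by_user_input(domain, '"Boston"')
    #       -> exact (case-insensitive) name match only
    #   filter_by_user_input(domain, 'massachusetts/bos')
    #       -> matches for "bos" among descendants of the "massachusetts" matches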
def get_locations(self, location_ids):
return self.filter(location_id__in=location_ids)
def get_locations_and_children(self, location_ids):
"""
Takes a set of location ids and returns a django queryset of those
locations and their children.
"""
locations = self.filter(location_id__in=location_ids)
return self.get_queryset_descendants(locations, include_self=True)
def get_locations_and_children_ids(self, location_ids):
return list(self.get_locations_and_children(location_ids).location_ids())
class OnlyUnarchivedLocationManager(LocationManager):
def get_queryset(self):
return (super(OnlyUnarchivedLocationManager, self).get_queryset()
.filter(is_archived=False))
def accessible_location_ids(self, domain, user):
return list(self.accessible_to_user(domain, user).location_ids())
class SQLLocation(AdjListModel):
domain = models.CharField(max_length=255, db_index=True)
name = models.CharField(max_length=255, null=True)
location_id = models.CharField(max_length=100, db_index=True, unique=True)
location_type = models.ForeignKey(LocationType, on_delete=models.CASCADE)
site_code = models.CharField(max_length=255)
external_id = models.CharField(max_length=255, null=True, blank=True)
metadata = jsonfield.JSONField(default=dict, blank=True)
created_at = models.DateTimeField(auto_now_add=True)
last_modified = models.DateTimeField(auto_now=True, db_index=True)
is_archived = models.BooleanField(default=False)
archived_on = models.DateTimeField(null=True, blank=True)
latitude = models.DecimalField(max_digits=20, decimal_places=10, null=True, blank=True)
longitude = models.DecimalField(max_digits=20, decimal_places=10, null=True, blank=True)
parent = models.ForeignKey('self', null=True, blank=True, related_name='children', on_delete=models.CASCADE)
    # Use the ``products`` getter and setter below to access this value:
    # when ``stocks_all_products`` is True, an empty M2M set may be stored
    # for a location that actually has all of the domain's products
    # available.
_products = models.ManyToManyField(SQLProduct)
stocks_all_products = models.BooleanField(default=True)
supply_point_id = models.CharField(max_length=255, db_index=True, unique=True, null=True, blank=True)
# No longer used. Should be removed once all references have been tracked down and removed
user_id = models.CharField(max_length=255, blank=True)
objects = _tree_manager = LocationManager()
# This should really be the default location manager
active_objects = OnlyUnarchivedLocationManager()
def get_ancestor_of_type(self, type_code):
"""
        Returns this location's ancestor of the given location type code,
        or None if there is no such ancestor.
"""
try:
return self.get_ancestors().get(location_type__code=type_code)
except self.DoesNotExist:
return None
@classmethod
def get_sync_fields(cls):
return ["domain", "name", "site_code", "external_id",
"metadata", "is_archived"]
def save(self, *args, **kwargs):
from corehq.apps.commtrack.models import sync_supply_point
from .document_store import publish_location_saved
if not self.location_id:
self.location_id = uuid.uuid4().hex
with transaction.atomic():
set_site_code_if_needed(self)
sync_supply_point(self)
super(SQLLocation, self).save(*args, **kwargs)
publish_location_saved(self.domain, self.location_id)
def delete(self, *args, **kwargs):
"""Delete this location and all descentants
Supply point cases and user updates are performed asynchronously.
"""
from .tasks import update_users_at_locations
from .document_store import publish_location_saved
to_delete = self.get_descendants(include_self=True)
for loc in to_delete:
loc._remove_user()
super(SQLLocation, self).delete(*args, **kwargs)
update_users_at_locations.delay(
self.domain,
[loc.location_id for loc in to_delete],
[loc.supply_point_id for loc in to_delete if loc.supply_point_id],
list(self.get_ancestors().location_ids()),
)
for loc in to_delete:
publish_location_saved(loc.domain, loc.location_id, is_deletion=True)
full_delete = delete
def get_descendants(self, include_self=False, **kwargs):
if include_self:
where = Q(domain=self.domain, id=self.id)
else:
where = Q(domain=self.domain, parent_id=self.id)
return SQLLocation.objects.get_descendants(
where, **kwargs
)
def get_ancestors(self, include_self=False, **kwargs):
where = Q(domain=self.domain, id=self.id if include_self else self.parent_id)
return SQLLocation.objects.get_ancestors(
where, **kwargs
)
@classmethod
def bulk_delete(cls, locations, ancestor_location_ids):
"""Bulk delete the given locations and update their ancestors
        WARNING: databases may be left in an inconsistent state if the
transaction in which this deletion is performed is rolled back.
This method mutates other databases that will not be reverted on
transaction rollback.
:param locations: A list of SQLLocation objects. All locations
in the list are expected to be leaf nodes or parents of nodes
that are also in the list. Behavior of passing a non-leaf node
without also passing all of its descendants is undefined.
:param ancestor_location_ids: A list of ancestor `location_id`s
for the given `locations`.
"""
from .tasks import update_users_at_locations
from .document_store import publish_location_saved
if not locations:
return
if len(set(loc.domain for loc in locations)) != 1:
raise ValueError("cannot bulk delete locations for multiple domains")
cls.objects.filter(id__in=[loc.id for loc in locations]).delete()
# NOTE _remove_user() not called here. No domains were using
# SQLLocation.user_id at the time this was written, and that
# field is slated for removal.
update_users_at_locations.delay(
locations[0].domain,
[loc.location_id for loc in locations],
[loc.supply_point_id for loc in locations if loc.supply_point_id],
ancestor_location_ids,
)
for loc in locations:
publish_location_saved(loc.domain, loc.location_id, is_deletion=True)
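    # Hedged usage sketch (hypothetical caller): pass a closed set of
    # locations (each parent together with all of its descendants) plus the
    # ancestor ids to notify, mirroring what ``delete()`` above computes.
    #
    #   doomed = list(some_location.get_descendants(include_self=True))
    #   ancestor_ids = list(some_location.get_ancestors().location_ids())
    #   SQLLocation.bulk_delete(doomed, ancestor_ids)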
def to_json(self, include_lineage=True):
json_dict = {
'name': self.name,
'site_code': self.site_code,
'_id': self.location_id,
'location_id': self.location_id,
'doc_type': 'Location',
'domain': self.domain,
'external_id': self.external_id,
'is_archived': self.is_archived,
'archived_on': self.archived_on.isoformat() if self.archived_on else None,
'last_modified': self.last_modified.isoformat(),
'latitude': float(self.latitude) if self.latitude else None,
'longitude': float(self.longitude) if self.longitude else None,
'metadata': self.metadata,
'location_type': self.location_type.name,
'location_type_code': self.location_type.code,
'parent_location_id': self.parent_location_id,
}
if include_lineage:
# lineage requires a non-trivial db hit
json_dict['lineage'] = self.lineage
return json_dict
@property
def lineage(self):
return list(reversed(self.path[:-1]))
_id = property(lambda self: self.location_id)
get_id = property(lambda self: self.location_id)
@property
def products(self):
"""
If there are no products specified for this location, assume all
products for the domain are relevant.
"""
if self.stocks_all_products:
return SQLProduct.by_domain(self.domain)
else:
return self._products.all()
@products.setter
def products(self, value):
# this will set stocks_all_products to true if the user
# has added all products in the domain to this location
self.stocks_all_products = (set(value) ==
set(SQLProduct.by_domain(self.domain)))
self._products.set(value)
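    # Sketch of the getter/setter behaviour above (hypothetical products):
    #
    #   loc.products = list(SQLProduct.by_domain(loc.domain))
    #   loc.stocks_all_products   # True; the getter now returns every domain product
    #
    #   loc.products = [soap]
    #   loc.stocks_all_products   # False; the getter returns only the linked products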
def _remove_user(self):
"""
        Deactivates the user (if any) linked to this location via ``user_id``.
        Used by both the archive and delete methods.
"""
if self.user_id:
from corehq.apps.users.models import CommCareUser
user = CommCareUser.get(self.user_id)
user.active = False
user.save()
def archive(self):
"""
        Marks this location and its descendants as archived and unassigns the
        users assigned to those locations.
"""
from .tasks import update_users_at_locations
locations = self.get_descendants(include_self=True)
for loc in locations:
loc.is_archived = True
loc.archived_on = datetime.utcnow()
loc.save()
loc._remove_user()
update_users_at_locations.delay(
self.domain,
[loc.location_id for loc in locations],
[loc.supply_point_id for loc in locations if loc.supply_point_id],
list(self.get_ancestors().location_ids()),
)
def unarchive(self):
"""
        Unarchives this location, its descendants and ancestors, and reopens
        the supply point case if it exists.
"""
import itertools
from corehq.apps.users.models import CommCareUser
for loc in itertools.chain(self.get_descendants(include_self=True), self.get_ancestors()):
loc.is_archived = False
loc.archived_on = None
loc.save()
if loc.user_id:
user = CommCareUser.get(loc.user_id)
user.active = True
user.save()
class Meta(object):
app_label = 'locations'
unique_together = ('domain', 'site_code',)
def __str__(self):
return "{} ({})".format(self.name, self.domain)
def __repr__(self):
return "SQLLocation(domain='{}', name='{}', location_type='{}')".format(
self.domain,
self.name,
self.location_type.name if hasattr(self, 'location_type') else None,
)
@property
def display_name(self):
return "{} [{}]".format(self.name, self.location_type.name)
def archived_descendants(self):
"""
Returns a list of archived descendants for this location.
"""
return self.get_descendants().filter(is_archived=True)
def child_locations(self, include_archive_ancestors=False):
"""
Returns a list of this location's children.
"""
children = self.get_children()
return filter_for_archived(children, include_archive_ancestors)
@classmethod
def root_locations(cls, domain, include_archive_ancestors=False):
roots = cls.objects.root_nodes().filter(domain=domain)
return filter_for_archived(roots, include_archive_ancestors)
def get_path_display(self):
return '/'.join(self.get_ancestors(include_self=True)
.values_list('name', flat=True))
def case_sharing_group_object(self, user_id=None):
"""
Returns a fake group object that cannot be saved.
This is used for giving users access via case sharing groups, without
having a real group for every location that we have to manage/hide.
"""
from corehq.apps.groups.models import UnsavableGroup
group = UnsavableGroup(
domain=self.domain,
users=[user_id] if user_id else [],
last_modified=datetime.utcnow(),
name=self.get_path_display() + '-Cases',
_id=self.location_id,
case_sharing=True,
reporting=False,
metadata={
'commcare_location_type': self.location_type.name,
'commcare_location_name': self.name,
},
)
for key, val in self.metadata.items():
group.metadata['commcare_location_' + key] = val
return group
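    # Hedged example (hypothetical location and user id): the returned group
    # mirrors the location but, per the docstring above, is never persisted.
    #
    #   group = loc.case_sharing_group_object(user_id='abc123')
    #   group._id == loc.location_id   # True
    #   group.case_sharing             # True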
def is_direct_ancestor_of(self, location):
return (location.get_ancestors(include_self=True)
.filter(pk=self.pk).exists())
@classmethod
def by_domain(cls, domain):
return cls.objects.filter(domain=domain)
@property
def path(self):
try:
return self._path
except AttributeError:
self._path = list(self.get_ancestors(include_self=True).location_ids())
return self._path
@path.setter
def path(self, value):
self._path = value
@classmethod
def by_location_id(cls, location_id):
try:
return cls.objects.get(location_id=location_id)
except cls.DoesNotExist:
return None
    # For quick_find compatibility
by_id = by_location_id
def linked_supply_point(self):
if not self.supply_point_id:
return None
try:
return SupplyInterface(self.domain).get_supply_point(self.supply_point_id)
except CaseNotFound:
return None
@property
def parent_location_id(self):
return self.parent.location_id if self.parent else None
@property
def location_type_object(self):
return self.location_type
@property
def location_type_name(self):
return self.location_type.name
@property
def sql_location(self):
        # For backwards compatibility
return self
def filter_for_archived(locations, include_archive_ancestors):
"""
Perform filtering on a location queryset.
include_archive_ancestors toggles between selecting only active
children and selecting any child that is archived or has
archived descendants.
"""
if include_archive_ancestors:
return [
item for item in locations
if item.is_archived or item.archived_descendants()
]
else:
return locations.filter(is_archived=False)
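# Illustrative behaviour (hypothetical queryset of children):
#
#   filter_for_archived(children, include_archive_ancestors=False)
#       -> queryset of unarchived children only
#   filter_for_archived(children, include_archive_ancestors=True)
#       -> list of children that are archived or have archived descendants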
def make_location(**kwargs):
"""API compatabile with `Location.__init__`, but returns a SQLLocation"""
loc_type_name = kwargs.pop('location_type')
try:
sql_location_type = LocationType.objects.get(
domain=kwargs['domain'],
name=loc_type_name,
)
except LocationType.DoesNotExist:
msg = "You can't create a location without a real location type"
raise LocationType.DoesNotExist(msg)
kwargs['location_type'] = sql_location_type
parent = kwargs.pop('parent', None)
kwargs['parent'] = parent.sql_location if parent else None
return SQLLocation(**kwargs)
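# Hedged usage sketch (hypothetical values); the kwargs mirror
# ``Location.__init__`` as described in the docstring above:
#
#   loc = make_location(
#       domain='example-domain',
#       name='Boston Clinic',
#       location_type='clinic',   # must name an existing LocationType
#       parent=parent_location,   # may be None
#   )
#   loc.save()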
def get_location(location_id, domain=None):
"""Drop-in replacement for `Location.get`, but returns a SQLLocation"""
if domain:
return SQLLocation.objects.get(domain=domain, location_id=location_id)
else:
return SQLLocation.objects.get(location_id=location_id)
def set_site_code_if_needed(location):
from corehq.apps.commtrack.util import generate_code
if not location.site_code:
all_codes = [
code.lower() for code in
(SQLLocation.objects.exclude(location_id=location.location_id)
.filter(domain=location.domain)
.values_list('site_code', flat=True))
]
location.site_code = generate_code(location.name, all_codes)
class LocationFixtureConfiguration(models.Model):
domain = models.CharField(primary_key=True, max_length=255)
sync_flat_fixture = models.BooleanField(default=True)
sync_hierarchical_fixture = models.BooleanField(default=True)
def __repr__(self):
return '{}: flat: {}, hierarchical: {}'.format(
self.domain, self.sync_flat_fixture, self.sync_hierarchical_fixture
)
@classmethod
def for_domain(cls, domain):
try:
return cls.objects.get(domain=domain)
except cls.DoesNotExist:
return cls(domain=domain)
def get_case_sharing_groups_for_locations(locations, for_user_id=None):
    # safety check to make sure all locations belong to the same domain
assert len(set([l.domain for l in locations])) < 2
for location in locations:
if location.location_type.shares_cases:
yield location.case_sharing_group_object(for_user_id)
location_ids = [l.pk for l in locations if l.location_type.view_descendants]
descendants = []
if location_ids:
where = Q(domain=locations[0].domain, parent_id__in=location_ids)
descendants = SQLLocation.objects.get_queryset_descendants(where).filter(
location_type__shares_cases=True, is_archived=False)
for loc in descendants:
yield loc.case_sharing_group_object(for_user_id)
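# Illustrative call (hypothetical inputs): yields one unsaved group per
# case-sharing location passed in, plus groups for unarchived case-sharing
# descendants of any location whose type has ``view_descendants`` set.
#
#   groups = list(get_case_sharing_groups_for_locations(user_locations, user_id))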
|
|
"""
@author: Bryan Silverthorn <[email protected]>
"""
__all__ = [
"CoercionError",
"Value",
]
import numpy
import qy
import qy.llvm as llvm
class CoercionError(TypeError):
"""
    Failed to coerce a value to another type.
"""
def __init__(self, from_type, to_type):
"""
Initialize.
"""
TypeError.__init__(
self,
"don't know how to convert from %s to %s" % (from_type, to_type),
)
class Value(object):
"""
Value in the wrapper language.
"""
def __init__(self, value):
"""
Initialize.
"""
if not isinstance(value, llvm.Value):
raise TypeError("Value constructor requires an LLVM value")
elif self.kind is not None and value.type.kind != self.kind:
            raise TypeError(
                "cannot construct an %s instance from a %s value" % (
                    type(self).__name__,
                    type(value).__name__,
                ),
            )
self._value = value
def __str__(self):
"""
Return a readable string representation of this value.
"""
return str(self._value)
def __repr__(self):
"""
Return a parseable string representation of this value.
"""
return "Value.from_low(%s)" % repr(self._value)
def __lt__(self, other):
"""
The "<" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __le__(self, other):
"""
The "<=" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __gt__(self, other):
"""
The ">" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __ge__(self, other):
"""
The ">=" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __eq__(self, other):
"""
The "==" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __ne__(self, other):
"""
The "!=" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __add__(self, other):
"""
The "+" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __sub__(self, other):
"""
The "-" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __mul__(self, other):
"""
The "*" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __div__(self, other):
"""
The "/" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __floordiv__(self, other):
"""
The "//" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __mod__(self, other):
"""
The "%" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __divmod__(self, other):
"""
The "divmod" operation is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __pow__(self, other):
"""
The "**" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __and__(self, other):
"""
The "&" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __xor__(self, other):
"""
The "^" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __or__(self, other):
"""
The "|" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __lshift__(self, other):
"""
The "<<" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __rshift__(self, other):
"""
The ">>" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __neg__(self):
"""
The unary "-" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __pos__(self):
"""
The unary "+" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __abs__(self):
"""
The "abs" operation is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __invert__(self):
"""
The "~" operator is not defined for this value.
"""
raise TypeError("%s value does not define this operator" % type(self).__name__)
def __radd__(self, other):
"""
Apply the "+" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other + self
def __rsub__(self, other):
"""
Apply the "-" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other - self
def __rmul__(self, other):
"""
Apply the "*" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other * self
def __rdiv__(self, other):
"""
Apply the "/" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other / self
def __rmod__(self, other):
"""
Apply the "%" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other % self
def __rdivmod__(self, other):
"""
Apply the "divmod" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return divmod(other, self)
def __rpow__(self, other):
"""
Apply the "**" operator.
"""
raise TypeError("%s value does not have right-operator ** defined" % type(self).__name__)
def __rlshift__(self, other):
"""
Apply the "<<" operator.
"""
raise TypeError("%s value does not have right-operator << defined" % type(self).__name__)
def __rrshift__(self, other):
"""
Apply the ">>" operator.
"""
raise TypeError("%s value does not have right-operator >> defined" % type(self).__name__)
def __rand__(self, other):
"""
Apply the "&" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other & self
def __rxor__(self, other):
"""
Apply the "^" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other ^ self
def __ror__(self, other):
"""
Apply the "|" operator.
"""
other = qy.value_from_any(other).cast_to(self.type_)
return other | self
def store(self, pointer):
"""
Store this value to the specified pointer.
"""
return qy.get().builder.store(self._value, pointer._value)
@property
def low(self):
"""
The associated LLVM value.
"""
return self._value
@property
def type_(self):
"""
The type of the associated LLVM value.
"""
return self._value.type
@property
def kind(self):
"""
Enum describing the general kind of this value, or None.
"""
return None
@staticmethod
def from_any(value):
"""
Build a Qy value from some value.
"""
if isinstance(value, Value):
return value
elif isinstance(value, llvm.Value):
return Value.from_low(value)
elif isinstance(value, bool):
# note: bool is handled before int, since bool is a subclass of int;
# otherwise True/False would silently become word-sized integer constants
return Value.from_low(llvm.Constant.int(llvm.Type.int(1), int(value)))
elif isinstance(value, int):
return \
Value.from_low(
llvm.Constant.int(
llvm.Type.int(numpy.dtype(int).itemsize * 8),
int(value),
),
)
elif isinstance(value, long):
return \
Value.from_low(
llvm.Constant.int(
llvm.Type.int(numpy.dtype(long).itemsize * 8),
long(value),
),
)
elif isinstance(value, float):
return \
Value.from_low(
llvm.Constant.real(llvm.Type.double(), value),
)
else:
raise TypeError("cannot build value from \"%s\" instance" % type(value))
@staticmethod
def from_low(value):
"""
Build a Qy value from an LLVM value.
"""
# sanity
if not isinstance(value, llvm.Value):
raise TypeError("value is not an LLVM value")
# generate an appropriate value type
if value.type.kind == llvm.TYPE_INTEGER:
return qy.IntegerValue(value)
elif value.type.kind == llvm.TYPE_DOUBLE:
return qy.RealValue(value)
elif value.type.kind == llvm.TYPE_POINTER:
return qy.PointerValue(value)
else:
return qy.Value(value)
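# A minimal, self-contained sketch (not part of qy) of the coercion pattern the
# reflected operators above (__radd__, __rsub__, ...) rely on: coerce the
# right-hand operand to the left-hand operand's type, then re-apply the normal
# operator. The _Quantity class below is purely illustrative.
class _Quantity(object):
    """Toy value type demonstrating "coerce, then delegate" reflected operators.

    >>> (2 + _Quantity(3)).value
    5.0
    """

    def __init__(self, value):
        self.value = float(value)

    @classmethod
    def _coerce(cls, other):
        return other if isinstance(other, cls) else cls(other)

    def __add__(self, other):
        other = self._coerce(other)
        return _Quantity(self.value + other.value)

    def __radd__(self, other):
        # mirrors Value.__radd__ above: coerce `other`, then reuse the forward "+"
        return self._coerce(other) + self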
|
|
"""Support for Nexia / Trane XL Thermostats."""
from __future__ import annotations
from nexia.const import UNIT_CELSIUS
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import DOMAIN
from .coordinator import NexiaDataUpdateCoordinator
from .entity import NexiaThermostatEntity, NexiaThermostatZoneEntity
from .util import percent_conv
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up sensors for a Nexia device."""
coordinator: NexiaDataUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
nexia_home = coordinator.nexia_home
entities: list[NexiaThermostatEntity] = []
# Thermostat / System Sensors
for thermostat_id in nexia_home.get_thermostat_ids():
thermostat = nexia_home.get_thermostat_by_id(thermostat_id)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_system_status",
"System Status",
None,
None,
None,
)
)
# Air cleaner
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_air_cleaner_mode",
"Air Cleaner Mode",
None,
None,
None,
)
)
# Compressor Speed
if thermostat.has_variable_speed_compressor():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_current_compressor_speed",
"Current Compressor Speed",
None,
PERCENTAGE,
SensorStateClass.MEASUREMENT,
percent_conv,
)
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_requested_compressor_speed",
"Requested Compressor Speed",
None,
PERCENTAGE,
SensorStateClass.MEASUREMENT,
percent_conv,
)
)
# Outdoor Temperature
if thermostat.has_outdoor_temperature():
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_outdoor_temperature",
"Outdoor Temperature",
SensorDeviceClass.TEMPERATURE,
unit,
SensorStateClass.MEASUREMENT,
)
)
# Relative Humidity
if thermostat.has_relative_humidity():
entities.append(
NexiaThermostatSensor(
coordinator,
thermostat,
"get_relative_humidity",
"Relative Humidity",
SensorDeviceClass.HUMIDITY,
PERCENTAGE,
SensorStateClass.MEASUREMENT,
percent_conv,
)
)
# Zone Sensors
for zone_id in thermostat.get_zone_ids():
zone = thermostat.get_zone_by_id(zone_id)
unit = (
TEMP_CELSIUS
if thermostat.get_unit() == UNIT_CELSIUS
else TEMP_FAHRENHEIT
)
# Temperature
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_temperature",
"Temperature",
SensorDeviceClass.TEMPERATURE,
unit,
SensorStateClass.MEASUREMENT,
None,
)
)
# Zone Status
entities.append(
NexiaThermostatZoneSensor(
coordinator, zone, "get_status", "Zone Status", None, None, None
)
)
# Setpoint Status
entities.append(
NexiaThermostatZoneSensor(
coordinator,
zone,
"get_setpoint_status",
"Zone Setpoint Status",
None,
None,
None,
)
)
async_add_entities(entities)
class NexiaThermostatSensor(NexiaThermostatEntity, SensorEntity):
"""Provides Nexia thermostat sensor support."""
def __init__(
self,
coordinator,
thermostat,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
state_class,
modifier=None,
):
"""Initialize the sensor."""
super().__init__(
coordinator,
thermostat,
name=f"{thermostat.get_name()} {sensor_name}",
unique_id=f"{thermostat.thermostat_id}_{sensor_call}",
)
self._call = sensor_call
self._modifier = modifier
self._attr_device_class = sensor_class
self._attr_native_unit_of_measurement = sensor_unit
self._attr_state_class = state_class
@property
def native_value(self):
"""Return the state of the sensor."""
val = getattr(self._thermostat, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
class NexiaThermostatZoneSensor(NexiaThermostatZoneEntity, SensorEntity):
"""Nexia Zone Sensor Support."""
def __init__(
self,
coordinator,
zone,
sensor_call,
sensor_name,
sensor_class,
sensor_unit,
state_class,
modifier=None,
):
"""Create a zone sensor."""
super().__init__(
coordinator,
zone,
name=f"{zone.get_name()} {sensor_name}",
unique_id=f"{zone.zone_id}_{sensor_call}",
)
self._call = sensor_call
self._modifier = modifier
self._attr_device_class = sensor_class
self._attr_native_unit_of_measurement = sensor_unit
self._attr_state_class = state_class
@property
def native_value(self):
"""Return the state of the sensor."""
val = getattr(self._zone, self._call)()
if self._modifier:
val = self._modifier(val)
if isinstance(val, float):
val = round(val, 1)
return val
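# Minimal sketch (not part of this integration) of the value pipeline used by
# native_value above: call the getter, apply an optional modifier, then round
# floats to one decimal place. The percent-style modifier in the doctest is an
# assumption about what a helper like percent_conv does (scale 0-1 to 0-100).
def _render_sensor_value(getter, modifier=None):
    """Return a display value the way native_value does.

    >>> _render_sensor_value(lambda: 0.4567, modifier=lambda v: v * 100.0)
    45.7
    """
    val = getter()
    if modifier:
        val = modifier(val)
    if isinstance(val, float):
        val = round(val, 1)
    return val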
|
|
#!/usr/bin/env python
import numpy as np
import numpy.random as nr
from numpy.testing import (assert_array_equal, assert_array_almost_equal, assert_almost_equal)
from nose.tools import assert_true, assert_equal
from ..graph import (WeightedGraph, complete_graph, mst, knn, eps_nn,
wgraph_from_adjacency, wgraph_from_coo_matrix,
concatenate_graphs, wgraph_from_3d_grid)
def basicdata():
x = np.array( [[- 1.998, - 2.024], [- 0.117, - 1.010], [1.099, - 0.057],
[ 1.729, - 0.252], [1.003, - 0.021], [1.703, - 0.739],
[- 0.557, 1.382],[- 1.200, - 0.446],[- 0.331, - 0.256],
[- 0.800, - 1.584]])
return x
def basic_graph():
l = np.linspace(0, 2 * np.pi, 20, endpoint=False)
x = np.column_stack((np.cos(l), np.sin(l)))
G = knn(x, 2)
return G
def basic_graph_2():
l = np.linspace(0, 2 * np.pi, 20, endpoint=False)
x = np.column_stack((np.cos(l), np.sin(l)))
G = knn(x, 2)
return G, x
def test_complete():
v = 10
G = complete_graph(v)
a = G.get_edges()[:, 0]
b = G.get_edges()[:, 1]
inds = np.indices((v, v)).reshape( (2, v * v) )
assert_array_equal(inds, (a, b))
def test_knn_1():
x = basicdata()
G = knn(x, 1)
A = G.get_edges()[:, 0]
assert_equal(np.shape(A)[0], 14)
def test_set_euclidian():
G, x = basic_graph_2()
d = G.weights
G.set_euclidian(x / 10)
D = G.weights
assert_true(np.allclose(D, d / 10, 1e-7))
def test_set_gaussian():
G, x = basic_graph_2()
d = G.weights
G.set_gaussian(x, 1.0)
D = G.weights
assert_true(np.allclose(D, np.exp(- d * d / 2), 1e-7))
def test_set_gaussian_2():
G, x = basic_graph_2()
d = G.weights
G.set_gaussian(x)
D = G.weights
sigma = np.sum(d * d) / len(d)
assert_true(np.allclose(D, np.exp(-d * d / (2 * sigma)), 1e-7))
def test_eps_1():
x = basicdata()
G = eps_nn(x, 1.)
D = G.weights
assert_equal(np.size(D), 16)
assert_true((D < 1).all())
def test_mst_1():
x = basicdata()
G = mst(x)
D = G.weights
assert_equal(np.size(D), 18)
def test_3d_grid():
"""test the 6nn graph
"""
x0 = np.array([0, 0, 0])
x1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0],
[0, 0, -1]])
x2 = np.array([[1, 1, 0], [0, 1, 1], [1, 0, 1], [1, -1, 0], [0, 1, -1],
[1, 0, -1], [-1, 1, 0], [0, -1, 1], [-1, 0, 1],
[-1, -1, 0], [-1, 0, -1], [0, -1, -1]])
x3 = np.array([[1, 1, 1], [1, 1, -1], [1, -1, 1], [1, -1, -1],
[-1, 1, 1], [-1, 1, -1], [-1, -1, 1], [-1, -1, -1]])
for x in x1:
xyz = np.vstack((x0, x))
assert_equal(wgraph_from_3d_grid(xyz, 6).E, 2)
assert_equal(wgraph_from_3d_grid(xyz, 18).E, 2)
assert_equal(wgraph_from_3d_grid(xyz, 26).E, 2)
for x in x2:
xyz = np.vstack((x0, x))
assert_equal(wgraph_from_3d_grid(xyz, 6).E, 0)
assert_equal(wgraph_from_3d_grid(xyz, 18).E, 2)
assert_equal(wgraph_from_3d_grid(xyz, 26).E, 2)
for x in x3:
xyz = np.vstack((x0, x))
assert_equal(wgraph_from_3d_grid(xyz, 6).E, 0)
assert_equal(wgraph_from_3d_grid(xyz, 18).E, 0)
assert_equal(wgraph_from_3d_grid(xyz, 26).E, 2)
def test_grid_3d_1():
""" Test the 6 nn graphs on 3d grid
"""
nx, ny, nz = 9, 6, 1
xyz = np.mgrid[0:nx, 0:ny, 0:nz]
xyz = np.reshape(xyz, (3, nx * ny * nz)).T
G = wgraph_from_3d_grid(xyz, 6)
assert_equal(G.E, 186)
def test_grid_3d_2():
""" Test the 18-nn graph on a 3d grid
"""
nx, ny, nz = 9, 6, 1
xyz = np.mgrid[0:nx, 0:ny, 0:nz]
xyz = np.reshape(xyz,(3, nx * ny * nz)).T
G = wgraph_from_3d_grid(xyz, 18)
assert_equal(G.E, 346)
def test_grid_3d_3():
""" Test the 26-nn graph on a 3d grid
"""
nx, ny, nz = 9, 6, 1
xyz = np.mgrid[0:nx, 0:ny, 0:nz]
xyz = np.reshape(xyz,(3, nx * ny * nz)).T
G = wgraph_from_3d_grid(xyz, 26)
assert_equal(G.E, 346)
def test_grid_3d_4():
nx, ny, nz = 10, 10, 10
xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T
G = wgraph_from_3d_grid(xyz, 26)
D = G.weights
# 6 * 9 * 10 * 10
assert_equal(sum(D == 1), 5400 )
# 26 * 8 ** 3 + 6 * 8 ** 2 * 17 + 12 * 8 * 11 + 8 * 7
assert_equal(np.size(D), 20952 )
# 18 * 8 ** 3 + 6 * 8 ** 2 * 13 + 12 * 8 * 9 + 8 * 6
assert_equal(sum(D < 1.5), 15120)
def test_grid_3d_5():
nx, ny, nz = 5, 5, 5
xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T
G = wgraph_from_3d_grid(xyz, 26)
D = G.weights.copy()
G.set_euclidian(xyz)
assert_array_almost_equal(G.weights, D)
def test_grid_3d_6():
nx, ny, nz = 5, 5, 5
xyz = np.reshape(np.indices((nx, ny, nz)), (3, nx * ny * nz)).T
adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix().tolil()
assert_equal(len(adj.rows[63]), 26)
for i in [62, 64, 58, 68, 38, 88, 57, 67, 37, 87, 59, 69, 39, 89, 33,
83, 43, 93, 32, 82, 42, 92, 34, 84, 44, 94]:
assert_true(i in adj.rows[63])
def test_grid_3d_7():
""" Check that the grid graph is symmetric
"""
xyz = np.array(np.where(np.random.rand(5, 5, 5) > 0.5)).T
adj = wgraph_from_3d_grid(xyz, 6).to_coo_matrix()
assert_equal((adj - adj.T).nnz, 0)
adj = wgraph_from_3d_grid(xyz, 18).to_coo_matrix()
assert_equal((adj - adj.T).nnz, 0)
adj = wgraph_from_3d_grid(xyz, 26).to_coo_matrix()
assert_equal((adj - adj.T).nnz, 0)
def test_cut_redundancies():
G = basic_graph()
e = G.E
edges = G.get_edges()
weights = G.weights
G.E = 2 * G.E
G.edges = np.concatenate((edges, edges))
G.weights = np.concatenate((weights, weights))
K = G.cut_redundancies()
assert_equal(K.E, e)
def test_degrees():
G = basic_graph()
(r, l) = G.degrees()
assert_true((r == 2).all())
assert_true((l == 2).all())
def test_normalize():
G = basic_graph()
G.normalize()
M = G.to_coo_matrix()
sM = np.array(M.sum(1)).ravel()
assert_true((np.abs(sM - 1) < 1.e-7).all())
def test_normalize_2():
G = basic_graph()
G.normalize(0)
M = G.to_coo_matrix()
sM = np.array(M.sum(1)).ravel()
assert_true((np.abs(sM - 1) < 1.e-7).all())
def test_normalize_3():
G = basic_graph()
G.normalize(1)
M = G.to_coo_matrix()
sM = np.array(M.sum(0)).ravel()
assert_true((np.abs(sM - 1) < 1.e-7).all())
def test_adjacency():
G = basic_graph()
M = G.to_coo_matrix()
assert_true(( M.diagonal() == 0 ).all())
A = M.toarray()
assert_true(( np.diag(A, 1) != 0 ).all())
assert_true(( np.diag(A, -1) != 0 ).all())
def test_cc():
G = basic_graph()
l = G.cc()
L = np.array(l==0)
assert_true(L.all())
def test_isconnected():
G = basic_graph()
assert_true(G.is_connected())
def test_main_cc():
x = basicdata()
G = knn(x, 1)
l = G.cc()
l = G.main_cc()
assert_equal(np.size(l), 6)
def test_dijkstra():
""" Test dijkstra's algorithm
"""
G = basic_graph()
l = G.dijkstra(0)
assert_true(np.abs(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7)
def test_dijkstra_multiseed():
""" Test dijkstra's algorithm, multi_seed version
"""
G = basic_graph()
l = G.dijkstra([0, 1])
assert_true(np.abs(l[10] - 18 * np.sin(np.pi / 20)) < 1.e-7)
def test_dijkstra2():
""" Test dijkstra's algorithm, API detail
"""
G = basic_graph()
l = G.dijkstra()
assert_true(np.abs(l[10] - 20 * np.sin(np.pi / 20)) < 1.e-7)
def test_compact_representation():
""" Test that the compact representation of the graph is indeed correct
"""
G = basic_graph()
idx, ne, we = G.compact_neighb()
assert_equal(len(idx), 21)
assert_equal(idx[0], 0)
assert_equal(idx[20], G.E)
assert_equal(len(ne), G.E)
assert_equal(len(we), G.E)
def test_floyd_1():
""" Test Floyd's algo without seed
"""
G = basic_graph()
l = G.floyd()
for i in range(10):
plop = np.abs(np.diag(l, i) - 2 * i * np.sin(2 * np.pi / 40))
assert_true(plop.max() < 1.e-4)
def test_floyd_2():
""" Test Floyd's algo, with seed
"""
G = basic_graph()
seeds = np.array([0,10])
l = G.floyd(seeds)
for i in range(10):
plop = np.abs(l[0, i] - 2 * i * np.sin(2 * np.pi / 40))
assert_true(plop.max() < 1.e-4)
plop = np.abs(l[0,19 - i] - 2 * (i + 1) * np.sin(2 * np.pi / 40))
assert_true(plop.max() < 1.e-4)
for i in range(10):
plop = np.abs(l[1, i] - 2 * (10 - i) * np.sin(2 * np.pi / 40))
assert_true(plop.max() < 1.e-4)
plop = np.abs(l[1, 19 - i] - 2 * (9 - i) * np.sin(2 * np.pi / 40))
assert_true(plop.max() < 1.e-4)
def test_symmeterize():
a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1])
edges = np.vstack((a, b)).T
d = np.ones(14)
G = WeightedGraph(7, edges, d)
G.symmeterize()
d = G.weights
assert_true((d == 0.5).all())
def test_voronoi():
""" test voronoi labelling with 2 seeds
"""
a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1])
d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]);
edges = np.transpose(np.vstack((a, b)))
G = WeightedGraph(7, edges,d)
G.symmeterize()
seed = np.array([0, 6])
label = G.voronoi_labelling(seed)
assert_equal(label[1], 0)
def test_voronoi2():
""" test voronoi labelling with one seed
"""
a = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6])
b = np.array([1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 0, 0, 1])
d = np.array([1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2]);
edges = np.vstack((a, b)).T
G = WeightedGraph(7, edges,d)
G.symmeterize()
seed = np.array([0])
label = G.voronoi_labelling(seed)
assert_equal(label[4], 0)
def test_voronoi3():
""" test voronoi labelling with non-connected components
"""
a = np.array([0, 1, 2, 5, 6])
b = np.array([1, 2, 3, 6, 0])
d = np.array([1, 1, 1, 1, 1]);
edges = np.vstack((a, b)).T
G = WeightedGraph(7, edges,d)
G.symmeterize()
seed = np.array([0])
label = G.voronoi_labelling(seed)
assert_equal(label[4], - 1)
def test_concatenate1(n=10):
x1 = nr.randn(n, 2)
x2 = nr.randn(n, 2)
G1 = knn(x1, 5)
G2 = knn(x2, 5)
G = concatenate_graphs(G1, G2)
assert_true(G.cc().max() > 0)
def test_concatenate2(n=10):
G1 = complete_graph(n)
G2 = complete_graph(n)
G = concatenate_graphs(G1, G2)
assert_true(G.cc().max() == 1)
def test_anti_symmeterize():
n = 10
eps = 1.e-7
M = (nr.rand(n, n) > 0.7).astype(np.float)
C = M - M.T
G = wgraph_from_adjacency(M)
G.anti_symmeterize()
A = G.to_coo_matrix()
assert_true(np.sum(C - A) ** 2 < eps)
def test_subgraph_1(n=10):
x = nr.randn(n, 2)
G = WeightedGraph(x.shape[0])
valid = np.zeros(n)
assert(G.subgraph(valid) is None)
def test_subgraph_2(n=10):
x = nr.randn(n, 2)
G = knn(x, 5)
valid = np.zeros(n)
valid[:n / 2] = 1
assert_true(G.subgraph(valid).edges.max() < n / 2)
def test_graph_create_from_array():
"""Test the creation of a graph from a sparse coo_matrix
"""
a = np.random.randn(5, 5)
wg = wgraph_from_adjacency(a)
b = wg.to_coo_matrix()
assert_array_equal(a, b.todense())
def test_graph_create_from_coo_matrix():
"""Test the creation of a graph from a sparse coo_matrix
"""
import scipy.sparse as spp
a = (np.random.randn(5, 5) > .8).astype(np.float)
s = spp.coo_matrix(a)
wg = wgraph_from_coo_matrix(s)
b = wg.to_coo_matrix()
assert_array_equal(b.todense(), a)
def test_to_coo_matrix():
""" Test the generation of a sparse matrix as output
"""
a = (np.random.randn(5, 5)>.8).astype(np.float)
wg = wgraph_from_adjacency(a)
b = wg.to_coo_matrix().todense()
assert_array_equal(a, b)
def test_list_neighbours():
""" test the generation of neighbours list
"""
bg = basic_graph()
nl = bg.list_of_neighbors()
assert_equal(len(nl), bg.V)
for ni in nl:
assert_equal(len(ni), 2)
def test_kruskal():
""" test Kruskal's algor to thin the graph
"""
x = basicdata()
dmax = np.sqrt((x ** 2).sum())
m = mst(x)
g = eps_nn(x, dmax)
k = g.kruskal()
assert_almost_equal(k.weights.sum(), m.weights.sum())
def test_concatenate3():
""" test the graph concatenation utlitity
"""
bg = basic_graph()
cg = concatenate_graphs(bg, bg)
valid = np.zeros(cg.V)
valid[:bg.V] = 1
sg = cg.subgraph(valid)
assert_array_equal(sg.edges, bg.edges)
assert_array_equal(sg.weights, bg.weights)
def test_cliques():
""" test the computation of cliques
"""
x = np.random.rand(20, 2)
x[15:] += 2.
g = knn(x, 5)
g.set_gaussian(x, 1.)
cliques = g.cliques()
assert_true(len(np.unique(cliques)) > 1)
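# Sketch (not part of the library under test) of the pairs a call like
# knn(x, k) is built from: each point linked to its k nearest other points.
# The library's knn additionally appears to symmetrise the edge set, which is
# why test_knn_1 above sees 14 rather than 10 edges for n=10, k=1.
def _knn_edges_sketch(x, k):
    """Return (i, j) pairs linking each row of x to its k nearest other rows."""
    d2 = ((x[:, None, :] - x[None, :, :]) ** 2).sum(-1)
    np.fill_diagonal(d2, np.inf)  # never link a point to itself
    order = np.argsort(d2, axis=1)[:, :k]
    return [(i, int(j)) for i in range(len(x)) for j in order[i]]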
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
|
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import super
from builtins import int
from future import standard_library
standard_library.install_aliases()
import re
from qtpandas.compat import QtCore, QtGui, Qt, Slot, Signal
from qtpandas.models.SupportedDtypes import SupportedDtypes
import numpy
from pandas import Timestamp
from pandas.tslib import NaTType
import warnings
class DefaultValueValidator(QtGui.QValidator):
def __init__(self, parent=None):
super(DefaultValueValidator, self).__init__(parent)
self.dtype = None
self.intPattern = re.compile(r'[-+]?\d+')
self.uintPattern = re.compile(r'\d+')
self.floatPattern = re.compile(r'[+-]? *(?:\d+(?:\.\d*)?|\.\d+)')
self.boolPattern = re.compile('(1|t|0|f){1}$')
@Slot(numpy.dtype)
def validateType(self, dtype):
self.dtype = dtype
def fixup(self, string):
pass
def validate(self, s, pos):
if not s:
# s is empty
return (QtGui.QValidator.Acceptable, s, pos)
if self.dtype in SupportedDtypes.strTypes():
return (QtGui.QValidator.Acceptable, s, pos)
elif self.dtype in SupportedDtypes.boolTypes():
match = re.match(self.boolPattern, s)
if match:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
elif self.dtype in SupportedDtypes.datetimeTypes():
try:
ts = Timestamp(s)
except ValueError as e:
return (QtGui.QValidator.Intermediate, s, pos)
return (QtGui.QValidator.Acceptable, s, pos)
else:
dtypeInfo = None
if self.dtype in SupportedDtypes.intTypes():
match = re.search(self.intPattern, s)
if match:
try:
value = int(match.string)
except ValueError as e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.uintTypes():
match = re.search(self.uintPattern, s)
if match:
try:
value = int(match.string)
except ValueError as e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.floatTypes():
match = re.search(self.floatPattern, s)
if match:
try:
value = float(match.string)
except ValueError as e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.finfo(self.dtype)
if dtypeInfo is not None:
if value >= dtypeInfo.min and value <= dtypeInfo.max:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
return (QtGui.QValidator.Invalid, s, pos)
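# Illustrative sketch (not part of qtpandas) of how the patterns compiled in
# DefaultValueValidator.__init__ behave on raw text; the sample strings are
# made up. A match result of None is what drives the Invalid decisions above.
def _pattern_examples():
    """Return (pattern name, sample, matched?) triples for the validator regexes."""
    patterns = {
        'int': re.compile(r'[-+]?\d+'),
        'uint': re.compile(r'\d+'),
        'float': re.compile(r'[+-]? *(?:\d+(?:\.\d*)?|\.\d+)'),
        'bool': re.compile(r'(1|t|0|f){1}$'),
    }
    samples = {'int': '-42', 'uint': '42', 'float': '-3.14', 'bool': 't'}
    return [(name, samples[name], patterns[name].search(samples[name]) is not None)
            for name in sorted(patterns)]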
class AddAttributesDialog(QtGui.QDialog):
accepted = Signal(str, object, object)
def __init__(self, parent=None):
super(AddAttributesDialog, self).__init__(parent)
self.initUi()
def initUi(self):
self.setModal(True)
self.resize(303, 168)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Add a new attribute column'), self)
self.gridLayout = QtGui.QGridLayout()
self.columnNameLineEdit = QtGui.QLineEdit(self)
self.columnNameLabel = QtGui.QLabel(self.tr('Name'), self)
self.dataTypeComboBox = QtGui.QComboBox(self)
self.dataTypeComboBox.addItems(SupportedDtypes.names())
self.columnTypeLabel = QtGui.QLabel(self.tr('Type'), self)
self.defaultValueLineEdit = QtGui.QLineEdit(self)
self.lineEditValidator = DefaultValueValidator(self)
self.defaultValueLineEdit.setValidator(self.lineEditValidator)
self.defaultValueLabel = QtGui.QLabel(self.tr('Initial Value(s)'), self)
self.gridLayout.addWidget(self.columnNameLabel, 0, 0, 1, 1)
self.gridLayout.addWidget(self.columnNameLineEdit, 0, 1, 1, 1)
self.gridLayout.addWidget(self.columnTypeLabel, 1, 0, 1, 1)
self.gridLayout.addWidget(self.dataTypeComboBox, 1, 1, 1, 1)
self.gridLayout.addWidget(self.defaultValueLabel, 2, 0, 1, 1)
self.gridLayout.addWidget(self.defaultValueLineEdit, 2, 1, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.dialogHeading)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.dataTypeComboBox.currentIndexChanged.connect(self.updateValidatorDtype)
self.updateValidatorDtype(self.dataTypeComboBox.currentIndex())
def accept(self):
super(AddAttributesDialog, self).accept()
newColumn = self.columnNameLineEdit.text()
dtype = SupportedDtypes.dtype(self.dataTypeComboBox.currentText())
defaultValue = self.defaultValueLineEdit.text()
try:
if dtype in SupportedDtypes.intTypes() + SupportedDtypes.uintTypes():
defaultValue = int(defaultValue)
elif dtype in SupportedDtypes.floatTypes():
defaultValue = float(defaultValue)
elif dtype in SupportedDtypes.boolTypes():
defaultValue = defaultValue.lower() in ['t', '1']
elif dtype in SupportedDtypes.datetimeTypes():
defaultValue = Timestamp(defaultValue)
if isinstance(defaultValue, NaTType):
defaultValue = Timestamp('')
else:
defaultValue = dtype.type()
except ValueError as e:
defaultValue = dtype.type()
self.accepted.emit(newColumn, dtype, defaultValue)
@Slot(int)
def updateValidatorDtype(self, index):
(dtype, name) = SupportedDtypes.tupleAt(index)
self.defaultValueLineEdit.clear()
self.lineEditValidator.validateType(dtype)
class RemoveAttributesDialog(QtGui.QDialog):
accepted = Signal(list)
def __init__(self, columns, parent=None):
super(RemoveAttributesDialog, self).__init__(parent)
self.columns = columns
self.initUi()
def initUi(self):
self.setWindowTitle(self.tr('Remove Attributes'))
self.setModal(True)
self.resize(366, 274)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
self.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Select the attribute column(s) which shall be removed'), self)
self.listView = QtGui.QListView(self)
model = QtGui.QStandardItemModel()
for column in self.columns:
item = QtGui.QStandardItem(column)
model.appendRow(item)
self.listView.setModel(model)
self.listView.setSelectionMode(QtGui.QListView.MultiSelection)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.dialogHeading, 0, 0, 1, 1)
self.gridLayout.addWidget(self.listView, 1, 0, 1, 1)
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def accept(self):
selection = self.listView.selectedIndexes()
names = []
for index in selection:
position = index.row()
names.append((position, index.data(QtCore.Qt.DisplayRole)))
super(RemoveAttributesDialog, self).accept()
self.accepted.emit(names)
|
|
''' Represent transformations of data to happen on the client (browser) side.
'''
from __future__ import absolute_import
from textwrap import dedent
from types import FunctionType
from ..core.enums import StepMode, JitterRandomDistribution
from ..core.has_props import abstract
from ..core.properties import Bool, Dict, Either, Enum, Float, Instance, Seq, String
from ..model import Model
from ..util.compiler import nodejs_compile, CompilationError
from ..util.dependencies import import_required
from ..util.future import get_param_info, signature
from .sources import ColumnarDataSource
@abstract
class Transform(Model):
''' Base class for ``Transform`` models that represent a computation
to be carried out on the client-side.
JavaScript implementations should implement the following methods:
.. code-block: coffeescript
compute: (x) ->
# compute the transform of a single value
v_compute: (xs) ->
# compute the transform of an array of values
'''
pass
class CustomJSTransform(Transform):
''' Apply a custom defined transform to data.
'''
@classmethod
def from_py_func(cls, func, v_func):
''' Create a CustomJSTransform instance from a pair of Python
functions. The function is translated to JavaScript using PyScript.
The python functions must have no positional arguments. It's
possible to pass Bokeh models (e.g. a ColumnDataSource) as keyword
arguments to the functions.
The ``func`` function namespace will contain the variable ``x`` (the
untransformed value) at render time. The ``v_func`` function namespace
will contain the variable ``xs`` (the untransformed vector) at render
time.
.. warning::
The vectorized function, ``v_func``, must return an array of the
same length as the input ``xs`` array.
Example:
.. code-block:: python
def transform():
from flexx.pyscript.stubs import Math
return Math.cos(x)
def v_transform():
from flexx.pyscript.stubs import Math
return [Math.cos(x) for x in xs]
customjs_transform = CustomJSTransform.from_py_func(transform, v_transform)
Args:
func (function) : a scalar function to transform a single ``x`` value
v_func (function) : a vectorized function to transform a vector ``xs``
Returns:
CustomJSTransform
'''
if not isinstance(func, FunctionType) or not isinstance(v_func, FunctionType):
raise ValueError('CustomJSTransform.from_py_func only accepts function objects.')
pyscript = import_required(
'flexx.pyscript',
dedent("""\
To use Python functions for CustomJSTransform, you need Flexx
'("conda install -c bokeh flexx" or "pip install flexx")""")
)
def pyscript_compile(func):
sig = signature(func)
all_names, default_values = get_param_info(sig)
if len(all_names) - len(default_values) != 0:
raise ValueError("Function may only contain keyword arguments.")
if default_values and not any([isinstance(value, Model) for value in default_values]):
raise ValueError("Default value must be a Bokeh Model.")
func_kwargs = dict(zip(all_names, default_values))
# Wrap the code attr in a function named `transformer` and call it
# with arguments that match the `args` attr
code = pyscript.py2js(func, 'transformer') + 'return transformer(%s);\n' % ', '.join(all_names)
return code, func_kwargs
jsfunc, func_kwargs = pyscript_compile(func)
v_jsfunc, v_func_kwargs = pyscript_compile(v_func)
# Have to merge the function arguments
func_kwargs.update(v_func_kwargs)
return cls(func=jsfunc, v_func=v_jsfunc, args=func_kwargs)
@classmethod
def from_coffeescript(cls, func, v_func, args={}):
''' Create a CustomJSTransform instance from a pair of CoffeeScript
snippets. The function bodies are translated to JavaScript functions
using node and therefore require return statements.
The ``func`` snippet namespace will contain the variable ``x`` (the
untransformed value) at render time. The ``v_func`` snippet namespace
will contain the variable ``xs`` (the untransformed vector) at render
time.
Example:
.. code-block:: coffeescript
func = "return Math.cos(x)"
v_func = "return [Math.cos(x) for x in xs]"
transform = CustomJSTransform.from_coffeescript(func, v_func)
Args:
func (str) : a coffeescript snippet to transform a single ``x`` value
v_func (str) : a coffeescript snippet function to transform a vector ``xs``
Returns:
CustomJSTransform
'''
compiled = nodejs_compile(func, lang="coffeescript", file="???")
if "error" in compiled:
raise CompilationError(compiled.error)
v_compiled = nodejs_compile(v_func, lang="coffeescript", file="???")
if "error" in v_compiled:
raise CompilationError(v_compiled.error)
return cls(func=compiled.code, v_func=v_compiled.code, args=args)
args = Dict(String, Instance(Model), help="""
A mapping of names to Bokeh plot objects. These objects are made
available to the callback code snippet as the values of named
parameters to the callback.
""")
func = String(default="", help="""
A snippet of JavaScript code to transform a single value. The variable
``x`` will contain the untransformed value and can be expected to be
present in the function namespace at render time. The snippet will be
wrapped into the body of a function and therefore requires a return statement.
Example:
.. code-block:: javascript
func = '''
return Math.floor(x) + 0.5
'''
""")
v_func = String(default="", help="""
A snippet of JavaScript code to transform an array of values. The variable
``xs`` will contain the untransformed array and can be expected to be
present in the function namespace at render time. The snippet will be
wrapped into the body of a function and therefore requires a return statement.
Example:
.. code-block:: javascript
v_func = '''
new_xs = new Array(xs.length)
for(i = 0; i < xs.length; i++) {
new_xs[i] = xs[i] + 0.5
}
return new_xs
'''
.. warning::
The vectorized function, ``v_func``, must return an array of the
same length as the input ``xs`` array.
""")
class Dodge(Transform):
''' Apply a fixed dodge amount to data.
'''
value = Float(default=0, help="""
The amount to dodge the input data.
""")
range = Instance("bokeh.models.ranges.Range", help="""
When applying ``Dodge`` to categorical data values, the corresponding
``FactorRange`` must be supplied as the ``range`` property.
""")
class Jitter(Transform):
''' Apply either a uniform or normally sampled random jitter to data.
'''
mean = Float(default=0, help="""
The central value for the random sample
""")
width = Float(default=1, help="""
The width (absolute for uniform distribution and sigma for the normal
distribution) of the random sample.
""")
distribution = Enum(JitterRandomDistribution, default='uniform', help="""
The random distribution upon which to pull the random scatter
""")
range = Instance("bokeh.models.ranges.Range", help="""
When applying Jitter to Categorical data values, the corresponding
``FactorRange`` must be supplied as the ``range`` property.
""")
@abstract
class Interpolator(Transform):
''' Base class for interpolator transforms.
Interpolators return the value of a function which has been evaluated
between specified (x, y) pairs of data. As an example, if two control
point pairs were provided to the interpolator, a linear interpolation
at a specific value of 'x' would result in the value of 'y' which existed
on the line connecting the two control points.
The control point pairs for the interpolators can be specified through either
* A literal sequence of values:
.. code-block: python
interp = Interpolator(x=[1, 2, 3, 4, 5], y=[2, 5, 10, 12, 16])
* or a pair of columns defined in a `ColumnDataSource` object:
.. code-block: python
interp = Interpolator(x="year", y="earnings", data=jewlery_prices))
This is the base class and is not intended for end use. Please see the
documentation for the final derived classes (Jitter, LinearInterpolator,
StepInterpolator) for more information on their specific methods of
interpolation.
'''
x = Either(String, Seq(Float), help="""
Independent coordinate denoting the location of a point.
""")
y = Either(String, Seq(Float), help="""
Dependent coordinate denoting the value of a point at a location.
""")
data = Instance(ColumnarDataSource, help="""
Data which defines the source for the named columns if a string is passed to either the ``x`` or ``y`` parameters.
""")
clip = Bool(True, help="""
Determine if the interpolation should clip the result to include only values inside its predefined range.
If this is set to False, it will return the value of the closest point.
""")
# Define an initialization routine to do some cross checking of input values
def __init__(self, **kwargs):
super(Interpolator, self).__init__(**kwargs)
class LinearInterpolator(Interpolator):
''' Compute a linear interpolation between the control points provided through
the ``x``, ``y``, and ``data`` parameters.
'''
pass
class StepInterpolator(Interpolator):
''' Compute a step-wise interpolation between the points provided through
the ``x``, ``y``, and ``data`` parameters.
'''
mode = Enum(StepMode, default="after", help="""
Adjust the behavior of the returned value in relation to the control points. The parameter can assume one of three values:
* ``after`` (default): Assume the y-value associated with the nearest x-value which is less than or equal to the point to transform.
* ``before``: Assume the y-value associated with the nearest x-value which is greater than the point to transform.
* ``center``: Assume the y-value associated with the nearest x-value to the point to transform.
""")
|
|
# Copyright 2022 The T5 Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for running offline evaluation."""
import collections
import os
from absl import logging
import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
class Metric(object):
def __init__(self, name, group=None):
self.name = name
self.group = group or name
# This OrderedDict maps TensorBoard tags to nice-looking metric names.
# The order of the keys in the dict determine the order they get logged.
METRIC_NAMES = collections.OrderedDict([
("glue_average", Metric("Average GLUE Score")),
("glue_cola_v002/matthews_corrcoef", Metric("CoLA")),
("glue_sst2_v002/accuracy", Metric("SST-2")),
("glue_mrpc_v002/f1", Metric("MRPC (F1)", "MRPC")),
("glue_mrpc_v002/accuracy", Metric("MRPC (accuracy)", "MRPC")),
("glue_stsb_v002/pearson_corrcoef", Metric("STSB (Pearson)", "STSB")),
("glue_stsb_v002/spearman_corrcoef", Metric("STSB (Spearman)", "STSB")),
("glue_qqp_v002/f1", Metric("QQP (F1)", "QQP")),
("glue_qqp_v002/accuracy", Metric("QQP (accuracy)", "QQP")),
("glue_mnli_matched_v002/accuracy", Metric("MNLIm", "MNLI")),
("glue_mnli_mismatched_v002/accuracy", Metric("MNLImm", "MNLI")),
("glue_qnli_v002/accuracy", Metric("QNLI")),
("glue_rte_v002/accuracy", Metric("GLUE RTE")),
("cnn_dailymail_v002/rouge1", Metric("CNN/DM (ROUGE-1)", "CNN/DM")),
("cnn_dailymail_v002/rouge2", Metric("CNN/DM (ROUGE-2)", "CNN/DM")),
("cnn_dailymail_v002/rougeL", Metric("CNN/DM (ROUGE-L)", "CNN/DM")),
("cnn_dailymail_v002/rougeLsum", Metric("CNN/DM (ROUGE-L)", "CNN/DM")),
("squad_v010_allanswers/em", Metric("SQuAD (EM)", "SQuAD")),
("squad_v010_allanswers/f1", Metric("SQuAD (F1)", "SQuAD")),
("squad_v010_allanswers_span/em", Metric("SQuAD (EM)", "SQuAD")),
("squad_v010_allanswers_span/f1", Metric("SQuAD (F1)", "SQuAD")),
("squad_v010/em", Metric("SQuAD (EM)", "SQuAD")),
("squad_v010/f1", Metric("SQuAD (F1)", "SQuAD")),
("super_glue_average", Metric("Average SuperGLUE Score")),
("super_glue_boolq_v102/accuracy", Metric("BoolQ (accuracy)")),
("super_glue_cb_v102/mean_3class_f1", Metric("CB (F1)", "CB")),
("super_glue_cb_v102/accuracy", Metric("CB (accuracy)", "CB")),
("super_glue_copa_v102/accuracy", Metric("CoPA")),
("super_glue_multirc_v102/f1", Metric("MultiRC (F1)", "MultiRC")),
("super_glue_multirc_v102/exact_match", Metric("MultiRC (EM)", "MultiRC")),
("super_glue_record_v102/f1", Metric("ReCoRD (F1)", "ReCoRD")),
("super_glue_record_v102/em", Metric("ReCoRD (EM)", "ReCoRD")),
("super_glue_rte_v102/accuracy", Metric("SuperGLUE RTE")),
("super_glue_wic_v102/accuracy", Metric("WiC")),
("super_glue_wsc_v102_simple_eval/accuracy", Metric("WSC")),
("dpr_v001_simple/accuracy", Metric("DPR")),
("wmt_t2t_ende_v003/bleu", Metric("WMT T2T En-De")),
("wmt14_ende_v003/bleu", Metric("WMT14 En-De")),
("wmt15_enfr_v003/bleu", Metric("WMT15 En-Fr")),
("wmt16_enro_v003/bleu", Metric("WMT16 En-Ro")),
])
Event = collections.namedtuple("event", ["step", "value"])
def parse_events_files(tb_summary_dir, seqio_summaries=False):
"""Parse all TensorBoard events files in tb_summary_dir.
Args:
tb_summary_dir: str, path to look for events files in.
seqio_summaries: boolean, whether event summaries are generated by SeqIO
Evaluator.
Returns:
A dict, where each key is a TensorBoard tag and each value is a list of
Event tuples with step and value attributes.
"""
events = collections.defaultdict(list)
for events_file in tf.io.gfile.glob(os.path.join(tb_summary_dir, "events.*")):
try:
serialized_events = list(
tfds.as_numpy(tf.data.TFRecordDataset(events_file)))[1:]
for idx, e in enumerate(tf.train.summary_iterator(events_file)):
for v in e.summary.value:
if seqio_summaries:
event = tf.compat.v1.Event.FromString(
serialized_events[idx-1]).summary.value[0]
# Need to check if event has a tensor or scalar since we need to
# handle both cases.
if event.HasField("tensor"):
metric_value = tf.make_ndarray(event.tensor)
else:
metric_value = event.simple_value
else:
metric_value = v.simple_value
events[v.tag].append(Event(e.step, metric_value))
except tf.errors.DataLossError:
logging.info("Skipping %s due to truncated record.", events_file)
return events
def get_eval_metric_values(events, task_name=None):
"""Filter TensorBoard events to only include those for eval metrics.
Args:
events: dict of list of (step, value) tuples where keys are tags.
task_name: string, if not provided, then the function will look for the
task name in the events tags.
Returns:
Dict where key is task_name/metric_name and value is (step, value) tuple.
"""
eval_values = {}
for tag, event_values in events.items():
if tag.startswith("eval"):
if task_name:
_, metric_name = tag.split("/")
else:
_, task_name_from_tag, metric_name = tag.split("/")
eval_task_name = task_name if task_name else task_name_from_tag
eval_values["{}/{}".format(eval_task_name, metric_name)] = event_values
return eval_values
def sort_columns(df, metric_names=None):
metric_names = metric_names or METRIC_NAMES
column_order = list(collections.OrderedDict.fromkeys(
[m.name for m in metric_names.values() if m.name in df.columns]
))
return df.reindex(columns=column_order)
def compute_avg_glue(df, metric_names=None):
"""Compute average GLUE and SuperGLUE scores from a DataFrame.
Will only compute a given average score if all of the metrics for that
benchmark appear as columns in the DataFrame.
Args:
df: pandas.DataFrame, columns should be metric names.
metric_names: dict mapping tensorboard tag to metric name.
Returns:
A pandas.DataFrame which has GLUE and SuperGLUE averages calculated.
"""
# Use METRIC_NAMES defined at the top as default
metric_names = metric_names or METRIC_NAMES
all_glue_tags = {
k for k in metric_names.keys() if "glue" in k and "average" not in k
}
superglue_tags = {k for k in all_glue_tags if "super" in k}
glue_tags = all_glue_tags - superglue_tags
average_keys = ["Average GLUE Score", "Average SuperGLUE Score"]
for average_key, tags in zip(average_keys, [glue_tags, superglue_tags]):
# Only compute average if all metric names appear as columns in the DF
if {metric_names[t].name for t in tags}.issubset(set(df.columns)):
# Compute average over each metric group
group_to_metrics = collections.defaultdict(set)
for tag in tags:
metric = metric_names[tag]
group_to_metrics[metric.group].add(metric.name)
accum = None
for metrics in group_to_metrics.values():
group_avg = np.mean([df[k] for k in metrics], axis=0)
accum = group_avg if accum is None else accum + group_avg
# Compute average across all groups
average = accum/len(group_to_metrics)
df[average_key] = average
return df
def scores_to_df(scores, metric_names=None):
"""Convert `scores` into a pandas DataFrame."""
# Use METRIC_NAMES defined at the top as default
metric_names = metric_names or METRIC_NAMES
for tag in scores.keys():
if tag not in metric_names:
metric_names[tag] = Metric(tag)
logging.warning(
"TensorBoard tag %s not found in metric_names. "
"Using tag as metric name.",
tag)
# Sort the tags in scores according to metric_names order
sorted_tags = sorted(
scores.keys(), key=lambda x: list(metric_names.keys()).index(x)
)
columns = [metric_names[t].name for t in sorted_tags]
# Convert scores to dict with the format
# {step_number: {tag1: value, tag2: value, ...}}
step_scores = collections.defaultdict(
lambda: collections.OrderedDict([(t, np.nan) for t in sorted_tags])
)
for tag in sorted_tags:
for step, value in scores[tag]:
# If a job gets evicted and restarts from a prior checkpoint, it's
# possible that a single step has more than one eval result. In that case,
# we pick the max value across all the eval results.
if step_scores[step][tag]:
step_scores[step][tag] = max(value, step_scores[step][tag])
else:
step_scores[step][tag] = value
sorted_items = sorted(list(step_scores.items()))
data = [list(r.values()) for _, r in sorted_items]
index = [s for s, _ in sorted_items]
df = pd.DataFrame(data, index, columns)
df.index.name = "step"
return df
def metric_group_max(df, metric_names=None):
"""Find the step which achieves the highest mean value for a group of metrics."""
# Use METRIC_NAMES defined at the top as default
metric_names = metric_names or METRIC_NAMES
group_to_metrics = collections.defaultdict(set)
for metric in metric_names.values():
group_to_metrics[metric.group].add(metric.name)
group_df = pd.DataFrame()
for group, metrics in group_to_metrics.items():
if not all(m in df for m in metrics):
continue
group_df[group] = df[metrics].mean(axis=1)
# Need to replace nan with large negative value for idxmax
group_max_step = group_df.fillna(-1e9).idxmax(axis=0)
metric_max = pd.Series()
metric_max_step = pd.Series()
for group_name, max_step in group_max_step.iteritems():
for metric in group_to_metrics[group_name]:
metric_max[metric] = df[metric][max_step]
metric_max_step[metric] = max_step
metric_max = metric_max.reindex(df.columns)
metric_max_step = metric_max_step.reindex(df.columns)
return metric_max, metric_max_step
def log_csv(df, metric_names=None, output_file=None):
"""Log scores to be copy/pasted into a spreadsheet."""
logging.info(",".join(df.columns))
metric_max, metric_max_step = metric_group_max(df, metric_names)
max_row = "max," + ",".join("{:.3f}".format(m) for m in metric_max)
logging.info(max_row)
idx_row = "step," + ",".join("{:d}".format(i) for i in metric_max_step)
logging.info(idx_row)
if output_file is not None:
with tf.io.gfile.GFile(output_file, "w") as f:
csv_string = df.to_csv(float_format="%.3f")
f.write(csv_string + max_row + "\n" + idx_row)
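# End-to-end sketch (not part of the original module) of how the helpers above
# fit together: build a `scores` dict of tag -> [Event(step, value), ...], turn
# it into a DataFrame, then pick the best step per metric group. The numbers
# below are invented for illustration only.
def _example_offline_eval_flow():
    scores = {
        "glue_cola_v002/matthews_corrcoef": [Event(100, 0.41), Event(200, 0.48)],
        "glue_sst2_v002/accuracy": [Event(100, 91.2), Event(200, 92.5)],
    }
    df = scores_to_df(scores)  # columns become "CoLA" and "SST-2" per METRIC_NAMES
    metric_max, metric_max_step = metric_group_max(df)
    return df, metric_max, metric_max_step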
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class P2SVpnServerConfigurationsOperations:
"""P2SVpnServerConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> "_models.P2SVpnServerConfiguration":
"""Retrieves the details of a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: P2SVpnServerConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.P2SVpnServerConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs: Any
) -> "_models.P2SVpnServerConfiguration":
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(p2_s_vpn_server_configuration_parameters, 'P2SVpnServerConfiguration')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
p2_s_vpn_server_configuration_parameters: "_models.P2SVpnServerConfiguration",
**kwargs: Any
) -> AsyncLROPoller["_models.P2SVpnServerConfiguration"]:
"""Creates a P2SVpnServerConfiguration to associate with a VirtualWan if it doesn't exist else
updates the existing P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:param p2_s_vpn_server_configuration_parameters: Parameters supplied to create or Update a
P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_parameters: ~azure.mgmt.network.v2019_06_01.models.P2SVpnServerConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either P2SVpnServerConfiguration or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_06_01.models.P2SVpnServerConfiguration]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.P2SVpnServerConfiguration"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
p2_s_vpn_server_configuration_parameters=p2_s_vpn_server_configuration_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('P2SVpnServerConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
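    # Hedged usage sketch (not part of the generated code). Assuming the async
    # NetworkManagementClient exposes this operations group as
    # `p2_s_vpn_server_configurations`, a caller could run:
    #
    #   poller = await client.p2_s_vpn_server_configurations.begin_create_or_update(
    #       resource_group_name="my-rg",
    #       virtual_wan_name="my-wan",
    #       p2_s_vpn_server_configuration_name="my-config",
    #       p2_s_vpn_server_configuration_parameters=parameters)
    #   config = await poller.result()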
async def _delete_initial(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_wan_name: str,
p2_s_vpn_server_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a P2SVpnServerConfiguration.
:param resource_group_name: The resource group name of the P2SVpnServerConfiguration.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:param p2_s_vpn_server_configuration_name: The name of the P2SVpnServerConfiguration.
:type p2_s_vpn_server_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_wan_name=virtual_wan_name,
p2_s_vpn_server_configuration_name=p2_s_vpn_server_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
'p2SVpnServerConfigurationName': self._serialize.url("p2_s_vpn_server_configuration_name", p2_s_vpn_server_configuration_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations/{p2SVpnServerConfigurationName}'} # type: ignore
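    # Hedged sketch: deletion follows the same LRO pattern (the client
    # attribute name is an assumption):
    #
    #   poller = await client.p2_s_vpn_server_configurations.begin_delete(
    #       "my-rg", "my-wan", "my-config")
    #   await poller.result()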
def list_by_virtual_wan(
self,
resource_group_name: str,
virtual_wan_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListP2SVpnServerConfigurationsResult"]:
"""Retrieves all P2SVpnServerConfigurations for a particular VirtualWan.
:param resource_group_name: The resource group name of the VirtualWan.
:type resource_group_name: str
:param virtual_wan_name: The name of the VirtualWan.
:type virtual_wan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListP2SVpnServerConfigurationsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_06_01.models.ListP2SVpnServerConfigurationsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListP2SVpnServerConfigurationsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_virtual_wan.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualWanName': self._serialize.url("virtual_wan_name", virtual_wan_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListP2SVpnServerConfigurationsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_virtual_wan.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualWans/{virtualWanName}/p2sVpnServerConfigurations'} # type: ignore
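    # Hedged sketch: when iterated, the pager returned above yields the
    # individual P2SVpnServerConfiguration items (the client attribute name is
    # an assumption):
    #
    #   async for config in client.p2_s_vpn_server_configurations.list_by_virtual_wan(
    #           "my-rg", "my-wan"):
    #       print(config.name)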
|
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import cStringIO
import functools
import warnings
from ..proto import framework_pb2
from ..framework import OpProtoHolder, Variable
from ..layer_helper import LayerHelper
__all__ = [
'deprecated',
'generate_layer_fn',
'autodoc',
]
def _convert_(name):
"""
Formatting.
Args:
name: The name/alias
This function takes in a name and converts it to a standard format of
group1_group2. Where as per the regular expression, group1 can have
alphabets and numbers and group2 has capital alphabets.
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
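# Illustrative conversions (examples added for clarity, not original code):
#   _convert_('BatchNorm') -> 'batch_norm'
#   _convert_('FC')        -> 'fc'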
def _generate_doc_string_(op_proto):
"""
Generate docstring by OpProto
Args:
op_proto (framework_pb2.OpProto): a protobuf message typed OpProto
Returns:
str: the document string
"""
def _type_to_str_(tp):
return framework_pb2.AttrType.Name(tp)
if not isinstance(op_proto, framework_pb2.OpProto):
raise TypeError("OpProto should be `framework_pb2.OpProto`")
buf = cStringIO.StringIO()
buf.write(op_proto.comment)
buf.write('\nArgs:\n')
for each_input in op_proto.inputs:
line_begin = ' {0}: '.format(_convert_(each_input.name))
buf.write(line_begin)
buf.write(each_input.comment)
buf.write('\n')
buf.write(' ' * len(line_begin))
buf.write('Duplicable: ')
buf.write(str(each_input.duplicable))
buf.write(' Optional: ')
buf.write(str(each_input.dispensable))
buf.write('\n')
for each_attr in op_proto.attrs:
buf.write(' ')
buf.write(each_attr.name)
buf.write(' (')
buf.write(_type_to_str_(each_attr.type))
buf.write('): ')
buf.write(each_attr.comment)
buf.write('\n')
if len(op_proto.outputs) != 0:
buf.write('\nReturns:\n')
buf.write(' ')
for each_opt in op_proto.outputs:
if not each_opt.intermediate:
break
buf.write(each_opt.comment)
return buf.getvalue()
def generate_layer_fn(op_type):
"""Register the Python layer for an Operator.
Args:
op_type: The name of the operator to be created.
This function takes in the operator type (sigmoid, mean , average etc) and
creates the operator functionality.
"""
op_proto = OpProtoHolder.instance().get_op_proto(op_type)
not_intermediate_outputs = \
filter(lambda output: not output.intermediate, op_proto.outputs)
intermediate_outputs = \
filter(lambda output: output.intermediate, op_proto.outputs)
if len(not_intermediate_outputs) != 1:
raise ValueError("Only one non intermediate output operator can be",
"automatically generated.")
if not_intermediate_outputs[0].duplicable:
raise ValueError(
"Only non duplicable op can be automatically generated.")
for output in intermediate_outputs:
if output.duplicable:
raise ValueError("The op can be automatically generated only when ",
"all intermediate ops are not duplicable.")
o_name = not_intermediate_outputs[0].name
intermediate_output_names = [output.name for output in intermediate_outputs]
def infer_and_check_dtype(op_proto, *args, **kwargs):
"""
This function performs the sanity check for dtype and
instance type.
"""
dtype = None
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0:
val = [args[0]]
args = args[1:]
for each in val:
if not isinstance(each, Variable):
raise ValueError("input of {0} must be variable".format(
op_type))
if dtype is None:
dtype = each.dtype
elif dtype != each.dtype:
raise ValueError(
"operator {0} must input same dtype. {1} vs {2}".format(
op_type, dtype, each.dtype))
return dtype
def func(*args, **kwargs):
helper = LayerHelper(op_type, **kwargs)
dtype = infer_and_check_dtype(op_proto, *args, **kwargs)
inputs = dict()
for ipt in op_proto.inputs:
name = _convert_(ipt.name)
val = kwargs.pop(name, [])
if not isinstance(val, list) and not isinstance(val, tuple):
val = [val]
if len(val) == 0 and len(args) != 0:
val = args[0]
args = args[1:]
inputs[ipt.name] = val
outputs = dict()
out = kwargs.pop(_convert_(o_name), [])
if out:
out_var = out[0] if (isinstance(out, list) or
isinstance(out, tuple)) else out
else:
out_var = helper.create_tmp_variable(dtype=dtype)
outputs[o_name] = [out_var]
for name in intermediate_output_names:
outputs[name] = [helper.create_tmp_variable(dtype=dtype)]
helper.append_op(
type=op_type, inputs=inputs, outputs=outputs, attrs=kwargs)
return helper.append_activation(out_var)
func.__name__ = op_type
func.__doc__ = _generate_doc_string_(op_proto)
return func
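# Hedged usage sketch (assumes an operator named 'sigmoid' is registered in the
# OpProtoHolder and a default program is active, as in the fluid layers module):
#   sigmoid = generate_layer_fn('sigmoid')
#   out = sigmoid(some_variable)   # appends the op and returns its output Variable
#   print sigmoid.__doc__          # docstring generated from the OpProto comment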
def deprecated(func):
    """
    Deprecation warning decorator. Each call to the wrapped callable emits a
    DeprecationWarning. It can be applied to functions, member functions, or
    classes.
    """
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
"""
Wrap func with deprecated warning
"""
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return func_wrapper
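# Hedged usage sketch:
#   @deprecated
#   def old_layer(x):
#       return x
#   old_layer(1)  # emits "Call to deprecated function old_layer." as a DeprecationWarning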
def autodoc(comment=""):
def __impl__(func):
func.__doc__ = _generate_doc_string_(OpProtoHolder.instance(
).get_op_proto(func.__name__)) + comment
return func
return __impl__
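# Hedged usage sketch for autodoc (assumes the decorated function shares its
# name with a registered operator, here 'mean'):
#   @autodoc("Additional note appended to the generated docstring.")
#   def mean(x):
#       ...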
|
|
"""
MIT License
Copyright (c) 2016 Jon Sellars
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Please see Exiftool, Gnu Parallel and Enblend for their license.
http://www.sno.phy.queensu.ca/~phil/exiftool/
https://www.gnu.org/software/parallel/
http://enblend.sourceforge.net/index.htm
.ExifTool_config has to be in place for exiftool to write the custom tags.
Expects the output from prep-mosaic.py. Specifically that mismarked pixels in the alpha channel have been set to
black and the data have all been warped to a uniform resolution.
"""
import os,glob
from xml.dom.minidom import parse
from osgeo import gdal
blur = '0x4'
# If spaceIsTight is True, the original tif is gzip-compressed after processing.
# If space is really tight, delete the originals instead.
spaceIsTight = False
# Set up directories
if not os.path.isdir('final'):
os.mkdir('final')
if not os.path.isdir('big-blur'):
os.mkdir('big-blur')
if not os.path.isdir('little-blur'):
os.mkdir('little-blur')
if not os.path.isdir('corr'):
os.mkdir('corr')
#############################################################################
# Switches for exiftool and enblend
exifSw = 'exiftool -overwrite_original_in_place -ResolutionUnit=inches -XPosition=%s -YPosition=%s -XResolution=1 -YResolution=1 %s'
enblendSw = 'enblend-mp --fine-mask -f %sx%s --no-optimize -a -o ../big-blur/%s.tif LC*.tif'
#
#############################################################################
# The following config must be saved as ~/.ExifTool_config so exiftool can write the custom tags.
def exifToolsConfig():
etConfig ="""%Image::ExifTool::UserDefined = (
# All EXIF tags are added to the Main table, and WriteGroup is used to
# specify where the tag is written (default is ExifIFD if not specified):
'Image::ExifTool::Exif::Main' => {
0xd000 => {
Name => 'XResolution',
Writable => 'int16u',
},{
Name => 'YResolution',
Writable => 'int16u',
},{
Name => 'XPosition',
Writable => 'int16u',
},{
Name => 'YPosition',
Writable => 'int16u',
},{
Name => 'ResolutionUnit',
Writable => 'int16u',
}
# add more user-defined EXIF tags here...
},
);
print "LOADED!\n";"""
return etConfig
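# Hedged sketch: one way to install the config returned above (this script does
# not do it automatically):
#   with open(os.path.expanduser('~/.ExifTool_config'), 'w') as cfg:
#       cfg.write(exifToolsConfig())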
#############################################################################
def procTifs():
if not os.path.isdir('scanline'):
os.mkdir('scanline')
transSw()
def transSw():
# check on making scanline width 78 to match nona
os.chdir('scanline')
#basename = os.path.basename(img).split('.')[0]
print "Parallel Warp"
#parallelWarp = ('ls ../*.tif | parallel gdalwarp -of VRT -dstalpha -co ALPHA=YES -srcnodata "0" -dstnodata "0" {} {/.}.vrt')
#parallelWarp = ('ls ../*.tif | parallel gdalwarp -of VRT -srcnodata "0" -dstnodata "0" {} {/.}.vrt')
#os.system(parallelWarp)
print "Parallel Translate"
    parallelTrans = ('''ls ../*.tif | parallel 'gdal_translate -outsize 20% 20% -co BLOCKYSIZE=78 -a_nodata "0 0 0" -co TFW=YES {} {/.}.tif' ''')
os.system(parallelTrans)
os.chdir('../')
def makeVrt(vrt):
vrtSw = 'gdalbuildvrt %s.vrt *.tif' % (vrt)
os.system(vrtSw)
def parseVrt(vrt):
vrtBasename = os.path.basename(vrt).split('.')[0]
enList = '%s.list' % vrtBasename
enListFile = open(vrtBasename+'.list','w')
vrtInfo = parse(vrt)
GeoTransform = vrtInfo.getElementsByTagName('GeoTransform')
for gt in GeoTransform:
geot = gt.firstChild.data.split(',')
pixelX = float(geot[1])
pixelY = float(geot[5])
# Get ULX,ULY
ULX = float(geot[0]) + (pixelX/2)
ULY = float(geot[3]) + (pixelY/2)
tfw = open(vrtBasename+'.tfw','w')
tfwTxt = '%s\n0\n0\n%s\n%s\n%s' % (pixelX,pixelY,ULX,ULY)
tfw.write(tfwTxt)
tfw.close()
VRTDataset = vrtInfo.getElementsByTagName('VRTDataset')
for (name,value) in VRTDataset[0].attributes.items():
if name == 'rasterXSize':
rasterXSize = value
if name == 'rasterYSize':
rasterYSize = value
print 'Mosaic size is:' ,rasterXSize, rasterYSize
band1 = vrtInfo.getElementsByTagName('VRTRasterBand')[0]
sources = band1.getElementsByTagName('SimpleSource')
if len(sources) == 0:
sources = band1.getElementsByTagName('ComplexSource')
for source in sources:
SourceFilename = source.getElementsByTagName('SourceFilename')
for node in SourceFilename:
image_id = node.firstChild.data
imageListTxt = '%s\n'%image_id
enListFile.write(imageListTxt)
SrcRect = source.getElementsByTagName('SrcRect')
DstRect = source.getElementsByTagName('DstRect')
loop = 0
for (name, value) in DstRect[loop].attributes.items():
#print name,value
if name == 'xSize': # image width
xSize = value
if name == 'ySize': # image height
ySize = value
if name == 'xOff': # x offset into mosaic
xOff = value
if name == 'yOff': # y offset into mosaic
yOff = value
print 'adding exif to %s' % image_id
addExif = exifSw % (xOff,yOff,image_id)
# this step writes .sh to pass to parallel
#os.system(addExif)
exif_sh = open('exif-%s.sh'%image_id,'w')
exif_sh.write(addExif)
enListFile.close()
return rasterXSize, rasterYSize, enList, vrtBasename,pixelX,pixelY,ULX,ULY
def calcImgExt(img):
dataset = gdal.Open(img)
# get epsg code
try:
epsg = dataset.GetProjectionRef().split(',')[-1].split('"')[1]
except:
epsg = 0
#print dataset.GetDescription(),'has no projection'
geot = dataset.GetGeoTransform()
    # Get image width and height in pixels
rastX = dataset.RasterXSize
rastY = dataset.RasterYSize
# Get pixel sizes
pixelX = geot[1]
pixelY = geot[5]
# Get ULX,ULY
ULX = geot[0]
ULY = geot[3]
# Calculate URX,LRY
URX = ULX+(pixelX * rastX)
LRY = ULY+(pixelY * rastY)
dataset = None
#bboxSize = ULX,LRY,URX,ULY,rastX,rastY,pixelX,pixelY
#return bboxSize
return pixelX,rastX,rastY,ULX,LRY,URX,ULY
def wldTemplate():
'''Template for worldfiles'''
wt = '''%s\n0\n0\n-%s\n%s\n%s'''
return wt
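# A world file built from wldTemplate() has six lines: pixel size, two rotation
# terms, negative pixel size, then the upper-left X and Y. For example, with a
# 0.5-unit pixel and origin (430000, 3950000) it would read:
#   0.5
#   0
#   0
#   -0.5
#   430000
#   3950000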
procTifs()
os.chdir('scanline')
makeVrt('mosaic')
vrtIn = 'mosaic.vrt'
mosaicXSize, mosaicYSize, mosaicList, mosaicBasename,mosaicPixelX,mosaicPixelY,mosaicULX,mosaicULY = parseVrt(vrtIn)
mosaicLRX = mosaicULX + (int(mosaicXSize)*mosaicPixelX)
mosaicLRY = mosaicULY + (int(mosaicYSize)*mosaicPixelY)
# Truncate the corner coordinates to integers
mosaicLRX = int(mosaicLRX)
mosaicLRY = int(mosaicLRY)
mosaicULX = int(mosaicULX)
mosaicULY = int(mosaicULY)
# process the exif cmds
exifProc = 'ls exif*.sh | parallel bash {}'
os.system(exifProc)
os.system('rm exif*.sh')
# Create a reduced-resolution mosaic of all input images
enblendCmd = enblendSw % ( mosaicXSize, mosaicYSize, mosaicBasename )
os.system(enblendCmd)
os.system('cp mosaic.tfw ../big-blur')
os.chdir('../')
# ccfm -> closest-to-center feathered mosaic
ccfm = glob.glob('big-blur/mos*.tif')
for mos_fm in ccfm:
mosResolution,fmRasterXSize,fmRasterYSize,fmULX,fmLRY,fmURX,fmULY = calcImgExt(mos_fm)
basename = os.path.basename(mos_fm).replace('.tif','')
# Blur this feathered mosaic
imBlurBigMos = 'convert %s -blur %s - | convert - \( %s -channel a -separate +channel \) -alpha off -compose copy_opacity -composite big-blur/%s.png' % (mos_fm,blur,mos_fm,basename)
wldCmd = wldTemplate()%(mosResolution,mosResolution,fmULX,fmULY)
mvWldCmd = open('big-blur/%s.pgw' % (basename),'w')
mvWldCmd.write(wldCmd)
mvWldCmd.close()
shCmd = open(('%s.sh'%basename),'w')
shCmd.write(imBlurBigMos)
shCmd.close()
parallelCmd = 'ls mos*.sh | parallel bash {}'
os.system(parallelCmd)
os.system('rm mos*.sh')
makeMosFmVrt = 'gdalbuildvrt -srcnodata "0 0 0" mosFM.vrt big-blur/*.png'
os.system(makeMosFmVrt)
mos_tifs = glob.glob('*.tif')
for mos_tif in mos_tifs:
args = {}
pixelX,RasterXSize,RasterYSize,ULX,LRY,URX,ULY = calcImgExt(mos_tif)
basename = os.path.basename(mos_tif).replace('.tif','')
thisSH = open('correction-%s.sh'%basename,'w')
args['basename'] = basename
args['RasterXSize'] = RasterXSize
    # Make TFW for output mosaic tile
tfw4corr = wldTemplate() % (pixelX,pixelX,ULX,ULY)
thisTFW = open('corr/%s.tfw'%basename,'w')
thisTFW.write(tfw4corr)
thisTFW.close()
    # Resample mosaic tile to match the feathered mosaic
resampleLittleBlur = 'gdalwarp -co TFW=YES -tr %s %s %s little-blur/%s.tif\n' % (mosResolution,mosResolution,mos_tif,basename)
thisSH.write(resampleLittleBlur)
    # Blur resampled mosaic tile
imBlurLittleMos = 'mogrify -blur %s little-blur/%s.tif\n' % (blur,basename)
thisSH.write(imBlurLittleMos)
# Clip out corresponding area in feathered mosaic
warpFM = 'gdalwarp -te %s %s %s %s mosFM.vrt big-blur/%s.tif\n' %(ULX,LRY,URX,ULY,basename)
thisSH.write(warpFM)
# Subtract fwd and rev
dstCmd = 'composite -compose minus_dst little-blur/%s.tif big-blur/%s.tif little-blur/dst%s.tif\n' %(basename,basename,basename)
thisSH.write(dstCmd)
srcCmd = 'composite -compose minus_src little-blur/%s.tif big-blur/%s.tif little-blur/src%s.tif\n' %(basename,basename,basename)
thisSH.write(srcCmd)
# Apply fwd and rev delta and limit to area of original alpha.
corrCmd = 'composite -quiet -compose minus_dst %(basename)s.tif little-blur/dst%(basename)s.tif -resize %(RasterXSize)s - | composite -quiet -compose plus - little-blur/src%(basename)s.tif -resize %(RasterXSize)s - | convert - \( %(basename)s.tif -channel a -separate +channel -morphology Erode:8 Disk -blur 0x16 \) -alpha off -compose copy_opacity -composite corr/%(basename)s.tif\n ' % args
thisSH.write(corrCmd)
#seamCmd = 'mogrify -channel a -blur 0x4 -morphology Erode:4 Disk corr/%(basename)s.tif\n' % args
#thisSH.write(seamCmd)
# Clean up intermediate files
cleanCmd = 'rm little-blur/dst%(basename)s.tif little-blur/src%(basename)s.tif little-blur/%(basename)s.tif big-blur/%(basename)s.tif\n' % args
thisSH.write(cleanCmd)
    # Compress the original image only when spaceIsTight is enabled
gzCmd = 'gzip %s\n' % mos_tif
if spaceIsTight:
thisSH.write(gzCmd)
thisSH.close()
print 'Start color correction...'
parallelCmd = 'ls correction-*.sh | parallel -j 16 bash {}'
os.system(parallelCmd)
os.system('rm correction-*.sh')
writerCMD = 'gdalwarp -q -wo SKIP_NOSOURCE=YES -te %s %s %s %s -srcnodata "0 0 0" -co TILED=YES corr/*.tif final/%s.tif'
print 'Start writing tiles...'
counter=0
offset = 100000
os.system('pwd')
for xVal in range(mosaicULX,mosaicLRX+offset,offset):
for yVal in range(mosaicLRY,mosaicULY+offset,offset):
writerSH = open(('writer-%s.sh'%counter),'w')
writerTXT = writerCMD % (xVal,yVal,xVal+offset,yVal+offset,counter)
writerSH.write(writerTXT)
writerSH.close()
counter += 1
parallelCmd = 'ls writer-*.sh | parallel bash {}'
os.system(parallelCmd)
|
|
import os
import pprint
import random
import wx
import numpy
# The recommended way to use wx with mpl is with the WXAgg
# backend.
#
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import \
FigureCanvasWxAgg as FigCanvas, \
NavigationToolbar2WxAgg as NavigationToolbar
import mir2
import mir
from matplotlib.colors import LogNorm
import main
class mirgui(wx.Frame):
""" The main frame of the application
"""
title = 'Music Search Application'
def __init__(self):
wx.Frame.__init__(self, None, -1, self.title)
self.data = [5, 6, 9, 14]
self.create_menu()
self.create_status_bar()
self.create_main_panel()
#self.textbox.SetValue(' '.join(map(str, self.data)))
self.draw_figure()
print 'Training.'
self.musicsearch = main.Search(8, 32)
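        # Build the search index up front: every audio file under 'train/' is
        # read and added to the Search structure so later queries can be matched.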
for f in os.listdir('train'):
print f
x, fs, enc = mir.wavread('train/'+f)
self.musicsearch.add(x, fs, f)
print 'Done training.'
def create_menu(self):
self.menubar = wx.MenuBar()
menu_file = wx.Menu()
m_browse = menu_file.Append(-1, "&Import *.wav file...", "Shows a File Dialog")
self.Bind(wx.EVT_MENU, self.openfile, m_browse)
m_key = menu_file.Append(-1, "&Estimate Key...", "Estimates Key of the Entire wav file")
self.Bind(wx.EVT_MENU, self.est_key, m_key)
m_expt = menu_file.Append(-1, "&Save plot\tCtrl-S", "Save plot to file")
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, "E&xit\tCtrl-X", "Exit")
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
menu_edit = wx.Menu()
m_reset = menu_edit.Append(-1, "&Reset Parameters...", "Resets plot parameters to Default Values")
self.Bind(wx.EVT_MENU, self.on_reset, m_reset)
m_lognorm = menu_edit.AppendCheckItem(-1, "Log-Norm", "Plot gram values using Log Normalized spectrum")
self.Bind(wx.EVT_MENU, self.on_log_norm, m_lognorm)
m_WC1 = menu_edit.Append(-1, 'Adjust Input Plot', kind=wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU,self.which_canvas1, m_WC1)
m_WC2 = menu_edit.Append(-1, 'Adjust Output Plot', kind=wx.ITEM_RADIO)
self.Bind(wx.EVT_MENU,self.which_canvas2, m_WC2)
menu_help = wx.Menu()
m_about = menu_help.Append(-1, "&About\tF1", "About the demo")
self.Bind(wx.EVT_MENU, self.on_about, m_about)
self.menubar.Append(menu_file, "&File")
self.menubar.Append(menu_edit, "&Edit")
self.menubar.Append(menu_help, "&Help")
self.SetMenuBar(self.menubar)
def create_main_panel(self):
""" Creates the main panel with all the controls on it:
* mpl canvas
* mpl navigation toolbar
* Control panel for interaction
"""
self.panel = wx.Panel(self)
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 100
self.fig = Figure((3.0, 3.0), dpi=self.dpi)
self.canvas = FigCanvas(self.panel, -1, self.fig)
self.canvas2= FigCanvas(self.panel, -1, self.fig)
# Since we have only one plot, we can use add_axes
# instead of add_subplot, but then the subplot
# configuration tool in the navigation toolbar wouldn't
# work.
#
self.axes = self.fig.add_subplot(111)
# Bind the 'pick' event for clicking on one of the bars
#
self.canvas.mpl_connect('pick_event', self.on_pick)
self.drawbutton = wx.Button(self.panel, -1, "Plot Gram")
self.Bind(wx.EVT_BUTTON, self.on_draw_button, self.drawbutton)
self.plot_select = ['Time Domain Signal', 'Spectrogram','Constant Q Spectrogram', 'Chromagram']
self.combo = wx.ComboBox(self.panel, -1, pos = (0,400), choices = self.plot_select, style=wx.ALIGN_LEFT | wx.CB_READONLY)
self.combo.SetSelection(2)
self.setbutton = wx.Button(self.panel, -1, "Set Parameters")
self.Bind(wx.EVT_BUTTON, self.on_set_button, self.setbutton)
self.record = wx.BitmapButton(self.panel, -1, wx.Bitmap('record.png'))
self.Bind(wx.EVT_BUTTON, self.on_rec, self.record)
self.play = wx.BitmapButton(self.panel, -1, wx.Bitmap('play.png'))
self.Bind(wx.EVT_BUTTON, self.on_play, self.play)
self.stop = wx.BitmapButton(self.panel, -1, wx.Bitmap('stop.png'))
self.searchbutton = wx.Button(self.panel, -1, "Search Database")
self.Bind(wx.EVT_BUTTON, self.search, self.searchbutton)
self.searchbutton1 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name="1) Sonata in A Maj., Beethoven")
self.searchbutton2 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton3 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton4 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton5 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton6 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton7 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.searchbutton8 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "2) Polonaise in G Min., Chopin")
self.searchbutton9 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "3) Rondo No. 5 in C# Min., Bartok")
self.searchbutton10 = wx.Button(self.panel, -1, style=wx.BU_LEFT, name= "1) Sonata in A Maj., Beethoven")
self.Sbuttonlist = [self.searchbutton1,self.searchbutton2,
self.searchbutton3,self.searchbutton4,
self.searchbutton5,self.searchbutton6,
self.searchbutton7,self.searchbutton8,
self.searchbutton9,self.searchbutton10]
self.Bind(wx.EVT_BUTTON, self.getmeta1, self.searchbutton1)
self.Bind(wx.EVT_BUTTON, self.getmeta2, self.searchbutton2)
self.Bind(wx.EVT_BUTTON, self.getmeta3, self.searchbutton3)
self.Bind(wx.EVT_BUTTON, self.getmeta4, self.searchbutton4)
self.Bind(wx.EVT_BUTTON, self.getmeta5, self.searchbutton5)
self.Bind(wx.EVT_BUTTON, self.getmeta6, self.searchbutton6)
self.Bind(wx.EVT_BUTTON, self.getmeta7, self.searchbutton7)
self.Bind(wx.EVT_BUTTON, self.getmeta8, self.searchbutton8)
self.Bind(wx.EVT_BUTTON, self.getmeta9, self.searchbutton9)
self.Bind(wx.EVT_BUTTON, self.getmeta10, self.searchbutton10)
#self.plt_titlestr = ''
#self.plot_title = wx.StaticText(self.panel, -1, 'text1',(30,15), style=wx.ALIGN_CENTRE)
# Create the navigation toolbar, tied to the canvas
#
self.toolbar = NavigationToolbar(self.canvas)
#
# Layout with box sizers
#
flags = wx.ALIGN_LEFT | wx.ALL | wx.GROW
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.vbox2 = wx.BoxSizer(wx.VERTICAL)
self.vbox3 = wx.BoxSizer(wx.VERTICAL)
self.vbox2.AddStretchSpacer(1)
self.vbox2.Add(self.searchbutton1, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton2, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton3, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton4, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton5, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton6, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton7, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton8, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton9, 0, border=3, flag=flags)
self.vbox2.Add(self.searchbutton10, 0, border=3, flag=flags)
self.vbox2.AddStretchSpacer(1)
self.vbox3.Add(self.canvas, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox3.Add(self.canvas2, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.hbox2.Add(self.vbox2, 0, wx.LEFT | wx.TOP | wx.ALIGN_LEFT| wx.GROW)
#self.panel.SetSizer(self.vbox)
#self.vbox.Fit(self)
self.hbox2.Add(self.vbox3, 10, wx.RIGHT | wx.TOP | wx.ALIGN_RIGHT | wx.GROW)
self.vbox.Add(self.hbox2, 0, wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.toolbar, 0, wx.EXPAND)
self.vbox.AddSpacer(7)
self.hbox = wx.BoxSizer(wx.HORIZONTAL)
self.hbox.AddSpacer(15)
self.hbox.Add(self.combo, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.setbutton, 0, border = 3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.drawbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.play, 0, flag = flags)
self.hbox.Add(self.stop, 0, flag = flags)
self.hbox.Add(self.record, 0, flag = flags)
self.hbox.AddSpacer(30)
self.hbox.Add(self.searchbutton, 0, border=3, flag=flags)
self.hbox.AddSpacer(30)
self.vbox.Add(self.hbox, 0, flag = wx.ALIGN_LEFT | wx.BOTTOM | wx.EXPAND |wx.GROW)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
self.mypath = None
self.fsz = 0.040
self.hop = 0.020
self.fmax = 44100
self.x, self.fs, self.nbits = mir2.wavread('default.wav')
#self.tmax = round(float(len(self.x))/self.fs,2)
self.rectime = 20
self.tmax = self.rectime
self.tmin = 0
self.LG_flag = 0
self.LG_str = None
self.LG_vmin = 25
self.LG_vmax = 50
self.tmin_samp = None
self.tmax_samp = None
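        # WC selects which canvas draw_figure() targets: 1 draws the recorded or
        # imported query on the top canvas, 2 draws a retrieved result from
        # 'train/' on the lower canvas (see which_canvas1/which_canvas2).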
self.WC = 1
#self.rec_input = mir2.wavread('default.wav')#None
self.rec_input = None
self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
self.dict = {'Beethoven_vln_sonata5_Zukerman_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Zukerman_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Pinchas Zukerman'),
'Beethoven_vln_sonata5_Oistrakh_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Oistrakh_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'David Oistrakh'),
'Beethoven_vln_sonata5_Francescatti_01.wav':
('Sonata No. 5, Mvt. 1', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_02.wav':
('Sonata No. 5, Mvt. 2', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_03.wav':
('Sonata No. 5, Mvt. 3', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_04.wav':
('Sonata No. 5, Mvt. 4', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Beethoven_vln_sonata5_Francescatti_05.wav':
('Sonata No. 5, Mvt. 5', 'L. V. Beethoven','F Major','Violin and Piano', 'Zino Francescatti'),
'Bach Vln Partita3 - Fischbach 2004 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Fischbach 2004 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Garrett Fischbach'),
'Bach Vln Sonata1 - Fischbach 2004 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Garrett Fischbach'),
'Bach Vln Partita3 - Milstein 1955 - 01.wav':
('Partita No. 3 - Preludio', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Partita3 - Milstein 1955 - 03.wav':
('Partita No. 3 - Gavotte en Rondeau', 'J. S. Bach', 'E Major', 'Violin', 'Nathan Milstein'),
'Bach Vln Sonata1 - Milstein 1954 - 02.wav':
('Sonata No. 1 - Fuga', 'J. S. Bach', 'G minor', 'Violin', 'Nathan Milstein'),
'brahms_rhapsody_01.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Lili Kraus'),
'brahms_rhapsody_02.wav':
('Brahms Rhapsody Op.79, No.2', 'J. Brahms','G minor','Piano','Martha Argerich'),
'debussy_toccata.wav':
('Debussy Toccata from Pour le Piano', 'C. Debussy','N/A','Piano','Boris Feiner'),
'dont_stop_believin.wav':
('Don\'t Stop Believin\'', 'Journey','E major','Vocal, Guitar, Bass, Piano, Drums','Journey'),
'lady_madonna.wav':
('Lady Madonna', 'The Beatles','E major','Vocal, Guitar, Bass, Piano, Saxophone, Drums','The Beatles'),
'let_it_be.wav':
('Let it Be', 'The Beatles','C major','Vocal, Guitar, Bass, Piano, Drums','The Beatles'),
'moonlight.wav':
('Beethoven Piano Sonata No.14', 'L. Beethoven','C# minor','Piano','Unknown'),
'office_theme.wav':
('Theme from \'The Office\'', 'Unknown','G Major','Piano','Unknown'),
'konstantine.wav':
('Konstantine', 'Andrew McMahon','D minor','Vocal, Piano','Something Corporate'),
}
def create_status_bar(self):
self.statusbar = self.CreateStatusBar()
def draw_figure(self, i=0):
""" Redraws the figure
"""
if self.rec_input is None:
return
if self.mypath is None:
self.mypath = 'default.wav'
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
if self.WC == 2:
path = 'train/'
filename = self.rankresults[i][0]
fullpath = path + filename
self.x, self.fs, self.nbits = mir2.wavread(fullpath)
if self.WC == 1:
self.x = self.rec_input
#self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
print 'storing rec_input'
self.get_plot_type()
G = 0
self.tmax = float(len(self.x))/self.fs
self.tmin_samp = int(self.tmin*self.fs)
self.tmax_samp = int(self.tmax*self.fs)
if self.tmax_samp > len(self.x):
self.tmax_samp = len(self.x) - 1
print self.x.shape, self.fs, self.fsz, self.hop
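        # plot_type follows the order of self.plot_select: 0 = time-domain
        # signal, 1 = spectrogram, 2 = constant-Q spectrogram, 3 = chromagram.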
if self.plot_type == 0:
P = self.x[self.tmin_samp:self.tmax_samp]
elif self.plot_type == 1:
G = mir2.spectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 2:
G = mir2.qspectrogram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
elif self.plot_type == 3:
G = mir2.chromagram(self.x,self.fs, framesz = self.fsz, hop=self.hop, tmin=self.tmin, tmax=self.tmax)
#self.plot_titlestr = self.mypath + gramtype
self.axes.clear()
if self.plot_type == 0:
self.axes.plot(P)
        elif self.plot_type in (1, 2, 3):
if self.LG_flag == 0:
self.LG_str = None
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower')
elif self.LG_flag == 1:
self.LG_str = 'LogNorm(vmin = 25, vmax = 50)'
self.axes.imshow(G.X, aspect='auto', interpolation ='nearest',origin='lower', norm = LogNorm()) #vmin = self.LG_vmin, vmax = self.LG_vmax))
#self.WC = 1
if self.WC == 1:
self.canvas.draw()
if self.WC == 2:
self.canvas2.draw()
def which_canvas1(self, event):
self.WC = 1
def which_canvas2(self, event):
self.WC = 2
def on_draw_button(self, event):
        self.get_plot_type()
self.draw_figure()
def search(self, event):
self.ranklist = ['1) ','2) ','3) ','4) ','5) ','6) ','7) ','8) ','9) ','10) ']
self.titlelist = ['Sonata', 'Polonaise in G Min., Chopin',
'Rondo No. 5 in C# Min., Bartok', 'Sonata in A Maj., Beethoven',
'Polonaise in G Min., Chopin', 'Rondo No. 5 in C# Min., Bartok',
'Sonata in A Maj., Beethoven', 'Polonaise in G Min., Chopin',
'Rondo No. 5 in C# Min., Bartok','Rondo No. 5 in C# Min., Bartok']
self.rankresults = [('Beethoven_vln_sonata5_Francescatti_01.wav',1),('adksfjghl',3)]
print self.rec_input.shape, self.fs
for i in range(10):
self.Sbuttonlist[i].SetLabel('')
self.rankresults = self.musicsearch.query(self.rec_input, self.fs)
print self.rankresults
self.metalist = range(len(self.rankresults))
for i in range(len(self.rankresults)):
self.metalist[i] = self.dict[self.rankresults[i][0]]
for i in range(min(10, len(self.metalist))):
self.Sbuttonlist[i].SetLabel(self.ranklist[i] + self.metalist[i][0])
#self.create_main_panel()
self.WC = 2
#self.getmeta1(None)
def on_set_button(self, event):
self.get_plot_type()
params_box = ParamsDialog(self, -1, '', self.fsz, self.hop, self.tmin, self.tmax, self.plot_type)
val = params_box.ShowModal()
self.fsz, self.hop, self.tmin, self.tmax = params_box.return_params()
self.draw_figure()
params_box.Destroy()
def on_pick(self, event):
# The event received here is of the type
# matplotlib.backend_bases.PickEvent
#
# It carries lots of information, of which we're using
# only a small amount here.
#
box_points = event.artist.get_bbox().get_points()
msg = "You've clicked on a bar with coords:\n %s" % box_points
dlg = wx.MessageDialog(
self,
msg,
"Click!",
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def on_text_enter(self, event):
self.draw_figure()
def openfile(self, event):
dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.wav", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
basename = os.path.basename(path)
self.SetStatusText("You selected: %s" % basename)
self.mypath = path
self.x, self.fs, self.nbits = mir2.wavread(self.mypath)
self.rec_input = self.x
self.WC = 1
self.on_reset(self)
self.draw_figure()
dlg.Destroy()
def on_save_plot(self, event):
file_choices = "PNG (*.png)|*.png"
dlg = wx.FileDialog(
self,
message="Save plot as...",
defaultDir=os.getcwd(),
defaultFile="plot.png",
wildcard=file_choices,
style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi=self.dpi)
self.flash_status_message("Saved to %s" % path)
def on_play(self,event):
if self.WC == 2:
mir2.play(self.x, self.fs)
elif self.WC == 1:
mir2.play(self.rec_input, self.fs)
def on_rec(self,event):
print 'Recording.'
self.rec_input = mir.micread(self.rectime)
self.WC = 1
self.draw_figure()
mir.play(self.rec_input, 44100)
def est_key(self, event):
self.statusbar.SetStatusText('Estimating Key...')
keynum = mir2.Key(self.x, self.fs)
keylist = ['C', 'C#','D','D#','E','F','F#','G','G#','A','A#','B']
self.keystr = keylist[keynum]
self.statusbar.SetStatusText('The Key is: ' + self.keystr)
def on_exit(self, event):
self.Destroy()
def on_reset(self, event):
self.fsz = 0.040
self.hop = 0.020
self.fmax = self.fs
self.tmax = round(float(len(self.x))/self.fs,2)
self.tmin = 0
self.draw_figure()
def on_log_norm(self, event):
if self.LG_flag == 0:
self.LG_flag = 1
elif self.LG_flag == 1:
self.LG_flag = 0
self.draw_figure()
def on_about(self, event):
msg = """ Content-based musical search.\n Brennan Keegan, Steve Tjoa\n Signals and Information Group\n University of Maryland\n April 30, 2011 """
dlg = wx.MessageDialog(self, msg, "About", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def flash_status_message(self, msg, flash_len_ms=1500):
self.statusbar.SetStatusText(msg)
self.timeroff = wx.Timer(self)
self.Bind(
wx.EVT_TIMER,
self.on_flash_status_off,
self.timeroff)
self.timeroff.Start(flash_len_ms, oneShot=True)
def on_flash_status_off(self, event):
self.statusbar.SetStatusText('')
def get_plot_type(self):
plotstr = self.combo.GetStringSelection()
for x in range(len(self.plot_select)):
if plotstr == self.plot_select[x]:
self.plot_type = x
def getmeta1(self, event):
if self.searchbutton1.GetLabel() == '':
return
self.draw_figure(0)
meta = self.metalist[0]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta2(self, event):
if self.searchbutton2.GetLabel() == '':
return
self.draw_figure(1)
meta = self.metalist[1]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta3(self, event):
if self.searchbutton3.GetLabel() == '':
return
self.draw_figure(2)
meta = self.metalist[2]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta4(self, event):
if self.searchbutton4.GetLabel() == '':
return
self.draw_figure(3)
meta = self.metalist[3]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta5(self, event):
if self.searchbutton5.GetLabel() == '':
return
self.draw_figure(4)
meta = self.metalist[4]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta6(self, event):
if self.searchbutton6.GetLabel() == '':
return
self.draw_figure(5)
meta = self.metalist[5]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta7(self, event):
if self.searchbutton7.GetLabel() == '':
return
self.draw_figure(6)
meta = self.metalist[6]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta8(self, event):
if self.searchbutton8.GetLabel() == '':
return
self.draw_figure(7)
meta = self.metalist[7]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta9(self, event):
if self.searchbutton9.GetLabel() == '':
return
self.draw_figure(8)
meta = self.metalist[8]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
def getmeta10(self, event):
if self.searchbutton10.GetLabel() == '':
return
self.draw_figure(9)
meta = self.metalist[9]
print meta
metastr = 'Title: '+meta[0]+'\n\nComposer: '+meta[1]+'\n\nKey: '+meta[2]+'\n\nInstruments: '+meta[3]+'\n\nArtist: '+meta[4]
dial = wx.MessageDialog(None, metastr, 'Piece Information', wx.OK | wx.ICON_INFORMATION)
dial.ShowModal()
class ParamsDialog(wx.Dialog):
def __init__(self, parent, id, title, fsz, hop, tmin, tmax, plot_type):
wx.Dialog.__init__(self, parent, id, title)#, size = (400,500))
self.fsz, self.hop, self.tmin, self.tmax, self.plot_type = str(fsz), str(hop), str(tmin), str(tmax), plot_type
if self.plot_type == 0:
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.AddSpacer(3)
hbox1.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(9)
hbox2.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.Add(hbox2, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 1:
self.fmin, self.fmax = '0.00', '44100'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Freq. (Hz): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Freq. (Hz): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 2:
self.fmin, self.fmax = '0', '136'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Pitch (MIDI): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Pitch (MIDI): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
elif self.plot_type == 3:
self.fmin, self.fmax = 'C', 'B'
vbox = wx.BoxSizer(wx.VERTICAL)
hbox1 = wx.BoxSizer(wx.HORIZONTAL)
hbox2 = wx.BoxSizer(wx.HORIZONTAL)
hbox3 = wx.BoxSizer(wx.HORIZONTAL)
hbox4 = wx.BoxSizer(wx.HORIZONTAL)
hbox5 = wx.BoxSizer(wx.HORIZONTAL)
hbox6 = wx.BoxSizer(wx.HORIZONTAL)
self.fsz_label = wx.StaticText(self, -1, "Frame Size (sec): ")
self.fsz_box = wx.TextCtrl(self,-1, self.fsz, style=wx.TE_PROCESS_ENTER)
self.hop_label = wx.StaticText(self, -1, "Hop Size (sec): ")
self.hop_box = wx.TextCtrl(self,-1, self.hop, style=wx.TE_PROCESS_ENTER)
self.tmin_label = wx.StaticText(self, -1, "Start Time (sec): ")
self.tmin_box = wx.TextCtrl(self,-1, self.tmin, style=wx.TE_PROCESS_ENTER)
self.tmax_label = wx.StaticText(self, -1, "End Time (sec): ")
self.tmax_box = wx.TextCtrl(self,-1, self.tmax, style=wx.TE_PROCESS_ENTER)
self.fmin_label = wx.StaticText(self, -1, "Min Pitch (Note): ")
self.fmin_box = wx.TextCtrl(self,-1, self.fmin, style=wx.TE_PROCESS_ENTER)
self.fmax_label = wx.StaticText(self, -1, "Max Pitch (Note): ")
self.fmax_box = wx.TextCtrl(self,-1, self.fmax, style=wx.TE_PROCESS_ENTER)
#self.Bind(wx.EVT_TEXT_ENTER, self.on_text_enter, self.fsz)
hbox1.AddSpacer(80)
hbox1.Add(self.fsz_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox1.Add(self.fsz_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox2.AddSpacer(80)
hbox2.Add(self.hop_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox2.AddSpacer(13)
hbox2.Add(self.hop_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox3.AddSpacer(80)
hbox3.Add(self.tmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox3.AddSpacer(3)
hbox3.Add(self.tmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox4.AddSpacer(80)
hbox4.Add(self.tmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox4.AddSpacer(9)
hbox4.Add(self.tmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox5.AddSpacer(80)
hbox5.Add(self.fmin_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox5.AddSpacer(13)
hbox5.Add(self.fmin_box, 1, wx.ALIGN_CENTER|wx.TOP)
hbox6.AddSpacer(80)
hbox6.Add(self.fmax_label, 1, wx.ALIGN_CENTER | wx.TOP)
hbox6.AddSpacer(9)
hbox6.Add(self.fmax_box, 1, wx.ALIGN_CENTER|wx.TOP)
sizer = self.CreateButtonSizer(wx.CANCEL|wx.OK)
space = 10
vbox.AddSpacer(10)
vbox.Add(hbox1, 1)
vbox.AddSpacer(space)
vbox.Add(hbox2, 1)
vbox.AddSpacer(space)
vbox.Add(hbox3, 1)
vbox.AddSpacer(space)
vbox.Add(hbox4, 1)
vbox.AddSpacer(space)
vbox.Add(hbox5, 1)
vbox.AddSpacer(space)
vbox.Add(hbox6, 1)
vbox.AddSpacer(15)
vbox.Add(sizer, 0, wx.ALIGN_CENTER)
vbox.AddSpacer(20)
self.SetSizer(vbox)
self.Bind(wx.EVT_BUTTON, self.OnOK, id=wx.ID_OK)
def OnOK(self, event):
if self.plot_type != 0:
self.fsz = float(self.fsz_box.GetValue())
self.hop = float(self.hop_box.GetValue())
            self.tmin = float(self.tmin_box.GetValue())
            self.tmax = float(self.tmax_box.GetValue())
self.Close()
def return_params(self):
return self.fsz, self.hop, self.tmin, self.tmax
if __name__ == '__main__':
app = wx.PySimpleApp()
app.frame = mirgui()
app.frame.Show()
app.frame.Maximize()
app.MainLoop()
|
|
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A wrapper of Session API which runs hooks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import sys
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import util
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as training_saver
from tensorflow.python.training import session_manager as sm
from tensorflow.python.training import session_run_hook
# The list of exceptions that we should recover from. Exceptions not in this
# list may terminate the job.
_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)
# Value that indicates no value was provided.
USE_DEFAULT = object()
# TODO(touts): Share that with the Supervisor.
class Scaffold(object):
"""Structure to create or gather pieces commonly needed to train a model.
When you build a model for training you usually need ops to initialize
variables, a `Saver` to checkpoint them, an op to collect summaries for
the visualizer, and so on.
Various libraries built on top of the core TensorFlow library take care of
creating some or all of these pieces and storing them in well known
collections in the graph. The `Scaffold` class helps pick these pieces from
the graph collections, creating and adding them to the collections if needed.
If you call the scaffold constructor without any arguments, it will pick
pieces from the collections, creating default ones if needed when
`scaffold.finalize()` is called. You can pass arguments to the constructor to
provide your own pieces. Pieces that you pass to the constructor are not
added to the graph collections.
The following pieces are directly accessible as attributes of the `Scaffold`
object:
* `saver`: A `tf.train.Saver` object taking care of saving the variables.
Picked from and stored into the `SAVERS` collection in the graph by default.
* `init_op`: An op to run to initialize the variables. Picked from and
stored into the `INIT_OP` collection in the graph by default.
* `ready_op`: An op to verify that the variables are initialized. Picked
from and stored into the `READY_OP` collection in the graph by default.
* `ready_for_local_init_op`: An op to verify that global state has been
initialized and it is alright to run `local_init_op`. Picked from and
stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by
default. This is needed when the initialization of local variables depends
on the values of global variables.
* `local_init_op`: An op to initialize the local variables. Picked
from and stored into the `LOCAL_INIT_OP` collection in the graph by default.
* `summary_op`: An op to run and merge the summaries in the graph. Picked
from and stored into the `SUMMARY_OP` collection in the graph by default.
* `global_step`: A tensor containing the global step counter. Picked
from and stored into the `GLOBAL_STEP` collection in the graph by default.
You can also pass the following additional pieces to the constructor:
* `init_feed_dict`: A session feed dictionary that should be used when
running the init op.
* `init_fn`: A callable to run after the init op to perform additional
initializations. The callable will be called as
`init_fn(scaffold, session)`.
"""
def __init__(self,
init_op=None,
init_feed_dict=None,
init_fn=None,
ready_op=None,
ready_for_local_init_op=None,
local_init_op=None,
summary_op=None,
saver=None,
copy_from_scaffold=None):
"""Create a scaffold.
Args:
init_op: Optional op for initializing variables.
init_feed_dict: Optional session feed dictionary to use when running the
init_op.
init_fn: Optional function to use to initialize the model after running
the init_op. Will be called as `init_fn(scaffold, session)`.
ready_op: Optional op to verify that the variables are initialized. Must
return an empty 1D string tensor when the variables are initialized, or
a non-empty 1D string tensor listing the names of the non-initialized
variables.
ready_for_local_init_op: Optional op to verify that the global variables
are initialized and `local_init_op` can be run. Must return an empty
1D string tensor when the global variables are initialized, or a
non-empty 1D string tensor listing the names of the non-initialized
global variables.
local_init_op: Optional op to initialize local variables.
summary_op: Optional op to gather all summaries. Must return a scalar
string tensor containing a serialized `Summary` proto.
saver: Optional `tf.train.Saver` object to use to save and restore
variables.
copy_from_scaffold: Optional scaffold object to copy fields from. Its
fields will be overwritten by the provided fields in this function.
"""
if copy_from_scaffold is not None:
if not isinstance(copy_from_scaffold, Scaffold):
raise TypeError('copy_from_scaffold is not a Scaffold instance.')
      # We need coalesce since Tensor is not converted to bool automatically,
# so the common idiom of (a or b) does not work.
coalesce = lambda a, b: a if a is not None else b
init_op = coalesce(init_op, copy_from_scaffold.init_op)
init_feed_dict = coalesce(init_feed_dict,
copy_from_scaffold.init_feed_dict)
# Use the original init_fn provided by the user to init the new Scaffold.
init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access
ready_op = coalesce(ready_op, copy_from_scaffold.ready_op)
ready_for_local_init_op = coalesce(
ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op)
local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op)
summary_op = coalesce(summary_op, copy_from_scaffold.summary_op)
saver = coalesce(saver, copy_from_scaffold.saver)
# NOTE(touts): modifying the init function to be passed the scaffold is a
# hack to make it easy to find the saver. Is there a better way?
self._user_init_fn = init_fn
if init_fn:
self._init_fn = lambda sess: init_fn(self, sess)
else:
self._init_fn = None
self._init_op = init_op
self._init_feed_dict = init_feed_dict
self._ready_op = ready_op
self._ready_for_local_init_op = ready_for_local_init_op
self._local_init_op = local_init_op
self._summary_op = summary_op
self._saver = saver
def finalize(self):
"""Creates operations if needed and finalizes the graph."""
if self._init_op is None:
def default_init_op():
return control_flow_ops.group(
variables.global_variables_initializer(),
resources.initialize_resources(resources.shared_resources()))
self._init_op = Scaffold.get_or_default(
'init_op',
ops.GraphKeys.INIT_OP,
default_init_op)
if self._ready_op is None:
def default_ready_op():
return array_ops.concat([
variables.report_uninitialized_variables(),
resources.report_uninitialized_resources()
], 0)
self._ready_op = Scaffold.get_or_default(
'ready_op', ops.GraphKeys.READY_OP,
default_ready_op)
if self._ready_for_local_init_op is None:
def default_ready_for_local_init_op():
return variables.report_uninitialized_variables(
variables.global_variables())
self._ready_for_local_init_op = Scaffold.get_or_default(
'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,
default_ready_for_local_init_op)
if self._local_init_op is None:
self._local_init_op = Scaffold.get_or_default(
'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,
Scaffold._default_local_init_op)
if self._summary_op is None:
self._summary_op = Scaffold.get_or_default('summary_op',
ops.GraphKeys.SUMMARY_OP,
summary.merge_all)
# pylint: disable=g-long-lambda
if self._saver is None:
self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access
# pylint: enable=g-long-lambda
self._saver.build()
ops.get_default_graph().finalize()
return self
@property
def init_fn(self):
return self._init_fn
@property
def init_op(self):
return self._init_op
@property
def ready_op(self):
return self._ready_op
@property
def ready_for_local_init_op(self):
return self._ready_for_local_init_op
@property
def local_init_op(self):
return self._local_init_op
@property
def summary_op(self):
return self._summary_op
@property
def saver(self):
return self._saver
@property
def init_feed_dict(self):
return self._init_feed_dict
@staticmethod
def get_or_default(arg_name, collection_key, default_constructor):
"""Get from cache or create a default operation."""
elements = ops.get_collection(collection_key)
if elements:
if len(elements) > 1:
        raise RuntimeError(
            'More than one item in the collection "%s". '
            'Please indicate which one to use by passing it to '
            'the tf.Scaffold constructor as: '
            'tf.Scaffold(%s=item to use)' % (collection_key, arg_name))
return elements[0]
op = default_constructor()
if op is not None:
ops.add_to_collection(collection_key, op)
return op
@staticmethod
def _default_local_init_op():
return control_flow_ops.group(variables.local_variables_initializer(),
lookup_ops.tables_initializer())
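# A hedged, illustrative sketch (not part of the original library code): build a
# `Scaffold` with a custom `init_fn` and let `finalize()` fill in the remaining
# default pieces from the graph collections. The helper name below is
# hypothetical, and the function assumes a default graph with variables built.
def _example_build_scaffold():
  """Illustrative only; returns a finalized Scaffold for the default graph."""
  def extra_init(scaffold, session):
    # Called as `init_fn(scaffold, session)` after `init_op` has run; a real
    # model might warm-start weights or log initialization details here.
    del scaffold, session
  scaffold = Scaffold(init_fn=extra_init)
  scaffold.finalize()  # Creates default init/ready/local_init/summary ops.
  return scaffold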
def MonitoredTrainingSession(master='', # pylint: disable=invalid-name
is_chief=True,
checkpoint_dir=None,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=USE_DEFAULT,
save_summaries_secs=USE_DEFAULT,
config=None,
stop_grace_period_secs=120,
log_step_count_steps=100):
"""Creates a `MonitoredSession` for training.
  For a chief, this utility sets a proper session initializer/restorer. It also
  creates hooks related to checkpoint and summary saving. For workers, this
  utility sets a proper session creator which waits for the chief to
  initialize/restore. Please check `tf.train.MonitoredSession` for more
  information.
Args:
master: `String` the TensorFlow master to use.
    is_chief: If `True`, it will take care of initialization and recovery of
      the underlying TensorFlow session. If `False`, it will wait on a chief to
initialize or recover the TensorFlow session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified, a default one is created. It's used to finalize the graph.
hooks: Optional list of `SessionRunHook` objects.
chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if
`is_chief==True`, ignore otherwise.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If both
`save_summaries_steps` and `save_summaries_secs` are set to `None`, then
the default summary saver isn't used. Default 100.
save_summaries_secs: The frequency, in secs, that the summaries are written
to disk using a default summary saver. If both `save_summaries_steps` and
`save_summaries_secs` are set to `None`, then the default summary saver
isn't used. Default not enabled.
config: an instance of `tf.ConfigProto` proto used to configure the session.
It's the `config` argument of constructor of `tf.Session`.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
log_step_count_steps: The frequency, in number of global steps, that the
global step/sec is logged.
Returns:
A `MonitoredSession` object.
"""
if save_summaries_steps == USE_DEFAULT and save_summaries_secs == USE_DEFAULT:
save_summaries_steps = 100
save_summaries_secs = None
elif save_summaries_secs == USE_DEFAULT:
save_summaries_secs = None
elif save_summaries_steps == USE_DEFAULT:
save_summaries_steps = None
scaffold = scaffold or Scaffold()
if not is_chief:
session_creator = WorkerSessionCreator(
scaffold=scaffold, master=master, config=config)
return MonitoredSession(session_creator=session_creator, hooks=hooks or [],
stop_grace_period_secs=stop_grace_period_secs)
all_hooks = []
if chief_only_hooks:
all_hooks.extend(chief_only_hooks)
session_creator = ChiefSessionCreator(
scaffold=scaffold,
checkpoint_dir=checkpoint_dir,
master=master,
config=config)
if checkpoint_dir:
if log_step_count_steps and log_step_count_steps > 0:
all_hooks.append(
basic_session_run_hooks.StepCounterHook(
output_dir=checkpoint_dir, every_n_steps=log_step_count_steps))
if (save_summaries_steps and save_summaries_steps > 0) or (
save_summaries_secs and save_summaries_secs > 0):
all_hooks.append(basic_session_run_hooks.SummarySaverHook(
scaffold=scaffold,
save_steps=save_summaries_steps,
save_secs=save_summaries_secs,
output_dir=checkpoint_dir))
if save_checkpoint_secs and save_checkpoint_secs > 0:
all_hooks.append(basic_session_run_hooks.CheckpointSaverHook(
checkpoint_dir, save_secs=save_checkpoint_secs, scaffold=scaffold))
if hooks:
all_hooks.extend(hooks)
return MonitoredSession(session_creator=session_creator, hooks=all_hooks,
stop_grace_period_secs=stop_grace_period_secs)
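# A hedged, illustrative sketch (not part of the original library code): the
# chief-side training loop that `MonitoredTrainingSession` is designed for.
# `train_op` and `checkpoint_dir` are hypothetical placeholders supplied by the
# caller, and the graph containing `train_op` is assumed to be already built.
def _example_training_loop(train_op, checkpoint_dir):
  """Illustrative only; trains until a hook or OutOfRangeError requests stop."""
  with MonitoredTrainingSession(checkpoint_dir=checkpoint_dir,
                                save_checkpoint_secs=600,
                                save_summaries_steps=100) as sess:
    while not sess.should_stop():
      sess.run(train_op)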
class SessionCreator(object):
"""A factory for tf.Session."""
@abc.abstractmethod
def create_session(self):
raise NotImplementedError(
'create_session is not implemented for {}.'.format(self))
class ChiefSessionCreator(SessionCreator):
"""Creates a tf.Session for a chief."""
def __init__(self,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None):
"""Initializes a chief session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
checkpoint_filename_with_path: Full file name path to the checkpoint file.
"""
self._checkpoint_dir = checkpoint_dir
self._checkpoint_filename_with_path = checkpoint_filename_with_path
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().prepare_session(
self._master,
saver=self._scaffold.saver,
checkpoint_dir=self._checkpoint_dir,
checkpoint_filename_with_path=self._checkpoint_filename_with_path,
config=self._config,
init_op=self._scaffold.init_op,
init_feed_dict=self._scaffold.init_feed_dict,
init_fn=self._scaffold.init_fn)
class WorkerSessionCreator(SessionCreator):
"""Creates a tf.Session for a worker."""
def __init__(self, scaffold=None, master='', config=None):
"""Initializes a worker session creator.
Args:
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
"""
self._scaffold = scaffold or Scaffold()
self._session_manager = None
self._master = master
self._config = config
def _get_session_manager(self):
if self._session_manager:
return self._session_manager
self._session_manager = sm.SessionManager(
local_init_op=self._scaffold.local_init_op,
ready_op=self._scaffold.ready_op,
ready_for_local_init_op=self._scaffold.ready_for_local_init_op,
graph=ops.get_default_graph())
return self._session_manager
def create_session(self):
self._scaffold.finalize()
return self._get_session_manager().wait_for_session(
self._master, config=self._config,
max_wait_secs=30 * 60 # Wait up to 30 mins for the session to be ready.
)
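# A hedged, illustrative sketch (not part of the original library code): how the
# two creators above are typically paired in a distributed job. The `master`
# address and `checkpoint_dir` are hypothetical placeholders.
def _example_session_creator(is_chief, master, checkpoint_dir=None):
  """Illustrative only; returns the creator a MonitoredSession would wrap."""
  if is_chief:
    # The chief initializes or restores the model and owns the checkpoint dir.
    return ChiefSessionCreator(master=master, checkpoint_dir=checkpoint_dir)
  # Workers wait for the chief to finish initialization/restore.
  return WorkerSessionCreator(master=master)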
class _MonitoredSession(object):
"""See `MonitoredSession` or `SingularMonitoredSession`."""
def __init__(self, session_creator, hooks, should_recover,
stop_grace_period_secs=120):
"""Sets up a Monitored or Hooked Session.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` or a `WorkerSessionCreator`.
      hooks: An iterable of `SessionRunHook` objects.
should_recover: A bool. Indicates whether to recover from `AbortedError`
and `UnavailableError` or not.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
self._graph_was_finalized = ops.get_default_graph().finalized
self._hooks = hooks or []
for h in self._hooks:
h.begin()
# Create the session.
self._coordinated_creator = self._CoordinatedSessionCreator(
session_creator=session_creator or ChiefSessionCreator(),
hooks=self._hooks,
stop_grace_period_secs=stop_grace_period_secs)
if should_recover:
self._sess = _RecoverableSession(self._coordinated_creator)
else:
self._sess = self._coordinated_creator.create_session()
@property
def graph(self):
"""The graph that was launched in this session."""
if self._tf_sess() is None:
return None
return self._tf_sess().graph
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Run ops in the monitored session.
This method is completely compatible with the `tf.Session.run()` method.
Args:
fetches: Same as `tf.Session.run()`.
feed_dict: Same as `tf.Session.run()`.
options: Same as `tf.Session.run()`.
run_metadata: Same as `tf.Session.run()`.
Returns:
Same as `tf.Session.run()`.
"""
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
def run_step_fn(self, step_fn):
"""Run ops using a step function.
Args:
step_fn: A function or a method with a single argument of type
`StepContext`. The function may use methods of the argument to
perform computations with access to a raw session.
The returned value of the `step_fn` will be returned from `run_step_fn`,
unless a stop is requested. In that case, the next `should_stop` call
will return True.
Example usage:
```python
with tf.Graph().as_default():
c = tf.placeholder(dtypes.float32)
v = tf.add(c, 4.0)
w = tf.add(c, 0.5)
def step_fn(step_context):
a = step_context.session.run(fetches=v, feed_dict={c: 0.5})
if a <= 4.5:
step_context.request_stop()
return step_context.run_with_hooks(fetches=w, feed_dict={c: 0.1})
with tf.MonitoredSession() as session:
while not session.should_stop():
a = session.run_step_fn(step_fn)
```
Hooks interact with the `run_with_hooks()` call inside the `step_fn`
as they do with a `MonitoredSession.run` call.
Returns:
Returns the returned value of `step_fn`.
Raises:
StopIteration: if `step_fn` has called `request_stop()`. It may be
caught by `with tf.MonitoredSession()` to close the session.
ValueError: if `step_fn` doesn't have a single argument called
`step_context`. It may also optionally have `self` for cases when it
belongs to an object.
"""
step_fn_arguments = util.fn_args(step_fn)
if step_fn_arguments != ('step_context',) and step_fn_arguments != (
'self',
'step_context',
):
raise ValueError(
'`step_fn` may either have one `step_context` argument, or'
' `self` and `step_context` arguments if it\'s an instance'
' method. Got {} instead.'.format(step_fn_arguments))
# `self._sess` is either `_RecoverableSession` or a `_CoordinatedSession`.
# Setting `run_with_hooks` to `None` will cause `run_with_hooks` to be
# `_CoordinatedSession.run` downstream in either case. This allows
    # `_PREEMPTION_ERRORS` to propagate from within `step_fn` to
# `_RecoverableSession.run_step_fn`.
return self._sess.run_step_fn(step_fn, self._tf_sess(), run_with_hooks=None)
class StepContext(object):
"""Control flow instrument for the `step_fn` from `run_step_fn()`.
Users of `step_fn` may perform `run()` calls without running hooks
by accessing the `session`. A `run()` call with hooks may be performed
using `run_with_hooks()`. Computation flow can be interrupted using
`request_stop()`.
"""
def __init__(self, session, run_with_hooks_fn):
"""Initializes the `step_context` argument for a `step_fn` invocation.
Args:
session: An instance of `tf.Session`.
run_with_hooks_fn: A function for running fetches and hooks.
"""
self._session = session
self._run_with_hooks_fn = run_with_hooks_fn
@property
def session(self):
return self._session
def run_with_hooks(self, *args, **kwargs):
"""Same as `MonitoredSession.run`. Accepts the same arguments."""
return self._run_with_hooks_fn(*args, **kwargs)
def request_stop(self):
"""Exit the training loop by causing `should_stop()` to return `True`.
Causes `step_fn` to exit by raising an exception.
Raises:
StopIteration
"""
raise StopIteration('step_fn has requested the iterations to stop.')
def should_stop(self):
return self._sess is None or self._sess.should_stop()
def close(self):
self._close_internal()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
if exception_type in [errors.OutOfRangeError, StopIteration]:
exception_type = None
self._close_internal(exception_type)
# __exit__ should return True to suppress an exception.
return exception_type is None
class _CoordinatedSessionCreator(SessionCreator):
"""Factory for the _RecoverableSession."""
def __init__(self, session_creator, hooks, stop_grace_period_secs):
self._session_creator = session_creator
self._hooks = hooks
self.coord = None
self.tf_sess = None
self._stop_grace_period_secs = stop_grace_period_secs
def create_session(self):
"""Creates a coordinated session."""
# Keep the tf_sess for unit testing.
self.tf_sess = self._session_creator.create_session()
# We don't want coordinator to suppress any exception.
self.coord = coordinator.Coordinator(clean_stop_exception_types=[])
queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)
# Inform the hooks that a new session has been created.
for hook in self._hooks:
hook.after_create_session(self.tf_sess, self.coord)
return _CoordinatedSession(
_HookedSession(self.tf_sess, self._hooks), self.coord,
self._stop_grace_period_secs)
def _close_internal(self, exception_type=None):
try:
if not exception_type:
for h in self._hooks:
h.end(self._coordinated_creator.tf_sess)
finally:
try:
if self._sess is None:
raise RuntimeError('Session is already closed.')
self._sess.close()
finally:
self._sess = None
self._coordinated_creator.tf_sess = None
self._coordinated_creator.coord = None
if not self._graph_was_finalized:
ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access
def _is_closed(self):
"""Return True if the monitored session is closed. For tests only.
Returns:
A boolean.
"""
return self._coordinated_creator.tf_sess is None
def _tf_sess(self):
return self._coordinated_creator.tf_sess
class MonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, recovery and hooks.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with MonitoredSession(session_creator=ChiefSessionCreator(...),
hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
  Initialization: At creation time the monitored session does the following
  things in the given order:
  * calls `hook.begin()` for each given hook
  * finalizes the graph via `scaffold.finalize()`
  * creates the session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
* calls `hook.after_create_session()`
  Run: When `run()` is called, the monitored session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
  * returns the result of `session.run()` asked for by the user
* if `AbortedError` or `UnavailableError` occurs, it recovers or
reinitializes the session before executing the run() call again
  Exit: At `close()`, the monitored session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
  * suppresses the `OutOfRange` error, which indicates that all inputs have
    been processed, if the monitored_session is used as a context
How to set `tf.Session` arguments:
* In most cases you can set session arguments as follows:
```python
MonitoredSession(
session_creator=ChiefSessionCreator(master=..., config=...))
```
  * In a distributed setting, for a non-chief worker, you can use the following:
```python
MonitoredSession(
session_creator=WorkerSessionCreator(master=..., config=...))
```
See `MonitoredTrainingSession` for an example usage based on chief or worker.
  Note: This is not a `tf.Session`. For example, it cannot do the following:
  * it cannot be set as a default session.
  * it cannot be sent to saver.save.
  * it cannot be sent to tf.train.start_queue_runners.
Args:
session_creator: A factory object to create session. Typically a
`ChiefSessionCreator` which is the default one.
    hooks: An iterable of `SessionRunHook` objects.
Returns:
A MonitoredSession object.
"""
def __init__(self, session_creator=None, hooks=None,
stop_grace_period_secs=120):
super(MonitoredSession, self).__init__(
session_creator, hooks, should_recover=True,
stop_grace_period_secs=stop_grace_period_secs)
class SingularMonitoredSession(_MonitoredSession):
"""Session-like object that handles initialization, restoring, and hooks.
Please note that this utility is not recommended for distributed settings.
For distributed settings, please use `tf.train.MonitoredSession`. The
differences between `MonitoredSession` and `SingularMonitoredSession` are:
* `MonitoredSession` handles `AbortedError` and `UnavailableError` for
distributed settings, but `SingularMonitoredSession` does not.
* `MonitoredSession` can be created in `chief` or `worker` modes.
`SingularMonitoredSession` is always created as `chief`.
* You can access the raw `tf.Session` object used by
`SingularMonitoredSession`, whereas in MonitoredSession the raw session is
private. This can be used:
- To `run` without hooks.
- To save and restore.
* All other functionality is identical.
Example usage:
```python
saver_hook = CheckpointSaverHook(...)
summary_hook = SummarySaverHook(...)
with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess:
while not sess.should_stop():
sess.run(train_op)
```
  Initialization: At creation time the hooked session does the following things
  in the given order:
  * calls `hook.begin()` for each given hook
  * finalizes the graph via `scaffold.finalize()`
  * creates the session
* initializes the model via initialization ops provided by `Scaffold`
* restores variables if a checkpoint exists
* launches queue runners
  Run: When `run()` is called, the hooked session does the following things:
* calls `hook.before_run()`
* calls TensorFlow `session.run()` with merged fetches and feed_dict
* calls `hook.after_run()`
  * returns the result of `session.run()` asked for by the user
  Exit: At `close()`, the hooked session does the following things in order:
* calls `hook.end()`
* closes the queue runners and the session
  * suppresses the `OutOfRange` error, which indicates that all inputs have
    been processed, if the `SingularMonitoredSession` is used as a context.
"""
def __init__(self,
hooks=None,
scaffold=None,
master='',
config=None,
checkpoint_dir=None,
stop_grace_period_secs=120,
checkpoint_filename_with_path=None):
"""Creates a SingularMonitoredSession.
Args:
      hooks: An iterable of `SessionRunHook` objects.
scaffold: A `Scaffold` used for gathering or building supportive ops. If
not specified a default one is created. It's used to finalize the graph.
master: `String` representation of the TensorFlow master to use.
config: `ConfigProto` proto used to configure the session.
checkpoint_dir: A string. Optional path to a directory where to restore
variables.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
checkpoint_filename_with_path: A string. Optional path to a checkpoint
file from which to restore variables.
"""
session_creator = ChiefSessionCreator(
scaffold=scaffold,
master=master,
config=config,
checkpoint_dir=checkpoint_dir,
checkpoint_filename_with_path=checkpoint_filename_with_path)
super(SingularMonitoredSession, self).__init__(
session_creator, hooks, should_recover=False,
stop_grace_period_secs=stop_grace_period_secs)
def raw_session(self):
"""Returns underlying `TensorFlow.Session` object."""
return self._tf_sess()
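# A hedged, illustrative sketch (not part of the original library code): using
# `raw_session()` to save with a plain `Saver`, which a `MonitoredSession` does
# not allow. `train_op`, `saver` and `save_path` are hypothetical placeholders
# created by the caller before the session (the graph is finalized afterwards).
def _example_singular_session_save(train_op, saver, save_path):
  """Illustrative only; runs `train_op` and checkpoints via the raw session."""
  with SingularMonitoredSession() as sess:
    while not sess.should_stop():
      sess.run(train_op)
      saver.save(sess.raw_session(), save_path)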
class _WrappedSession(object):
"""Wrapper around a `tf.Session`.
This wrapper is used as a base class for various session wrappers
that provide additional functionality such as monitoring, coordination,
and recovery.
In addition to the methods exported by `SessionInterface` the wrapper
provides a method to check for stop and never raises exceptions from
calls to `close()`.
"""
def __init__(self, sess):
"""Creates a `_WrappedSession`.
Args:
sess: A `tf.Session` or `_WrappedSession` object. The wrapped session.
"""
self._sess = sess
self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)
@property
def graph(self):
return self._sess.graph
@property
def sess_str(self):
return self._sess.sess_str
def should_stop(self):
"""Return true if this session should not be used anymore.
Always return True if the session was closed.
Returns:
True if the session should stop, False otherwise.
"""
if self._check_stop():
return True
if self._sess:
return self._wrapped_is_stoppable and self._sess.should_stop()
return True
def _check_stop(self):
"""Hook for subclasses to provide their own stop condition.
Returns:
True if the session should stop, False otherwise.
"""
return False
def close(self):
if self._sess:
try:
self._sess.close()
except _PREEMPTION_ERRORS:
pass
finally:
self._sess = None
def run(self, *args, **kwargs):
return self._sess.run(*args, **kwargs)
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
# `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`.
# It is `None` when called from `_CoordinatedSession`. In that case
# `self.run` is `_CoordinatedSession.run`.
run_with_hooks = run_with_hooks or self.run
return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks))
class _RecoverableSession(_WrappedSession):
"""A wrapped session that recreates a session upon certain kinds of errors.
The constructor is passed a SessionCreator object, not a session.
Calls to `run()` are delegated to the wrapped session. If a call raises the
exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the
wrapped session is closed, and a new one is created by calling the factory
again.
"""
def __init__(self, sess_creator):
"""Create a new `_RecoverableSession`.
The value returned by calling `sess_creator.create_session()` will be the
session wrapped by this recoverable session.
Args:
      sess_creator: A `SessionCreator` to be wrapped by this recoverable
        session.
"""
self._sess_creator = sess_creator
_WrappedSession.__init__(self, self._create_session())
def _create_session(self):
while True:
try:
return self._sess_creator.create_session()
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised while a session was being created. '
'This may be due to a preemption of a connected worker '
'or parameter server. A new session will be created. '
'Error: %s', e)
def _check_stop(self):
try:
if self._sess:
return self._sess._check_stop() # pylint: disable=protected-access
else:
return True
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised while considering whether the '
'session is complete. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = self._create_session()
# Since we have just recreated the session, the overall computation should
# not stop:
return False
except Exception: # pylint: disable=broad-except
# `should_stop` should return True instead of raising an exception.
return True
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
while True:
try:
if not self._sess:
self._sess = self._create_session()
return self._sess.run(fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = None
def run_step_fn(self, step_fn, raw_session, run_with_hooks):
while True:
try:
if not self._sess:
self._sess = self._create_session()
run_with_hooks = self._sess.run
return self._sess.run_step_fn(step_fn, raw_session, run_with_hooks)
except _PREEMPTION_ERRORS as e:
logging.info('An error was raised. This may be due to a preemption in '
'a connected worker or parameter server. The current '
'session will be closed and a new session will be '
'created. Error: %s', e)
self.close()
self._sess = None
class _CoordinatedSession(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
  In addition, after each call to `run()` this session asks the coordinator if
  the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord, stop_grace_period_secs=120):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
self._stop_grace_period_secs = stop_grace_period_secs
def _check_stop(self):
# If the coordinator was asked to stop due to an exception, then it needs
# to be propagated to this stack.
self._coord.raise_requested_exception()
# At this point, no exceptions are recorded in the coordinator.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join(
stop_grace_period_secs=self._stop_grace_period_secs,
ignore_live_threads=True)
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
def run(self, *args, **kwargs):
try:
return self._sess.run(*args, **kwargs)
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
# A non-preemption error could have been caused by a preemption error
# in the coordinator. If this is the case, raise that exception instead,
# since it's the root cause. Otherwise, stick to the `original_exc_info`.
original_exc_info = sys.exc_info()
try:
self._coord.raise_requested_exception()
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
raise six.reraise(*original_exc_info)
else:
raise six.reraise(*original_exc_info)
class _HookedSession(_WrappedSession):
"""A _WrappedSession that calls hooks during calls to run().
The list of hooks to call is passed in the constructor. Before each call
to `run()` the session calls the `before_run()` method of the hooks, which
can return additional ops or tensors to run. These are added to the arguments
of the call to `run()`.
When the `run()` call finishes, the session calls the `after_run()` methods of
the hooks, passing the values returned by the `run()` call corresponding to
the ops and tensors that each hook requested.
  If any of the hooks requests a stop via the run_context, the session will be
  marked as needing to stop and its `should_stop()` method will now return
`True`.
"""
def __init__(self, sess, hooks):
"""Initializes a _HookedSession object.
Args:
sess: A `tf.Session` or a `_WrappedSession` object.
      hooks: An iterable of `SessionRunHook` objects.
"""
_WrappedSession.__init__(self, sess)
self._hooks = hooks
self._should_stop = False
def _check_stop(self):
"""See base class."""
return self._should_stop
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""See base class."""
if self.should_stop():
raise RuntimeError('Run called even after should_stop requested.')
actual_fetches = {'caller': fetches}
run_context = session_run_hook.SessionRunContext(
original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),
session=self._sess)
options = options or config_pb2.RunOptions()
feed_dict = self._call_hook_before_run(run_context, actual_fetches,
feed_dict, options)
# Do session run.
run_metadata = run_metadata or config_pb2.RunMetadata()
outputs = _WrappedSession.run(self,
fetches=actual_fetches,
feed_dict=feed_dict,
options=options,
run_metadata=run_metadata)
for hook in self._hooks:
hook.after_run(
run_context,
session_run_hook.SessionRunValues(
results=outputs[hook] if hook in outputs else None,
options=options,
run_metadata=run_metadata))
self._should_stop = self._should_stop or run_context.stop_requested
return outputs['caller']
def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,
options):
"""Calls hooks.before_run and handles requests from hooks."""
hook_feeds = {}
for hook in self._hooks:
request = hook.before_run(run_context)
if request is not None:
if request.fetches is not None:
fetch_dict[hook] = request.fetches
if request.feed_dict:
self._raise_if_feeds_intersects(
hook_feeds, request.feed_dict,
'Same tensor is fed by two hooks.')
hook_feeds.update(request.feed_dict)
if request.options:
self._merge_run_options(options, request.options)
if not hook_feeds:
return user_feed_dict
if not user_feed_dict:
return hook_feeds
self._raise_if_feeds_intersects(
user_feed_dict, hook_feeds,
'Same tensor is fed by a SessionRunHook and user.')
hook_feeds.update(user_feed_dict)
return hook_feeds
def _raise_if_feeds_intersects(self, feeds1, feeds2, message):
intersection = set(feeds1.keys()) & set(feeds2.keys())
if intersection:
raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))
def _merge_run_options(self, options, incoming_options):
"""Merge two instances of RunOptions into the first one.
During the merger, the numerical fields including trace_level,
timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.
The boolean value is set to the logical OR of the two.
debug_tensor_watch_opts of the original options is extended with that from
the incoming one.
Args:
options: The options to merge into.
incoming_options: The options to be merged into the first argument.
"""
options.trace_level = max(options.trace_level, incoming_options.trace_level)
options.timeout_in_ms = max(options.timeout_in_ms,
incoming_options.timeout_in_ms)
options.inter_op_thread_pool = max(options.inter_op_thread_pool,
incoming_options.inter_op_thread_pool)
options.output_partition_graphs = max(
options.output_partition_graphs,
incoming_options.output_partition_graphs)
options.debug_options.debug_tensor_watch_opts.extend(
incoming_options.debug_options.debug_tensor_watch_opts)
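# A hedged, illustrative sketch (not part of the original library code): a
# minimal `SessionRunHook` showing the before_run/after_run contract that
# `_HookedSession` implements above. The fetched tensor is assumed to be
# supplied by the caller (for example, the global step).
class _ExampleStopAfterValueHook(session_run_hook.SessionRunHook):
  """Illustrative only; requests a stop once the fetched value reaches a cap."""
  def __init__(self, value_tensor, max_value):
    self._value_tensor = value_tensor
    self._max_value = max_value
  def before_run(self, run_context):
    # Extra fetches returned here are merged with the user's fetches.
    return session_run_hook.SessionRunArgs(self._value_tensor)
  def after_run(self, run_context, run_values):
    # `run_values.results` holds the value fetched for this hook.
    if run_values.results >= self._max_value:
      run_context.request_stop()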
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_config import cfg
import requests
from rally.common import logging
from rally import exceptions
from rally.plugins.openstack import scenario
from rally.task import atomic
from rally.task import utils
LOG = logging.getLogger(__name__)
HEAT_BENCHMARK_OPTS = [
cfg.FloatOpt("heat_stack_create_prepoll_delay",
default=2.0,
help="Time(in sec) to sleep after creating a resource before "
"polling for it status."),
cfg.FloatOpt("heat_stack_create_timeout",
default=3600.0,
help="Time(in sec) to wait for heat stack to be created."),
cfg.FloatOpt("heat_stack_create_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack creation."),
cfg.FloatOpt("heat_stack_delete_timeout",
default=3600.0,
help="Time(in sec) to wait for heat stack to be deleted."),
cfg.FloatOpt("heat_stack_delete_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack deletion."),
cfg.FloatOpt("heat_stack_check_timeout",
default=3600.0,
help="Time(in sec) to wait for stack to be checked."),
cfg.FloatOpt("heat_stack_check_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack checking."),
cfg.FloatOpt("heat_stack_update_prepoll_delay",
default=2.0,
help="Time(in sec) to sleep after updating a resource before "
"polling for it status."),
cfg.FloatOpt("heat_stack_update_timeout",
default=3600.0,
help="Time(in sec) to wait for stack to be updated."),
cfg.FloatOpt("heat_stack_update_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack update."),
cfg.FloatOpt("heat_stack_suspend_timeout",
default=3600.0,
help="Time(in sec) to wait for stack to be suspended."),
cfg.FloatOpt("heat_stack_suspend_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack suspend."),
cfg.FloatOpt("heat_stack_resume_timeout",
default=3600.0,
help="Time(in sec) to wait for stack to be resumed."),
cfg.FloatOpt("heat_stack_resume_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack resume."),
cfg.FloatOpt("heat_stack_snapshot_timeout",
default=3600.0,
help="Time(in sec) to wait for stack snapshot to "
"be created."),
cfg.FloatOpt("heat_stack_snapshot_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack snapshot to be created."),
cfg.FloatOpt("heat_stack_restore_timeout",
default=3600.0,
help="Time(in sec) to wait for stack to be restored from "
"snapshot."),
cfg.FloatOpt("heat_stack_restore_poll_interval",
default=1.0,
help="Time interval(in sec) between checks when waiting for "
"stack to be restored."),
cfg.FloatOpt("heat_stack_scale_timeout",
default=3600.0,
help="Time (in sec) to wait for stack to scale up or down."),
cfg.FloatOpt("heat_stack_scale_poll_interval",
default=1.0,
help="Time interval (in sec) between checks when waiting for "
"a stack to scale up or down."),
]
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(HEAT_BENCHMARK_OPTS, group=benchmark_group)
class HeatScenario(scenario.OpenStackScenario):
"""Base class for Heat scenarios with basic atomic actions."""
@atomic.action_timer("heat.list_stacks")
def _list_stacks(self):
"""Return user stack list."""
return list(self.clients("heat").stacks.list())
@atomic.action_timer("heat.create_stack")
def _create_stack(self, template, parameters=None,
files=None, environment=None):
"""Create a new stack.
:param template: template with stack description.
:param parameters: template parameters used during stack creation
:param files: additional files used in template
:param environment: stack environment definition
:returns: object of stack
"""
stack_name = self.generate_random_name()
kw = {
"stack_name": stack_name,
"disable_rollback": True,
"parameters": parameters or {},
"template": template,
"files": files or {},
"environment": environment or {}
}
        # The heat client returns a body instead of a manager object, so we
        # should get the manager object using stack_id.
stack_id = self.clients("heat").stacks.create(**kw)["stack"]["id"]
stack = self.clients("heat").stacks.get(stack_id)
time.sleep(CONF.benchmark.heat_stack_create_prepoll_delay)
stack = utils.wait_for(
stack,
ready_statuses=["CREATE_COMPLETE"],
failure_statuses=["CREATE_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_create_timeout,
check_interval=CONF.benchmark.heat_stack_create_poll_interval)
return stack
@atomic.action_timer("heat.update_stack")
def _update_stack(self, stack, template, parameters=None,
files=None, environment=None):
"""Update an existing stack
:param stack: stack that need to be updated
:param template: Updated template
:param parameters: template parameters for stack update
:param files: additional files used in template
:param environment: stack environment definition
:returns: object of updated stack
"""
kw = {
"stack_name": stack.stack_name,
"disable_rollback": True,
"parameters": parameters or {},
"template": template,
"files": files or {},
"environment": environment or {}
}
self.clients("heat").stacks.update(stack.id, **kw)
time.sleep(CONF.benchmark.heat_stack_update_prepoll_delay)
stack = utils.wait_for(
stack,
ready_statuses=["UPDATE_COMPLETE"],
failure_statuses=["UPDATE_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_update_timeout,
check_interval=CONF.benchmark.heat_stack_update_poll_interval)
return stack
@atomic.action_timer("heat.check_stack")
def _check_stack(self, stack):
"""Check given stack.
Check the stack and stack resources.
:param stack: stack that needs to be checked
"""
self.clients("heat").actions.check(stack.id)
utils.wait_for(
stack,
ready_statuses=["CHECK_COMPLETE"],
failure_statuses=["CHECK_FAILED"],
update_resource=utils.get_from_manager(["CHECK_FAILED"]),
timeout=CONF.benchmark.heat_stack_check_timeout,
check_interval=CONF.benchmark.heat_stack_check_poll_interval)
@atomic.action_timer("heat.delete_stack")
def _delete_stack(self, stack):
"""Delete given stack.
Returns when the stack is actually deleted.
:param stack: stack object
"""
stack.delete()
utils.wait_for_status(
stack,
ready_statuses=["DELETE_COMPLETE"],
failure_statuses=["DELETE_FAILED"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_delete_timeout,
check_interval=CONF.benchmark.heat_stack_delete_poll_interval)
@atomic.action_timer("heat.suspend_stack")
def _suspend_stack(self, stack):
"""Suspend given stack.
:param stack: stack that needs to be suspended
"""
self.clients("heat").actions.suspend(stack.id)
utils.wait_for(
stack,
ready_statuses=["SUSPEND_COMPLETE"],
failure_statuses=["SUSPEND_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_suspend_timeout,
check_interval=CONF.benchmark.heat_stack_suspend_poll_interval)
@atomic.action_timer("heat.resume_stack")
def _resume_stack(self, stack):
"""Resume given stack.
:param stack: stack that needs to be resumed
"""
self.clients("heat").actions.resume(stack.id)
utils.wait_for(
stack,
ready_statuses=["RESUME_COMPLETE"],
failure_statuses=["RESUME_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_resume_timeout,
check_interval=CONF.benchmark.heat_stack_resume_poll_interval)
@atomic.action_timer("heat.snapshot_stack")
def _snapshot_stack(self, stack):
"""Creates a snapshot for given stack.
:param stack: stack that will be used as base for snapshot
:returns: snapshot created for given stack
"""
snapshot = self.clients("heat").stacks.snapshot(
stack.id)
utils.wait_for(
stack,
ready_statuses=["SNAPSHOT_COMPLETE"],
failure_statuses=["SNAPSHOT_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_snapshot_timeout,
check_interval=CONF.benchmark.heat_stack_snapshot_poll_interval)
return snapshot
@atomic.action_timer("heat.restore_stack")
def _restore_stack(self, stack, snapshot_id):
"""Restores stack from given snapshot.
:param stack: stack that will be restored from snapshot
:param snapshot_id: id of given snapshot
"""
self.clients("heat").stacks.restore(stack.id, snapshot_id)
utils.wait_for(
stack,
ready_statuses=["RESTORE_COMPLETE"],
failure_statuses=["RESTORE_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_restore_timeout,
check_interval=CONF.benchmark.heat_stack_restore_poll_interval
)
def _count_instances(self, stack):
"""Count instances in a Heat stack.
:param stack: stack to count instances in.
"""
return len([
r for r in self.clients("heat").resources.list(stack.id,
nested_depth=1)
if r.resource_type == "OS::Nova::Server"])
def _scale_stack(self, stack, output_key, delta):
"""Scale a stack up or down.
Calls the webhook given in the output value identified by
'output_key', and waits for the stack size to change by
'delta'.
:param stack: stack to scale up or down
:param output_key: The name of the output to get the URL from
:param delta: The expected change in number of instances in
the stack (signed int)
"""
num_instances = self._count_instances(stack)
expected_instances = num_instances + delta
LOG.debug("Scaling stack %s from %s to %s instances with %s" %
(stack.id, num_instances, expected_instances, output_key))
with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
self._stack_webhook(stack, output_key)
utils.wait_for(
stack,
is_ready=lambda s: (
self._count_instances(s) == expected_instances),
failure_statuses=["UPDATE_FAILED"],
update_resource=utils.get_from_manager(),
timeout=CONF.benchmark.heat_stack_scale_timeout,
check_interval=CONF.benchmark.heat_stack_scale_poll_interval)
def _stack_webhook(self, stack, output_key):
"""POST to the URL given in the output value identified by output_key.
This can be used to scale stacks up and down, for instance.
:param stack: stack to call a webhook on
:param output_key: The name of the output to get the URL from
:raises InvalidConfigException: if the output key is not found
"""
url = None
for output in stack.outputs:
if output["output_key"] == output_key:
url = output["output_value"]
break
else:
raise exceptions.InvalidConfigException(
"No output key %(key)s found in stack %(id)s" %
{"key": output_key, "id": stack.id})
with atomic.ActionTimer(self, "heat.%s_webhook" % output_key):
requests.post(url).raise_for_status()
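# A hedged, illustrative sketch (not part of the original plugin code): a
# concrete scenario combining the atomic actions above. Registration via
# `@scenario.configure(...)` is omitted, and the `output_key` default is a
# hypothetical placeholder that must match an output defined in the template.
class _ExampleCreateScaleDeleteStack(HeatScenario):
    """Illustrative only; create a stack, scale it by `delta`, then delete."""

    def run(self, template, delta=1, output_key="scale_url",
            parameters=None, files=None, environment=None):
        stack = self._create_stack(template, parameters, files, environment)
        self._scale_stack(stack, output_key, delta)
        self._delete_stack(stack)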
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._replications_operations import build_create_request_initial, build_delete_request_initial, build_get_request, build_list_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ReplicationsOperations:
"""ReplicationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> "_models.Replication":
"""Gets the properties of the specified replication.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Replication, or the result of cls(response)
:rtype: ~azure.mgmt.containerregistry.v2019_12_01_preview.models.Replication
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: "_models.Replication",
**kwargs: Any
) -> "_models.Replication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(replication, 'Replication')
request = build_create_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
content_type=content_type,
json=_json,
template_url=self._create_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Replication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace_async
async def begin_create(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication: "_models.Replication",
**kwargs: Any
) -> AsyncLROPoller["_models.Replication"]:
"""Creates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param replication: The parameters for creating a replication.
:type replication: ~azure.mgmt.containerregistry.v2019_12_01_preview.models.Replication
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_12_01_preview.models.Replication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication=replication,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a replication from a container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: "_models.ReplicationUpdateParameters",
**kwargs: Any
) -> "_models.Replication":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(replication_update_parameters, 'ReplicationUpdateParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Replication', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
registry_name: str,
replication_name: str,
replication_update_parameters: "_models.ReplicationUpdateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.Replication"]:
"""Updates a replication for a container registry with the specified parameters.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:param replication_name: The name of the replication.
:type replication_name: str
:param replication_update_parameters: The parameters for updating a replication.
:type replication_update_parameters:
~azure.mgmt.containerregistry.v2019_12_01_preview.models.ReplicationUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Replication or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerregistry.v2019_12_01_preview.models.Replication]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Replication"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
registry_name=registry_name,
replication_name=replication_name,
replication_update_parameters=replication_update_parameters,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Replication', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications/{replicationName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
registry_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ReplicationListResult"]:
"""Lists all the replications for the specified container registry.
:param resource_group_name: The name of the resource group to which the container registry
belongs.
:type resource_group_name: str
:param registry_name: The name of the container registry.
:type registry_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ReplicationListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_12_01_preview.models.ReplicationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
registry_name=registry_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ReplicationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerRegistry/registries/{registryName}/replications'} # type: ignore
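# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated code). The `client`
# argument is assumed to be an aio management client that exposes this
# operation group as `client.replications`; resource names are placeholders.
async def _example_replications_usage(client, resource_group, registry):
    # Direct GET of a single replication.
    replication = await client.replications.get(resource_group, registry, "myreplication")
    # Long-running create: awaiting begin_create yields an AsyncLROPoller;
    # awaiting poller.result() yields the final Replication model.
    poller = await client.replications.begin_create(
        resource_group, registry, "myreplication", replication)
    created = await poller.result()
    # Paged listing: AsyncItemPaged is consumed with `async for`.
    async for item in client.replications.list(resource_group, registry):
        print(item.name)
    return created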
|
|
# -*- coding: utf-8 -*-
"""
Backend to the console plugin.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
"""
# this file is a modified version of source code from the Accerciser project
# http://live.gnome.org/accerciser
# this file is from http://ipython.scipy.org/moin/Cookbook/EmbeddingInGTK
import gtk
import re
import sys
import os
import pango
from StringIO import StringIO
try:
import IPython
except Exception as e:
raise ImportError("Error importing IPython (%s)" % str(e))
ansi_colors = {'0;30': 'Black',
'0;31': 'Red',
'0;32': 'Green',
'0;33': 'Brown',
'0;34': 'Blue',
'0;35': 'Purple',
'0;36': 'Cyan',
'0;37': 'LightGray',
'1;30': 'DarkGray',
'1;31': 'DarkRed',
'1;32': 'SeaGreen',
'1;33': 'Yellow',
'1;34': 'LightBlue',
'1;35': 'MediumPurple',
'1;36': 'LightCyan',
'1;37': 'White'}
class IterableIPShell:
def __init__(self,argv=None,user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
if input_func:
IPython.iplib.raw_input_original = input_func
if cin:
IPython.Shell.Term.cin = cin
if cout:
IPython.Shell.Term.cout = cout
if cerr:
IPython.Shell.Term.cerr = cerr
if argv is None:
argv=[]
# This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
IPython.iplib.raw_input = lambda x: None
self.term = IPython.genutils.IOTerm(cin=cin, cout=cout, cerr=cerr)
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
self.IP = IPython.Shell.make_IPython(argv,user_ns=user_ns,
user_global_ns=user_global_ns,
embedded=True,
shell_class=IPython.Shell.InteractiveShell)
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ',
verbose=self.IP.rc.system_verbose)
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
self.complete_sep = re.compile('[\s\{\}\[\]\(\)]')
def execute(self):
self.history_level = 0
orig_stdout = sys.stdout
sys.stdout = IPython.Shell.Term.cout
try:
line = self.IP.raw_input(None, self.iter_more)
if self.IP.autoindent:
self.IP.readline_startup_hook(None)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.resetbuffer()
# keep cache in sync with the prompt counter:
self.IP.outputcache.prompt_count -= 1
if self.IP.autoindent:
self.IP.indent_current_nsp = 0
self.iter_more = 0
except:
self.IP.showtraceback()
else:
self.iter_more = self.IP.push(line)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.rc.autoedit_syntax):
self.IP.edit_syntax_error()
if self.iter_more:
self.prompt = str(self.IP.outputcache.prompt2).strip()
if self.IP.autoindent:
self.IP.readline_startup_hook(self.IP.pre_readline)
else:
self.prompt = str(self.IP.outputcache.prompt1).strip()
sys.stdout = orig_stdout
def historyBack(self):
self.history_level -= 1
return self._getHistory()
def historyForward(self):
self.history_level += 1
return self._getHistory()
def _getHistory(self):
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
self.history_level = 0
rv = ''
return rv
def updateNamespace(self, ns_dict):
self.IP.user_ns.update(ns_dict)
def complete(self, line):
split_line = self.complete_sep.split(line)
possibilities = self.IP.complete(split_line[-1])
if possibilities:
common_prefix = reduce(self._commonPrefix, possibilities)
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
return completed, possibilities
def _commonPrefix(self, str1, str2):
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
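# Illustrative (comment-only): reduce(self._commonPrefix, ['print_history',
# 'print_help']) folds the candidates pairwise and yields 'print_h', which
# complete() then splices onto the untouched part of the input line.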
def shell(self, cmd,verbose=0,debug=0,header=''):
stat = 0
if verbose or debug: print header+cmd
# flush stdout so we don't mangle python's buffering
if not debug:
input, output = os.popen4(cmd)
print output.read()
output.close()
input.close()
class ConsoleView(gtk.TextView):
def __init__(self):
gtk.TextView.__init__(self)
self.modify_font(pango.FontDescription('Mono'))
self.set_cursor_visible(True)
self.text_buffer = self.get_buffer()
self.mark = self.text_buffer.create_mark('scroll_mark',
self.text_buffer.get_end_iter(),
False)
for code in ansi_colors:
self.text_buffer.create_tag(code,
foreground=ansi_colors[code],
weight=700)
self.text_buffer.create_tag('0')
self.text_buffer.create_tag('notouch', editable=False)
self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
self.line_start = \
self.text_buffer.create_mark('line_start',
self.text_buffer.get_end_iter(), True
)
self.connect('key-press-event', self._onKeypress)
self.last_cursor_pos = 0
def write(self, text, editable=False):
segments = self.color_pat.split(text)
segment = segments.pop(0)
start_mark = self.text_buffer.create_mark(None,
self.text_buffer.get_end_iter(),
True)
self.text_buffer.insert(self.text_buffer.get_end_iter(), segment)
if segments:
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(),
segments[i+1], tag)
segments.pop(i)
if not editable:
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(start_mark),
self.text_buffer.get_end_iter())
self.text_buffer.delete_mark(start_mark)
self.scroll_mark_onscreen(self.mark)
def showPrompt(self, prompt):
self.write(prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
def changeLine(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.delete(self.text_buffer.get_iter_at_mark(self.line_start), iter)
self.write(text, True)
def getCurrentLine(self):
rv = self.text_buffer.get_slice(self.text_buffer.get_iter_at_mark(self.line_start),
self.text_buffer.get_end_iter(), False)
return rv
def showReturned(self, text):
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(self.line_start),
iter)
self.write('\n'+text)
if text:
self.write('\n')
self.showPrompt(self.prompt)
self.text_buffer.move_mark(self.line_start,self.text_buffer.get_end_iter())
self.text_buffer.place_cursor(self.text_buffer.get_end_iter())
def _onKeypress(self, obj, event):
if not event.string:
return
insert_mark = self.text_buffer.get_insert()
insert_iter = self.text_buffer.get_iter_at_mark(insert_mark)
selection_mark = self.text_buffer.get_selection_bound()
selection_iter = self.text_buffer.get_iter_at_mark(selection_mark)
start_iter = self.text_buffer.get_iter_at_mark(self.line_start)
if start_iter.compare(insert_iter) <= 0 and \
start_iter.compare(selection_iter) <= 0:
return
elif start_iter.compare(insert_iter) > 0 and \
start_iter.compare(selection_iter) > 0:
self.text_buffer.place_cursor(start_iter)
elif insert_iter.compare(selection_iter) < 0:
self.text_buffer.move_mark(insert_mark, start_iter)
elif insert_iter.compare(selection_iter) > 0:
self.text_buffer.move_mark(selection_mark, start_iter)
class IPythonView(ConsoleView, IterableIPShell):
def __init__(self):
ConsoleView.__init__(self)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout,cerr=self.cout,
input_func=self.raw_input)
self.connect('key_press_event', self.keyPress)
self.execute()
self.cout.truncate(0)
self.showPrompt(self.prompt)
self.interrupt = False
def raw_input(self, prompt=''):
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def keyPress(self, widget, event):
if event.state & gtk.gdk.CONTROL_MASK and event.keyval == 99:
self.interrupt = True
self._processLine()
return True
elif event.keyval == gtk.keysyms.Return:
self._processLine()
return True
elif event.keyval == gtk.keysyms.Up:
self.changeLine(self.historyBack())
return True
elif event.keyval == gtk.keysyms.Down:
self.changeLine(self.historyForward())
return True
elif event.keyval == gtk.keysyms.Tab:
if not self.getCurrentLine().strip():
return False
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return True
def _processLine(self):
self.history_level = 0
self.execute()
rv = self.cout.getvalue()
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
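# Illustrative embedding sketch (not part of this module), following the
# upstream cookbook pattern: wrap the view in a scrolled window inside a
# plain gtk.Window. Window title and size are arbitrary placeholders.
def _example_embed():
    window = gtk.Window()
    window.set_title('IPython console')
    window.set_default_size(640, 320)
    window.connect('delete-event', lambda widget, event: gtk.main_quit())
    scrolled = gtk.ScrolledWindow()
    scrolled.add(IPythonView())
    window.add(scrolled)
    window.show_all()
    gtk.main()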
|
|
#!/usr/bin/env python
# encoding: utf-8
"""
Run all scripts in examples on specific sample world.
"""
import sys
import os
import subprocess
import shutil
import tempfile
import glob
import logging
import unittest
try:
from unittest import skip as _skip
except ImportError:
# Python 2.6 has an older unittest API. The backported package is available from pypi.
import unittest2 as unittest
# local modules
import downloadsample
if sys.version_info[0] < 3:
def _deletechars(text, deletechars):
"""Return string, with the deletechars removed"""
return filter(lambda c: c not in deletechars, text)
else:
def _deletechars(text, deletechars):
"""Return string, with the deletechars removed"""
table = str.maketrans('', '', deletechars)
return text.translate(table)
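# Illustrative (comment-only): on either branch,
# _deletechars("DiamondOre: 1,743.", " ,.") == "DiamondOre:1743",
# which is how the block-analysis output is normalised further down.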
def _mkdir(dstdir, subdir):
"""Helper function: create folder /dstdir/subdir"""
os.mkdir(os.path.join(dstdir, os.path.normpath(subdir)))
def _copyglob(srcdir, destdir, pattern):
"""Helper function: copies files from /srcdir/pattern to /destdir/pattern.
pattern is a glob pattern."""
for fullpath in glob.glob(os.path.join(srcdir, os.path.normpath(pattern))):
relpath = os.path.relpath(fullpath, srcdir)
shutil.copy2(fullpath, os.path.join(destdir, relpath))
def _copyrename(srcdir, destdir, src, dest):
"""Helper function: copy file from /srcdir/src to /destdir/dest."""
shutil.copy2(os.path.join(srcdir, os.path.normpath(src)), \
os.path.join(destdir, os.path.normpath(dest)))
class ScriptTestCase(unittest.TestCase):
"""Test Case with helper functions for running a script, and installing a
Minecraft sample world."""
worldfolder = None
mcregionfolder = None
anvilfolder = None
examplesdir = os.path.normpath(os.path.join(__file__, os.pardir, os.pardir, 'examples'))
def runScript(self, script, args):
scriptpath = os.path.join(self.examplesdir, script)
args.insert(0, scriptpath)
# Ensure we're using the same python version.
# python = sys.argv[0]
# args.insert(0, python)
env = dict(os.environ).copy()
env['LC_ALL'] = 'C'
# Open a subprocess, wait till it is done, and get the STDOUT result
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
p.wait()
output = [r.decode('utf-8') for r in p.stdout.readlines()]
for l in p.stderr.readlines():
sys.stdout.write("%s: %s" % (script, l.decode('utf-8')))
try:
p.stdout.close()
p.stderr.close()
except IOError:
pass
self.assertEqual(p.returncode, 0, "return code is %d" % p.returncode)
return output
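# Illustrative (comment-only): runScript('seed.py', [self.mcregionfolder])
# launches examples/seed.py in a subprocess with LC_ALL=C, echoes any stderr
# lines, asserts a zero exit code, and returns stdout as decoded lines.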
def assertEqualOutput(self, actual, expected):
"""Compare two lists of strings, ignoring whitespace at begin and end of line."""
if len(actual) < len(expected):
self.fail("Output is %d lines, expected at least %d lines" % \
(len(actual), len(expected)))
for i,expline in enumerate(expected):
self.assertEqual(actual[i].strip(), expline.strip(), \
"Output line %d is %r, expected %r" % (i+1, actual[i], expline))
def assertEqualString(self, actual, expected):
"""Compare strings, ignoring whitespace at begin and end of line."""
self.assertEqual(actual.strip(), expected.strip(), \
"Output line %r, expected %r" % (actual, expected))
class BiomeAnalysisScriptTest(ScriptTestCase):
pass
# TODO: The sample world was converted with a simple script, but does not seem to have
# biome data; this still needs to be added. (Opening the world with the Minecraft client
# would change the world a bit, which I'd like to avoid. Opening it with the server might
# leave it unchanged if "/stop" is called quickly enough, though that may expand the
# generated chunks to everything in a 380x380 block area.)
# @classmethod
# def setUpClass(cls):
# cls.installsampleworld()
# cls.extractAnvilWorld()
# def testAnvilWorld(self):
# output = self.runScript('biome_analysis.py', [self.anvilfolder])
class BlockAnalysisScriptTest(ScriptTestCase):
expected = [
"DiamondOre:1743",
"GoldOre:4838",
"RedstoneOre:14487",
"IronOre:52906",
"CoalOre:97597",
"LapisLazuliOre:2051",
"Dungeons:26",
"Clay:897",
"SugarCane:22",
"Cacti:0",
"Pumpkin:6",
"Dandelion:513",
"Rose:131",
"BrownMushroom:40",
"RedMushroom:31",
"LavaSprings:47665",
]
def testMcRegionWorld(self):
output = self.runScript('block_analysis.py', [self.mcregionfolder])
self.assertTrue(len(output) >= 73, "Expected output to be at least 73 lines long")
output = [_deletechars(l, " ,.") for l in output[-16:]]
self.assertEqualOutput(output, self.expected)
# TODO: Anvil does not yet work.
# def testAnvilWorld(self):
# output = self.runScript('block_analysis.py', [self.anvilfolder])
# print repr(output)
# self.assertTrue(len(output) >= 73, "Expected output of at least 73 lines long")
# output = [_deletechars(l, " ,.") for l in output[-16:]]
# self.assertEqualOutput(output, self.expected)
class ChestAnalysisScriptTest(ScriptTestCase):
def testMcRegionWorld(self):
output = self.runScript('chest_analysis.py', [self.mcregionfolder])
self.assertEqual(len(output), 178)
count = len(list(filter(lambda l: l.startswith('Chest at '), output)))
self.assertEqual(count, 38)
def testAnvilWorld(self):
output = self.runScript('chest_analysis.py', [self.anvilfolder])
self.assertEqual(len(output), 178)
count = len(list(filter(lambda l: l.startswith('Chest at '), output)))
self.assertEqual(count, 38)
def has_PIL():
try:
from PIL import Image
return True
except ImportError:
return False
class MapScriptTest(ScriptTestCase):
@unittest.skipIf(not has_PIL(), "PIL library not available")
def testMcRegionWorld(self):
output = self.runScript('map.py', ['--noshow', self.mcregionfolder])
self.assertTrue(output[-1].startswith("Saved map as "))
# TODO: this currently writes the map to tests/nbtmcregion*.png files.
# The locations should be a tempfile, and the file should be deleted afterwards.
# @skipIf(not has_PIL(), "PIL library not available")
# def testAnvilWorld(self):
# output = self.runScript('map.py', ['--noshow', self.anvilfolder])
# self.assertEqualString(output[-1], "Saved map as Sample World.png")
class MobAnalysisScriptTest(ScriptTestCase):
def testMcRegionWorld(self):
output = self.runScript('mob_analysis.py', [self.mcregionfolder])
self.assertEqual(len(output), 413)
output = sorted(output)
self.assertEqualString(output[0], "Chicken at 107.6,88.0,374.5")
self.assertEqualString(output[400], "Zombie at 249.3,48.0,368.1")
def testAnvilWorld(self):
output = self.runScript('mob_analysis.py', [self.anvilfolder])
self.assertEqual(len(output), 413)
output = sorted(output)
self.assertEqualString(output[0], "Chicken at 107.6,88.0,374.5")
self.assertEqualString(output[400], "Zombie at 249.3,48.0,368.1")
class SeedScriptTest(ScriptTestCase):
def testMcRegionWorld(self):
output = self.runScript('seed.py', [self.mcregionfolder])
self.assertEqualOutput(output, ["-3195717715052600521"])
def testAnvilWorld(self):
output = self.runScript('seed.py', [self.anvilfolder])
self.assertEqualOutput(output, ["-3195717715052600521"])
class GenerateLevelDatScriptTest(ScriptTestCase):
expected = [
"NBTFile('Data'): {10 Entries}",
"{",
" TAG_Long('Time'): 1",
" TAG_Long('LastPlayed'): *",
" TAG_Int('SpawnX'): 0",
" TAG_Int('SpawnY'): 2",
" TAG_Int('SpawnZ'): 0",
" TAG_Long('SizeOnDisk'): 0",
" TAG_Long('RandomSeed'): *",
" TAG_Int('version'): 19132",
" TAG_String('LevelName'): Testing",
" TAG_Compound('Player'): {3 Entries}",
" {",
" TAG_Int('Score'): 0",
" TAG_Int('Dimension'): 0",
" TAG_Compound('Inventory'): {0 Entries}",
" }",
"}"
]
def testNBTGeneration(self):
output = self.runScript('generate_level_dat.py', [])
self.assertEqual(len(output), 18)
self.assertEqualString(output[0], self.expected[0])
self.assertEqualString(output[10], self.expected[10])
self.assertEqualString(output[11], self.expected[11])
self.assertEqualString(output[13], self.expected[13])
# Load resources once.
# Implement as setUpModule/tearDownModule, since it only needs to be called
# once for the ScriptTestCase class. By default, a setUpClass/tearDownClass
# calls it for each subclass.
def setUpModule():
"""Download sample world, and copy Anvil and McRegion files to temporary folders."""
if ScriptTestCase.worldfolder is None:
downloadsample.install()
ScriptTestCase.worldfolder = downloadsample.worlddir
if ScriptTestCase.mcregionfolder is None:
ScriptTestCase.mcregionfolder = downloadsample.temp_mcregion_world()
if ScriptTestCase.anvilfolder is None:
ScriptTestCase.anvilfolder = downloadsample.temp_anvil_world()
def tearDownModule():
"""Remove temporary folders with Anvil and McRegion files."""
if ScriptTestCase.mcregionfolder is not None:
downloadsample.cleanup_temp_world(ScriptTestCase.mcregionfolder)
if ScriptTestCase.anvilfolder is not None:
downloadsample.cleanup_temp_world(ScriptTestCase.anvilfolder)
ScriptTestCase.worldfolder = None
ScriptTestCase.mcregionfolder = None
ScriptTestCase.anvilfolder = None
if __name__ == '__main__':
logger = logging.getLogger("nbt.tests.exampletests")
if len(logger.handlers) == 0:
# Logging is not yet configured. Configure it.
logging.basicConfig(level=logging.INFO, stream=sys.stderr, format='%(levelname)-8s %(message)s')
unittest.main(verbosity=2, failfast=True)
|
|
from collections import defaultdict, OrderedDict
import datetime
import json
from moto.core import BaseBackend, BaseModel, CloudFormationModel
from moto.core.utils import unix_time
from moto.core import ACCOUNT_ID
from .comparisons import get_comparison_func
class DynamoJsonEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, "to_json"):
return obj.to_json()
def dynamo_json_dump(dynamo_object):
return json.dumps(dynamo_object, cls=DynamoJsonEncoder)
class DynamoType(object):
"""
http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
"""
def __init__(self, type_as_dict):
self.type = list(type_as_dict.keys())[0]
self.value = list(type_as_dict.values())[0]
def __hash__(self):
return hash((self.type, self.value))
def __eq__(self, other):
return self.type == other.type and self.value == other.value
def __repr__(self):
return "DynamoType: {0}".format(self.to_json())
def add(self, dyn_type):
if self.type == "SS":
self.value.append(dyn_type.value)
if self.type == "N":
self.value = str(int(self.value) + int(dyn_type.value))
def to_json(self):
return {self.type: self.value}
def compare(self, range_comparison, range_objs):
"""
Compares this type against comparison filters
"""
range_values = [obj.value for obj in range_objs]
comparison_func = get_comparison_func(range_comparison)
return comparison_func(self.value, *range_values)
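# Illustrative (comment-only) examples of the wire format handled above:
#   DynamoType({"S": "Alice"})                           -> type "S", value "Alice"
#   DynamoType({"N": "4"}).add(DynamoType({"N": "38"}))  updates value to "42"
#   DynamoType({"N": "5"}).compare("GT", [DynamoType({"N": "3"})]) applies the
#   "GT" comparison function from .comparisons to "5" and "3".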
class Item(BaseModel):
def __init__(self, hash_key, hash_key_type, range_key, range_key_type, attrs):
self.hash_key = hash_key
self.hash_key_type = hash_key_type
self.range_key = range_key
self.range_key_type = range_key_type
self.attrs = {}
for key, value in attrs.items():
self.attrs[key] = DynamoType(value)
def __repr__(self):
return "Item: {0}".format(self.to_json())
def to_json(self):
attributes = {}
for attribute_key, attribute in self.attrs.items():
attributes[attribute_key] = attribute.value
return {"Attributes": attributes}
def describe_attrs(self, attributes):
if attributes:
included = {}
for key, value in self.attrs.items():
if key in attributes:
included[key] = value
else:
included = self.attrs
return {"Item": included}
class Table(CloudFormationModel):
def __init__(
self,
name,
hash_key_attr,
hash_key_type,
range_key_attr=None,
range_key_type=None,
read_capacity=None,
write_capacity=None,
):
self.name = name
self.hash_key_attr = hash_key_attr
self.hash_key_type = hash_key_type
self.range_key_attr = range_key_attr
self.range_key_type = range_key_type
self.read_capacity = read_capacity
self.write_capacity = write_capacity
self.created_at = datetime.datetime.utcnow()
self.items = defaultdict(dict)
@property
def has_range_key(self):
return self.range_key_attr is not None
@property
def describe(self):
results = {
"Table": {
"CreationDateTime": unix_time(self.created_at),
"KeySchema": {
"HashKeyElement": {
"AttributeName": self.hash_key_attr,
"AttributeType": self.hash_key_type,
}
},
"ProvisionedThroughput": {
"ReadCapacityUnits": self.read_capacity,
"WriteCapacityUnits": self.write_capacity,
},
"TableName": self.name,
"TableStatus": "ACTIVE",
"ItemCount": len(self),
"TableSizeBytes": 0,
}
}
if self.has_range_key:
results["Table"]["KeySchema"]["RangeKeyElement"] = {
"AttributeName": self.range_key_attr,
"AttributeType": self.range_key_type,
}
return results
@staticmethod
def cloudformation_name_type():
return "TableName"
@staticmethod
def cloudformation_type():
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html
return "AWS::DynamoDB::Table"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
properties = cloudformation_json["Properties"]
key_attr = [
i["AttributeName"]
for i in properties["KeySchema"]
if i["KeyType"] == "HASH"
][0]
key_type = [
i["AttributeType"]
for i in properties["AttributeDefinitions"]
if i["AttributeName"] == key_attr
][0]
spec = {
"name": properties["TableName"],
"hash_key_attr": key_attr,
"hash_key_type": key_type,
}
# TODO: optional properties still missing:
# range_key_attr, range_key_type, read_capacity, write_capacity
return Table(**spec)
def __len__(self):
return sum(
[(len(value) if self.has_range_key else 1) for value in self.items.values()]
)
def __nonzero__(self):
return True
def __bool__(self):
return self.__nonzero__()
def put_item(self, item_attrs):
hash_value = DynamoType(item_attrs.get(self.hash_key_attr))
if self.has_range_key:
range_value = DynamoType(item_attrs.get(self.range_key_attr))
else:
range_value = None
item = Item(
hash_value, self.hash_key_type, range_value, self.range_key_type, item_attrs
)
if range_value:
self.items[hash_value][range_value] = item
else:
self.items[hash_value] = item
return item
def get_item(self, hash_key, range_key):
if self.has_range_key and not range_key:
raise ValueError(
"Table has a range key, but no range key was passed into get_item"
)
try:
if range_key:
return self.items[hash_key][range_key]
else:
return self.items[hash_key]
except KeyError:
return None
def query(self, hash_key, range_comparison, range_objs):
results = []
last_page = True # Once pagination is implemented, change this
if self.range_key_attr:
possible_results = self.items[hash_key].values()
else:
possible_results = list(self.all_items())
if range_comparison:
for result in possible_results:
if result.range_key.compare(range_comparison, range_objs):
results.append(result)
else:
# If we're not filtering on range key, return all values
results = possible_results
return results, last_page
def all_items(self):
for hash_set in self.items.values():
if self.range_key_attr:
for item in hash_set.values():
yield item
else:
yield hash_set
def scan(self, filters):
results = []
scanned_count = 0
last_page = True # Once pagination is implemented, change this
for result in self.all_items():
scanned_count += 1
passes_all_conditions = True
for (
attribute_name,
(comparison_operator, comparison_objs),
) in filters.items():
attribute = result.attrs.get(attribute_name)
if attribute:
# Attribute found
if not attribute.compare(comparison_operator, comparison_objs):
passes_all_conditions = False
break
elif comparison_operator == "NULL":
# Comparison is NULL and we don't have the attribute
continue
else:
# No attribute found and comparison is not NULL. This item
# fails
passes_all_conditions = False
break
if passes_all_conditions:
results.append(result)
return results, scanned_count, last_page
def delete_item(self, hash_key, range_key):
try:
if range_key:
return self.items[hash_key].pop(range_key)
else:
return self.items.pop(hash_key)
except KeyError:
return None
def update_item(self, hash_key, range_key, attr_updates):
item = self.get_item(hash_key, range_key)
if not item:
return None
for attr, update in attr_updates.items():
if update["Action"] == "PUT":
item.attrs[attr] = DynamoType(update["Value"])
if update["Action"] == "DELETE":
item.attrs.pop(attr)
if update["Action"] == "ADD":
item.attrs[attr].add(DynamoType(update["Value"]))
return item
@classmethod
def has_cfn_attr(cls, attribute):
return attribute in ["StreamArn"]
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == "StreamArn":
region = "us-east-1"
time = "2000-01-01T00:00:00.000"
return "arn:aws:dynamodb:{0}:{1}:table/{2}/stream/{3}".format(
region, ACCOUNT_ID, self.name, time
)
raise UnformattedGetAttTemplateException()
class DynamoDBBackend(BaseBackend):
def __init__(self):
self.tables = OrderedDict()
def create_table(self, name, **params):
table = Table(name, **params)
self.tables[name] = table
return table
def delete_table(self, name):
return self.tables.pop(name, None)
def update_table_throughput(self, name, new_read_units, new_write_units):
table = self.tables[name]
table.read_capacity = new_read_units
table.write_capacity = new_write_units
return table
def put_item(self, table_name, item_attrs):
table = self.tables.get(table_name)
if not table:
return None
return table.put_item(item_attrs)
def get_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.get_item(hash_key, range_key)
def query(self, table_name, hash_key_dict, range_comparison, range_value_dicts):
table = self.tables.get(table_name)
if not table:
return None, None
hash_key = DynamoType(hash_key_dict)
range_values = [DynamoType(range_value) for range_value in range_value_dicts]
return table.query(hash_key, range_comparison, range_values)
def scan(self, table_name, filters):
table = self.tables.get(table_name)
if not table:
return None, None, None
scan_filters = {}
for key, (comparison_operator, comparison_values) in filters.items():
dynamo_types = [DynamoType(value) for value in comparison_values]
scan_filters[key] = (comparison_operator, dynamo_types)
return table.scan(scan_filters)
def delete_item(self, table_name, hash_key_dict, range_key_dict):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.delete_item(hash_key, range_key)
def update_item(self, table_name, hash_key_dict, range_key_dict, attr_updates):
table = self.tables.get(table_name)
if not table:
return None
hash_key = DynamoType(hash_key_dict)
range_key = DynamoType(range_key_dict) if range_key_dict else None
return table.update_item(hash_key, range_key, attr_updates)
dynamodb_backend = DynamoDBBackend()
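# Illustrative sketch (not part of moto): driving the backend directly,
# bypassing the HTTP/response layer. Table and attribute names are placeholders.
def _example_backend_usage():
    backend = DynamoDBBackend()
    backend.create_table(
        "messages",
        hash_key_attr="forum", hash_key_type="S",
        range_key_attr="subject", range_key_type="S",
        read_capacity=5, write_capacity=5,
    )
    backend.put_item("messages", {"forum": {"S": "moto"}, "subject": {"S": "hello"}})
    item = backend.get_item("messages", {"S": "moto"}, {"S": "hello"})
    results, last_page = backend.query("messages", {"S": "moto"}, "EQ", [{"S": "hello"}])
    return item, results, last_page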
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
subscription_id: str,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
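# Illustrative (comment-only): build_get_request("<sub-id>", "<rg>", "<service>")
# returns an unsent HttpRequest whose URL is the template above with the three
# path parameters substituted, api-version=2021-06-01-preview as a query
# parameter, and an Accept: application/json header; the operations class
# below runs such requests through the client pipeline.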
def build_update_put_request_initial(
subscription_id: str,
resource_group_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_update_patch_request_initial(
subscription_id: str,
resource_group_name: str,
service_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-06-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PATCH",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
class MonitoringSettingsOperations(object):
"""MonitoringSettingsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2021_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> "_models.MonitoringSettingResource":
"""Get the Monitoring Setting and its properties.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MonitoringSettingResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2021_06_01_preview.models.MonitoringSettingResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def _update_put_initial(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> "_models.MonitoringSettingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
request = build_update_put_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._update_put_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_put_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
@distributed_trace
def begin_update_put(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> LROPoller["_models.MonitoringSettingResource"]:
"""Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource:
~azure.mgmt.appplatform.v2021_06_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.MonitoringSettingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_put_initial(
resource_group_name=resource_group_name,
service_name=service_name,
monitoring_setting_resource=monitoring_setting_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_put.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
def _update_patch_initial(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> "_models.MonitoringSettingResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(monitoring_setting_resource, 'MonitoringSettingResource')
request = build_update_patch_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
content_type=content_type,
json=_json,
template_url=self._update_patch_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_patch_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
@distributed_trace
def begin_update_patch(
self,
resource_group_name: str,
service_name: str,
monitoring_setting_resource: "_models.MonitoringSettingResource",
**kwargs: Any
) -> LROPoller["_models.MonitoringSettingResource"]:
"""Update the Monitoring Setting.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param monitoring_setting_resource: Parameters for the update operation.
:type monitoring_setting_resource:
~azure.mgmt.appplatform.v2021_06_01_preview.models.MonitoringSettingResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either MonitoringSettingResource or the result
of cls(response)
:rtype:
~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2021_06_01_preview.models.MonitoringSettingResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.MonitoringSettingResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_patch_initial(
resource_group_name=resource_group_name,
service_name=service_name,
monitoring_setting_resource=monitoring_setting_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('MonitoringSettingResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/monitoringSettings/default'} # type: ignore
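# A minimal usage sketch, not part of the generated operations class above:
# drive the long-running update through the LROPoller returned by
# begin_update_put and block until the final MonitoringSettingResource is
# available. The operations-group attribute name `monitoring_settings` and the
# already-constructed management `client` are assumptions for illustration.
def _example_update_monitoring_settings(client, resource_group_name, service_name, resource):
    """Start the update LRO and wait for its terminal result."""
    poller = client.monitoring_settings.begin_update_put(
        resource_group_name=resource_group_name,
        service_name=service_name,
        monitoring_setting_resource=resource,
    )
    # result() polls with ARMPolling (or whatever was passed via the `polling`
    # keyword) until the operation reaches a terminal state, then returns the
    # deserialized MonitoringSettingResource.
    return poller.result()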
|
|
"""Support for Bluesound devices."""
from __future__ import annotations
import asyncio
from asyncio import CancelledError
from datetime import timedelta
from http import HTTPStatus
import logging
from urllib import parse
import aiohttp
from aiohttp.client_exceptions import ClientError
from aiohttp.hdrs import CONNECTION, KEEP_ALIVE
import async_timeout
import voluptuous as vol
import xmltodict
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_ENQUEUE,
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_HOSTS,
CONF_NAME,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
from .const import (
DOMAIN,
SERVICE_CLEAR_TIMER,
SERVICE_JOIN,
SERVICE_SET_TIMER,
SERVICE_UNJOIN,
)
_LOGGER = logging.getLogger(__name__)
ATTR_BLUESOUND_GROUP = "bluesound_group"
ATTR_MASTER = "master"
DATA_BLUESOUND = "bluesound"
DEFAULT_PORT = 11000
NODE_OFFLINE_CHECK_TIMEOUT = 180
NODE_RETRY_INITIATION = timedelta(minutes=3)
STATE_GROUPED = "grouped"
SYNC_STATUS_INTERVAL = timedelta(minutes=5)
UPDATE_CAPTURE_INTERVAL = timedelta(minutes=30)
UPDATE_PRESETS_INTERVAL = timedelta(minutes=30)
UPDATE_SERVICES_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOSTS): vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
],
)
}
)
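# For illustration only: after Home Assistant validates the YAML for this
# platform, a configuration matching the schema above looks like the mapping
# below (CONF_PORT falls back to DEFAULT_PORT when omitted). The addresses and
# names are placeholders.
_EXAMPLE_HOSTS_CONFIG = {
    CONF_HOSTS: [
        {CONF_HOST: "192.168.1.50", CONF_NAME: "Living Room", CONF_PORT: DEFAULT_PORT},
        {CONF_HOST: "192.168.1.51", CONF_NAME: "Kitchen", CONF_PORT: DEFAULT_PORT},
    ]
}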
BS_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
BS_JOIN_SCHEMA = BS_SCHEMA.extend({vol.Required(ATTR_MASTER): cv.entity_id})
SERVICE_TO_METHOD = {
SERVICE_JOIN: {"method": "async_join", "schema": BS_JOIN_SCHEMA},
SERVICE_UNJOIN: {"method": "async_unjoin", "schema": BS_SCHEMA},
SERVICE_SET_TIMER: {"method": "async_increase_timer", "schema": BS_SCHEMA},
SERVICE_CLEAR_TIMER: {"method": "async_clear_timer", "schema": BS_SCHEMA},
}
def _add_player(hass, async_add_entities, host, port=None, name=None):
"""Add Bluesound players."""
@callback
def _init_player(event=None):
"""Start polling."""
hass.async_create_task(player.async_init())
@callback
def _start_polling(event=None):
"""Start polling."""
player.start_polling()
@callback
def _stop_polling():
"""Stop polling."""
player.stop_polling()
@callback
def _add_player_cb():
"""Add player after first sync fetch."""
if player.id in [x.id for x in hass.data[DATA_BLUESOUND]]:
_LOGGER.warning("Player already added %s", player.id)
return
hass.data[DATA_BLUESOUND].append(player)
async_add_entities([player])
_LOGGER.info("Added device with name: %s", player.name)
if hass.is_running:
_start_polling()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_polling)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_polling)
player = BluesoundPlayer(hass, host, port, name, _add_player_cb)
if hass.is_running:
_init_player()
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _init_player)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Bluesound platforms."""
if DATA_BLUESOUND not in hass.data:
hass.data[DATA_BLUESOUND] = []
if discovery_info:
_add_player(
hass,
async_add_entities,
discovery_info.get(CONF_HOST),
discovery_info.get(CONF_PORT),
)
return
if hosts := config.get(CONF_HOSTS):
for host in hosts:
_add_player(
hass,
async_add_entities,
host.get(CONF_HOST),
host.get(CONF_PORT),
host.get(CONF_NAME),
)
async def async_service_handler(service: ServiceCall) -> None:
"""Map services to method of Bluesound devices."""
if not (method := SERVICE_TO_METHOD.get(service.service)):
return
params = {
key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID
}
if entity_ids := service.data.get(ATTR_ENTITY_ID):
target_players = [
player
for player in hass.data[DATA_BLUESOUND]
if player.entity_id in entity_ids
]
else:
target_players = hass.data[DATA_BLUESOUND]
for player in target_players:
await getattr(player, method["method"])(**params)
for service, method in SERVICE_TO_METHOD.items():
schema = method["schema"]
hass.services.async_register(
DOMAIN, service, async_service_handler, schema=schema
)
class BluesoundPlayer(MediaPlayerEntity):
"""Representation of a Bluesound Player."""
def __init__(self, hass, host, port=None, name=None, init_callback=None):
"""Initialize the media player."""
self.host = host
self._hass = hass
self.port = port
self._polling_session = async_get_clientsession(hass)
self._polling_task = None # The actual polling task.
self._name = name
self._id = None
self._icon = None
self._capture_items = []
self._services_items = []
self._preset_items = []
self._sync_status = {}
self._status = None
self._last_status_update = None
self._is_online = False
self._retry_remove = None
self._muted = False
self._master = None
self._is_master = False
self._group_name = None
self._group_list = []
self._bluesound_device_name = None
self._init_callback = init_callback
if self.port is None:
self.port = DEFAULT_PORT
class _TimeoutException(Exception):
pass
@staticmethod
def _try_get_index(string, search_string):
"""Get the index."""
try:
return string.index(search_string)
except ValueError:
return -1
async def force_update_sync_status(self, on_updated_cb=None, raise_timeout=False):
"""Update the internal status."""
resp = await self.send_bluesound_command(
"SyncStatus", raise_timeout, raise_timeout
)
if not resp:
return None
self._sync_status = resp["SyncStatus"].copy()
if not self._name:
self._name = self._sync_status.get("@name", self.host)
if not self._id:
self._id = self._sync_status.get("@id", None)
if not self._bluesound_device_name:
self._bluesound_device_name = self._sync_status.get("@name", self.host)
if not self._icon:
self._icon = self._sync_status.get("@icon", self.host)
if (master := self._sync_status.get("master")) is not None:
self._is_master = False
master_host = master.get("#text")
master_port = master.get("@port", "11000")
master_id = f"{master_host}:{master_port}"
master_device = [
device
for device in self._hass.data[DATA_BLUESOUND]
if device.id == master_id
]
if master_device and master_id != self.id:
self._master = master_device[0]
else:
self._master = None
_LOGGER.error("Master not found %s", master_id)
else:
if self._master is not None:
self._master = None
slaves = self._sync_status.get("slave")
self._is_master = slaves is not None
if on_updated_cb:
on_updated_cb()
return True
async def _start_poll_command(self):
"""Loop which polls the status of the player."""
try:
while True:
await self.async_update_status()
except (asyncio.TimeoutError, ClientError, BluesoundPlayer._TimeoutException):
_LOGGER.info("Node %s:%s is offline, retrying later", self.name, self.port)
await asyncio.sleep(NODE_OFFLINE_CHECK_TIMEOUT)
self.start_polling()
except CancelledError:
_LOGGER.debug("Stopping the polling of node %s:%s", self.name, self.port)
except Exception:
_LOGGER.exception("Unexpected error in %s:%s", self.name, self.port)
raise
def start_polling(self):
"""Start the polling task."""
self._polling_task = self._hass.async_create_task(self._start_poll_command())
def stop_polling(self):
"""Stop the polling task."""
self._polling_task.cancel()
async def async_init(self, triggered=None):
"""Initialize the player async."""
try:
if self._retry_remove is not None:
self._retry_remove()
self._retry_remove = None
await self.force_update_sync_status(self._init_callback, True)
except (asyncio.TimeoutError, ClientError):
_LOGGER.info("Node %s:%s is offline, retrying later", self.host, self.port)
self._retry_remove = async_track_time_interval(
self._hass, self.async_init, NODE_RETRY_INITIATION
)
except Exception:
_LOGGER.exception(
"Unexpected when initiating error in %s:%s", self.host, self.port
)
raise
async def async_update(self):
"""Update internal status of the entity."""
if not self._is_online:
return
await self.async_update_sync_status()
await self.async_update_presets()
await self.async_update_captures()
await self.async_update_services()
async def send_bluesound_command(
self, method, raise_timeout=False, allow_offline=False
):
"""Send command to the player."""
if not self._is_online and not allow_offline:
return
if method[0] == "/":
method = method[1:]
url = f"http://{self.host}:{self.port}/{method}"
_LOGGER.debug("Calling URL: %s", url)
response = None
try:
websession = async_get_clientsession(self._hass)
async with async_timeout.timeout(10):
response = await websession.get(url)
if response.status == HTTPStatus.OK:
result = await response.text()
if result:
data = xmltodict.parse(result)
else:
data = None
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error("Error %s on %s", response.status, url)
return None
except (asyncio.TimeoutError, aiohttp.ClientError):
if raise_timeout:
_LOGGER.info("Timeout: %s:%s", self.host, self.port)
raise
_LOGGER.debug("Failed communicating: %s:%s", self.host, self.port)
return None
return data
async def async_update_status(self):
"""Use the poll session to always get the status of the player."""
response = None
url = "Status"
etag = ""
if self._status is not None:
etag = self._status.get("@etag", "")
if etag != "":
url = f"Status?etag={etag}&timeout=120.0"
url = f"http://{self.host}:{self.port}/{url}"
_LOGGER.debug("Calling URL: %s", url)
try:
async with async_timeout.timeout(125):
response = await self._polling_session.get(
url, headers={CONNECTION: KEEP_ALIVE}
)
if response.status == HTTPStatus.OK:
result = await response.text()
self._is_online = True
self._last_status_update = dt_util.utcnow()
self._status = xmltodict.parse(result)["status"].copy()
group_name = self._status.get("groupName")
if group_name != self._group_name:
_LOGGER.debug("Group name change detected on device: %s", self.id)
self._group_name = group_name
                    # rebuild the ordered list of devices in the group; the master comes first
                    self._group_list = self.rebuild_bluesound_group()
                    # the sleep is needed to make sure that the
                    # devices are synced
                    await asyncio.sleep(1)
                    await self.async_trigger_sync_on_all()
                elif self.is_grouped:
                    # When the player is grouped we need to fetch the volume from
                    # sync_status, so we force an update here. This isn't a
                    # foolproof solution; a better one would be to fetch
                    # sync_status more often while the device is playing, which
                    # would solve a lot of problems. That change will be made
                    # when the communication is moved to a separate library.
                    await self.force_update_sync_status()
self.async_write_ha_state()
elif response.status == 595:
_LOGGER.info("Status 595 returned, treating as timeout")
raise BluesoundPlayer._TimeoutException()
else:
_LOGGER.error(
"Error %s on %s. Trying one more time", response.status, url
)
except (asyncio.TimeoutError, ClientError):
self._is_online = False
self._last_status_update = None
self._status = None
self.async_write_ha_state()
_LOGGER.info("Client connection error, marking %s as offline", self._name)
raise
async def async_trigger_sync_on_all(self):
"""Trigger sync status update on all devices."""
_LOGGER.debug("Trigger sync status on all devices")
for player in self._hass.data[DATA_BLUESOUND]:
await player.force_update_sync_status()
@Throttle(SYNC_STATUS_INTERVAL)
async def async_update_sync_status(self, on_updated_cb=None, raise_timeout=False):
"""Update sync status."""
await self.force_update_sync_status(on_updated_cb, raise_timeout=False)
@Throttle(UPDATE_CAPTURE_INTERVAL)
async def async_update_captures(self):
"""Update Capture sources."""
resp = await self.send_bluesound_command("RadioBrowse?service=Capture")
if not resp:
return
self._capture_items = []
def _create_capture_item(item):
self._capture_items.append(
{
"title": item.get("@text", ""),
"name": item.get("@text", ""),
"type": item.get("@serviceType", "Capture"),
"image": item.get("@image", ""),
"url": item.get("@URL", ""),
}
)
if "radiotime" in resp and "item" in resp["radiotime"]:
if isinstance(resp["radiotime"]["item"], list):
for item in resp["radiotime"]["item"]:
_create_capture_item(item)
else:
_create_capture_item(resp["radiotime"]["item"])
return self._capture_items
@Throttle(UPDATE_PRESETS_INTERVAL)
async def async_update_presets(self):
"""Update Presets."""
resp = await self.send_bluesound_command("Presets")
if not resp:
return
self._preset_items = []
def _create_preset_item(item):
self._preset_items.append(
{
"title": item.get("@name", ""),
"name": item.get("@name", ""),
"type": "preset",
"image": item.get("@image", ""),
"is_raw_url": True,
"url2": item.get("@url", ""),
"url": f"Preset?id={item.get('@id', '')}",
}
)
if "presets" in resp and "preset" in resp["presets"]:
if isinstance(resp["presets"]["preset"], list):
for item in resp["presets"]["preset"]:
_create_preset_item(item)
else:
_create_preset_item(resp["presets"]["preset"])
return self._preset_items
@Throttle(UPDATE_SERVICES_INTERVAL)
async def async_update_services(self):
"""Update Services."""
resp = await self.send_bluesound_command("Services")
if not resp:
return
self._services_items = []
def _create_service_item(item):
self._services_items.append(
{
"title": item.get("@displayname", ""),
"name": item.get("@name", ""),
"type": item.get("@type", ""),
"image": item.get("@icon", ""),
"url": item.get("@name", ""),
}
)
if "services" in resp and "service" in resp["services"]:
if isinstance(resp["services"]["service"], list):
for item in resp["services"]["service"]:
_create_service_item(item)
else:
_create_service_item(resp["services"]["service"])
return self._services_items
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
if self._status is None:
return STATE_OFF
if self.is_grouped and not self.is_master:
return STATE_GROUPED
status = self._status.get("state")
if status in ("pause", "stop"):
return STATE_PAUSED
if status in ("stream", "play"):
return STATE_PLAYING
return STATE_IDLE
@property
def media_title(self):
"""Title of current playing media."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
return self._status.get("title1")
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
if self._status is None:
return None
if self.is_grouped and not self.is_master:
return self._group_name
if not (artist := self._status.get("artist")):
artist = self._status.get("title2")
return artist
@property
def media_album_name(self):
"""Artist of current playing media (Music track only)."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
if not (album := self._status.get("album")):
album = self._status.get("title3")
return album
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
if not (url := self._status.get("image")):
return
if url[0] == "/":
url = f"http://{self.host}:{self.port}{url}"
return url
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
mediastate = self.state
if self._last_status_update is None or mediastate == STATE_IDLE:
return None
if (position := self._status.get("secs")) is None:
return None
position = float(position)
if mediastate == STATE_PLAYING:
position += (dt_util.utcnow() - self._last_status_update).total_seconds()
return position
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
if (duration := self._status.get("totlen")) is None:
return None
return float(duration)
@property
def media_position_updated_at(self):
"""Last time status was updated."""
return self._last_status_update
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
volume = self._status.get("volume")
if self.is_grouped:
volume = self._sync_status.get("@volume")
if volume is not None:
return int(volume) / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
mute = self._status.get("mute")
if self.is_grouped:
mute = self._sync_status.get("@mute")
if mute is not None:
mute = bool(int(mute))
return mute
@property
def id(self):
"""Get id of device."""
return self._id
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def bluesound_device_name(self):
"""Return the device name as returned by the device."""
return self._bluesound_device_name
@property
def icon(self):
"""Return the icon of the device."""
return self._icon
@property
def source_list(self):
"""List of available input sources."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
sources = []
for source in self._preset_items:
sources.append(source["title"])
for source in [
x
for x in self._services_items
if x["type"] == "LocalMusic" or x["type"] == "RadioService"
]:
sources.append(source["title"])
for source in self._capture_items:
sources.append(source["title"])
return sources
@property
def source(self):
"""Name of the current input source."""
if self._status is None or (self.is_grouped and not self.is_master):
return None
if (current_service := self._status.get("service", "")) == "":
return ""
stream_url = self._status.get("streamUrl", "")
if self._status.get("is_preset", "") == "1" and stream_url != "":
            # This check doesn't work with all presets (for example playlists),
            # but it works for radio; service_items will catch playlists.
items = [
x
for x in self._preset_items
if "url2" in x and parse.unquote(x["url2"]) == stream_url
]
if items:
return items[0]["title"]
        # This can be a bit difficult to detect. Bluetooth could be named
        # different things, and there is no reliable way to match the choices in
        # the capture list to what is currently playing, so it's a bit of
        # guesswork. This method will need some tweaking over time.
title = self._status.get("title1", "").lower()
if title == "bluetooth" or stream_url == "Capture:hw:2,0/44100/16/2":
items = [
x
for x in self._capture_items
if x["url"] == "Capture%3Abluez%3Abluetooth"
]
if items:
return items[0]["title"]
items = [x for x in self._capture_items if x["url"] == stream_url]
if items:
return items[0]["title"]
if stream_url[:8] == "Capture:":
stream_url = stream_url[8:]
idx = BluesoundPlayer._try_get_index(stream_url, ":")
if idx > 0:
stream_url = stream_url[:idx]
for item in self._capture_items:
url = parse.unquote(item["url"])
if url[:8] == "Capture:":
url = url[8:]
idx = BluesoundPlayer._try_get_index(url, ":")
if idx > 0:
url = url[:idx]
if url.lower() == stream_url.lower():
return item["title"]
items = [x for x in self._capture_items if x["name"] == current_service]
if items:
return items[0]["title"]
items = [x for x in self._services_items if x["name"] == current_service]
if items:
return items[0]["title"]
if self._status.get("streamUrl", "") != "":
_LOGGER.debug(
"Couldn't find source of stream URL: %s",
self._status.get("streamUrl", ""),
)
return None
@property
def supported_features(self):
"""Flag of media commands that are supported."""
if self._status is None:
return 0
if self.is_grouped and not self.is_master:
return SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE
supported = SUPPORT_CLEAR_PLAYLIST
if self._status.get("indexing", "0") == "0":
supported = (
supported
| SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_STOP
| SUPPORT_PLAY
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
)
current_vol = self.volume_level
if current_vol is not None and current_vol >= 0:
supported = (
supported
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
)
if self._status.get("canSeek", "") == "1":
supported = supported | SUPPORT_SEEK
return supported
@property
def is_master(self):
"""Return true if player is a coordinator."""
return self._is_master
@property
def is_grouped(self):
"""Return true if player is a coordinator."""
return self._master is not None or self._is_master
@property
def shuffle(self):
"""Return true if shuffle is active."""
return self._status.get("shuffle", "0") == "1"
async def async_join(self, master):
"""Join the player to a group."""
master_device = [
device
for device in self.hass.data[DATA_BLUESOUND]
if device.entity_id == master
]
if master_device:
_LOGGER.debug(
"Trying to join player: %s to master: %s",
self.id,
master_device[0].id,
)
await master_device[0].async_add_slave(self)
else:
_LOGGER.error("Master not found %s", master_device)
@property
def extra_state_attributes(self):
"""List members in group."""
attributes = {}
if self._group_list:
attributes = {ATTR_BLUESOUND_GROUP: self._group_list}
attributes[ATTR_MASTER] = self._is_master
return attributes
def rebuild_bluesound_group(self):
"""Rebuild the list of entities in speaker group."""
if self._group_name is None:
return None
bluesound_group = []
device_group = self._group_name.split("+")
sorted_entities = sorted(
self._hass.data[DATA_BLUESOUND],
key=lambda entity: entity.is_master,
reverse=True,
)
bluesound_group = [
entity.name
for entity in sorted_entities
if entity.bluesound_device_name in device_group
]
return bluesound_group
async def async_unjoin(self):
"""Unjoin the player from a group."""
if self._master is None:
return
_LOGGER.debug("Trying to unjoin player: %s", self.id)
await self._master.async_remove_slave(self)
async def async_add_slave(self, slave_device):
"""Add slave to master."""
return await self.send_bluesound_command(
f"/AddSlave?slave={slave_device.host}&port={slave_device.port}"
)
async def async_remove_slave(self, slave_device):
"""Remove slave to master."""
return await self.send_bluesound_command(
f"/RemoveSlave?slave={slave_device.host}&port={slave_device.port}"
)
async def async_increase_timer(self):
"""Increase sleep time on player."""
sleep_time = await self.send_bluesound_command("/Sleep")
if sleep_time is None:
_LOGGER.error("Error while increasing sleep time on player: %s", self.id)
return 0
return int(sleep_time.get("sleep", "0"))
async def async_clear_timer(self):
"""Clear sleep timer on player."""
sleep = 1
while sleep > 0:
sleep = await self.async_increase_timer()
async def async_set_shuffle(self, shuffle):
"""Enable or disable shuffle mode."""
value = "1" if shuffle else "0"
return await self.send_bluesound_command(f"/Shuffle?state={value}")
async def async_select_source(self, source):
"""Select input source."""
if self.is_grouped and not self.is_master:
return
items = [x for x in self._preset_items if x["title"] == source]
if not items:
items = [x for x in self._services_items if x["title"] == source]
if not items:
items = [x for x in self._capture_items if x["title"] == source]
if not items:
return
selected_source = items[0]
url = f"Play?url={selected_source['url']}&preset_id&image={selected_source['image']}"
if "is_raw_url" in selected_source and selected_source["is_raw_url"]:
url = selected_source["url"]
return await self.send_bluesound_command(url)
async def async_clear_playlist(self):
"""Clear players playlist."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Clear")
async def async_media_next_track(self):
"""Send media_next command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = "Skip"
if self._status and "actions" in self._status:
for action in self._status["actions"]["action"]:
if "@name" in action and "@url" in action and action["@name"] == "skip":
cmd = action["@url"]
return await self.send_bluesound_command(cmd)
async def async_media_previous_track(self):
"""Send media_previous command to media player."""
if self.is_grouped and not self.is_master:
return
cmd = "Back"
if self._status and "actions" in self._status:
for action in self._status["actions"]["action"]:
if "@name" in action and "@url" in action and action["@name"] == "back":
cmd = action["@url"]
return await self.send_bluesound_command(cmd)
async def async_media_play(self):
"""Send media_play command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Play")
async def async_media_pause(self):
"""Send media_pause command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Pause")
async def async_media_stop(self):
"""Send stop command."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command("Pause")
async def async_media_seek(self, position):
"""Send media_seek command to media player."""
if self.is_grouped and not self.is_master:
return
return await self.send_bluesound_command(f"Play?seek={float(position)}")
async def async_play_media(self, media_type, media_id, **kwargs):
"""
Send the play_media command to the media player.
If ATTR_MEDIA_ENQUEUE is True, add `media_id` to the queue.
"""
if self.is_grouped and not self.is_master:
return
url = f"Play?url={media_id}"
if kwargs.get(ATTR_MEDIA_ENQUEUE):
return await self.send_bluesound_command(url)
return await self.send_bluesound_command(url)
async def async_volume_up(self):
"""Volume up the media player."""
current_vol = self.volume_level
if not current_vol or current_vol >= 1:
return
return await self.async_set_volume_level(current_vol + 0.01)
async def async_volume_down(self):
"""Volume down the media player."""
current_vol = self.volume_level
if not current_vol or current_vol <= 0:
return
return await self.async_set_volume_level(current_vol - 0.01)
async def async_set_volume_level(self, volume):
"""Send volume_up command to media player."""
if volume < 0:
volume = 0
elif volume > 1:
volume = 1
return await self.send_bluesound_command(f"Volume?level={float(volume) * 100}")
async def async_mute_volume(self, mute):
"""Send mute command to media player."""
if mute:
return await self.send_bluesound_command("Volume?mute=1")
return await self.send_bluesound_command("Volume?mute=0")
|
|
import matplotlib
matplotlib.use('Agg')
from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.starmodel import StarModel
from isochrones.observation import ObservationTree
import pandas as pd
import matplotlib.pyplot as plt
import sys
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
def get_index(n):
if n < 10:
return '000' + str(n)
elif n < 100:
return '00' + str(n)
elif n < 1000:
return '0' + str(n)
else:
return str(n)
n = int(sys.argv[1])  # argv values are strings; convert so get_index can compare numerically
i = get_index(n)
df = pd.read_csv('/tigress/np5/dataFrame/df_quad_test{}.csv'.format(i))
#-------------------------------------------------------------------------------
#quad0 - all in same system
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar)
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad0'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad0.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad0_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad0_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad1 - M1,M2,M3 bound - M4 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,0,0,1])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad1'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad1.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
    fig.savefig('/tigress/np5/figures/test{}_quad1_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
    fig.savefig('/tigress/np5/figures/test{}_quad1_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad2 - M1,M2,M4 bound - M3 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,0,1,0])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad2'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad2.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad2_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad2_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad3 - M1,M3,M4 bound - M2 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,0,0])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad3'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad3.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad3_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad3_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad4 - M2,M3,M4 bound - M1 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,1,1])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad4'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad4.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad4_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad4_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad5 - M1,M2 bound - M3,M4 bound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,0,1,1])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad5'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad5.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad5_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad5_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad6 - M1,M2 bound - M3 unbound - M4 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,0,1,2])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad6'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad6.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad6_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad6_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad7 - M1,M3 bound - M2,M4 bound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,0,1])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad7'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad7.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad7_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad7_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad8 - M1,M3 bound - M2 unbound - M4 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,0,2])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad8'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad8.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad8_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad8_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad9 - M1,M4 bound - M2,M3 bound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,1,0])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad9'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad9.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad9_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad9_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad10 - M1,M4 bound - M2 unbound - M3 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,2,0])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad10'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad10.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad10_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad10_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad11 - M2,M3 bound - M1 unbound - M4 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,1,2])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad11'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad11.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad11_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad11_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad12 - M2,M4 bound - M1 unbound - M3 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,2,1])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad12'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad12.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad12_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad12_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad13 - M3,M4 bound - M1 unbound - M2 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,2,2])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad13'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad13.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad13_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad13_corner_observed.png'.format(i))
plt.close(fig)
#-------------------------------------------------------------------------------
#quad14 - M1 unbound - M2 unbound - M3 unbound - M4 unbound
dar = Dartmouth_Isochrone()
t = ObservationTree.from_df(df, name='test{}'.format(i))
t.define_models(dar, index=[0,1,2,3])
mod = StarModel(dar, obs=t)
mod.fit_multinest(n_live_points=1000,
basename='/tigress/np5/chains/test{}_quad14'.format(i))
if rank == 0:
f1 = open('/tigress/np5/evidence_quad14.txt','a')
evi = mod.evidence
evi = str(evi)
f1.write('case{}: '.format(i) + evi + '\n')
f1.close()
fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
fig.savefig('/tigress/np5/figures/test{}_quad14_corner_physical.png'.format(i))
plt.close(fig)
fig = mod.corner_observed()
fig.savefig('/tigress/np5/figures/test{}_quad14_corner_observed.png'.format(i))
plt.close(fig)
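#-------------------------------------------------------------------------------
# The fifteen blocks above differ only in the index passed to define_models and
# in the output label. A minimal sketch of how the same runs could be driven
# from one helper is shown below; fit_case is hypothetical and is not called
# anywhere in this script.
def fit_case(label, index=None):
    """Fit one association hypothesis and record its evidence and corner plots."""
    dar = Dartmouth_Isochrone()
    t = ObservationTree.from_df(df, name='test{}'.format(i))
    if index is None:
        t.define_models(dar)
    else:
        t.define_models(dar, index=index)
    mod = StarModel(dar, obs=t)
    mod.fit_multinest(n_live_points=1000,
                      basename='/tigress/np5/chains/test{}_{}'.format(i, label))
    if rank == 0:
        with open('/tigress/np5/evidence_{}.txt'.format(label), 'a') as f1:
            f1.write('case{}: {}\n'.format(i, str(mod.evidence)))
        fig = mod.corner_physical(props=['mass', 'distance', 'AV'])
        fig.savefig('/tigress/np5/figures/test{}_{}_corner_physical.png'.format(i, label))
        plt.close(fig)
        fig = mod.corner_observed()
        fig.savefig('/tigress/np5/figures/test{}_{}_corner_observed.png'.format(i, label))
        plt.close(fig)
# For example, fit_case('quad0') would reproduce the first block above and
# fit_case('quad14', index=[0, 1, 2, 3]) the last.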
|
|
################################################################################
# iso_read.py
#
# Simple ISO NC code parsing
#
# Hirutso Enni, 2009-01-13
""" use this script to backplot nc files to *.scr file for autocad,bricscad,
draftsight,progecad,ares commander, etc....
usage: python cad_iso_read.py temp.nc temp.scr
"""
import cad_nc_read as nc
import re
import sys
################################################################################
class Parser(nc.Parser):
def __init__(self, writer):
nc.Parser.__init__(self, writer)
        self.pattern_main = re.compile(r'([(!;].*|\s+|[a-zA-Z0-9_:](?:[+-])?\d*(?:\.\d*)?|\w\#\d+|\(.*?\)|\#\d+\=(?:[+-])?\d*(?:\.\d*)?)')
        # The pattern matches one of: a comment character from [(!;] followed by the rest
        # of the line, a run of whitespace, a letter/digit/underscore/colon optionally
        # followed by a signed decimal number (optional decimal point and fraction),
        # a letter followed by #<number>, a parenthesised comment, or a
        # #<number>=<decimal> assignment.
        # To support another comment character, add it to the [(!;] class here, then find
        # the 'comment' handling towards the end of ParseWord and add another elif.
def ParseWord(self, word):
if (word[0] == 'A' or word[0] == 'a'):
self.col = "axis"
self.a = eval(word[1:])
self.move = True
elif (word[0] == 'B' or word[0] == 'b'):
self.col = "axis"
self.b = eval(word[1:])
self.move = True
elif (word[0] == 'C' or word[0] == 'c'):
self.col = "axis"
self.c = eval(word[1:])
self.move = True
elif (word[0] == 'F' or word[0] == 'f'):
self.col = "axis"
self.f = eval(word[1:])
self.move = True
elif (word == 'G0' or word == 'G00' or word == 'g0' or word == 'g00'):
self.path_col = "rapid"
self.col = "rapid"
self.arc = 0
elif (word == 'G1' or word == 'G01' or word == 'g1' or word == 'g01'):
self.path_col = "feed"
self.col = "feed"
self.arc = 0
elif (word == 'G2' or word == 'G02' or word == 'g2' or word == 'g02' or word == 'G12' or word == 'g12'):
self.path_col = "feed"
self.col = "feed"
self.arc = -1
elif (word == 'G3' or word == 'G03' or word == 'g3' or word == 'g03' or word == 'G13' or word == 'g13'):
self.path_col = "feed"
self.col = "feed"
self.arc = +1
elif (word == 'G10' or word == 'g10'):
self.no_move = True
elif (word == 'L1' or word == 'l1'):
self.no_move = True
elif (word == 'G61.1' or word == 'g61.1' or word == 'G61' or word == 'g61' or word == 'G64' or word == 'g64'):
self.no_move = True
elif (word == 'G20' or word == 'G70'):
self.col = "prep"
self.set_mode(units=25.4)
elif (word == 'G21' or word == 'G71'):
self.col = "prep"
self.set_mode(units=1.0)
elif (word == 'G81' or word == 'g81'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G82' or word == 'g82'):
            self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G83' or word == 'g83'):
self.drill = True
self.no_move = True
self.path_col = "feed"
self.col = "feed"
elif (word == 'G90' or word == 'g90'):
self.absolute()
elif (word == 'G91' or word == 'g91'):
self.incremental()
        elif (word[0] == 'G') : self.col = "prep"
elif (word[0] == 'I' or word[0] == 'i'):
self.col = "axis"
self.i = eval(word[1:])
self.move = True
elif (word[0] == 'J' or word[0] == 'j'):
self.col = "axis"
self.j = eval(word[1:])
self.move = True
elif (word[0] == 'K' or word[0] == 'k'):
self.col = "axis"
self.k = eval(word[1:])
self.move = True
elif (word[0] == 'M') : self.col = "misc"
elif (word[0] == 'N') : self.col = "blocknum"
elif (word[0] == 'O') : self.col = "program"
elif (word[0] == 'P' or word[0] == 'p'):
if (self.no_move != True):
self.col = "axis"
self.p = eval(word[1:])
self.move = True
elif (word[0] == 'Q' or word[0] == 'q'):
if (self.no_move != True):
self.col = "axis"
self.q = eval(word[1:])
self.move = True
elif (word[0] == 'R' or word[0] == 'r'):
self.col = "axis"
self.r = eval(word[1:])
self.move = True
elif (word[0] == 'S' or word[0] == 's'):
self.col = "axis"
self.s = eval(word[1:])
self.move = True
elif (word[0] == 'T') :
self.col = "tool"
self.set_tool( eval(word[1:]) )
elif (word[0] == 'X' or word[0] == 'x'):
self.col = "axis"
self.x = eval(word[1:])
self.move = True
elif (word[0] == 'Y' or word[0] == 'y'):
self.col = "axis"
self.y = eval(word[1:])
self.move = True
elif (word[0] == 'Z' or word[0] == 'z'):
self.col = "axis"
self.z = eval(word[1:])
self.move = True
elif (word[0] == '(') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '!') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == ';') : (self.col, self.cdata) = ("comment", True)
elif (word[0] == '#') : self.col = "variable"
elif (word[0] == ':') : self.col = "blocknum"
elif (ord(word[0]) <= 32) : self.cdata = True
def Parse(self, name, oname=None):
self.files_open(name,oname)
#self.begin_ncblock()
#self.begin_path(None)
#self.add_line(z=500)
#self.end_path()
#self.end_ncblock()
self.path_col = None
self.f = None
self.arc = 0
while (self.readline()):
self.a = None
self.b = None
self.c = None
self.i = None
self.j = None
self.k = None
self.p = None
self.q = None
self.r = None
self.s = None
self.x = None
self.y = None
self.z = None
#self.begin_ncblock()
self.move = False
self.drill = False
self.no_move = False
words = self.pattern_main.findall(self.line)
for word in words:
self.col = None
self.cdata = False
self.ParseWord(word)
self.add_text(word, self.col, self.cdata)
if (self.drill):
self.begin_path("rapid")
self.add_line(self.x, self.y, self.r)
self.end_path()
self.begin_path("feed")
self.add_line(self.x, self.y, self.z)
self.end_path()
self.begin_path("feed")
self.add_line(self.x, self.y, self.r)
self.end_path()
else:
if (self.move and not self.no_move):
self.begin_path(self.path_col)
if (self.arc==-1):
self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
elif (self.arc==1):
#self.add_arc(x, y, z, i, j, k, -r, arc) #if you want to use arcs with R values uncomment the first part of this line and comment the next one
self.add_arc(self.x, self.y, self.z, self.i, self.j, self.k, self.r, self.arc)
else : self.add_line(self.x, self.y, self.z, self.a, self.b, self.c)
self.end_path()
self.end_ncblock()
self.files_close()
################################################################################
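# A small illustration of how Parser.pattern_main above splits one line of ISO
# NC code into words before ParseWord() classifies them. The sample line of
# G-code is made up for this example and the helper is not used by the parser.
def _example_tokenize(line='N10 G01 X1.5 Y-2.0 F300 (finish pass)'):
    """Split one line of G-code into words, dropping the whitespace runs."""
    pattern = re.compile(r'([(!;].*|\s+|[a-zA-Z0-9_:](?:[+-])?\d*(?:\.\d*)?|\w\#\d+|\(.*?\)|\#\d+\=(?:[+-])?\d*(?:\.\d*)?)')
    # For the default line this yields 'N10', 'G01', 'X1.5', 'Y-2.0', 'F300'
    # and the trailing '(finish pass)' comment.
    return [w for w in pattern.findall(line) if not w.isspace()]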
if __name__ == '__main__':
    # The parser class defined above is named Parser (not ParserIso); it expects
    # a writer argument, and None is assumed to be acceptable here since the
    # output file is passed directly to Parse().
    parser = Parser(None)
    if len(sys.argv) > 2:
        parser.Parse(sys.argv[1], sys.argv[2])
    else:
        parser.Parse(sys.argv[1])
|
|
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <[email protected]> wrote this file. You are hereby granted permission to
# copy, modify, or mutilate this file without restriction. If you create a
# work derived from this file, you may optionally include a copy of this notice,
# for which I would be most grateful, but you are not required to do so.
# If we meet some day, and you think this stuff is worth it, you can buy me a
# beer in return. Fred Dushin
# ----------------------------------------------------------------------------
import machine
import ubinascii
import sys
import gc
import os
import esp
import uhttpd
import network
import logging
class APIHandler:
def __init__(self):
self._handlers = {
'system': SystemAPIHandler(),
'memory': MemoryAPIHandler(),
'flash': FlashAPIHandler(),
'network': NetworkAPIHandler()
}
def get(self, api_request):
context = api_request['context']
        logging.info("get context: {}".format(context))
if len(context) == 0 or (len(context) == 1 and context[0] == ''):
api_request['context'] = []
return {
'system': self._handlers['system'].get(api_request),
'memory': self._handlers['memory'].get(api_request),
'flash': self._handlers['flash'].get(api_request),
'network': self._handlers['network'].get(api_request),
}
else:
context_head = context[0]
api_request['context'] = context[1:]
return self._handlers[context_head].get(api_request)
class SystemAPIHandler:
def __init__(self):
pass
#
# callbacks
#
def get(self, api_request):
return self.get_sys_stats()
#
# read operations
#
def get_sys_stats(self):
return {
'machine_id': "0x{}".format(ubinascii.hexlify(machine.unique_id()).decode().upper()),
'machine_freq': machine.freq(),
'byteorder': sys.byteorder,
'system': "{}-{}".format(
sys.implementation[0],
self.to_version_string(sys.implementation[1]),
),
'maxsize': sys.maxsize,
'modules': self.keys(sys.modules),
'path': sys.path,
'platform': sys.platform,
'version': sys.version,
}
def keys(self, pairs):
ret = []
for k, v in pairs.items():
ret.append(k)
return ret
def to_version_string(self, version):
return "{}.{}.{}".format(
version[0], version[1], version[2]
)
class MemoryAPIHandler:
def __init__(self):
pass
#
# callbacks
#
def get(self, api_request):
return self.get_memory_stats()
#
# read operations
#
def get_memory_stats(self):
mem_alloc = gc.mem_alloc()
mem_free = gc.mem_free()
return {
'mem_alloc': mem_alloc,
'mem_free': mem_free
}
class FlashAPIHandler:
def __init__(self):
pass
#
# callbacks
#
def get(self, api_request):
return self.get_flash_stats()
#
# read operations
#
def get_flash_stats(self):
stats = os.statvfs('/')
frsize = stats[1]
blocks = stats[2]
bavail = stats[4]
capacity = blocks * frsize
free = bavail * frsize
used = capacity - free
return {
'flash_id': esp.flash_id(),
'flash_size': esp.flash_size(),
'capacity': capacity,
'used': used,
'free': free
}
class NetworkAPIHandler:
def __init__(self):
pass
#
# callbacks
#
def get(self, api_request):
logging.info("get {}".format(api_request))
context = api_request['context']
return self.get_network_stats(context)
def post(self, api_request):
logging.info("post {}".format(api_request))
return self.save(api_request)
def put(self, api_request):
logging.info("put {}".format(api_request))
return self.save(api_request)
#
# read operations
#
def get_network_stats(self, context):
ret = {
'phy_mode': self.get_phy_mode(),
'sta': self.get_sta_stats(),
'ap': self.get_ap_stats()
}
for component in context:
if component in ret:
ret = ret[component]
else:
raise uhttpd.NotFoundException("Bad context: {}".format(context))
return ret
def get_sta_stats(self):
sta = network.WLAN(network.STA_IF)
return self.get_wlan_stats(sta)
def get_ap_stats(self):
ap = network.WLAN(network.AP_IF)
wlan_stats = self.get_wlan_stats(ap)
wlan_stats['config'] = self.get_wlan_config_stats(ap)
return wlan_stats
def get_wlan_stats(self, wlan):
if wlan.active():
ip, subnet, gateway, dns = wlan.ifconfig()
return {
'status': self.get_wlan_status(wlan),
'ifconfig': {
'ip': ip,
'subnet': subnet,
'gateway': gateway,
'dns': dns
}
}
else:
return {}
def get_wlan_config_stats(self, ap):
import ubinascii
return {
'mac': "0x{}".format(ubinascii.hexlify(ap.config('mac')).decode()),
'essid': ap.config('essid'),
'channel': ap.config('channel'),
'hidden': ap.config('hidden'),
'authmode': self.get_auth_mode(ap.config('authmode'))
}
def get_auth_mode(self, mode):
if mode == network.AUTH_OPEN:
return "AUTH_OPEN"
elif mode == network.AUTH_WEP:
return "AUTH_WEP"
elif mode == network.AUTH_WPA_PSK:
return "AUTH_WPA_PSK"
elif mode == network.AUTH_WPA2_PSK:
return "AUTH_WPA2_PSK"
elif mode == network.AUTH_WPA_WPA2_PSK:
return "AUTH_WPA_WPA2_PSK"
else:
return "Unknown auth_mode: {}".format(mode)
def get_wlan_status(self, wlan):
status = wlan.status()
if status == network.STAT_IDLE:
return 'STAT_IDLE'
elif status == network.STAT_CONNECTING:
return 'STAT_CONNECTING'
elif status == network.STAT_WRONG_PASSWORD:
return 'STAT_WRONG_PASSWORD'
elif status == network.STAT_NO_AP_FOUND:
return 'STAT_NO_AP_FOUND'
elif status == network.STAT_CONNECT_FAIL:
return 'STAT_CONNECT_FAIL'
elif status == network.STAT_GOT_IP:
return 'STAT_GOT_IP'
else:
return "Unknown wlan status: {}".format(status)
def get_phy_mode(self):
phy_mode = network.phy_mode()
if phy_mode == network.MODE_11B:
return 'MODE_11B'
elif phy_mode == network.MODE_11G:
return 'MODE_11G'
elif phy_mode == network.MODE_11N:
return 'MODE_11N'
else:
return "Unknown phy_mode: {}".format(phy_mode)
#
# save operations
#
def save(self, api_request):
context = api_request['context']
logging.info("context: {}".format(context))
if context == ['ap', 'config']:
return self.save_ap_config(api_request)
else:
raise uhttpd.BadRequestException("Unsupported context on save: {}", context)
def save_ap_config(self, api_request):
config = api_request['body']
ap = network.WLAN(network.AP_IF)
logging.info("config: {}".format(config))
ap.config(
#mac=config['mac'],
essid=config['essid'],
channel=config['channel'],
hidden=config['hidden']
)
return self.get_wlan_config_stats(ap)
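#
# usage sketch
#
# A minimal dispatch sketch using only the handlers defined above; it runs on
# the device (it touches the network module) and leaves out the uhttpd server
# wiring, which is assumed but not shown here.
#
def example_network_get():
    handler = NetworkAPIHandler()
    # 'context' selects a sub-tree of the stats, e.g. the station interface
    return handler.get({'context': ['sta']})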
|
|
"""
Database Classes
----------------
This module captures information and methods for the main dhs database. It
creates a regular psycopg2 connection for running queries, and also a
pandas/sqlalchemy connection to the same postgres database for writing tables.
"""
__author__ = 'krishnab'
__version__ = '0.1.0'
import numpy as np
import pandas as pd
import os
import datetime
import psycopg2 as pg
import psycopg2.extras as pgextras
import pandas.io.sql as psql
import sqlalchemy as sa
from sqlalchemy import create_engine, MetaData
import asyncio
import asyncpg
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
from psycopg2.extensions import AsIs
## Initialize Constants
'''
DatabasePsycopg2 Class:
-----------------------
Psycopg2 is the main postgres access module and the most mature, so it is the
basic workhorse of this database-access module. One limitation of psycopg2 is
that it is not well suited to writing tables to postgres, which is why the
DatabaseSqlalchemy class exists. Most queries go through psycopg2 when
possible. DatabaseAsyncpg is meant to use a newer, faster module to talk to
postgresql, but it is the least mature. A short usage example follows the
class definition.
'''
class DatabasePsycopg2():
def __init__(self,
dbname,
username,
password,
hostname='localhost',
portnumber=5433):
## Setup psycopg2 database connection.
self.conn = self.connect_to_postgres_through_psycopg2(dbname,
username,
password,
hostname,
portnumber)
if isinstance(self.conn, pg.extensions.connection):
self.rcursor = self.conn.cursor()
self.dictcursor = self.conn.cursor(cursor_factory=pgextras.DictCursor)
## Setup pandas and sqlalchemy connection to database
def connect_to_postgres_through_psycopg2(self,
dbname,
username,
password,
hostname='localhost',
portnumber=5433):
try:
connection_string = "dbname=%s user=%s host=%s password=%s " \
"port=%d " % (dbname,
username,
hostname,
password,
int(portnumber))
conn = pg.connect(connection_string)
except:
print('check database connection information before proceeding')
raise
return (conn)
    def add_column_to_table(self, tablename, columnname):
        query = "ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s varchar(255);"
        return (self.get_regular_cursor_query_no_return(query, (AsIs(tablename), AsIs(columnname),)))
def get_dictionary_cursor_query(self,
query,
strings=('',)):
try:
self.dictcursor.execute(query, strings)
except:
print('Sorry, something went wrong with running the query')
raise
return (self.dictcursor.fetchall())
def get_regular_cursor_query(self,
query,
strings=('',)):
try:
self.rcursor.execute(query, strings)
except:
print('Sorry, something went wrong with running the query')
raise
return (self.rcursor.fetchall())
def get_regular_cursor_query_no_return(self,
query,
strings=('',)):
try:
self.rcursor.execute(query, strings)
self.conn.commit()
except:
print('Sorry, something went wrong with running the query')
raise
return (0)
def get_table_column_names(self,
tablename):
query = "SELECT column_name FROM information_schema.columns WHERE " \
"table_name=(%s)"
return (self.get_regular_cursor_query(query, (tablename,)))
def get_list_of_tables_in_database(self,
schema):
query = "SELECT tablename FROM pg_catalog.pg_tables where schemaname " \
"=(%s)"
return (self.get_dictionary_cursor_query(query, (schema,)))
def set_all_table_names_to_lowercase(self):
        # This query will generate rename statements for all capitalized table names
query = """SELECT 'ALTER TABLE ' || quote_ident(t.table_schema) || '.'
|| quote_ident(t.table_name) || ' RENAME TO ' || quote_ident(lower(t.table_name)) || ';' As ddlsql
FROM information_schema.tables As t
WHERE t.table_schema NOT IN('information_schema', 'pg_catalog')
AND t.table_name <> lower(t.table_name)
ORDER BY t.table_schema, t.table_name;"""
list_of_updates = self.get_regular_cursor_query(query)
print(list_of_updates)
for q in list_of_updates:
print(type(q[0]))
print(q[0])
self.get_regular_cursor_query_no_return(q[0])
list_of_updates = self.get_regular_cursor_query(query)
if len(list_of_updates) > 0:
print("not all table names were fixed. Try again.")
def set_all_field_names_to_lowercase(self):
query = """SELECT 'ALTER TABLE ' || quote_ident(c.table_schema) || '.'
|| quote_ident(c.table_name) || ' RENAME "' || c.column_name || '" TO ' || quote_ident(lower(c.column_name)) || ';' As ddlsql
FROM information_schema.columns As c
WHERE c.table_schema NOT IN('information_schema', 'pg_catalog')
AND c.column_name <> lower(c.column_name)
ORDER BY c.table_schema, c.table_name, c.column_name;"""
list_of_updates = self.get_regular_cursor_query(query)
print(list_of_updates)
for q in list_of_updates:
print(type(q[0]))
print(q[0])
self.get_regular_cursor_query_no_return(q[0])
list_of_updates = self.get_regular_cursor_query(query)
if len(list_of_updates) > 0:
print("not all table names were fixed. Try again.")
def check_existence_or_drop_query(self, tablename):
query = 'DROP TABLE IF EXISTS {};'.format(tablename)
self.get_regular_cursor_query_no_return(query)
def get_variables_by_table(self):
vals = {'schemaname': 'public'}
query = """SELECT table_catalog, table_schema, table_name, column_name, data_type, is_generated, is_updatable FROM information_schema.columns where table_schema ='public'"""
return (pd.read_sql_query(query, self.conn))
def set_connection_closed(self):
self.conn.commit()
self.rcursor.close()
self.dictcursor.close()
self.conn.close()
return (1)
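'''
Usage sketch for DatabasePsycopg2 (illustrative only: the credentials and the
table name below are placeholders, not values taken from this project):
'''
def example_psycopg2_usage():
    db = DatabasePsycopg2('dhs', 'some_user', 'some_password',
                          hostname='localhost', portnumber=5433)
    tables = db.get_list_of_tables_in_database('public')
    columns = db.get_table_column_names('some_table')
    db.set_connection_closed()
    return tables, columns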
'''
DatabaseSqlalchemy Class:
-----------------------
Wraps an sqlalchemy engine so that pandas can read from and write tables to
the postgres database. A short usage example follows the class definition.
'''
class DatabaseSqlalchemy():
def __init__(self,
dbname,
username,
password,
hostname='localhost',
portnumber=5433):
## Setup psycopg2 database connection.
self.conn = self.connect_to_postgres_through_sqlalchemy(
username,
password,
dbname,
hostname,
portnumber)
self.rconn = self.conn.raw_connection()
def connect_to_postgres_through_sqlalchemy(self,
username,
password,
dbname,
hostname,
portnumber):
        '''Returns an sqlalchemy engine connected to the database'''
# We connect with the help of the PostgreSQL URL
# postgresql://federer:grandestslam@localhost:5432/tennis
url = 'postgresql://{}:{}@{}:{}/{}'
url = url.format(username, password, hostname, portnumber, dbname)
# The return value of create_engine() is our connection object
con = create_engine(url, client_encoding='utf8')
con.connect()
# We then bind the connection to MetaData()
meta = MetaData(bind=con, reflect=True)
# print(meta)
return (con)
def get_table_list_as_dataframe(self, schemaname):
vals = {'schemaname': schemaname}
query = "SELECT tablename FROM pg_catalog.pg_tables where schemaname =:schemaname"
res = pd.read_sql_query(sa.text(query), self.conn, params=vals)
return (res)
def get_column_list_for_table_as_dataframe(self, tablename):
vals = {'tablename': tablename}
query = "SELECT column_name FROM information_schema.columns WHERE " \
"table_name=:tablename"
res = pd.read_sql_query(sa.text(query), self.conn, params=vals)
return (res)
def _build_table_class(self, tablename, fields):
Base = declarative_base()
class NewTable(Base):
__tablename__ = tablename
id = Column(Integer, primary_key=True)
for index, row in fields.iterrows():
setattr(NewTable, row['fields'], Column(String(255)))
Base.metadata.create_all(self.conn)
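'''
Usage sketch for DatabaseSqlalchemy (illustrative only: connection details are
placeholders):
'''
def example_sqlalchemy_usage():
    db = DatabaseSqlalchemy('dhs', 'some_user', 'some_password')
    # pandas reads the parameterised query through the sqlalchemy engine
    return db.get_table_list_as_dataframe('public')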
'''
DatabaseAsyncpg Class:
-----------------------
Intended to use asyncpg for faster, asynchronous access to postgresql; at the
moment it only sets up a plain psycopg2 connection. An illustrative asyncpg
sketch follows the class definition.
'''
class DatabaseAsyncpg():
def __init__(self,
dbname,
username,
password,
hostname='localhost',
portnumber=5433):
## Setup psycopg2 database connection.
self.conn = self.connect_to_postgres_through_psycopg2(dbname,
username,
password,
hostname,
portnumber)
def connect_to_postgres_through_psycopg2(self,
dbname,
username,
password,
hostname='localhost',
portnumber=5433):
try:
            connection_string = "dbname=%s user=%s host=%s password=%s " \
                                "port=%d " % (dbname,
                                              username,
                                              hostname,
                                              password,
                                              int(portnumber))
conn = pg.connect(connection_string)
except:
print('check database connection information before proceeding')
raise
return (conn)
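'''
Sketch of what an actual asyncpg-based connection could look like for
DatabaseAsyncpg (illustrative only; asyncpg.connect is a coroutine, so it has
to be driven by an event loop, e.g. with asyncio.get_event_loop().run_until_complete):
'''
async def example_asyncpg_fetch_tables(dbname, username, password,
                                       hostname='localhost', portnumber=5433):
    conn = await asyncpg.connect(database=dbname,
                                 user=username,
                                 password=password,
                                 host=hostname,
                                 port=portnumber)
    try:
        return await conn.fetch('SELECT tablename FROM pg_catalog.pg_tables '
                                'WHERE schemaname = $1', 'public')
    finally:
        await conn.close()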
|
|
"""
Unit testing for the Command system itself.
"""
from evennia.utils.test_resources import EvenniaTest, TestCase
from evennia.commands.cmdset import CmdSet
from evennia.commands.command import Command
# Testing-command sets
class _CmdA(Command):
key = "A"
def __init__(self, cmdset, *args, **kwargs):
super(_CmdA, self).__init__(*args, **kwargs)
self.from_cmdset = cmdset
class _CmdB(Command):
key = "B"
def __init__(self, cmdset, *args, **kwargs):
super(_CmdB, self).__init__(*args, **kwargs)
self.from_cmdset = cmdset
class _CmdC(Command):
key = "C"
def __init__(self, cmdset, *args, **kwargs):
super(_CmdC, self).__init__(*args, **kwargs)
self.from_cmdset = cmdset
class _CmdD(Command):
key = "D"
def __init__(self, cmdset, *args, **kwargs):
super(_CmdD, self).__init__(*args, **kwargs)
self.from_cmdset = cmdset
class _CmdSetA(CmdSet):
key = "A"
def at_cmdset_creation(self):
self.add(_CmdA("A"))
self.add(_CmdB("A"))
self.add(_CmdC("A"))
self.add(_CmdD("A"))
class _CmdSetB(CmdSet):
key = "B"
def at_cmdset_creation(self):
self.add(_CmdA("B"))
self.add(_CmdB("B"))
self.add(_CmdC("B"))
class _CmdSetC(CmdSet):
key = "C"
def at_cmdset_creation(self):
self.add(_CmdA("C"))
self.add(_CmdB("C"))
class _CmdSetD(CmdSet):
key = "D"
def at_cmdset_creation(self):
self.add(_CmdA("D"))
self.add(_CmdB("D"))
self.add(_CmdC("D"))
self.add(_CmdD("D"))
# testing Command Sets
class TestCmdSetMergers(TestCase):
"Test merging of cmdsets"
def setUp(self):
super(TestCmdSetMergers, self).setUp()
self.cmdset_a = _CmdSetA()
self.cmdset_b = _CmdSetB()
self.cmdset_c = _CmdSetC()
self.cmdset_d = _CmdSetD()
def test_union(self):
a, c = self.cmdset_a, self.cmdset_c
cmdset_f = a + c # same-prio
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 2)
cmdset_f = c + a # same-prio, inverse order
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
a.priority = 1
cmdset_f = a + c # high prio A
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
def test_intersect(self):
a, c = self.cmdset_a, self.cmdset_c
a.mergetype = "Intersect"
cmdset_f = a + c # same-prio - c's Union kicks in
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 2)
cmdset_f = c + a # same-prio - a's Intersect kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
a.priority = 1
cmdset_f = a + c # high prio A, intersect kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
def test_replace(self):
a, c = self.cmdset_a, self.cmdset_c
c.mergetype = "Replace"
cmdset_f = a + c # same-prio. C's Replace kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 0)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 2)
cmdset_f = c + a # same-prio. A's Union kicks in
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
c.priority = 1
cmdset_f = c + a # c higher prio. C's Replace kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 0)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 2)
def test_remove(self):
a, c = self.cmdset_a, self.cmdset_c
c.mergetype = "Remove"
cmdset_f = a + c # same-prio. C's Remove kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
cmdset_f = c + a # same-prio. A's Union kicks in
self.assertEqual(len(cmdset_f.commands), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 4)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
c.priority = 1
cmdset_f = c + a # c higher prio. C's Remove kicks in
self.assertEqual(len(cmdset_f.commands), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "A"), 2)
self.assertEqual(sum(1 for cmd in cmdset_f.commands if cmd.from_cmdset == "C"), 0)
def test_order(self):
"Merge in reverse- and forward orders, same priorities"
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
cmdset_f = d + c + b + a # merge in reverse order of priority
self.assertEqual(cmdset_f.priority, 0)
self.assertEqual(cmdset_f.mergetype, "Union")
self.assertEqual(len(cmdset_f.commands), 4)
        self.assertTrue(all(cmd.from_cmdset == "A" for cmd in cmdset_f.commands))
cmdset_f = a + b + c + d # merge in order of priority
self.assertEqual(cmdset_f.priority, 0)
self.assertEqual(cmdset_f.mergetype, "Union")
self.assertEqual(len(cmdset_f.commands), 4) # duplicates setting from A transfers
        self.assertTrue(all(cmd.from_cmdset == "D" for cmd in cmdset_f.commands))
def test_priority_order(self):
"Merge in reverse- and forward order with well-defined prioritities"
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
a.priority = 2
b.priority = 1
c.priority = 0
d.priority = -1
cmdset_f = d + c + b + a # merge in reverse order of priority
self.assertEqual(cmdset_f.priority, 2)
self.assertEqual(cmdset_f.mergetype, "Union")
self.assertEqual(len(cmdset_f.commands), 4)
        self.assertTrue(all(cmd.from_cmdset == "A" for cmd in cmdset_f.commands))
cmdset_f = a + b + c + d # merge in order of priority
self.assertEqual(cmdset_f.priority, 2)
self.assertEqual(cmdset_f.mergetype, "Union")
self.assertEqual(len(cmdset_f.commands), 4)
        self.assertTrue(all(cmd.from_cmdset == "A" for cmd in cmdset_f.commands))
def test_option_transfer(self):
"Test transfer of cmdset options"
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
# the options should pass through since none of the other cmdsets care
# to change the setting from None.
a.no_exits = True
a.no_objs = True
a.no_channels = True
a.duplicates = True
cmdset_f = d + c + b + a # reverse, same-prio
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertTrue(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 8)
cmdset_f = a + b + c + d # forward, same-prio
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertFalse(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
a.priority = 2
b.priority = 1
c.priority = 0
d.priority = -1
cmdset_f = d + c + b + a # reverse, A top priority
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertTrue(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
cmdset_f = a + b + c + d # forward, A top priority. This never happens in practice.
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertTrue(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
a.priority = -1
b.priority = 0
c.priority = 1
d.priority = 2
cmdset_f = d + c + b + a # reverse, A low prio. This never happens in practice.
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertFalse(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
cmdset_f = a + b + c + d # forward, A low prio
self.assertTrue(cmdset_f.no_exits)
self.assertTrue(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertFalse(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
c.no_exits = False
b.no_objs = False
d.duplicates = False
# higher-prio sets will change the option up the chain
cmdset_f = a + b + c + d # forward, A low prio
self.assertFalse(cmdset_f.no_exits)
self.assertFalse(cmdset_f.no_objs)
self.assertTrue(cmdset_f.no_channels)
self.assertFalse(cmdset_f.duplicates)
self.assertEqual(len(cmdset_f.commands), 4)
a.priority = 0
b.priority = 0
c.priority = 0
d.priority = 0
c.duplicates = True
cmdset_f = d + b + c + a # two last mergers duplicates=True
self.assertEqual(len(cmdset_f.commands), 10)
# test cmdhandler functions
from evennia.commands import cmdhandler
from twisted.trial.unittest import TestCase as TwistedTestCase
class TestGetAndMergeCmdSets(TwistedTestCase, EvenniaTest):
"Test the cmdhandler.get_and_merge_cmdsets function."
def setUp(self):
super(TestGetAndMergeCmdSets, self).setUp()
self.cmdset_a = _CmdSetA()
self.cmdset_b = _CmdSetB()
self.cmdset_c = _CmdSetC()
self.cmdset_d = _CmdSetD()
def set_cmdsets(self, obj, *args):
"Set cmdets on obj in the order given in *args"
for cmdset in args:
obj.cmdset.add(cmdset)
def test_from_session(self):
a = self.cmdset_a
a.no_channels = True
self.set_cmdsets(self.session, a)
deferred = cmdhandler.get_and_merge_cmdsets(self.session, self.session, None, None, "session", "")
def _callback(cmdset):
self.assertEqual(cmdset.key, "A")
deferred.addCallback(_callback)
return deferred
def test_from_player(self):
from evennia.commands.default.cmdset_player import PlayerCmdSet
a = self.cmdset_a
a.no_channels = True
self.set_cmdsets(self.player, a)
deferred = cmdhandler.get_and_merge_cmdsets(self.player, None, self.player, None, "player", "")
# get_and_merge_cmdsets converts to lower-case internally.
def _callback(cmdset):
pcmdset = PlayerCmdSet()
pcmdset.at_cmdset_creation()
pcmds = [cmd.key for cmd in pcmdset.commands] + ["a", "b", "c", "d"]
self.assertTrue(all(cmd.key in pcmds for cmd in cmdset.commands))
#_callback = lambda cmdset: self.assertEqual(sum(1 for cmd in cmdset.commands if cmd.key in ("a", "b", "c", "d")), 4)
deferred.addCallback(_callback)
return deferred
def test_from_object(self):
self.set_cmdsets(self.obj1, self.cmdset_a)
deferred = cmdhandler.get_and_merge_cmdsets(self.obj1, None, None, self.obj1, "object", "")
# get_and_merge_cmdsets converts to lower-case internally.
_callback = lambda cmdset: self.assertEqual(sum(1 for cmd in cmdset.commands if cmd.key in ("a", "b", "c", "d")), 4)
deferred.addCallback(_callback)
return deferred
def test_multimerge(self):
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
a.no_exits = True
a.no_channels = True
self.set_cmdsets(self.obj1, a, b, c, d)
deferred = cmdhandler.get_and_merge_cmdsets(self.obj1, None, None, self.obj1, "object", "")
def _callback(cmdset):
self.assertTrue(cmdset.no_exits)
self.assertTrue(cmdset.no_channels)
self.assertEqual(cmdset.key, "D")
deferred.addCallback(_callback)
return deferred
def test_autocmdsets(self):
import evennia
from evennia.commands.default.cmdset_player import PlayerCmdSet
from evennia.comms.channelhandler import CHANNEL_HANDLER
testchannel = evennia.create_channel("channeltest", locks="listen:all();send:all()")
CHANNEL_HANDLER.add(testchannel)
CHANNEL_HANDLER.update()
self.assertTrue(testchannel.connect(self.player))
self.assertTrue(testchannel.has_connection(self.player))
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
self.set_cmdsets(self.player, a, b, c, d)
deferred = cmdhandler.get_and_merge_cmdsets(self.session, self.session, self.player, self.char1, "session", "")
def _callback(cmdset):
pcmdset = PlayerCmdSet()
pcmdset.at_cmdset_creation()
pcmds = [cmd.key for cmd in pcmdset.commands] + ["a", "b", "c", "d"] + ["out"]
self.assertTrue(all(cmd.key or hasattr(cmd, "is_channel") in pcmds for cmd in cmdset.commands))
self.assertTrue(any(hasattr(cmd, "is_channel") for cmd in cmdset.commands))
deferred.addCallback(_callback)
return deferred
def test_duplicates(self):
a, b, c, d = self.cmdset_a, self.cmdset_b, self.cmdset_c, self.cmdset_d
a.no_exits = True
a.no_channels = True
b.duplicates = True
d.duplicates = True
self.set_cmdsets(self.obj1, a, b, c, d)
deferred = cmdhandler.get_and_merge_cmdsets(self.obj1, None, None, self.obj1, "object", "")
def _callback(cmdset):
self.assertEqual(len(cmdset.commands), 9)
deferred.addCallback(_callback)
return deferred
|
|
from __future__ import unicode_literals
import sys
import re
from StringIO import StringIO
class DecompilerBase(object):
def __init__(self, out_file=None, indentation=' ', match_line_numbers=False):
self.out_file = out_file or sys.stdout
self.indentation = indentation
self.match_line_numbers = match_line_numbers
self.skip_indent_until_write = False
self.linenumber = 0
self.block_stack = []
self.index_stack = []
def dump(self, ast, indent_level=0, linenumber=1, skip_indent_until_write=False):
"""
Write the decompiled representation of `ast` into the opened file given in the constructor
"""
self.indent_level = indent_level
self.linenumber = linenumber
self.skip_indent_until_write = skip_indent_until_write
if not isinstance(ast, (tuple, list)):
ast = [ast]
self.print_nodes(ast)
return self.linenumber
def write(self, string):
"""
Shorthand method for writing `string` to the file
"""
string = unicode(string)
self.linenumber += string.count('\n')
self.skip_indent_until_write = False
self.out_file.write(string)
def save_state(self):
"""
Save our current state.
"""
state = (self.out_file, self.skip_indent_until_write, self.linenumber,
self.block_stack, self.index_stack, self.indent_level)
self.out_file = StringIO()
return state
def commit_state(self, state):
"""
Commit changes since a saved state.
"""
out_file = state[0]
out_file.write(self.out_file.getvalue())
self.out_file = out_file
def rollback_state(self, state):
"""
Roll back to a saved state.
"""
(self.out_file, self.skip_indent_until_write, self.linenumber,
self.block_stack, self.index_stack, self.indent_level) = state
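    # Typical pattern for the three methods above (illustrative): take a
    # snapshot with save_state(), write output speculatively into the
    # temporary buffer, then either commit_state(state) to flush it into the
    # real stream or rollback_state(state) to discard it.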
def advance_to_line(self, linenumber):
if self.match_line_numbers and self.linenumber < linenumber:
# Stop one line short, since the call to indent() will advance the last line.
# Note that if self.linenumber == linenumber - 1, this will write the empty string.
# This is to make sure that skip_indent_until_write is cleared in that case.
self.write("\n" * (linenumber - self.linenumber - 1))
def indent(self):
"""
Shorthand method for pushing a newline and indenting to the proper indent level
Setting skip_indent_until_write causes calls to this method to be ignored until something
calls the write method
"""
if not self.skip_indent_until_write:
self.write('\n' + self.indentation * self.indent_level)
def print_nodes(self, ast, extra_indent=0):
# This node is a list of nodes
# Print every node
self.indent_level += extra_indent
self.block_stack.append(ast)
self.index_stack.append(0)
for i, node in enumerate(ast):
self.index_stack[-1] = i
self.print_node(node)
self.block_stack.pop()
self.index_stack.pop()
self.indent_level -= extra_indent
@property
def block(self):
return self.block_stack[-1]
@property
def index(self):
return self.index_stack[-1]
@property
def parent(self):
if len(self.block_stack) < 2:
return None
return self.block_stack[-2][self.index_stack[-2]]
def print_unknown(self, ast):
        # If we encounter a node of an unknown type, print a warning and insert a placeholder
print "Unknown AST node: %s" % str(type(ast))
self.indent()
self.write("<<<UNKNOWN NODE %s>>>" % str(type(ast)))
def print_node(self, ast):
raise NotImplementedError()
class First(object):
    # An often used pattern is that on the first item
    # of a loop something special has to be done. This class
    # provides an easy object which on the first call returns its
    # yes_value (True by default) and on any subsequent call its
    # no_value (False by default); see the example after the class.
def __init__(self, yes_value=True, no_value=False):
self.yes_value = yes_value
self.no_value = no_value
self.first = True
def __call__(self):
if self.first:
self.first = False
return self.yes_value
else:
return self.no_value
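# Small illustration of the First idiom used by the reconstruct_* helpers
# below: the first call returns yes_value, every later call returns no_value,
# which makes it a natural comma separator.
def example_first_separator(words):
    sep = First("", ", ")
    return "".join(sep() + word for word in words)  # ["a", "b"] -> "a, b"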
def reconstruct_paraminfo(paraminfo):
if paraminfo is None:
return ""
rv = ["("]
sep = First("", ", ")
positional = [i for i in paraminfo.parameters if i[0] in paraminfo.positional]
nameonly = [i for i in paraminfo.parameters if i not in positional]
for parameter in positional:
rv.append(sep())
rv.append(parameter[0])
if parameter[1] is not None:
rv.append("=%s" % parameter[1])
if paraminfo.extrapos:
rv.append(sep())
rv.append("*%s" % paraminfo.extrapos)
if nameonly:
if not paraminfo.extrapos:
rv.append(sep())
rv.append("*")
        for param in nameonly:
            rv.append(sep())
            rv.append(param[0])
            if param[1] is not None:
                rv.append("=%s" % param[1])
if paraminfo.extrakw:
rv.append(sep())
rv.append("**%s" % paraminfo.extrakw)
rv.append(")")
return "".join(rv)
def reconstruct_arginfo(arginfo):
if arginfo is None:
return ""
rv = ["("]
sep = First("", ", ")
for (name, val) in arginfo.arguments:
rv.append(sep())
if name is not None:
rv.append("%s=" % name)
rv.append(val)
if arginfo.extrapos:
rv.append(sep())
rv.append("*%s" % arginfo.extrapos)
if arginfo.extrakw:
rv.append(sep())
rv.append("**%s" % arginfo.extrakw)
rv.append(")")
return "".join(rv)
def string_escape(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
return s
# keywords used by ren'py's parser
KEYWORDS = set(['$', 'as', 'at', 'behind', 'call', 'expression', 'hide',
'if', 'in', 'image', 'init', 'jump', 'menu', 'onlayer',
'python', 'return', 'scene', 'set', 'show', 'with',
'while', 'zorder', 'transform'])
word_regexp = ur'[a-zA-Z_\u00a0-\ufffd][0-9a-zA-Z_\u00a0-\ufffd]*'
def simple_expression_guard(s):
    # Some things we deal with are supposed to be parsed by
    # ren'py's Lexer.simple_expression but actually cannot
    # be parsed by it. Figure out if this is the case.
    # A slightly more naive approach would be to check
    # for spaces in it and surround it with () if necessary,
    # but we're not naive. See the example after the Lexer class.
s = s.strip()
if Lexer(s).simple_expression():
return s
else:
return "(%s)" % s
def split_logical_lines(s):
return Lexer(s).split_logical_lines()
class Lexer(object):
# special lexer for simple_expressions the ren'py way
# false negatives aren't dangerous. but false positives are
def __init__(self, string):
self.pos = 0
self.length = len(string)
self.string = string
def re(self, regexp):
# see if regexp matches at self.string[self.pos].
# if it does, increment self.pos
if self.length == self.pos:
return None
match = re.compile(regexp, re.DOTALL).match(self.string, self.pos)
if not match:
return None
self.pos = match.end()
return match.group(0)
def eol(self):
# eat the next whitespace and check for the end of this simple_expression
self.re(ur"(\s+|\\\n)+")
return self.pos >= self.length
def match(self, regexp):
# strip whitespace and match regexp
self.re(ur"(\s+|\\\n)+")
return self.re(regexp)
def python_string(self, clear_whitespace=True):
# parse strings the ren'py way (don't parse docstrings, no b/r in front allowed)
if clear_whitespace:
return self.match(ur"""(u?(?P<a>"|').*?(?<=[^\\])(?:\\\\)*(?P=a))""")
else:
return self.re(ur"""(u?(?P<a>"|').*?(?<=[^\\])(?:\\\\)*(?P=a))""")
def container(self):
        # parses something enclosed by [], () or {}'s (skipping strings and nested containers)
containers = {"{": "}", "[": "]", "(": ")"}
if self.eol():
return None
c = self.string[self.pos]
if c not in containers:
return None
self.pos += 1
c = containers[c]
while not self.eol():
if c == self.string[self.pos]:
self.pos += 1
return True
if self.python_string() or self.container():
continue
self.pos += 1
return None
def number(self):
# parses a number, float or int (but not forced long)
return self.match(r'(\+|\-)?(\d+\.?\d*|\.\d+)(?:[eE][-+]?\d+)?')
def word(self):
# parses a word
return self.match(word_regexp)
def name(self):
# parses a word unless it's in KEYWORDS.
pos = self.pos
word = self.word()
if word in KEYWORDS:
self.pos = pos
return None
return word
def simple_expression(self):
# test if the start string was a simple expression
start = self.pos
        # check if there's anything in here actually
if self.eol():
return False
# parse anything which can be called or have attributes requested
if not(self.python_string() or
self.number() or
self.container() or
self.name()):
return False
while not self.eol():
# if the previous was followed by a dot, there should be a word after it
if self.match(r'\.'):
if not self.name():
# ren'py errors here. I just stop caring
return False
continue
# parses slices, function calls, and postfix {}
if self.container():
continue
break
# are we at the end of the simple expression?
return self.eol()
def split_logical_lines(self):
# split a sequence in logical lines
# this behaves similarly to .splitlines() which will ignore
# a trailing \n
lines = []
contained = 0
startpos = self.pos
while self.pos < self.length:
c = self.string[self.pos]
if c == '\n' and not contained and (not self.pos or self.string[self.pos - 1] != '\\'):
lines.append(self.string[startpos:self.pos])
# the '\n' is not included in the emitted line
self.pos += 1
startpos = self.pos
continue
if c in ('(', '[', '{'):
contained += 1
self.pos += 1
continue
if c in (')', ']', '}') and contained:
contained -= 1
self.pos += 1
continue
if c == '#':
self.re("[^\n]*")
continue
if self.python_string(False):
continue
self.re(r'\w+| +|.') # consume a word, whitespace or one symbol
if self.pos != startpos:
lines.append(self.string[startpos:])
return lines
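# Quick illustration of simple_expression_guard and the Lexer above
# (illustrative): an attribute/call chain is accepted as-is, while anything
# the lexer cannot parse as a simple expression gets wrapped in parentheses.
def example_simple_expression_guard():
    return (simple_expression_guard("foo.bar(1)"),     # -> "foo.bar(1)"
            simple_expression_guard("1 if x else 2"))  # -> "(1 if x else 2)"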
# Versions of Ren'Py prior to 6.17 put trailing whitespace on the end of
# simple_expressions. This class attempts to preserve the amount of
# whitespace if possible.
class WordConcatenator(object):
def __init__(self, needs_space):
self.words = []
self.needs_space = needs_space
def append(self, *args):
args = filter(None, args)
if not args:
return
if self.needs_space:
self.words.append(' ')
self.words.extend((i if i[-1] == ' ' else (i + ' ')) for i in args[:-1])
self.words.append(args[-1])
self.needs_space = args[-1][-1] != ' '
def join(self):
return ''.join(self.words)
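# Sketch of WordConcatenator in use (illustrative): an existing trailing space
# is kept as-is, while words without one get a single space added after them.
def example_word_concatenation():
    wc = WordConcatenator(needs_space=False)
    wc.append("show ", "eileen", "happy")
    return wc.join()  # -> "show eileen happy"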
# Dict subclass used as a decorator-based registry: calling an instance with a
# name returns a decorator that stores the function under that name. See the
# example after the class.
class Dispatcher(dict):
def __call__(self, name):
def closure(func):
self[name] = func
return func
return closure
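# Minimal use of Dispatcher as a decorator-based registry (the names here are
# purely illustrative):
def example_dispatcher():
    printers = Dispatcher()
    @printers('say')
    def print_say(node):
        return "say %s" % node
    return printers['say']("hello")  # -> "say hello"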
|
|
'''
Created on Dec 12, 2013
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import os, sys, re
from arelle import PluginManager
from arelle import ModelDocument, XbrlConst, XmlUtil, UrlUtil, LeiUtil
from arelle.HashUtil import md5hash, Md5Sum
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelLocator, ModelResource
from arelle.ModelFormulaObject import Aspect
from arelle.ModelObject import ModelObject
from arelle.ModelRelationshipSet import ModelRelationshipSet
from arelle.ModelValue import qname, qnameEltPfxName
from arelle.ValidateUtr import ValidateUtr
from arelle.XbrlConst import qnEnumerationItemTypes
from arelle.ModelInstanceObject import ModelFact
try:
import regex as re
except ImportError:
import re
from lxml import etree
from collections import defaultdict
qnFIndicators = qname("{http://www.eurofiling.info/xbrl/ext/filing-indicators}find:fIndicators")
qnFilingIndicator = qname("{http://www.eurofiling.info/xbrl/ext/filing-indicators}find:filingIndicator")
qnPercentItemType = qname("{http://www.xbrl.org/dtr/type/numeric}num:percentItemType")
qnPureItemType = qname("{http://www.xbrl.org/2003/instance}xbrli:pureItemType")
qnMetReportingCurrency = qname("{http://eiopa.europa.eu/xbrl/s2md/dict/met}met:ei1930")
integerItemTypes = {"integerItemType", "nonPositiveIntegerItemType", "negativeIntegerItemType",
"longItemType", "intItemType", "shortItemType", "byteItemType",
"nonNegativeIntegerItemType", "unsignedLongItemType", "unsignedIntItemType",
"unsignedShortItemType", "unsignedByteItemType", "positiveIntegerItemType"}
schemaRefDatePattern = re.compile(r".*/([0-9]{4}-[01][0-9]-[0-3][0-9])/.*")
s_2_18_c_a_met = {
    # In templates S.06.02, SE.06.02, S.08.01, S.08.02, S.11.01 and E.01.01,
    # data points with the data type 'monetary' shall be expressed in units
    # with at least two decimals.  The member list below was produced by:
    #
    #   select distinct mem.MemberXBRLCode from mOrdinateCategorisation oc
    #   inner join mAxisOrdinate ao on ao.OrdinateID = oc.OrdinateID
    #   inner join mTableAxis ta on ta.AxisID = ao.AxisID
    #   inner join mTable t on t.TableID = ta.TableID
    #   inner join mMember mem on mem.MemberID = oc.MemberID
    #   inner join mMetric met on met.CorrespondingMemberID = mem.MemberID and met.DataType = 'Monetary'
    #   where (t.TableCode like 'S.06.02%' or t.TableCode like 'SE.06.02%' or t.TableCode like 'S.08.01%' or t.TableCode like 'S.08.02%' or t.TableCode like 'S.11.01%' or t.TableCode like 'E.01.01%') and mem.MemberXBRLCode not like 's2hd_met%'
    #   order by t.TableCode;
    #
    # (kept as comments so the text is not implicitly concatenated with the first set member)
    "s2md_met:mi1088", "s2md_met:mi1096", "s2md_met:mi1101", "s2md_met:mi1110",
    "s2md_met:mi1112", "s2md_met:mi1115", "s2md_met:mi1117", "s2md_met:mi1126",
    "s2md_met:mi1127", "s2md_met:mi1128", "s2md_met:mi1131"}
CANONICAL_PREFIXES = {
"http://www.xbrl.org/2003/iso4217": "iso4217",
"http://www.xbrl.org/2003/linkbase": "link",
"http://xbrl.org/2006/xbrldi": "xbrldi",
"http://www.xbrl.org/2003/instance": "xbrli",
"http://www.w3.org/1999/xlink": "xlink"}
def dislosureSystemTypes(disclosureSystem, *args, **kwargs):
# return ((disclosure system name, variable name), ...)
return (("EBA", "EBA"),
("EIOPA", "EIOPA"))
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
return os.path.join(os.path.dirname(__file__), "config.xml")
def validateSetup(val, parameters=None, *args, **kwargs):
val.validateEBA = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EBA", False)
val.validateEIOPA = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EIOPA", False)
if not (val.validateEBA or val.validateEIOPA):
return
val.validateUTR = False # do not use default UTR validation, it's at error level and not streamable
val.utrValidator = ValidateUtr(val.modelXbrl,
"WARNING", # EBA specifies SHOULD on UTR validation
"EBA.2.23") # override utre error-severity message code
val.isEIOPAfullVersion = val.isEIOPA_2_0_1 = False
modelDocument = val.modelXbrl.modelDocument
if modelDocument.type == ModelDocument.Type.INSTANCE:
for doc, docRef in modelDocument.referencesDocument.items():
if "href" in docRef.referenceTypes:
if docRef.referringModelObject.localName == "schemaRef":
_match = schemaRefDatePattern.match(doc.uri)
if _match:
val.isEIOPAfullVersion = _match.group(1) > "2015-02-28"
val.isEIOPA_2_0_1 = _match.group(1) >= "2015-10-21"
break
else:
val.modelXbrl.error( ("EBA.S.1.5.a/EBA.S.1.5.b", "EIOPA.S.1.5.a/EIOPA.S.1.5.b"),
_('The link:schemaRef element in submitted instances MUST resolve to the full published entry point URL, this schemaRef is missing date portion: %(schemaRef)s.'),
modelObject=modelDocument, schemaRef=doc.uri)
val.qnDimAF = val.qnDimOC = val.qnCAx1 = None
_nsmap = val.modelXbrl.modelDocument.xmlRootElement.nsmap
if val.isEIOPA_2_0_1:
_hasPiInstanceGenerator = False
for pi in modelDocument.processingInstructions:
if pi.target == "instance-generator":
_hasPiInstanceGenerator = True
if not all(pi.get(attr) for attr in ("id", "version", "creationdate")):
val.modelXbrl.warning("EIOPA.S.2.23",
_('The processing instruction instance-generator SHOULD contain attributes "id", "version" and "creationdate".'),
modelObject=modelDocument)
if not _hasPiInstanceGenerator:
val.modelXbrl.warning("EIOPA.S.2.23",
_('The instance SHOULD include a processing instruction "instance-generator".'),
modelObject=modelDocument)
val.qnDimAF = qname("s2c_dim:AF", _nsmap)
val.qnDimOC = qname("s2c_dim:OC", _nsmap)
val.qnCAx1 = qname("s2c_CA:x1", _nsmap)
elif val.validateEBA:
val.eba_qnDimCUS = qname("eba_dim:CUS", _nsmap)
val.eba_qnDimCCA = qname("eba_dim:CCA", _nsmap)
val.eba_qnCAx1 = qname("eba_CA:x1", _nsmap)
val.prefixNamespace = {}
val.namespacePrefix = {}
val.idObjects = {}
val.typedDomainQnames = set()
val.typedDomainElements = set()
for modelConcept in val.modelXbrl.qnameConcepts.values():
if modelConcept.isTypedDimension:
typedDomainElement = modelConcept.typedDomainElement
if isinstance(typedDomainElement, ModelConcept):
val.typedDomainQnames.add(typedDomainElement.qname)
val.typedDomainElements.add(typedDomainElement)
val.filingIndicators = {}
val.numFilingIndicatorTuples = 0
val.cntxEntities = set()
val.cntxDates = defaultdict(set)
val.unusedCntxIDs = set()
val.unusedUnitIDs = set()
val.currenciesUsed = {}
val.reportingCurrency = None
val.namespacePrefixesUsed = defaultdict(set)
val.prefixesUnused = set()
for prefix, ns in _nsmap.items():
val.prefixesUnused.add(prefix)
val.namespacePrefixesUsed[ns].add(prefix)
val.firstFactObjectIndex = sys.maxsize
val.firstFact = None
val.footnotesRelationshipSet = ModelRelationshipSet(val.modelXbrl, "XBRL-footnotes")
# re-init batch flag to enable more than one context/unit validation sessions for the same instance.
# (note that this monkey-patching would give trouble on two concurrent validation sessions of the same instance)
for cntx in val.modelXbrl.contexts.values():
if hasattr(cntx, "_batchChecked"):
cntx._batchChecked = False
for unit in val.modelXbrl.units.values():
if hasattr(unit, "_batchChecked"):
unit._batchChecked = False
def prefixUsed(val, ns, prefix):
val.namespacePrefixesUsed[ns].add(prefix)
for _prefix in val.namespacePrefixesUsed[ns]:
val.prefixesUnused.discard(_prefix)
def validateStreamingFacts(val, factsToCheck, *args, **kwargs):
if not (val.validateEBA or val.validateEIOPA):
return True
validateFacts(val, factsToCheck)
def validateFacts(val, factsToCheck):
# may be called in streaming batches or all at end (final) if not streaming
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
# note EBA 2.1 is in ModelDocument.py
timelessDatePattern = re.compile(r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})\s*$")
for cntx in modelXbrl.contexts.values():
if getattr(cntx, "_batchChecked", False):
continue # prior streaming batch already checked
cntx._batchChecked = True
val.cntxEntities.add(cntx.entityIdentifier)
dateElts = XmlUtil.descendants(cntx, XbrlConst.xbrli, ("startDate","endDate","instant"))
if any(not timelessDatePattern.match(e.textValue) for e in dateElts):
modelXbrl.error(("EBA.2.10","EIOPA.2.10"),
_('Period dates must be whole dates without time or timezone: %(dates)s.'),
modelObject=cntx, dates=", ".join(e.text for e in dateElts))
if cntx.isForeverPeriod:
modelXbrl.error(("EBA.2.11","EIOPA.N.2.11"),
_('Forever context period is not allowed.'),
modelObject=cntx)
elif cntx.isStartEndPeriod:
modelXbrl.error(("EBA.2.13","EIOPA.N.2.11"),
_('Start-End (flow) context period is not allowed.'),
modelObject=cntx)
elif cntx.isInstantPeriod:
# cannot pass context object to final() below, for error logging, if streaming mode
val.cntxDates[cntx.instantDatetime].add(modelXbrl if getattr(val.modelXbrl, "isStreamingMode", False)
else cntx)
if cntx.hasSegment:
modelXbrl.error(("EBA.2.14","EIOPA.N.2.14"),
_("Contexts MUST NOT contain xbrli:segment values: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id)
if cntx.nonDimValues("scenario"):
modelXbrl.error(("EBA.2.15","EIOPA.S.2.15" if val.isEIOPAfullVersion else "EIOPA.N.2.15"),
_("Contexts MUST NOT contain non-dimensional xbrli:scenario values: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id,
messageCodes=("EBA.2.15","EIOPA.N.2.15","EIOPA.S.2.15"))
val.unusedCntxIDs.add(cntx.id)
if val.isEIOPA_2_0_1 and len(cntx.id) > 128:
modelXbrl.warning("EIOPA.S.2.6",
_("Contexts IDs SHOULD be short: %(cntx)s.'"),
modelObject=cntx, cntx=cntx.id)
for unit in modelXbrl.units.values():
if getattr(unit, "_batchChecked", False):
continue # prior streaming batch already checked
unit._batchChecked = True
val.unusedUnitIDs.add(unit.id)
factsByQname = defaultdict(set) # top level for this
for f in factsToCheck:
factsByQname[f.qname].add(f)
val.unusedCntxIDs.discard(f.contextID)
val.unusedUnitIDs.discard(f.unitID)
if f.objectIndex < val.firstFactObjectIndex:
val.firstFactObjectIndex = f.objectIndex
val.firstFact = f
for fIndicators in factsByQname[qnFIndicators]:
val.numFilingIndicatorTuples += 1
for fIndicator in fIndicators.modelTupleFacts:
_value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
_filed = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
if _value in val.filingIndicators:
modelXbrl.error(("EBA.1.6.1", "EIOPA.1.6.1"),
_('Multiple filing indicators facts for indicator %(filingIndicator)s.'),
modelObject=(fIndicator, val.filingIndicators[_value]), filingIndicator=_value)
if _filed and not val.filingIndicators[_value]:
val.filingIndicators[_value] = _filed #set to filed if any of the multiple indicators are filed=true
else: # not a duplicate filing indicator
val.filingIndicators[_value] = _filed
val.unusedCntxIDs.discard(fIndicator.contextID)
cntx = fIndicator.context
if cntx is not None and (cntx.hasSegment or cntx.hasScenario):
modelXbrl.error("EIOPA.N.1.6.d" if val.isEIOPAfullVersion else "EIOPA.S.1.6.d",
_('Filing indicators must not contain segment or scenario elements %(filingIndicator)s.'),
modelObject=fIndicator, filingIndicator=_value)
        # Using model object ids is not reliable if the instance document has been edited
prevObj = fIndicators.getprevious()
while prevObj is not None:
if isinstance(prevObj, ModelFact) and prevObj.qname != qnFIndicators:
modelXbrl.warning("EIOPA.1.6.2",
_('Filing indicators should precede first fact %(firstFact)s.'),
modelObject=(fIndicators, val.firstFact), firstFact=val.firstFact.qname)
break
prevObj = prevObj.getprevious()
if val.isEIOPAfullVersion:
for fIndicator in factsByQname[qnFilingIndicator]:
if fIndicator.getparent().qname == XbrlConst.qnXbrliXbrl:
_isPos = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
_value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
modelXbrl.error("EIOPA.1.6.a" if _isPos else "EIOPA.1.6.b",
_('Filing indicators must be in a tuple %(filingIndicator)s.'),
modelObject=fIndicator, filingIndicator=_value,
messageCodes=("EIOPA.1.6.a", "EIOPA.1.6.b"))
otherFacts = {} # (contextHash, unitHash, xmlLangHash) : fact
nilFacts = []
stringFactsWithXmlLang = []
nonMonetaryNonPureFacts = []
    for _qname, facts in factsByQname.items():  # _qname unused; avoids shadowing ModelValue.qname
for f in facts:
            if f.qname == qnFIndicators or f.qname == qnFilingIndicator:
                continue # skip root-level and non-root-level filing indicators
if modelXbrl.skipDTS:
c = f.qname.localName[0]
isNumeric = c in ('m', 'p', 'r', 'i')
isMonetary = c == 'm'
isInteger = c == 'i'
isPercent = c == 'p'
isString = c == 's'
isEnum = c == 'e'
else:
concept = f.concept
if concept is not None:
isNumeric = concept.isNumeric
isMonetary = concept.isMonetary
isInteger = concept.baseXbrliType in integerItemTypes
isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
isEnum = concept.typeQname in qnEnumerationItemTypes
else:
isNumeric = isString = isEnum = False # error situation
k = (f.getparent().objectIndex,
f.qname,
f.context.contextDimAwareHash if f.context is not None else None,
f.unit.hash if f.unit is not None else None,
hash(f.xmlLang))
if k not in otherFacts:
otherFacts[k] = {f}
else:
matches = [o
for o in otherFacts[k]
if (f.getparent().objectIndex == o.getparent().objectIndex and
f.qname == o.qname and
f.context.isEqualTo(o.context) if f.context is not None and o.context is not None else True) and
# (f.unit.isEqualTo(o.unit) if f.unit is not None and o.unit is not None else True) and
(f.xmlLang == o.xmlLang)]
if matches:
contexts = [f.contextID] + [o.contextID for o in matches]
modelXbrl.error(("EBA.2.16", "EIOPA.S.2.16" if val.isEIOPAfullVersion else "EIOPA.S.2.16.a"),
_('Facts are duplicates %(fact)s contexts %(contexts)s.'),
modelObject=[f] + matches, fact=f.qname, contexts=', '.join(contexts),
messageCodes=("EBA.2.16", "EIOPA.S.2.16", "EIOPA.S.2.16.a"))
else:
otherFacts[k].add(f)
if isNumeric:
if f.precision:
modelXbrl.error(("EBA.2.17", "EIOPA.2.18.a"),
_("Numeric fact %(fact)s of context %(contextID)s has a precision attribute '%(precision)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, precision=f.precision)
if f.decimals and not f.isNil: # in XbrlDpmSqlDB for 2_0_1
if f.decimals == "INF":
if not val.isEIOPAfullVersion:
modelXbrl.error("EIOPA.S.2.18.f",
_("Monetary fact %(fact)s of context %(contextID)s has a decimal attribute INF: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
else:
try:
xValue = f.xValue
dec = int(f.decimals)
if isMonetary:
if val.isEIOPA_2_0_1:
_absXvalue = abs(xValue)
if str(f.qname) in s_2_18_c_a_met:
dMin = 2
elif _absXvalue >= 100000000:
dMin = -4
elif 100000000 > _absXvalue >= 1000000:
dMin = -3
elif 1000000 > _absXvalue >= 1000:
dMin = -2
else:
dMin = -1
if dMin > dec:
modelXbrl.error("EIOPA.S.2.18.c",
_("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute less than minimum %(minimumDecimals)s: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, minimumDecimals=dMin, decimals=f.decimals)
elif dec < -3:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.c"),
_("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute < -3: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
else: # apply dynamic decimals check
if -.1 < xValue < .1: dMin = 2
elif -1 < xValue < 1: dMin = 1
elif -10 < xValue < 10: dMin = 0
elif -100 < xValue < 100: dMin = -1
elif -1000 < xValue < 1000: dMin = -2
else: dMin = -3
if dMin > dec:
modelXbrl.warning("EIOPA:factDecimalsWarning",
_("Monetary fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
elif isInteger:
if dec != 0:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.d"),
_("Integer fact %(fact)s of context %(contextID)s has a decimals attribute \u2260 0: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
elif isPercent:
if dec < 4:
modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.e"),
_("Percent fact %(fact)s of context %(contextID)s has a decimals attribute < 4: '%(decimals)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
if val.isEIOPA_2_0_1 and xValue > 1:
modelXbrl.warning(("EIOPA.3.2.b"),
_("Percent fact %(fact)s of context %(contextID)s appears to be over 100% = 1.0: '%(value)s'"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue)
else:
if -.001 < xValue < .001: dMin = 4
elif -.01 < xValue < .01: dMin = 3
elif -.1 < xValue < .1: dMin = 2
elif -1 < xValue < 1: dMin = 1
else: dMin = 0
if dMin > dec:
modelXbrl.warning("EIOPA:factDecimalsWarning",
_("Numeric fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
except (AttributeError, ValueError):
pass # should have been reported as a schema error by loader
'''' (not intended by EBA 2.18, paste here is from EFM)
if not f.isNil and getattr(f,"xValid", 0) == 4:
try:
insignificance = insignificantDigits(f.xValue, decimals=f.decimals)
if insignificance: # if not None, returns (truncatedDigits, insiginficantDigits)
modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s has nonzero digits in insignificant portion %(insignificantDigits)s."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals,
value=f1.xValue, truncatedDigits=insignificance[0], insignificantDigits=insignificance[1])
except (ValueError,TypeError):
modelXbrl.error(("EBA.2.18"),
_("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s causes Value Error exception."),
modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals, value=f1.value)
'''
unit = f.unit
if unit is not None:
if isMonetary:
if unit.measures[0]:
_currencyMeasure = unit.measures[0][0]
if val.isEIOPA_2_0_1 and f.context is not None:
if f.context.dimMemberQname(val.qnDimAF) == val.qnCAx1 and val.qnDimOC in f.context.qnameDims:
_ocCurrency = f.context.dimMemberQname(val.qnDimOC).localName
if _currencyMeasure.localName != _ocCurrency:
modelXbrl.error("EIOPA.3.1",
_("There MUST be only one currency but metric %(metric)s reported OC dimension currency %(ocCurrency)s differs from unit currency: %(unitCurrency)s."),
modelObject=f, metric=f.qname, ocCurrency=_ocCurrency, unitCurrency=_currencyMeasure.localName)
else:
val.currenciesUsed[_currencyMeasure] = unit
elif val.validateEBA and f.context is not None:
if f.context.dimMemberQname(val.eba_qnDimCCA) == val.eba_qnCAx1 and val.eba_qnDimCUS in f.context.qnameDims:
currency = f.context.dimMemberQname(val.eba_qnDimCUS).localName
if _currencyMeasure.localName != currency:
modelXbrl.error("EBA.3.1",
_("There MUST be only one currency but metric %(metric)s reported CCA dimension currency %(currency)s differs from unit currency: %(unitCurrency)s."),
modelObject=f, metric=f.qname, currency=currency, unitCurrency=_currencyMeasure.localName)
else:
val.currenciesUsed[_currencyMeasure] = unit
else:
val.currenciesUsed[_currencyMeasure] = unit
elif not unit.isSingleMeasure or unit.measures[0][0] != XbrlConst.qnXbrliPure:
nonMonetaryNonPureFacts.append(f)
if isEnum:
_eQn = getattr(f,"xValue", None) or qnameEltPfxName(f, f.value)
if _eQn:
prefixUsed(val, _eQn.namespaceURI, _eQn.prefix)
if val.isEIOPA_2_0_1 and f.qname.localName == "ei1930":
val.reportingCurrency = _eQn.localName
elif isString:
if f.xmlLang: # requires disclosureSystem to NOT specify default language
stringFactsWithXmlLang.append(f)
if f.isNil:
nilFacts.append(f)
if val.footnotesRelationshipSet.fromModelObject(f):
modelXbrl.warning("EIOPA.S.19",
_("Fact %(fact)s of context %(contextID)s has footnotes.'"),
modelObject=f, fact=f.qname, contextID=f.contextID)
if nilFacts:
modelXbrl.error(("EBA.2.19", "EIOPA.S.2.19"),
_('Nil facts MUST NOT be present in the instance: %(nilFacts)s.'),
modelObject=nilFacts, nilFacts=", ".join(str(f.qname) for f in nilFacts))
if stringFactsWithXmlLang:
modelXbrl.warning("EIOPA.2.20", # not reported for EBA
_("String facts reporting xml:lang (not saved by T4U, not round-tripped): '%(factsWithLang)s'"),
modelObject=stringFactsWithXmlLang, factsWithLang=", ".join(set(str(f.qname) for f in stringFactsWithXmlLang)))
if nonMonetaryNonPureFacts:
modelXbrl.error(("EBA.3.2","EIOPA.3.2.a"),
_("Non monetary (numeric) facts MUST use the pure unit: '%(langLessFacts)s'"),
modelObject=nonMonetaryNonPureFacts, langLessFacts=", ".join(set(str(f.qname) for f in nonMonetaryNonPureFacts)))
val.utrValidator.validateFacts() # validate facts for UTR at logLevel WARNING
unitHashes = {}
for unit in modelXbrl.units.values():
h = unit.hash
if h in unitHashes and unit.isEqualTo(unitHashes[h]):
modelXbrl.warning("EBA.2.21",
_("Duplicate units SHOULD NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
if not getattr(modelXbrl, "isStreamingMode", False):
modelXbrl.error("EIOPA.2.21",
_("Duplicate units MUST NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
else:
unitHashes[h] = unit
for _measures in unit.measures:
for _measure in _measures:
prefixUsed(val, _measure.namespaceURI, _measure.prefix)
del unitHashes
cntxHashes = {}
for cntx in modelXbrl.contexts.values():
h = cntx.contextDimAwareHash
if h in cntxHashes and cntx.isEqualTo(cntxHashes[h]):
if not getattr(modelXbrl, "isStreamingMode", False):
modelXbrl.log("WARNING" if val.isEIOPAfullVersion else "ERROR",
"EIOPA.S.2.7.b",
_("Duplicate contexts MUST NOT be reported, contexts %(cntx1)s and %(cntx2)s are equivalent.'"),
modelObject=(cntx, cntxHashes[h]), cntx1=cntx.id, cntx2=cntxHashes[h].id)
else:
cntxHashes[h] = cntx
for _dim in cntx.qnameDims.values():
_dimQn = _dim.dimensionQname
prefixUsed(val, _dimQn.namespaceURI, _dimQn.prefix)
if _dim.isExplicit:
_memQn = _dim.memberQname
else:
_memQn = _dim.typedMember.qname
if _memQn:
prefixUsed(val, _memQn.namespaceURI, _memQn.prefix)
for elt in modelDocument.xmlRootElement.iter():
if isinstance(elt, ModelObject): # skip comments and processing instructions
prefixUsed(val, elt.qname.namespaceURI, elt.qname.prefix)
for attrTag in elt.keys():
if attrTag.startswith("{"):
_prefix, _NS, _localName = XmlUtil.clarkNotationToPrefixNsLocalname(elt, attrTag, isAttribute=True)
if _prefix:
prefixUsed(val, _NS, _prefix)
elif val.isEIOPA_2_0_1:
if elt.tag in ("{http://www.w3.org/2001/XMLSchema}documentation", "{http://www.w3.org/2001/XMLSchema}annotation"):
modelXbrl.error("EIOPA.2.5",
_("xs:documentation element found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts."),
modelObject=modelDocument)
elif isinstance(elt, etree._Comment):
modelXbrl.error("EIOPA.2.5",
_("XML comment found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts: %(comment)s"),
modelObject=modelDocument, comment=elt.text)
def validateNonStreamingFinish(val, *args, **kwargs):
# non-streaming EBA checks, ignore when streaming (first all from ValidateXbrl.py)
if not getattr(val.modelXbrl, "isStreamingMode", False):
final(val)
def validateStreamingFinish(val, *args, **kwargs):
final(val) # always finish validation when streaming
def final(val):
if not (val.validateEBA or val.validateEIOPA):
return
modelXbrl = val.modelXbrl
modelDocument = modelXbrl.modelDocument
_statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
modelXbrl.profileActivity()
modelXbrl.modelManager.showStatus(_statusMsg)
if modelDocument.type == ModelDocument.Type.INSTANCE and (val.validateEBA or val.validateEIOPA):
if not modelDocument.uri.endswith(".xbrl"):
modelXbrl.warning("EBA.1.1",
_('XBRL instance documents SHOULD use the extension ".xbrl" but it is "%(extension)s"'),
modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
modelXbrl.error("EIOPA.S.1.1.a",
_('XBRL instance documents MUST use the extension ".xbrl" but it is "%(extension)s"'),
modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
if val.isEIOPA_2_0_1: _encodings = ("UTF-8", "utf-8-sig")
else: _encodings = ("utf-8", "UTF-8", "utf-8-sig")
if modelDocument.documentEncoding not in _encodings:
modelXbrl.error(("EBA.1.4", "EIOPA.1.4"),
_('XBRL instance documents MUST use "UTF-8" encoding but is "%(xmlEncoding)s"'),
modelObject=modelDocument, xmlEncoding=modelDocument.documentEncoding)
schemaRefElts = []
schemaRefFileNames = []
for doc, docRef in modelDocument.referencesDocument.items():
if "href" in docRef.referenceTypes:
if docRef.referringModelObject.localName == "schemaRef":
schemaRefElts.append(docRef.referringModelObject)
schemaRefFileNames.append(doc.basename)
if not UrlUtil.isAbsolute(doc.uri):
modelXbrl.error(("EBA.2.2", "EIOPA.S.1.5.a" if val.isEIOPAfullVersion else "EIOPA.S.1.5.b"),
_('The link:schemaRef element in submitted instances MUST resolve to the full published entry point URL: %(url)s.'),
modelObject=docRef.referringModelObject, url=doc.uri,
messageCodes=("EBA.2.2", "EIOPA.S.1.5.a","EIOPA.S.1.5.b"))
elif docRef.referringModelObject.localName == "linkbaseRef":
modelXbrl.error(("EBA.2.3","EIOPA.S.1.5.a"),
_('The link:linkbaseRef element is not allowed: %(fileName)s.'),
modelObject=docRef.referringModelObject, fileName=doc.basename)
_numSchemaRefs = len(XmlUtil.children(modelDocument.xmlRootElement, XbrlConst.link, "schemaRef"))
if _numSchemaRefs > 1:
modelXbrl.error(("EIOPA.S.1.5.a", "EBA.1.5"),
_('XBRL instance documents MUST reference only one entry point schema but %(numEntryPoints)s were found: %(entryPointNames)s'),
modelObject=modelDocument, numEntryPoints=_numSchemaRefs, entryPointNames=', '.join(sorted(schemaRefFileNames)))
### check entry point names appropriate for filing indicator (DPM DB?)
if len(schemaRefElts) != 1:
modelXbrl.error("EBA.2.3",
_('Any reported XBRL instance document MUST contain only one xbrli:xbrl/link:schemaRef node, but %(entryPointCount)s.'),
modelObject=schemaRefElts, entryPointCount=len(schemaRefElts))
# non-streaming EBA checks
if not getattr(modelXbrl, "isStreamingMode", False):
val.qnReportedCurrency = None
if val.isEIOPA_2_0_1 and qnMetReportingCurrency in modelXbrl.factsByQname:
for _multiCurrencyFact in modelXbrl.factsByQname[qnMetReportingCurrency]:
# multi-currency fact
val.qnReportedCurrency = _multiCurrencyFact.xValue
break
validateFacts(val, modelXbrl.facts)
# check sum of fact md5s (otherwise checked in streaming process)
xbrlFactsCheckVersion = None
expectedSumOfFactMd5s = None
for pi in modelDocument.xmlRootElement.getchildren():
if isinstance(pi, etree._ProcessingInstruction) and pi.target == "xbrl-facts-check":
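# Editorial note (not in the original source): each xbrl-facts-check processing
# instruction is expected to carry a single name="value" pseudo-attribute, e.g.
# version="1.0" or sum-of-fact-md5s="..." (values illustrative); the re.search
# below extracts that one pair.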
_match = re.search("([\\w-]+)=[\"']([^\"']+)[\"']", pi.text)
if _match:
_matchGroups = _match.groups()
if len(_matchGroups) == 2:
if _matchGroups[0] == "version":
xbrlFactsCheckVersion = _matchGroups[1]
elif _matchGroups[0] == "sum-of-fact-md5s":
try:
expectedSumOfFactMd5s = Md5Sum(_matchGroups[1])
except ValueError:
modelXbrl.error("EIOPA:xbrlFactsCheckError",
_("Invalid sum-of-md5s %(sumOfMd5)s"),
modelObject=modelXbrl, sumOfMd5=_matchGroups[1])
if xbrlFactsCheckVersion and expectedSumOfFactMd5s:
sumOfFactMd5s = Md5Sum()
for f in modelXbrl.factsInInstance:
sumOfFactMd5s += f.md5sum
if sumOfFactMd5s != expectedSumOfFactMd5s:
modelXbrl.warning("EIOPA:xbrlFactsCheckWarning",
_("XBRL facts sum of md5s expected %(expectedMd5)s not matched to actual sum %(actualMd5Sum)s"),
modelObject=modelXbrl, expectedMd5=expectedSumOfFactMd5s, actualMd5Sum=sumOfFactMd5s)
else:
modelXbrl.info("info",
_("Successful XBRL facts sum of md5s."),
modelObject=modelXbrl)
if any(badError in modelXbrl.errors
for badError in ("EBA.2.1", "EIOPA.2.1", "EIOPA.S.1.5.a/EIOPA.S.1.5.b")):
pass # skip checking filingIndicators if bad errors
elif not val.filingIndicators:
modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
_('Missing filing indicators. Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
modelObject=modelDocument)
elif all(filed == False for filed in val.filingIndicators.values()):
modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
_('All filing indicators are filed="false". Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
modelObject=modelDocument)
if val.numFilingIndicatorTuples > 1:
modelXbrl.warning(("EBA.1.6.2", "EIOPA.1.6.2"),
_('Multiple filing indicator tuples found when not in streaming mode (info).'),
modelObject=modelXbrl.factsByQname[qnFIndicators])
if len(val.cntxDates) > 1:
modelXbrl.error(("EBA.2.13","EIOPA.2.13"),
_('Contexts must have the same date: %(dates)s.'),
# when streaming, the context values are no longer available, but without streaming they can be logged
modelObject=set(_cntx for _cntxs in val.cntxDates.values() for _cntx in _cntxs),
dates=', '.join(XmlUtil.dateunionValue(_dt, subtractOneDay=True)
for _dt in val.cntxDates.keys()))
if val.unusedCntxIDs:
if val.isEIOPA_2_0_1:
modelXbrl.error("EIOPA.2.7",
_('Unused xbrli:context nodes MUST NOT be present in the instance: %(unusedContextIDs)s.'),
modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))
else:
modelXbrl.warning(("EBA.2.7", "EIOPA.2.7"),
_('Unused xbrli:context nodes SHOULD NOT be present in the instance: %(unusedContextIDs)s.'),
modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))
if len(val.cntxEntities) > 1:
modelXbrl.error(("EBA.2.9", "EIOPA.2.9"),
_('All entity identifiers and schemes MUST be the same, %(count)s found: %(entities)s.'),
modelObject=modelDocument, count=len(val.cntxEntities),
entities=", ".join(sorted(str(cntxEntity) for cntxEntity in val.cntxEntities)))
for _scheme, _LEI in val.cntxEntities:
if (_scheme in ("http://standards.iso.org/iso/17442", "http://standard.iso.org/iso/17442", "LEI") or
(not val.isEIOPAfullVersion and _scheme == "PRE-LEI")):
if _scheme == "http://standard.iso.org/iso/17442":
modelXbrl.warning(("EBA.3.6", "EIOPA.S.2.8.c"),
_("Warning, context has entity scheme %(scheme)s should be plural: http://standards.iso.org/iso/17442."),
modelObject=modelDocument, scheme=_scheme)
result = LeiUtil.checkLei(_LEI)
if result == LeiUtil.LEI_INVALID_LEXICAL:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has lexically invalid LEI %(lei)s."),
modelObject=modelDocument, lei=_LEI)
elif result == LeiUtil.LEI_INVALID_CHECKSUM:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has LEI checksum error in %(lei)s."),
modelObject=modelDocument, lei=_LEI)
elif _scheme == "SC":
pass # anything is ok for Specific Code
else:
modelXbrl.error("EIOPA.S.2.8.c",
_("Context has unrecognized entity scheme %(scheme)s."),
modelObject=modelDocument, scheme=_scheme)
if val.unusedUnitIDs:
if val.isEIOPA_2_0_1:
modelXbrl.error("EIOPA.2.22",
_('Unused xbrli:unit nodes MUST NOT be present in the instance: %(unusedUnitIDs)s.'),
modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))
else:
modelXbrl.warning(("EBA.2.22", "EIOPA.2.22"),
_('Unused xbrli:unit nodes SHOULD NOT be present in the instance: %(unusedUnitIDs)s.'),
modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))
if len(val.currenciesUsed) > 1:
modelXbrl.error(("EBA.3.1","EIOPA.3.1"),
_("There MUST be only one currency but %(numCurrencies)s were found: %(currencies)s.'"),
modelObject=val.currenciesUsed.values(), numCurrencies=len(val.currenciesUsed), currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))
elif val.isEIOPA_2_0_1 and any(_measure.localName != val.reportingCurrency for _measure in val.currenciesUsed.keys()):
modelXbrl.error("EIOPA.3.1",
_("There MUST be only one currency but reporting currency %(reportingCurrency)s differs from unit currencies: %(currencies)s.'"),
modelObject=val.currenciesUsed.values(), reportingCurrency=val.reportingCurrency, currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))
if val.prefixesUnused:
modelXbrl.warning(("EBA.3.4", "EIOPA.3.4"),
_("There SHOULD be no unused prefixes but these were declared: %(unusedPrefixes)s.'"),
modelObject=modelDocument, unusedPrefixes=', '.join(sorted(val.prefixesUnused)))
for ns, prefixes in val.namespacePrefixesUsed.items():
nsDocs = modelXbrl.namespaceDocs.get(ns)
if nsDocs:
for nsDoc in nsDocs:
nsDocPrefix = XmlUtil.xmlnsprefix(nsDoc.xmlRootElement, ns)
if any(prefix != nsDocPrefix for prefix in prefixes if prefix is not None):
modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
_("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
modelObject=modelDocument, namespace=ns, declaredPrefix=nsDocPrefix, foundPrefixes=', '.join(sorted(prefixes - {None})))
elif ns in CANONICAL_PREFIXES and any(prefix != CANONICAL_PREFIXES[ns] for prefix in prefixes if prefix is not None):
modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
_("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
modelObject=modelDocument, namespace=ns, declaredPrefix=CANONICAL_PREFIXES[ns], foundPrefixes=', '.join(sorted(prefixes - {None})))
modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
modelXbrl.modelManager.showStatus(None)
del val.prefixNamespace, val.namespacePrefix, val.idObjects, val.typedDomainElements
del val.utrValidator, val.firstFact, val.footnotesRelationshipSet
__pluginInfo__ = {
# Do not use _( ) in pluginInfo itself (it is applied later, after loading)
'name': 'Validate EBA, EIOPA',
'version': '1.2',
'description': 'EBA (2.3), EIOPA (2.0.0) Filing Rules Validation.',
'license': 'Apache-2',
'author': 'Mark V Systems',
'copyright': '(c) Copyright 2015 Mark V Systems Limited, All rights reserved.',
# classes of mount points (required)
'DisclosureSystem.Types': dislosureSystemTypes,
'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
'Validate.XBRL.Start': validateSetup,
'Validate.XBRL.Finally': validateNonStreamingFinish,
'Streaming.ValidateFacts': validateStreamingFacts,
'Streaming.ValidateFinish': validateStreamingFinish,
}
|
|
import functools
import os
import pkgutil
import sys
from collections import OrderedDict, defaultdict
from importlib import import_module
import django
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import (
BaseCommand, CommandError, CommandParser, handle_default_options,
)
from django.core.management.color import color_style
from django.utils import autoreload
from django.utils.encoding import force_text
def find_commands(management_dir):
"""
Given a path to a management directory, return a list of all the command
names that are available.
"""
command_dir = os.path.join(management_dir, 'commands')
return [name for _, name, is_pkg in pkgutil.iter_modules([command_dir])
if not is_pkg and not name.startswith('_')]
def load_command_class(app_name, name):
"""
Given a command name and an application name, return the Command
class instance. Allow all errors raised by the import process
(ImportError, AttributeError) to propagate.
"""
module = import_module('%s.management.commands.%s' % (app_name, name))
return module.Command()
@functools.lru_cache(maxsize=None)
def get_commands():
"""
Return a dictionary mapping command names to their callback applications.
Look for a management.commands package in django.core, and in each
installed application -- if a commands package exists, register all
commands in that package.
Core commands are always included. If a settings module has been
specified, also include user-defined commands.
The dictionary is in the format {command_name: app_name}. Key-value
pairs from this dictionary can then be used in calls to
load_command_class(app_name, command_name)
If a specific version of a command must be loaded (e.g., with the
startapp command), the instantiated module can be placed in the
dictionary in place of the application name.
The dictionary is cached on the first call and reused on subsequent
calls.
"""
commands = {name: 'django.core' for name in find_commands(__path__[0])}
if not settings.configured:
return commands
for app_config in reversed(list(apps.get_app_configs())):
path = os.path.join(app_config.path, 'management')
commands.update({name: app_config.name for name in find_commands(path)})
return commands
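# Illustrative sketch (added for clarity, not part of this module): the mapping
# returned by get_commands() is typically combined with load_command_class(),
# e.g.
#
#     commands = get_commands()                    # e.g. {'migrate': 'django.core', ...}
#     command = load_command_class(commands['migrate'], 'migrate')
#
# 'migrate' is used here only as an example command name.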
def call_command(command_name, *args, **options):
"""
Call the given command, with the given options and args/kwargs.
This is the primary API you should use for calling specific commands.
`command_name` may be a string or a command object. Using a string is
preferred unless the command object is required for further processing or
testing.
Some examples:
call_command('migrate')
call_command('shell', plain=True)
call_command('sqlmigrate', 'myapp')
from django.core.management.commands import flush
cmd = flush.Command()
call_command(cmd, verbosity=0, interactive=False)
# Do something with cmd ...
"""
if isinstance(command_name, BaseCommand):
# Command object passed in.
command = command_name
command_name = command.__class__.__module__.split('.')[-1]
else:
# Load the command object by name.
try:
app_name = get_commands()[command_name]
except KeyError:
raise CommandError("Unknown command: %r" % command_name)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
command = app_name
else:
command = load_command_class(app_name, command_name)
# Simulate argument parsing to get the option defaults (see #10080 for details).
parser = command.create_parser('', command_name)
# Use the `dest` option name from the parser option
opt_mapping = {
min(s_opt.option_strings).lstrip('-').replace('-', '_'): s_opt.dest
for s_opt in parser._actions if s_opt.option_strings
}
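# For example (illustrative): an action declared with option strings
# ('-v', '--verbosity') yields the key 'verbosity' (min() picks '--verbosity'
# because '--' sorts before '-v'), mapped to that action's dest.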
arg_options = {opt_mapping.get(key, key): value for key, value in options.items()}
defaults = parser.parse_args(args=[force_text(a) for a in args])
defaults = dict(defaults._get_kwargs(), **arg_options)
# Raise an error if any unknown options were passed.
stealth_options = set(command.base_stealth_options + command.stealth_options)
dest_parameters = {action.dest for action in parser._actions}
valid_options = (dest_parameters | stealth_options).union(opt_mapping)
unknown_options = set(options) - valid_options
if unknown_options:
raise TypeError(
"Unknown option(s) for %s command: %s. "
"Valid options are: %s." % (
command_name,
', '.join(sorted(unknown_options)),
', '.join(sorted(valid_options)),
)
)
# Move positional args out of options to mimic legacy optparse
args = defaults.pop('args', ())
if 'skip_checks' not in options:
defaults['skip_checks'] = True
return command.execute(*args, **defaults)
class ManagementUtility:
"""
Encapsulate the logic of the django-admin and manage.py utilities.
"""
def __init__(self, argv=None):
self.argv = argv or sys.argv[:]
self.prog_name = os.path.basename(self.argv[0])
if self.prog_name == '__main__.py':
self.prog_name = 'python -m django'
self.settings_exception = None
def main_help_text(self, commands_only=False):
"""Return the script's main help text, as a string."""
if commands_only:
usage = sorted(get_commands())
else:
usage = [
"",
"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,
"",
"Available subcommands:",
]
commands_dict = defaultdict(lambda: [])
for name, app in get_commands().items():
if app == 'django.core':
app = 'django'
else:
app = app.rpartition('.')[-1]
commands_dict[app].append(name)
style = color_style()
for app in sorted(commands_dict):
usage.append("")
usage.append(style.NOTICE("[%s]" % app))
for name in sorted(commands_dict[app]):
usage.append(" %s" % name)
# Output an extra note if settings are not properly configured
if self.settings_exception is not None:
usage.append(style.NOTICE(
"Note that only Django core commands are listed "
"as settings are not properly configured (error: %s)."
% self.settings_exception))
return '\n'.join(usage)
def fetch_command(self, subcommand):
"""
Try to fetch the given subcommand, printing a message with the
appropriate command called from the command line (usually
"django-admin" or "manage.py") if it can't be found.
"""
# Get commands outside of try block to prevent swallowing exceptions
commands = get_commands()
try:
app_name = commands[subcommand]
except KeyError:
if os.environ.get('DJANGO_SETTINGS_MODULE'):
# If `subcommand` is missing due to misconfigured settings, the
# following line will retrigger an ImproperlyConfigured exception
# (get_commands() swallows the original one) so the user is
# informed about it.
settings.INSTALLED_APPS
else:
sys.stderr.write("No Django settings specified.\n")
sys.stderr.write(
"Unknown command: %r\nType '%s help' for usage.\n"
% (subcommand, self.prog_name)
)
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass
def autocomplete(self):
"""
Output completion suggestions for BASH.
The output of this function is passed to BASH's `COMPREPLY` variable and
treated as completion suggestions. `COMPREPLY` expects a space
separated string as the result.
The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
to get information about the cli input. Please refer to the BASH
man page for more information about these variables.
Subcommand options are saved as pairs. A pair consists of
the long option string (e.g. '--exclude') and a boolean
value indicating if the option requires arguments. When printing to
stdout, an equal sign is appended to options which require arguments.
Note: If debugging this function, it is recommended to write the debug
output in a separate file. Otherwise the debug output will be treated
and formatted as potential completion suggestions.
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'DJANGO_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
curr = cwords[cword - 1]
except IndexError:
curr = ''
subcommands = list(get_commands()) + ['help']
options = [('--help', False)]
# subcommand
if cword == 1:
print(' '.join(sorted(filter(lambda x: x.startswith(curr), subcommands))))
# subcommand options
# special case: the 'help' subcommand has no options
elif cwords[0] in subcommands and cwords[0] != 'help':
subcommand_cls = self.fetch_command(cwords[0])
# special case: add the names of installed apps to options
if cwords[0] in ('dumpdata', 'sqlmigrate', 'sqlsequencereset', 'test'):
try:
app_configs = apps.get_app_configs()
# Get the last part of the dotted path as the app name.
options.extend((app_config.label, 0) for app_config in app_configs)
except ImportError:
# Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
# user will find out once they execute the command.
pass
parser = subcommand_cls.create_parser('', cwords[0])
options.extend(
(min(s_opt.option_strings), s_opt.nargs != 0)
for s_opt in parser._actions if s_opt.option_strings
)
# filter out previously specified options from available options
prev_opts = {x.split('=')[0] for x in cwords[1:cword - 1]}
options = (opt for opt in options if opt[0] not in prev_opts)
# filter options by current input
options = sorted((k, v) for k, v in options if k.startswith(curr))
for opt_label, require_arg in options:
# append '=' to options which require args
if require_arg:
opt_label += '='
print(opt_label)
# Exit code of the bash completion function is never passed back to
# the user, so it's safe to always exit with 0.
# For more details see #25420.
sys.exit(0)
def execute(self):
"""
Given the command-line arguments, figure out which subcommand is being
run, create a parser appropriate to that command, and run it.
"""
try:
subcommand = self.argv[1]
except IndexError:
subcommand = 'help' # Display help if no arguments were given.
# Preprocess options to extract --settings and --pythonpath.
# These options could affect the commands that are available, so they
# must be processed early.
parser = CommandParser(None, usage="%(prog)s subcommand [options] [args]", add_help=False)
parser.add_argument('--settings')
parser.add_argument('--pythonpath')
parser.add_argument('args', nargs='*') # catch-all
try:
options, args = parser.parse_known_args(self.argv[2:])
handle_default_options(options)
except CommandError:
pass # Ignore any option errors at this point.
try:
settings.INSTALLED_APPS
except ImproperlyConfigured as exc:
self.settings_exception = exc
if settings.configured:
# Start the auto-reloading dev server even if the code is broken.
# The hardcoded condition is a code smell but we can't rely on a
# flag on the command class because we haven't located it yet.
if subcommand == 'runserver' and '--noreload' not in self.argv:
try:
autoreload.check_errors(django.setup)()
except Exception:
# The exception will be raised later in the child process
# started by the autoreloader. Pretend it didn't happen by
# loading an empty list of applications.
apps.all_models = defaultdict(OrderedDict)
apps.app_configs = OrderedDict()
apps.apps_ready = apps.models_ready = apps.ready = True
# Remove options not compatible with the built-in runserver
# (e.g. options for the contrib.staticfiles' runserver).
# Changes here require manually testing as described in
# #27522.
_parser = self.fetch_command('runserver').create_parser('django', 'runserver')
_options, _args = _parser.parse_known_args(self.argv[2:])
for _arg in _args:
self.argv.remove(_arg)
# In all other cases, django.setup() is required to succeed.
else:
django.setup()
self.autocomplete()
if subcommand == 'help':
if '--commands' in args:
sys.stdout.write(self.main_help_text(commands_only=True) + '\n')
elif not options.args:
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(options.args[0]).print_help(self.prog_name, options.args[0])
# Special-cases: We want 'django-admin --version' and
# 'django-admin --help' to work, for backwards compatibility.
elif subcommand == 'version' or self.argv[1:] == ['--version']:
sys.stdout.write(django.get_version() + '\n')
elif self.argv[1:] in (['--help'], ['-h']):
sys.stdout.write(self.main_help_text() + '\n')
else:
self.fetch_command(subcommand).run_from_argv(self.argv)
def execute_from_command_line(argv=None):
"""Run a ManagementUtility."""
utility = ManagementUtility(argv)
utility.execute()
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View handlers for the DFP Playground."""
import json
import logging
import os
import socket
from api_handler import APIHandler
from googleads import ad_manager
from googleads import oauth2
import jinja2
from ndb_handler import InitUser
from ndb_handler import ReplaceAppCredential
from ndb_handler import RetrieveAppCredential
from ndb_handler import RevokeOldCredentials
from oauth2client import client
from utils import oauth2required
from utils import unpack_row
import webapp2
from zeep.helpers import serialize_object
from google.appengine.api import app_identity
from google.appengine.api import users
_APPLICATION_NAME = 'DFP Playground'
# load templates
_TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates')
_JINJA_ENVIRONMENT = jinja2.Environment(
autoescape=True,
extensions=['jinja2.ext.autoescape'],
loader=jinja2.FileSystemLoader(_TEMPLATES_PATH))
_CLIENT_ID, _CLIENT_SECRET = RetrieveAppCredential()
# set timeout to 10 s
socket.setdefaulttimeout(10)
# initialize flow object
_HOSTNAME = app_identity.get_default_version_hostname()
_FLOW = client.OAuth2WebServerFlow(
client_id=_CLIENT_ID,
client_secret=_CLIENT_SECRET,
scope=oauth2.GetAPIScope('ad_manager'),
user_agent='DFP Playground',
redirect_uri=(
('https://' + _HOSTNAME) if _HOSTNAME else 'http://localhost:8008') +
'/oauth2callback',
access_type='offline',
approval_prompt='force')
class MainPage(webapp2.RequestHandler):
"""View that displays the DFP Playground's homepage."""
@oauth2required
def get(self):
"""Handle get request."""
user = users.get_current_user()
user_email = user.email()
logout_url = users.create_logout_url('/')
user_ndb = InitUser()
template = _JINJA_ENVIRONMENT.get_template('index_page.html')
self.response.write(
template.render({
'user_email': user_email,
'logout_url': logout_url,
}))
class Login(webapp2.RequestHandler):
"""View that redirects to the auth flow's first step."""
def get(self):
"""Handle get request."""
auth_uri = _FLOW.step1_get_authorize_url()
self.redirect(auth_uri)
class LoginCallback(webapp2.RequestHandler):
"""View that handles the auth flow's callback, which contains credentials."""
def get(self):
"""Handle get request."""
if not self.request.get('code'):
# User denied OAuth2 permissions.
logging.info('User denied OAuth2 permissions')
return self.redirect('/login/error')
credentials = _FLOW.step2_exchange(self.request.get('code'))
if credentials:
# store user's credentials in database
user_ndb = InitUser(credentials.refresh_token)
# check if user has any networks
api_handler = APIHandler(
_CLIENT_ID, _CLIENT_SECRET, user_ndb, _APPLICATION_NAME)
networks = api_handler.GetAllNetworks()
if not networks:
# if user has no networks, redirect to ask if one should be made
return self.redirect('/make-test-network')
return self.redirect('/')
else:
# failure: no credentials
logging.error('No credentials found from step 2 in flow')
return self.redirect('/login/error')
class LoginErrorPage(webapp2.RequestHandler):
"""View that notifies the user of a login failure."""
def get(self):
"""Handle get request."""
self.response.write('''
Failed to log in. <a href="/login">Click</a> to try again.
''')
class MakeTestNetworkPage(webapp2.RequestHandler):
"""View that asks a new user to make a test network."""
def get(self):
"""Handle get request."""
template = _JINJA_ENVIRONMENT.get_template('make_network_page.html')
self.response.write(template.render({}))
class APIViewHandler(webapp2.RequestHandler):
"""View that chooses the appropriate handler depending on the method."""
api_handler_method_map = {
'users': 'GetUsers',
'adunits': 'GetAdUnits',
'companies': 'GetCompanies',
'creatives': 'GetCreatives',
'creativetemplates': 'GetCreativeTemplates',
'customtargetingkeys': 'GetCustomTargetingKeys',
'customtargetingvalues': 'GetCustomTargetingValues',
'licas': 'GetLICAs',
'orders': 'GetOrders',
'lineitems': 'GetLineItems',
'placements': 'GetPlacements',
'pql': 'GetPQLSelection',
}
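# Note (added for clarity): the lowercased trailing path segment selects the
# APIHandler call from the map above, e.g. a GET for 'orders' dispatches to
# APIHandler.GetOrders; 'networks' is handled separately in get() because it
# takes no PQL statement.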
def get(self, method):
"""Delegate GET request calls to the DFP API."""
method = method.lower()
user_ndb = InitUser()
api_handler = APIHandler(
_CLIENT_ID, _CLIENT_SECRET, user_ndb, _APPLICATION_NAME)
network_code = self.request.get('network_code')
# parse parameters
try:
limit = int(self.request.get('limit', api_handler.page_limit))
except ValueError:
self.response.status = 400
return self.response.write('Limit must be an integer')
try:
offset = int(self.request.get('offset', 0))
except ValueError:
self.response.status = 400
return self.response.write('Offset must be an integer')
if method == 'networks':
return_obj = api_handler.GetAllNetworks()
else:
# construct PQL statement
where_clause = self.request.get('where', '')
statement = ad_manager.FilterStatement(where_clause, limit=limit,
offset=offset)
try:
# throws KeyError if method not found
api_handler_func = getattr(api_handler,
self.api_handler_method_map[method])
except KeyError:
self.response.status = 400
return self.response.write('API method not supported (%s).' % method)
# retrieve return_obj from api_handler and modify it
return_obj = api_handler_func(network_code, statement)
# process return_obj
if 'columns' in return_obj:
# special case: return_obj is from PQL Service
cols = return_obj['columns']
return_obj['results'] = [
unpack_row(row, cols) for row in return_obj['results']
]
else:
# regular case
return_obj['results'] = [
serialize_object(obj) for obj in return_obj['results']
]
if self.request.get('limit'):
return_obj['limit'] = limit
else:
try:
return_obj['limit'] = return_obj['totalResultSetSize']
except KeyError:
return_obj['limit'] = api_handler.page_limit
return_obj['offset'] = offset
# construct response headers
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps(return_obj))
def post(self, method):
"""Delegate POST request calls to the DFP API."""
if method == 'networks':
user_ndb = InitUser()
api_handler = APIHandler(
_CLIENT_ID, _CLIENT_SECRET, user_ndb, _APPLICATION_NAME)
api_handler.MakeTestNetwork()
return self.redirect('/')
else:
self.response.status = 400
self.response.write(method + ' API POST method not found.')
class RevokeOldRefreshTokens(webapp2.RequestHandler):
"""View that revokes old credentials. It is used in cron.yaml."""
def get(self):
"""Handle get request."""
if self.request.headers.get('X-Appengine-Cron'):
RevokeOldCredentials()
else:
self.response.status = 401
class PutCredentials(webapp2.RequestHandler):
"""View that allows an admin user to replace credentials."""
def get(self):
template = _JINJA_ENVIRONMENT.get_template('create_credentials.html')
self.response.write(template.render())
def post(self):
client_id = self.request.POST['client_id']
client_secret = self.request.POST['client_secret']
ReplaceAppCredential(client_id, client_secret)
self.response.write(
'Success! Restart the server to load these credentials.')
|
|
import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
)
from urlparse import (
urlparse,
urlunparse,
)
import subprocess
from charmhelpers.core.hookenv import (
config,
log,
)
import os
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
# Folsom
'folsom': 'precise-updates/folsom',
'precise-folsom': 'precise-updates/folsom',
'precise-folsom/updates': 'precise-updates/folsom',
'precise-updates/folsom': 'precise-updates/folsom',
'folsom/proposed': 'precise-proposed/folsom',
'precise-folsom/proposed': 'precise-proposed/folsom',
'precise-proposed/folsom': 'precise-proposed/folsom',
# Grizzly
'grizzly': 'precise-updates/grizzly',
'precise-grizzly': 'precise-updates/grizzly',
'precise-grizzly/updates': 'precise-updates/grizzly',
'precise-updates/grizzly': 'precise-updates/grizzly',
'grizzly/proposed': 'precise-proposed/grizzly',
'precise-grizzly/proposed': 'precise-proposed/grizzly',
'precise-proposed/grizzly': 'precise-proposed/grizzly',
# Havana
'havana': 'precise-updates/havana',
'precise-havana': 'precise-updates/havana',
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
# Juno
'juno': 'trusty-updates/juno',
'trusty-juno': 'trusty-updates/juno',
'trusty-juno/updates': 'trusty-updates/juno',
'trusty-updates/juno': 'trusty-updates/juno',
'juno/proposed': 'trusty-proposed/juno',
'trusty-juno/proposed': 'trusty-proposed/juno',
'trusty-proposed/juno': 'trusty-proposed/juno',
}
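# Illustrative example (not in the original module): add_source('cloud:icehouse')
# looks up 'icehouse' in this mapping, resolves it to 'precise-updates/icehouse',
# and writes the corresponding CLOUD_ARCHIVE entry to
# /etc/apt/sources.list.d/cloud-archive.list.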
# The order of this list is very important. Handlers should be listed from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
class SourceConfigError(Exception):
pass
class UnhandledSource(Exception):
pass
class AptLockError(Exception):
pass
class BaseFetchHandler(object):
"""Base class for FetchHandler implementations in fetch plugins"""
def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
return "Wrong source type"
def install(self, source):
"""Try to download and unpack the source. Return the path to the
unpacked files or raise UnhandledSource."""
raise UnhandledSource("Wrong source type {}".format(source))
def parse_url(self, url):
return urlparse(url)
def base_url(self, url):
"""Return url without querystring or fragment"""
parts = list(self.parse_url(url))
parts[4:] = ['' for i in parts[4:]]
return urlunparse(parts)
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
import apt_pkg
apt_pkg.init()
# Tell apt to build an in-memory cache to prevent race conditions (if
# another process is already building the cache).
apt_pkg.config.set("Dir::Cache::pkgcache", "")
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
try:
p = cache[package]
p.current_ver or _pkgs.append(package)
except KeyError:
log('Package {} has no installation candidate.'.format(package),
level='WARNING')
_pkgs.append(package)
return _pkgs
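# Illustrative usage (package names are placeholders):
#
#     missing = filter_installed_packages(['nginx', 'git'])
#     apt_install(missing, fatal=True)   # installs only the packages not yet present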
def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
_run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
"""Upgrade all packages"""
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
if dist:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
_run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
_run_apt_command(cmd, fatal)
def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Purging {}".format(packages))
_run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
"""Hold one or more packages"""
cmd = ['apt-mark', 'hold']
if isinstance(packages, basestring):
cmd.append(packages)
else:
cmd.extend(packages)
log("Holding {}".format(packages))
if fatal:
subprocess.check_call(cmd)
else:
subprocess.call(cmd)
def add_source(source, key=None):
"""Add a package source to this system.
@param source: a URL or sources.list entry, as supported by
add-apt-repository(1). Examples:
ppa:charmers/example
deb https://stub:[email protected]/ubuntu trusty main
In addition:
'proposed:' may be used to enable the standard 'proposed'
pocket for the release.
'cloud:' may be used to activate official cloud archive pockets,
such as 'cloud:icehouse'
@param key: A key to be added to the system's APT keyring and used
to verify the signatures on packages. Ideally, this should be an
ASCII format GPG public key including the block headers. A GPG key
id may also be used, but be aware that only insecure protocols are
available to retrieve the actual public key from a public keyserver,
placing your Juju environment at risk. ppa and cloud archive keys
are securely added automatically, so should not be provided.
"""
if source is None:
log('Source is not present. Skipping')
return
if (source.startswith('ppa:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
elif source == 'proposed':
release = lsb_release()['DISTRIB_CODENAME']
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
else:
raise SourceConfigError("Unknown source: {!r}".format(source))
if key:
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
with NamedTemporaryFile() as key_file:
key_file.write(key)
key_file.flush()
key_file.seek(0)
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
else:
# Note that hkp: is in no way a secure protocol. Using a
# GPG key id is pointless from a security POV unless you
# absolutely trust your network and DNS.
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv',
key])
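# Illustrative usage sketch (the PPA, URL and key values are placeholders):
#
#     add_source('cloud:icehouse')                       # enables a cloud archive pocket
#     add_source('ppa:charmers/example')                 # delegated to add-apt-repository
#     add_source('deb http://example.com/ubuntu trusty main', key='a1b2c3d4')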
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
Configure multiple sources from charm configuration.
The lists are encoded as yaml fragments in the configuration.
The fragment needs to be included as a string. Sources and their
corresponding keys are of the types supported by add_source().
Example config:
install_sources: |
- "ppa:foo"
- "http://example.com/repo precise main"
install_keys: |
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
sources = safe_load((config(sources_var) or '').strip()) or []
keys = safe_load((config(keys_var) or '').strip()) or None
if isinstance(sources, basestring):
sources = [sources]
if keys is None:
for source in sources:
add_source(source, None)
else:
if isinstance(keys, basestring):
keys = [keys]
if len(sources) != len(keys):
raise SourceConfigError(
'Install sources and keys lists are different lengths')
for source, key in zip(sources, keys):
add_source(source, key)
if update:
apt_update(fatal=True)
def install_remote(source):
"""
Install a file tree from a remote source
The specified source should be a url of the form:
scheme://[host]/path[#[option=value][&...]]
Schemes supported are based on this module's submodules
Options supported are submodule-specific"""
# We ONLY check for True here because can_handle may return a string
# explaining why it can't handle a given source.
handlers = [h for h in plugins() if h.can_handle(source) is True]
installed_to = None
for handler in handlers:
try:
installed_to = handler.install(source)
except UnhandledSource:
pass
if not installed_to:
raise UnhandledSource("No handler found for source {}".format(source))
return installed_to
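# Illustrative usage (URL is a placeholder; assumes the archive URL handler
# accepts it):
#
#     path = install_remote('http://example.com/payload.tar.gz')
#     # 'path' is wherever the matching fetch handler unpacked the files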
def install_from_config(config_var_name):
charm_config = config()
source = charm_config[config_var_name]
return install_remote(source)
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
plugin_list = []
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be omitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
def _run_apt_command(cmd, fatal=False):
"""
Run an APT command, checking the exit status and retrying while the APT
lock is held if the fatal flag is set to True.
:param: cmd: list: The apt command to run.
:param: fatal: bool: Whether the command's exit status should be checked
and the command retried if the APT lock cannot be acquired.
"""
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'
if fatal:
retry_count = 0
result = None
# If the command is considered "fatal", we need to retry if the apt
# lock was not acquired.
while result is None or result == APT_NO_LOCK:
try:
result = subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > APT_NO_LOCK_RETRY_COUNT:
raise
result = e.returncode
log("Couldn't acquire DPKG lock. Will retry in {} seconds."
"".format(APT_NO_LOCK_RETRY_DELAY))
time.sleep(APT_NO_LOCK_RETRY_DELAY)
else:
subprocess.call(cmd, env=env)
|
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import glob
import sys
import os
import time
import unittest
basepath = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, basepath + '/gen-py.tornado')
sys.path.insert(0, glob.glob(os.path.join(basepath, '../../lib/py/build/lib*'))[0])
try:
__import__('tornado')
except ImportError:
print("module `tornado` not found, skipping test")
sys.exit(0)
from tornado import gen
from tornado.testing import AsyncTestCase, get_unused_port, gen_test
from thrift import TTornado
from thrift.protocol import TBinaryProtocol
from thrift.transport.TTransport import TTransportException
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
class TestHandler(object):
def __init__(self, test_instance):
self.test_instance = test_instance
def testVoid(self):
pass
def testString(self, s):
return s
def testByte(self, b):
return b
def testI16(self, i16):
return i16
def testI32(self, i32):
return i32
def testI64(self, i64):
return i64
def testDouble(self, dub):
return dub
def testBinary(self, thing):
return thing
def testStruct(self, thing):
return thing
def testException(self, s):
if s == 'Xception':
x = Xception()
x.errorCode = 1001
x.message = s
raise x
elif s == 'throw_undeclared':
raise ValueError("foo")
def testOneway(self, seconds):
start = time.time()
def fire_oneway():
end = time.time()
self.test_instance.stop((start, end, seconds))
self.test_instance.io_loop.add_timeout(
datetime.timedelta(seconds=seconds),
fire_oneway)
def testNest(self, thing):
return thing
@gen.coroutine
def testMap(self, thing):
yield gen.moment
raise gen.Return(thing)
def testSet(self, thing):
return thing
def testList(self, thing):
return thing
def testEnum(self, thing):
return thing
def testTypedef(self, thing):
return thing
class ThriftTestCase(AsyncTestCase):
def setUp(self):
super(ThriftTestCase, self).setUp()
self.port = get_unused_port()
# server
self.handler = TestHandler(self)
self.processor = ThriftTest.Processor(self.handler)
self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.server = TTornado.TTornadoServer(self.processor, self.pfactory, io_loop=self.io_loop)
self.server.bind(self.port)
self.server.start(1)
# client
transport = TTornado.TTornadoStreamTransport('localhost', self.port, io_loop=self.io_loop)
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
self.io_loop.run_sync(transport.open)
self.client = ThriftTest.Client(transport, pfactory)
@gen_test
def test_void(self):
v = yield self.client.testVoid()
self.assertEqual(v, None)
@gen_test
def test_string(self):
v = yield self.client.testString('Python')
self.assertEqual(v, 'Python')
@gen_test
def test_byte(self):
v = yield self.client.testByte(63)
self.assertEqual(v, 63)
@gen_test
def test_i32(self):
v = yield self.client.testI32(-1)
self.assertEqual(v, -1)
v = yield self.client.testI32(0)
self.assertEqual(v, 0)
@gen_test
def test_i64(self):
v = yield self.client.testI64(-34359738368)
self.assertEqual(v, -34359738368)
@gen_test
def test_double(self):
v = yield self.client.testDouble(-5.235098235)
self.assertEqual(v, -5.235098235)
@gen_test
def test_struct(self):
x = Xtruct()
x.string_thing = "Zero"
x.byte_thing = 1
x.i32_thing = -3
x.i64_thing = -5
y = yield self.client.testStruct(x)
self.assertEqual(y.string_thing, "Zero")
self.assertEqual(y.byte_thing, 1)
self.assertEqual(y.i32_thing, -3)
self.assertEqual(y.i64_thing, -5)
def test_oneway(self):
self.client.testOneway(0)
start, end, seconds = self.wait(timeout=1)
self.assertAlmostEqual(seconds, (end - start), places=3)
@gen_test
def test_map(self):
"""
TestHandler.testMap is a coroutine; this test checks that gen.Return() from a coroutine works.
"""
expected = {1: 1}
res = yield self.client.testMap(expected)
self.assertEqual(res, expected)
@gen_test
def test_exception(self):
yield self.client.testException('Safe')
try:
yield self.client.testException('Xception')
except Xception as ex:
self.assertEqual(ex.errorCode, 1001)
self.assertEqual(ex.message, 'Xception')
else:
self.fail("should have gotten exception")
try:
yield self.client.testException('throw_undeclared')
except TTransportException as ex:
pass
else:
self.fail("should have gotten exception")
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(ThriftTestCase))
return suite
if __name__ == '__main__':
unittest.TestProgram(defaultTest='suite',
testRunner=unittest.TextTestRunner(verbosity=1))
|
|
#!/bin/env python3
import sys
import os
import os.path
import glob
import copy
import traceback
import re
import csv
import tempfile
import urllib.request, urllib.parse, urllib.error
import shutil
import atexit
import subprocess
import time
import math
from collections import defaultdict, Counter
from os.path import join, dirname, realpath
try:
sys.path.append(join(dirname(realpath(__file__)),
'..', '..', 'common', 'src'))
except NameError:
pass
from optparse_gui import OptionParser, OptionGroup, GUI, UserCancelledError, ProgressText
from util import ReadFilterFactory, BadRead, ReadGroupFactory
from fisher import *
from pileups import SerialPileups, ThreadedPileups, MultiprocPileups
from chromreg import ChromLabelRegistry
from operator import itemgetter
from os.path import join, dirname, realpath, split
if getattr(sys, 'frozen', False):
scriptdir = dirname(realpath(sys.executable))
if not scriptdir.endswith('/bin') and not scriptdir.endswith('/MacOS'):
scriptdir = realpath(os.path.join(scriptdir,".."))
scriptdirs = [scriptdir]
else:
scriptdir = dirname(realpath(sys.argv[0]))
scriptdir1 = realpath(join(scriptdir, '..', '..', 'ReadCounts', 'src'))
scriptdirs = [scriptdir,scriptdir1]
try:
scriptextn = "." + os.path.split(sys.argv[0])[1].rsplit('.', 1)[1]
except:
scriptextn = ""
from execute import Execute
execprog = Execute(*scriptdirs,extn=scriptextn)
from release import RELEASE, VERSION
VERSION = "%s (%s:%s)"%(VERSION,RELEASE,VERSION)
def excepthook(etype, value, tb):
traceback.print_exception(etype, value, tb)
print("Type <Enter> to Exit...", end=' ', file=sys.stderr)
sys.stderr.flush()
input()
toremove = []
def cleanup():
for d in toremove:
shutil.rmtree(d, ignore_errors=True)
atexit.register(cleanup)
if len(sys.argv) == 2 and sys.argv[1] == '--GUI':
from optparse_gui.needswx import *
sys.exit(1)
if len(sys.argv) == 1:
if not GUI():
print("Graphical user-interface unavailable.",file=sys.stderr)
sys.exit(1)
from optparse_gui import OptionParserGUI
parser = OptionParserGUI(version=VERSION)
error_kwargs = {'exit': False}
sys.excepthook = excepthook
else:
parser = OptionParser(version=VERSION)
error_kwargs = {}
filterFactory = ReadFilterFactory()
filterOptions = [t[0] for t in filterFactory.list()]
filterDesc = []
for o,d in sorted(filterFactory.list()):
filterDesc.append("%s (%s)"%(o,d.strip('.')))
groupFactory = ReadGroupFactory()
groupOptions = ["","None","-"] + [t[0] for t in groupFactory.list()]
groupDesc = []
for o,d in sorted(groupFactory.list()):
groupDesc.append("%s (%s)"%(o,d.strip('.')))
minreads_default = 5
maxreads_default = None
threads_default = 0
filter_default = "Basic"
readgroup_default = "UMI-tools"
advanced = OptionGroup(parser, "Advanced")
parser.add_option("-s", "--snvs", type="files", dest="snvs", default=None,
help="Single-Nucleotide-Variant files. Required.", name="SNV Files",
notNone=True, remember=True,
filetypes=[("SNV Files", "*.vcf;*.csv;*.tsv;*.xls;*.xlsx;*.txt")])
parser.add_option("-r", "--readalignments", type="files", dest="alignments", default=None,
help="Read alignment files in indexed BAM format. Required.", name="Read Alignment Files",
notNone=True, remember=True,
filetypes=[("Read Alignment Files (indexed BAM)", "*.bam")])
parser.add_option("-f", "--alignmentfilter", type="choice", dest="filter", default=filter_default, remember=True,
help="Alignment filtering strategy. Options: %s. Default: Basic."%(", ".join(filterDesc),), choices = filterOptions,
name="Alignment Filter")
advanced.add_option("-m", "--minreads", type="int", dest="minreads", default=minreads_default, remember=True,
help="Minimum number of good reads at SNV locus per alignment file. Default=5.", name="Min. Reads")
advanced.add_option("-M", "--maxreads", type="string", dest="maxreads", default=maxreads_default, remember=True,
help="Scale read counts at high-coverage loci to ensure at most this many good reads at SNV locus per alignment file. Values greater than 1 indicate absolute read counts, otherwise the value indicates the coverage distribution percentile. Default=No maximum.", name="Max. Reads")
advanced.add_option("-t", "--threads", type="int", dest="threads", default=threads_default, remember=True,
help="Worker threads. Indicate no threading/multiprocessing with 0. Default=0.", name="Threads")
advanced.add_option("-G", "--readgroup", type="choice", dest="readgroup", default=readgroup_default, remember=True,
choices=groupOptions, name="Read Group",
help="Additional read grouping based on read name/identifier strings or BAM-file RG. Options: %s. Default: %s."%(", ".join(groupDesc),readgroup_default))
# advanced.add_option("--alignmentfilterparam", type="string", dest="filterparam", default="", remember=True,
# help="Override parameters for selected alignment filter. Default: Do not override.", name="Alignment Filter Param.")
# advanced.add_option("--readgroupparam", type="string", dest="readgroupparam", default="", remember=True,
# help="Override parameters for selected read group. Default: Do not override.", name="Read Group Param.")
advanced.add_option("-b","--barcode_acceptlist", type="file", dest="acceptlist", default=None,
help="File of white-space separated, acceptable read group values (barcode accept list). Overrides value, if any, specified by Read Group. Use None to remove a default accept list.", name="Valid Read Groups",
remember=True,
filetypes=[("Valid Read Groups File", "*.txt;*.tsv")])
advanced.add_option("-F", "--force", action="store_true", dest="force", default=False, remember=True,
help="Force all output files to be re-computed, even if already present. Default: False.", name="Force")
advanced.add_option("-q", "--quiet", action="store_true", dest="quiet", default=False, remember=True,
help="Quiet.", name="Quiet")
parser.add_option("-o", "--output", type="savefile", dest="output", remember=True,
help="Output file. Required.", notNone=True, default=None,
name="Output File", filetypes=[("All output formats", "*.xlsx;*.xls;*.csv;*.tsv;*.txt"),
("Excel", "*.xlsx"), ("Excel2003", "*.xls"),
("CSV", "*.csv"), ("TSV", "*.tsv"), ("Text", "*.txt")])
parser.add_option_group(advanced)
opt = None
while True:
if 'exit' in error_kwargs:
try:
opt, args = parser.parse_args(opts=opt)
except UserCancelledError:
sys.exit(0)
else:
opt, args = parser.parse_args()
try:
if opt.maxreads not in (None,""):
opt.maxreads = float(opt.maxreads)
else:
opt.maxreads = None
except ValueError:
parser.error("Bad Max. Read option",**error_kwargs)
continue
break
readfilter = filterFactory.get(opt.filter)
if opt.readgroup not in ("","None","-"):
readgroupparam = ""
if opt.acceptlist != None:
if opt.acceptlist in ("","None","-"):
readgroupparam = "*:acceptlist=None"
else:
readgroupparam = "*:acceptlist='%s'"%(opt.acceptlist,)
readgroup = groupFactory.get(opt.readgroup,readgroupparam)
else:
readgroup = None
progress = None
if not opt.output:
opt.quiet = True
progress = ProgressText(quiet=opt.quiet)
doublequote = lambda s: '"%s"'%(s,)
indent = lambda s,n: "\n".join([(" "*n)+l for l in s.splitlines()])
args = []
args.extend(["-s",doublequote(" ".join(opt.snvs))])
args.extend(["-r",doublequote(" ".join(opt.alignments))])
if opt.filter != filter_default:
args.extend(["-f",doublequote(opt.filter)])
if opt.minreads != minreads_default:
args.extend(["-m",str(opt.minreads)])
if opt.maxreads != maxreads_default:
args.extend(["-M",str(opt.maxreads)])
if opt.readgroup != readgroup_default:
args.extend(["-G",doublequote(opt.readgroup if readgroup != None else "")])
if opt.acceptlist != None and readgroup != None:
args.extend(["-b",doublequote(opt.acceptlist)])
if opt.threads != threads_default:
args.extend(["-t",str(opt.threads)])
if opt.force:
args.extend(["-F"])
if opt.quiet:
args.extend(["-q"])
args.extend(["-o",doublequote(opt.output)])
cmdargs = " ".join(args)
execution_log = """
scReadCounts Options:
SNV Files (-s): %s
Read Files (-r): %s
Read/Alignment Filter (-f): %s%s
Output File (-o): %s
Advanced:
Min. Reads (-m): %s (applied only to VAF matrix)
Max. Reads (-M): %s
Read Groups (-G): %s%s
Valid Read Groups (-b): %s
Threads (-t): %s
Quiet (-q): %s
Command-Line: scReadCounts %s
"""%(", ".join(opt.snvs),
", ".join(opt.alignments),
opt.filter,
"" if readfilter == None else "\n"+indent(readfilter.tostr(),10),
opt.output,
opt.minreads,
opt.maxreads,
None if readgroup == None else opt.readgroup,
"" if readgroup == None else "\n"+indent(readgroup.tostr(),12),
"" if opt.acceptlist else opt.acceptlist,
opt.threads,
opt.quiet,
cmdargs)
progress.message(execution_log)
args = []
args.extend(["-s"," ".join(opt.snvs)])
args.extend(["-r"," ".join(opt.alignments)])
args.extend(["-f",opt.filter])
args.extend(["-o",opt.output])
args.extend(["-m",0])
if opt.maxreads != maxreads_default:
args.extend(["-M",opt.maxreads])
if readgroup != None:
args.extend(["-G",opt.readgroup])
if opt.acceptlist:
args.extend(["-b",opt.acceptlist])
args.extend(["-t",opt.threads])
if opt.quiet:
args.extend(["-q"])
args = [ str(x) for x in args ]
if os.path.exists(opt.output) and not opt.force:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
progress.message("Skipping readCounts, output file present.")
progress.message(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
else:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>")
progress.message("Execute readCounts...")
progress.message(">>>>>>>>>>>>>>>>>>>>>\n")
execprog.execute("readCounts",*args)
opt.force = True
outbase,extn = opt.output.rsplit('.',1)
outmatrix1 = outbase + '.cnt.matrix.' + extn
outmatrix2 = outbase + '.vaf-m%d.matrix.'%(opt.minreads,) + extn
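# Note (added for clarity): for an output file such as results.xlsx with the
# default minreads of 5, the derived matrix files would be
# results.cnt.matrix.xlsx and results.vaf-m5.matrix.xlsx (names illustrative).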
args = []
args.extend(["-c",opt.output])
args.extend(["-M","Ref;Var"])
args.extend(["-m",0])
if opt.quiet:
args.extend(["-q"])
args.extend(["-o",outmatrix1])
args = [ str(x) for x in args ]
if os.path.exists(outmatrix1) and not opt.force:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
progress.message("Skipping readCountsMatrix for Ref;Var, output file present.")
progress.message(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
else:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
progress.message("Execute readCountsMatrix for Ref;Var matrix...")
progress.message(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
execprog.execute("readCountsMatrix",*args)
args = []
args.extend(["-c",opt.output])
args.extend(["-M","VAF"])
args.extend(["-m",opt.minreads])
if opt.quiet:
args.extend(["-q"])
args.extend(["-o",outmatrix2])
args = [ str(x) for x in args ]
if os.path.exists(outmatrix2) and not opt.force:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
progress.message("Skipping readCountsMatrix for VAF, output file present.")
progress.message(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
else:
progress.message("\n>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
progress.message("Execute readCountsMatrix for VAF matrix...")
progress.message(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n")
execprog.execute("readCountsMatrix",*args)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import django
from django.core.urlresolvers import reverse
from django.forms import widgets
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
import six
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.project.volumes \
.volumes import tables
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
VOLUME_INDEX_URL = reverse('horizon:project:volumes:index')
VOLUME_VOLUMES_TAB_URL = reverse('horizon:project:volumes:volumes_tab')
SEARCH_OPTS = dict(status=api.cinder.VOLUME_STATE_AVAILABLE)
class VolumeViewTests(test.TestCase):
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
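# Note on the pattern used throughout this test case: @test.create_stubs
# replaces the listed API functions with mox mocks, each expected
# Cinder/Glance/quota call is recorded together with its canned return value,
# self.mox.ReplayAll() switches mox into replay mode, and only then is the
# view exercised through the Django test client. Unexpected calls, or
# recorded calls that never happen, fail the test.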
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_without_name(self):
volume = self.cinder_volumes.first()
volume_type = self.volume_types.first()
az = self.cinder_availability_zones.first().zoneName
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': '',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'type': volume_type.name,
'size': 50,
'snapshot_source': '',
'availability_zone': az}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
formData['type'],
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=formData['availability_zone'],
source_volid=None)\
.AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'no_source_type',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': self.images.first().id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_get',
'volume_get',
'volume_type_list'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'volume_snapshot_get',
'volume_snapshot_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A copy of a volume',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'volume_source',
'volume_source': volume.id}
cinder.volume_list(IsA(http.HttpRequest), search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volumes.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest),
volume.id).AndReturn(self.cinder_volumes.first())
cinder.extension_supported(IsA(http.HttpRequest),
'AvailabilityZones').AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=None,
availability_zone=None,
source_volid=volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
redirect_url = VOLUME_VOLUMES_TAB_URL
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_snapshot_list',
'volume_snapshot_get',
'volume_get',
'volume_list',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 250,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 50,
'type': '',
'volume_source_type': 'snapshot_source',
'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=snapshot.id,
image_id=None,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get snapshot from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_snapshot_get',
'volume_type_list',
'volume_type_default',
'volume_get'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_snapshot_invalid_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
snapshot = self.cinder_volume_snapshots.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 20, 'snapshot_source': snapshot.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_get(IsA(http.HttpRequest),
str(snapshot.id)).AndReturn(snapshot)
cinder.volume_get(IsA(http.HttpRequest), snapshot.volume_id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"snapshot_id=" + str(snapshot.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"snapshot size (40GB)")
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 40,
'type': '',
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from url
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_create',
'volume_type_list',
'volume_list',
'volume_snapshot_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_dropdown(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 200,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 30,
'type': '',
'volume_source_type': 'image_source',
'snapshot_source': self.cinder_volume_snapshots.first().id,
'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)) \
.AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
cinder.volume_create(IsA(http.HttpRequest),
formData['size'],
formData['name'],
formData['description'],
'',
metadata={},
snapshot_id=None,
image_id=image.id,
availability_zone=None,
source_volid=None).AndReturn(volume)
self.mox.ReplayAll()
# get image from dropdown list
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
redirect_url = VOLUME_VOLUMES_TAB_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_type_list',
'volume_type_default',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def test_create_volume_from_image_under_image_size(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
image = self.images.first()
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 1, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
# in django 1.6 filesizeformat replaces all spaces with
# non-breaking space characters
if django.VERSION >= (1, 6):
msg = (u"The volume size cannot be less than the "
u"image size (20.0\xa0GB)")
else:
msg = (u"The volume size cannot be less than the "
u"image size (20.0 GB)")
self.assertFormError(res, 'form', None, msg)
@test.create_stubs({cinder: ('volume_type_list',
'volume_type_default',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_get',
'image_list_detailed'),
quotas: ('tenant_limit_usages',)})
def _test_create_volume_from_image_under_image_min_disk_size(self, image):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'description': u'This is a volume I am making for a test.',
'method': u'CreateForm',
'size': 5, 'image_source': image.id}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
api.glance.image_get(IsA(http.HttpRequest),
str(image.id)).AndReturn(image)
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post("?".join([url,
"image_id=" + str(image.id)]),
formData, follow=True)
self.assertEqual(res.redirect_chain, [])
self.assertFormError(res, 'form', None,
"The volume size cannot be less than the "
"image minimum disk size (30GB)")
def test_create_volume_from_image_under_image_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
def test_create_volume_from_image_under_image_property_min_disk_size(self):
image = self.images.get(name="protected_images")
image.min_disk = 0
image.properties['min_disk'] = 30
self._test_create_volume_from_image_under_image_min_disk_size(image)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_list',
'volume_type_default',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_gb_used_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 80,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'This Volume Is Huge!',
'description': u'This is a volume that is just too big!',
'method': u'CreateForm',
'size': 5000}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'A volume of 5000GB cannot be created as you only'
' have 20GB of your quota available.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('volume_snapshot_list',
'volume_type_default',
'volume_type_list',
'volume_list',
'availability_zone_list',
'extension_supported'),
api.glance: ('image_list_detailed',),
quotas: ('tenant_limit_usages',)})
def test_create_volume_number_over_alloted_quota(self):
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': len(self.cinder_volumes.list())}
formData = {'name': u'Too Many...',
'description': u'We have no volumes left!',
'method': u'CreateForm',
'size': 10}
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_list(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.list())
cinder.volume_type_default(IsA(http.HttpRequest)).\
AndReturn(self.volume_types.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=SEARCH_OPTS).\
AndReturn(self.cinder_volume_snapshots.list())
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'is_public': True, 'status': 'active'}) \
.AndReturn([self.images.list(), False, False])
api.glance.image_list_detailed(
IsA(http.HttpRequest),
filters={'property-owner_id': self.tenant.id,
'status': 'active'}) \
.AndReturn([[], False, False])
cinder.volume_list(IsA(
http.HttpRequest),
search_opts=SEARCH_OPTS).AndReturn(self.cinder_volumes.list())
cinder.extension_supported(IsA(http.HttpRequest), 'AvailabilityZones')\
.AndReturn(True)
cinder.availability_zone_list(IsA(http.HttpRequest)).AndReturn(
self.cinder_availability_zones.list())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:create')
res = self.client.post(url, formData)
expected_error = [u'You are already using all of your available'
' volumes.']
self.assertEqual(res.context['form'].errors['__all__'], expected_error)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'volume_delete',),
api.nova: ('server_list',)})
def test_delete_volume(self):
volumes = self.cinder_volumes.list()
volume = self.cinder_volumes.first()
formData = {'action':
'volumes__delete__%s' % volume.id}
cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.volume_delete(IsA(http.HttpRequest), volume.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.volume_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertIn("Scheduled deletion of Volume: Volume name",
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_delete_volume_with_snap_no_action_item(self):
volume = self.cinder_volumes.get(name='Volume name')
setattr(volume, 'has_snapshot', True)
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)). \
MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertNotContains(res, 'Delete Volume')
self.assertNotContains(res, 'delete')
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '/dev/vdb',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
msg = 'Volume %s on instance %s' % (volume.name, servers[0].name)
self.assertContains(res, msg)
# Only one option (the placeholder) is expected in the instance choices,
# as the instance the volume is attached to above is not listed again.
form = res.context['form']
self.assertEqual(len(form.fields['instance']._choices),
1)
self.assertEqual(res.status_code, 200)
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
@override_settings(OPENSTACK_HYPERVISOR_FEATURES={'can_set_mount_point':
True})
def test_edit_attachments_auto_device_name(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
volume.attachments = [{'id': volume.id,
'volume_id': volume.id,
'volume_name': volume.name,
'instance': servers[0],
'device': '',
'server_id': servers[0].id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.TextInput))
self.assertFalse(form.fields['device'].required)
@test.create_stubs({cinder: ('volume_get',), api.nova: ('server_list',)})
def test_edit_attachments_cannot_set_mount_point(self):
volume = self.cinder_volumes.first()
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)).AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
# Assert the device field is hidden.
form = res.context['form']
self.assertTrue(isinstance(form.fields['device'].widget,
widgets.HiddenInput))
@test.create_stubs({cinder: ('volume_get',),
api.nova: ('server_list',)})
def test_edit_attachments_attached_volume(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
server = servers[0]
volume = self.cinder_volumes.list()[0]
cinder.volume_get(IsA(http.HttpRequest), volume.id) \
.AndReturn(volume)
api.nova.server_list(IsA(http.HttpRequest)) \
.AndReturn([servers, False])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:attach',
args=[volume.id])
res = self.client.get(url)
self.assertEqual(res.context['form'].fields['instance']._choices[0][1],
"Select an instance")
self.assertEqual(len(res.context['form'].fields['instance'].choices),
2)
self.assertEqual(res.context['form'].fields['instance']._choices[1][0],
server.id)
self.assertEqual(res.status_code, 200)
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_create_snapshot_button_disabled_when_quota_exceeded(self):
limits = {'maxTotalSnapshots': 1}
limits['totalSnapshotsUsed'] = limits['maxTotalSnapshots']
volume = self.cinder_volumes.first()
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).AndReturn(limits)
self.mox.ReplayAll()
create_link = tables.CreateSnapshot()
url = reverse(create_link.get_link_url(), args=[volume.id])
res_url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(res_url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' class=\"%s disabled\" "\
"id=\"volumes__row_%s__action_snapshots\">%s</a>" \
% (url, " ".join(classes), volume.id, link_name)
self.assertContains(
res, expected_string, html=True,
msg_prefix="The create snapshot button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_list',
'volume_snapshot_list',
'volume_backup_supported',),
api.nova: ('server_list',)})
def test_create_button_disabled_when_quota_exceeded(self):
limits = self.cinder_limits['absolute']
limits['totalVolumesUsed'] = limits['maxTotalVolumes']
volumes = self.cinder_volumes.list()
api.cinder.volume_backup_supported(IsA(http.HttpRequest)). \
MultipleTimes().AndReturn(True)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
self.assertTemplateUsed(res, 'project/volumes/index.html')
volumes = res.context['volumes_table'].data
self.assertItemsEqual(volumes, self.cinder_volumes.list())
create_link = tables.CreateVolume()
url = create_link.get_link_url()
classes = (list(create_link.get_default_classes())
+ list(create_link.classes))
link_name = "%s (%s)" % (six.text_type(create_link.verbose_name),
"Quota exceeded")
expected_string = "<a href='%s' title='%s' class='%s disabled' "\
"id='volumes__action_create' data-update-url=" \
"'/project/volumes/?action=create&table=volumes'> "\
"<span class='fa fa-plus'></span>%s</a>" \
% (url, link_name, " ".join(classes), link_name)
self.assertContains(res, expected_string, html=True,
msg_prefix="The create button is not disabled")
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',),
api.nova: ('server_get',)})
def test_detail_view(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res, "<h1>Volume Details: Volume name</h1>",
1, 200)
self.assertContains(res, "<dd>Volume name</dd>", 1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.id, 1, 200)
self.assertContains(res, "<dd>Available</dd>", 1, 200)
self.assertContains(res, "<dd>40 GB</dd>", 1, 200)
self.assertContains(res,
("<a href=\"/project/instances/1/\">%s</a>"
% server.name),
1,
200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_encrypted(self):
enc_meta = self.cinder_volume_encryption.first()
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<dd>%s</dd>" % volume.volume_type, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.provider, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.control_location, 1,
200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.cipher, 1, 200)
self.assertContains(res, "<dd>%s</dd>" % enc_meta.key_size, 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('volume_get',
'volume_get_encryption_metadata'), })
def test_encryption_detail_view_unencrypted(self):
enc_meta = self.cinder_volume_encryption.list()[1]
volume = self.cinder_volumes.get(name='my_volume2')
cinder.volume_get_encryption_metadata(
IsA(http.HttpRequest), volume.id).AndReturn(enc_meta)
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:encryption_detail',
args=[volume.id])
res = self.client.get(url)
self.assertContains(res,
"<h1>Volume Encryption Details: "
"%s</h1>" % volume.name,
1, 200)
self.assertContains(res, "<h3>Volume is Unencrypted</h3>", 1, 200)
self.assertNoMessages()
@test.create_stubs({cinder: ('tenant_absolute_limits',
'volume_get',)})
def test_get_data(self):
volume = self.cinder_volumes.get(name='v2_volume')
volume._apiresource.name = ""
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {},
HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertEqual(volume.name, volume.id)
@test.create_stubs({cinder: ('volume_get',)})
def test_detail_view_with_exception(self):
volume = self.cinder_volumes.first()
server = self.servers.first()
volume.attachments = [{"server_id": server.id}]
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndRaise(self.exceptions.cinder)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:detail',
args=[volume.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_without_name(self):
volume = self.cinder_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
'',
volume.description)
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
False)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': '',
'description': volume.description,
'bootable': False}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_update',
'volume_set_bootable',
'volume_get',)})
def test_update_volume_bootable_flag(self):
volume = self.cinder_bootable_volumes.get(name="my_volume")
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_update(IsA(http.HttpRequest),
volume.id,
volume.name,
'update bootable flag')
cinder.volume_set_bootable(IsA(http.HttpRequest),
volume.id,
True)
self.mox.ReplayAll()
formData = {'method': 'UpdateForm',
'name': volume.name,
'description': 'update bootable flag',
'bootable': True}
url = reverse('horizon:project:volumes:volumes:update',
args=[volume.id])
res = self.client.post(url, formData)
self.assertRedirectsNoFollow(res, VOLUME_INDEX_URL)
@test.create_stubs({cinder: ('volume_upload_to_image',
'volume_get')})
def test_upload_to_image(self):
volume = self.cinder_volumes.get(name='v2_volume')
loaded_resp = {'container_format': 'bare',
'disk_format': 'raw',
'id': '741fe2ac-aa2f-4cec-82a9-4994896b43fb',
'image_id': '2faa080b-dd56-4bf0-8f0a-0d4627d8f306',
'image_name': 'test',
'size': '2',
'status': 'uploading'}
form_data = {'id': volume.id,
'name': volume.name,
'image_name': 'testimage',
'force': True,
'container_format': 'bare',
'disk_format': 'raw'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_upload_to_image(
IsA(http.HttpRequest),
form_data['id'],
form_data['force'],
form_data['image_name'],
form_data['container_format'],
form_data['disk_format']).AndReturn(loaded_resp)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:upload_to_image',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
self.assertMessageCount(info=1)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',
'volume_extend'),
quotas: ('tenant_limit_usages',)})
def test_extend_volume(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 120}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_extend(IsA(http.HttpRequest),
volume.id,
formData['new_size']).AndReturn(volume)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_wrong_size(self):
volume = self.cinder_volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.cinder_volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 10}
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.cinder_volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormErrors(res, 1,
"New size must be greater than "
"current size.")
@test.create_stubs({cinder: ('volume_get',
'tenant_absolute_limits')})
def test_retype_volume_supported_action_item(self):
volume = self.cinder_volumes.get(name='v2_volume')
limits = self.cinder_limits['absolute']
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
url = (VOLUME_INDEX_URL +
"?action=row_update&table=volumes&obj_id=" + volume.id)
res = self.client.get(url, {}, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
self.assertEqual(res.status_code, 200)
self.assertContains(res, 'Change Volume Type')
self.assertContains(res, 'retype')
@test.create_stubs({cinder: ('volume_get',
'volume_retype',
'volume_type_list')})
def test_retype_volume(self):
volume = self.cinder_volumes.get(name='my_volume2')
volume_type = self.cinder_volume_types.get(name='vol_type_1')
form_data = {'id': volume.id,
'name': volume.name,
'volume_type': volume_type.name,
'migration_policy': 'on-demand'}
cinder.volume_get(IsA(http.HttpRequest), volume.id).AndReturn(volume)
cinder.volume_type_list(
IsA(http.HttpRequest)).AndReturn(self.cinder_volume_types.list())
cinder.volume_retype(
IsA(http.HttpRequest),
volume.id,
form_data['volume_type'],
form_data['migration_policy']).AndReturn(True)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:retype',
args=[volume.id])
res = self.client.post(url, form_data)
self.assertNoFormErrors(res)
redirect_url = VOLUME_INDEX_URL
self.assertRedirectsNoFollow(res, redirect_url)
def test_encryption_false(self):
self._test_encryption(False)
def test_encryption_true(self):
self._test_encryption(True)
@test.create_stubs({cinder: ('volume_list',
'volume_snapshot_list',
'volume_backup_supported',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def _test_encryption(self, encryption):
volumes = self.volumes.list()
for volume in volumes:
volume.encrypted = encryption
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes('backup_supported').AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn(self.cinder_volume_snapshots.list())
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes('limits').AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
rows = res.context['volumes_table'].get_rows()
if encryption:
column_value = 'Yes'
else:
column_value = 'No'
for row in rows:
self.assertEqual(row.cells['encryption'].data, column_value)
@test.create_stubs({cinder: ('volume_get',),
quotas: ('tenant_limit_usages',)})
def test_extend_volume_with_size_out_of_quota(self):
volume = self.volumes.first()
usage_limit = {'maxTotalVolumeGigabytes': 100,
'gigabytesUsed': 20,
'volumesUsed': len(self.volumes.list()),
'maxTotalVolumes': 6}
formData = {'name': u'A Volume I Am Making',
'orig_size': volume.size,
'new_size': 1000}
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
cinder.volume_get(IsA(http.HttpRequest), volume.id).\
AndReturn(self.volumes.first())
quotas.tenant_limit_usages(IsA(http.HttpRequest)).\
AndReturn(usage_limit)
self.mox.ReplayAll()
url = reverse('horizon:project:volumes:volumes:extend',
args=[volume.id])
res = self.client.post(url, formData)
self.assertFormError(res, "form", "new_size",
"Volume cannot be extended to 1000GB as you only "
"have 80GB of your quota available.")
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_create_transfer_availability(self):
limits = self.cinder_limits['absolute']
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(self.volumes.list())
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
api.nova.server_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(limits)
self.mox.ReplayAll()
res = self.client.get(VOLUME_INDEX_URL)
table = res.context['volumes_table']
# Verify that the create transfer action is present if and only if
# the volume is available
for vol in table.data:
actions = [a.name for a in table.get_row_actions(vol)]
self.assertEqual('create_transfer' in actions,
vol.status == 'available')
@test.create_stubs({cinder: ('transfer_create',)})
def test_create_transfer(self):
volumes = self.volumes.list()
volToTransfer = [v for v in volumes if v.status == 'available'][0]
formData = {'volume_id': volToTransfer.id,
'name': u'any transfer name'}
cinder.transfer_create(IsA(http.HttpRequest),
formData['volume_id'],
formData['name']).AndReturn(
self.cinder_volume_transfers.first())
self.mox.ReplayAll()
# Create a transfer for the first available volume
url = reverse('horizon:project:volumes:volumes:create_transfer',
args=[volToTransfer.id])
res = self.client.post(url, formData)
self.assertNoFormErrors(res)
@test.create_stubs({cinder: ('volume_backup_supported',
'volume_list',
'volume_snapshot_list',
'transfer_delete',
'tenant_absolute_limits'),
api.nova: ('server_list',)})
def test_delete_transfer(self):
transfer = self.cinder_volume_transfers.first()
volumes = []
# Attach the volume transfer to the relevant volume
for v in self.cinder_volumes.list():
if v.id == transfer.volume_id:
v.status = 'awaiting-transfer'
v.transfer = transfer
volumes.append(v)
formData = {'action':
'volumes__delete_transfer__%s' % transfer.volume_id}
cinder.volume_backup_supported(IsA(http.HttpRequest))\
.MultipleTimes().AndReturn(False)
cinder.volume_list(IsA(http.HttpRequest), search_opts=None)\
.AndReturn(volumes)
cinder.volume_snapshot_list(IsA(http.HttpRequest),
search_opts=None).\
AndReturn([])
cinder.transfer_delete(IsA(http.HttpRequest), transfer.id)
api.nova.server_list(IsA(http.HttpRequest), search_opts=None).\
AndReturn([self.servers.list(), False])
cinder.tenant_absolute_limits(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn(self.cinder_limits['absolute'])
self.mox.ReplayAll()
url = VOLUME_INDEX_URL
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
self.assertIn('Successfully deleted volume transfer "test transfer"',
[m.message for m in res.context['messages']])
@test.create_stubs({cinder: ('transfer_accept',)})
def test_accept_transfer(self):
transfer = self.cinder_volume_transfers.first()
cinder.transfer_accept(IsA(http.HttpRequest), transfer.id,
transfer.auth_key)
self.mox.ReplayAll()
formData = {'transfer_id': transfer.id, 'auth_key': transfer.auth_key}
url = reverse('horizon:project:volumes:volumes:accept_transfer')
res = self.client.post(url, formData, follow=True)
self.assertNoFormErrors(res)
|
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, First Party Software
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import locale
from django.db import models
from django.conf import settings
from pytz import common_timezones
from django.core.cache import cache
from django.core.urlresolvers import reverse
from rest_framework.authtoken.models import Token
from mptt.fields import TreeForeignKey
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import AbstractUser, Group, UserManager
from servo import defaults
from servo.models.common import Location, Configuration
from servo.models.queue import Queue
from servo.models.customer import Customer
class ActiveManager(UserManager):
def get_queryset(self):
r = super(ActiveManager, self).get_queryset().filter(is_visible=True)
return r.filter(is_active=True)
class TechieManager(UserManager):
def get_queryset(self):
return super(TechieManager, self).get_queryset().filter(tech_id__regex=r'\w{8}')
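# i.e. a user counts as a technician when tech_id contains at least an
# 8-character alphanumeric run (the field is blank for everyone else).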
def active(self):
return self.get_queryset().filter(is_active=True)
class User(AbstractUser):
site = models.ForeignKey(Site, editable=False, default=defaults.site_id)
customer = TreeForeignKey(
Customer,
null=True,
blank=True,
limit_choices_to={'is_company': True}
)
full_name = models.CharField(
max_length=128,
editable=False,
default=_('New User')
)
locations = models.ManyToManyField(Location, blank=True)
# The location this user is currently in
location = models.ForeignKey(
Location,
null=True,
related_name='+',
on_delete=models.PROTECT,
verbose_name=_('Current Location'),
help_text=_(u'Orders you create will be registered to this location.')
)
queues = models.ManyToManyField(Queue, blank=True, verbose_name=_('queues'))
LOCALES = (
('da_DK.UTF-8', _("Danish")),
('nl_NL.UTF-8', _("Dutch")),
('en_US.UTF-8', _("English")),
('et_EE.UTF-8', _("Estonian")),
('fi_FI.UTF-8', _("Finnish")),
('sv_SE.UTF-8', _("Swedish")),
)
locale = models.CharField(
max_length=32,
choices=LOCALES,
default=LOCALES[0][0],
verbose_name=_('language'),
help_text=_("Select which language you want to use Servo in.")
)
TIMEZONES = tuple((t, t) for t in common_timezones)
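# pytz.common_timezones is turned into identical (value, label) choice pairs,
# e.g. (('Africa/Abidjan', 'Africa/Abidjan'), ('Africa/Accra', 'Africa/Accra'), ...)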
timezone = models.CharField(
max_length=128,
choices=TIMEZONES,
default=settings.TIMEZONE,
verbose_name=_('Time zone'),
help_text=_("Your current timezone")
)
REGIONS = (
('da_DK.UTF-8', _("Denmark")),
('et_EE.UTF-8', _("Estonia")),
('fi_FI.UTF-8', _("Finland")),
('en_US.UTF-8', _("United States")),
('nl_NL.UTF-8', _("Netherlands")),
('sv_SE.UTF-8', _("Sweden")),
)
region = models.CharField(
max_length=32,
choices=REGIONS,
default=defaults.locale,
verbose_name=_('region'),
help_text=_("Affects formatting of numbers, dates and currencies.")
)
should_notify = models.BooleanField(
default=True,
verbose_name=_('Enable notifications'),
help_text=_("Enable notifications in the toolbar.")
)
notify_by_email = models.BooleanField(
default=False,
verbose_name=_('email notifications'),
help_text=_("Event notifications will also be emailed to you.")
)
autoprint = models.BooleanField(
default=True,
verbose_name=_('print automatically'),
help_text=_("Opens print dialog automatically.")
)
tech_id = models.CharField(
blank=True,
default='',
max_length=16,
verbose_name=_("tech ID")
)
gsx_userid = models.CharField(
blank=True,
default='',
max_length=128,
verbose_name=_("User ID")
)
gsx_poprefix = models.CharField(
blank=True,
default='',
max_length=8,
verbose_name=_("PO prefix"),
help_text=_("GSX repairs you create will be prefixed")
)
photo = models.ImageField(
null=True,
blank=True,
upload_to="avatars",
verbose_name=_('photo'),
help_text=_("Maximum avatar size is 1MB")
)
is_visible = models.BooleanField(default=True, editable=False)
objects = UserManager()
techies = TechieManager()
active = ActiveManager()
def get_location_list(self):
results = []
for l in self.locations.all():
results.append({'pk': l.pk, 'name': l.title})
return results
@classmethod
def serialize(cls, queryset):
results = []
for u in queryset:
results.append({'pk': u.pk, 'name': u.get_name()})
return results
@classmethod
def refresh_nomail(cls):
users = cls.active.filter(notify_by_email=False)
nomail = [u.email for u in users]
cache.set('nomail', nomail)
@classmethod
def get_checkin_group(cls):
"""
Returns all the active members of the check-in group
"""
group = Configuration.conf('checkin_group')
return cls.active.filter(groups__pk=group)
@classmethod
def get_checkin_group_list(cls):
return cls.serialize(cls.get_checkin_group())
@classmethod
def get_checkin_user(cls):
return cls.objects.get(pk=Configuration.conf('checkin_user'))
def create_token(self):
token = Token.objects.create(user=self)
return token.key
def delete_tokens(self):
self.get_tokens().delete()
def get_tokens(self):
return Token.objects.filter(user=self)
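# The three helpers above wrap rest_framework.authtoken: create_token() issues
# a new API token and returns its key, get_tokens() lists this user's tokens,
# and delete_tokens() revokes them all.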
def notify(self, msg):
pass
def get_group(self):
"""
Returns the user's primary (first) group
"""
return self.groups.first()
def get_icon(self):
return 'icon-star' if self.is_staff else 'icon-user'
def get_name(self):
return self.full_name if len(self.full_name) > 1 else self.username
def get_location(self):
return self.location
def get_unread_message_count(self):
key = '%s_unread_message_count' % self.email
count = cache.get(key, 0)
return count if count > 0 else ""
def get_order_count(self, max_state=2):
count = self.order_set.filter(state__lt=max_state).count()
return count if count > 0 else ""
def order_count_in_queue(self, queue):
count = self.order_set.filter(queue=queue).count()
return count if count > 0 else ""
def save(self, *args, **kwargs):
    self.full_name = u"{0} {1}".format(self.first_name, self.last_name)
    result = super(User, self).save(*args, **kwargs)
    # Refresh the cached list of users who opted out of email notifications
    # only after the row has been written, so this change is included too.
    users = User.objects.filter(notify_by_email=False)
    cache.set('nomail', [u.email for u in users])
    return result
def activate_locale(self):
"""
Activates this user's locale
"""
try:
lc = self.locale.split('.')
region = self.region.split('.')
locale.setlocale(locale.LC_TIME, region)
locale.setlocale(locale.LC_MESSAGES, lc)
locale.setlocale(locale.LC_NUMERIC, region)
locale.setlocale(locale.LC_MONETARY, region)
except Exception:
    # Fall back to the system default locale if the user's locale/region
    # combination is not installed on this host.
    locale.setlocale(locale.LC_ALL, '')
# Return the language code
return self.locale.split('_', 1)[0]
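# Hypothetical usage (not part of this model): a per-request middleware could
# call activate_locale() and hand the returned language code to Django's
# translation machinery, e.g.
#
#   from django.utils import translation
#   translation.activate(request.user.activate_locale())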
def get_avatar(self):
try:
return self.photo.url
except ValueError:
return "/static/images/avatar.png"
def get_admin_url(self):
return reverse('admin-edit_user', args=[self.pk])
def __unicode__(self):
return self.get_name() or self.username
class Meta:
app_label = "servo"
ordering = ("full_name",)
verbose_name = _('User')
verbose_name_plural = _('Users & Groups')
class UserGroup(Group):
site = models.ForeignKey(Site, editable=False, default=defaults.site_id)
def members_as_list(self):
pass
def get_name(self):
return self.name
def get_admin_url(self):
return reverse('admin-edit_group', args=[self.pk])
class Meta:
app_label = 'servo'
|
|
# coding: utf-8
from __future__ import absolute_import
import functools
import re
from flask.ext import login
from flask.ext import wtf
from flask.ext.oauthlib import client as oauth
from google.appengine.ext import ndb
import flask
import unidecode
import wtforms
import cache
import config
import model
import task
import util
from main import app
_signals = flask.signals.Namespace()
###############################################################################
# Flask Login
###############################################################################
login_manager = login.LoginManager()
class AnonymousUser(login.AnonymousUserMixin):
id = 0
admin = False
name = 'Anonymous'
user_db = None
def key(self):
return None
def has_permission(self, permission):
return False
login_manager.anonymous_user = AnonymousUser
class FlaskUser(AnonymousUser):
def __init__(self, user_db):
self.user_db = user_db
self.id = user_db.key.id()
self.name = user_db.name
self.admin = user_db.admin
def key(self):
return self.user_db.key.urlsafe()
def get_id(self):
return self.user_db.key.urlsafe()
def is_authenticated(self):
return True
def is_active(self):
return self.user_db.active
def is_anonymous(self):
return False
def has_permission(self, permission):
return self.user_db.has_permission(permission)
@login_manager.user_loader
def load_user(key):
user_db = ndb.Key(urlsafe=key).get()
if user_db:
return FlaskUser(user_db)
return None
login_manager.init_app(app)
def current_user_id():
return login.current_user.id
def current_user_key():
return login.current_user.user_db.key if login.current_user.user_db else None
def current_user_db():
return login.current_user.user_db
def is_logged_in():
return login.current_user.id != 0
###############################################################################
# Decorators
###############################################################################
def login_required(f):
decorator_order_guard(f, 'auth.login_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in():
return f(*args, **kwargs)
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return decorated_function
def admin_required(f):
decorator_order_guard(f, 'auth.admin_required')
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if is_logged_in() and current_user_db().admin:
return f(*args, **kwargs)
if not is_logged_in() and flask.request.path.startswith('/api/'):
return flask.abort(401)
if not is_logged_in():
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
permission_registered = _signals.signal('permission-registered')
def permission_required(permission=None, methods=None):
def permission_decorator(f):
decorator_order_guard(f, 'auth.permission_required')
# default to decorated function name as permission
perm = permission or f.func_name
meths = [m.upper() for m in methods] if methods else None
permission_registered.send(f, permission=perm)
@functools.wraps(f)
def decorated_function(*args, **kwargs):
if meths and flask.request.method.upper() not in meths:
return f(*args, **kwargs)
if is_logged_in() and current_user_db().has_permission(perm):
return f(*args, **kwargs)
if not is_logged_in():
if flask.request.path.startswith('/api/'):
return flask.abort(401)
return flask.redirect(flask.url_for('signin', next=flask.request.url))
return flask.abort(403)
return decorated_function
return permission_decorator
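# Illustrative usage sketch (hypothetical view function, shown as a comment
# only): the permission name defaults to the decorated function's name when
# omitted, and `methods` limits the check to the listed HTTP verbs.
#
#   @app.route('/reports/', methods=['GET', 'POST'])
#   @permission_required('view_reports', methods=['POST'])
#   def reports():
#       ...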
###############################################################################
# Sign in stuff
###############################################################################
class SignInForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required()],
filters=[util.email_filter],
)
password = wtforms.StringField(
'Password',
[wtforms.validators.required()],
)
remember = wtforms.BooleanField(
'Keep me signed in',
[wtforms.validators.optional()],
)
recaptcha = wtf.RecaptchaField()
next_url = wtforms.HiddenField()
@app.route('/signin/', methods=['GET', 'POST'])
def signin():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignInForm())
save_request_params()
if form.validate_on_submit():
result = get_user_db_from_email(form.email.data, form.password.data)
if result:
cache.reset_auth_attempt()
return signin_user_db(result)
if result is None:
form.email.errors.append('Email or Password do not match')
if result is False:
return flask.redirect(flask.url_for('welcome'))
if not form.errors:
form.next_url.data = next_url
if form and form.errors:
cache.bump_auth_attempt()
return flask.render_template(
'auth/auth.html',
title='Sign in',
html_class='auth',
next_url=next_url,
form=form,
form_type='signin' if config.CONFIG_DB.has_email_authentication else '',
**urls_for_oauth(next_url)
)
###############################################################################
# Sign up stuff
###############################################################################
class SignUpForm(wtf.Form):
email = wtforms.StringField(
'Email',
[wtforms.validators.required(), wtforms.validators.email()],
filters=[util.email_filter],
)
recaptcha = wtf.RecaptchaField()
@app.route('/signup/', methods=['GET', 'POST'])
def signup():
next_url = util.get_next_url()
form = None
if config.CONFIG_DB.has_email_authentication:
form = form_with_recaptcha(SignUpForm())
save_request_params()
if form.validate_on_submit():
user_db = model.User.get_by('email', form.email.data)
if user_db:
form.email.errors.append('This email is already taken.')
if not form.errors:
user_db = create_user_db(
None,
util.create_name_from_email(form.email.data),
form.email.data,
form.email.data,
)
user_db.put()
task.activate_user_notification(user_db)
cache.bump_auth_attempt()
return flask.redirect(flask.url_for('welcome'))
if form and form.errors:
cache.bump_auth_attempt()
title = 'Sign up' if config.CONFIG_DB.has_email_authentication else 'Sign in'
return flask.render_template(
'auth/auth.html',
title=title,
html_class='auth',
next_url=next_url,
form=form,
**urls_for_oauth(next_url)
)
###############################################################################
# Sign out stuff
###############################################################################
@app.route('/signout/')
def signout():
login.logout_user()
return flask.redirect(util.param('next') or flask.url_for('signin'))
###############################################################################
# Helpers
###############################################################################
def url_for_signin(service_name, next_url):
return flask.url_for('signin_%s' % service_name, next=next_url)
def urls_for_oauth(next_url):
return {
'bitbucket_signin_url': url_for_signin('bitbucket', next_url),
'dropbox_signin_url': url_for_signin('dropbox', next_url),
'facebook_signin_url': url_for_signin('facebook', next_url),
'github_signin_url': url_for_signin('github', next_url),
'google_signin_url': url_for_signin('google', next_url),
'gae_signin_url': url_for_signin('gae', next_url),
'instagram_signin_url': url_for_signin('instagram', next_url),
'linkedin_signin_url': url_for_signin('linkedin', next_url),
'microsoft_signin_url': url_for_signin('microsoft', next_url),
'reddit_signin_url': url_for_signin('reddit', next_url),
'twitter_signin_url': url_for_signin('twitter', next_url),
'vk_signin_url': url_for_signin('vk', next_url),
'yahoo_signin_url': url_for_signin('yahoo', next_url),
}
def create_oauth_app(service_config, name):
upper_name = name.upper()
app.config[upper_name] = service_config
service_oauth = oauth.OAuth()
service_app = service_oauth.remote_app(name, app_key=upper_name)
service_oauth.init_app(app)
return service_app
def decorator_order_guard(f, decorator_name):
if f in app.view_functions.values():
raise SyntaxError(
'Do not use %s above app.route decorators as it would not be checked. '
'Instead move the line below the app.route lines.' % decorator_name
)
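# Ordering sketch for the guards above (hypothetical view, shown as a comment
# only): keep app.route on top and the auth decorator directly below it, so
# the function Flask registers is the already-guarded one.
#
#   @app.route('/admin/only/')
#   @admin_required
#   def admin_only():
#       ...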
def save_request_params():
flask.session['auth-params'] = {
'next': util.get_next_url(),
'remember': util.param('remember'),
}
def signin_oauth(oauth_app, scheme=None):
try:
flask.session.pop('oauth_token', None)
save_request_params()
return oauth_app.authorize(callback=flask.url_for(
'%s_authorized' % oauth_app.name, _external=True, _scheme=scheme
))
except oauth.OAuthException:
flask.flash(
'Something went wrong with sign in. Please try again.',
category='danger',
)
return flask.redirect(flask.url_for('signin', next=util.get_next_url()))
def form_with_recaptcha(form):
should_have_recaptcha = cache.get_auth_attempt() >= config.RECAPTCHA_LIMIT
if not (should_have_recaptcha and config.CONFIG_DB.has_recaptcha):
del form.recaptcha
return form
###############################################################################
# User related stuff
###############################################################################
def create_user_db(auth_id, name, username, email='', verified=False, **props):
email = email.lower() if email else ''
if verified and email:
user_dbs, cursors = model.User.get_dbs(email=email, verified=True, limit=2)
if len(user_dbs) == 1:
user_db = user_dbs[0]
user_db.auth_ids.append(auth_id)
user_db.put()
task.new_user_notification(user_db)
return user_db
if isinstance(username, str):
username = username.decode('utf-8')
username = unidecode.unidecode(username.split('@')[0].lower()).strip()
username = re.sub(r'[\W_]+', '.', username)
new_username = username
n = 1
while not model.User.is_username_available(new_username):
new_username = '%s%d' % (username, n)
n += 1
user_db = model.User(
name=name,
email=email,
username=new_username,
auth_ids=[auth_id] if auth_id else [],
verified=verified,
token=util.uuid(),
**props
)
user_db.put()
task.new_user_notification(user_db)
return user_db
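# Illustrative walk-through of the username derivation above (made-up address):
# '[email protected]' -> split on '@' -> 'jane_doe' -> lower/unidecode ->
# 'jane_doe' -> re.sub(r'[\W_]+', '.') -> 'jane.doe'; if 'jane.doe' is taken,
# the loop tries 'jane.doe1', 'jane.doe2', ... until a free username is found.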
@ndb.toplevel
def signin_user_db(user_db):
if not user_db:
return flask.redirect(flask.url_for('signin'))
flask_user_db = FlaskUser(user_db)
auth_params = flask.session.get('auth-params', {
'next': flask.url_for('welcome'),
'remember': False,
})
flask.session.pop('auth-params', None)
if login.login_user(flask_user_db, remember=auth_params['remember']):
user_db.put_async()
return flask.redirect(util.get_next_url(auth_params['next']))
flask.flash('Sorry, but you could not sign in.', category='danger')
return flask.redirect(flask.url_for('signin'))
def get_user_db_from_email(email, password):
user_dbs, cursors = model.User.get_dbs(email=email, active=True, limit=2)
if not user_dbs:
return None
if len(user_dbs) > 1:
flask.flash('''We are sorry but it looks like there is a conflict with
your account. Our support team is already informed and we will get
back to you as soon as possible.''', category='danger')
task.email_conflict_notification(email)
return False
user_db = user_dbs[0]
if user_db.password_hash == util.password_hash(user_db, password):
return user_db
return None
|
|
import logging
import json
from datetime import datetime, timedelta
from unittest import TestCase, mock
from data_enterprise import EnterpriseEnvironment, get_enterprise_data, enterprise_allocate_ids
from keepercommander import api, crypto, utils
from keepercommander.record import Record
from keepercommander.params import KeeperParams
from keepercommander.error import CommandError
from data_vault import VaultEnvironment, get_connected_params
from keepercommander.commands import enterprise, aram
vault_env = VaultEnvironment()
ent_env = EnterpriseEnvironment()
class TestEnterprise(TestCase):
expected_commands = []
def setUp(self):
TestEnterprise.use_data_key = True
TestEnterprise.expected_commands.clear()
self.communicate_mock = mock.patch('keepercommander.api.communicate').start()
self.communicate_mock.side_effect = TestEnterprise.communicate_success
self.query_enterprise_mock = mock.patch('keepercommander.api.query_enterprise').start()
self.query_enterprise_mock.side_effect = TestEnterprise.query_enterprise
def tearDown(self):
mock.patch.stopall()
def test_get_enterprise(self):
params = get_connected_params()
api.query_enterprise(params)
self.assertIsNotNone(params.enterprise)
self.assertEqual(params.enterprise['unencrypted_tree_key'], ent_env.tree_key)
self.assertEqual(len(params.enterprise['nodes']), 2)
def test_get_enterprise_public_key(self):
TestEnterprise.use_data_key = False
params = get_connected_params()
api.query_enterprise(params)
self.assertIsNotNone(params.enterprise)
self.assertEqual(params.enterprise['unencrypted_tree_key'], ent_env.tree_key)
self.assertEqual(len(params.enterprise['nodes']), 2)
def test_enterprise_info_command(self):
params = get_connected_params()
api.query_enterprise(params)
with mock.patch('builtins.print'):
cmd = enterprise.EnterpriseInfoCommand()
cmd.execute(params, verbose=True)
def test_enterprise_add_user(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['enterprise_user_add']
cmd.execute(params, add=True, email='[email protected]')
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_delete_user(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['enterprise_user_delete']
cmd.execute(params, delete=True, force=True, email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_lock_user(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['enterprise_user_lock', 'enterprise_user_lock']
cmd.execute(params, unlock=True, email=[ent_env.user2_email])
cmd.execute(params, lock=True, email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_wrong_user(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
with self.assertRaises(CommandError):
with self.assertLogs(level=logging.WARNING):
cmd.execute(params, lock=True, email=['[email protected]'])
def test_enterprise_expire_password(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['set_master_password_expire']
cmd.execute(params, expire=True, force=True, email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
with mock.patch('keepercommander.commands.enterprise.user_choice') as mock_choice:
TestEnterprise.expected_commands = ['set_master_password_expire']
mock_choice.return_value = 'y'
cmd.execute(params, expire=True, email=[ent_env.user2_email])
with mock.patch('builtins.print'):
self.assertEqual(len(TestEnterprise.expected_commands), 0)
mock_choice.return_value = 'n'
cmd.execute(params, expire=True, email=[ent_env.user2_email])
def test_enterprise_user_update(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['enterprise_user_update']
cmd.execute(params, node='Enterprise 1', email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
TestEnterprise.expected_commands = ['enterprise_user_update']
cmd.execute(params, node='{0}'.format(ent_env.node1_id), email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_user_team(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseUserCommand()
TestEnterprise.expected_commands = ['team_enterprise_user_add']
cmd.execute(params, add_team=[ent_env.team2_uid], email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
TestEnterprise.expected_commands = ['team_enterprise_user_remove']
cmd.execute(params, remove_team=[ent_env.team2_uid], email=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_role(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseRoleCommand()
with mock.patch('builtins.print'):
cmd.execute(params, role=[ent_env.role1_name])
with mock.patch('keepercommander.commands.enterprise.user_choice') as mock_choice:
mock_choice.return_value = 'y'
TestEnterprise.expected_commands = ['role_user_add']
cmd.execute(params, add_user=[ent_env.user2_email], role=[ent_env.role1_id])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
TestEnterprise.expected_commands = ['role_user_remove']
cmd.execute(params, remove_user=[ent_env.user2_email], role=[ent_env.role1_name])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
with self.assertLogs(level=logging.WARNING):
cmd.execute(params, add_user=[ent_env.user2_email], verbose=True, role=['Invalid'])
with mock.patch('builtins.print'):
cmd.execute(params, add_user=['[email protected]'], verbose=True, role=[ent_env.role1_name])
def test_enterprise_team(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseTeamCommand()
with mock.patch('builtins.print'):
cmd.execute(params, team=[ent_env.team1_uid])
TestEnterprise.expected_commands = ['team_add']
cmd.execute(params, add=True, restrict_edit='on', node=str(ent_env.node1_id), team=['Team 3'])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
with mock.patch('keepercommander.commands.enterprise.user_choice') as mock_choice:
TestEnterprise.expected_commands = ['team_delete']
mock_choice.return_value = 'y'
cmd.execute(params, delete=True, team=['Team 1'])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
with mock.patch('builtins.print'):
mock_choice.return_value = 'n'
cmd.execute(params, delete=True, team=[ent_env.team1_uid])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
with self.assertLogs(level=logging.WARNING):
cmd.execute(params, delete=True, team=['Unknown Team'])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_enterprise_team_user(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterpriseTeamCommand()
TestEnterprise.expected_commands = ['team_enterprise_user_add']
cmd.execute(params, add_user=[ent_env.user2_email], team=[ent_env.team1_uid])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
TestEnterprise.expected_commands = ['team_enterprise_user_remove']
cmd.execute(params, remove_user=[ent_env.user2_email], team=[ent_env.team1_uid])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
def test_audit_log_splunk_properties_success(self):
splunk = aram.AuditLogSplunkExport()
props = {}
record = Record()
with mock.patch('builtins.print'), mock.patch('builtins.input') as mock_input, mock.patch('requests.post') as mock_post:
resp1 = mock.Mock()
resp1.status_code = 401
resp1.json.return_value = {'code': 2}
resp2 = mock.Mock()
resp2.status_code = 400
resp2.json.return_value = {'code': 6}
mock_input.side_effect = ['www.splunk.com', 'Splunk Token', KeyboardInterrupt()]
mock_post.side_effect = [resp1, resp2, Exception()]
splunk.get_properties(record, props)
self.assertIn('hec_url', props)
self.assertIn('token', props)
self.assertEqual(props['hec_url'], record.login_url)
self.assertEqual(props['token'], record.password)
self.assertTrue(splunk.store_record)
def test_audit_log_splunk_properties_cancel(self):
splunk = aram.AuditLogSplunkExport()
props = {}
record = Record()
with mock.patch('builtins.print'), mock.patch('builtins.input') as mock_input, mock.patch('requests.post') as mock_post:
resp1 = mock.Mock()
resp1.status_code = 404
mock_input.side_effect = ['www.splunk.com', KeyboardInterrupt()]
mock_post.side_effect = [resp1, Exception()]
with self.assertRaises(KeyboardInterrupt):
splunk.get_properties(record, props)
def test_audit_log_splunk_convert_event(self):
splunk = aram.AuditLogSplunkExport()
props = {
'host': 'h',
'enterprise_name': 'Unittest'
}
splunk.convert_event(props, self.get_audit_event())
def test_audit_audit_report_parse_date_filter(self):
cmd = aram.AuditReportCommand()
epoch_max = int(datetime.now().timestamp())
dt_max = datetime.fromtimestamp(epoch_max)
dt_min = dt_max - timedelta(days=1)
epoch_min = int(dt_min.timestamp())
val = cmd.get_filter(dt_max.strftime('%Y-%m-%dT%H:%M:%SZ'), cmd.convert_date)
self.assertTrue(type(val) == int)
self.assertEqual(epoch_max, val)
rng = cmd.get_filter('>{0}'.format(dt_min.strftime('%Y-%m-%dT%H:%M:%SZ')), cmd.convert_date)
self.assertTrue(type(rng) == dict)
self.assertIn('min', rng)
self.assertIn('exclude_min', rng)
self.assertTrue(rng['exclude_min'])
self.assertNotIn('max', rng)
self.assertEqual(rng['min'], epoch_min)
rng = cmd.get_filter('<= {0}'.format(dt_max.strftime('%Y-%m-%dT%H:%M:%SZ')), cmd.convert_date)
self.assertTrue(type(rng) == dict)
self.assertIn('max', rng)
self.assertFalse(rng.get('exclude_max') or False)
self.assertNotIn('min', rng)
self.assertEqual(rng['max'], epoch_max)
rng = cmd.get_filter('between {0} and {1}'.format(dt_min.strftime('%Y-%m-%dT%H:%M:%SZ'), dt_max.strftime('%Y-%m-%dT%H:%M:%SZ')), cmd.convert_date)
self.assertTrue(type(rng) == dict)
self.assertIn('min', rng)
self.assertIn('max', rng)
self.assertEqual(rng['min'], epoch_min)
self.assertEqual(rng['max'], epoch_max)
def test_audit_audit_report_parse_int_filter(self):
cmd = aram.AuditReportCommand()
arr = cmd.get_filter('In (1,2,3, 4, 6, 5,7, 0)', cmd.convert_int)
self.assertTrue(type(arr) == list)
arr.sort()
self.assertListEqual(arr, [0, 1, 2, 3, 4, 5, 6, 7])
def test_enterprise_push_command(self):
params = get_connected_params()
api.query_enterprise(params)
cmd = enterprise.EnterprisePushCommand()
template_body = '''
[
{
"title": "Record For ${user_name}",
"login": "${user_email}",
"password": "${generate_password}",
"login_url": "https://keepersecurity.com",
"notes": "notes",
"custom_fields": {
"key1": "value1",
"key2": "${user_email}"
}
},
{
"title": "Empty record"
}
]'''
templates = json.loads(template_body)
values = {
'user_name': api.generate_record_uid(),
'generate_password': api.generate_record_uid(),
'user_email': api.generate_record_uid()
}
cmd.enumerate_and_substitute_dict_fields(templates[0], values)
cmd.enumerate_and_substitute_dict_fields(templates[1], values)
self.assertEqual(templates[0]['title'], 'Record For {0}'.format(values['user_name']))
self.assertEqual(templates[0]['password'], values['generate_password'])
self.assertEqual(templates[0]['custom_fields']['key2'], values['user_email'])
self.assertEqual(templates[1]['title'], 'Empty record')
with self.assertRaises(CommandError):
cmd.execute(params, file='template.json')
with self.assertRaises(CommandError):
cmd.execute(params, user=[ent_env.user2_email])
def get_public_keys(_params, emails):
for email in emails:
emails[email] = vault_env.public_key
with mock.patch('builtins.open', mock.mock_open(read_data=template_body)), \
mock.patch('os.path.abspath', return_value='template.json'), \
mock.patch('os.path.isfile', return_value=True), \
mock.patch('keepercommander.commands.enterprise.EnterpriseCommand.get_public_keys') as m_pk:
m_pk.side_effect = get_public_keys
TestEnterprise.expected_commands = ['execute']
cmd.execute(params, file='template.json', team=[ent_env.team1_name])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
TestEnterprise.expected_commands = ['execute']
cmd.execute(params, file='template.json', user=[ent_env.user2_email])
self.assertEqual(len(TestEnterprise.expected_commands), 0)
@staticmethod
def get_audit_event():
return {
'id': 123456789098,
'created': int(datetime.now().timestamp()),
'username': vault_env.user,
'ip_address': '9.9.9.9',
'audit_event_type': 'login',
'keeper_version': 'c14.0.0.0'
}
@staticmethod
def query_enterprise(params): # type: (KeeperParams) -> None
params.enterprise = get_enterprise_data(params)
if params.enterprise:
encrypted_tree_key = utils.base64_url_decode(params.enterprise['tree_key'])
params.enterprise['unencrypted_tree_key'] = crypto.decrypt_aes_v1(encrypted_tree_key, params.data_key)
tree_key = params.enterprise['unencrypted_tree_key']
for key in params.enterprise:
o = params.enterprise[key]
if not isinstance(o, list):
continue
for elem in o:
if not isinstance(elem, dict):
continue
if 'encrypted_data' in elem:
decrypted_data = crypto.decrypt_aes_v1(utils.base64_url_decode(elem['encrypted_data']), tree_key)
elem['data'] = json.loads(decrypted_data.decode('utf-8'))
@staticmethod
def communicate_success(params, request):
# type: (any, dict) -> dict
if request['command'] == 'enterprise_allocate_ids':
return enterprise_allocate_ids(params, request)
rs = {
'result': 'success',
'result_code': '',
'message': ''
}
if request['command'] == 'team_get_keys':
rs['keys'] = [{
'team_uid': x,
'key': api.encrypt_aes(ent_env.team_key, vault_env.data_key),
'type': 1
} for x in request['teams']]
return rs
if request['command'] == 'public_keys':
rs['public_keys'] = [{
'key_owner': x,
'public_key': vault_env.encoded_public_key
} for x in request['key_owners']]
return rs
cmd = TestEnterprise.expected_commands.pop(0)
if cmd == request['command']:
return rs
if request['command'] == 'execute':
request = request['requests'][0]
if cmd == request['command']:
return rs
raise Exception()
|
|
"""Support for SimpliSafe alarm control panels."""
import logging
import re
from simplipy.entity import EntityTypes
from simplipy.system import SystemStates
from simplipy.system.v3 import VOLUME_HIGH, VOLUME_LOW, VOLUME_MEDIUM, VOLUME_OFF
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
FORMAT_TEXT,
AlarmControlPanel,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
CONF_CODE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.util.dt import utc_from_timestamp
from . import SimpliSafeEntity
from .const import DATA_CLIENT, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_ALARM_DURATION = "alarm_duration"
ATTR_ALARM_VOLUME = "alarm_volume"
ATTR_BATTERY_BACKUP_POWER_LEVEL = "battery_backup_power_level"
ATTR_CHIME_VOLUME = "chime_volume"
ATTR_ENTRY_DELAY_AWAY = "entry_delay_away"
ATTR_ENTRY_DELAY_HOME = "entry_delay_home"
ATTR_EXIT_DELAY_AWAY = "exit_delay_away"
ATTR_EXIT_DELAY_HOME = "exit_delay_home"
ATTR_GSM_STRENGTH = "gsm_strength"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_LIGHT = "light"
ATTR_RF_JAMMING = "rf_jamming"
ATTR_VOICE_PROMPT_VOLUME = "voice_prompt_volume"
ATTR_WALL_POWER_LEVEL = "wall_power_level"
ATTR_WIFI_STRENGTH = "wifi_strength"
VOLUME_STRING_MAP = {
VOLUME_HIGH: "high",
VOLUME_LOW: "low",
VOLUME_MEDIUM: "medium",
VOLUME_OFF: "off",
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a SimpliSafe alarm control panel based on a config entry."""
simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
async_add_entities(
[
SimpliSafeAlarm(simplisafe, system, entry.data.get(CONF_CODE))
for system in simplisafe.systems.values()
],
True,
)
class SimpliSafeAlarm(SimpliSafeEntity, AlarmControlPanel):
"""Representation of a SimpliSafe alarm."""
def __init__(self, simplisafe, system, code):
"""Initialize the SimpliSafe alarm."""
super().__init__(system, "Alarm Control Panel")
self._changed_by = None
self._code = code
self._simplisafe = simplisafe
self._state = None
if self._system.version == 3:
self._attrs.update(
{
ATTR_ALARM_DURATION: self._system.alarm_duration,
ATTR_ALARM_VOLUME: VOLUME_STRING_MAP[self._system.alarm_volume],
ATTR_BATTERY_BACKUP_POWER_LEVEL: self._system.battery_backup_power_level,
ATTR_CHIME_VOLUME: VOLUME_STRING_MAP[self._system.chime_volume],
ATTR_ENTRY_DELAY_AWAY: self._system.entry_delay_away,
ATTR_ENTRY_DELAY_HOME: self._system.entry_delay_home,
ATTR_EXIT_DELAY_AWAY: self._system.exit_delay_away,
ATTR_EXIT_DELAY_HOME: self._system.exit_delay_home,
ATTR_GSM_STRENGTH: self._system.gsm_strength,
ATTR_LIGHT: self._system.light,
ATTR_RF_JAMMING: self._system.rf_jamming,
ATTR_VOICE_PROMPT_VOLUME: VOLUME_STRING_MAP[
self._system.voice_prompt_volume
],
ATTR_WALL_POWER_LEVEL: self._system.wall_power_level,
ATTR_WIFI_STRENGTH: self._system.wifi_strength,
}
)
@property
def changed_by(self):
"""Return info about who changed the alarm last."""
return self._changed_by
@property
def code_format(self):
"""Return one or more digits/characters."""
if not self._code:
return None
if isinstance(self._code, str) and re.search("^\\d+$", self._code):
return FORMAT_NUMBER
return FORMAT_TEXT
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def _validate_code(self, code, state):
"""Validate given code."""
check = self._code is None or code == self._code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, "disarming"):
return
await self._system.set_off()
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, "arming home"):
return
await self._system.set_home()
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, "arming away"):
return
await self._system.set_away()
async def async_update(self):
"""Update alarm status."""
last_event = self._simplisafe.last_event_data[self._system.system_id]
if last_event.get("pinName"):
self._changed_by = last_event["pinName"]
if self._system.state == SystemStates.error:
self._online = False
return
self._online = True
if self._system.alarm_going_off:
self._state = STATE_ALARM_TRIGGERED
elif self._system.state == SystemStates.away:
self._state = STATE_ALARM_ARMED_AWAY
elif self._system.state in (
SystemStates.away_count,
SystemStates.exit_delay,
SystemStates.home_count,
):
self._state = STATE_ALARM_ARMING
elif self._system.state == SystemStates.home:
self._state = STATE_ALARM_ARMED_HOME
elif self._system.state == SystemStates.off:
self._state = STATE_ALARM_DISARMED
else:
self._state = None
try:
last_event_sensor_type = EntityTypes(last_event["sensorType"]).name
except ValueError:
_LOGGER.warning(
                'Encountered unknown entity type: %s ("%s"). Please report it at '
"https://github.com/home-assistant/home-assistant/issues.",
last_event["sensorType"],
last_event["sensorName"],
)
last_event_sensor_type = None
self._attrs.update(
{
ATTR_LAST_EVENT_INFO: last_event["info"],
ATTR_LAST_EVENT_SENSOR_NAME: last_event["sensorName"],
ATTR_LAST_EVENT_SENSOR_TYPE: last_event_sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: utc_from_timestamp(
last_event["eventTimestamp"]
),
ATTR_LAST_EVENT_TYPE: last_event["eventType"],
}
)
|
|
"""Test sensor of Brother integration."""
from datetime import datetime, timedelta
import json
from unittest.mock import Mock, patch
from homeassistant.components.brother.const import DOMAIN
from homeassistant.components.brother.sensor import UNIT_PAGES
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN as SENSOR_DOMAIN,
SensorDeviceClass,
SensorStateClass,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
PERCENTAGE,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry as er
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import UTC, utcnow
from tests.common import async_fire_time_changed, load_fixture
from tests.components.brother import init_integration
ATTR_REMAINING_PAGES = "remaining_pages"
ATTR_COUNTER = "counter"
async def test_sensors(hass):
"""Test states of the sensors."""
entry = await init_integration(hass, skip_setup=True)
registry = er.async_get(hass)
# Pre-create registry entries for disabled by default sensors
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456789_uptime",
suggested_object_id="hl_l2340dw_uptime",
disabled_by=None,
)
test_time = datetime(2019, 11, 11, 9, 10, 32, tzinfo=UTC)
with patch("brother.datetime", utcnow=Mock(return_value=test_time)), patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("printer_data.json", "brother")),
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer"
assert state.state == "waiting"
assert state.attributes.get(ATTR_STATE_CLASS) is None
entry = registry.async_get("sensor.hl_l2340dw_status")
assert entry
assert entry.unique_id == "0123456789_status"
state = hass.states.get("sensor.hl_l2340dw_black_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "75"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_black_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_black_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_cyan_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "10"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_cyan_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_cyan_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_magenta_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "8"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_magenta_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_magenta_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_yellow_toner_remaining")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d-nozzle"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "2"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_yellow_toner_remaining")
assert entry
assert entry.unique_id == "0123456789_yellow_toner_remaining"
state = hass.states.get("sensor.hl_l2340dw_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 11014
assert state.attributes.get(ATTR_COUNTER) == 986
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_black_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_black_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_black_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_cyan_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_cyan_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_magenta_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_magenta_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:chart-donut"
assert state.attributes.get(ATTR_REMAINING_PAGES) == 16389
assert state.attributes.get(ATTR_COUNTER) == 1611
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "92"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_yellow_drum_remaining_life")
assert entry
assert entry.unique_id == "0123456789_yellow_drum_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_fuser_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:water-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_fuser_remaining_life")
assert entry
assert entry.unique_id == "0123456789_fuser_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:current-ac"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "97"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_belt_unit_remaining_life")
assert entry
assert entry.unique_id == "0123456789_belt_unit_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:printer-3d"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "98"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_pf_kit_1_remaining_life")
assert entry
assert entry.unique_id == "0123456789_pf_kit_1_remaining_life"
state = hass.states.get("sensor.hl_l2340dw_page_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "986"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_page_counter")
assert entry
assert entry.unique_id == "0123456789_page_counter"
state = hass.states.get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "538"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_duplex_unit_pages_counter")
assert entry
assert entry.unique_id == "0123456789_duplex_unit_pages_counter"
state = hass.states.get("sensor.hl_l2340dw_b_w_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "709"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_b_w_counter")
assert entry
assert entry.unique_id == "0123456789_b/w_counter"
state = hass.states.get("sensor.hl_l2340dw_color_counter")
assert state
assert state.attributes.get(ATTR_ICON) == "mdi:file-document-outline"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UNIT_PAGES
assert state.state == "902"
assert state.attributes.get(ATTR_STATE_CLASS) == SensorStateClass.MEASUREMENT
entry = registry.async_get("sensor.hl_l2340dw_color_counter")
assert entry
assert entry.unique_id == "0123456789_color_counter"
state = hass.states.get("sensor.hl_l2340dw_uptime")
assert state
assert state.attributes.get(ATTR_ICON) is None
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
assert state.attributes.get(ATTR_DEVICE_CLASS) == SensorDeviceClass.TIMESTAMP
assert state.state == "2019-09-24T12:14:56+00:00"
assert state.attributes.get(ATTR_STATE_CLASS) is None
entry = registry.async_get("sensor.hl_l2340dw_uptime")
assert entry
assert entry.unique_id == "0123456789_uptime"
async def test_disabled_by_default_sensors(hass):
"""Test the disabled by default Brother sensors."""
await init_integration(hass)
registry = er.async_get(hass)
state = hass.states.get("sensor.hl_l2340dw_uptime")
assert state is None
entry = registry.async_get("sensor.hl_l2340dw_uptime")
assert entry
assert entry.unique_id == "0123456789_uptime"
assert entry.disabled
assert entry.disabled_by is er.RegistryEntryDisabler.INTEGRATION
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when device is offline."""
await init_integration(hass)
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
future = utcnow() + timedelta(minutes=5)
with patch("brother.Brother._get_data", side_effect=ConnectionError()):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=10)
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("printer_data.json", "brother")),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.hl_l2340dw_status")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "waiting"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeassistant/update_entity."""
await init_integration(hass)
data = json.loads(load_fixture("printer_data.json", "brother"))
await async_setup_component(hass, "homeassistant", {})
with patch(
"homeassistant.components.brother.Brother.async_update", return_value=data
) as mock_update:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.hl_l2340dw_status"]},
blocking=True,
)
assert len(mock_update.mock_calls) == 1
|
|
import numpy as np
import scipy as sp
from scipy import interpolate
#====================================================================
# EOSMod: Equation of State Model
# eoslib- library of common equation of state models
#====================================================================
# mgd_ref_model()
# ref_model: BM3, BM4, VIN, LOG
# ref_path: S, T, (P),
# gamma_model: powlaw, shift_powlaw
#
#
# set_therm_model( 'MGD', eos_d )
# set_ref_model( 'MGD', FixS=True, eos_d )
# set_ref_model( 'MGD', FixT=True, eos_d )
#
# energy_therm_f = eos_d['func_d']['energy_therm_f']
# gamma_f = eos_d['func_d']['gamma_ref_f']
# temp_debye_f = eos_d['func_d']['temp_scale_ref_f']
# temp_ref_f = eos_d['func_d']['temp_ref_f']
#====================================================================
# SECT 0: Reference Compression Profiles
#====================================================================
#====================================================================
def set_param( name_l, val_l, eos_d ):
if 'param_d' in eos_d.keys():
param_d = eos_d['param_d']
else:
param_d = {}
eos_d['param_d'] = param_d
for name, val in zip( name_l, val_l ):
param_d[name] = val
#====================================================================
def set_const( name_l, val_l, eos_d ):
if 'const_d' in eos_d.keys():
const_d = eos_d['const_d']
else:
const_d = init_const()
eos_d['const_d'] = const_d
for name, val in zip( name_l, val_l ):
const_d[name] = val
#====================================================================
def set_func( name_l, val_l, eos_d ):
    # Store function objects under 'func_d', distinct from scalar params in 'param_d'
    if 'func_d' in eos_d.keys():
        func_d = eos_d['func_d']
    else:
        func_d = {}
        eos_d['func_d'] = func_d
    for name, val in zip( name_l, val_l ):
        func_d[name] = val
#====================================================================
def init_const():
const_d = {}
const_d['eVperHa'] = 27.211 # eV/Ha
const_d['JperHa'] = 4.35974434e-18 # J/Ha
const_d['JperCal'] = 4.184 # J/Cal
const_d['Nmol'] = 6.0221413e+23 # atoms/mol
const_d['R'] = 8.314462 # J/K/mol
const_d['kboltz'] = 8.617332e-5 # eV/K
const_d['ang3percc'] = 1e24 # ang^3/cm^3
const_d['PV_ratio'] = 160.2176487 # (GPa*ang^3)/eV
const_d['TS_ratio'] = const_d['R']/const_d['kboltz'] # (J/mol)/eV
return const_d
#====================================================================
#====================================================================
# SECT 1: Reference Compression Profiles
#====================================================================
#====================================================================
# BM3- Birch Murnaghan 3rd Order
#====================================================================
def press_BM3( V_a, eos_d ):
# Retrieve parameter values
param_d = eos_d['param_d']
V0 = param_d['V0']
K0 = param_d['K0']
KP0 = param_d['KP0']
vratio_a = V_a/V0
press_a = 3.0/2*K0 * (vratio_a**(-7.0/3) - vratio_a**(-5.0/3)) * \
(1 + 3.0/4*(KP0-4)*(vratio_a**(-2.0/3)-1))
return press_a
#====================================================================
def energy_BM3( V_a, eos_d ):
# Retrieve parameter values
param_d = eos_d['param_d']
V0 = param_d['V0']
K0 = param_d['K0']
KP0 = param_d['KP0']
E0 = param_d['E0']
# Retrieve unit conversion ratio
PV_ratio = eos_d['const_d']['PV_ratio']
vratio_a = V_a/V0
fstrain_a = 0.5*(vratio_a**(-2.0/3) - 1)
energy_a = E0 + 9.0/2*(V0*K0/PV_ratio)*\
( KP0*fstrain_a**3 + fstrain_a**2*(1-4*fstrain_a) )
return energy_a
#====================================================================
#====================================================================
# SECT 2: Thermal EOS
#====================================================================
#====================================================================
# Gruneisen Model
#====================================================================
def press_mie_grun( V_a, T_a, eos_d ):
    V_a, T_a = fill_array( V_a, T_a )
    # units const
    PV_ratio = eos_d['const_d']['PV_ratio']
    # Needed functions
    energy_therm_f = eos_d['func_d']['energy_therm_f']
    gamma_f = eos_d['func_d']['gamma_ref_f']
    energy_therm_a = energy_therm_f( V_a, T_a, eos_d )
    gamma_a = gamma_f( V_a, eos_d )
    # Mie-Gruneisen relation: P_therm = (gamma/V)*E_therm, with PV_ratio
    # converting eV/ang^3 to GPa
    press_therm_a = PV_ratio*(gamma_a/V_a)*energy_therm_a
    return press_therm_a
#====================================================================
def gamma_powlaw( V_a, eos_d ):
# get parameter values
param_d = eos_d['param_d']
V0 = param_d['V0']
gamma0 = param_d['gamma0']
q = param_d['q']
gamma_a = gamma0 *(V_a/V0)**q
return gamma_a
#====================================================================
def temp_powlaw( V_a, T0, eos_d ):
"""
Return temperature for debye model
V_a: sample volume array
T0: temperature at V=V0
"""
# get parameter values
param_d = eos_d['param_d']
V0 = param_d['V0']
gamma0 = param_d['gamma0']
q = param_d['q']
gamma_a = gamma_powlaw( V_a, eos_d )
T_a = T0*np.exp( -(gamma_a - gamma0)/q )
return T_a
#====================================================================
#====================================================================
# Debye Model
#====================================================================
def energy_debye( V_a, T_a, eos_d ):
    '''
    Thermal Energy for Debye model
    Relies on reference profile functions stored in eos_d['func_d']:
      * temp_scale_ref_f( V_a, eos_d ): Debye temperature along the reference path
      * temp_ref_f( V_a, eos_d ): temperature of the reference path itself
    '''
V_a, T_a = fill_array( V_a, T_a )
# get parameter values
Cvmax = eos_d['param_d']['Cvmax']
TS_ratio = eos_d['const_d']['TS_ratio']
# get eos funcs
temp_debye_f = eos_d['func_d']['temp_scale_ref_f']
temp_ref_f = eos_d['func_d']['temp_ref_f']
theta_a = temp_debye_f( V_a, eos_d )
Tref_a = temp_ref_f( V_a, eos_d )
energy_therm_a = (Cvmax/TS_ratio) \
*( T_a*debye_func( theta_a/T_a ) - Tref_a*debye_func( theta_a/Tref_a ) )
return energy_therm_a
#====================================================================
def entropy_debye( V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
# get parameter values
param_d = eos_d['param_d']
T0 = param_d['T0']
theta0 = param_d['theta0']
Cvmax = param_d['Cvmax']
TS_ratio = eos_d['const_d']['TS_ratio']
theta_f = eos_d['func_d']['temp_scale_ref_f']
theta_a = theta_f( V_a, eos_d )
x_a = theta_a/T_a
# entropy_a = Cvmax*Cv_const/3. \
# *(4*debye_func( x_a )-3*np.log( 1-np.exp( -x_a ) ) )
entropy_a = 1.0/3*(Cvmax/TS_ratio) \
*(4*debye_func( x_a )-3*np.log( np.exp( x_a ) - 1 ) + 3*x_a)
return entropy_a
#====================================================================
def heat_capacity_V_debye( V_a, T_a, eos_d ):
V_a, T_a = fill_array( V_a, T_a )
# get parameter values
Cvmax = eos_d['param_d']['Cvmax']
TS_ratio = eos_d['const_d']['TS_ratio']
# get funcs
temp_debye_f = eos_d['func_d']['temp_scale_ref_f']
theta_a = temp_debye_f( V_a, eos_d )
# The reference adiabat terms in the internal energy are temperature
# independent, and thus play no role in heat capacity
x_a = theta_a/T_a
heat_capacity_a = (Cvmax/TS_ratio)*(4*debye_func( x_a )-3*x_a/(np.exp(x_a)-1))
return heat_capacity_a
#====================================================================
def debye_func( x_a ):
"""
Return debye integral value
- calculation done using interpolation in a lookup table
- interpolation done in log-space where behavior is close to linear
- linear extrapolation is implemented manually
"""
if np.isscalar( x_a ):
assert x_a >= 0, 'x_a values must be greater than zero.'
else:
assert all( x_a >= 0 ), 'x_a values must be greater than zero.'
# Lookup table
# interpolate in log space where behavior is nearly linear
debyex_a = np.array( [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0,
1.1, 1.2, 1.3, 1.4, 1.6, 1.8, 2.0, 2.2, 2.4, 2.6, 2.8,
3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.4, 4.6, 4.8, 5.0,
5.5, 6.0, 6.5, 7.0, 7.5, 8.0, 8.5, 9.0, 9.5, 10.0] )
debyelogf_a = np.array( [ 0.0, -0.03770187, -0.07580279, -0.11429475,
-0.15316866, -0.19241674, -0.2320279 , -0.27199378,
-0.31230405, -0.35294619, -0.39390815, -0.43518026,
-0.47674953, -0.51860413, -0.56072866, -0.64573892,
-0.73167389, -0.81841793, -0.90586032, -0.99388207,
-1.08236598, -1.17119911, -1.26026101, -1.34944183,
-1.43863241, -1.52771969, -1.61660856, -1.70519469,
-1.79338479, -1.88108917, -1.96822938, -2.05471771,
-2.14049175, -2.35134476, -2.55643273, -2.75507892,
-2.94682783, -3.13143746, -3.30880053, -3.47894273,
-3.64199587, -3.79820337, -3.94785746] )
# Create interpolation function
logdeb_func = interpolate.interp1d( debyex_a, debyelogf_a, kind='cubic',
bounds_error=False, fill_value=np.nan )
logfval_a = logdeb_func( x_a )
# Check for extrapolated values indicated by NaN
# - replace with linear extrapolation
logfextrap_a = debyelogf_a[-1] + (x_a - debyex_a[-1]) \
*(debyelogf_a[-1]-debyelogf_a[-2])/(debyex_a[-1]-debyex_a[-2])
logfval_a = np.where( x_a > debyex_a[-1], logfextrap_a, logfval_a )
# exponentiate to get integral value
return np.exp( logfval_a )
#====================================================================
#====================================================================
# SECT N: Code Utility Functions
#====================================================================
def fill_array( var1, var2 ):
    """
    Return var1 and var2 as numpy arrays of equal shape.
    A scalar (or size-1) input is expanded against the other argument using
    numpy.full_like; otherwise the two inputs must already share a shape.
    """
    var1_a = np.asarray( var1 )
    var2_a = np.asarray( var2 )
    if var1_a.shape==():
        var1_a = np.asarray( [var1] )
    if var2_a.shape==():
        var2_a = np.asarray( [var2] )
    # Try each filling strategy in turn until one succeeds
    while True:
        try:
            assert var1_a.shape == var2_a.shape
            break
        except Exception: pass
        try:
            var1_a = np.full_like( var2_a, var1_a )
            break
        except Exception: pass
        try:
            var2_a = np.full_like( var1_a, var2_a )
            break
        except Exception: pass
        # If none of the cases properly handle it, throw error
        assert False, 'var1 and var2 must both be equal shape or size=1'
    return var1_a, var2_a
#====================================================================
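#====================================================================
# Minimal usage sketch of the interfaces above (illustrative only: the
# parameter values below are placeholders, not fitted to any material).
#====================================================================
if __name__ == '__main__':
    eos_d = {}
    # default physical constants (PV_ratio, TS_ratio, ...)
    set_const( [], [], eos_d )
    # BM3 reference compression curve
    set_param( ['V0','K0','KP0','E0'], [11.24, 160.0, 4.0, 0.0], eos_d )
    V_a = eos_d['param_d']['V0']*np.linspace( 0.7, 1.0, 4 )
    print( 'P [GPa]: {0}'.format( press_BM3( V_a, eos_d ) ) )
    print( 'E [eV]:  {0}'.format( energy_BM3( V_a, eos_d ) ) )
    # power-law Gruneisen profile registered through the func_d registry
    set_param( ['gamma0','q'], [1.5, 1.0], eos_d )
    set_func( ['gamma_ref_f'], [gamma_powlaw], eos_d )
    print( 'gamma:   {0}'.format( eos_d['func_d']['gamma_ref_f']( V_a, eos_d ) ) )
    # Debye integral from the log-space lookup table
    print( 'D(x):    {0}'.format( debye_func( np.array([0.5, 1.0, 2.0]) ) ) )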
|
|
#!/usr/bin/python
# Copyright (c) 2014 Jon Maur
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################
# tilemapcompiler.py
# Generates tilemaps for use with MOAI with Draggin.
from PIL import Image
import sys
import math
from math import ceil, log
from fnmatch import fnmatch
import os
import os.path
# matches anything in the list
def fnmatchList(_name, _filters):
for filter in _filters:
if fnmatch(_name, filter):
# found one match so get out of here
return True
# no matches
return False
# loop through all the params and grab the image filenames
def gatherFiles(_sequence, _filters):
print "Gathering filenames..."
global outFilename
filenames = []
for i in _sequence:
if os.path.isdir(i):
print("\t"+i+":")
os.listdir(i)
outFilename = i
print "outFilename: "+outFilename
for root, dirs, files in os.walk(i):
for name in files:
if fnmatchList(name , _filters):
filenames.append(os.path.join(i, name))
print("\t\t"+name)
#print getAnimationAndFrameFromFilename(name)
else:
if fnmatchList(i , _filters):
filenames.append(i)
#print("\t"+i)
#print getAnimationAndFrameFromFilename(i)
return filenames
class Tile:
def __init__(self):
self.image = None
class TileMap:
def __init__(self, _imgFile, _tileWidth, _tileHeight):
# open the image and convert it
self.mapImage = Image.open(_imgFile)
self.mapImage.draft("RGBA", self.mapImage.size)
self.mapImage = self.mapImage.convert("RGBA")
# tile size
self.tileWidth = _tileWidth
self.tileHeight = _tileHeight
# the array of tiles
self.tiles = []
self.mapWidthInTiles = self.mapImage.size[0] / self.tileWidth
self.mapHeightInTiles = self.mapImage.size[1] / self.tileHeight
# the map data, an array of tile indices
self.mapData = []
# make a blank tile for number 0
blankTile = Image.new("RGBA", (self.tileWidth, self.tileHeight), (0,0,0,0))
self.tiles.append(blankTile)
for y in range(self.mapHeightInTiles):
for x in range(self.mapWidthInTiles):
box = self.tileWidth * x, self.tileHeight * y, self.tileWidth * (x+1), self.tileHeight * (y+1)
tile = self.mapImage.crop(box)
self.mapData.append(self.findTile(tile))
# look for the tile in the list of tiles, if not found then add it
def findTile(self, tile):
i = 0
tileStr = tile.tostring()
for t in self.tiles:
if t.tostring() == tileStr:
return i
i += 1
self.tiles.append(tile)
return i
def saveMoaiLua(self, outFilename):
print "Num tiles: ", len(self.tiles)
#print "MapData: ", self.mapData
# this equation totally falls apart for 3, so I'll just special case that
if len(self.tiles) != 3:
resultWidth = int(math.ceil(math.sqrt(len(self.tiles))) * (self.tileWidth+2))
resultHeight = int(math.ceil(math.sqrt(len(self.tiles))) * (self.tileHeight+2))
else:
resultWidth = 2 * (self.tileWidth+2)
resultHeight = 2 * (self.tileHeight+2)
tilesetImg = Image.new("RGBA", (resultWidth, resultHeight))
x = 0
y = 0
offsetX = 0
offsetY = 0
rowX = 0 #tilesetImg.size[0]
rowY = 0 #tilesetImg.size[1]
for t in self.tiles:
offsetX = ((self.tileWidth+2) - t.size[0]) / 2
offsetY = ((self.tileHeight+2) - t.size[1]) / 2
x = rowX + offsetX
y = rowY + offsetY
# "bleed" the tile outward by pasting it shifted one pixel in all eight
# directions before the final centered paste; this fills the 1px border
# around each tile so texture filtering doesn't sample neighbouring tiles
tilesetImg.paste(t, (x-1, y-1))
tilesetImg.paste(t, (x+1, y+1))
tilesetImg.paste(t, (x-1, y+1))
tilesetImg.paste(t, (x+1, y-1))
tilesetImg.paste(t, (x-1, y))
tilesetImg.paste(t, (x+1, y))
tilesetImg.paste(t, (x, y+1))
tilesetImg.paste(t, (x, y-1))
# finally paste it in
tilesetImg.paste(t, (x, y))
rowX = rowX + (self.tileWidth+2)
if rowX > tilesetImg.size[0] - (self.tileWidth+2):
rowX = 0
rowY = rowY + (self.tileHeight+2)
print("Saving: "+outFilename+"_tileset.png")
tilesetImg.save(outFilename+"_tileset.png")
# write out the .lua file
luaFile = open(outFilename+".lua", "w")
luaFile.write('-- Generated by tilemapcompiler.py for Moai\n\n')
luaFile.write('return {\n')
luaFile.write('\ttileset = "'+str(os.path.basename(outFilename))+'_tileset.png",\n')
luaFile.write('\ttilesetWidth = '+str(tilesetImg.size[0])+',\n')
luaFile.write('\ttilesetHeight = '+str(tilesetImg.size[1])+',\n')
luaFile.write('\tmapWidthInTiles = '+str(self.mapWidthInTiles)+',\n')
luaFile.write('\tmapHeightInTiles = '+str(self.mapHeightInTiles)+',\n')
luaFile.write('\ttileWidth = '+str(self.tileWidth)+',\n')
luaFile.write('\ttileHeight = '+str(self.tileHeight)+',\n')
luaFile.write('\tdata = {')
for y in range(self.mapHeightInTiles):
luaFile.write('\n\t\t')
for x in range(self.mapWidthInTiles):
luaFile.write(str(self.mapData[y*self.mapWidthInTiles + x]+1)+', ')
luaFile.write('\n\t}\n')
luaFile.write('}\n')
print("Saving: "+outFilename+".lua")
# run the code here if it's not a module
if __name__ == "__main__":
print("Bucket Tilemap Compiler:")
if (len(sys.argv) == 1):
print("No arguments found.")
print("Commandline usage: tilemapcompiler [space separated list of files and/or folders]")
else:
imageFilenames = gatherFiles(sys.argv[1:], ("*.png", "*.bmp"))
tm = TileMap(imageFilenames[0], 16, 16)
tm.saveMoaiLua(os.path.splitext(imageFilenames[0])[0])
print "Done."
|
|
# Copyright (C) 2011, 2012 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import logging
import struct
import sys
import functools
from ryu import exception
from ryu import utils
from ryu.lib import stringify
from . import ofproto_common
LOG = logging.getLogger('ryu.ofproto.ofproto_parser')
def header(buf):
assert len(buf) >= ofproto_common.OFP_HEADER_SIZE
# LOG.debug('len %d bufsize %d', len(buf), ofproto.OFP_HEADER_SIZE)
return struct.unpack_from(ofproto_common.OFP_HEADER_PACK_STR, buffer(buf))
_MSG_PARSERS = {}
def register_msg_parser(version):
def register(msg_parser):
_MSG_PARSERS[version] = msg_parser
return msg_parser
return register
def msg(datapath, version, msg_type, msg_len, xid, buf):
assert len(buf) >= msg_len
msg_parser = _MSG_PARSERS.get(version)
if msg_parser is None:
raise exception.OFPUnknownVersion(version=version)
try:
return msg_parser(datapath, version, msg_type, msg_len, xid, buf)
except:
LOG.exception(
'Encountered an error while parsing an OpenFlow packet from a switch. '
'This implies the switch sent a malformed OpenFlow packet. '
'version 0x%02x msg_type %d msg_len %d xid %d buf %s',
version, msg_type, msg_len, xid, utils.bytearray_to_hex(buf))
return None
def create_list_of_base_attributes(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
ret = f(self, *args, **kwargs)
cls = self.__class__
# hasattr(cls, '_base_attributes') doesn't work because super class
# may already have the attribute.
if '_base_attributes' not in cls.__dict__:
cls._base_attributes = set(dir(self))
return ret
return wrapper
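# Illustrative sketch (not from the original module): a class whose __init__ is
# wrapped by create_list_of_base_attributes records, in cls._base_attributes,
# every attribute name that exists right after that __init__ returns. Later
# code can then tell base attributes apart from ones added afterwards. The
# class and attribute names below are hypothetical.
class _ExampleBase(object):
    @create_list_of_base_attributes
    def __init__(self):
        self.datapath = None


def _example_base_attributes():
    obj = _ExampleBase()
    obj.extra = 42
    # 'datapath' existed when __init__ finished, 'extra' did not
    return ('datapath' in _ExampleBase._base_attributes,
            'extra' in _ExampleBase._base_attributes)  # (True, False)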
def ofp_msg_from_jsondict(dp, jsondict):
"""
This function instantiates an appropriate OpenFlow message class
from the given JSON style dictionary.
The objects created by following two code fragments are equivalent.
Code A::
jsonstr = '{ "OFPSetConfig": { "flags": 0, "miss_send_len": 128 } }'
jsondict = json.loads(jsonstr)
o = ofp_msg_from_jsondict(dp, jsondict)
Code B::
o = dp.ofproto_parser.OFPSetConfig(flags=0, miss_send_len=128)
This function takes the following arguments.
======== =======================================
Argument Description
======== =======================================
dp An instance of ryu.controller.Datapath.
jsondict A JSON style dict.
======== =======================================
"""
parser = dp.ofproto_parser
assert len(jsondict) == 1
for k, v in jsondict.iteritems():
cls = getattr(parser, k)
assert issubclass(cls, MsgBase)
return cls.from_jsondict(v, datapath=dp)
class StringifyMixin(stringify.StringifyMixin):
_class_prefixes = ["OFP", "ONF", "MT"]
@classmethod
def cls_from_jsondict_key(cls, k):
obj_cls = super(StringifyMixin, cls).cls_from_jsondict_key(k)
return obj_cls
class MsgBase(StringifyMixin):
"""
This is a base class for OpenFlow message classes.
An instance of this class has at least the following attributes.
========= ==============================
Attribute Description
========= ==============================
datapath A ryu.controller.controller.Datapath instance for this message
version OpenFlow protocol version
msg_type Type of OpenFlow message
msg_len Length of the message
xid Transaction id
buf Raw data
========= ==============================
"""
@create_list_of_base_attributes
def __init__(self, datapath):
super(MsgBase, self).__init__()
self.datapath = datapath
self.version = None
self.msg_type = None
self.msg_len = None
self.xid = None
self.buf = None
def set_headers(self, version, msg_type, msg_len, xid):
assert msg_type == self.cls_msg_type
self.version = version
self.msg_type = msg_type
self.msg_len = msg_len
self.xid = xid
def set_xid(self, xid):
assert self.xid is None
self.xid = xid
def set_buf(self, buf):
self.buf = buffer(buf)
def __str__(self):
buf = 'version: 0x%x msg_type 0x%x xid 0x%x ' % (self.version,
self.msg_type,
self.xid)
return buf + StringifyMixin.__str__(self)
@classmethod
def parser(cls, datapath, version, msg_type, msg_len, xid, buf):
msg_ = cls(datapath)
msg_.set_headers(version, msg_type, msg_len, xid)
msg_.set_buf(buf)
return msg_
def _serialize_pre(self):
self.version = self.datapath.ofproto.OFP_VERSION
self.msg_type = self.cls_msg_type
self.buf = bytearray(self.datapath.ofproto.OFP_HEADER_SIZE)
def _serialize_header(self):
# buffer length is determined after trailing data is formatted.
assert self.version is not None
assert self.msg_type is not None
assert self.buf is not None
assert len(self.buf) >= self.datapath.ofproto.OFP_HEADER_SIZE
self.msg_len = len(self.buf)
if self.xid is None:
self.xid = 0
struct.pack_into(self.datapath.ofproto.OFP_HEADER_PACK_STR,
self.buf, 0,
self.version, self.msg_type, self.msg_len, self.xid)
def _serialize_body(self):
pass
def serialize(self):
self._serialize_pre()
self._serialize_body()
self._serialize_header()
class MsgInMsgBase(MsgBase):
@classmethod
def _decode_value(cls, k, json_value, decode_string=base64.b64decode,
**additional_args):
return cls._get_decoder(k, decode_string)(json_value,
**additional_args)
def msg_pack_into(fmt, buf, offset, *args):
if len(buf) < offset:
buf += bytearray(offset - len(buf))
if len(buf) == offset:
buf += struct.pack(fmt, *args)
return
needed_len = offset + struct.calcsize(fmt)
if len(buf) < needed_len:
buf += bytearray(needed_len - len(buf))
struct.pack_into(fmt, buf, offset, *args)
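# Illustrative sketch (not from the original module): unlike struct.pack_into,
# msg_pack_into grows the target bytearray on demand, zero-padding any gap
# between the current buffer length and the requested offset. The format and
# values below are hypothetical.
def _example_msg_pack_into():
    buf = bytearray()
    msg_pack_into('!HH', buf, 4, 0x1234, 0x5678)
    # four zero bytes of padding followed by the two packed shorts
    assert len(buf) == 4 + struct.calcsize('!HH')
    return buf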
def namedtuple(typename, fields, **kwargs):
class _namedtuple(StringifyMixin,
collections.namedtuple(typename, fields, **kwargs)):
pass
return _namedtuple
def msg_str_attr(msg_, buf, attr_list=None):
if attr_list is None:
attr_list = stringify.obj_attrs(msg_)
for attr in attr_list:
val = getattr(msg_, attr, None)
if val is not None:
buf += ' %s %s' % (attr, val)
return buf
|
|
# Copyright 2013 Big Switch Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Adapted from neutron.tests.unit.test_l3_plugin
import contextlib
import copy
import mock
from oslo.config import cfg
from six import moves
from webob import exc
from neutron.common import test_lib
from neutron import context
from neutron.extensions import l3
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.bigswitch.extensions import routerrule
from neutron.tests.unit.bigswitch import fake_server
from neutron.tests.unit.bigswitch import test_base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_extension_extradhcpopts as test_extradhcp
from neutron.tests.unit import test_l3_plugin
HTTPCON = 'neutron.plugins.bigswitch.servermanager.httplib.HTTPConnection'
_uuid = uuidutils.generate_uuid
class RouterRulesTestExtensionManager(object):
def get_resources(self):
l3.RESOURCE_ATTRIBUTE_MAP['routers'].update(
routerrule.EXTENDED_ATTRIBUTES_2_0['routers'])
return l3.L3.get_resources()
def get_actions(self):
return []
def get_request_extensions(self):
return []
class DHCPOptsTestCase(test_base.BigSwitchTestBase,
test_extradhcp.TestExtraDhcpOpt):
def setUp(self, plugin=None):
self.setup_patches()
self.setup_config_files()
super(test_extradhcp.ExtraDhcpOptDBTestCase,
self).setUp(plugin=self._plugin_name)
self.startHttpPatch()
class RouterDBTestBase(test_base.BigSwitchTestBase,
test_l3_plugin.L3BaseForIntTests,
test_l3_plugin.L3NatTestCaseMixin):
mock_rescheduling = False
def setUp(self):
self.setup_patches()
self.setup_config_files()
ext_mgr = RouterRulesTestExtensionManager()
service_plugins = {'L3_ROUTER_NAT': self._l3_plugin_name}
super(RouterDBTestBase, self).setUp(plugin=self._plugin_name,
ext_mgr=ext_mgr,
service_plugins=service_plugins)
cfg.CONF.set_default('allow_overlapping_ips', False)
self.plugin_obj = manager.NeutronManager.get_service_plugins().get(
'L3_ROUTER_NAT')
self.startHttpPatch()
def tearDown(self):
super(RouterDBTestBase, self).tearDown()
del test_lib.test_config['config_files']
class RouterDBTestCase(RouterDBTestBase,
test_l3_plugin.L3NatDBIntTestCase):
def test_router_remove_router_interface_wrong_subnet_returns_400(self):
with self.router() as r:
with self.subnet() as s:
with self.subnet(cidr='10.0.10.0/24') as s1:
with self.port(subnet=s1) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
self._router_interface_action('remove',
r['router']['id'],
s['subnet']['id'],
p['port']['id'],
exc.HTTPBadRequest.code)
#remove properly to clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_remove_router_interface_wrong_port_returns_404(self):
with self.router() as r:
with self.subnet() as s:
with self.port(subnet=s) as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
# create another port for testing failure case
res = self._create_port('json', p['port']['network_id'])
p2 = self.deserialize('json', res)
self._router_interface_action('remove',
r['router']['id'],
None,
p2['port']['id'],
exc.HTTPNotFound.code)
# remove correct interface to cleanup
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
# remove extra port created
self._delete('ports', p2['port']['id'])
def test_multi_tenant_flip_alllocation(self):
tenant1_id = _uuid()
tenant2_id = _uuid()
with contextlib.nested(
self.network(tenant_id=tenant1_id),
self.network(tenant_id=tenant2_id)) as (n1, n2):
with contextlib.nested(
self.subnet(network=n1, cidr='11.0.0.0/24'),
self.subnet(network=n2, cidr='12.0.0.0/24'),
self.subnet(cidr='13.0.0.0/24')) as (s1, s2, psub):
with contextlib.nested(
self.router(tenant_id=tenant1_id),
self.router(tenant_id=tenant2_id),
self.port(subnet=s1, tenant_id=tenant1_id),
self.port(subnet=s2, tenant_id=tenant2_id)) as (r1, r2,
p1, p2):
self._set_net_external(psub['subnet']['network_id'])
s1id = p1['port']['fixed_ips'][0]['subnet_id']
s2id = p2['port']['fixed_ips'][0]['subnet_id']
s1 = {'subnet': {'id': s1id}}
s2 = {'subnet': {'id': s2id}}
self._add_external_gateway_to_router(
r1['router']['id'],
psub['subnet']['network_id'])
self._add_external_gateway_to_router(
r2['router']['id'],
psub['subnet']['network_id'])
self._router_interface_action(
'add', r1['router']['id'],
s1['subnet']['id'], None)
self._router_interface_action(
'add', r2['router']['id'],
s2['subnet']['id'], None)
fl1 = self._make_floatingip_for_tenant_port(
net_id=psub['subnet']['network_id'],
port_id=p1['port']['id'],
tenant_id=tenant1_id)
self.httpPatch.stop()
multiFloatPatch = mock.patch(
HTTPCON,
new=fake_server.VerifyMultiTenantFloatingIP)
multiFloatPatch.start()
fl2 = self._make_floatingip_for_tenant_port(
net_id=psub['subnet']['network_id'],
port_id=p2['port']['id'],
tenant_id=tenant2_id)
multiFloatPatch.stop()
self.httpPatch.start()
self._delete('floatingips', fl1['floatingip']['id'])
self._delete('floatingips', fl2['floatingip']['id'])
self._router_interface_action(
'remove', r1['router']['id'],
s1['subnet']['id'], None)
self._router_interface_action(
'remove', r2['router']['id'],
s2['subnet']['id'], None)
def _make_floatingip_for_tenant_port(self, net_id, port_id, tenant_id):
data = {'floatingip': {'floating_network_id': net_id,
'tenant_id': tenant_id,
'port_id': port_id}}
floatingip_req = self.new_create_request('floatingips', data, self.fmt)
res = floatingip_req.get_response(self.ext_api)
return self.deserialize(self.fmt, res)
def test_floatingip_with_invalid_create_port(self):
self._test_floatingip_with_invalid_create_port(
'neutron.plugins.bigswitch.plugin.NeutronRestProxyV2')
def test_create_floatingip_no_ext_gateway_return_404(self):
with self.subnet(cidr='10.0.10.0/24') as public_sub:
self._set_net_external(public_sub['subnet']['network_id'])
with self.port() as private_port:
with self.router():
res = self._create_floatingip(
'json',
public_sub['subnet']['network_id'],
port_id=private_port['port']['id'])
self.assertEqual(res.status_int, exc.HTTPNotFound.code)
def test_router_update_gateway(self):
with self.router() as r:
with self.subnet() as s1:
with self.subnet(cidr='10.0.10.0/24') as s2:
self._set_net_external(s1['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s1['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = (body['router']
['external_gateway_info']['network_id'])
self.assertEqual(net_id, s1['subnet']['network_id'])
self._set_net_external(s2['subnet']['network_id'])
self._add_external_gateway_to_router(
r['router']['id'],
s2['subnet']['network_id'])
body = self._show('routers', r['router']['id'])
net_id = (body['router']
['external_gateway_info']['network_id'])
self.assertEqual(net_id, s2['subnet']['network_id'])
self._remove_external_gateway_from_router(
r['router']['id'],
s2['subnet']['network_id'])
def test_router_add_interface_overlapped_cidr(self):
self.skipTest("Plugin does not support")
def test_router_add_interface_overlapped_cidr_returns_400(self):
self.skipTest("Plugin does not support")
def test_list_nets_external(self):
self.skipTest("Plugin does not support")
def test_router_update_gateway_with_existed_floatingip(self):
with self.subnet(cidr='10.0.10.0/24') as subnet:
self._set_net_external(subnet['subnet']['network_id'])
with self.floatingip_with_assoc() as fip:
self._add_external_gateway_to_router(
fip['floatingip']['router_id'],
subnet['subnet']['network_id'],
expected_code=exc.HTTPConflict.code)
def test_router_remove_interface_wrong_subnet_returns_400(self):
with self.router() as r:
with self.subnet(cidr='10.0.10.0/24') as s:
with self.port() as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
self._router_interface_action('remove',
r['router']['id'],
s['subnet']['id'],
p['port']['id'],
exc.HTTPBadRequest.code)
#remove properly to clean-up
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
def test_router_remove_interface_wrong_port_returns_404(self):
with self.router() as r:
with self.subnet(cidr='10.0.10.0/24'):
with self.port() as p:
self._router_interface_action('add',
r['router']['id'],
None,
p['port']['id'])
# create another port for testing failure case
res = self._create_port('json', p['port']['network_id'])
p2 = self.deserialize('json', res)
self._router_interface_action('remove',
r['router']['id'],
None,
p2['port']['id'],
exc.HTTPNotFound.code)
# remove correct interface to cleanup
self._router_interface_action('remove',
r['router']['id'],
None,
p['port']['id'])
# remove extra port created
self._delete('ports', p2['port']['id'])
def test_send_data(self):
fmt = 'json'
plugin_obj = manager.NeutronManager.get_plugin()
with self.router() as r:
r_id = r['router']['id']
with self.subnet(cidr='10.0.10.0/24') as s:
s_id = s['subnet']['id']
with self.router() as r1:
r1_id = r1['router']['id']
body = self._router_interface_action('add', r_id, s_id,
None)
self.assertIn('port_id', body)
r_port_id = body['port_id']
body = self._show('ports', r_port_id)
self.assertEqual(body['port']['device_id'], r_id)
with self.subnet(cidr='10.0.20.0/24') as s1:
s1_id = s1['subnet']['id']
body = self._router_interface_action('add', r1_id,
s1_id, None)
self.assertIn('port_id', body)
r1_port_id = body['port_id']
body = self._show('ports', r1_port_id)
self.assertEqual(body['port']['device_id'], r1_id)
with self.subnet(cidr='11.0.0.0/24') as public_sub:
public_net_id = public_sub['subnet']['network_id']
self._set_net_external(public_net_id)
with self.port() as prv_port:
prv_fixed_ip = prv_port['port']['fixed_ips'][0]
priv_sub_id = prv_fixed_ip['subnet_id']
self._add_external_gateway_to_router(
r_id, public_net_id)
self._router_interface_action('add', r_id,
priv_sub_id,
None)
priv_port_id = prv_port['port']['id']
res = self._create_floatingip(
fmt, public_net_id,
port_id=priv_port_id)
self.assertEqual(res.status_int,
exc.HTTPCreated.code)
floatingip = self.deserialize(fmt, res)
result = plugin_obj._send_all_data()
self.assertEqual(result[0], 200)
self._delete('floatingips',
floatingip['floatingip']['id'])
self._remove_external_gateway_from_router(
r_id, public_net_id)
self._router_interface_action('remove', r_id,
priv_sub_id,
None)
self._router_interface_action('remove', r_id, s_id,
None)
self._show('ports', r_port_id,
expected_code=exc.HTTPNotFound.code)
self._router_interface_action('remove', r1_id, s1_id,
None)
self._show('ports', r1_port_id,
expected_code=exc.HTTPNotFound.code)
def test_router_rules_update(self):
with self.router() as r:
r_id = r['router']['id']
router_rules = [{'destination': '1.2.3.4/32',
'source': '4.3.2.1/32',
'action': 'permit',
'nexthops': ['4.4.4.4', '4.4.4.5']}]
body = self._update('routers', r_id,
{'router': {'router_rules': router_rules}})
body = self._show('routers', r['router']['id'])
self.assertIn('router_rules', body['router'])
rules = body['router']['router_rules']
self.assertEqual(_strip_rule_ids(rules), router_rules)
# Try after adding another rule
router_rules.append({'source': 'external',
'destination': '8.8.8.8/32',
'action': 'permit', 'nexthops': []})
body = self._update('routers', r['router']['id'],
{'router': {'router_rules': router_rules}})
body = self._show('routers', r['router']['id'])
self.assertIn('router_rules', body['router'])
rules = body['router']['router_rules']
self.assertEqual(_strip_rule_ids(rules), router_rules)
def test_router_rules_separation(self):
with self.router() as r1:
with self.router() as r2:
r1_id = r1['router']['id']
r2_id = r2['router']['id']
router1_rules = [{'destination': '5.6.7.8/32',
'source': '8.7.6.5/32',
'action': 'permit',
'nexthops': ['8.8.8.8', '9.9.9.9']}]
router2_rules = [{'destination': '1.2.3.4/32',
'source': '4.3.2.1/32',
'action': 'permit',
'nexthops': ['4.4.4.4', '4.4.4.5']}]
body1 = self._update('routers', r1_id,
{'router':
{'router_rules': router1_rules}})
body2 = self._update('routers', r2_id,
{'router':
{'router_rules': router2_rules}})
body1 = self._show('routers', r1_id)
body2 = self._show('routers', r2_id)
rules1 = body1['router']['router_rules']
rules2 = body2['router']['router_rules']
self.assertEqual(_strip_rule_ids(rules1), router1_rules)
self.assertEqual(_strip_rule_ids(rules2), router2_rules)
def test_router_rules_validation(self):
with self.router() as r:
r_id = r['router']['id']
good_rules = [{'destination': '1.2.3.4/32',
'source': '4.3.2.1/32',
'action': 'permit',
'nexthops': ['4.4.4.4', '4.4.4.5']}]
body = self._update('routers', r_id,
{'router': {'router_rules': good_rules}})
body = self._show('routers', r_id)
self.assertIn('router_rules', body['router'])
self.assertEqual(good_rules,
_strip_rule_ids(body['router']['router_rules']))
# Missing nexthops should be populated with an empty list
light_rules = copy.deepcopy(good_rules)
del light_rules[0]['nexthops']
body = self._update('routers', r_id,
{'router': {'router_rules': light_rules}})
body = self._show('routers', r_id)
self.assertIn('router_rules', body['router'])
light_rules[0]['nexthops'] = []
self.assertEqual(light_rules,
_strip_rule_ids(body['router']['router_rules']))
# bad CIDR
bad_rules = copy.deepcopy(good_rules)
bad_rules[0]['destination'] = '1.1.1.1'
body = self._update('routers', r_id,
{'router': {'router_rules': bad_rules}},
expected_code=exc.HTTPBadRequest.code)
# bad next hop
bad_rules = copy.deepcopy(good_rules)
bad_rules[0]['nexthops'] = ['1.1.1.1', 'f2']
body = self._update('routers', r_id,
{'router': {'router_rules': bad_rules}},
expected_code=exc.HTTPBadRequest.code)
# bad action
bad_rules = copy.deepcopy(good_rules)
bad_rules[0]['action'] = 'dance'
body = self._update('routers', r_id,
{'router': {'router_rules': bad_rules}},
expected_code=exc.HTTPBadRequest.code)
# duplicate rule with opposite action
bad_rules = copy.deepcopy(good_rules)
bad_rules.append(copy.deepcopy(bad_rules[0]))
bad_rules.append(copy.deepcopy(bad_rules[0]))
bad_rules[1]['source'] = 'any'
bad_rules[2]['action'] = 'deny'
body = self._update('routers', r_id,
{'router': {'router_rules': bad_rules}},
expected_code=exc.HTTPBadRequest.code)
# duplicate nexthop
bad_rules = copy.deepcopy(good_rules)
bad_rules[0]['nexthops'] = ['1.1.1.1', '1.1.1.1']
body = self._update('routers', r_id,
{'router': {'router_rules': bad_rules}},
expected_code=exc.HTTPBadRequest.code)
# make sure light rules persisted during bad updates
body = self._show('routers', r_id)
self.assertIn('router_rules', body['router'])
self.assertEqual(light_rules,
_strip_rule_ids(body['router']['router_rules']))
def test_router_rules_config_change(self):
cfg.CONF.set_override('tenant_default_router_rule',
['*:any:any:deny',
'*:8.8.8.8/32:any:permit:1.2.3.4'],
'ROUTER')
with self.router() as r:
body = self._show('routers', r['router']['id'])
expected_rules = [{'source': 'any', 'destination': 'any',
'nexthops': [], 'action': 'deny'},
{'source': '8.8.8.8/32', 'destination': 'any',
'nexthops': ['1.2.3.4'], 'action': 'permit'}]
self.assertEqual(expected_rules,
_strip_rule_ids(body['router']['router_rules']))
def test_rule_exhaustion(self):
cfg.CONF.set_override('max_router_rules', 10, 'ROUTER')
with self.router() as r:
rules = []
for i in moves.xrange(1, 12):
rule = {'source': 'any', 'nexthops': [],
'destination': '1.1.1.' + str(i) + '/32',
'action': 'permit'}
rules.append(rule)
self._update('routers', r['router']['id'],
{'router': {'router_rules': rules}},
expected_code=exc.HTTPBadRequest.code)
def test_rollback_on_router_create(self):
tid = test_api_v2._uuid()
self.httpPatch.stop()
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
self._create_router('json', tid)
self.assertTrue(len(self._get_routers(tid)) == 0)
def test_rollback_on_router_update(self):
with self.router() as r:
data = {'router': {'name': 'aNewName'}}
self.httpPatch.stop()
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
self.new_update_request(
'routers', data, r['router']['id']).get_response(self.api)
self.httpPatch.start()
updatedr = self._get_routers(r['router']['tenant_id'])[0]
# name should have stayed the same due to failure
self.assertEqual(r['router']['name'], updatedr['name'])
def test_rollback_on_router_delete(self):
with self.router() as r:
self.httpPatch.stop()
with mock.patch(HTTPCON, new=fake_server.HTTPConnectionMock500):
self._delete('routers', r['router']['id'],
expected_code=exc.HTTPInternalServerError.code)
self.httpPatch.start()
self.assertEqual(r['router']['id'],
self._get_routers(r['router']['tenant_id']
)[0]['id'])
def _get_routers(self, tenant_id):
ctx = context.Context('', tenant_id)
return self.plugin_obj.get_routers(ctx)
def _strip_rule_ids(rules):
cleaned = []
for rule in rules:
del rule['id']
cleaned.append(rule)
return cleaned
|
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#$HeadURL$
#$LastChangedDate$
#$LastChangedRevision$
'''
Copyright (c) 2009, Patrick Maupin, Austin, Texas
Automated testing for rst2pdf
See LICENSE.txt for licensing terms
'''
import os
import sys
import glob
import shutil
import shlex
from copy import copy
from optparse import OptionParser
from execmgr import textexec, default_logger as log
from pythonpaths import setpythonpaths
# md5 module deprecated, but hashlib not available in 2.4
try:
import hashlib
except ImportError:
import md5 as hashlib
description = '''
autotest.py reads .txt files (and optional associated .style and other files)
from the input directory and generates throw-away results (.pdf and .log) in
the output subdirectory. It also maintains (with the help of the developers)
a database of unknown, good, and bad MD5 checksums for the .pdf output files
in the md5 subdirectory.
By default, it will process all the files in the input directory, but one or
more individual files can be explicitly specified on the command line.
The -c and -a options run the tests under an external coverage package
to generate a .coverage file with code-coverage information.
'''
def dirname(path):
# os.path.dirname('abc') returns '', which is completely
# useless for most purposes...
return os.path.dirname(path) or '.'
def globjoin(*parts):
# A very common pattern in this module
return sorted(glob.glob(os.path.join(*parts)))
class PathInfo(object):
''' This class is just a namespace to avoid cluttering up the
module namespace. It is never instantiated.
'''
rootdir = os.path.realpath(dirname(__file__))
bindir = os.path.abspath(os.path.join(rootdir, '..', '..', 'bin'))
runfile = os.path.join(bindir, 'rst2pdf')
inpdir = os.path.join(rootdir, 'input')
outdir = os.path.join(rootdir, 'output')
md5dir = os.path.join(rootdir, 'md5')
if not os.path.exists(runfile):
raise SystemExit('Use bootstrap.py and buildout to create executable')
runcmd = [runfile]
@classmethod
def add_coverage(cls, keep=False):
cls.runcmd[0:0] = ['coverage', 'run', '-a']
fname = os.path.join(cls.rootdir, '.coverage')
os.environ['COVERAGE_FILE'] = fname
if not keep:
if os.path.exists(fname):
os.remove(fname)
@classmethod
def load_subprocess(cls):
import rst2pdf.createpdf
return rst2pdf.createpdf.main
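# Illustrative note (not part of the original script): PathInfo.add_coverage
# rewrites the class-level command so later runs go through the external
# "coverage" tool, turning a runcmd of
#     [<bindir>/rst2pdf]
# into
#     ['coverage', 'run', '-a', <bindir>/rst2pdf]
# and pointing COVERAGE_FILE at <rootdir>/.coverage, which is deleted first
# unless keep=True so a fresh run starts from an empty coverage database.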
class MD5Info(dict):
''' The MD5Info class is used to round-trip good, bad, unknown
information to/from a .json file.
For formatting reasons, the json module isn't used for writing,
and since we're not worried about security, we don't bother using
it for reading, either.
'''
# Category to dump new data into
new_category = 'unknown'
# Categories which always should be in file
mandatory_categories = 'good bad'.split()
# Sentinel to make manual changes and diffs easy
sentinel = 'sentinel'
# An empty list is one which is truly empty or which has a sentinel
empty = [[], ['sentinel']]
# Suffix for file items
suffix = '_md5'
def __str__(self):
''' Return the string to output to the MD5 file '''
result = []
for name, value in sorted(self.iteritems()):
if not name.endswith(self.suffix):
continue
result.append('%s = [' % name)
result.append(',\n'.join([" '%s'"%item for item in sorted(value)]))
result.append(']\n')
result.append('')
return '\n'.join(result)
def __init__(self):
self.__dict__ = self
self.changed = False
for name in self.mandatory_categories:
setattr(self, name + self.suffix, [self.sentinel])
def find(self, checksum, new_category=new_category):
''' find() has some serious side-effects. If the checksum
is found, the category it was found in is returned.
If the checksum is not found, then it is automagically
added to the unknown category. In all cases, the
data is prepped to output to the file (if necessary),
and self.changed is set if the data is modified during
this process. Functional programming this isn't...
A quick word about the 'sentinel'. This value starts
with an 's', which happens to sort higher than the highest
hexadecimal digit 'f', so it is always at the end of the list.
The only reason for the sentinel is to make the database
easier to work with, both to modify (by moving an MD5
line from one category to another) and to diff. This
is because every hexadecimal line (every line except
the sentinel) is guaranteed to end with a comma.
'''
suffix = self.suffix
new_key = new_category + suffix
sentinel = set([self.sentinel])
# Create a dictionary of relevant current information
# in the database.
oldinfo = dict((key, values)
for (key, values) in self.iteritems()
if key.endswith(suffix))
# Create sets and strip the sentinels while
# working with the dictionary.
newinfo = dict((key, set(values) - sentinel)
for (key, values) in oldinfo.iteritems())
# Create an inverse mapping of MD5s to key names
inverse = {}
for key,values in newinfo.iteritems():
for value in values:
inverse.setdefault(value, set()).add(key)
# In general, inverse should be a function (there
# should only be one answer to the question "What
# key name goes with this MD5?") If not,
# either report an error, or just remove one of
# the possible answers if it is the same answer
# we give by default.
for value, keys in inverse.iteritems():
if len(keys) > 1 and new_key in keys:
keys.remove(new_key)
newinfo[new_key].remove(value)
if len(keys) > 1:
raise SystemExit('MD5 %s is stored in multiple categories: %s' %
(value, ', '.join(keys)))
# Find the result in the dictionary. If it's not
# there we have to add it.
result, = inverse.get(checksum, [new_key])
if result == new_key:
newinfo.setdefault(result, set()).add(checksum)
# Create a canonical version of the dictionary,
# by adding sentinels and sorting the results.
for key, value in newinfo.iteritems():
newinfo[key] = sorted(value | sentinel)
# See if we changed anything
if newinfo != oldinfo:
self.update(newinfo)
self.changed = True
# And return the key associated with the MD5
assert result.endswith(suffix), result
return result[:-len(suffix)]
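# Illustrative sketch (not from the original script): the first time a checksum
# is seen, MD5Info.find files it under the 'unknown' category and flags the
# database as changed so the .json file gets rewritten. The checksum below is
# hypothetical.
def _example_md5info_find():
    info = MD5Info()
    category = info.find('d41d8cd98f00b204e9800998ecf8427e')
    return category, info.changed  # ('unknown', True)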
def checkmd5(pdfpath, md5path, resultlist, updatemd5, failcode=1, iprefix=None):
''' checkmd5 validates the checksum of a generated PDF
against the database, both reporting the results,
and updating the database to add this MD5 into the
unknown category if this checksum is not currently
in the database.
It updates the resultlist with information to be
printed and added to the log file, and returns
a result of 'good', 'bad', 'fail', or 'unknown'
'''
if not os.path.exists(pdfpath):
if not failcode and os.path.exists(iprefix + '.nopdf'):
log(resultlist, "Validity of file %s checksum '(none generated)' is good." % os.path.basename(pdfpath))
return 'good'
log(resultlist, 'File %s not generated' % os.path.basename(pdfpath))
return 'fail'
if os.path.isdir(pdfpath):
pdffiles = globjoin(pdfpath, '*.pdf')
else:
pdffiles = [pdfpath]
# Read the database
info = MD5Info()
if os.path.exists(md5path):
f = open(md5path, 'rb')
exec f in info
f.close()
# Generate the current MD5
md5s = []
for pdfpath in pdffiles:
f = open(pdfpath, 'rb')
data = f.read()
f.close()
m = hashlib.md5()
m.update(data)
md5s.append(m.hexdigest())
m = ' '.join(md5s)
new_category = (updatemd5 and isinstance(updatemd5, str)
and updatemd5 or info.new_category)
# Check MD5 against database and update if necessary
resulttype = info.find(m, new_category)
log(resultlist, "Validity of file %s checksum '%s' is %s." % (os.path.basename(pdfpath), m, resulttype))
if info.changed and updatemd5:
print "Updating MD5 file"
f = open(md5path, 'wb')
f.write(str(info))
f.close()
return resulttype
def build_sphinx(sphinxdir, outpdf):
def getbuilddirs():
return globjoin(sphinxdir, '*build*')
for builddir in getbuilddirs():
shutil.rmtree(builddir)
errcode, result = textexec('make clean pdf', cwd=sphinxdir)
builddirs = getbuilddirs()
if len(builddirs) != 1:
log(result, 'Cannot determine build directory')
return 1, result
builddir, = builddirs
pdfdir = os.path.join(builddir, 'pdf')
pdffiles = globjoin(pdfdir, '*.pdf')
if len(pdffiles) == 1:
shutil.copyfile(pdffiles[0], outpdf)
elif not pdffiles:
log(result, 'Output PDF apparently not generated')
errcode = 1
else:
shutil.copytree(pdfdir, outpdf)
return errcode, result
def build_txt(iprefix, outpdf, fastfork):
inpfname = iprefix + '.txt'
style = iprefix + '.style'
cli = iprefix + '.cli'
if os.path.isfile(cli):
f = open(cli)
extraargs=shlex.split(f.read())
f.close()
else:
extraargs=[]
args = PathInfo.runcmd + ['--date-invariant', '-v', os.path.basename(inpfname)]+extraargs
if os.path.exists(style):
args.extend(('-s', os.path.basename(style)))
args.extend(('-o', outpdf))
return textexec(args, cwd=dirname(inpfname), python_proc=fastfork)
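# Illustrative note (not part of the original script): for a hypothetical
# input/foo.txt with a matching input/foo.style, build_txt runs (relative to
# the input directory) roughly
#     rst2pdf --date-invariant -v foo.txt -s foo.style -o <outdir>/foo.pdf
# and, when a foo.cli file exists, its arguments are spliced in before -s/-o.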
def run_single(inpfname, incremental=False, fastfork=None, updatemd5=None):
use_sphinx = 'sphinx' in inpfname and os.path.isdir(inpfname)
if use_sphinx:
sphinxdir = inpfname
if sphinxdir.endswith('Makefile'):
sphinxdir = dirname(sphinxdir)
basename = os.path.basename(sphinxdir)
if not basename:
sphinxdir = os.path.dirname(sphinxdir)
basename = os.path.basename(sphinxdir)
else:
iprefix = os.path.splitext(inpfname)[0]
basename = os.path.basename(iprefix)
if os.path.exists(iprefix + '.ignore'):
return 'ignored', 0
oprefix = os.path.join(PathInfo.outdir, basename)
mprefix = os.path.join(PathInfo.md5dir, basename)
outpdf = oprefix + '.pdf'
outtext = oprefix + '.log'
md5file = mprefix + '.json'
if incremental and os.path.exists(outpdf):
return 'preexisting', 0
for fname in (outtext, outpdf):
if os.path.exists(fname):
if os.path.isdir(fname):
shutil.rmtree(fname)
else:
os.remove(fname)
if use_sphinx:
errcode, result = build_sphinx(sphinxdir, outpdf)
checkinfo = checkmd5(outpdf, md5file, result, updatemd5, errcode)
else:
errcode, result = build_txt(iprefix, outpdf, fastfork)
checkinfo = checkmd5(outpdf, md5file, result, updatemd5, errcode, iprefix)
log(result, '')
outf = open(outtext, 'wb')
outf.write('\n'.join(result))
outf.close()
return checkinfo, errcode
def run_testlist(testfiles=None, incremental=False, fastfork=None, do_text= False, do_sphinx=False, updatemd5=None):
if not testfiles:
testfiles = []
if do_text:
testfiles = globjoin(PathInfo.inpdir, '*.txt')
testfiles += globjoin(PathInfo.inpdir, '*', '*.txt')
testfiles = [x for x in testfiles if 'sphinx' not in x]
if do_sphinx:
testfiles += globjoin(PathInfo.inpdir, 'sphinx*')
results = {}
for fname in testfiles:
key, errcode = run_single(fname, incremental, fastfork, updatemd5)
results[key] = results.get(key, 0) + 1
if incremental and errcode and 0:
break
print
print 'Final checksum statistics:',
print ', '.join(sorted('%s=%s' % x for x in results.iteritems()))
print
def parse_commandline():
usage = '%prog [options] [<input.txt file> [<input.txt file>]...]'
parser = OptionParser(usage, description=description)
parser.add_option('-c', '--coverage', action="store_true",
dest='coverage', default=False,
help='Generate new coverage information.')
parser.add_option('-a', '--add-coverage', action="store_true",
dest='add_coverage', default=False,
help='Add coverage information to previous runs.')
parser.add_option('-i', '--incremental', action="store_true",
dest='incremental', default=False,
help='Incremental build -- ignores existing PDFs')
parser.add_option('-f', '--fast', action="store_true",
dest='fastfork', default=False,
help='Fork and reuse process information')
parser.add_option('-s', '--sphinx', action="store_true",
dest='sphinx', default=False,
help='Run sphinx tests only')
parser.add_option('-e', '--everything', action="store_true",
dest='everything', default=False,
help='Run both rst2pdf and sphinx tests')
parser.add_option('-p', '--python-path', action="store_true",
dest='nopythonpath', default=False,
help='Do not set up PYTHONPATH env variable')
parser.add_option('-u', '--update-md5', action="store", type="string",
dest='updatemd5', default=None,
help='Update MD5 checksum files')
return parser
def main(args=None):
parser = parse_commandline()
options, args = parser.parse_args(copy(args))
if not options.nopythonpath:
setpythonpaths(PathInfo.runfile, PathInfo.rootdir)
fastfork = None
do_sphinx = options.sphinx or options.everything
do_text = options.everything or not options.sphinx
if options.coverage or options.add_coverage:
assert not options.fastfork, "Cannot fastfork and run coverage simultaneously"
assert not do_sphinx, "Cannot run sphinx and coverage simultaneously"
PathInfo.add_coverage(options.add_coverage)
elif options.fastfork:
fastfork = PathInfo.load_subprocess()
updatemd5 = options.updatemd5
if updatemd5 is not None and updatemd5 not in 'good bad incomplete unknown deprecated'.split():
raise SystemExit('Unexpected value for updatemd5: %s' % updatemd5)
run_testlist(args, options.incremental, fastfork, do_text, do_sphinx, options.updatemd5)
if __name__ == '__main__':
main()
|
|
from itertools import islice
from six.moves import range
import collections
import inspect
import os
import shutil
import socket
import sys
import tempfile
from bpython._py3compat import py3
from bpython import config, repl, cli, autocomplete
from bpython.test import MagicIterMock, mock, FixLanguageTestCase as TestCase
from bpython.test import unittest
pypy = 'PyPy' in sys.version
def setup_config(conf):
config_struct = config.Struct()
config.loadini(config_struct, os.devnull)
if 'autocomplete_mode' in conf:
config_struct.autocomplete_mode = conf['autocomplete_mode']
return config_struct
class FakeHistory(repl.History):
def __init__(self):
pass
def reset(self):
pass
class FakeRepl(repl.Repl):
def __init__(self, conf={}):
repl.Repl.__init__(self, repl.Interpreter(), setup_config(conf))
self.current_line = ""
self.cursor_offset = 0
class FakeCliRepl(cli.CLIRepl, FakeRepl):
def __init__(self):
self.s = ''
self.cpos = 0
self.rl_history = FakeHistory()
class TestMatchesIterator(unittest.TestCase):
def setUp(self):
self.matches = ['bobby', 'bobbies', 'bobberina']
self.matches_iterator = repl.MatchesIterator()
self.matches_iterator.current_word = 'bob'
self.matches_iterator.orig_line = 'bob'
self.matches_iterator.orig_cursor_offset = len('bob')
self.matches_iterator.matches = self.matches
def test_next(self):
self.assertEqual(next(self.matches_iterator), self.matches[0])
for x in range(len(self.matches) - 1):
next(self.matches_iterator)
self.assertEqual(next(self.matches_iterator), self.matches[0])
self.assertEqual(next(self.matches_iterator), self.matches[1])
self.assertNotEqual(next(self.matches_iterator), self.matches[1])
def test_previous(self):
self.assertEqual(self.matches_iterator.previous(), self.matches[2])
for x in range(len(self.matches) - 1):
self.matches_iterator.previous()
self.assertNotEqual(self.matches_iterator.previous(), self.matches[0])
self.assertEqual(self.matches_iterator.previous(), self.matches[1])
self.assertEqual(self.matches_iterator.previous(), self.matches[0])
def test_nonzero(self):
"""self.matches_iterator should be False at start,
then True once we activate a match.
"""
self.assertFalse(self.matches_iterator)
next(self.matches_iterator)
self.assertTrue(self.matches_iterator)
def test_iter(self):
slice = islice(self.matches_iterator, 0, 9)
self.assertEqual(list(slice), self.matches * 3)
def test_current(self):
with self.assertRaises(ValueError):
self.matches_iterator.current()
next(self.matches_iterator)
self.assertEqual(self.matches_iterator.current(), self.matches[0])
def test_update(self):
slice = islice(self.matches_iterator, 0, 3)
self.assertEqual(list(slice), self.matches)
newmatches = ['string', 'str', 'set']
completer = mock.Mock()
completer.locate.return_value = (0, 1, 's')
self.matches_iterator.update(1, 's', newmatches, completer)
newslice = islice(newmatches, 0, 3)
self.assertNotEqual(list(slice), self.matches)
self.assertEqual(list(newslice), newmatches)
def test_cur_line(self):
completer = mock.Mock()
completer.locate.return_value = (
0,
self.matches_iterator.orig_cursor_offset,
self.matches_iterator.orig_line)
self.matches_iterator.completer = completer
with self.assertRaises(ValueError):
self.matches_iterator.cur_line()
self.assertEqual(next(self.matches_iterator), self.matches[0])
self.assertEqual(self.matches_iterator.cur_line(),
(len(self.matches[0]), self.matches[0]))
def test_is_cseq(self):
self.assertTrue(self.matches_iterator.is_cseq())
class TestArgspec(unittest.TestCase):
def setUp(self):
self.repl = FakeRepl()
self.repl.push("def spam(a, b, c):\n", False)
self.repl.push(" pass\n", False)
self.repl.push("\n", False)
self.repl.push("class Spam(object):\n", False)
self.repl.push(" def spam(self, a, b, c):\n", False)
self.repl.push(" pass\n", False)
self.repl.push("\n", False)
self.repl.push("class SpammitySpam(object):\n", False)
self.repl.push(" def __init__(self, a, b, c):\n", False)
self.repl.push(" pass\n", False)
self.repl.push("\n", False)
self.repl.push("class WonderfulSpam(object):\n", False)
self.repl.push(" def __new__(self, a, b, c):\n", False)
self.repl.push(" pass\n", False)
self.repl.push("\n", False)
self.repl.push("o = Spam()\n", False)
self.repl.push("\n", False)
def set_input_line(self, line):
"""Set current input line of the test REPL."""
self.repl.current_line = line
self.repl.cursor_offset = len(line)
def test_func_name(self):
for (line, expected_name) in [("spam(", "spam"),
("spam(map([]", "map"),
("spam((), ", "spam")]:
self.set_input_line(line)
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, expected_name)
def test_func_name_method_issue_479(self):
for (line, expected_name) in [("o.spam(", "spam"),
("o.spam(map([]", "map"),
("o.spam((), ", "spam")]:
self.set_input_line(line)
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, expected_name)
def test_syntax_error_parens(self):
for line in ["spam(]", "spam([)", "spam())"]:
self.set_input_line(line)
# Should not explode
self.repl.get_args()
def test_kw_arg_position(self):
self.set_input_line("spam(a=0")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.arg_pos, "a")
self.set_input_line("spam(1, b=1")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.arg_pos, "b")
self.set_input_line("spam(1, c=2")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.arg_pos, "c")
def test_lambda_position(self):
self.set_input_line("spam(lambda a, b: 1, ")
self.assertTrue(self.repl.get_args())
self.assertTrue(self.repl.funcprops)
# Argument position
self.assertEqual(self.repl.arg_pos, 1)
def test_issue127(self):
self.set_input_line("x=range(")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, "range")
self.set_input_line("{x:range(")
self.assertTrue(self.repl.get_args())
self.assertEqual(self.repl.current_func.__name__, "range")
self.set_input_line("foo(1, 2, x,range(")
self.assertEqual(self.repl.current_func.__name__, "range")
self.set_input_line("(x,range(")
self.assertEqual(self.repl.current_func.__name__, "range")
def test_nonexistent_name(self):
self.set_input_line("spamspamspam(")
self.assertFalse(self.repl.get_args())
def test_issue572(self):
self.set_input_line("SpammitySpam(")
self.assertTrue(self.repl.get_args())
self.set_input_line("WonderfulSpam(")
self.assertTrue(self.repl.get_args())
@unittest.skipIf(pypy, 'pypy pydoc doesn\'t have this')
def test_issue583(self):
self.repl = FakeRepl()
self.repl.push("a = 1.2\n", False)
self.set_input_line("a.is_integer(")
self.repl.set_docstring()
self.assertIsNot(self.repl.docstring, None)
def test_methods_of_expressions(self):
self.set_input_line("'a'.capitalize(")
self.assertTrue(self.repl.get_args())
self.set_input_line("(1 + 1.1).as_integer_ratio(")
self.assertTrue(self.repl.get_args())
class TestArgspecInternal(unittest.TestCase):
def test_function_expressions(self):
te = self.assertTupleEqual
fa = lambda line: repl.Repl._funcname_and_argnum(line)
for line, (func, argnum) in [
('spam(', ('spam', 0)),
('spam((), ', ('spam', 1)),
('spam.eggs((), ', ('spam.eggs', 1)),
('spam[abc].eggs((), ', ('spam[abc].eggs', 1)),
('spam[0].eggs((), ', ('spam[0].eggs', 1)),
('spam[a + b]eggs((), ', ('spam[a + b]eggs', 1)),
('spam().eggs((), ', ('spam().eggs', 1)),
('spam(1, 2).eggs((), ', ('spam(1, 2).eggs', 1)),
('spam(1, f(1)).eggs((), ', ('spam(1, f(1)).eggs', 1)),
('[0].eggs((), ', ('[0].eggs', 1)),
('[0][0]((), {}).eggs((), ', ('[0][0]((), {}).eggs', 1)),
('a + spam[0].eggs((), ', ('spam[0].eggs', 1)),
("spam(", ("spam", 0)),
("spam(map([]", ("map", 0)),
("spam((), ", ("spam", 1))
]:
te(fa(line), (func, argnum))
class TestGetSource(unittest.TestCase):
def setUp(self):
self.repl = FakeRepl()
def set_input_line(self, line):
"""Set current input line of the test REPL."""
self.repl.current_line = line
self.repl.cursor_offset = len(line)
def assert_get_source_error_for_current_function(self, func, msg):
self.repl.current_func = func
with self.assertRaises(repl.SourceNotFound):
self.repl.get_source_of_current_name()
try:
self.repl.get_source_of_current_name()
except repl.SourceNotFound as e:
self.assertEqual(e.args[0], msg)
else:
self.fail("Should have raised SourceNotFound")
def test_current_function(self):
self.set_input_line('INPUTLINE')
self.repl.current_func = inspect.getsource
self.assertIn("text of the source code",
self.repl.get_source_of_current_name())
self.assert_get_source_error_for_current_function(
[], "No source code found for INPUTLINE")
self.assert_get_source_error_for_current_function(
list.pop, "No source code found for INPUTLINE")
@unittest.skipIf(pypy, 'different errors for PyPy')
def test_current_function_cpython(self):
self.set_input_line('INPUTLINE')
self.assert_get_source_error_for_current_function(
collections.defaultdict.copy, "No source code found for INPUTLINE")
self.assert_get_source_error_for_current_function(
collections.defaultdict, "could not find class definition")
def test_current_line(self):
self.repl.interp.locals['a'] = socket.socket
self.set_input_line('a')
self.assertIn('dup(self)', self.repl.get_source_of_current_name())
# TODO add tests for various failures without using current function
class TestEditConfig(TestCase):
def setUp(self):
self.repl = FakeRepl()
self.repl.interact.confirm = lambda msg: True
self.repl.interact.notify = lambda msg: None
self.repl.config.editor = 'true'
def test_create_config(self):
tmp_dir = tempfile.mkdtemp()
try:
config_path = os.path.join(tmp_dir, 'newdir', 'config')
self.repl.config.config_path = config_path
self.repl.edit_config()
self.assertTrue(os.path.exists(config_path))
finally:
shutil.rmtree(tmp_dir)
self.assertFalse(os.path.exists(config_path))
class TestRepl(unittest.TestCase):
def set_input_line(self, line):
"""Set current input line of the test REPL."""
self.repl.current_line = line
self.repl.cursor_offset = len(line)
def setUp(self):
self.repl = FakeRepl()
def test_current_string(self):
self.set_input_line('a = "2"')
# TODO factor cpos out of repl.Repl
self.repl.cpos = 0
self.assertEqual(self.repl.current_string(), '"2"')
self.set_input_line('a = "2" + 2')
self.assertEqual(self.repl.current_string(), '')
def test_push(self):
self.repl = FakeRepl()
self.repl.push("foobar = 2")
self.assertEqual(self.repl.interp.locals['foobar'], 2)
# COMPLETE TESTS
# 1. Global tests
def test_simple_global_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SIMPLE})
self.set_input_line("d")
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches,
['def', 'del', 'delattr(', 'dict(', 'dir(',
'divmod('])
def test_substring_global_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SUBSTRING})
self.set_input_line("time")
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches,
['RuntimeError(', 'RuntimeWarning('])
def test_fuzzy_global_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.FUZZY})
self.set_input_line("doc")
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches,
['UnboundLocalError(', '__doc__'] if not py3 else
['ChildProcessError(', 'UnboundLocalError(',
'__doc__'])
# 2. Attribute tests
def test_simple_attribute_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SIMPLE})
self.set_input_line("Foo.b")
code = "class Foo():\n\tdef bar(self):\n\t\tpass\n"
for line in code.split("\n"):
self.repl.push(line)
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches, ['Foo.bar'])
def test_substring_attribute_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SUBSTRING})
self.set_input_line("Foo.az")
code = "class Foo():\n\tdef baz(self):\n\t\tpass\n"
for line in code.split("\n"):
self.repl.push(line)
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches, ['Foo.baz'])
def test_fuzzy_attribute_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.FUZZY})
self.set_input_line("Foo.br")
code = "class Foo():\n\tdef bar(self):\n\t\tpass\n"
for line in code.split("\n"):
self.repl.push(line)
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches, ['Foo.bar'])
# 3. Edge cases
def test_updating_namespace_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SIMPLE})
self.set_input_line("foo")
self.repl.push("foobar = 2")
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches, ['foobar'])
def test_file_should_not_appear_in_complete(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SIMPLE})
self.set_input_line("_")
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertNotIn('__file__', self.repl.matches_iter.matches)
# 4. Parameter names
def test_paremeter_name_completion(self):
self.repl = FakeRepl({'autocomplete_mode': autocomplete.SIMPLE})
self.set_input_line("foo(ab")
code = "def foo(abc=1, abd=2, xyz=3):\n\tpass\n"
for line in code.split("\n"):
self.repl.push(line)
self.assertTrue(self.repl.complete())
self.assertTrue(hasattr(self.repl.matches_iter, 'matches'))
self.assertEqual(self.repl.matches_iter.matches,
['abc=', 'abd=', 'abs('])
class TestCliRepl(unittest.TestCase):
def setUp(self):
self.repl = FakeCliRepl()
def test_atbol(self):
self.assertTrue(self.repl.atbol())
self.repl.s = "\t\t"
self.assertTrue(self.repl.atbol())
self.repl.s = "\t\tnot an empty line"
self.assertFalse(self.repl.atbol())
def test_addstr(self):
self.repl.complete = mock.Mock(True)
self.repl.s = "foo"
self.repl.addstr("bar")
self.assertEqual(self.repl.s, "foobar")
self.repl.cpos = 3
self.repl.addstr('buzz')
self.assertEqual(self.repl.s, "foobuzzbar")
class TestCliReplTab(unittest.TestCase):
def setUp(self):
self.repl = FakeCliRepl()
# 3 Types of tab complete
def test_simple_tab_complete(self):
self.repl.matches_iter = MagicIterMock()
if py3:
self.repl.matches_iter.__bool__.return_value = False
else:
self.repl.matches_iter.__nonzero__.return_value = False
self.repl.complete = mock.Mock()
self.repl.print_line = mock.Mock()
self.repl.matches_iter.is_cseq.return_value = False
self.repl.show_list = mock.Mock()
self.repl.funcprops = mock.Mock()
self.repl.arg_pos = mock.Mock()
self.repl.matches_iter.cur_line.return_value = (None, "foobar")
self.repl.s = "foo"
self.repl.tab()
self.assertTrue(self.repl.complete.called)
self.repl.complete.assert_called_with(tab=True)
self.assertEqual(self.repl.s, "foobar")
@unittest.skip("disabled while non-simple completion is disabled")
def test_substring_tab_complete(self):
self.repl.s = "bar"
self.repl.config.autocomplete_mode = autocomplete.FUZZY
self.repl.tab()
self.assertEqual(self.repl.s, "foobar")
self.repl.tab()
self.assertEqual(self.repl.s, "foofoobar")
@unittest.skip("disabled while non-simple completion is disabled")
def test_fuzzy_tab_complete(self):
self.repl.s = "br"
self.repl.config.autocomplete_mode = autocomplete.FUZZY
self.repl.tab()
self.assertEqual(self.repl.s, "foobar")
# Edge Cases
def test_normal_tab(self):
"""make sure pressing the tab key will
still in some cases add a tab"""
self.repl.s = ""
self.repl.config = mock.Mock()
self.repl.config.tab_length = 4
self.repl.complete = mock.Mock()
self.repl.print_line = mock.Mock()
self.repl.tab()
self.assertEqual(self.repl.s, " ")
def test_back_parameter(self):
self.repl.matches_iter = mock.Mock()
self.repl.matches_iter.matches = True
self.repl.matches_iter.previous.return_value = "previtem"
self.repl.matches_iter.is_cseq.return_value = False
self.repl.show_list = mock.Mock()
self.repl.funcprops = mock.Mock()
self.repl.arg_pos = mock.Mock()
self.repl.matches_iter.cur_line.return_value = (None, "previtem")
self.repl.print_line = mock.Mock()
self.repl.s = "foo"
self.repl.cpos = 0
self.repl.tab(back=True)
self.assertTrue(self.repl.matches_iter.previous.called)
self.assertTrue(self.repl.s, "previtem")
# Attribute Tests
@unittest.skip("disabled while non-simple completion is disabled")
def test_fuzzy_attribute_tab_complete(self):
"""Test fuzzy attribute with no text"""
self.repl.s = "Foo."
self.repl.config.autocomplete_mode = autocomplete.FUZZY
self.repl.tab()
self.assertEqual(self.repl.s, "Foo.foobar")
@unittest.skip("disabled while non-simple completion is disabled")
def test_fuzzy_attribute_tab_complete2(self):
"""Test fuzzy attribute with some text"""
self.repl.s = "Foo.br"
self.repl.config.autocomplete_mode = autocomplete.FUZZY
self.repl.tab()
self.assertEqual(self.repl.s, "Foo.foobar")
# Expand Tests
def test_simple_expand(self):
self.repl.s = "f"
self.cpos = 0
self.repl.matches_iter = mock.Mock()
self.repl.matches_iter.is_cseq.return_value = True
self.repl.matches_iter.substitute_cseq.return_value = (3, "foo")
self.repl.print_line = mock.Mock()
self.repl.tab()
self.assertEqual(self.repl.s, "foo")
@unittest.skip("disabled while non-simple completion is disabled")
def test_substring_expand_forward(self):
self.repl.config.autocomplete_mode = autocomplete.SUBSTRING
self.repl.s = "ba"
self.repl.tab()
self.assertEqual(self.repl.s, "bar")
@unittest.skip("disabled while non-simple completion is disabled")
def test_fuzzy_expand(self):
pass
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2013-2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes that form the core of instances functionality."""
from datetime import datetime
from datetime import timedelta
import re
from novaclient import exceptions as nova_exceptions
from oslo_config.cfg import NoSuchOptError
from oslo_log import log as logging
from trove.backup.models import Backup
from trove.common import cfg
from trove.common import exception
from trove.common import i18n as i18n
import trove.common.instance as tr_instance
from trove.common.notification import StartNotification
from trove.common.remote import create_cinder_client
from trove.common.remote import create_dns_client
from trove.common.remote import create_guest_client
from trove.common.remote import create_nova_client
from trove.common import template
from trove.common import utils
from trove.configuration.models import Configuration
from trove.datastore import models as datastore_models
from trove.datastore.models import DBDatastoreVersionMetadata
from trove.db import get_db_api
from trove.db import models as dbmodels
from trove.extensions.security_group.models import SecurityGroup
from trove.instance.tasks import InstanceTask
from trove.instance.tasks import InstanceTasks
from trove.module import models as module_models
from trove.module import views as module_views
from trove.quota.quota import run_with_quotas
from trove.taskmanager import api as task_api
(_, _LE, _LI, _LW) = (i18n._, i18n._LE, i18n._LI, i18n._LW)
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def filter_ips(ips, white_list_regex, black_list_regex):
"""Return IPs matching white_list_regex and
Filter out IPs matching black_list_regex.
"""
return [ip for ip in ips if re.search(white_list_regex, ip)
and not re.search(black_list_regex, ip)]
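# Illustrative sketch (not part of the original module; the addresses and
# regexes below are made-up) showing how filter_ips combines the two patterns:
#
#   filter_ips(['10.0.0.3', '192.168.1.5'], r'^10\.', r'\.5$')
#   # -> ['10.0.0.3']   (matches the white list, does not match the black list)
#   filter_ips(['10.0.0.5'], r'^10\.', r'\.5$')
#   # -> []             (matches the white list but also the black list)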
def load_server(context, instance_id, server_id):
"""
Loads a server or raises an exception.
:param context: request context used to access nova
:param instance_id: the trove instance id corresponding to the nova server
(informational only)
:param server_id: the compute instance id which will be retrieved from nova
:type context: trove.common.context.TroveContext
:type instance_id: unicode
:type server_id: unicode
:rtype: novaclient.v2.servers.Server
"""
client = create_nova_client(context)
try:
server = client.servers.get(server_id)
except nova_exceptions.NotFound:
LOG.error(_LE("Could not find nova server_id(%s)."), server_id)
raise exception.ComputeInstanceNotFound(instance_id=instance_id,
server_id=server_id)
except nova_exceptions.ClientException as e:
raise exception.TroveError(str(e))
return server
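# Usage sketch: this mirrors how load_instance below calls load_server; the
# identifiers are taken from that caller, nothing new is introduced.
#
#   server = load_server(context, db_info.id, db_info.compute_instance_id)
#   db_info.server_status = server.status
#   db_info.addresses = server.addresses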
class InstanceStatus(object):
ACTIVE = "ACTIVE"
BLOCKED = "BLOCKED"
BUILD = "BUILD"
FAILED = "FAILED"
REBOOT = "REBOOT"
RESIZE = "RESIZE"
BACKUP = "BACKUP"
SHUTDOWN = "SHUTDOWN"
ERROR = "ERROR"
RESTART_REQUIRED = "RESTART_REQUIRED"
PROMOTE = "PROMOTE"
EJECT = "EJECT"
def validate_volume_size(size):
if size is None:
raise exception.VolumeSizeNotSpecified()
max_size = CONF.max_accepted_volume_size
if long(size) > max_size:
msg = ("Volume 'size' cannot exceed maximum "
"of %d GB, %s cannot be accepted."
% (max_size, size))
raise exception.VolumeQuotaExceeded(msg)
def load_simple_instance_server_status(context, db_info):
"""Loads a server or raises an exception."""
if 'BUILDING' == db_info.task_status.action:
db_info.server_status = "BUILD"
db_info.addresses = {}
else:
client = create_nova_client(context)
try:
server = client.servers.get(db_info.compute_instance_id)
db_info.server_status = server.status
db_info.addresses = server.addresses
except nova_exceptions.NotFound:
db_info.server_status = "SHUTDOWN"
db_info.addresses = {}
# Invalid states to contact the agent
AGENT_INVALID_STATUSES = ["BUILD", "REBOOT", "RESIZE", "PROMOTE", "EJECT"]
class SimpleInstance(object):
"""A simple view of an instance.
This gets loaded directly from the local database, so it's cheaper than
creating the fully loaded Instance. As the name implies, this class knows
nothing of the underlying Nova Compute Instance (i.e. the server).
-----------
| |
| i |
| t n |
| r s ---------------------
| o t | datastore/guest |
| v a ---------------------
| e n |
| c |
| e |
| |
-----------
"""
def __init__(self, context, db_info, datastore_status, root_password=None,
ds_version=None, ds=None):
"""
:type context: trove.common.context.TroveContext
:type db_info: trove.instance.models.DBInstance
:type datastore_status: trove.instance.models.InstanceServiceStatus
:type root_password: str
"""
self.context = context
self.db_info = db_info
self.datastore_status = datastore_status
self.root_pass = root_password
if ds_version is None:
self.ds_version = (datastore_models.DatastoreVersion.
load_by_uuid(self.db_info.datastore_version_id))
if ds is None:
self.ds = (datastore_models.Datastore.
load(self.ds_version.datastore_id))
self.slave_list = None
@property
def addresses(self):
# TODO(tim.simpson): This code attaches two parts of the Nova server to
# db_info: "status" and "addresses". The idea
# originally was to listen to events to update this
# data and store it in the Trove database.
# However, it may have been unwise as a year and a
# half later we still have to load the server anyway
# and this makes the code confusing.
if hasattr(self.db_info, 'addresses'):
return self.db_info.addresses
else:
return None
@property
def created(self):
return self.db_info.created
@property
def dns_ip_address(self):
"""Returns the IP address to be used with DNS."""
ips = self.get_visible_ip_addresses()
if ips:
return ips[0]
@property
def flavor_id(self):
# Flavor ID is a str in the 1.0 API.
return str(self.db_info.flavor_id)
@property
def hostname(self):
return self.db_info.hostname
def get_visible_ip_addresses(self):
"""Returns IPs that will be visible to the user."""
if self.addresses is None:
return None
IPs = []
for label in self.addresses:
if (re.search(CONF.network_label_regex, label) and
len(self.addresses[label]) > 0):
IPs.extend([addr.get('addr')
for addr in self.addresses[label]])
# Includes ip addresses that match the regexp pattern
if CONF.ip_regex and CONF.black_list_regex:
IPs = filter_ips(IPs, CONF.ip_regex, CONF.black_list_regex)
return IPs
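# Illustrative sketch (made-up addresses and config values): with
# CONF.network_label_regex = '^private$' and
#   self.addresses = {'private': [{'addr': '10.0.0.3'}],
#                     'public': [{'addr': '172.24.4.1'}]}
# get_visible_ip_addresses() returns ['10.0.0.3']; when CONF.ip_regex and
# CONF.black_list_regex are both set, the result is narrowed further by
# filter_ips.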
@property
def id(self):
return self.db_info.id
@property
def type(self):
return self.db_info.type
@property
def tenant_id(self):
return self.db_info.tenant_id
@property
def is_building(self):
return self.status in [InstanceStatus.BUILD]
@property
def is_datastore_running(self):
"""True if the service status indicates datastore is up and running."""
return self.datastore_status.status in MYSQL_RESPONSIVE_STATUSES
def datastore_status_matches(self, service_status):
return self.datastore_status.status == service_status
@property
def name(self):
return self.db_info.name
@property
def server_id(self):
return self.db_info.compute_instance_id
@property
def slave_of_id(self):
return self.db_info.slave_of_id
@property
def datastore_status(self):
"""
Returns the Service Status for this instance, e.g. the status of the
MySQL datastore running on the server, not the status of the server
itself.
:return: the current status of the datastore
:rtype: trove.instance.models.InstanceServiceStatus
"""
return self.__datastore_status
@datastore_status.setter
def datastore_status(self, datastore_status):
if datastore_status and not isinstance(datastore_status,
InstanceServiceStatus):
raise ValueError("datastore_status must be of type "
"InstanceServiceStatus. Got %s instead." %
datastore_status.__class__.__name__)
self.__datastore_status = datastore_status
@property
def status(self):
# Check for taskmanager errors.
if self.db_info.task_status.is_error:
return InstanceStatus.ERROR
# Check for taskmanager status.
action = self.db_info.task_status.action
if 'BUILDING' == action:
if 'ERROR' == self.db_info.server_status:
return InstanceStatus.ERROR
return InstanceStatus.BUILD
if 'REBOOTING' == action:
return InstanceStatus.REBOOT
if 'RESIZING' == action:
return InstanceStatus.RESIZE
if 'RESTART_REQUIRED' == action:
return InstanceStatus.RESTART_REQUIRED
if InstanceTasks.PROMOTING.action == action:
return InstanceStatus.PROMOTE
if InstanceTasks.EJECTING.action == action:
return InstanceStatus.EJECT
if InstanceTasks.LOGGING.action == action:
return InstanceStatus.LOGGING
# Check for server status.
if self.db_info.server_status in ["BUILD", "ERROR", "REBOOT",
"RESIZE"]:
return self.db_info.server_status
# As far as Trove is concerned, Nova instances in VERIFY_RESIZE should
# still appear as though they are in RESIZE.
if self.db_info.server_status in ["VERIFY_RESIZE"]:
return InstanceStatus.RESIZE
# Check if there is a backup running for this instance
if Backup.running(self.id):
return InstanceStatus.BACKUP
# Report as Shutdown while deleting, unless there's an error.
if 'DELETING' == action:
if self.db_info.server_status in ["ACTIVE", "SHUTDOWN", "DELETED"]:
return InstanceStatus.SHUTDOWN
else:
LOG.error(_LE("While shutting down instance (%(instance)s): "
"server had status (%(status)s)."),
{'instance': self.id,
'status': self.db_info.server_status})
return InstanceStatus.ERROR
# Check against the service status.
# The service is only paused during a reboot.
if tr_instance.ServiceStatuses.PAUSED == self.datastore_status.status:
return InstanceStatus.REBOOT
# If the service status is NEW, then we are building.
if tr_instance.ServiceStatuses.NEW == self.datastore_status.status:
return InstanceStatus.BUILD
# For everything else we can look at the service status mapping.
return self.datastore_status.status.api_status
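# Worked examples of the resolution order above (illustrative combinations,
# assuming no earlier branch matched):
#   task is_error                          -> InstanceStatus.ERROR
#   task BUILDING and server ERROR         -> InstanceStatus.ERROR
#   task NONE and server VERIFY_RESIZE     -> InstanceStatus.RESIZE
#   task NONE, server ACTIVE, backup runs  -> InstanceStatus.BACKUP
#   otherwise                              -> datastore_status.status.api_status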
@property
def updated(self):
return self.db_info.updated
@property
def volume_id(self):
return self.db_info.volume_id
@property
def volume_size(self):
return self.db_info.volume_size
@property
def datastore_version(self):
return self.ds_version
@property
def datastore(self):
return self.ds
@property
def volume_support(self):
return CONF.get(self.datastore_version.manager).volume_support
@property
def device_path(self):
return CONF.get(self.datastore_version.manager).device_path
@property
def root_password(self):
return self.root_pass
@property
def configuration(self):
if self.db_info.configuration_id is not None:
return Configuration.load(self.context,
self.db_info.configuration_id)
@property
def slaves(self):
if self.slave_list is None:
self.slave_list = DBInstance.find_all(tenant_id=self.tenant_id,
slave_of_id=self.id,
deleted=False).all()
return self.slave_list
@property
def cluster_id(self):
return self.db_info.cluster_id
@property
def shard_id(self):
return self.db_info.shard_id
class DetailInstance(SimpleInstance):
"""A detailed view of an Instance.
This loads a SimpleInstance and then adds additional data for the
instance from the guest.
"""
def __init__(self, context, db_info, datastore_status):
super(DetailInstance, self).__init__(context, db_info,
datastore_status)
self._volume_used = None
self._volume_total = None
@property
def volume_used(self):
return self._volume_used
@volume_used.setter
def volume_used(self, value):
self._volume_used = value
@property
def volume_total(self):
return self._volume_total
@volume_total.setter
def volume_total(self, value):
self._volume_total = value
def get_db_info(context, id, cluster_id=None, include_deleted=False):
"""
Retrieves an instance of the managed datastore from the persisted
storage based on the ID and Context
:param context: the context which owns the instance
:type context: trove.common.context.TroveContext
:param id: the unique ID of the instance
:type id: unicode or str
:param cluster_id: the unique ID of the cluster
:type cluster_id: unicode or str
:return: a record of the instance as its state exists in persisted storage
:rtype: trove.instance.models.DBInstance
"""
if context is None:
raise TypeError("Argument context not defined.")
elif id is None:
raise TypeError("Argument id not defined.")
args = {'id': id}
if cluster_id is not None:
args['cluster_id'] = cluster_id
if not include_deleted:
args['deleted'] = False
try:
db_info = DBInstance.find_by(context=context, **args)
except exception.NotFound:
raise exception.NotFound(uuid=id)
return db_info
def load_any_instance(context, id, load_server=True):
# Try to load an instance with a server.
# If that fails, try to load it without the server.
try:
return load_instance(BuiltInstance, context, id,
needs_server=load_server)
except exception.UnprocessableEntity:
LOG.warning(_LW("Could not load instance %s."), id)
return load_instance(FreshInstance, context, id, needs_server=False)
def load_instance(cls, context, id, needs_server=False,
include_deleted=False):
db_info = get_db_info(context, id, include_deleted=include_deleted)
if not needs_server:
# TODO(tim.simpson): When we have notifications this won't be
# necessary and instead we'll just use the server_status field from
# the instance table.
load_simple_instance_server_status(context, db_info)
server = None
else:
try:
server = load_server(context, db_info.id,
db_info.compute_instance_id)
# TODO(tim.simpson): Remove this hack when we have notifications!
db_info.server_status = server.status
db_info.addresses = server.addresses
except exception.ComputeInstanceNotFound:
LOG.error(_LE("Could not load compute instance %s."),
db_info.compute_instance_id)
raise exception.UnprocessableEntity("Instance %s is not ready." %
id)
service_status = InstanceServiceStatus.find_by(instance_id=id)
LOG.debug("Instance %(instance_id)s service status is %(service_status)s.",
{'instance_id': id, 'service_status': service_status.status})
return cls(context, db_info, server, service_status)
def load_instance_with_guest(cls, context, id, cluster_id=None):
db_info = get_db_info(context, id, cluster_id)
load_simple_instance_server_status(context, db_info)
service_status = InstanceServiceStatus.find_by(instance_id=id)
LOG.debug("Instance %(instance_id)s service status is %(service_status)s.",
{'instance_id': id, 'service_status': service_status.status})
instance = cls(context, db_info, service_status)
load_guest_info(instance, context, id)
return instance
def load_guest_info(instance, context, id):
if instance.status not in AGENT_INVALID_STATUSES:
guest = create_guest_client(context, id)
try:
volume_info = guest.get_volume_info()
instance.volume_used = volume_info['used']
instance.volume_total = volume_info['total']
except Exception as e:
LOG.error(e)
return instance
class BaseInstance(SimpleInstance):
"""Represents an instance.
-----------
| |
| i ---------------------
| t n | compute instance |
| r s ---------------------
| o t |
| v a |
| e n ---------------------
| c | datastore/guest |
| e ---------------------
| |
-----------
"""
def __init__(self, context, db_info, server, datastore_status):
"""
Creates a new initialized representation of an instance composed of its
state in the database and its state from Nova
:param context: the request context which contains the tenant that owns
this instance
:param db_info: the current state of this instance as it exists in the
db
:param server: the current state of this instance as it exists in
Nova
:param datastore_status: the current state of the datastore on this
instance as it exists in the db
:type context: trove.common.context.TroveContext
:type db_info: trove.instance.models.DBInstance
:type server: novaclient.v2.servers.Server
:type datastore_status: trove.instance.models.InstanceServiceStatus
"""
super(BaseInstance, self).__init__(context, db_info, datastore_status)
self.server = server
self._guest = None
self._nova_client = None
self._volume_client = None
def get_guest(self):
return create_guest_client(self.context, self.db_info.id)
def delete(self):
def _delete_resources():
if self.is_building:
raise exception.UnprocessableEntity("Instance %s is not ready."
% self.id)
LOG.debug("Deleting instance with compute id = %s.",
self.db_info.compute_instance_id)
from trove.cluster.models import is_cluster_deleting
if (self.db_info.cluster_id is not None and not
is_cluster_deleting(self.context, self.db_info.cluster_id)):
raise exception.ClusterInstanceOperationNotSupported()
if self.slaves:
msg = _("Detach replicas before deleting replica source.")
LOG.warning(msg)
raise exception.ReplicaSourceDeleteForbidden(msg)
self.update_db(task_status=InstanceTasks.DELETING,
configuration_id=None)
task_api.API(self.context).delete_instance(self.id)
deltas = {'instances': -1}
if self.volume_support:
deltas['volumes'] = -self.volume_size
return run_with_quotas(self.tenant_id,
deltas,
_delete_resources)
def _delete_resources(self, deleted_at):
"""Implemented in subclass."""
pass
def delete_async(self):
deleted_at = datetime.utcnow()
self._delete_resources(deleted_at)
LOG.debug("Setting instance %s to be deleted.", self.id)
self.update_db(deleted=True, deleted_at=deleted_at,
task_status=InstanceTasks.NONE)
self.set_servicestatus_deleted()
# Delete associated security group
if CONF.trove_security_groups_support:
SecurityGroup.delete_for_instance(self.db_info.id,
self.context)
@property
def guest(self):
if not self._guest:
self._guest = self.get_guest()
return self._guest
@property
def nova_client(self):
if not self._nova_client:
self._nova_client = create_nova_client(self.context)
return self._nova_client
def update_db(self, **values):
self.db_info = DBInstance.find_by(id=self.id, deleted=False)
for key in values:
setattr(self.db_info, key, values[key])
self.db_info.save()
def set_servicestatus_deleted(self):
del_instance = InstanceServiceStatus.find_by(instance_id=self.id)
del_instance.set_status(tr_instance.ServiceStatuses.DELETED)
del_instance.save()
@property
def volume_client(self):
if not self._volume_client:
self._volume_client = create_cinder_client(self.context)
return self._volume_client
def reset_task_status(self):
LOG.info(_LI("Resetting task status to NONE on instance %s."),
self.id)
self.update_db(task_status=InstanceTasks.NONE)
class FreshInstance(BaseInstance):
@classmethod
def load(cls, context, id):
return load_instance(cls, context, id, needs_server=False)
class BuiltInstance(BaseInstance):
@classmethod
def load(cls, context, id):
return load_instance(cls, context, id, needs_server=True)
class Instance(BuiltInstance):
"""Represents an instance.
The life span of this object should be limited. Do not store it or
pass it between threads.
"""
@classmethod
def get_root_on_create(cls, datastore_manager):
try:
root_on_create = CONF.get(datastore_manager).root_on_create
return root_on_create
except NoSuchOptError:
LOG.debug("root_on_create not configured for %s,"
" hence defaulting the value to False.",
datastore_manager)
return False
@classmethod
def create(cls, context, name, flavor_id, image_id, databases, users,
datastore, datastore_version, volume_size, backup_id,
availability_zone=None, nics=None,
configuration_id=None, slave_of_id=None, cluster_config=None,
replica_count=None, volume_type=None, modules=None):
call_args = {
'name': name,
'flavor_id': flavor_id,
'datastore': datastore.name if datastore else None,
'datastore_version': datastore_version.name,
'image_id': image_id,
'availability_zone': availability_zone,
}
# All nova flavors are permitted for a datastore-version unless one
# or more entries are found in datastore_version_metadata,
# in which case only those are permitted.
bound_flavors = DBDatastoreVersionMetadata.find_all(
datastore_version_id=datastore_version.id,
key='flavor', deleted=False
)
if bound_flavors.count() > 0:
valid_flavors = tuple(f.value for f in bound_flavors)
if flavor_id not in valid_flavors:
raise exception.DatastoreFlavorAssociationNotFound(
datastore=datastore.name,
datastore_version=datastore_version.name,
flavor_id=flavor_id)
datastore_cfg = CONF.get(datastore_version.manager)
client = create_nova_client(context)
try:
flavor = client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
deltas = {'instances': 1}
volume_support = datastore_cfg.volume_support
if volume_support:
call_args['volume_size'] = volume_size
validate_volume_size(volume_size)
deltas['volumes'] = volume_size
# Instance volume should have enough space for the backup.
# Backup and volume sizes are in GBs.
target_size = volume_size
else:
target_size = flavor.disk # local_storage
if volume_size is not None:
raise exception.VolumeNotSupported()
if datastore_cfg.device_path:
if flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
target_size = flavor.ephemeral  # ephemeral storage
if backup_id:
call_args['backup_id'] = backup_id
backup_info = Backup.get_by_id(context, backup_id)
if not backup_info.is_done_successfuly:
raise exception.BackupNotCompleteError(
backup_id=backup_id, state=backup_info.state)
if backup_info.size > target_size:
raise exception.BackupTooLarge(
backup_size=backup_info.size, disk_size=target_size)
if not backup_info.check_swift_object_exist(
context,
verify_checksum=CONF.verify_swift_checksum_on_restore):
raise exception.BackupFileNotFound(
location=backup_info.location)
if (backup_info.datastore_version_id
and backup_info.datastore.name != datastore.name):
raise exception.BackupDatastoreMismatchError(
datastore1=backup_info.datastore.name,
datastore2=datastore.name)
if slave_of_id:
call_args['replica_of'] = slave_of_id
call_args['replica_count'] = replica_count
replication_support = datastore_cfg.replication_strategy
if not replication_support:
raise exception.ReplicationNotSupported(
datastore=datastore.name)
try:
# looking for replica source
replica_source = DBInstance.find_by(
context,
id=slave_of_id,
deleted=False)
if replica_source.slave_of_id:
raise exception.Forbidden(
_("Cannot create a replica of a replica %(id)s.")
% {'id': slave_of_id})
# load the replica source status to check if
# source is available
load_simple_instance_server_status(
context,
replica_source)
replica_source_instance = Instance(
context, replica_source,
None,
InstanceServiceStatus.find_by(
context,
instance_id=slave_of_id))
replica_source_instance.validate_can_perform_action()
except exception.ModelNotFoundError:
LOG.exception(
_("Cannot create a replica of %(id)s "
"as that instance could not be found.")
% {'id': slave_of_id})
raise exception.NotFound(uuid=slave_of_id)
elif replica_count and replica_count != 1:
raise exception.Forbidden(_(
"Replica count only valid when creating replicas. Cannot "
"create %(count)d instances.") % {'count': replica_count})
multi_replica = slave_of_id and replica_count and replica_count > 1
instance_count = replica_count if multi_replica else 1
if not nics:
nics = []
if CONF.default_neutron_networks:
nics = [{"net-id": net_id}
for net_id in CONF.default_neutron_networks] + nics
if nics:
call_args['nics'] = nics
if cluster_config:
call_args['cluster_id'] = cluster_config.get("id", None)
if not modules:
modules = []
module_ids = [mod['id'] for mod in modules]
modules = module_models.Modules.load_by_ids(context, module_ids)
auto_apply_modules = module_models.Modules.load_auto_apply(
context, datastore.id, datastore_version.id)
for aa_module in auto_apply_modules:
if aa_module.id not in module_ids:
modules.append(aa_module)
module_list = []
for module in modules:
module.contents = module_models.Module.deprocess_contents(
module.contents)
module_info = module_views.DetailedModuleView(module).data(
include_contents=True)
module_list.append(module_info)
def _create_resources():
if cluster_config:
cluster_id = cluster_config.get("id", None)
shard_id = cluster_config.get("shard_id", None)
instance_type = cluster_config.get("instance_type", None)
else:
cluster_id = shard_id = instance_type = None
ids = []
names = []
root_passwords = []
root_password = None
for instance_index in range(0, instance_count):
db_info = DBInstance.create(
name=name, flavor_id=flavor_id, tenant_id=context.tenant,
volume_size=volume_size,
datastore_version_id=datastore_version.id,
task_status=InstanceTasks.BUILDING,
configuration_id=configuration_id,
slave_of_id=slave_of_id, cluster_id=cluster_id,
shard_id=shard_id, type=instance_type)
LOG.debug("Tenant %(tenant)s created new Trove instance "
"%(db)s.",
{'tenant': context.tenant, 'db': db_info.id})
instance_id = db_info.id
cls.add_instance_modules(context, instance_id, modules)
instance_name = name
ids.append(instance_id)
names.append(instance_name)
root_passwords.append(None)
# change the name to be name + replica_number if more than one
if multi_replica:
replica_number = instance_index + 1
names[instance_index] += '-' + str(replica_number)
setattr(db_info, 'name', names[instance_index])
db_info.save()
# if a configuration group is associated with an instance,
# generate an overrides dict to pass into the instance creation
# method
config = Configuration(context, configuration_id)
overrides = config.get_configuration_overrides()
service_status = InstanceServiceStatus.create(
instance_id=instance_id,
status=tr_instance.ServiceStatuses.NEW)
if CONF.trove_dns_support:
dns_client = create_dns_client(context)
hostname = dns_client.determine_hostname(instance_id)
db_info.hostname = hostname
db_info.save()
if cls.get_root_on_create(
datastore_version.manager) and not backup_id:
root_password = utils.generate_random_password()
root_passwords[instance_index] = root_password
if instance_count > 1:
instance_id = ids
instance_name = names
root_password = root_passwords
task_api.API(context).create_instance(
instance_id, instance_name, flavor, image_id, databases, users,
datastore_version.manager, datastore_version.packages,
volume_size, backup_id, availability_zone, root_password,
nics, overrides, slave_of_id, cluster_config,
volume_type=volume_type, modules=module_list)
return SimpleInstance(context, db_info, service_status,
root_password)
with StartNotification(context, **call_args):
return run_with_quotas(context.tenant, deltas, _create_resources)
@classmethod
def add_instance_modules(cls, context, instance_id, modules):
for module in modules:
module_models.InstanceModule.create(
context, instance_id, module.id, module.md5)
def get_flavor(self):
client = create_nova_client(self.context)
return client.flavors.get(self.flavor_id)
def get_default_configuration_template(self):
flavor = self.get_flavor()
LOG.debug("Getting default config template for datastore version "
"%(ds_version)s and flavor %(flavor)s.",
{'ds_version': self.ds_version, 'flavor': flavor})
config = template.SingleInstanceConfigTemplate(
self.ds_version, flavor, id)
return config.render_dict()
def resize_flavor(self, new_flavor_id):
self.validate_can_perform_action()
LOG.info(_LI("Resizing instance %(instance_id)s flavor to "
"%(flavor_id)s."),
{'instance_id': self.id, 'flavor_id': new_flavor_id})
if self.db_info.cluster_id is not None:
raise exception.ClusterInstanceOperationNotSupported()
# Validate that the old and new flavor IDs are not the same, new flavor
# can be found and has ephemeral/volume support if required by the
# current flavor.
if self.flavor_id == new_flavor_id:
raise exception.BadRequest(_("The new flavor id must be different "
"than the current flavor id of '%s'.")
% self.flavor_id)
client = create_nova_client(self.context)
try:
new_flavor = client.flavors.get(new_flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=new_flavor_id)
old_flavor = client.flavors.get(self.flavor_id)
if self.volume_support:
if new_flavor.ephemeral != 0:
raise exception.LocalStorageNotSupported()
elif self.device_path is not None:
# ephemeral support enabled
if new_flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=new_flavor_id)
# Set the task to RESIZING and begin the async call before returning.
self.update_db(task_status=InstanceTasks.RESIZING)
LOG.debug("Instance %s set to RESIZING.", self.id)
task_api.API(self.context).resize_flavor(self.id, old_flavor,
new_flavor)
def resize_volume(self, new_size):
def _resize_resources():
self.validate_can_perform_action()
LOG.info(_LI("Resizing volume of instance %s."), self.id)
if self.db_info.cluster_id is not None:
raise exception.ClusterInstanceOperationNotSupported()
old_size = self.volume_size
if int(new_size) <= old_size:
raise exception.BadRequest(_("The new volume 'size' must be "
"larger than the current volume "
"size of '%s'.") % old_size)
# Set the task to Resizing before sending off to the taskmanager
self.update_db(task_status=InstanceTasks.RESIZING)
task_api.API(self.context).resize_volume(new_size, self.id)
if not self.volume_size:
raise exception.BadRequest(_("Instance %s has no volume.")
% self.id)
new_size_l = long(new_size)
validate_volume_size(new_size_l)
return run_with_quotas(self.tenant_id,
{'volumes': new_size_l - self.volume_size},
_resize_resources)
def reboot(self):
self.validate_can_perform_action()
LOG.info(_LI("Rebooting instance %s."), self.id)
if self.db_info.cluster_id is not None and not self.context.is_admin:
raise exception.ClusterInstanceOperationNotSupported()
self.update_db(task_status=InstanceTasks.REBOOTING)
task_api.API(self.context).reboot(self.id)
def restart(self):
self.validate_can_perform_action()
LOG.info(_LI("Restarting datastore on instance %s."), self.id)
if self.db_info.cluster_id is not None and not self.context.is_admin:
raise exception.ClusterInstanceOperationNotSupported()
# Set our local status since Nova might not change it quick enough.
# TODO(tim.simpson): Possible bad stuff can happen if this service
# shuts down before it can set status to NONE.
# We need a last updated time to mitigate this;
# after some period of tolerance, we'll assume the
# status is no longer in effect.
self.update_db(task_status=InstanceTasks.REBOOTING)
task_api.API(self.context).restart(self.id)
def detach_replica(self):
self.validate_can_perform_action()
LOG.info(_LI("Detaching instance %s from its replication source."),
self.id)
if not self.slave_of_id:
raise exception.BadRequest(_("Instance %s is not a replica.")
% self.id)
task_api.API(self.context).detach_replica(self.id)
def promote_to_replica_source(self):
self.validate_can_perform_action()
LOG.info(_LI("Promoting instance %s to replication source."), self.id)
if not self.slave_of_id:
raise exception.BadRequest(_("Instance %s is not a replica.")
% self.id)
# Update task status of master and all slaves
master = BuiltInstance.load(self.context, self.slave_of_id)
for dbinfo in [master.db_info] + master.slaves:
setattr(dbinfo, 'task_status', InstanceTasks.PROMOTING)
dbinfo.save()
task_api.API(self.context).promote_to_replica_source(self.id)
def eject_replica_source(self):
self.validate_can_perform_action()
LOG.info(_LI("Ejecting replica source %s from it's replication set."),
self.id)
if not self.slaves:
raise exception.BadRequest(_("Instance %s is not a replica"
" source.") % self.id)
service = InstanceServiceStatus.find_by(instance_id=self.id)
last_heartbeat_delta = datetime.utcnow() - service.updated_at
agent_expiry_interval = timedelta(seconds=CONF.agent_heartbeat_expiry)
if last_heartbeat_delta < agent_expiry_interval:
raise exception.BadRequest(_("Replica Source %s cannot be ejected"
" as it has a current heartbeat")
% self.id)
# Update task status of master and all slaves
for dbinfo in [self.db_info] + self.slaves:
setattr(dbinfo, 'task_status', InstanceTasks.EJECTING)
dbinfo.save()
task_api.API(self.context).eject_replica_source(self.id)
def migrate(self, host=None):
self.validate_can_perform_action()
LOG.info(_LI("Migrating instance id = %(instance_id)s "
"to host = %(host)s."),
{'instance_id': self.id, 'host': host})
self.update_db(task_status=InstanceTasks.MIGRATING)
task_api.API(self.context).migrate(self.id, host)
def validate_can_perform_action(self):
"""
Raises exception if an instance action cannot currently be performed.
"""
# cases where action cannot be performed
if self.db_info.server_status != 'ACTIVE':
status = self.db_info.server_status
elif (self.db_info.task_status != InstanceTasks.NONE and
self.db_info.task_status != InstanceTasks.RESTART_REQUIRED):
status = self.db_info.task_status
elif not self.datastore_status.status.action_is_allowed:
status = self.status
elif Backup.running(self.id):
status = InstanceStatus.BACKUP
else:
# action can be performed
return
msg = (_("Instance %(instance_id)s is not currently available for an "
"action to be performed (status was %(action_status)s).") %
{'instance_id': self.id, 'action_status': status})
LOG.error(msg)
raise exception.UnprocessableEntity(msg)
def _validate_can_perform_assign(self):
"""
Raises exception if a configuration assign cannot
currently be performed
"""
# check if the instance already has a configuration assigned
if self.db_info.configuration_id:
raise exception.ConfigurationAlreadyAttached(
instance_id=self.id,
configuration_id=self.db_info.configuration_id)
# check if the instance is not ACTIVE or has tasks
status = None
if self.db_info.server_status != InstanceStatus.ACTIVE:
status = self.db_info.server_status
elif self.db_info.task_status != InstanceTasks.NONE:
status = self.db_info.task_status.action
if status:
raise exception.InvalidInstanceState(instance_id=self.id,
status=status)
def unassign_configuration(self):
LOG.debug("Unassigning the configuration from the instance %s.",
self.id)
if self.configuration and self.configuration.id:
LOG.debug("Unassigning the configuration id %s.",
self.configuration.id)
self.guest.update_overrides({}, remove=True)
# Dynamically reset the configuration values back to their default
# values from the configuration template.
# Reset the values only if the default is available for all of
# them and restart is not required by any.
# Mark the instance with a 'RESTART_REQUIRED' status otherwise.
flavor = self.get_flavor()
default_config = self._render_config_dict(flavor)
current_config = Configuration(self.context, self.configuration.id)
current_overrides = current_config.get_configuration_overrides()
# Check the configuration template has defaults for all modified
# values.
has_defaults_for_all = all(key in default_config.keys()
for key in current_overrides.keys())
if (not current_config.does_configuration_need_restart() and
has_defaults_for_all):
self.guest.apply_overrides(
{k: v for k, v in default_config.items()
if k in current_overrides})
else:
LOG.debug(
"Could not revert all configuration changes dynamically. "
"A restart will be required.")
self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
else:
LOG.debug("No configuration found on instance. Skipping.")
def assign_configuration(self, configuration_id):
self._validate_can_perform_assign()
try:
configuration = Configuration.load(self.context, configuration_id)
except exception.ModelNotFoundError:
raise exception.NotFound(
message='Configuration group id: %s could not be found.'
% configuration_id)
config_ds_v = configuration.datastore_version_id
inst_ds_v = self.db_info.datastore_version_id
if (config_ds_v != inst_ds_v):
raise exception.ConfigurationDatastoreNotMatchInstance(
config_datastore_version=config_ds_v,
instance_datastore_version=inst_ds_v)
config = Configuration(self.context, configuration.id)
LOG.debug("Config is %s.", config)
self.update_overrides(config)
self.update_db(configuration_id=configuration.id)
def update_overrides(self, config):
LOG.debug("Updating or removing overrides for instance %s.", self.id)
overrides = config.get_configuration_overrides()
self.guest.update_overrides(overrides)
# Apply the new configuration values dynamically to the running
# datastore service.
# Apply overrides only if ALL values can be applied at once or mark
# the instance with a 'RESTART_REQUIRED' status.
if not config.does_configuration_need_restart():
self.guest.apply_overrides(overrides)
else:
LOG.debug("Configuration overrides has non-dynamic settings and "
"will require restart to take effect.")
self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
def _render_config_dict(self, flavor):
config = template.SingleInstanceConfigTemplate(
self.datastore_version, flavor, self.id)
return dict(config.render_dict())
def create_server_list_matcher(server_list):
# Returns a method which finds a server from the given list.
def find_server(instance_id, server_id):
matches = [server for server in server_list if server.id == server_id]
if len(matches) == 1:
return matches[0]
elif len(matches) < 1:
# The instance was not found in the list and
# this can happen if the instance is deleted from
# nova but still in trove database
raise exception.ComputeInstanceNotFound(
instance_id=instance_id, server_id=server_id)
else:
# Should never happen, but never say never.
LOG.error(_LE("Server %(server)s for instance %(instance)s was "
"found twice!"), {'server': server_id,
'instance': instance_id})
raise exception.TroveError(uuid=instance_id)
return find_server
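# Usage sketch: this mirrors Instances.load below; the identifiers come from
# that method, nothing new is introduced.
#
#   servers = create_nova_client(context).servers.list()
#   find_server = create_server_list_matcher(servers)
#   server = find_server(db.id, db.compute_instance_id)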
class Instances(object):
DEFAULT_LIMIT = CONF.instances_page_size
@staticmethod
def load(context, include_clustered, instance_ids=None):
def load_simple_instance(context, db, status, **kwargs):
return SimpleInstance(context, db, status)
if context is None:
raise TypeError("Argument context not defined.")
client = create_nova_client(context)
servers = client.servers.list()
query_opts = {'tenant_id': context.tenant,
'deleted': False}
if not include_clustered:
query_opts['cluster_id'] = None
if instance_ids and len(instance_ids) > 1:
raise exception.DatastoreOperationNotSupported(
operation='module-instances', datastore='current')
db_infos = DBInstance.query().filter_by(**query_opts)
else:
if instance_ids:
query_opts['id'] = instance_ids[0]
db_infos = DBInstance.find_all(**query_opts)
limit = utils.pagination_limit(context.limit, Instances.DEFAULT_LIMIT)
data_view = DBInstance.find_by_pagination('instances', db_infos, "foo",
limit=limit,
marker=context.marker)
next_marker = data_view.next_page_marker
find_server = create_server_list_matcher(servers)
for db in db_infos:
LOG.debug("Checking for db [id=%(db_id)s, "
"compute_instance_id=%(instance_id)s].",
{'db_id': db.id, 'instance_id': db.compute_instance_id})
ret = Instances._load_servers_status(load_simple_instance, context,
data_view.collection,
find_server)
return ret, next_marker
@staticmethod
def load_all_by_cluster_id(context, cluster_id, load_servers=True):
db_instances = DBInstance.find_all(cluster_id=cluster_id,
deleted=False)
return [load_any_instance(context, db_inst.id,
load_server=load_servers)
for db_inst in db_instances]
@staticmethod
def _load_servers_status(load_instance, context, db_items, find_server):
ret = []
for db in db_items:
server = None
try:
# TODO(tim.simpson): Delete when we get notifications working!
if InstanceTasks.BUILDING == db.task_status:
db.server_status = "BUILD"
db.addresses = {}
else:
try:
server = find_server(db.id, db.compute_instance_id)
db.server_status = server.status
db.addresses = server.addresses
except exception.ComputeInstanceNotFound:
db.server_status = "SHUTDOWN" # Fake it...
db.addresses = {}
# TODO(tim.simpson): End of hack.
# volumes = find_volumes(server.id)
datastore_status = InstanceServiceStatus.find_by(
instance_id=db.id)
if not datastore_status.status: # This should never happen.
LOG.error(_LE("Server status could not be read for "
"instance id(%s)."), db.id)
continue
LOG.debug("Server api_status(%s).",
datastore_status.status.api_status)
except exception.ModelNotFoundError:
LOG.error(_LE("Server status could not be read for "
"instance id(%s)."), db.id)
continue
ret.append(load_instance(context, db, datastore_status,
server=server))
return ret
class DBInstance(dbmodels.DatabaseModelBase):
"""Defines the task being executed plus the start time."""
_data_fields = ['name', 'created', 'compute_instance_id',
'task_id', 'task_description', 'task_start_time',
'volume_id', 'deleted', 'tenant_id',
'datastore_version_id', 'configuration_id', 'slave_of_id',
'cluster_id', 'shard_id', 'type']
def __init__(self, task_status, **kwargs):
"""
Creates a new persistable entity of the Trove Guest Instance for
purposes of recording its current state and record of modifications
:param task_status: the current state details of any activity or error
that is running on this guest instance (e.g. resizing, deleting)
:type task_status: trove.instance.tasks.InstanceTask
"""
kwargs["task_id"] = task_status.code
kwargs["task_description"] = task_status.db_text
kwargs["deleted"] = False
super(DBInstance, self).__init__(**kwargs)
self.set_task_status(task_status)
def _validate(self, errors):
if InstanceTask.from_code(self.task_id) is None:
errors['task_id'] = "Not valid."
if self.task_status is None:
errors['task_status'] = "Cannot be None."
def get_task_status(self):
return InstanceTask.from_code(self.task_id)
def set_task_status(self, value):
self.task_id = value.code
self.task_description = value.db_text
task_status = property(get_task_status, set_task_status)
class InstanceServiceStatus(dbmodels.DatabaseModelBase):
_data_fields = ['instance_id', 'status_id', 'status_description',
'updated_at']
def __init__(self, status, **kwargs):
kwargs["status_id"] = status.code
kwargs["status_description"] = status.description
super(InstanceServiceStatus, self).__init__(**kwargs)
self.set_status(status)
def _validate(self, errors):
if self.status is None:
errors['status'] = "Cannot be None."
if tr_instance.ServiceStatus.from_code(self.status_id) is None:
errors['status_id'] = "Not valid."
def get_status(self):
"""
Returns the current enumerated status of the Service running on the
instance
:return: a ServiceStatus reference indicating the currently stored
status of the service
:rtype: trove.common.instance.ServiceStatus
"""
return tr_instance.ServiceStatus.from_code(self.status_id)
def set_status(self, value):
"""
Sets the status of the hosted service
:param value: current state of the hosted service
:type value: trove.common.instance.ServiceStatus
"""
self.status_id = value.code
self.status_description = value.description
def save(self):
self['updated_at'] = utils.utcnow()
return get_db_api().save(self)
status = property(get_status, set_status)
def persisted_models():
return {
'instance': DBInstance,
'service_statuses': InstanceServiceStatus,
}
MYSQL_RESPONSIVE_STATUSES = [tr_instance.ServiceStatuses.RUNNING]
|
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.23
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1Secret(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'data': 'dict(str, str)',
'immutable': 'bool',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'string_data': 'dict(str, str)',
'type': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'data': 'data',
'immutable': 'immutable',
'kind': 'kind',
'metadata': 'metadata',
'string_data': 'stringData',
'type': 'type'
}
def __init__(self, api_version=None, data=None, immutable=None, kind=None, metadata=None, string_data=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1Secret - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._data = None
self._immutable = None
self._kind = None
self._metadata = None
self._string_data = None
self._type = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if data is not None:
self.data = data
if immutable is not None:
self.immutable = immutable
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if string_data is not None:
self.string_data = string_data
if type is not None:
self.type = type
@property
def api_version(self):
"""Gets the api_version of this V1Secret. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Secret. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Secret.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Secret. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def data(self):
"""Gets the data of this V1Secret. # noqa: E501
Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 # noqa: E501
:return: The data of this V1Secret. # noqa: E501
:rtype: dict(str, str)
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this V1Secret.
Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 # noqa: E501
:param data: The data of this V1Secret. # noqa: E501
:type: dict(str, str)
"""
self._data = data
@property
def immutable(self):
"""Gets the immutable of this V1Secret. # noqa: E501
Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
:return: The immutable of this V1Secret. # noqa: E501
:rtype: bool
"""
return self._immutable
@immutable.setter
def immutable(self, immutable):
"""Sets the immutable of this V1Secret.
Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. # noqa: E501
:param immutable: The immutable of this V1Secret. # noqa: E501
:type: bool
"""
self._immutable = immutable
@property
def kind(self):
"""Gets the kind of this V1Secret. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Secret. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Secret.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Secret. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Secret. # noqa: E501
:return: The metadata of this V1Secret. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Secret.
:param metadata: The metadata of this V1Secret. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def string_data(self):
"""Gets the string_data of this V1Secret. # noqa: E501
stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API. # noqa: E501
:return: The string_data of this V1Secret. # noqa: E501
:rtype: dict(str, str)
"""
return self._string_data
@string_data.setter
def string_data(self, string_data):
"""Sets the string_data of this V1Secret.
stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API. # noqa: E501
:param string_data: The string_data of this V1Secret. # noqa: E501
:type: dict(str, str)
"""
self._string_data = string_data
@property
def type(self):
"""Gets the type of this V1Secret. # noqa: E501
Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types # noqa: E501
:return: The type of this V1Secret. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1Secret.
Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types # noqa: E501
:param type: The type of this V1Secret. # noqa: E501
:type: str
"""
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Secret):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Secret):
return True
return self.to_dict() != other.to_dict()
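# Minimal usage sketch (the field values are placeholders, not from the
# generated source):
#
#   secret = V1Secret(api_version="v1", kind="Secret",
#                     string_data={"token": "example-token"})
#   secret.to_dict()
#   # -> {'api_version': 'v1', 'data': None, 'immutable': None, 'kind': 'Secret',
#   #     'metadata': None, 'string_data': {'token': 'example-token'},
#   #     'type': None}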
|
|
"""
differential_evolution: The differential evolution global optimization algorithm
Added by Andrew Nelson 2014
Modified to add a maximum-execution-time criterion
"""
from __future__ import division, print_function, absolute_import
import time
import numpy as np
from scipy.optimize import OptimizeResult, minimize
from scipy.optimize.optimize import _status_message
import numbers
__all__ = ['differential_evolution']
_MACHEPS = np.finfo(np.float64).eps
def differential_evolution(func, bounds, args=(), strategy='best1bin',
maxiter=None, popsize=15, tol=0.01,
mutation=(0.5, 1), recombination=0.7, seed=None,
callback=None, disp=False, polish=True,
init='latinhypercube', max_execution_time=float('inf')):
solver = DifferentialEvolutionSolver(func, bounds, args=args,
strategy=strategy, maxiter=maxiter,
popsize=popsize, tol=tol,
mutation=mutation,
recombination=recombination,
seed=seed, polish=polish,
callback=callback,
disp=disp,
init=init,
max_execution_time=max_execution_time)
return solver.solve()
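# Usage sketch (the objective and bounds are made-up; the interface is the
# wrapper defined above):
#
#   result = differential_evolution(lambda x: x[0] ** 2 + x[1] ** 2,
#                                   bounds=[(-5, 5), (-5, 5)],
#                                   max_execution_time=10)
#   result.x, result.fun   # approximately (array([0., 0.]), 0.0)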
class DifferentialEvolutionSolver(object):
_binomial = {'best1bin': '_best1',
'randtobest1bin': '_randtobest1',
'best2bin': '_best2',
'rand2bin': '_rand2',
'rand1bin': '_rand1'}
_exponential = {'best1exp': '_best1',
'rand1exp': '_rand1',
'randtobest1exp': '_randtobest1',
'best2exp': '_best2',
'rand2exp': '_rand2'}
def __init__(self, func, bounds, args=(),
strategy='best1bin', maxiter=None, popsize=15,
tol=0.01, mutation=(0.5, 1), recombination=0.7, seed=None,
maxfun=None, callback=None, disp=False, polish=True,
init='latinhypercube', max_execution_time=float('inf')):
if strategy in self._binomial:
self.mutation_func = getattr(self, self._binomial[strategy])
elif strategy in self._exponential:
self.mutation_func = getattr(self, self._exponential[strategy])
else:
raise ValueError("Please select a valid mutation strategy")
self.strategy = strategy
self.callback = callback
self.polish = polish
self.tol = tol
self.max_execution_time = max_execution_time
# Mutation constant should be in [0, 2). If specified as a sequence
# then dithering is performed.
self.scale = mutation
if (not np.all(np.isfinite(mutation)) or
np.any(np.array(mutation) >= 2) or
np.any(np.array(mutation) < 0)):
raise ValueError('The mutation constant must be a float in '
'U[0, 2), or specified as a tuple(min, max)'
' where min < max and min, max are in U[0, 2).')
self.dither = None
if hasattr(mutation, '__iter__') and len(mutation) > 1:
self.dither = [mutation[0], mutation[1]]
self.dither.sort()
self.cross_over_probability = recombination
self.func = func
self.args = args
# convert tuple of lower and upper bounds to limits
# [(low_0, high_0), ..., (low_n, high_n)]
# -> [[low_0, ..., low_n], [high_0, ..., high_n]]
self.limits = np.array(bounds, dtype='float').T
if (np.size(self.limits, 0) != 2
or not np.all(np.isfinite(self.limits))):
raise ValueError('bounds should be a sequence containing '
'real valued (min, max) pairs for each value'
' in x')
self.maxiter = maxiter or 1000
self.maxfun = (maxfun or ((self.maxiter + 1) * popsize *
np.size(self.limits, 1)))
# population is scaled to between [0, 1].
# We have to scale between parameter <-> population
# save these arguments for _scale_parameter and
# _unscale_parameter. This is an optimization
self.__scale_arg1 = 0.5 * (self.limits[0] + self.limits[1])
self.__scale_arg2 = np.fabs(self.limits[0] - self.limits[1])
parameter_count = np.size(self.limits, 1)
self.random_number_generator = _make_random_gen(seed)
# Default initialization is a latin hypercube design, but there
# are other population initializations possible.
self.population = np.zeros((popsize * parameter_count,
parameter_count))
if init == 'latinhypercube':
self.init_population_lhs()
elif init == 'random':
self.init_population_random()
else:
raise ValueError("The population initialization method must be one"
"of 'latinhypercube' or 'random'")
self.population_energies = np.ones(
popsize * parameter_count) * np.inf
self.disp = disp
def init_population_lhs(self):
"""
Initializes the population with Latin Hypercube Sampling
Latin Hypercube Sampling ensures that coverage of the available
parameter space is maximised.
"""
samples = np.size(self.population, 0)
N = np.size(self.population, 1)
rng = self.random_number_generator
# Generate the intervals
segsize = 1.0 / samples
# Fill points uniformly in each interval
rdrange = rng.rand(samples, N) * segsize
rdrange += np.atleast_2d(
np.linspace(0., 1., samples, endpoint=False)).T
# Make the random pairings
self.population = np.zeros_like(rdrange)
for j in range(N):
order = rng.permutation(range(samples))
self.population[:, j] = rdrange[order, j]
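# Illustrative numbers (not from the code): with 4 rows, segsize = 0.25 and
# each column drawing one point from each stratum [0, 0.25), [0.25, 0.5),
# [0.5, 0.75), [0.75, 1.0), the per-column shuffle above leaves every
# parameter stratified across its whole [0, 1] range.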
def init_population_random(self):
"""
Initialises the population at random. This type of initialization
can possess clustering; Latin Hypercube sampling is generally better.
"""
rng = self.random_number_generator
self.population = rng.random_sample(self.population.shape)
@property
def x(self):
return self._scale_parameters(self.population[0])
def solve(self):
nfev, nit, warning_flag = 0, 0, False
status_message = _status_message['success']
# calculate energies to start with
for index, candidate in enumerate(self.population):
parameters = self._scale_parameters(candidate)
self.population_energies[index] = self.func(parameters,
*self.args)
nfev += 1
if nfev > self.maxfun:
warning_flag = True
status_message = _status_message['maxfev']
break
minval = np.argmin(self.population_energies)
# put the lowest energy into the best solution position.
lowest_energy = self.population_energies[minval]
self.population_energies[minval] = self.population_energies[0]
self.population_energies[0] = lowest_energy
self.population[[0, minval], :] = self.population[[minval, 0], :]
if warning_flag:
return OptimizeResult(
x=self.x,
fun=self.population_energies[0],
nfev=nfev,
nit=nit,
message=status_message,
success=(warning_flag is not True))
# do the optimisation.
start_time = time.time()
nit = 0
while nit < self.maxiter + 1:
nit += 1
if start_time + self.max_execution_time < time.time():
warning_flag = True
status_message = 'Max execution time reached'
break
if self.dither is not None:
self.scale = self.random_number_generator.rand(
) * (self.dither[1] - self.dither[0]) + self.dither[0]
for candidate in range(np.size(self.population, 0)):
if nfev > self.maxfun:
warning_flag = True
status_message = _status_message['maxfev']
break
trial = self._mutate(candidate)
self._ensure_constraint(trial)
parameters = self._scale_parameters(trial)
energy = self.func(parameters, *self.args)
nfev += 1
if energy < self.population_energies[candidate]:
self.population[candidate] = trial
self.population_energies[candidate] = energy
if energy < self.population_energies[0]:
self.population_energies[0] = energy
self.population[0] = trial
# stop when the fractional s.d. of the population is less than tol
# of the mean energy
convergence = (np.std(self.population_energies) /
np.abs(np.mean(self.population_energies) +
_MACHEPS))
if self.disp:
print("differential_evolution step %d: f(x)= %g"
% (nit,
self.population_energies[0]))
if (self.callback and
self.callback(self._scale_parameters(self.population[0]),
convergence=self.tol / convergence) is True):
warning_flag = True
status_message = ('callback function requested stop early '
'by returning True')
break
if convergence < self.tol or warning_flag:
break
else:
status_message = _status_message['maxiter']
warning_flag = True
DE_result = OptimizeResult(
x=self.x,
fun=self.population_energies[0],
nfev=nfev,
nit=nit,
message=status_message,
success=(warning_flag is not True))
if self.polish:
result = minimize(self.func,
np.copy(DE_result.x),
method='L-BFGS-B',
bounds=self.limits.T,
args=self.args)
nfev += result.nfev
DE_result.nfev = nfev
if result.fun < DE_result.fun:
DE_result.fun = result.fun
DE_result.x = result.x
DE_result.jac = result.jac
# to keep internal state consistent
self.population_energies[0] = result.fun
self.population[0] = self._unscale_parameters(result.x)
return DE_result
def _scale_parameters(self, trial):
"""
scale from a number between 0 and 1 to parameters
"""
return self.__scale_arg1 + (trial - 0.5) * self.__scale_arg2
def _unscale_parameters(self, parameters):
"""
scale from parameters to a number between 0 and 1.
"""
return (parameters - self.__scale_arg1) / self.__scale_arg2 + 0.5
def _ensure_constraint(self, trial):
"""
make sure the parameters lie between the limits
"""
for index, param in enumerate(trial):
if param > 1 or param < 0:
trial[index] = self.random_number_generator.rand()
def _mutate(self, candidate):
"""
create a trial vector based on a mutation strategy
"""
trial = np.copy(self.population[candidate])
parameter_count = np.size(trial, 0)
fill_point = self.random_number_generator.randint(0, parameter_count)
if (self.strategy == 'randtobest1exp'
or self.strategy == 'randtobest1bin'):
bprime = self.mutation_func(candidate,
self._select_samples(candidate, 5))
else:
bprime = self.mutation_func(self._select_samples(candidate, 5))
if self.strategy in self._binomial:
crossovers = self.random_number_generator.rand(parameter_count)
crossovers = crossovers < self.cross_over_probability
# the entry at fill_point is always taken from the bprime vector for the
# binomial strategy. If you filled in modulo with a loop you would have
# to set the last one to True; without a loop, any random entry can be
# the one forced to True.
crossovers[fill_point] = True
trial = np.where(crossovers, bprime, trial)
return trial
elif self.strategy in self._exponential:
i = 0
while (i < parameter_count and
self.random_number_generator.rand() <
self.cross_over_probability):
trial[fill_point] = bprime[fill_point]
fill_point = (fill_point + 1) % parameter_count
i += 1
return trial
def _best1(self, samples):
"""
best1bin, best1exp
"""
r0, r1 = samples[:2]
return (self.population[0] + self.scale *
(self.population[r0] - self.population[r1]))
def _rand1(self, samples):
"""
rand1bin, rand1exp
"""
r0, r1, r2 = samples[:3]
return (self.population[r0] + self.scale *
(self.population[r1] - self.population[r2]))
def _randtobest1(self, candidate, samples):
"""
randtobest1bin, randtobest1exp
"""
r0, r1 = samples[:2]
bprime = np.copy(self.population[candidate])
bprime += self.scale * (self.population[0] - bprime)
bprime += self.scale * (self.population[r0] -
self.population[r1])
return bprime
def _best2(self, samples):
"""
best2bin, best2exp
"""
r0, r1, r2, r3 = samples[:4]
bprime = (self.population[0] + self.scale *
(self.population[r0] + self.population[r1]
- self.population[r2] - self.population[r3]))
return bprime
def _rand2(self, samples):
"""
rand2bin, rand2exp
"""
r0, r1, r2, r3, r4 = samples
bprime = (self.population[r0] + self.scale *
(self.population[r1] + self.population[r2] -
self.population[r3] - self.population[r4]))
return bprime
def _select_samples(self, candidate, number_samples):
"""
obtain random integers from range(np.size(self.population, 0)),
without replacement. You can't have the original candidate either.
"""
idxs = list(range(np.size(self.population, 0)))
idxs.remove(candidate)
self.random_number_generator.shuffle(idxs)
idxs = idxs[:number_samples]
return idxs
def _make_random_gen(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
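# Illustrative sketch (not part of the original module): the affine map used
# by _scale_parameters/_unscale_parameters between the unit interval and the
# parameter bounds, checked as a standalone round trip. The bounds below are
# made up and only numpy is required.
def _scaling_roundtrip_demo():
    limits = np.array([(-5.0, 5.0), (0.0, 10.0)], dtype=float).T
    scale_arg1 = 0.5 * (limits[0] + limits[1])
    scale_arg2 = np.fabs(limits[0] - limits[1])

    unit = np.array([0.25, 0.75])                     # population member in [0, 1]
    params = scale_arg1 + (unit - 0.5) * scale_arg2   # -> real parameter values
    back = (params - scale_arg1) / scale_arg2 + 0.5   # -> back to [0, 1]

    assert np.allclose(back, unit)
    return params  # array([-2.5,  7.5])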
|
|
"""Asuswrt status sensors."""
from __future__ import annotations
from dataclasses import dataclass
from numbers import Real
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_GIGABYTES,
DATA_RATE_MEGABITS_PER_SECOND,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
DATA_ASUSWRT,
DOMAIN,
SENSORS_BYTES,
SENSORS_CONNECTED_DEVICE,
SENSORS_LOAD_AVG,
SENSORS_RATES,
SENSORS_TEMPERATURES,
)
from .router import KEY_COORDINATOR, KEY_SENSORS, AsusWrtRouter
@dataclass
class AsusWrtSensorEntityDescription(SensorEntityDescription):
"""A class that describes AsusWrt sensor entities."""
factor: int | None = None
precision: int = 2
DEFAULT_PREFIX = "Asuswrt"
UNIT_DEVICES = "Devices"
CONNECTION_SENSORS: tuple[AsusWrtSensorEntityDescription, ...] = (
AsusWrtSensorEntityDescription(
key=SENSORS_CONNECTED_DEVICE[0],
name="Devices Connected",
icon="mdi:router-network",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=UNIT_DEVICES,
),
AsusWrtSensorEntityDescription(
key=SENSORS_RATES[0],
name="Download Speed",
icon="mdi:download-network",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=DATA_RATE_MEGABITS_PER_SECOND,
entity_registry_enabled_default=False,
factor=125000,
),
AsusWrtSensorEntityDescription(
key=SENSORS_RATES[1],
name="Upload Speed",
icon="mdi:upload-network",
state_class=SensorStateClass.MEASUREMENT,
native_unit_of_measurement=DATA_RATE_MEGABITS_PER_SECOND,
entity_registry_enabled_default=False,
factor=125000,
),
AsusWrtSensorEntityDescription(
key=SENSORS_BYTES[0],
name="Download",
icon="mdi:download",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
entity_registry_enabled_default=False,
factor=1000000000,
),
AsusWrtSensorEntityDescription(
key=SENSORS_BYTES[1],
name="Upload",
icon="mdi:upload",
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=DATA_GIGABYTES,
entity_registry_enabled_default=False,
factor=1000000000,
),
AsusWrtSensorEntityDescription(
key=SENSORS_LOAD_AVG[0],
name="Load Avg (1m)",
icon="mdi:cpu-32-bit",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
AsusWrtSensorEntityDescription(
key=SENSORS_LOAD_AVG[1],
name="Load Avg (5m)",
icon="mdi:cpu-32-bit",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
AsusWrtSensorEntityDescription(
key=SENSORS_LOAD_AVG[2],
name="Load Avg (15m)",
icon="mdi:cpu-32-bit",
state_class=SensorStateClass.MEASUREMENT,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
AsusWrtSensorEntityDescription(
key=SENSORS_TEMPERATURES[0],
name="2.4GHz Temperature",
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
AsusWrtSensorEntityDescription(
key=SENSORS_TEMPERATURES[1],
name="5GHz Temperature",
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
AsusWrtSensorEntityDescription(
key=SENSORS_TEMPERATURES[2],
name="CPU Temperature",
state_class=SensorStateClass.MEASUREMENT,
device_class=SensorDeviceClass.TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
factor=1,
precision=1,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the sensors."""
router: AsusWrtRouter = hass.data[DOMAIN][entry.entry_id][DATA_ASUSWRT]
entities = []
for sensor_data in router.sensors_coordinator.values():
coordinator = sensor_data[KEY_COORDINATOR]
sensors = sensor_data[KEY_SENSORS]
entities.extend(
[
AsusWrtSensor(coordinator, router, sensor_descr)
for sensor_descr in CONNECTION_SENSORS
if sensor_descr.key in sensors
]
)
async_add_entities(entities, True)
class AsusWrtSensor(CoordinatorEntity, SensorEntity):
"""Representation of a AsusWrt sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
router: AsusWrtRouter,
description: AsusWrtSensorEntityDescription,
) -> None:
"""Initialize a AsusWrt sensor."""
super().__init__(coordinator)
self.entity_description: AsusWrtSensorEntityDescription = description
self._attr_name = f"{DEFAULT_PREFIX} {description.name}"
self._attr_unique_id = f"{DOMAIN} {self.name}"
self._attr_device_info = router.device_info
self._attr_extra_state_attributes = {"hostname": router.host}
@property
def native_value(self) -> float | str | None:
"""Return current state."""
descr = self.entity_description
state = self.coordinator.data.get(descr.key)
if state is not None and descr.factor and isinstance(state, Real):
return round(state / descr.factor, descr.precision)
return state
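# Illustrative sketch (not part of the integration): the factor/precision
# fields in the descriptions above convert raw router readings into the
# declared units. The helper below mirrors the arithmetic in
# AsusWrtSensor.native_value with plain numbers; the sample readings are
# made up.
def _convert_reading_demo() -> None:
    def convert(state, factor=None, precision=2):
        if state is not None and factor and isinstance(state, Real):
            return round(state / factor, precision)
        return state

    # 2,500,000 bytes/s with factor=125000 -> 20.0 Mbit/s
    assert convert(2_500_000, factor=125000) == 20.0
    # 3,500,000,000 bytes with factor=10**9 -> 3.5 GB
    assert convert(3_500_000_000, factor=1_000_000_000) == 3.5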
|
|
from io import BytesIO
import numpy as np
from ..core.indexing import NumpyIndexingAdapter
from ..core.utils import Frozen, FrozenDict
from ..core.variable import Variable
from .common import BackendArray, WritableCFDataStore
from .file_manager import CachingFileManager, DummyFileManager
from .locks import ensure_lock, get_write_lock
from .netcdf3 import encode_nc3_attr_value, encode_nc3_variable, is_valid_nc3_name
def _decode_string(s):
if isinstance(s, bytes):
return s.decode("utf-8", "replace")
return s
def _decode_attrs(d):
# don't decode _FillValue from bytes -> unicode, because we want to ensure
# that its type matches the data exactly
return {k: v if k == "_FillValue" else _decode_string(v) for (k, v) in d.items()}
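# Illustrative sketch (not part of the backend): _decode_attrs leaves
# ``_FillValue`` untouched so its dtype keeps matching the on-disk data,
# while every other bytes attribute is decoded to str. The attribute dict
# below is made up.
def _decode_attrs_demo():
    attrs = {"units": b"degC", "_FillValue": b"\xff"}
    decoded = _decode_attrs(attrs)
    assert decoded["units"] == "degC"        # decoded to unicode
    assert decoded["_FillValue"] == b"\xff"  # left as raw bytes
    return decoded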
class ScipyArrayWrapper(BackendArray):
def __init__(self, variable_name, datastore):
self.datastore = datastore
self.variable_name = variable_name
array = self.get_variable().data
self.shape = array.shape
self.dtype = np.dtype(array.dtype.kind + str(array.dtype.itemsize))
def get_variable(self, needs_lock=True):
ds = self.datastore._manager.acquire(needs_lock)
return ds.variables[self.variable_name]
def __getitem__(self, key):
data = NumpyIndexingAdapter(self.get_variable().data)[key]
# Copy data if the source file is mmapped. This makes things consistent
# with the netCDF4 library by ensuring we can safely read arrays even
# after closing associated files.
copy = self.datastore.ds.use_mmap
return np.array(data, dtype=self.dtype, copy=copy)
def __setitem__(self, key, value):
with self.datastore.lock:
data = self.get_variable(needs_lock=False)
try:
data[key] = value
except TypeError:
if key is Ellipsis:
# workaround for GH: scipy/scipy#6880
data[:] = value
else:
raise
def _open_scipy_netcdf(filename, mode, mmap, version):
import gzip
import scipy.io
# if the string ends with .gz, then gunzip and open as netcdf file
if isinstance(filename, str) and filename.endswith(".gz"):
try:
return scipy.io.netcdf_file(
gzip.open(filename), mode=mode, mmap=mmap, version=version
)
except TypeError as e:
# TODO: gzipped loading only works with NetCDF3 files.
if "is not a valid NetCDF 3 file" in e.message:
raise ValueError(
"gzipped file loading only supports " "NetCDF 3 files."
)
else:
raise
if isinstance(filename, bytes) and filename.startswith(b"CDF"):
# it's a NetCDF3 bytestring
filename = BytesIO(filename)
try:
return scipy.io.netcdf_file(filename, mode=mode, mmap=mmap, version=version)
except TypeError as e: # netcdf3 message is obscure in this case
errmsg = e.args[0]
if "is not a valid NetCDF 3 file" in errmsg:
msg = """
If this is a NetCDF4 file, you may need to install the
netcdf4 library, e.g.,
$ pip install netcdf4
"""
errmsg += msg
raise TypeError(errmsg)
else:
raise
class ScipyDataStore(WritableCFDataStore):
"""Store for reading and writing data via scipy.io.netcdf.
This store has the advantage of being able to be initialized with a
StringIO object, allowing for serialization without writing to disk.
It only supports the NetCDF3 file-format.
"""
def __init__(
self, filename_or_obj, mode="r", format=None, group=None, mmap=None, lock=None
):
if group is not None:
raise ValueError(
"cannot save to a group with the " "scipy.io.netcdf backend"
)
if format is None or format == "NETCDF3_64BIT":
version = 2
elif format == "NETCDF3_CLASSIC":
version = 1
else:
raise ValueError("invalid format for scipy.io.netcdf backend: %r" % format)
if lock is None and mode != "r" and isinstance(filename_or_obj, str):
lock = get_write_lock(filename_or_obj)
self.lock = ensure_lock(lock)
if isinstance(filename_or_obj, str):
manager = CachingFileManager(
_open_scipy_netcdf,
filename_or_obj,
mode=mode,
lock=lock,
kwargs=dict(mmap=mmap, version=version),
)
else:
scipy_dataset = _open_scipy_netcdf(
filename_or_obj, mode=mode, mmap=mmap, version=version
)
manager = DummyFileManager(scipy_dataset)
self._manager = manager
@property
def ds(self):
return self._manager.acquire()
def open_store_variable(self, name, var):
return Variable(
var.dimensions,
ScipyArrayWrapper(name, self),
_decode_attrs(var._attributes),
)
def get_variables(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)
def get_attrs(self):
return Frozen(_decode_attrs(self.ds._attributes))
def get_dimensions(self):
return Frozen(self.ds.dimensions)
def get_encoding(self):
encoding = {}
encoding["unlimited_dims"] = {
k for k, v in self.ds.dimensions.items() if v is None
}
return encoding
def set_dimension(self, name, length, is_unlimited=False):
if name in self.ds.dimensions:
raise ValueError(
"%s does not support modifying dimensions" % type(self).__name__
)
dim_length = length if not is_unlimited else None
self.ds.createDimension(name, dim_length)
def _validate_attr_key(self, key):
if not is_valid_nc3_name(key):
raise ValueError("Not a valid attribute name")
def set_attribute(self, key, value):
self._validate_attr_key(key)
value = encode_nc3_attr_value(value)
setattr(self.ds, key, value)
def encode_variable(self, variable):
variable = encode_nc3_variable(variable)
return variable
def prepare_variable(
self, name, variable, check_encoding=False, unlimited_dims=None
):
if check_encoding and variable.encoding:
if variable.encoding != {"_FillValue": None}:
raise ValueError(
"unexpected encoding for scipy backend: %r"
% list(variable.encoding)
)
data = variable.data
# nb. this still creates a numpy array entirely in memory, even though we
# don't write the data yet; scipy.io.netcdf does not support
# incremental writes.
if name not in self.ds.variables:
self.ds.createVariable(name, data.dtype, variable.dims)
scipy_var = self.ds.variables[name]
for k, v in variable.attrs.items():
self._validate_attr_key(k)
setattr(scipy_var, k, v)
target = ScipyArrayWrapper(name, self)
return target, data
def sync(self):
self.ds.sync()
def close(self):
self._manager.close()
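# Illustrative sketch (not part of the backend): ScipyDataStore ultimately
# wraps a ``scipy.io.netcdf_file`` handle, which can live entirely in memory.
# The snippet below writes a tiny NetCDF3 file into a BytesIO buffer with the
# same low-level API and reads it back; the dimension and variable names are
# made up.
def _in_memory_netcdf3_demo():
    import scipy.io

    buffer = BytesIO()
    nc = scipy.io.netcdf_file(buffer, mode="w", version=2)
    nc.createDimension("t", 3)
    var = nc.createVariable("x", "f8", ("t",))
    var[:] = [1.0, 2.0, 3.0]
    nc.flush()
    raw = buffer.getvalue()  # grab the bytes before close() closes the buffer

    readback = scipy.io.netcdf_file(BytesIO(raw), mode="r")
    return readback.variables["x"][:]  # array([1., 2., 3.])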
|
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sympy import Add
from cobra import Model
def convert_to_dual(model):
dual_model = model.interface.Model()
maximization = model.objective.direction == "max"
if maximization:
sign = 1
else:
sign = -1
coefficients = {}
dual_objective = {}
# Add dual variables from primal constraints:
for constraint in model.constraints:
if constraint.expression == 0:
continue
if not constraint.is_Linear:
raise NotImplementedError("Non-linear problems are currently not supported: " + str(constraint))
if constraint.lb is None and constraint.ub is None:
continue
if constraint.lb == constraint.ub:
const_var = model.interface.Variable("dual_" + constraint.name + "_constraint", lb=-1000, ub=1000)
dual_model._add_variable(const_var)
if constraint.lb != 0:
dual_objective[const_var] = sign * constraint.lb
for variable, coef in constraint.expression.as_coefficients_dict().items():
coefficients.setdefault(variable.name, {})[const_var] = sign * coef
else:
if constraint.lb is not None:
lb_var = model.interface.Variable("dual_" + constraint.name + "_constraint_lb", lb=0, ub=1000)
dual_model._add_variable(lb_var)
if constraint.lb != 0:
dual_objective[lb_var] = -sign * constraint.lb
if constraint.ub is not None:
ub_var = model.interface.Variable("dual_" + constraint.name + "_constraint_ub", lb=0, ub=1000)
dual_model._add_variable(ub_var)
if constraint.ub != 0:
dual_objective[ub_var] = sign * constraint.ub
for variable, coef in constraint.expression.as_coefficients_dict().items():
if constraint.lb is not None:
coefficients.setdefault(variable.name, {})[lb_var] = -sign * coef
if constraint.ub is not None:
coefficients.setdefault(variable.name, {})[ub_var] = sign * coef
# Add dual variables from primal bounds
for variable in model.variables:
if variable.type != "continuous":
raise NotImplementedError("Integer variables are currently not supported: " + str(variable))
if variable.lb is None or variable.lb < 0:
raise ValueError("Problem is not in standard form (" + variable.name + " can be negative)")
if variable.lb > 0:
bound_var = model.interface.Variable("dual_" + variable.name + "_lb", lb=0, ub=1000)
dual_model._add_variable(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = -sign * 1
dual_objective[bound_var] = -sign * variable.lb
if variable.ub is not None:
bound_var = model.interface.Variable("dual_" + variable.name + "_ub", lb=0, ub=1000)
dual_model._add_variable(bound_var)
coefficients.setdefault(variable.name, {})[bound_var] = sign * 1
if variable.ub != 0:
dual_objective[bound_var] = sign * variable.ub
# Add dual constraints from primal objective
primal_objective_dict = model.objective.expression.as_coefficients_dict()
for variable in model.variables:
expr = Add(*((coef * dual_var) for dual_var, coef in coefficients[variable.name].items()))
obj_coef = primal_objective_dict[variable]
if maximization:
const = model.interface.Constraint(expr, lb=obj_coef, name="dual_" + variable.name)
else:
const = model.interface.Constraint(expr, ub=obj_coef)
dual_model._add_constraint(const)
# Make dual objective
expr = Add(*((coef * dual_var) for dual_var, coef in dual_objective.items() if coef != 0))
if maximization:
objective = model.interface.Objective(expr, direction="min")
else:
objective = model.interface.Objective(expr, direction="max")
dual_model.objective = objective
return dual_model
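# Illustrative sketch (not part of cameo): the strong duality that
# convert_to_dual relies on. For the standard form max c^T x s.t. Ax <= b,
# x >= 0, the dual is min b^T y s.t. A^T y >= c, y >= 0, and both optima
# coincide. The numbers are made up, and the check uses scipy.optimize,
# which this module does not otherwise depend on.
def _lp_duality_demo():
    import numpy as np
    from scipy.optimize import linprog

    A = np.array([[1.0, 2.0], [3.0, 1.0]])
    b = np.array([14.0, 18.0])
    c = np.array([3.0, 5.0])

    # linprog minimizes, so negate c for the primal maximization.
    primal = linprog(c=-c, A_ub=A, b_ub=b, bounds=[(0, None), (0, None)])
    # Dual: min b^T y s.t. A^T y >= c  <=>  -A^T y <= -c, y >= 0.
    dual = linprog(c=b, A_ub=-A.T, b_ub=-c, bounds=[(0, None), (0, None)])

    assert np.isclose(-primal.fun, dual.fun)  # both equal 37.2 here
    return -primal.fun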
def to_dual_model(model, solver_interface=None):
if solver_interface is None:
solver_interface = model.solver.interface
return ModelDual(model, solver_interface=solver_interface)
class ModelDual(Model): # pragma: no cover # don't test until it works
"""
A cobra.Model that also contains the dual variables and constraints, allowing primal and dual
problems to be combined.
Dual variables corresponding to stoichiometric constraints are prefixed by lambda
Dual variables corresponding to flux bounds are prefixed by mu
Other constraints are not supported at the moment!
Dual constraints will be set according to the original primal objective.
The objective can be changed subsequently to optimize an outer problem.
"""
def __init__(self, *args, **kwargs):
self._dual_variables = {}
super(ModelDual, self).__init__(*args, **kwargs)
def _populate_solver(self, reaction_list, metabolite_list=None):
super(ModelDual, self)._populate_solver(reaction_list)
metabolites = set.union(*(set(r.metabolites) for r in reaction_list))
self._populate_metabolites(metabolites)
objective_coefficients = self.objective.expression.as_coefficients_dict()
maximization = self.objective.direction == "max"
for reaction in reaction_list:
forward_coeff = objective_coefficients.get(reaction.forward_variable, 0)
reverse_coeff = objective_coefficients.get(reaction.reverse_variable, 0)
self._add_reaction_dual_constraint(reaction, forward_coeff, maximization, "fwd")
self._add_reaction_dual_constraint(reaction, reverse_coeff, maximization, "rvs")
self._add_flux_bound_dual_variable(reaction, forward_coeff, maximization, None)
def _add_reaction_dual_constraint(self, reaction, coefficient, maximization, prefix):
"""Add a dual constraint corresponding to the reaction's objective coefficient"""
stoichiometry = {self.solver.variables["lambda_" + m.id]: c for m, c in reaction.metabolites.items()}
if maximization:
constraint = self.solver.interface.Constraint(
Add._from_args(tuple(c * v for v, c in stoichiometry.items())),
name="r_%s_%s" % (reaction.id, prefix),
lb=coefficient)
else:
constraint = self.solver.interface.Constraint(
Add._from_args(tuple(c * v for v, c in stoichiometry.items())),
name="r_%s_%s" % (reaction.id, prefix),
ub=coefficient)
self.solver._add_constraint(constraint)
@property
def objective(self):
return self.solver.objective
@objective.setter
def objective(self, value):
value = self.solver.interface.Objective(value)
objective_coefficients = value.expression.as_coefficients_dict()
maximization = value.direction == "max"
for reaction in self.reactions:
forward_coeff = objective_coefficients.get(reaction.forward_variable, 0)
reverse_coeff = objective_coefficients.get(reaction.reverse_variable, 0)
self._update_dual_reaction_constraint(reaction, forward_coeff, maximization, "fwd")
self._update_dual_reaction_constraint(reaction, reverse_coeff, maximization, "rvs")
self.solver.objective = sum(self.solver.variables["lambda_" + m.id] for m in self.metabolites)
self.objective.direction = "min" if maximization else "max"
def _update_dual_reaction_constraint(self, reaction, coefficient, maximization, prefix):
constraint = self.solver.constraints["r_%s_%s" % (reaction.id, prefix)]
if coefficient == 0:
constraint.lb = None
constraint.ub = None
else:
if maximization:
constraint.lb = coefficient
constraint.ub = None
else:
constraint.lb = None
constraint.ub = coefficient
def _populate_metabolites(self, metabolites):
for met in metabolites:
self._add_dual_variable(met.id, "lambda")
def _add_flux_bound_dual_variable(self, reaction, coefficient, maximization, prefix):
pass
def _add_dual_variable(self, identifier, prefix):
dual_variable_id = prefix + "_" + identifier
dual_variable = self.solver.interface.Variable(dual_variable_id)
self._dual_variables[dual_variable_id] = dual_variable
self.solver._add_variable(dual_variable)
@property
def objective(self):
return self.solver.objective
@objective.setter
def objective(self, objective):
self.solver.objective = objective
self._update_constraints()
@property
def dual_objective(self):
raise NotImplementedError(
"This is not yet implemented, but should return an expression (?) that describes the dual objective"
)
def primal_objective(self):
raise NotImplementedError(
"This is not yet implemented, but should return an expression that describes the primal objective"
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core.client_options import ClientOptions
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1beta1.services.index_service import pagers
from google.cloud.aiplatform_v1beta1.types import deployed_index_ref
from google.cloud.aiplatform_v1beta1.types import index
from google.cloud.aiplatform_v1beta1.types import index as gca_index
from google.cloud.aiplatform_v1beta1.types import index_service
from google.cloud.aiplatform_v1beta1.types import operation as gca_operation
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import IndexServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import IndexServiceGrpcAsyncIOTransport
from .client import IndexServiceClient
class IndexServiceAsyncClient:
"""A service for creating and managing Vertex AI's Index
resources.
"""
_client: IndexServiceClient
DEFAULT_ENDPOINT = IndexServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = IndexServiceClient.DEFAULT_MTLS_ENDPOINT
index_path = staticmethod(IndexServiceClient.index_path)
parse_index_path = staticmethod(IndexServiceClient.parse_index_path)
index_endpoint_path = staticmethod(IndexServiceClient.index_endpoint_path)
parse_index_endpoint_path = staticmethod(
IndexServiceClient.parse_index_endpoint_path
)
common_billing_account_path = staticmethod(
IndexServiceClient.common_billing_account_path
)
parse_common_billing_account_path = staticmethod(
IndexServiceClient.parse_common_billing_account_path
)
common_folder_path = staticmethod(IndexServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(IndexServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(IndexServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(
IndexServiceClient.parse_common_organization_path
)
common_project_path = staticmethod(IndexServiceClient.common_project_path)
parse_common_project_path = staticmethod(
IndexServiceClient.parse_common_project_path
)
common_location_path = staticmethod(IndexServiceClient.common_location_path)
parse_common_location_path = staticmethod(
IndexServiceClient.parse_common_location_path
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IndexServiceAsyncClient: The constructed client.
"""
return IndexServiceClient.from_service_account_info.__func__(IndexServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IndexServiceAsyncClient: The constructed client.
"""
return IndexServiceClient.from_service_account_file.__func__(IndexServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@classmethod
def get_mtls_endpoint_and_cert_source(
cls, client_options: Optional[ClientOptions] = None
):
"""Return the API endpoint and client cert source for mutual TLS.
The client cert source is determined in the following order:
(1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
client cert source is None.
(2) if `client_options.client_cert_source` is provided, use the provided one; if the
default client cert source exists, use the default one; otherwise the client cert
source is None.
The API endpoint is determined in the following order:
(1) if `client_options.api_endpoint` is provided, use the provided one.
(2) if the `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
default mTLS endpoint; if the environment variable is "never", use the default API
endpoint; otherwise if a client cert source exists, use the default mTLS endpoint,
otherwise use the default API endpoint.
More details can be found at https://google.aip.dev/auth/4114.
Args:
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. Only the `api_endpoint` and `client_cert_source` properties may be used
in this method.
Returns:
Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
client cert source to use.
Raises:
google.auth.exceptions.MutualTLSChannelError: If any errors happen.
"""
return IndexServiceClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore
@property
def transport(self) -> IndexServiceTransport:
"""Returns the transport used by the client instance.
Returns:
IndexServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(
type(IndexServiceClient).get_transport_class, type(IndexServiceClient)
)
def __init__(
self,
*,
credentials: ga_credentials.Credentials = None,
transport: Union[str, IndexServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the index service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.IndexServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = IndexServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
async def create_index(
self,
request: Union[index_service.CreateIndexRequest, dict] = None,
*,
parent: str = None,
index: gca_index.Index = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates an Index.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_create_index():
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
index = aiplatform_v1beta1.Index()
index.display_name = "display_name_value"
request = aiplatform_v1beta1.CreateIndexRequest(
parent="parent_value",
index=index,
)
# Make the request
operation = client.create_index(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.CreateIndexRequest, dict]):
The request object. Request message for
[IndexService.CreateIndex][google.cloud.aiplatform.v1beta1.IndexService.CreateIndex].
parent (:class:`str`):
Required. The resource name of the Location to create
the Index in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
index (:class:`google.cloud.aiplatform_v1beta1.types.Index`):
Required. The Index to create.
This corresponds to the ``index`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that
allows for approximate nearest neighbor (a.k.a ANN)
search algorithms.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, index])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = index_service.CreateIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if index is not None:
request.index = index
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_index,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_index.Index,
metadata_type=index_service.CreateIndexOperationMetadata,
)
# Done; return the response.
return response
async def get_index(
self,
request: Union[index_service.GetIndexRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> index.Index:
r"""Gets an Index.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_get_index():
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.GetIndexRequest(
name="name_value",
)
# Make the request
response = client.get_index(request=request)
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.GetIndexRequest, dict]):
The request object. Request message for
[IndexService.GetIndex][google.cloud.aiplatform.v1beta1.IndexService.GetIndex]
name (:class:`str`):
Required. The name of the Index resource. Format:
``projects/{project}/locations/{location}/indexes/{index}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.types.Index:
A representation of a collection of
database items organized in a way that
allows for approximate nearest neighbor
(a.k.a. ANN) search algorithms.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = index_service.GetIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_index,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
async def list_indexes(
self,
request: Union[index_service.ListIndexesRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListIndexesAsyncPager:
r"""Lists Indexes in a Location.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_list_indexes():
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.ListIndexesRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_indexes(request=request)
# Handle the response
for response in page_result:
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.ListIndexesRequest, dict]):
The request object. Request message for
[IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
parent (:class:`str`):
Required. The resource name of the Location from which
to list the Indexes. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1beta1.services.index_service.pagers.ListIndexesAsyncPager:
Response message for
[IndexService.ListIndexes][google.cloud.aiplatform.v1beta1.IndexService.ListIndexes].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = index_service.ListIndexesRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_indexes,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListIndexesAsyncPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
async def update_index(
self,
request: Union[index_service.UpdateIndexRequest, dict] = None,
*,
index: gca_index.Index = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates an Index.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_update_index():
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
index = aiplatform_v1beta1.Index()
index.display_name = "display_name_value"
request = aiplatform_v1beta1.UpdateIndexRequest(
index=index,
)
# Make the request
operation = client.update_index(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.UpdateIndexRequest, dict]):
The request object. Request message for
[IndexService.UpdateIndex][google.cloud.aiplatform.v1beta1.IndexService.UpdateIndex].
index (:class:`google.cloud.aiplatform_v1beta1.types.Index`):
Required. The Index which updates the
resource on the server.
This corresponds to the ``index`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
The update mask applies to the resource. For the
``FieldMask`` definition, see
[google.protobuf.FieldMask][google.protobuf.FieldMask].
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.aiplatform_v1beta1.types.Index` A representation of a collection of database items organized in a way that
allows for approximate nearest neighbor (a.k.a ANN)
search algorithms.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([index, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = index_service.UpdateIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if index is not None:
request.index = index
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_index,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("index.name", request.index.name),)
),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
gca_index.Index,
metadata_type=index_service.UpdateIndexOperationMetadata,
)
# Done; return the response.
return response
async def delete_index(
self,
request: Union[index_service.DeleteIndexRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes an Index. An Index can only be deleted when all its
[DeployedIndexes][google.cloud.aiplatform.v1beta1.Index.deployed_indexes]
have been undeployed.
.. code-block:: python
from google.cloud import aiplatform_v1beta1
def sample_delete_index():
# Create a client
client = aiplatform_v1beta1.IndexServiceClient()
# Initialize request argument(s)
request = aiplatform_v1beta1.DeleteIndexRequest(
name="name_value",
)
# Make the request
operation = client.delete_index(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
Args:
request (Union[google.cloud.aiplatform_v1beta1.types.DeleteIndexRequest, dict]):
The request object. Request message for
[IndexService.DeleteIndex][google.cloud.aiplatform.v1beta1.IndexService.DeleteIndex].
name (:class:`str`):
Required. The name of the Index resource to be deleted.
Format:
``projects/{project}/locations/{location}/indexes/{index}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
request = index_service.DeleteIndexRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_index,
default_timeout=5.0,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("IndexServiceAsyncClient",)
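# Illustrative sketch (not part of the generated client): a minimal async
# call path mirroring the synchronous samples in the docstrings above. It
# assumes Application Default Credentials and network access are available;
# the project, location and index IDs below are placeholders.
async def _sample_get_index_async():
    async with IndexServiceAsyncClient() as client:
        index = await client.get_index(
            name="projects/my-project/locations/us-central1/indexes/my-index"
        )
        print(index.display_name)


# To run the sketch: ``import asyncio; asyncio.run(_sample_get_index_async())``.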
|
|
# Copyright 2021, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import os
import random
import secrets
import string
import struct
import subprocess
import time
import uuid
import yaml
from builtins import FileExistsError
from Crypto.PublicKey import RSA
from OpenSSL import crypto
class APISSL:
def __init__(self):
# create a key pair
self.__key = crypto.PKey()
self.__key.generate_key(crypto.TYPE_RSA, 4096)
# define alt_names
self.__alt_names = ','.join([
'DNS:{}'.format('openstack.bcpc.example.com'),
'DNS:localhost',
'IP:10.65.0.254'
]).encode()
# create a self-signed cert
self.__cert = crypto.X509()
self.__cert.get_subject().C = "US"
self.__cert.get_subject().ST = "New York"
self.__cert.get_subject().L = "New York City"
self.__cert.get_subject().O = "Bloomberg L.P." # noqa
self.__cert.get_subject().OU = "openstack"
self.__cert.get_subject().CN = "openstack.bcpc.example.com"
self.__cert.set_serial_number(random.randrange(100000))
self.__cert.set_version(2)
self.__cert.gmtime_adj_notBefore(0)
self.__cert.gmtime_adj_notAfter(16*365*24*60*60)
self.__cert.set_issuer(self.__cert.get_subject())
self.__cert.set_pubkey(self.__key)
self.__cert.add_extensions([
crypto.X509Extension(b'subjectAltName', False, self.__alt_names),
crypto.X509Extension(b"basicConstraints", True, b"CA:false")
])
self.__cert.sign(self.__key, 'sha512')
def crt(self):
certificate = crypto.dump_certificate(crypto.FILETYPE_PEM, self.__cert)
return base64.b64encode(certificate).decode()
def key(self):
private_key = crypto.dump_privatekey(crypto.FILETYPE_PEM, self.__key)
return base64.b64encode(private_key).decode()
class EtcdSSL:
def __init__(self):
self.__certs = {}
# create key
self.__key = crypto.PKey()
self.__key.generate_key(crypto.TYPE_RSA, 2048)
# create self-signed ca
self.__ca = crypto.X509()
self.__ca.get_subject().C = "US"
self.__ca.get_subject().ST = "New York"
self.__ca.get_subject().L = "New York City"
self.__ca.get_subject().O = "Bloomberg L.P." # noqa
self.__ca.get_subject().OU = "ENG Cloud Infrastructure"
self.__ca.get_subject().CN = "ca"
self.__ca.set_version(2)
self.__ca.set_serial_number(random.randrange(100000))
self.__ca.gmtime_adj_notBefore(0)
self.__ca.gmtime_adj_notAfter(16*365*24*60*60)
self.__ca.set_issuer(self.__ca.get_subject())
self.__ca.set_pubkey(self.__key)
self.__ca.add_extensions([
crypto.X509Extension(b"basicConstraints", True,
b"CA:TRUE, pathlen:0"),
crypto.X509Extension(b"keyUsage", True,
b"keyCertSign, cRLSign"),
crypto.X509Extension(b"subjectKeyIdentifier", False, b"hash",
subject=self.__ca),
])
self.__ca.add_extensions([
crypto.X509Extension(b"authorityKeyIdentifier", False,
b"keyid:always", issuer=self.__ca)
])
self.__ca.sign(self.__key, 'sha512')
# create self-signed client certs
for client in ['client-ro', 'client-rw', 'server']:
# key
self.__certs[client] = {}
self.__certs[client]['key'] = crypto.PKey()
self.__certs[client]['key'].generate_key(crypto.TYPE_RSA, 2048)
# cert
rand_int = random.randint(50000000, 100000000)
self.__certs[client]['cert'] = crypto.X509()
self.__certs[client]['cert'].get_subject().C = "US"
self.__certs[client]['cert'].get_subject().ST = "New York"
self.__certs[client]['cert'].get_subject().L = "New York City"
self.__certs[client]['cert'].get_subject().O = "Bloomberg L.P." # noqa
self.__certs[client]['cert'].get_subject().OU = "ENG Cloud Infrastructure" # noqa
self.__certs[client]['cert'].get_subject().CN = client
self.__certs[client]['cert'].set_version(2)
self.__certs[client]['cert'].set_serial_number(rand_int)
self.__certs[client]['cert'].gmtime_adj_notBefore(0)
self.__certs[client]['cert'].gmtime_adj_notAfter(10 * 365 * 24 * 60 * 60) # noqa
self.__certs[client]['cert'].set_issuer(self.__ca.get_issuer())
self.__certs[client]['cert'].set_pubkey(self.__certs[client]['key']) # noqa
self.__certs[client]['cert'].add_extensions([
crypto.X509Extension(b"basicConstraints", False, b"CA:FALSE"),
crypto.X509Extension(
b"subjectKeyIdentifier",
False,
b"hash",
subject=self.__certs[client]['cert']
),
])
self.__certs[client]['cert'].add_extensions([
crypto.X509Extension(
b"authorityKeyIdentifier",
False,
b"keyid:always",
issuer=self.__ca
),
crypto.X509Extension(b"extendedKeyUsage", False, b"clientAuth",), # noqa
crypto.X509Extension(b"keyUsage", False, b"digitalSignature"),
])
if client == 'server':
alt_names = ','.join([
'IP:10.65.0.2',
'IP:10.65.0.16',
'IP:10.65.0.32',
'IP:127.0.0.1'
]).encode()
self.__certs[client]['cert'].add_extensions([
crypto.X509Extension(b'subjectAltName', False, alt_names),
crypto.X509Extension(b"extendedKeyUsage", False, b"serverAuth"), # noqa
])
self.__certs[client]['cert'].sign(self.__key, 'sha256')
def ca_crt(self):
certificate = crypto.dump_certificate(crypto.FILETYPE_PEM, self.__ca)
return base64.b64encode(certificate).decode()
def server_crt(self):
dump = crypto.dump_certificate(crypto.FILETYPE_PEM,
self.__certs['server']['cert'])
return base64.b64encode(dump).decode()
def server_key(self):
dump = crypto.dump_privatekey(crypto.FILETYPE_PEM,
self.__certs['server']['key'])
return base64.b64encode(dump).decode()
def client_ro_crt(self):
dump = crypto.dump_certificate(crypto.FILETYPE_PEM,
self.__certs['client-ro']['cert'])
return base64.b64encode(dump).decode()
def client_ro_key(self):
dump = crypto.dump_privatekey(crypto.FILETYPE_PEM,
self.__certs['client-ro']['key'])
return base64.b64encode(dump).decode()
def client_rw_crt(self):
dump = crypto.dump_certificate(crypto.FILETYPE_PEM,
self.__certs['client-rw']['cert'])
return base64.b64encode(dump).decode()
def client_rw_key(self):
dump = crypto.dump_privatekey(crypto.FILETYPE_PEM,
self.__certs['client-rw']['key'])
return base64.b64encode(dump).decode()
class SSH:
def __init__(self):
self.__key = RSA.generate(1024)
@property
def key(self):
return self.__key
def public(self):
key = self.key.publickey().exportKey('OpenSSH')
return base64.b64encode(key).decode()
def private(self):
key = self.key.exportKey('PEM')
return base64.b64encode(key).decode()
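# Illustrative sketch (not part of the databag generator): the SSH helper
# above base64-encodes its PEM/OpenSSH exports so they can be embedded in
# YAML. The round trip below decodes the private key and re-imports it with
# the same Crypto.PublicKey.RSA API used to generate it.
def _ssh_roundtrip_demo():
    ssh = SSH()
    pem = base64.b64decode(ssh.private())
    key = RSA.importKey(pem)
    assert key.has_private()
    return key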
class BCCChefDatabags:
def __init__(self):
self.__etcd_ssl = EtcdSSL()
self.__nova_ssh = SSH()
self.__ssh = SSH()
self.__api_ssl = APISSL()
@property
def etcd_ssl(self):
return self.__etcd_ssl
@property
def nova_ssh(self):
return self.__nova_ssh
@property
def ssh(self):
return self.__ssh
@property
def api_ssl(self):
return self.__api_ssl
def generate_ceph_key(self):
key = os.urandom(16)
header = struct.pack('<hiih', 1, int(time.time()), 0, len(key))
return base64.b64encode(header + key).decode()
def generate_fernet(self):
return base64.urlsafe_b64encode(os.urandom(32)).decode()
def generate_string(self, length=32):
return ''.join((secrets.choice(string.ascii_letters) for i in range(length))) # noqa
def generate_uuid(self):
return str(uuid.uuid4())
def save(self, force=False):
cmd = 'git rev-parse --show-toplevel'
root = subprocess.check_output(cmd.split(" ")).decode().rstrip('\n')
fp = '{0}/{1}'.format(root, 'ansible/group_vars/all/chef_databags.yml')
if os.path.isfile(fp) and not force:
msg = '{} exists.\nWill not overwrite without force.'
msg = msg.format(fp)
raise FileExistsError(msg)
with open(fp, 'w') as file:
yaml.dump(
self.generate(),
file, default_flow_style=False, indent=2
)
def generate(self):
config = {
'id': 'config',
'openstack': {
'admin': {
'password': self.generate_string()
}
},
'apache': {
'status': {
'username': 'apache_status',
'password': self.generate_string()
}
},
'ceph': {
'fsid': self.generate_uuid(),
'mon': {
'key': self.generate_ceph_key()
},
'bootstrap': {
'mds': {
'key': self.generate_ceph_key()
},
'mgr': {
'key': self.generate_ceph_key()
},
'osd': {
'key': self.generate_ceph_key()
},
'rgw': {
'key': self.generate_ceph_key()
},
'rbd': {
'key': self.generate_ceph_key()
},
},
'client': {
'admin': {
'key': self.generate_ceph_key()
},
'cinder': {
'key': self.generate_ceph_key()
},
'glance': {
'key': self.generate_ceph_key()
}
},
},
'etcd': {
'users': [
{
'username': 'root',
'password': self.generate_string()
},
{
'username': 'server',
'password': self.generate_string()
},
{
'username': 'client-ro',
'password': self.generate_string()
},
{
'username': 'client-rw',
'password': self.generate_string()
},
],
'ssl': {
'ca': {
'crt': self.etcd_ssl.ca_crt(),
},
'server': {
'crt': self.etcd_ssl.server_crt(),
'key': self.etcd_ssl.server_key(),
},
'client-ro': {
'crt': self.etcd_ssl.client_ro_crt(),
'key': self.etcd_ssl.client_ro_key(),
},
'client-rw': {
'crt': self.etcd_ssl.client_rw_crt(),
'key': self.etcd_ssl.client_rw_key(),
},
}
},
'powerdns': {
'creds': {
'db': {
'username': 'pdns',
'password': self.generate_string()
},
'webserver': {'password': self.generate_string()},
'api': {'key': self.generate_string()},
}
},
'proxysql': {
'creds': {
'db': {
'username': 'psql_monitor',
'password': self.generate_string(),
},
'admin': {
'username': 'psql_admin',
'password': self.generate_string(),
},
'stats': {
'username': 'psql_stats',
'password': self.generate_string(),
},
},
},
'keystone': {
'db': {
'username': 'keystone',
'password': self.generate_string()
},
'fernet': {
'keys': {
'primary': self.generate_fernet(),
'secondary': self.generate_fernet(),
'staged': self.generate_fernet(),
}
}
},
'glance': {
'creds': {
'db': {
'username': 'glance',
'password': self.generate_string()
},
'os': {
'username': 'glance',
'password': self.generate_string()
},
}
},
'cinder': {
'creds': {
'db': {
'username': 'cinder',
'password': self.generate_string()
},
'os': {
'username': 'cinder',
'password': self.generate_string()
},
}
},
'heat': {
'creds': {
'db': {
'username': 'heat',
'password': self.generate_string()
},
'os': {
'username': 'heat',
'password': self.generate_string()
},
}
},
'horizon': {'secret': self.generate_string()},
'libvirt': {'secret': self.generate_uuid()},
'neutron': {
'creds': {
'db': {
'username': 'neutron',
'password': self.generate_string()
},
'os': {
'username': 'neutron',
'password': self.generate_string()
},
}
},
'nova': {
'creds': {
'db': {
'username': 'nova',
'password': self.generate_string()
},
'os': {
'username': 'nova',
'password': self.generate_string()
},
},
'ssh': {
'crt': self.nova_ssh.public(),
'key': self.nova_ssh.private()
}
},
'placement': {
'creds': {
'db': {
'username': 'placement',
'password': self.generate_string()
},
'os': {
'username': 'placement',
'password': self.generate_string()
},
}
},
'mysql': {
'users': {
'sst': {'password': self.generate_string()},
'root': {'password': self.generate_string()},
'check': {'password': self.generate_string()},
}
},
'rabbit': {
'username': 'guest',
'password': self.generate_string(),
'cookie': self.generate_string()
},
'haproxy': {
'username': 'haproxy',
'password': self.generate_string(),
},
'ssh': {
'public': self.ssh.public(),
'private': self.ssh.private()
},
'ssl': {
'key': self.api_ssl.key(),
'crt': self.api_ssl.crt(),
'intermediate': None
},
'watcher': {
'creds': {
'db': {
'username': 'watcher',
'password': self.generate_string()
},
'os': {
'username': 'watcher',
'password': self.generate_string()
}
}
}
}
zones = {
'id': 'zones',
'dev': {
'ceph': {
'client': {
'cinder': {'key': self.generate_ceph_key()}
}
},
'libvirt': {'secret': self.generate_uuid()}
}
}
return {'chef_databags': [config, zones]}
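# A minimal usage sketch (illustrative): build the databags in memory and either
# inspect the generated dict or persist it with save(force=True), which overwrites
# an existing ansible/group_vars/all/chef_databags.yml.
if __name__ == '__main__':
    databags = BCCChefDatabags()
    # Printing instead of calling save() keeps this example free of file writes.
    print(yaml.dump(databags.generate(), default_flow_style=False, indent=2))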
|
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import dates
import os
import pickle
from datetime import datetime
from pprint import pprint
import sys
from datetime import timedelta
import calendar
import math
import copy
timezone = timedelta(hours = 0) #zero here because most files were written with the old PST code; a correction further down handles the files (2009 and early 2012) that were run with the newer UTC code
AD_corr = True
#1. alter the dates below to set limits on the data analysis range
start_analysis_at = datetime.strptime('20120101','%Y%m%d')
end_analysis_at = datetime.strptime('20120531','%Y%m%d')
########data dirs
directory_list = [
#'D:/2009/WHI_ECSP2/Binary/',
#'D:/2010/WHI_ECSP2/Binary/',
'D:/2012/WHI_UBCSP2/Binary/',
]
#tracking odd neg intervals (buffering issue?)
argh = 0
ok = 0
err_count = 0
non_err_count = 0
##############initialize binning variables
bins = []
start_size = 70 #VED in nm
end_size = 220 #VED in nm
interval_length = 5 #in nm
#need durations to calc sampled volume later for concs
sampling_duration_cluster_1_no_precip = 0
sampling_duration_cluster_2_no_precip = 0
sampling_duration_cluster_3_no_precip = 0
sampling_duration_cluster_4_no_precip = 0
sampling_duration_cluster_5_no_precip = 0
sampling_duration_cluster_6_no_precip = 0
sampling_duration_GBPS_no_precip = 0
sampling_duration_fresh_no_precip = 0
sampling_duration_cluster_1_precip = 0
sampling_duration_cluster_2_precip = 0
sampling_duration_cluster_3_precip = 0
sampling_duration_cluster_4_precip = 0
sampling_duration_cluster_5_precip = 0
sampling_duration_cluster_6_precip = 0
sampling_duration_GBPS_precip = 0
sampling_duration_fresh_precip = 0
sampling_duration_allFT = 0
#create list of size bins
while start_size < end_size:
bins.append(start_size)
start_size += interval_length
#create dictionary with size bins as keys
binned_data = {}
for bin in bins:
binned_data[bin] = [0,0]
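#each size-bin key maps to a two-element list, [summed rBC mass, particle count], filled in
#below and later divided by the sampled volume to give mass and number concentrations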
###create a binning dictionary for each air mass category
rBC_FT_data_cluster_1_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_2_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_3_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_4_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_5_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_6_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_GBPS_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_fresh_no_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_1_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_2_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_3_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_4_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_5_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_6_precip = copy.deepcopy(binned_data)
rBC_FT_data_cluster_GBPS_precip = copy.deepcopy(binned_data)
rBC_FT_data_fresh_precip = copy.deepcopy(binned_data)
rBC_FT_data_all = copy.deepcopy(binned_data)
######get spike times (these are sorted by datetime)
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/')
file = open('WHI_rBC_record_2009to2013-spike_times.rbcpckl', 'r')
spike_times_full = pickle.load(file)
file.close()
spike_times = []
for spike in spike_times_full:
if spike.year >= start_analysis_at.year:
if spike <= end_analysis_at:
spike_times.append(spike)
##########open cluslist and read into a python list
cluslist = []
#CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod'
#CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip_added-sig_precip_any_time'
CLUSLIST_file = 'C:/hysplit4/working/WHI/2hrly_HYSPLIT_files/all_with_sep_GBPS/CLUSLIST_6-mod-precip_added-sig_precip_72hrs_pre_arrival'
with open(CLUSLIST_file,'r') as f:
for line in f:
newline = line.split()
cluster_no = int(newline[0])
traj_time = datetime(int(newline[2])+2000,int(newline[3]),int(newline[4]),int(newline[5])) + timezone
significant_rainfall = newline[8]
if traj_time.year >= start_analysis_at.year:
cluslist.append([traj_time,cluster_no,significant_rainfall])
# sort cluslist by row_datetime in place
cluslist.sort(key=lambda clus_info: clus_info[0])
######this helper inverts the old calibration so the peak height can be recovered and the BC mass recomputed with the new calibration
#quadratic eqn: mass = a*x**2 + b*x + C, solved for x (the peak height)
def PeakHtFromMass(BC_mass,var_C,var_b,var_a):
C = var_C
b = var_b
a = var_a
c = C - BC_mass
d = b**2-4*a*c
if d < 0:
		#This equation has no real solution
return np.nan
elif d == 0:
		# This equation has exactly one solution
x = (-b+math.sqrt(b**2-4*a*c))/(2*a)
return x
else:
#This equation has two solutions
x1 = (-b+math.sqrt((b**2)-(4*(a*c))))/(2*a)
x2 = (-b-math.sqrt((b**2)-(4*(a*c))))/(2*a)
if x1 <4000:
return x1
if x2 <4000:
return x2
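#In effect PeakHtFromMass solves a*x**2 + b*x + (C - BC_mass) = 0 for the peak height x and
#returns whichever real root lies below 4000 (presumably the valid peak height range); the
#loop below then re-applies the newer calibration as BC_mass = new_b*x + new_C.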
#get BC data
for directory in directory_list:
os.chdir(directory)
print directory
for item in os.listdir('.'):
if os.path.isdir(item) == True and item.startswith('20'):
folder_date = datetime.strptime(item, '%Y%m%d')
if folder_date >= start_analysis_at and folder_date <= end_analysis_at:
if folder_date.year == 2009:
old_C = 0
old_b = 0.012
old_a = 0
new_C = 0.01244
new_b = 0.0172
if folder_date.year == 2010:
old_C = 0.156
old_b = 0.00606
old_a = 6.3931e-7
new_C = -0.32619
new_b = 0.01081
if folder_date.year == 2012:
old_C = 0.20699
old_b = 0.00246
old_a = -1.09254e-7
new_C = 0.24826
new_b = 0.003043
os.chdir(item)
for file in os.listdir('.'):
if file.endswith('.ptxt'):
print file
f = open(file,'r')
f.readline()
for line in f:
newline = line.split('\t')
start_time = float(newline[0])
end_time = float(newline[1])
incand_flag = float(newline[2])
incand_sat_flag = int(newline[3])
BC_mass = float(newline[4])
BC_mass_old = float(newline[4])
if AD_corr == True:
if folder_date.year == 2009:
pk_ht = BC_mass/old_b
else:
pk_ht = PeakHtFromMass(BC_mass, old_C, old_b, old_a)
BC_mass = new_b*pk_ht + new_C
try:
BC_VED = (((BC_mass/(10**15*1.8))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
except:
#print BC_mass, BC_mass_old, datetime.utcfromtimestamp(end_time), err_count
err_count+=1
continue
non_err_count +=1
#this is to account for me running the first few 2012 days and all of 2009 with the new UTC code (the rest are old PST code)
if datetime.strptime('20120401', '%Y%m%d') <= datetime.utcfromtimestamp(start_time) <= datetime.strptime('20120410', '%Y%m%d'):
timezone = timedelta(hours = -8)
if datetime.utcfromtimestamp(start_time) <= datetime.strptime('20091231', '%Y%m%d'):
timezone = timedelta(hours = -8)
start_time_obj = datetime.utcfromtimestamp(start_time)+timezone
end_time_obj = datetime.utcfromtimestamp(end_time)+timezone
#ignore annoying neg intervals
if end_time_obj < start_time_obj:
argh += 1
continue
else:
ok +=1
#pop off any cluslist times that are in the past
cluslist_current_datetime = cluslist[0][0] #in PST
while end_time_obj > (cluslist_current_datetime + timedelta(hours=1)):
cluslist.pop(0)
if len(cluslist):
cluslist_current_datetime = cluslist[0][0]
else:
break
#get cluster no
cluslist_current_cluster_no = cluslist[0][1]
sig_rain_str = cluslist[0][2]
if sig_rain_str == 'True':
sig_rain = True
if sig_rain_str == 'False':
sig_rain = False
#use spike times to get fresh emissions data
spike_half_interval = 2
if len(spike_times):
spike_start = spike_times[0]-timedelta(minutes=spike_half_interval)
spike_end = spike_times[0]+timedelta(minutes=spike_half_interval)
while end_time_obj >= spike_end:
print 'pop spike time', end_time_obj, spike_times[0]
spike_times.pop(0)
if len(spike_times):
spike_start = spike_times[0]-timedelta(minutes=spike_half_interval)
spike_end = spike_times[0]+timedelta(minutes=spike_half_interval)
if len(spike_times) == 0:
print 'no more spike times'
break
if (start_time_obj < spike_start or start_time_obj < spike_end) and (end_time_obj > spike_start or end_time_obj > spike_end):
for key in rBC_FT_data_fresh_precip:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
if sig_rain == True:
rBC_FT_data_fresh_precip[key][0] = rBC_FT_data_fresh_precip[key][0] + BC_mass
rBC_FT_data_fresh_precip[key][1] = rBC_FT_data_fresh_precip[key][1] + 1
sampling_duration_fresh_precip = sampling_duration_fresh_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_fresh_no_precip[key][0] = rBC_FT_data_fresh_no_precip[key][0] + BC_mass
rBC_FT_data_fresh_no_precip[key][1] = rBC_FT_data_fresh_no_precip[key][1] + 1
sampling_duration_fresh_no_precip = sampling_duration_fresh_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
#add data to list in cluster dictionaries (1 list per cluster time early night/late night)
if ((cluslist_current_datetime-timedelta(hours=3)) <= end_time_obj <= (cluslist_current_datetime+timedelta(hours=3))):
if cluslist_current_cluster_no == 7:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_GBPS_precip[key][0] = rBC_FT_data_cluster_GBPS_precip[key][0] + BC_mass
rBC_FT_data_cluster_GBPS_precip[key][1] = rBC_FT_data_cluster_GBPS_precip[key][1] + 1
sampling_duration_GBPS_precip = sampling_duration_GBPS_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_GBPS_no_precip[key][0] = rBC_FT_data_cluster_GBPS_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_GBPS_no_precip[key][1] = rBC_FT_data_cluster_GBPS_no_precip[key][1] + 1
sampling_duration_GBPS_no_precip = sampling_duration_GBPS_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 1:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_1_precip[key][0] = rBC_FT_data_cluster_1_precip[key][0] + BC_mass
rBC_FT_data_cluster_1_precip[key][1] = rBC_FT_data_cluster_1_precip[key][1] + 1
sampling_duration_cluster_1_precip = sampling_duration_cluster_1_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_1_no_precip[key][0] = rBC_FT_data_cluster_1_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_1_no_precip[key][1] = rBC_FT_data_cluster_1_no_precip[key][1] + 1
sampling_duration_cluster_1_no_precip = sampling_duration_cluster_1_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 2:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_2_precip[key][0] = rBC_FT_data_cluster_2_precip[key][0] + BC_mass
rBC_FT_data_cluster_2_precip[key][1] = rBC_FT_data_cluster_2_precip[key][1] + 1
sampling_duration_cluster_2_precip = sampling_duration_cluster_2_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_2_no_precip[key][0] = rBC_FT_data_cluster_2_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_2_no_precip[key][1] = rBC_FT_data_cluster_2_no_precip[key][1] + 1
sampling_duration_cluster_2_no_precip = sampling_duration_cluster_2_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 3:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_3_precip[key][0] = rBC_FT_data_cluster_3_precip[key][0] + BC_mass
rBC_FT_data_cluster_3_precip[key][1] = rBC_FT_data_cluster_3_precip[key][1] + 1
sampling_duration_cluster_3_precip = sampling_duration_cluster_3_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_3_no_precip[key][0] = rBC_FT_data_cluster_3_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_3_no_precip[key][1] = rBC_FT_data_cluster_3_no_precip[key][1] + 1
sampling_duration_cluster_3_no_precip = sampling_duration_cluster_3_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 4:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_4_precip[key][0] = rBC_FT_data_cluster_4_precip[key][0] + BC_mass
rBC_FT_data_cluster_4_precip[key][1] = rBC_FT_data_cluster_4_precip[key][1] + 1
sampling_duration_cluster_4_precip = sampling_duration_cluster_4_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_4_no_precip[key][0] = rBC_FT_data_cluster_4_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_4_no_precip[key][1] = rBC_FT_data_cluster_4_no_precip[key][1] + 1
sampling_duration_cluster_4_no_precip = sampling_duration_cluster_4_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 5:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_5_precip[key][0] = rBC_FT_data_cluster_5_precip[key][0] + BC_mass
rBC_FT_data_cluster_5_precip[key][1] = rBC_FT_data_cluster_5_precip[key][1] + 1
sampling_duration_cluster_5_precip = sampling_duration_cluster_5_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_5_no_precip[key][0] = rBC_FT_data_cluster_5_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_5_no_precip[key][1] = rBC_FT_data_cluster_5_no_precip[key][1] + 1
sampling_duration_cluster_5_no_precip = sampling_duration_cluster_5_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
if cluslist_current_cluster_no == 6:
sampling_duration_allFT = sampling_duration_allFT + end_time - start_time
for key in rBC_FT_data_all:
key_value = float(key)
interval_end = key_value + interval_length
if BC_VED >= key_value and BC_VED < interval_end:
rBC_FT_data_all[key][0] = rBC_FT_data_all[key][0] + BC_mass
rBC_FT_data_all[key][1] = rBC_FT_data_all[key][1] + 1
if sig_rain == True:
rBC_FT_data_cluster_6_precip[key][0] = rBC_FT_data_cluster_6_precip[key][0] + BC_mass
rBC_FT_data_cluster_6_precip[key][1] = rBC_FT_data_cluster_6_precip[key][1] + 1
sampling_duration_cluster_6_precip = sampling_duration_cluster_6_precip + end_time - start_time #need duration to calc sampled volume later for concs
if sig_rain == False:
rBC_FT_data_cluster_6_no_precip[key][0] = rBC_FT_data_cluster_6_no_precip[key][0] + BC_mass
rBC_FT_data_cluster_6_no_precip[key][1] = rBC_FT_data_cluster_6_no_precip[key][1] + 1
sampling_duration_cluster_6_no_precip = sampling_duration_cluster_6_no_precip + end_time - start_time #need duration to calc sampled volume later for concs
break
f.close()
os.chdir(directory)
print 'neg times', argh, ok, argh*100./(argh+ok)
print err_count, non_err_count, err_count*100./(err_count+non_err_count)
average_flow = 120
total_sampled_volume_1_precip = sampling_duration_cluster_1_precip*average_flow/60
total_sampled_volume_2_precip = sampling_duration_cluster_2_precip*average_flow/60
total_sampled_volume_3_precip = sampling_duration_cluster_3_precip *average_flow/60
total_sampled_volume_4_precip = sampling_duration_cluster_4_precip*average_flow/60
total_sampled_volume_5_precip = sampling_duration_cluster_5_precip *average_flow/60
total_sampled_volume_6_precip = sampling_duration_cluster_6_precip*average_flow/60
total_sampled_volume_GBPS_precip = sampling_duration_GBPS_precip*average_flow/60
total_sampled_volume_fresh_precip = sampling_duration_fresh_precip*average_flow/60
total_sampled_volume_1_no_precip = sampling_duration_cluster_1_no_precip*average_flow/60
total_sampled_volume_2_no_precip = sampling_duration_cluster_2_no_precip*average_flow/60
total_sampled_volume_3_no_precip = sampling_duration_cluster_3_no_precip *average_flow/60
total_sampled_volume_4_no_precip = sampling_duration_cluster_4_no_precip*average_flow/60
total_sampled_volume_5_no_precip = sampling_duration_cluster_5_no_precip *average_flow/60
total_sampled_volume_6_no_precip = sampling_duration_cluster_6_no_precip*average_flow/60
total_sampled_volume_GBPS_no_precip = sampling_duration_GBPS_no_precip*average_flow/60
total_sampled_volume_fresh_no_precip = sampling_duration_fresh_no_precip*average_flow/60
total_sampled_volume_allFT = sampling_duration_allFT*average_flow/60
#create lists
rBC_FT_data_cluster_1_l_precip = []
rBC_FT_data_cluster_2_l_precip = []
rBC_FT_data_cluster_3_l_precip = []
rBC_FT_data_cluster_4_l_precip = []
rBC_FT_data_cluster_5_l_precip = []
rBC_FT_data_cluster_6_l_precip = []
rBC_FT_data_cluster_GBPS_l_precip = []
rBC_FT_data_fresh_l_precip = []
rBC_FT_data_cluster_1_l_no_precip = []
rBC_FT_data_cluster_2_l_no_precip = []
rBC_FT_data_cluster_3_l_no_precip = []
rBC_FT_data_cluster_4_l_no_precip = []
rBC_FT_data_cluster_5_l_no_precip = []
rBC_FT_data_cluster_6_l_no_precip = []
rBC_FT_data_cluster_GBPS_l_no_precip = []
rBC_FT_data_fresh_l_no_precip = []
rBC_FT_data_all_l = []
#put lists etc in array
binned_data_lists = [
[rBC_FT_data_cluster_1_precip ,rBC_FT_data_cluster_1_l_precip , total_sampled_volume_1_precip,'c1_precip'],
[rBC_FT_data_cluster_2_precip ,rBC_FT_data_cluster_2_l_precip , total_sampled_volume_2_precip,'c2_precip'],
[rBC_FT_data_cluster_3_precip ,rBC_FT_data_cluster_3_l_precip , total_sampled_volume_3_precip,'c3_precip'],
[rBC_FT_data_cluster_4_precip ,rBC_FT_data_cluster_4_l_precip , total_sampled_volume_4_precip,'c4_precip'],
[rBC_FT_data_cluster_5_precip ,rBC_FT_data_cluster_5_l_precip , total_sampled_volume_5_precip,'c5_precip'],
[rBC_FT_data_cluster_6_precip ,rBC_FT_data_cluster_6_l_precip , total_sampled_volume_6_precip,'c6_precip'],
[rBC_FT_data_cluster_GBPS_precip ,rBC_FT_data_cluster_GBPS_l_precip , total_sampled_volume_GBPS_precip,'GBPS_precip'],
[rBC_FT_data_fresh_precip ,rBC_FT_data_fresh_l_precip , total_sampled_volume_fresh_precip,'fresh_precip'],
[rBC_FT_data_cluster_1_no_precip ,rBC_FT_data_cluster_1_l_no_precip , total_sampled_volume_1_no_precip,'c1_no_precip'],
[rBC_FT_data_cluster_2_no_precip ,rBC_FT_data_cluster_2_l_no_precip , total_sampled_volume_2_no_precip,'c2_no_precip'],
[rBC_FT_data_cluster_3_no_precip ,rBC_FT_data_cluster_3_l_no_precip , total_sampled_volume_3_no_precip,'c3_no_precip'],
[rBC_FT_data_cluster_4_no_precip ,rBC_FT_data_cluster_4_l_no_precip , total_sampled_volume_4_no_precip,'c4_no_precip'],
[rBC_FT_data_cluster_5_no_precip ,rBC_FT_data_cluster_5_l_no_precip , total_sampled_volume_5_no_precip,'c5_no_precip'],
[rBC_FT_data_cluster_6_no_precip ,rBC_FT_data_cluster_6_l_no_precip , total_sampled_volume_6_no_precip,'c6_no_precip'],
[rBC_FT_data_cluster_GBPS_no_precip ,rBC_FT_data_cluster_GBPS_l_no_precip , total_sampled_volume_GBPS_no_precip,'GBPS_no_precip'],
[rBC_FT_data_fresh_no_precip ,rBC_FT_data_fresh_l_no_precip , total_sampled_volume_fresh_no_precip,'fresh_no_precip'],
[rBC_FT_data_all ,rBC_FT_data_all_l , total_sampled_volume_allFT,'all_FT'],
]
#fiddle with data (sort, normalize, etc)
for line in binned_data_lists:
dict = line[0]
list = line[1]
sampled_vol = line[2]
for bin, value in dict.iteritems():
bin_mass = value[0]
bin_numb = value[1]
try:
bin_mass_conc = bin_mass/sampled_vol #gives mass per cc
bin_numb_conc = bin_numb/sampled_vol #gives number per cc
temp = [bin,bin_mass_conc,bin_numb_conc]
except:
temp = [bin,np.nan,np.nan]
list.append(temp)
list.sort()
for row in list: #normalize
		row.append(row[1]) #these 2 lines append the raw mass and number concs
row.append(row[2])
row[1] = row[1]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[2] = row[2]/(math.log(row[0]+interval_length)-math.log(row[0])) #d/dlog(VED)
row[0] = row[0]+interval_length/2 #correction for our binning code recording bin starts as keys instead of midpoints
#write final list of interval data to file and pickle
os.chdir('C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/size_distrs/')
for list in binned_data_lists:
file = open('AD_corr - size distr - FT - ' + list[3] + '.txt', 'w')
file.write('size_bin_midpoint(VEDnm)' + '\t'+ 'dM/dlog(VED)_(ng/cm3)' + '\t'+ 'd#/dlog(VED)_(#/cm3)' + '\t' + 'dM(VED)_(ng/cm3)' + '\t'+ 'd#(VED)_(#/cm3)' + '\n')
file.write('total sampled volume:' + str(list[2]) + 'cc' + '\n')
for row in list[1]:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
file = open('AD_corr - size distr - FT - ' + list[3] + '.sdbinpickl', 'w')
pickle.dump(list[1], file)
file.close()
|
|
"""Base primititve classes for working with Storm."""
from __future__ import absolute_import, print_function, unicode_literals
import io
import logging
import os
import signal
import sys
from collections import deque, namedtuple
from logging.handlers import RotatingFileHandler
from os.path import join
from threading import RLock
from traceback import format_exc
import simplejson as json
# Support for Storm Log levels as per STORM-414
_STORM_LOG_TRACE = 0
_STORM_LOG_DEBUG = 1
_STORM_LOG_INFO = 2
_STORM_LOG_WARN = 3
_STORM_LOG_ERROR = 4
_STORM_LOG_LEVELS = {
'trace': _STORM_LOG_TRACE,
'debug': _STORM_LOG_DEBUG,
'info': _STORM_LOG_INFO,
'warn': _STORM_LOG_WARN,
'warning': _STORM_LOG_WARN,
'error': _STORM_LOG_ERROR,
'critical': _STORM_LOG_ERROR
}
_PYTHON_LOG_LEVELS = {
'critical': logging.CRITICAL,
'error': logging.ERROR,
'warning': logging.WARNING,
'warn': logging.WARNING,
'info': logging.INFO,
'debug': logging.DEBUG,
'trace': logging.DEBUG
}
log = logging.getLogger(__name__)
def remote_pdb_handler(signum, frame):
""" Handler to drop us into a remote debugger upon receiving SIGUSR1 """
try:
from remote_pdb import RemotePdb
rdb = RemotePdb(host='127.0.0.1', port=0)
rdb.set_trace(frame=frame)
except ImportError:
log.warning('remote_pdb unavailable. Please install remote_pdb to '
'allow remote debugging.')
# Restore signal handler for later
signal.signal(signum, remote_pdb_handler)
class StormHandler(logging.Handler):
"""Handler that will send messages back to Storm."""
def __init__(self, stream=None):
""" Initialize handler """
if stream is None:
stream = sys.stdout
super(StormHandler, self).__init__()
self._component = Component(output_stream=stream)
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
If exception information is present, it is formatted using
traceback.print_exception and sent to Storm.
"""
try:
msg = self.format(record)
level = _STORM_LOG_LEVELS.get(record.levelname.lower(),
_STORM_LOG_INFO)
self._component.send_message({'command': 'log', 'msg': str(msg),
'level': level})
except Exception:
self.handleError(record)
class LogStream(object):
"""Object that implements enough of the Python stream API to be used as
sys.stdout. Messages are written to the Python logger.
"""
def __init__(self, logger):
self.logger = logger
def write(self, message):
if message.strip() == "":
return # skip blank lines
try:
self.logger.info(message)
except:
# There's been an issue somewhere in the logging sub-system
# so we'll put stderr and stdout back to their originals and
# raise the exception which will cause Storm to choke
sys.stdout = sys.__stdout__
raise
def flush(self):
"""No-op method to prevent crashes when someone does
sys.stdout.flush.
"""
pass
Tuple = namedtuple('Tuple', 'id component stream task values')
"""Storm's primitive data type passed around via streams.
:ivar id: the ID of the Tuple.
:type id: str
:ivar component: component that the Tuple was generated from.
:type component: str
:ivar stream: the stream that the Tuple was emitted into.
:type stream: str
:ivar task: the task the Tuple was generated from.
:type task: int
:ivar values: the payload of the Tuple where data is stored.
:type values: list
"""
class Component(object):
"""Base class for spouts and bolts which contains class methods for
logging messages back to the Storm worker process.
:ivar input_stream: The ``file``-like object to use to retrieve commands
from Storm. Defaults to ``sys.stdin``.
:ivar output_stream: The ``file``-like object to send messages to Storm with.
Defaults to ``sys.stdout``.
:ivar topology_name: The name of the topology sent by Storm in the initial
handshake.
:ivar task_id: The numerical task ID for this component, as sent by Storm in
the initial handshake.
:ivar component_name: The name of this component, as sent by Storm in the
initial handshake.
:ivar debug: A ``bool`` indicating whether or not Storm is running in debug
mode. Specified by the `topology.debug` Storm setting.
:ivar storm_conf: A ``dict`` containing the configuration values sent by
Storm in the initial handshake with this component.
:ivar context: The context of where this component is in the topology. See
`the Storm Multi-Lang protocol documentation <https://storm.apache.org/documentation/Multilang-protocol.html>`__
for details.
:ivar pid: An ``int`` indicating the process ID of this component as
retrieved by ``os.getpid()``.
:ivar logger: A logger to use with this component.
.. note::
Using ``Component.logger`` combined with the
:class:`streamparse.storm.component.StormHandler` handler is
the recommended way for logging messages from your
component. If you use ``Component.log`` instead, the logging
messages will *always* be sent to Storm, even if they are
``debug`` level messages and you are running in production.
Using :class:`streamparse.storm.component.StormHandler`
ensures that you will instead have your logging messages
filtered on the Python side and only have the messages you
actually want logged serialized and sent to Storm.
"""
def __init__(self, input_stream=sys.stdin, output_stream=sys.stdout,
rdb_signal=signal.SIGUSR1):
# Ensure we don't fall back on the platform-dependent encoding and
# always use UTF-8
self.input_stream = self._wrap_stream(input_stream)
self.output_stream = self._wrap_stream(output_stream)
self.topology_name = None
self.task_id = None
self.component_name = None
self.debug = None
self.storm_conf = None
self.context = None
self.pid = os.getpid()
self.logger = None
# pending commands/Tuples we read while trying to read task IDs
self._pending_commands = deque()
# pending task IDs we read while trying to read commands/Tuples
self._pending_task_ids = deque()
self._reader_lock = RLock()
self._writer_lock = RLock()
# Setup remote pdb handler if asked to
if rdb_signal is not None:
signal.signal(rdb_signal, remote_pdb_handler)
@staticmethod
def is_heartbeat(tup):
""" :returns: Whether or not the given Tuple is a heartbeat """
return tup.task == -1 and tup.stream == '__heartbeat'
@staticmethod
def _wrap_stream(stream):
"""Returns a TextIOWrapper around the given stream that handles UTF-8
encoding/decoding.
"""
if hasattr(stream, 'buffer'):
return io.TextIOWrapper(stream.buffer, encoding='utf-8')
elif hasattr(stream, 'readable'):
return io.TextIOWrapper(stream, encoding='utf-8')
# Python 2.x stdin and stdout are just files
else:
return io.open(stream.fileno(), mode=stream.mode, encoding='utf-8')
def _setup_component(self, storm_conf, context):
"""Add helpful instance variables to component after initial handshake
with Storm. Also configure logging.
"""
self.topology_name = storm_conf.get('topology.name', '')
self.task_id = context.get('taskid', '')
self.component_name = context.get('componentid')
# If using Storm before 0.10.0 componentid is not available
if self.component_name is None:
self.component_name = context.get('task->component', {})\
.get(str(self.task_id), '')
self.debug = storm_conf.get("topology.debug", False)
self.storm_conf = storm_conf
self.context = context
# Set up logging
self.logger = logging.getLogger('.'.join((__name__,
self.component_name)))
log_path = self.storm_conf.get('streamparse.log.path')
if log_path:
root_log = logging.getLogger()
max_bytes = self.storm_conf.get('streamparse.log.max_bytes',
1000000) # 1 MB
backup_count = self.storm_conf.get('streamparse.log.backup_count',
10)
log_file = join(log_path,
('streamparse_{topology_name}_{component_name}'
'_{task_id}_{pid}.log'
.format(topology_name=self.topology_name,
component_name=self.component_name,
task_id=self.task_id,
pid=self.pid)))
handler = RotatingFileHandler(log_file, maxBytes=max_bytes,
backupCount=backup_count)
formatter = logging.Formatter('%(asctime)s - %(name)s - '
'%(levelname)s - %(message)s')
handler.setFormatter(formatter)
root_log.addHandler(handler)
log_level = self.storm_conf.get('streamparse.log.level', 'info')
log_level = _PYTHON_LOG_LEVELS.get(log_level, logging.INFO)
if self.debug:
# potentially override logging that was provided if
# topology.debug was set to true
log_level = logging.DEBUG
root_log.setLevel(log_level)
else:
self.send_message({'command': 'log',
'msg': ('WARNING: streamparse logging is not '
'configured. Please set streamparse.log.'
'path in your config.json.')})
# Redirect stdout to ensure that print statements/functions
# won't disrupt the multilang protocol
sys.stdout = LogStream(logging.getLogger('streamparse.stdout'))
def read_message(self):
"""Read a message from Storm, reconstruct newlines appropriately.
All of Storm's messages (for either bolts or spouts) should be of the
form::
            '<command or task_id from prior emit>\\nend\\n'
Command example, an incoming Tuple to a bolt::
'{ "id": "-6955786537413359385", "comp": "1", "stream": "1", "task": 9, "tuple": ["snow white and the seven dwarfs", "field2", 3]}\\nend\\n'
Command example for a spout to emit its next Tuple::
'{"command": "next"}\\nend\\n'
Example, the task IDs a prior emit was sent to::
'[12, 22, 24]\\nend\\n'
The edge case of where we read ``''`` from ``input_stream`` indicating
EOF, usually means that communication with the supervisor has been
severed.
"""
msg = ""
num_blank_lines = 0
while True:
            # readline will return the trailing \n so that output is unambiguous;
            # we should only see line == '' if we're at EOF
with self._reader_lock:
line = self.input_stream.readline()
if line == 'end\n':
break
elif line == '':
log.error("Received EOF while trying to read stdin from Storm, "
"pipe appears to be broken, exiting.")
sys.exit(1)
elif line == '\n':
num_blank_lines += 1
if num_blank_lines % 1000 == 0:
log.warn("While trying to read a command or pending task "
"ID, Storm has instead sent %s '\\n' messages.",
num_blank_lines)
continue
msg = '{}{}\n'.format(msg, line[0:-1])
try:
return json.loads(msg)
except Exception:
log.error("JSON decode error for message: %r", msg, exc_info=True)
raise
def read_task_ids(self):
if self._pending_task_ids:
return self._pending_task_ids.popleft()
else:
msg = self.read_message()
while not isinstance(msg, list):
self._pending_commands.append(msg)
msg = self.read_message()
return msg
def read_command(self):
if self._pending_commands:
return self._pending_commands.popleft()
else:
msg = self.read_message()
while isinstance(msg, list):
self._pending_task_ids.append(msg)
msg = self.read_message()
return msg
def read_tuple(self):
cmd = self.read_command()
return Tuple(cmd['id'], cmd['comp'], cmd['stream'], cmd['task'],
cmd['tuple'])
def read_handshake(self):
"""Read and process an initial handshake message from Storm."""
msg = self.read_message()
pid_dir, _conf, _context = msg['pidDir'], msg['conf'], msg['context']
# Write a blank PID file out to the pidDir
open(join(pid_dir, str(self.pid)), 'w').close()
self.send_message({'pid': self.pid})
return _conf, _context
def send_message(self, message):
"""Send a message to Storm via stdout."""
if not isinstance(message, dict):
log.error("%s.%d attempted to send a non dict message to Storm: %r",
self.component_name, self.pid, message)
return
wrapped_msg = "{}\nend\n".format(json.dumps(message))
with self._writer_lock:
self.output_stream.flush()
self.output_stream.write(wrapped_msg)
self.output_stream.flush()
def raise_exception(self, exception, tup=None):
"""Report an exception back to Storm via logging.
:param exception: a Python exception.
:param tup: a :class:`Tuple` object.
"""
if tup:
message = ('Python {exception_name} raised while processing Tuple '
'{tup!r}\n{traceback}')
else:
message = 'Python {exception_name} raised\n{traceback}'
message = message.format(exception_name=exception.__class__.__name__,
tup=tup,
traceback=format_exc())
self.send_message({'command': 'error', 'msg': str(message)})
self.send_message({'command': 'sync'}) # sync up right away
def log(self, message, level=None):
"""Log a message to Storm optionally providing a logging level.
:param message: the log message to send to Storm.
:type message: str
:param level: the logging level that Storm should use when writing the
``message``. Can be one of: trace, debug, info, warn, or
error (default: ``info``).
:type level: str
.. warning::
This will send your message to Storm regardless of what level you
        specify. In almost all cases, you are better off using
``Component.logger`` with a
:class:`streamparse.storm.component.StormHandler`, because the
filtering will happen on the Python side (instead of on the Java side
after taking the time to serialize your message and send it to Storm).
"""
level = _STORM_LOG_LEVELS.get(level, _STORM_LOG_INFO)
self.send_message({'command': 'log', 'msg': str(message),
'level': level})
def emit(self, tup, tup_id=None, stream=None, anchors=None,
direct_task=None, need_task_ids=True):
"""Emit a new Tuple to a stream.
:param tup: the Tuple payload to send to Storm, should contain only
JSON-serializable data.
:type tup: :class:`list` or :class:`streamparse.storm.component.Tuple`
:param tup_id: the ID for the Tuple. If omitted by a
:class:`streamparse.storm.spout.Spout`, this emit will be
unreliable.
:type tup_id: str
:param stream: the ID of the stream to emit this Tuple to. Specify
``None`` to emit to default stream.
:type stream: str
        :param anchors: IDs of the Tuples (or
:class:`streamparse.storm.component.Tuple` instances)
which the emitted Tuples should be anchored to. This is
only passed by :class:`streamparse.storm.bolt.Bolt`.
:type anchors: list
:param direct_task: the task to send the Tuple to.
:type direct_task: int
:param need_task_ids: indicate whether or not you'd like the task IDs
                              the Tuple was emitted to (default: ``True``).
:type need_task_ids: bool
:returns: a ``list`` of task IDs that the Tuple was sent to. Note that
when specifying direct_task, this will be equal to
``[direct_task]``. If you specify ``need_task_ids=False``,
this function will return ``None``.
"""
if not isinstance(tup, (list, tuple)):
raise TypeError('All Tuples must be either lists or tuples, '
'received {!r} instead.'.format(type(tup)))
msg = {'command': 'emit', 'tuple': tup}
if anchors is not None:
msg['anchors'] = anchors
if tup_id is not None:
msg['id'] = tup_id
if stream is not None:
msg['stream'] = stream
if direct_task is not None:
msg['task'] = direct_task
if need_task_ids is False:
# only need to send on False, Storm's default is True
msg['need_task_ids'] = need_task_ids
# Use both locks so we ensure send_message and read_task_ids are for
# same emit
with self._reader_lock, self._writer_lock:
# Message encoding will convert both list and tuple to a JSON array.
self.send_message(msg)
if need_task_ids is True:
downstream_task_ids = [direct_task] if direct_task is not None \
else self.read_task_ids()
return downstream_task_ids
else:
return None
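    # Illustrative only: a hypothetical Bolt subclass might forward a two-field Tuple,
    # anchored to the id of the Tuple it is processing, onto a named stream with
    #     self.emit([word, count], anchors=[tup.id], stream='counts')
    # and get back the list of task IDs it was sent to (or None when
    # need_task_ids=False is passed).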
def run(self):
"""Main run loop for all components.
Performs initial handshake with Storm and reads Tuples handing them off
to subclasses. Any exceptions are caught and logged back to Storm
prior to the Python process exiting.
.. warning::
Subclasses should **not** override this method.
"""
storm_conf, context = self.read_handshake()
self._setup_component(storm_conf, context)
try:
self.initialize(storm_conf, context)
while True:
self._run()
except Exception as e:
self._handle_run_exception(e)
sys.exit(1)
def _handle_run_exception(self, exc):
"""Process an exception encountered while running the ``run()`` loop.
Called right before program exits.
"""
log_msg = "Exception in {}.run()".format(self.__class__.__name__)
log.error(log_msg, exc_info=True)
self.raise_exception(exc)
|
|
"""Random collection of usefull functions"""
import os
from math import log10
import numpy as np
from enrico.constants import met_ref,mjd_ref, jd_ref, DAY_IN_SECOND
def _log(text, line=True):
if line:
print("\033[34m"+'# ' + '*' * 60)
print("\033[34m"+"# *** %s ***" % text)
if line:
print "\033[34m"+'# ' + '*' * 60+"\033[0m"
def fluxScale(flux_value):
"""Get the scale of the flux value
ex : 1.4e-14 ---> 1e-14"""
return 10 ** np.floor(np.log10(flux_value) + 0.5)
def fluxNorm(flux_value):
"""Return the norm from the flux_value
ex : 1.4e-14 ---> 1.4"""
return pow(10, np.log10(flux_value) - int(np.log10(flux_value))) * 10
def Prefactor(flux,index,emin,emax,escale):
"""Compute the prefactor at the energy escale knowing
the flux and index between emin and emax"""
Denomin = pow(emax,-abs(index)+1.) -pow(emin,-abs(index)+1.)
return flux*(-abs(index)+1)*pow(escale,-abs(index)) / Denomin
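#Equivalently: for dN/dE = N0*(E/escale)**-G with G = abs(index), the integral flux between
#emin and emax is N0*escale**G*(emax**(1-G) - emin**(1-G))/(1-G), so the value returned here
#is N0 = flux*(1-G)*escale**(-G)/(emax**(1-G) - emin**(1-G)).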
def dNde(energy,Fit,name):
	'''Compute the dN/dE value at energy E for the source name'''
import pyLikelihood
ptsrc = pyLikelihood.PointSource_cast(Fit[name].src)
arg = pyLikelihood.dArg(energy)
return ptsrc.spectrum()(arg)
def meanEnergy(emin, emax, index_value):
"""Get the mean energy, weighted with a power law of a given index"""
x = emax / emin
if index_value == -2.0:
eflux = emax * np.log(x) / (x - 1)
elif index_value == -1.0:
eflux = emin * (x - 1) / np.log(x)
else:
factor1 = emin * (index_value + 1) / (index_value + 2)
factor2 = (x ** (index_value + 2) - 1) / (x ** (index_value + 1) - 1)
eflux = factor1 * factor2
return eflux
def GetE0(a,b):
""""Get the center of a bin in log space"""
return int(pow(10, (np.log10(a) + np.log10(b)) / 2))
def calcAngSepDeg(ra0, dec0, ra1, dec1):
'''Return the angular separation between two objects. Use the
special case of the Vincenty formula that is accurate for all
distances'''
C = np.pi / 180
d0 = C * dec0
d1 = C * dec1
r12 = C * (ra0 - ra1)
cd0 = np.cos(d0)
sd0 = np.sin(d0)
cd1 = np.cos(d1)
sd1 = np.sin(d1)
cr12 = np.cos(r12)
sr12 = np.sin(r12)
num = np.sqrt((cd0 * sr12) ** 2 + (cd1 * sd0 - sd1 * cd0 * cr12) ** 2)
den = sd0 * sd1 + cd0 * cd1 * cr12
return np.arctan2(num, den) / C
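#e.g. two positions one degree apart in declination (doctest-style illustration):
#    >>> round(calcAngSepDeg(10.0, 0.0, 10.0, 1.0), 6)
#    1.0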
def etag(emin, emax, fmt='%07d'):
return ('emin_%s_emax_%s' % (fmt, fmt)) % (emin, emax)
def cube_to_image(cube, slicepos=None, mean=False):
""" Make an image out of a cube.
	Both in- and output should be pyfits.HDUs"""
from pyfits import PrimaryHDU
header = cube.header.copy()
header['NAXIS'] = 2
del header['NAXIS3']
del header['CRVAL3']
del header['CDELT3']
if slicepos:
data = cube.data[slicepos]
else:
if mean:
data = cube.data.mean(0).astype(cube.data.dtype)
else:
data = cube.data.sum(0).astype(cube.data.dtype)
return PrimaryHDU(data, header)
def SubtractFits(infile1, infile2, config):
"""Create (absolute and relative) difference images"""
import pyfits
data1 = pyfits.getdata(infile1)
data2 = pyfits.getdata(infile2)
head = pyfits.getheader(infile2)
filebase = config['out'] + "/" + config['target']['name']
abs_diff_file = filebase + "_Subtract_Model_cmap.fits"
rel_diff_file = filebase + "_Residual_Model_cmap.fits"
os.system("rm " + abs_diff_file)
os.system("rm " + rel_diff_file)
pyfits.writeto(abs_diff_file, data1 - data2, head)
pyfits.writeto(rel_diff_file, (data1 - data2) / data2, head)
def GetFluxes(Fit,Emin=1e2,Emax=3e5):
"""Print the integral flux and error for all the sources"""
print "\nSource Flux [%2.2e MeV, %2.2e MeV] : " %(Emin,Emax)
for src in Fit.model.srcNames:
try:
print(src + " Integral Flux : %2.2e +/- %2.2e ph/cm2/s" %
(Fit.flux(src,Emin,Emax), Fit.fluxError(src,Emin,Emax)))
except:
pass
print
def GetCovar(srcname, Fit, verbose = True):
"""Extract covariance matrix"""
import pyLikelihood
par_index_map = {}
indx = 0
for src in Fit.sourceNames():
parNames = pyLikelihood.StringVector()
Fit[src].src.spectrum().getFreeParamNames(parNames)
for par in parNames:
par_index_map["::".join((src, par))] = indx
indx += 1
if Fit.covariance is None:
raise RuntimeError("Covariance matrix has not been computed.")
covar = np.array(Fit.covariance)
if len(covar) != len(par_index_map):
raise RuntimeError("Covariance matrix size does not match the " +
"number of free parameters.")
my_covar = []
srcpars = pyLikelihood.StringVector()
Fit[srcname].src.spectrum().getFreeParamNames(srcpars)
pars = ["::".join((srcname, x)) for x in srcpars]
for xpar in pars:
ix = par_index_map[xpar]
my_covar.append([covar[ix][par_index_map[ypar]] for ypar in pars])
if verbose :
print "The covariance matrix is :\n", np.array(my_covar)
print
return my_covar
def getParamIndx(fit, name, parameter):
"""Get index for a specific parameter for a specific source
from model in UnbinnedAnalysis object fit"""
ID = -1
spec = fit[name].funcs['Spectrum']
for indx, parName in zip(spec._parIds, spec.paramNames):
if(parName == parameter):
ID = indx
if(ID == -1):
print('Parameter %s not found for source %s in file %s.' %
(parameter, name, fit.srcModel))
return ID
def FreezeParams(fit, name, parameter, value):
fit.logLike.getSource(name).getSrcFuncs()['Spectrum'].getParam(parameter).setValue(value)
fit.logLike.getSource(name).getSrcFuncs()['Spectrum'].getParam(parameter).setFree(0)
def ApproxPref(Fit, ener,name):
Pref = np.zeros(len(ener)-1)
for ibin in xrange(len(ener)-1):
Eav = GetE0(ener[ibin+1],ener[ibin])
Pref[ibin] = dNde(Eav,Fit,name)
return Pref
def ApproxGamma(Fit, ener,name):
""" Get an approximation of the index for different bin in energy"""
Gamma = np.zeros(len(ener)-1)
for ibin in xrange(len(ener)-1):
#Compute an approximation of an index
dnde1 = np.log10(dNde(ener[ibin],Fit,name))
dnde2 = np.log10(dNde(ener[ibin+1],Fit,name))
Gamma[ibin] = (dnde2-dnde1)/(np.log10(1.*ener[ibin+1])-np.log10(1.*ener[ibin]))
return Gamma
def _SpecFileName(config):
"""return a generic name for the file related to the spectrum (plot, results...)"""
from enrico.constants import SpectrumPath
return config['out'] + '/'+SpectrumPath+'/SED_' + config['target']['name'] +'_'+ config['target']['spectrum']
def _dump_xml(config) :
"""Give the name of the XML file where the results will be save by gtlike"""
return (config['out'] + "/" + config['target']['name']
+ "_" + config['target']['spectrum'] + "_"+
config['file']['tag'] + "_out.xml")
def _dump_reg(config):
return config['out'] + "/Roi_model.reg"
def _dump_findsrcout(config):
res = config['out'] + "/FindSource.out"
os.system("touch "+res)
return res
def _dump_filename(config):
"""Give the name of the file where the results will be dumped"""
return (config['out'] + '/' + config['target']['name'] + '_' +
str(config['target']['spectrum']) + '_' +
str(int(config['time']['tmin'])) + '_' +
str(int(config['time']['tmax'])) + '_' +
str(int(config['energy']['emin'])) + '_' +
str(int(config['energy']['emax'])) + "_"+
config['file']['tag'] + ".results")
def DumpResult(Result, config):
"""Dump the result into an ascii file """
Dumpfile = open(_dump_filename(config), "w")
for key in Result.iterkeys():
Dumpfile.write(key + '\t' + str(Result[key]) + '\n')
Dumpfile.close()
def ReadResult(config):
"""Read the result from an ascii file """
lines = open(_dump_filename(config)).readlines()
results = dict()
for line in lines:
key, value = line.split()[0:2]
try:
value = float(value)
except:
pass
results[key] = value
return results
def time_selection_string(config,numbin0):
"""Convert file with start stop pairs to gtmktime filter string"""
if numbin0==None:
numbin0=0
# Read MET_TSTART, MET_TSTOP pairs from file
bins = np.loadtxt(config['time']['file'])
if config['time']['type']=='MJD':
bins = MJD_to_met(bins)
elif config['time']['type']=='JD':
bins = JD_to_met(bins)
selstr=''
last=True
for numbin in range(numbin0,len(bins)):
tbin=bins[numbin]
selstr+='((START>{0:.0f})&&(STOP<{1:.0f}))||'.format(tbin[0],tbin[1])
if len(selstr)>=800:
last=False
break
# remove last ||, and enclose in parens
selstr='('+selstr[:-2]+')'
	# advance numbin by one so the next call starts at the bin after the last one included in selstr
	return selstr, numbin+1, last
def met_to_MJD(met):
return mjd_ref + (met-met_ref)/DAY_IN_SECOND
def MJD_to_met(mjd):
return met_ref + (mjd-mjd_ref)*DAY_IN_SECOND
def JD_to_met(jd):
	return MJD_to_met(jd - 2400000.5)
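#These are plain linear shifts relative to the reference epochs imported from
#enrico.constants, so e.g. met_to_MJD(met_ref) == mjd_ref and MJD_to_met(mjd_ref) == met_ref.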
def Checkevtclass(evclass):
classirfs = {1:"P8R2_TRANSIENT100A",2:"P8R2_TRANSIENT100E",4:"P8R2_TRANSIENT100",8:"P8R2_TRANSIENT020E",
16:"P8R2_TRANSIENT020",32:"P8R2_TRANSIENT010E",64:"P8R2_TRANSIENT010",128:"P8R2_SOURCE",
256:"P8R2_CLEAN",521:"P8R2_ULTRACLEAN",1024:"P8R2_ULTRACLEANVETO",32768:"P8R2_TRANSIENT100S",
65536:"P8R2_TRANSIENT015S",16777216:"P8R2_LLE"}
try :
tmp = classirfs[evclass]
except:
from enrico import Loggin
mes = Loggin.Message()
mes.error("evclass value in config file not valid")
def GetSDC(val):
deno = 0
while val>=2:
val = val/2
deno += 1
return deno
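#GetSDC is effectively an integer log2, used below to peel the highest set bit off an
#event-type mask, e.g.
#    >>> GetSDC(8)
#    3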
def GetIRFS(evtclass,evttype):
classirfs = {1:"P8R2_TRANSIENT100A",2:"P8R2_TRANSIENT100E",4:"P8R2_TRANSIENT100",8:"P8R2_TRANSIENT020E",
16:"P8R2_TRANSIENT020",32:"P8R2_TRANSIENT010E",64:"P8R2_TRANSIENT010",128:"P8R2_SOURCE",
256:"P8R2_CLEAN",521:"P8R2_ULTRACLEAN",1024:"P8R2_ULTRACLEANVETO",32768:"P8R2_TRANSIENT100S",
65536:"P8R2_TRANSIENT015S",16777216:"P8R2_LLE"}
typeirfs={1:"FRONT",2:"BACK",4:"PSF0",8:"PSF1",16:"PSF2",32:"PSF3",64:"EDISP0",
128:"EDISP1",256:"EDISP2",512:"EDISP3"}
result = []
val = evttype
while val>0 :
deno = GetSDC(val)
result.append(2**deno)
val = val - result[-1]
typ = []
for t in result:
typ.append(typeirfs[t])
return classirfs[evtclass]+"_V6",typ
def GetIso(evtclass,evttype):
irfs = GetIRFS(evtclass,evttype)
import enrico.environ as e
if len(irfs[1])> 1:
res = os.path.join(e.DIFFUSE_DIR,'iso_'+str(irfs[0])+'_v06.txt')
else:
		res = os.path.join(e.DIFFUSE_DIR,'iso_'+irfs[0]+'_'+str(irfs[1][0])+'_v06.txt')
return res
def GetZenithCut(evtclass,evttype,emin):
irfs = GetIRFS(evtclass,evttype)
ener = np.array([50,100,200,300,500])
emin_ind = sum(ener-0.1<emin)-1
All = [80,90 ,95 ,100 ,100 ]
#FRONT+BACK, EDISP0-EDISP3 80 90 95 100 100
FRONT = [85 ,95 ,100 ,100 ,100]
BACK = [75,85 ,90 ,95 ,100]
PSF0 = [70,80 ,85 ,90 ,95]
PSF1 = [75,85 ,95 ,100 ,100]
PSF2 = [85,95,100,100,100]
PSF3 = [90,100,100,100,100]
if len(irfs[1])>1 :
return All[emin_ind]
elif irfs[1][0] == "FRONT":
return FRONT[emin_ind]
elif irfs[1][0] == "BACK":
return BACK[emin_ind]
elif irfs[1][0] == "PSF0":
return PSF0[emin_ind]
elif irfs[1][0] == "PSF1":
return PSF1[emin_ind]
elif irfs[1][0] == "PSF2":
return PSF2[emin_ind]
elif irfs[1][0] == "PSF3":
return PSF3[emin_ind]
|
|
# Copyright (c) 2012 Santosh Philip
# Copyright (c) 2016 Jamie Bull
# =======================================================================
# Distributed under the MIT License.
# (See accompanying file LICENSE or copy at
# http://opensource.org/licenses/MIT)
# =======================================================================
"""Draw all the loops in the IDF file.
There are two output files saved in the same location as the idf file:
- idf_file_location/idf_filename.dot
- idf_file_location/idf_filename.png
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
pathnameto_eplusscripting = "../../"
sys.path.append(pathnameto_eplusscripting)
import argparse
import eppy
import eppy.EPlusInterfaceFunctions
from eppy.EPlusInterfaceFunctions import readidf
# import pydot3k as pydot
import pydot
import eppy.loops as loops
pathnameto_eplusscripting = "../../"
sys.path.append(pathnameto_eplusscripting)
def firstisnode(edge):
if type(edge[0]) == tuple:
return True
else:
return False
def secondisnode(edge):
if type(edge[1]) == tuple:
return True
else:
return False
def bothnodes(edge):
if type(edge[0]) == tuple and type(edge[1]) == tuple:
return True
else:
return False
def dropnodes(edges):
"""draw a graph without the nodes"""
newedges = []
added = False
for edge in edges:
if bothnodes(edge):
newtup = (edge[0][0], edge[1][0])
newedges.append(newtup)
added = True
elif firstisnode(edge):
for edge1 in edges:
if edge[0] == edge1[1]:
newtup = (edge1[0], edge[1])
try:
newedges.index(newtup)
except ValueError as e:
newedges.append(newtup)
added = True
elif secondisnode(edge):
for edge1 in edges:
if edge[1] == edge1[0]:
newtup = (edge[0], edge1[1])
try:
newedges.index(newtup)
except ValueError as e:
newedges.append(newtup)
added = True
# gets the hanging nodes - nodes with no connection
if not added:
if firstisnode(edge):
newedges.append((edge[0][0], edge[1]))
if secondisnode(edge):
newedges.append((edge[0], edge[1][0]))
added = False
return newedges
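# Illustrative behaviour: an edge chain that passes through a (node_name, "epnode") tuple
# is collapsed so the surrounding components connect directly, e.g.
#     dropnodes([("b1", ("n1", "epnode")), (("n1", "epnode"), "b2")])
# returns [("b1", "b2")].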
def makeanode(name):
return pydot.Node(name, shape="plaintext", label=name)
def makeabranch(name):
return pydot.Node(name, shape="box3d", label=name)
def makeendnode(name):
return pydot.Node(
name, shape="doubleoctagon", label=name, style="filled", fillcolor="#e4e4e4"
)
def istuple(x):
return type(x) == tuple
def nodetype(anode):
"""return the type of node"""
try:
return anode[1]
except IndexError as e:
return None
def edges2nodes(edges):
"""gather the nodes from the edges"""
nodes = []
for e1, e2 in edges:
nodes.append(e1)
nodes.append(e2)
nodedict = dict([(n, None) for n in nodes])
justnodes = list(nodedict.keys())
# justnodes.sort()
justnodes = sorted(justnodes, key=lambda x: str(x[0]))
return justnodes
def makediagram(edges):
"""make the diagram with the edges"""
graph = pydot.Dot(graph_type="digraph")
nodes = edges2nodes(edges)
epnodes = [
(node, makeanode(node[0])) for node in nodes if nodetype(node) == "epnode"
]
endnodes = [
(node, makeendnode(node[0])) for node in nodes if nodetype(node) == "EndNode"
]
epbr = [(node, makeabranch(node)) for node in nodes if not istuple(node)]
nodedict = dict(epnodes + epbr + endnodes)
for value in list(nodedict.values()):
graph.add_node(value)
for e1, e2 in edges:
graph.add_edge(pydot.Edge(nodedict[e1], nodedict[e2]))
return graph
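# Illustrative use (file names are placeholders; the real script derives them from the
# idf file name as described in the module docstring, and PNG output assumes Graphviz
# is installed):
#     g = makediagram(edges)
#     g.write('loops.dot')
#     g.write_png('loops.png')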
def transpose2d(mtx):
"""Transpose a 2d matrix
[
[1,2,3],
[4,5,6]
]
becomes
[
[1,4],
[2,5],
[3,6]
]
"""
return zip(*mtx)
def makebranchcomponents(data, commdct, anode="epnode"):
"""return the edges jointing the components of a branch"""
alledges = []
objkey = "BRANCH"
cnamefield = "Component %s Name"
inletfield = "Component %s Inlet Node Name"
outletfield = "Component %s Outlet Node Name"
numobjects = len(data.dt[objkey])
cnamefields = loops.repeatingfields(data, commdct, objkey, cnamefield)
inletfields = loops.repeatingfields(data, commdct, objkey, inletfield)
outletfields = loops.repeatingfields(data, commdct, objkey, outletfield)
inlts = loops.extractfields(data, commdct, objkey, [inletfields] * numobjects)
cmps = loops.extractfields(data, commdct, objkey, [cnamefields] * numobjects)
otlts = loops.extractfields(data, commdct, objkey, [outletfields] * numobjects)
zipped = list(zip(inlts, cmps, otlts))
tzipped = [transpose2d(item) for item in zipped]
for i in range(len(data.dt[objkey])):
tt = tzipped[i]
# branchname = data.dt[objkey][i][1]
edges = []
for t0 in tt:
edges = edges + [((t0[0], anode), t0[1]), (t0[1], (t0[2], anode))]
alledges = alledges + edges
return alledges
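# Illustrative note (not part of the original script): for each branch component
# row (inlet, component, outlet), the loop above emits two edges,
#     ((inlet, "epnode"), component) and (component, (outlet, "epnode")),
# so a hypothetical row ("n_in", "Main Pump", "n_out") contributes
#     [(("n_in", "epnode"), "Main Pump"), ("Main Pump", ("n_out", "epnode"))]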
def makeairplantloop(data, commdct):
"""make the edges for the airloop and the plantloop"""
anode = "epnode"
endnode = "EndNode"
# in plantloop get:
# demand inlet, outlet, branchlist
# supply inlet, outlet, branchlist
plantloops = loops.plantloopfields(data, commdct)
# splitters
# inlet
# outlet1
# outlet2
splitters = loops.splitterfields(data, commdct)
#
# mixer
# outlet
# inlet1
# inlet2
mixers = loops.mixerfields(data, commdct)
#
    # supply branchlist
# branch1 -> inlet, outlet
# branch2 -> inlet, outlet
# branch3 -> inlet, outlet
#
    # CONNECT INLETS AND OUTLETS
edges = []
# get all branches
branchkey = "branch".upper()
branches = data.dt[branchkey]
branch_i_o = {}
for br in branches:
br_name = br[1]
in_out = loops.branch_inlet_outlet(data, commdct, br_name)
branch_i_o[br_name] = dict(list(zip(["inlet", "outlet"], in_out)))
# for br_name, in_out in branch_i_o.items():
# edges.append(((in_out["inlet"], anode), br_name))
# edges.append((br_name, (in_out["outlet"], anode)))
# instead of doing the branch
# do the content of the branch
edges = makebranchcomponents(data, commdct)
# connect splitter to nodes
for splitter in splitters:
# splitter_inlet = inletbranch.node
splittername = splitter[0]
inletbranchname = splitter[1]
splitter_inlet = branch_i_o[inletbranchname]["outlet"]
# edges = splitter_inlet -> splittername
edges.append(((splitter_inlet, anode), splittername))
        # splitter_outlets = outletbranches.nodes
outletbranchnames = [br for br in splitter[2:]]
splitter_outlets = [branch_i_o[br]["inlet"] for br in outletbranchnames]
# edges = [splittername -> outlet for outlet in splitter_outlets]
moreedges = [(splittername, (outlet, anode)) for outlet in splitter_outlets]
edges = edges + moreedges
for mixer in mixers:
# mixer_outlet = outletbranch.node
mixername = mixer[0]
outletbranchname = mixer[1]
mixer_outlet = branch_i_o[outletbranchname]["inlet"]
# edges = mixername -> mixer_outlet
edges.append((mixername, (mixer_outlet, anode)))
# mixer_inlets = inletbranches.nodes
inletbranchnames = [br for br in mixer[2:]]
mixer_inlets = [branch_i_o[br]["outlet"] for br in inletbranchnames]
# edges = [mixername -> inlet for inlet in mixer_inlets]
moreedges = [((inlet, anode), mixername) for inlet in mixer_inlets]
edges = edges + moreedges
# connect demand and supply side
# for plantloop in plantloops:
# supplyinlet = plantloop[1]
# supplyoutlet = plantloop[2]
# demandinlet = plantloop[4]
# demandoutlet = plantloop[5]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
#
# -----------air loop stuff----------------------
# from s_airloop2.py
# Get the demand and supply nodes from 'airloophvac'
# in airloophvac get:
# get branch, supplyinlet, supplyoutlet, demandinlet, demandoutlet
objkey = "airloophvac".upper()
fieldlists = [
[
"Branch List Name",
"Supply Side Inlet Node Name",
"Demand Side Outlet Node Name",
"Demand Side Inlet Node Names",
"Supply Side Outlet Node Names",
]
] * loops.objectcount(data, objkey)
airloophvacs = loops.extractfields(data, commdct, objkey, fieldlists)
# airloophvac = airloophvacs[0]
# in AirLoopHVAC:ZoneSplitter:
# get Name, inlet, all outlets
objkey = "AirLoopHVAC:ZoneSplitter".upper()
singlefields = ["Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonesplitters = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:SupplyPlenum:
# get Name, Zone Name, Zone Node Name, inlet, all outlets
objkey = "AirLoopHVAC:SupplyPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Inlet Node Name"]
fld = "Outlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
supplyplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ZoneMixer:
# get Name, outlet, all inlets
objkey = "AirLoopHVAC:ZoneMixer".upper()
singlefields = ["Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
zonemixers = loops.extractfields(data, commdct, objkey, fieldlists)
# in AirLoopHVAC:ReturnPlenum:
# get Name, Zone Name, Zone Node Name, outlet, all inlets
objkey = "AirLoopHVAC:ReturnPlenum".upper()
singlefields = ["Name", "Zone Name", "Zone Node Name", "Outlet Node Name"]
fld = "Inlet %s Node Name"
repeatfields = loops.repeatingfields(data, commdct, objkey, fld)
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
returnplenums = loops.extractfields(data, commdct, objkey, fieldlists)
# connect room to each equip in equiplist
# in ZoneHVAC:EquipmentConnections:
# get Name, equiplist, zoneairnode, returnnode
objkey = "ZoneHVAC:EquipmentConnections".upper()
singlefields = [
"Zone Name",
"Zone Conditioning Equipment List Name",
"Zone Air Node Name",
"Zone Return Air Node Name",
]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equipconnections = loops.extractfields(data, commdct, objkey, fieldlists)
# in ZoneHVAC:EquipmentList:
# get Name, all equiptype, all equipnames
objkey = "ZoneHVAC:EquipmentList".upper()
singlefields = ["Name"]
fieldlist = singlefields
flds = ["Zone Equipment %s Object Type", "Zone Equipment %s Name"]
repeatfields = loops.repeatingfields(data, commdct, objkey, flds)
fieldlist = fieldlist + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
equiplists = loops.extractfields(data, commdct, objkey, fieldlists)
equiplistdct = dict([(ep[0], ep[1:]) for ep in equiplists])
for key, equips in list(equiplistdct.items()):
enames = [equips[i] for i in range(1, len(equips), 2)]
equiplistdct[key] = enames
# adistuunit -> room
# adistuunit <- VAVreheat
# airinlet -> VAVreheat
# in ZoneHVAC:AirDistributionUnit:
    # get Name, Air Terminal Object Type, Air Terminal Name
objkey = "ZoneHVAC:AirDistributionUnit".upper()
singlefields = ["Name", "Air Terminal Object Type", "Air Terminal Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistuunits = loops.extractfields(data, commdct, objkey, fieldlists)
# code only for AirTerminal:SingleDuct:VAV:Reheat
# get airinletnodes for vavreheats
# in AirTerminal:SingleDuct:VAV:Reheat:
# get Name, airinletnode
adistuinlets = loops.makeadistu_inlets(data, commdct)
alladistu_comps = []
for key in list(adistuinlets.keys()):
objkey = key.upper()
singlefields = ["Name"] + adistuinlets[key]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
adistu_components = loops.extractfields(data, commdct, objkey, fieldlists)
alladistu_comps.append(adistu_components)
# in AirTerminal:SingleDuct:Uncontrolled:
# get Name, airinletnode
objkey = "AirTerminal:SingleDuct:Uncontrolled".upper()
singlefields = ["Name", "Zone Supply Air Node Name"]
repeatfields = []
fieldlist = singlefields + repeatfields
fieldlists = [fieldlist] * loops.objectcount(data, objkey)
uncontrolleds = loops.extractfields(data, commdct, objkey, fieldlists)
anode = "epnode"
endnode = "EndNode"
# edges = []
# connect demand and supply side
# for airloophvac in airloophvacs:
# supplyinlet = airloophvac[1]
# supplyoutlet = airloophvac[4]
# demandinlet = airloophvac[3]
# demandoutlet = airloophvac[2]
# # edges = [supplyoutlet -> demandinlet, demandoutlet -> supplyinlet]
# moreedges = [((supplyoutlet, endnode), (demandinlet, endnode)),
# ((demandoutlet, endnode), (supplyinlet, endnode))]
# edges = edges + moreedges
# connect zonesplitter to nodes
for zonesplitter in zonesplitters:
name = zonesplitter[0]
inlet = zonesplitter[1]
outlets = zonesplitter[2:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect supplyplenum to nodes
for supplyplenum in supplyplenums:
name = supplyplenum[0]
inlet = supplyplenum[3]
outlets = supplyplenum[4:]
edges.append(((inlet, anode), name))
for outlet in outlets:
edges.append((name, (outlet, anode)))
# connect zonemixer to nodes
for zonemixer in zonemixers:
name = zonemixer[0]
outlet = zonemixer[1]
inlets = zonemixer[2:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect returnplenums to nodes
for returnplenum in returnplenums:
name = returnplenum[0]
outlet = returnplenum[3]
inlets = returnplenum[4:]
edges.append((name, (outlet, anode)))
for inlet in inlets:
edges.append(((inlet, anode), name))
# connect room to return node
for equipconnection in equipconnections:
zonename = equipconnection[0]
returnnode = equipconnection[-1]
edges.append((zonename, (returnnode, anode)))
# connect equips to room
for equipconnection in equipconnections:
zonename = equipconnection[0]
zequiplistname = equipconnection[1]
for zequip in equiplistdct[zequiplistname]:
edges.append((zequip, zonename))
# adistuunit <- adistu_component
for adistuunit in adistuunits:
unitname = adistuunit[0]
compname = adistuunit[2]
edges.append((compname, unitname))
# airinlet -> adistu_component
for adistu_comps in alladistu_comps:
for adistu_comp in adistu_comps:
name = adistu_comp[0]
for airnode in adistu_comp[1:]:
edges.append(((airnode, anode), name))
# supplyairnode -> uncontrolled
for uncontrolled in uncontrolleds:
name = uncontrolled[0]
airnode = uncontrolled[1]
edges.append(((airnode, anode), name))
# edges = edges + moreedges
return edges
def getedges(fname, iddfile):
"""return the edges of the idf file fname"""
data, commdct, _idd_index = readidf.readdatacommdct(fname, iddfile=iddfile)
edges = makeairplantloop(data, commdct)
return edges
def replace_colon(s, replacewith="__"):
"""replace the colon with something"""
return s.replace(":", replacewith)
def clean_edges(arg):
if isinstance(arg, str):
return replace_colon(arg)
try:
return tuple(clean_edges(x) for x in arg)
except TypeError: # catch when for loop fails
return replace_colon(arg) # not a sequence so just return repr
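# Illustrative note (not part of the original script): clean_edges() recurses into
# nested tuples and rewrites colons so the names are safe for dot. Hypothetical input:
#     clean_edges((("Node:1", "epnode"), "AirLoopHVAC:ZoneSplitter"))
#     # -> (("Node__1", "epnode"), "AirLoopHVAC__ZoneSplitter")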
def make_and_save_diagram(fname, iddfile):
g = process_idf(fname, iddfile)
save_diagram(fname, g)
def process_idf(fname, iddfile):
data, commdct, _iddindex = readidf.readdatacommdct(fname, iddfile=iddfile)
print("constructing the loops")
edges = makeairplantloop(data, commdct)
print("cleaning edges")
edges = clean_edges(edges)
print("making the diagram")
return makediagram(edges)
def save_diagram(fname, g, silent=False):
dotname = "%s.dot" % (os.path.splitext(fname)[0])
pngname = "%s.png" % (os.path.splitext(fname)[0])
g.write(dotname)
if not silent:
print("saved file: %s" % (dotname))
g.write_png(pngname)
if not silent:
print("saved file: %s" % (pngname))
def main():
parser = argparse.ArgumentParser(
usage=None, description=__doc__, formatter_class=argparse.RawTextHelpFormatter
)
# need the formatter to print newline from __doc__
parser.add_argument(
"idd",
type=str,
action="store",
help="location of idd file = ./somewhere/eplusv8-0-1.idd",
)
parser.add_argument(
"file",
type=str,
action="store",
help="location of idf file = ./somewhere/f1.idf",
)
args = parser.parse_args()
make_and_save_diagram(args.file, args.idd)
if __name__ == "__main__":
sys.exit(main())
|
|
# -*- test-case-name: twisted.trial.test.test_script -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import print_function
import gc
import inspect
import os
import pdb
import random
import sys
import time
import warnings
from twisted.internet import defer
from twisted.application import app
from twisted.python import usage, reflect, failure
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedModule
from twisted import plugin
from twisted.python.util import spewer
from twisted.trial import runner, itrial, reporter
# Yea, this is stupid. Leave it for command-line compatibility for a
# while, though.
TBFORMAT_MAP = {
'plain': 'default',
'default': 'default',
'emacs': 'brief',
'brief': 'brief',
'cgitb': 'verbose',
'verbose': 'verbose'
}
def _parseLocalVariables(line):
"""
Accepts a single line in Emacs local variable declaration format and
returns a dict of all the variables {name: value}.
Raises ValueError if 'line' is in the wrong format.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
paren = '-*-'
start = line.find(paren) + len(paren)
end = line.rfind(paren)
if start == -1 or end == -1:
raise ValueError("%r not a valid local variable declaration" % (line,))
items = line[start:end].split(';')
localVars = {}
for item in items:
if len(item.strip()) == 0:
continue
split = item.split(':')
if len(split) != 2:
raise ValueError("%r contains invalid declaration %r"
% (line, item))
localVars[split[0].strip()] = split[1].strip()
return localVars
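# Illustrative note (not part of the original module): a typical first-line
# declaration parses like this (hypothetical module name):
#     _parseLocalVariables("# -*- test-case-name: myproj.test.test_foo -*-")
#     # -> {'test-case-name': 'myproj.test.test_foo'}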
def loadLocalVariables(filename):
"""
Accepts a filename and attempts to load the Emacs variable declarations
from that file, simulating what Emacs does.
See http://www.gnu.org/software/emacs/manual/html_node/File-Variables.html
"""
f = file(filename, "r")
lines = [f.readline(), f.readline()]
f.close()
for line in lines:
try:
return _parseLocalVariables(line)
except ValueError:
pass
return {}
def getTestModules(filename):
testCaseVar = loadLocalVariables(filename).get('test-case-name', None)
if testCaseVar is None:
return []
return testCaseVar.split(',')
def isTestFile(filename):
"""
Returns true if 'filename' looks like a file containing unit tests.
False otherwise. Doesn't care whether filename exists.
"""
basename = os.path.basename(filename)
return (basename.startswith('test_')
and os.path.splitext(basename)[1] == ('.py'))
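# Illustrative note (not part of the original module), showing the naming
# convention isTestFile() checks for:
#     isTestFile("/some/where/test_foo.py")   # -> True
#     isTestFile("/some/where/foo_test.py")   # -> False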
def _reporterAction():
return usage.CompleteList([p.longOpt for p in
plugin.getPlugins(itrial.IReporter)])
def _maybeFindSourceLine(testThing):
"""
Try to find the source line of the given test thing.
@param testThing: the test item to attempt to inspect
@type testThing: an L{TestCase}, test method, or module, though only the
former two have a chance to succeed
@rtype: int
@return: the starting source line, or -1 if one couldn't be found
"""
# an instance of L{TestCase} -- locate the test it will run
method = getattr(testThing, "_testMethodName", None)
if method is not None:
testThing = getattr(testThing, method)
# If it's a function, we can get the line number even if the source file no
# longer exists
code = getattr(testThing, "__code__", None)
if code is not None:
return code.co_firstlineno
try:
return inspect.getsourcelines(testThing)[1]
except (IOError, TypeError):
# either testThing is a module, which raised a TypeError, or the file
# couldn't be read
return -1
# orders which can be passed to trial --order
_runOrders = {
"alphabetical" : (
"alphabetical order for test methods, arbitrary order for test cases",
runner.name),
"toptobottom" : (
"attempt to run test cases and methods in the order they were defined",
_maybeFindSourceLine),
}
def _checkKnownRunOrder(order):
"""
Check that the given order is a known test running order.
Does nothing else, since looking up the appropriate callable to sort the
tests should be done when it actually will be used, as the default argument
will not be coerced by this function.
@param order: one of the known orders in C{_runOrders}
@return: the order unmodified
"""
if order not in _runOrders:
raise usage.UsageError(
"--order must be one of: %s. See --help-orders for details" %
(", ".join(repr(order) for order in _runOrders),))
return order
class _BasicOptions(object):
"""
Basic options shared between trial and its local workers.
"""
synopsis = """%s [options] [[file|package|module|TestCase|testmethod]...]
""" % (os.path.basename(sys.argv[0]),)
longdesc = ("trial loads and executes a suite of unit tests, obtained "
"from modules, packages and files listed on the command line.")
optFlags = [["help", "h"],
["no-recurse", "N", "Don't recurse into packages"],
['help-orders', None, "Help on available test running orders"],
['help-reporters', None,
"Help on available output plugins (reporters)"],
["rterrors", "e", "realtime errors, print out tracebacks as "
"soon as they occur"],
["unclean-warnings", None,
"Turn dirty reactor errors into warnings"],
["force-gc", None, "Have Trial run gc.collect() before and "
"after each test case."],
["exitfirst", "x",
"Exit after the first non-successful result (cannot be "
"specified along with --jobs)."],
]
optParameters = [
["order", "o", None,
"Specify what order to run test cases and methods. "
"See --help-orders for more info.", _checkKnownRunOrder],
["random", "z", None,
"Run tests in random order using the specified seed"],
['temp-directory', None, '_trial_temp',
'Path to use as working directory for tests.'],
['reporter', None, 'verbose',
'The reporter to use for this test run. See --help-reporters for '
'more info.']]
compData = usage.Completions(
optActions={"order": usage.CompleteList(_runOrders),
"reporter": _reporterAction,
"logfile": usage.CompleteFiles(descr="log file name"),
"random": usage.Completer(descr="random seed")},
extraActions=[usage.CompleteFiles(
"*.py", descr="file | module | package | TestCase | testMethod",
repeat=True)],
)
fallbackReporter = reporter.TreeReporter
tracer = None
def __init__(self):
self['tests'] = []
usage.Options.__init__(self)
def coverdir(self):
"""
Return a L{FilePath} representing the directory into which coverage
results should be written.
"""
coverdir = 'coverage'
result = FilePath(self['temp-directory']).child(coverdir)
print("Setting coverage directory to %s." % (result.path,))
return result
# TODO: Some of the opt_* methods on this class have docstrings and some do
    # not. This is mostly because usage.Options currently will replace
# any intended output in optFlags and optParameters with the
# docstring. See #6427. When that is fixed, all methods should be
# given docstrings (and it should be verified that those with
# docstrings already have content suitable for printing as usage
# information).
def opt_coverage(self):
"""
Generate coverage information in the coverage file in the
directory specified by the temp-directory option.
"""
import trace
self.tracer = trace.Trace(count=1, trace=0)
sys.settrace(self.tracer.globaltrace)
self['coverage'] = True
def opt_testmodule(self, filename):
"""
Filename to grep for test cases (-*- test-case-name).
"""
# If the filename passed to this parameter looks like a test module
# we just add that to the test suite.
#
# If not, we inspect it for an Emacs buffer local variable called
# 'test-case-name'. If that variable is declared, we try to add its
# value to the test suite as a module.
#
# This parameter allows automated processes (like Buildbot) to pass
# a list of files to Trial with the general expectation of "these files,
# whatever they are, will get tested"
if not os.path.isfile(filename):
sys.stderr.write("File %r doesn't exist\n" % (filename,))
return
filename = os.path.abspath(filename)
if isTestFile(filename):
self['tests'].append(filename)
else:
self['tests'].extend(getTestModules(filename))
def opt_spew(self):
"""
Print an insanely verbose log of everything that happens. Useful
when debugging freezes or locks in complex code.
"""
sys.settrace(spewer)
def opt_help_orders(self):
synopsis = ("Trial can attempt to run test cases and their methods in "
"a few different orders. You can select any of the "
"following options using --order=<foo>.\n")
print(synopsis)
for name, (description, _) in sorted(_runOrders.items()):
print(' ', name, '\t', description)
sys.exit(0)
def opt_help_reporters(self):
synopsis = ("Trial's output can be customized using plugins called "
"Reporters. You can\nselect any of the following "
"reporters using --reporter=<foo>\n")
print(synopsis)
for p in plugin.getPlugins(itrial.IReporter):
print(' ', p.longOpt, '\t', p.description)
sys.exit(0)
def opt_disablegc(self):
"""
Disable the garbage collector
"""
self["disablegc"] = True
gc.disable()
def opt_tbformat(self, opt):
"""
Specify the format to display tracebacks with. Valid formats are
'plain', 'emacs', and 'cgitb' which uses the nicely verbose stdlib
cgitb.text function
"""
try:
self['tbformat'] = TBFORMAT_MAP[opt]
except KeyError:
raise usage.UsageError(
"tbformat must be 'plain', 'emacs', or 'cgitb'.")
def opt_recursionlimit(self, arg):
"""
see sys.setrecursionlimit()
"""
try:
sys.setrecursionlimit(int(arg))
except (TypeError, ValueError):
raise usage.UsageError(
"argument to recursionlimit must be an integer")
else:
self["recursionlimit"] = int(arg)
def opt_random(self, option):
try:
self['random'] = long(option)
except ValueError:
raise usage.UsageError(
"Argument to --random must be a positive integer")
else:
if self['random'] < 0:
raise usage.UsageError(
"Argument to --random must be a positive integer")
elif self['random'] == 0:
self['random'] = long(time.time() * 100)
def opt_without_module(self, option):
"""
Fake the lack of the specified modules, separated with commas.
"""
self["without-module"] = option
for module in option.split(","):
if module in sys.modules:
warnings.warn("Module '%s' already imported, "
"disabling anyway." % (module,),
category=RuntimeWarning)
sys.modules[module] = None
def parseArgs(self, *args):
self['tests'].extend(args)
def _loadReporterByName(self, name):
for p in plugin.getPlugins(itrial.IReporter):
qual = "%s.%s" % (p.module, p.klass)
if p.longOpt == name:
return reflect.namedAny(qual)
raise usage.UsageError("Only pass names of Reporter plugins to "
"--reporter. See --help-reporters for "
"more info.")
def postOptions(self):
# Only load reporters now, as opposed to any earlier, to avoid letting
# application-defined plugins muck up reactor selecting by importing
# t.i.reactor and causing the default to be installed.
self['reporter'] = self._loadReporterByName(self['reporter'])
if 'tbformat' not in self:
self['tbformat'] = 'default'
if self['order'] is not None and self['random'] is not None:
raise usage.UsageError(
"You can't specify --random when using --order")
class Options(_BasicOptions, usage.Options, app.ReactorSelectionMixin):
"""
Options to the trial command line tool.
@ivar _workerFlags: List of flags which are accepted by trial distributed
workers. This is used by C{_getWorkerArguments} to build the command
line arguments.
@type _workerFlags: C{list}
    @ivar _workerParameters: List of parameters which are accepted by trial
        distributed workers. This is used by C{_getWorkerArguments} to build
the command line arguments.
@type _workerParameters: C{list}
"""
optFlags = [
["debug", "b", "Run tests in a debugger. If that debugger is "
"pdb, will load '.pdbrc' from current directory if it exists."
],
["debug-stacktraces", "B", "Report Deferred creation and "
"callback stack traces"],
["nopm", None, "don't automatically jump into debugger for "
"postmorteming of exceptions"],
["dry-run", 'n', "do everything but run the tests"],
["profile", None, "Run tests under the Python profiler"],
["until-failure", "u", "Repeat test until it fails"],
]
optParameters = [
["debugger", None, "pdb", "the fully qualified name of a debugger to "
"use if --debug is passed"],
["logfile", "l", "test.log", "log file name"],
["jobs", "j", None, "Number of local workers to run"]
]
compData = usage.Completions(
optActions = {
"tbformat": usage.CompleteList(["plain", "emacs", "cgitb"]),
"reporter": _reporterAction,
},
)
_workerFlags = ["disablegc", "force-gc", "coverage"]
_workerParameters = ["recursionlimit", "reactor", "without-module"]
fallbackReporter = reporter.TreeReporter
extra = None
tracer = None
def opt_jobs(self, number):
"""
Number of local workers to run, a strictly positive integer.
"""
try:
number = int(number)
except ValueError:
raise usage.UsageError(
"Expecting integer argument to jobs, got '%s'" % number)
if number <= 0:
raise usage.UsageError(
"Argument to jobs must be a strictly positive integer")
self["jobs"] = number
def _getWorkerArguments(self):
"""
Return a list of options to pass to distributed workers.
"""
args = []
for option in self._workerFlags:
if self.get(option) is not None:
if self[option]:
args.append("--%s" % (option,))
for option in self._workerParameters:
if self.get(option) is not None:
args.extend(["--%s" % (option,), str(self[option])])
return args
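    # Illustrative note (not part of the original module): if --force-gc and
    # --recursionlimit 2000 were passed, _getWorkerArguments() would return
    # something like ["--force-gc", "--recursionlimit", "2000"], which is what
    # gets forwarded to each local worker's command line.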
def postOptions(self):
_BasicOptions.postOptions(self)
if self['jobs']:
conflicts = ['debug', 'profile', 'debug-stacktraces', 'exitfirst']
for option in conflicts:
if self[option]:
raise usage.UsageError(
"You can't specify --%s when using --jobs" % option)
if self['nopm']:
if not self['debug']:
raise usage.UsageError("You must specify --debug when using "
"--nopm ")
failure.DO_POST_MORTEM = False
def _initialDebugSetup(config):
# do this part of debug setup first for easy debugging of import failures
if config['debug']:
failure.startDebugMode()
if config['debug'] or config['debug-stacktraces']:
defer.setDebugging(True)
def _getSuite(config):
loader = _getLoader(config)
recurse = not config['no-recurse']
return loader.loadByNames(config['tests'], recurse)
def _getLoader(config):
loader = runner.TestLoader()
if config['random']:
randomer = random.Random()
randomer.seed(config['random'])
loader.sorter = lambda x : randomer.random()
print('Running tests shuffled with seed %d\n' % config['random'])
elif config['order']:
_, sorter = _runOrders[config['order']]
loader.sorter = sorter
if not config['until-failure']:
loader.suiteFactory = runner.DestructiveTestSuite
return loader
def _wrappedPdb():
"""
Wrap an instance of C{pdb.Pdb} with readline support and load any .rcs.
"""
dbg = pdb.Pdb()
try:
namedModule('readline')
except ImportError:
print("readline module not available")
sys.exc_clear()
for path in ('.pdbrc', 'pdbrc'):
if os.path.exists(path):
try:
rcFile = file(path, 'r')
except IOError:
sys.exc_clear()
else:
dbg.rcLines.extend(rcFile.readlines())
return dbg
class _DebuggerNotFound(Exception):
"""
A debugger import failed.
Used to allow translating these errors into usage error messages.
"""
def _makeRunner(config):
"""
Return a trial runner class set up with the parameters extracted from
C{config}.
@return: A trial runner instance.
@rtype: L{runner.TrialRunner} or C{DistTrialRunner} depending on the
configuration.
"""
cls = runner.TrialRunner
args = {'reporterFactory': config['reporter'],
'tracebackFormat': config['tbformat'],
'realTimeErrors': config['rterrors'],
'uncleanWarnings': config['unclean-warnings'],
'logfile': config['logfile'],
'workingDirectory': config['temp-directory']}
if config['dry-run']:
args['mode'] = runner.TrialRunner.DRY_RUN
elif config['jobs']:
from twisted.trial._dist.disttrial import DistTrialRunner
cls = DistTrialRunner
args['workerNumber'] = config['jobs']
args['workerArguments'] = config._getWorkerArguments()
else:
if config['debug']:
args['mode'] = runner.TrialRunner.DEBUG
debugger = config['debugger']
if debugger != 'pdb':
try:
args['debugger'] = reflect.namedAny(debugger)
except reflect.ModuleNotFound:
raise _DebuggerNotFound(
'%r debugger could not be found.' % (debugger,))
else:
args['debugger'] = _wrappedPdb()
args['exitFirst'] = config['exitfirst']
args['profile'] = config['profile']
args['forceGarbageCollection'] = config['force-gc']
return cls(**args)
def run():
if len(sys.argv) == 1:
sys.argv.append("--help")
config = Options()
try:
config.parseOptions()
except usage.error as ue:
raise SystemExit("%s: %s" % (sys.argv[0], ue))
_initialDebugSetup(config)
try:
trialRunner = _makeRunner(config)
except _DebuggerNotFound as e:
raise SystemExit('%s: %s' % (sys.argv[0], str(e)))
suite = _getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir().path)
sys.exit(not test_result.wasSuccessful())
|
|
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for methods in utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pytest
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from qkeras import *
from qkeras.utils import get_model_sparsity
from qkeras.utils import model_quantize
from qkeras.utils import convert_to_folded_model
from qkeras.utils import is_TFOpLambda_layer
from qkeras.utils import find_bn_fusing_layer_pair
from qkeras.utils import add_bn_fusing_weights
def create_quantized_network():
"""Creates a simple quantized conv net model."""
# Create a simple model
xi = Input((28, 28, 1))
x = Conv2D(32, (3, 3))(xi)
x = Activation("relu")(x)
x = Conv2D(32, (3, 3), activation="relu")(x)
x = Activation("softmax")(x)
model = Model(inputs=xi, outputs=x)
# Quantize the model
quantizer_config = {
"QConv2D": {
"kernel_quantizer": "quantized_bits(4)",
"bias_quantizer": "quantized_bits(4)"
},
"QActivation": {
"relu": "ternary"
}
}
activation_bits = 4
qmodel = model_quantize(model, quantizer_config, activation_bits)
return qmodel
def create_quantized_po2_network():
"""Creates a simple quantized conv net model with po2 quantizers."""
xi = Input((28, 28, 1))
x = QConv2D(32, (3, 3), kernel_quantizer=quantized_po2(4))(xi)
x = QActivation(quantized_bits(8))(x)
x = QConv2D(32, (3, 3), kernel_quantizer=quantized_po2(4))(x)
x = QActivation(quantized_bits(8))(x)
qmodel = Model(xi, x, name='simple_po2_qmodel')
return qmodel
def set_network_sparsity(model, sparsity):
"""Set the sparsity of the given model using random weights."""
for layer in model.layers:
new_weights = []
for w in layer.get_weights():
# Create weights with desired sparsity
sparse_weights = np.random.rand(w.size)+0.1
sparse_weights[:int(w.size*sparsity)] = 0
np.random.shuffle(sparse_weights)
new_weights.append(sparse_weights.reshape(w.shape))
layer.set_weights(new_weights)
return model
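# Illustrative note (not part of the original tests): set_network_sparsity() zeroes
# roughly `sparsity * w.size` entries of every weight tensor, so, hypothetically,
#     qmodel = set_network_sparsity(qmodel, 0.5)
# should leave about half of each layer's weights at exactly zero, which is the
# fraction get_model_sparsity() is expected to report in the tests below.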
def test_get_model_sparsity():
"""Tests if the method get_model_sparsity in utils.py works correctly."""
qmodel = create_quantized_network()
# Generate sparsity levels to test
sparsity_levels = np.concatenate((np.random.rand(10), [1.0, 0.0])).round(2)
# Test various sparsity levels
for true_sparsity in sparsity_levels:
qmodel = set_network_sparsity(qmodel, true_sparsity)
calc_sparsity = get_model_sparsity(qmodel)
assert np.abs(calc_sparsity - true_sparsity) < 0.01
def test_get_po2_model_sparsity():
"""Tests get_model_sparsity on a po2-quantized model.
Models quantized with po2 quantizers should have a sparsity near 0 because
if the exponent is set to 0, the value of the weight will equal 2^0 == 1 != 0
"""
qmodel = create_quantized_po2_network()
# Generate sparsity levels to test
sparsity_levels = np.concatenate((np.random.rand(10), [1.0, 0.0])).round(2)
# Test various sparsity levels
for set_sparsity in sparsity_levels:
qmodel = set_network_sparsity(qmodel, set_sparsity)
calc_sparsity = get_model_sparsity(qmodel)
assert np.abs(calc_sparsity - 0) < 0.01
def test_convert_to_folded_model():
"""Test convert_to_folded_model to work properly on non-sequential model."""
def get_add_model():
x = x_in = Input(shape=(4, 4, 1), name="input")
x1 = Conv2D(4, kernel_size=(2, 2), padding="valid", strides=(1, 1),
name="conv2d_1")(x)
x1 = BatchNormalization(name="bn_1")(x1)
x1 = Activation("relu", name="relu_1")(x1)
x2 = Conv2D(4, kernel_size=(2, 2), padding="valid", strides=(1, 1),
name="conv2d_2")(x)
x2 = BatchNormalization(name="bn_2")(x2)
x2 = Activation("relu", name="relu_2")(x2)
x = Add(name="add")([x1, x2])
x = Softmax()(x)
return Model(inputs=[x_in], outputs=[x])
model = get_add_model()
fmodel, _ = convert_to_folded_model(model)
assert fmodel.layers[5].name == "add"
  # test if convert_to_folded_model works with TFOpLambda layers
def hard_sigmoid(x):
return ReLU(6.)(x + 3.) * (1. / 6.)
def hard_swish(x):
return Multiply()([hard_sigmoid(x), x])
def get_lambda_model():
x = x_in = Input(shape=(4, 4, 1), name="input")
x = Conv2D(
4, kernel_size=(2, 2), padding="valid", strides=(1, 1),
name="conv2d_1")(x)
x = hard_swish(x)
return Model(inputs=[x_in], outputs=[x])
model = get_lambda_model()
fmodel, _ = convert_to_folded_model(model)
assert is_TFOpLambda_layer(model.layers[2])
assert is_TFOpLambda_layer(model.layers[4])
assert isinstance(fmodel.layers[5], Multiply)
def test_find_bn_fusing_layer_pair():
x = x_in = Input((23, 23, 1), name="input")
x1 = QConv2D(
2, 2, 1,
kernel_quantizer=quantized_bits(4, 0, 1),
bias_quantizer=quantized_bits(4, 0, 1),
use_bias=False,
name="conv1")(x)
x1 = QBatchNormalization(
mean_quantizer=quantized_bits(4, 0, 1),
gamma_quantizer=None,
variance_quantizer=None,
beta_quantizer=quantized_bits(4, 0, 1),
inverse_quantizer=quantized_bits(8, 0, 1), name="bn1")(x1)
x2 = QConv2D(
2, 2, 1,
kernel_quantizer=quantized_bits(3, 0),
bias_quantizer=quantized_bits(3, 2),
name="conv2")(x)
x2 = QBatchNormalization(
mean_quantizer=quantized_bits(4, 0, 1),
gamma_quantizer=None,
variance_quantizer=None,
beta_quantizer=quantized_bits(4, 0, 1),
inverse_quantizer=quantized_bits(8, 0, 1), name="bn2")(x2)
x = Add(name="add")([x1, x2])
model = Model(inputs=[x_in], outputs=[x])
(conv_bn_pair_dict, _) = find_bn_fusing_layer_pair(model)
assert conv_bn_pair_dict["conv1"] == "bn1"
assert conv_bn_pair_dict["conv2"] == "bn2"
conv_layer = model.layers[1]
bn_layer = model.layers[3]
conv_layer.set_weights([
np.array([[[[0.5, 0.75]], [[1.5, -0.625]]],
[[[-0.875, 1.25]], [[-1.25, -2.5]]]])
])
bn_layer.set_weights([
np.array([1., 0.25]),
np.array([0.5, 1.0]),
np.array([0.5, 2.5]),
np.array([1.5, 1.])
])
saved_weights = {}
saved_weights[conv_layer.name] = {}
add_bn_fusing_weights(conv_layer, bn_layer, saved_weights)
d = saved_weights[conv_layer.name]
assert d["enable_bn_fusing"]
assert d["fused_bn_layer_name"] == "bn1"
assert np.all(d["bn_inv"] == np.array([0.8125, 0.25]))
assert np.all(d["fused_bias"] == np.array([0.09375, 0.65625]))
if __name__ == "__main__":
pytest.main([__file__])
|
|
# -*- coding: utf-8 -*-
'''
Swift utility class
===================
Author: Anthony Stanton <[email protected]>
'''
from __future__ import absolute_import
# Import python libs
import logging
from sys import stdout
from os import makedirs
from os.path import dirname, isdir
from errno import EEXIST
# Import Salt libs
import salt.utils
# Get logging started
log = logging.getLogger(__name__)
# Import Swift client libs
HAS_SWIFT = False
try:
from swiftclient import client
HAS_SWIFT = True
except ImportError:
pass
def check_swift():
return HAS_SWIFT
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
# we've been playing fast and loose with kwargs, but the swiftclient isn't
# going to accept any old thing
def _sanitize(kwargs):
variables = (
'user', 'key', 'authurl',
'retries', 'preauthurl', 'preauthtoken', 'snet',
'starting_backoff', 'max_backoff', 'tenant_name',
'os_options', 'auth_version', 'cacert',
'insecure', 'ssl_compression'
)
ret = {}
for var in kwargs:
if var in variables:
ret[var] = kwargs[var]
return ret
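# Illustrative note (not part of the original module): _sanitize() keeps only the
# keyword arguments that swiftclient's Connection accepts, e.g. (hypothetical):
#     _sanitize({'user': 'admin', 'profile': 'dev'})  # -> {'user': 'admin'}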
class SaltSwift(object):
'''
Class for all swiftclient functions
'''
def __init__(
self,
user,
tenant_name,
auth_url,
password=None,
auth_version=2,
**kwargs
):
'''
Set up openstack credentials
'''
if not HAS_SWIFT:
log.error('Error:: unable to find swiftclient. Try installing it from the appropriate repository.')
return None
self.kwargs = kwargs.copy()
self.kwargs['user'] = user
self.kwargs['password'] = password
self.kwargs['tenant_name'] = tenant_name
self.kwargs['authurl'] = auth_url
self.kwargs['auth_version'] = auth_version
if 'key' not in self.kwargs:
self.kwargs['key'] = password
self.kwargs = _sanitize(self.kwargs)
self.conn = client.Connection(**self.kwargs)
def get_account(self):
'''
List Swift containers
'''
try:
listing = self.conn.get_account()
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def get_container(self, cont):
'''
List files in a Swift container
'''
try:
listing = self.conn.get_container(cont)
return listing
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_container(self, cont):
'''
Create a new Swift container
'''
try:
self.conn.put_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_container(self, cont):
'''
Delete a Swift container
'''
try:
self.conn.delete_container(cont)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def post_container(self, cont, metadata=None):
'''
Update container metadata
'''
pass
def head_container(self, cont):
'''
Get container metadata
'''
pass
def get_object(self, cont, obj, local_file=None, return_bin=False):
'''
Retrieve a file from Swift
'''
try:
if local_file is None and return_bin is False:
return False
headers, body = self.conn.get_object(cont, obj, resp_chunk_size=65536)
if return_bin is True:
fp = stdout
else:
dirpath = dirname(local_file)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
fp = salt.utils.fopen(local_file, 'wb')
read_length = 0
for chunk in body:
read_length += len(chunk)
fp.write(chunk)
fp.close()
return True
# ClientException
# file/dir exceptions
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def put_object(self, cont, obj, local_file):
'''
Upload a file to Swift
'''
try:
with salt.utils.fopen(local_file, 'rb') as fp_:
self.conn.put_object(cont, obj, fp_)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def delete_object(self, cont, obj):
'''
Delete a file from Swift
'''
try:
self.conn.delete_object(cont, obj)
return True
except Exception as exc:
log.error('There was an error::')
if hasattr(exc, 'code') and hasattr(exc, 'msg'):
log.error(' Code: {0}: {1}'.format(exc.code, exc.msg))
log.error(' Content: \n{0}'.format(getattr(exc, 'read', lambda: str(exc))()))
return False
def head_object(self, cont, obj):
'''
Get object metadata
'''
pass
def post_object(self, cont, obj, metadata):
'''
Update object metadata
'''
pass
|
|
# coding: utf-8
# Copyright (c) 2013 Jorge Javier Araya Navarro <[email protected]>
#
# This file is free software: you may copy, redistribute and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# Python Interpreter
# 95% of the code from 'Bruce: the presentation tool' by Richard Jones
# http://code.google.com/p/bruce-tpt/
#
#
__docformat__ = 'restructuredtext'
import sys
import os
import code
import pyglet
from pyglet import graphics
from pyglet import text
from pyglet.text import caret, document, layout
import summa
from summa.director import director
from base_layers import Layer
from util_layers import ColorLayer
__all__ = ['PythonInterpreterLayer']
class Output:
def __init__(self, display, realstdout):
self.out = display
self.realstdout = realstdout
self.data = ''
def write(self, data):
self.out(data)
class MyInterpreter(code.InteractiveInterpreter):
def __init__(self, locals, display):
self.write = display
code.InteractiveInterpreter.__init__(self, locals=locals)
def execute(self, input):
old_stdout = sys.stdout
sys.stdout = Output(self.write, old_stdout)
more = self.runsource(input)
sys.stdout = old_stdout
return more
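# Illustrative note (not part of the original module): MyInterpreter.execute()
# temporarily swaps sys.stdout for an Output wrapper, so anything the executed
# source prints is forwarded to the display callback. Hypothetical usage:
#     interp = MyInterpreter({}, my_display_callback)
#     interp.execute("print 1 + 1")   # my_display_callback receives "2" and a newline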
class PythonInterpreterLayer(ColorLayer):
'''Runs an interactive Python interpreter as a child `Layer` of the current `Scene`.
'''
cfg = {'code.font_name':'Arial',
'code.font_size':12,
'code.color':(255,255,255,255),
'caret.color':(255,255,255),
}
name = 'py'
prompt = ">>> " #: python prompt
prompt_more = "... " #: python 'more' prompt
doing_more = False
is_event_handler = True #: enable pyglet's events
def __init__(self):
super(PythonInterpreterLayer, self).__init__( 32,32,32,192 )
self.content = self.prompt
local_vars = director.interpreter_locals
local_vars["self"] = self
self.interpreter = MyInterpreter(
local_vars, self._write)
self.current_input = []
self.history = ['']
self.history_pos = 0
def on_enter(self):
super(PythonInterpreterLayer, self).on_enter()
vw,vh = summa.director.director.get_window_size()
# format the code
self.document = document.FormattedDocument(self.content)
self.document.set_style(0, len(self.document.text), {
'font_name': self.cfg['code.font_name'],
'font_size': self.cfg['code.font_size'],
'color': self.cfg['code.color'],
})
self.batch = graphics.Batch()
# generate the document
self.layout = layout.IncrementalTextLayout(self.document,
vw, vh, multiline=True, batch=self.batch)
self.layout.anchor_y= 'top'
self.caret = caret.Caret(self.layout, color=self.cfg['caret.color'] )
self.caret.on_activate()
self.on_resize(vw, vh)
self.start_of_line = len(self.document.text)
def on_resize(self, x, y):
vw, vh = director.get_window_size()
self.layout.begin_update()
self.layout.height = vh
self.layout.x = 2
self.layout.width = vw - 4
self.layout.y = vh
self.layout.end_update()
# XXX: hack
x,y = director.window.width, director.window.height
self.layout.top_group._scissor_width=x-4
self.caret.position = len(self.document.text)
def on_exit(self):
super(PythonInterpreterLayer, self).on_exit()
self.content = self.document.text
self.document = None
self.layout = None
self.batch = None
self.caret = None
def on_key_press(self, symbol, modifiers):
if symbol == pyglet.window.key.TAB:
return self.caret.on_text('\t')
elif symbol in (pyglet.window.key.ENTER, pyglet.window.key.NUM_ENTER):
# write the newline
self._write('\n')
line = self.document.text[self.start_of_line:]
if line.strip() == 'help()':
line = 'print "help() not supported, sorry!"'
self.current_input.append(line)
self.history_pos = len(self.history)
if line.strip():
self.history[self.history_pos-1] = line.strip()
self.history.append('')
more = False
if not self.doing_more:
more = self.interpreter.execute('\n'.join(self.current_input))
if self.doing_more and not line.strip():
self.doing_more = False
self.interpreter.execute('\n'.join(self.current_input))
more = more or self.doing_more
if not more:
self.current_input = []
self._write(self.prompt)
else:
self.doing_more = True
self._write(self.prompt_more)
self.start_of_line = len(self.document.text)
self.caret.position = len(self.document.text)
elif symbol == pyglet.window.key.SPACE:
pass
else:
return pyglet.event.EVENT_UNHANDLED
return pyglet.event.EVENT_HANDLED
def on_text(self, symbol):
# squash carriage return - we already handle them above
if symbol == '\r':
return pyglet.event.EVENT_HANDLED
self._scroll_to_bottom()
return self.caret.on_text(symbol)
def on_text_motion(self, motion):
at_sol = self.caret.position == self.start_of_line
if motion == pyglet.window.key.MOTION_UP:
# move backward in history, storing the current line of input
# if we're at the very end of time
line = self.document.text[self.start_of_line:]
if self.history_pos == len(self.history)-1:
self.history[self.history_pos] = line
self.history_pos = max(0, self.history_pos-1)
self.document.delete_text(self.start_of_line,
len(self.document.text))
self._write(self.history[self.history_pos])
self.caret.position = len(self.document.text)
elif motion == pyglet.window.key.MOTION_DOWN:
# move forward in the history
self.history_pos = min(len(self.history)-1, self.history_pos+1)
self.document.delete_text(self.start_of_line,
len(self.document.text))
self._write(self.history[self.history_pos])
self.caret.position = len(self.document.text)
elif motion == pyglet.window.key.MOTION_BACKSPACE:
# can't delete the prompt
if not at_sol:
return self.caret.on_text_motion(motion)
elif motion == pyglet.window.key.MOTION_LEFT:
# can't move back beyond start of line
if not at_sol:
return self.caret.on_text_motion(motion)
elif motion == pyglet.window.key.MOTION_PREVIOUS_WORD:
# can't move back word beyond start of line
if not at_sol:
return self.caret.on_text_motion(motion)
else:
return self.caret.on_text_motion(motion)
return pyglet.event.EVENT_HANDLED
def _write(self, s):
self.document.insert_text(len(self.document.text), s, {
'font_name': self.cfg['code.font_name'],
'font_size': self.cfg['code.font_size'],
'color': self.cfg['code.color'],
})
self._scroll_to_bottom()
def _scroll_to_bottom(self):
# on key press always move the view to the bottom of the screen
if self.layout.height < self.layout.content_height:
self.layout.anchor_y= 'bottom'
self.layout.y = 0
self.layout.view_y = 0
if self.caret.position < self.start_of_line:
self.caret.position = len(self.document.text)
def draw(self):
super( PythonInterpreterLayer, self).draw()
self.batch.draw()
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Interfaces for the face layer of RPC Framework."""
import abc
import enum
# cardinality, style, exceptions, abandonment, future, and stream are
# referenced from specification in this module.
from grpc.framework.common import cardinality # pylint: disable=unused-import
from grpc.framework.common import style # pylint: disable=unused-import
from grpc.framework.face import exceptions # pylint: disable=unused-import
from grpc.framework.foundation import abandonment # pylint: disable=unused-import
from grpc.framework.foundation import future # pylint: disable=unused-import
from grpc.framework.foundation import stream # pylint: disable=unused-import
@enum.unique
class Abortion(enum.Enum):
"""Categories of RPC abortion."""
CANCELLED = 'cancelled'
EXPIRED = 'expired'
NETWORK_FAILURE = 'network failure'
SERVICED_FAILURE = 'serviced failure'
SERVICER_FAILURE = 'servicer failure'
class CancellableIterator(object):
"""Implements the Iterator protocol and affords a cancel method."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __iter__(self):
"""Returns the self object in accordance with the Iterator protocol."""
raise NotImplementedError()
@abc.abstractmethod
def next(self):
"""Returns a value or raises StopIteration per the Iterator protocol."""
raise NotImplementedError()
@abc.abstractmethod
def cancel(self):
"""Requests cancellation of whatever computation underlies this iterator."""
raise NotImplementedError()
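# Illustrative sketch (not part of the original interfaces): a minimal in-memory
# CancellableIterator backed by a list, only to show the expected contract; the
# class name and behaviour here are hypothetical, not part of the framework.
class _ListCancellableIterator(CancellableIterator):
  """Yields canned values and stops early once cancel() has been called."""
  def __init__(self, values):
    self._iterator = iter(values)
    self._cancelled = False
  def __iter__(self):
    return self
  def next(self):
    if self._cancelled:
      raise StopIteration()
    return next(self._iterator)
  def cancel(self):
    self._cancelled = True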
class RpcContext(object):
"""Provides RPC-related information and action."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def is_active(self):
"""Describes whether the RPC is active or has terminated."""
raise NotImplementedError()
@abc.abstractmethod
def time_remaining(self):
"""Describes the length of allowed time remaining for the RPC.
Returns:
A nonnegative float indicating the length of allowed time in seconds
remaining for the RPC to complete before it is considered to have timed
out.
"""
raise NotImplementedError()
@abc.abstractmethod
def add_abortion_callback(self, abortion_callback):
"""Registers a callback to be called if the RPC is aborted.
Args:
abortion_callback: A callable to be called and passed an Abortion value
in the event of RPC abortion.
"""
raise NotImplementedError()
class Call(object):
"""Invocation-side representation of an RPC.
Attributes:
context: An RpcContext affording information about the RPC.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def cancel(self):
"""Requests cancellation of the RPC."""
raise NotImplementedError()
class UnaryUnaryMultiCallable(object):
"""Affords invoking a unary-unary RPC in any call style."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, request, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future's result value will be the response value of the RPC.
In the event of RPC abortion, the returned Future's exception value
will be an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, request, response_callback, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
      response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
class UnaryStreamMultiCallable(object):
"""Affords invoking a unary-stream RPC in any call style."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, request, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion
of the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, request, response_consumer, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request: The request value for the RPC.
      response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
class StreamUnaryMultiCallable(object):
"""Affords invoking a stream-unary RPC in any call style."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, request_iterator, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future(self, request_iterator, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future's result value will be the response value of the RPC.
In the event of RPC abortion, the returned Future's exception value
will be an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, response_callback, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
      response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
class StreamStreamMultiCallable(object):
"""Affords invoking a stream-stream RPC in any call style."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __call__(self, request_iterator, timeout):
"""Synchronously invokes the underlying RPC.
Args:
request_iterator: An iterator that yields request values for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion
of the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event(self, response_consumer, abortion_callback, timeout):
"""Asynchronously invokes the underlying RPC.
Args:
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
class MethodImplementation(object):
"""A sum type that describes an RPC method implementation.
Attributes:
cardinality: A cardinality.Cardinality value.
style: A style.Service value.
unary_unary_inline: The implementation of the RPC method as a callable
value that takes a request value and an RpcContext object and returns a
response value. Only non-None if cardinality is
cardinality.Cardinality.UNARY_UNARY and style is style.Service.INLINE.
unary_stream_inline: The implementation of the RPC method as a callable
value that takes a request value and an RpcContext object and returns an
iterator of response values. Only non-None if cardinality is
cardinality.Cardinality.UNARY_STREAM and style is style.Service.INLINE.
stream_unary_inline: The implementation of the RPC method as a callable
value that takes an iterator of request values and an RpcContext object
and returns a response value. Only non-None if cardinality is
cardinality.Cardinality.STREAM_UNARY and style is style.Service.INLINE.
stream_stream_inline: The implementation of the RPC method as a callable
value that takes an iterator of request values and an RpcContext object
and returns an iterator of response values. Only non-None if cardinality
is cardinality.Cardinality.STREAM_STREAM and style is
style.Service.INLINE.
unary_unary_event: The implementation of the RPC method as a callable value
that takes a request value, a response callback to which to pass the
response value of the RPC, and an RpcContext. Only non-None if
cardinality is cardinality.Cardinality.UNARY_UNARY and style is
style.Service.EVENT.
unary_stream_event: The implementation of the RPC method as a callable
value that takes a request value, a stream.Consumer to which to pass the
response values of the RPC, and an RpcContext. Only non-None if
cardinality is cardinality.Cardinality.UNARY_STREAM and style is
style.Service.EVENT.
stream_unary_event: The implementation of the RPC method as a callable
value that takes a response callback to which to pass the response value
of the RPC and an RpcContext and returns a stream.Consumer to which the
request values of the RPC should be passed. Only non-None if cardinality
is cardinality.Cardinality.STREAM_UNARY and style is style.Service.EVENT.
stream_stream_event: The implementation of the RPC method as a callable
value that takes a stream.Consumer to which to pass the response values
of the RPC and an RpcContext and returns a stream.Consumer to which the
request values of the RPC should be passed. Only non-None if cardinality
is cardinality.Cardinality.STREAM_STREAM and style is
style.Service.EVENT.
"""
__metaclass__ = abc.ABCMeta
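# --- Illustrative sketch, not part of the framework API ----------------------------
# The MethodImplementation "sum type" described above can be modeled as a plain
# namedtuple whose fields mirror the documented attributes; only the field matching
# the (cardinality, style) pair is non-None. All names below are examples only.
import collections as _collections

_ExampleMethodImplementation = _collections.namedtuple(
    '_ExampleMethodImplementation',
    ['cardinality', 'style',
     'unary_unary_inline', 'unary_stream_inline',
     'stream_unary_inline', 'stream_stream_inline',
     'unary_unary_event', 'unary_stream_event',
     'stream_unary_event', 'stream_stream_event'])


def _example_unary_unary_inline(method_cardinality, service_style, behavior):
    """Packs a request -> response callable into the sum type sketched above."""
    return _ExampleMethodImplementation(
        method_cardinality, service_style, behavior,
        None, None, None, None, None, None, None)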
class MultiMethodImplementation(object):
"""A general type able to service many RPC methods."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def service(self, name, response_consumer, context):
"""Services an RPC.
Args:
name: The RPC method name.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
context: An RpcContext object.
Returns:
A stream.Consumer with which to accept the request values of the RPC. The
consumer returned from this method may or may not be invoked to
completion: in the case of RPC abortion, RPC Framework will simply stop
passing values to this object. Implementations must not assume that this
object will be called to completion of the request stream or even called
at all.
Raises:
abandonment.Abandoned: May or may not be raised when the RPC has been
aborted.
exceptions.NoSuchMethodError: If this MultiMethod does not recognize the
given RPC method name and is not able to service the RPC.
"""
raise NotImplementedError()
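# --- Illustrative sketch, not framework code ----------------------------------------
# A MultiMethodImplementation can be as simple as a name-keyed dispatch table: each
# behavior receives the response consumer and RpcContext and returns a stream.Consumer
# for the request values. The class and attribute names here are examples only.
class _ExampleMultiMethod(MultiMethodImplementation):
    """Dispatches service() calls by RPC method name (illustrative only)."""

    def __init__(self, behaviors):
        # behaviors: dict mapping method name -> callable(response_consumer, context)
        # that returns a stream.Consumer for the request values.
        self._behaviors = behaviors

    def service(self, name, response_consumer, context):
        behavior = self._behaviors.get(name)
        if behavior is None:
            # A real implementation would raise exceptions.NoSuchMethodError here; the
            # face-layer exceptions module is not imported in this illustrative sketch.
            raise NotImplementedError('Unrecognized RPC method: %r' % name)
        return behavior(response_consumer, context)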
class GenericStub(object):
"""Affords RPC methods to callers."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def blocking_value_in_value_out(self, name, request, timeout):
"""Invokes a unary-request-unary-response RPC method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
RPC abortion).
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future_value_in_value_out(self, name, request, timeout):
"""Invokes a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future will return an outcome indicating that the RPC returned
the response value of the RPC. In the event of RPC abortion, the
returned Future will return an outcome indicating that the RPC raised
an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def inline_value_in_stream_out(self, name, request, timeout):
"""Invokes a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion of
the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def blocking_stream_in_value_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-unary-response RPC method.
This method blocks until either returning the response value of the RPC
(in the event of RPC completion) or raising an exception (in the event of
RPC abortion).
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
The response value for the RPC.
Raises:
exceptions.RpcError: Indicating that the RPC was aborted.
"""
raise NotImplementedError()
@abc.abstractmethod
def future_stream_in_value_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-unary-response RPC method.
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A future.Future representing the RPC. In the event of RPC completion, the
returned Future will return an outcome indicating that the RPC returned
the response value of the RPC. In the event of RPC abortion, the
returned Future will return an outcome indicating that the RPC raised
an exceptions.RpcError.
"""
raise NotImplementedError()
@abc.abstractmethod
def inline_stream_in_stream_out(self, name, request_iterator, timeout):
"""Invokes a stream-request-stream-response RPC method.
Args:
name: The RPC method name.
request_iterator: An iterator that yields the request values of the RPC.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A CancellableIterator that yields the response values of the RPC and
affords RPC cancellation. Drawing response values from the returned
CancellableIterator may raise exceptions.RpcError indicating abortion of
the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_value_in_value_out(
self, name, request, response_callback, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_value_in_stream_out(
self, name, request, response_consumer, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
request: The request value for the RPC.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A Call object for the RPC.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_stream_in_value_out(
self, name, response_callback, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-unary-response RPC method.
Args:
name: The RPC method name.
response_callback: A callback to be called to accept the response value
of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
@abc.abstractmethod
def event_stream_in_stream_out(
self, name, response_consumer, abortion_callback, timeout):
"""Event-driven invocation of a unary-request-stream-response RPC method.
Args:
name: The RPC method name.
response_consumer: A stream.Consumer to be called to accept the response
values of the RPC.
abortion_callback: A callback to be called and passed an Abortion value
in the event of RPC abortion.
timeout: A duration of time in seconds to allow for the RPC.
Returns:
A pair of a Call object for the RPC and a stream.Consumer to which the
request values of the RPC should be passed.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_unary_multi_callable(self, name):
"""Creates a UnaryUnaryMultiCallable for a unary-unary RPC method.
Args:
name: The RPC method name.
Returns:
A UnaryUnaryMultiCallable value for the named unary-unary RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def unary_stream_multi_callable(self, name):
"""Creates a UnaryStreamMultiCallable for a unary-stream RPC method.
Args:
name: The RPC method name.
Returns:
A UnaryStreamMultiCallable value for the named unary-stream RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_unary_multi_callable(self, name):
"""Creates a StreamUnaryMultiCallable for a stream-unary RPC method.
Args:
name: The RPC method name.
Returns:
A StreamUnaryMultiCallable value for the named stream-unary RPC method.
"""
raise NotImplementedError()
@abc.abstractmethod
def stream_stream_multi_callable(self, name):
"""Creates a StreamStreamMultiCallable for a stream-stream RPC method.
Args:
name: The RPC method name.
Returns:
A StreamStreamMultiCallable value for the named stream-stream RPC method.
"""
raise NotImplementedError()
class DynamicStub(object):
"""A stub with RPC-method-bound multi-callable attributes.
Instances of this type respond to attribute access as follows: if the
requested attribute is the name of a unary-unary RPC method, the value of the
attribute will be a UnaryUnaryMultiCallable with which to invoke the RPC
method; if the requested attribute is the name of a unary-stream RPC method,
the value of the attribute will be a UnaryStreamMultiCallable with which to
invoke the RPC method; if the requested attribute is the name of a
stream-unary RPC method, the value of the attribute will be a
StreamUnaryMultiCallable with which to invoke the RPC method; and if the
requested attribute is the name of a stream-stream RPC method, the value of
the attribute will be a StreamStreamMultiCallable with which to invoke the
RPC method.
"""
__metaclass__ = abc.ABCMeta
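# --- Illustrative usage sketch, not part of this module -----------------------------
# The three call styles that GenericStub affords for a unary-unary method differ only
# in how the caller receives the response. The method name 'SomeMethod' and the
# request object are placeholders; any concrete stub supplied by the framework can be
# passed in. With a DynamicStub, the same RPC would instead be invoked via attribute
# access, e.g. dynamic_stub.SomeMethod(request, timeout).
def _example_generic_stub_call_styles(generic_stub, request, timeout=10):
    # Blocking: returns the response value or raises exceptions.RpcError on abortion.
    response = generic_stub.blocking_value_in_value_out('SomeMethod', request, timeout)

    # Future-based: the RPC proceeds asynchronously; completion or abortion is
    # observed through the returned future.Future.
    rpc_future = generic_stub.future_value_in_value_out('SomeMethod', request, timeout)

    # Event-driven: callbacks receive the response value or an Abortion value.
    call = generic_stub.event_value_in_value_out(
        'SomeMethod', request,
        lambda response_value: None,   # response_callback
        lambda abortion: None,         # abortion_callback
        timeout)
    return response, rpc_future, call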
|
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, F, Func,
IntegerField, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' and six.PY3 else func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Test that annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.compute_v1.types import compute
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution("google-cloud-compute",).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class InterconnectsTransport(abc.ABC):
"""Abstract transport class for Interconnects."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
)
DEFAULT_HOST: str = "compute.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.delete: gapic_v1.method.wrap_method(
self.delete, default_timeout=None, client_info=client_info,
),
self.get: gapic_v1.method.wrap_method(
self.get, default_timeout=None, client_info=client_info,
),
self.get_diagnostics: gapic_v1.method.wrap_method(
self.get_diagnostics, default_timeout=None, client_info=client_info,
),
self.insert: gapic_v1.method.wrap_method(
self.insert, default_timeout=None, client_info=client_info,
),
self.list: gapic_v1.method.wrap_method(
self.list, default_timeout=None, client_info=client_info,
),
self.patch: gapic_v1.method.wrap_method(
self.patch, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def delete(
self,
) -> Callable[
[compute.DeleteInterconnectRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def get(
self,
) -> Callable[
[compute.GetInterconnectRequest],
Union[compute.Interconnect, Awaitable[compute.Interconnect]],
]:
raise NotImplementedError()
@property
def get_diagnostics(
self,
) -> Callable[
[compute.GetDiagnosticsInterconnectRequest],
Union[
compute.InterconnectsGetDiagnosticsResponse,
Awaitable[compute.InterconnectsGetDiagnosticsResponse],
],
]:
raise NotImplementedError()
@property
def insert(
self,
) -> Callable[
[compute.InsertInterconnectRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
@property
def list(
self,
) -> Callable[
[compute.ListInterconnectsRequest],
Union[compute.InterconnectList, Awaitable[compute.InterconnectList]],
]:
raise NotImplementedError()
@property
def patch(
self,
) -> Callable[
[compute.PatchInterconnectRequest],
Union[compute.Operation, Awaitable[compute.Operation]],
]:
raise NotImplementedError()
__all__ = ("InterconnectsTransport",)
|
|
#!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import glob
import logging
import pwd
import re
import time
import subprocess
import threading
import shlex
import platform
from PackagesAnalyzer import PackagesAnalyzer
from HostCheckReportFileHandler import HostCheckReportFileHandler
from Hardware import Hardware
logger = logging.getLogger()
class HostInfo:
# List of project names to be used to find alternatives folders etc.
DEFAULT_PROJECT_NAMES = [
"hadoop*", "hadoop", "hbase", "hcatalog", "hive", "ganglia", "nagios",
"oozie", "sqoop", "hue", "zookeeper", "mapred", "hdfs", "flume",
"ambari_qa", "hadoop_deploy", "rrdcached", "hcat", "ambari-qa",
"sqoop-ambari-qa", "sqoop-ambari_qa", "webhcat", "hadoop-hdfs", "hadoop-yarn",
"hadoop-mapreduce"
]
# List of live services checked for on the host; entries are either plain service
# names or maps from OS type to service name
DEFAULT_LIVE_SERVICES = [
{"redhat":"ntpd", "suse":"ntp"}
]
# Set of default users (need to be replaced with the configured user names)
DEFAULT_USERS = [
"nagios", "hive", "ambari-qa", "oozie", "hbase", "hcat", "mapred",
"hdfs", "rrdcached", "zookeeper", "flume", "sqoop", "sqoop2",
"hue", "yarn"
]
# Filters used to identify processes
PROC_FILTER = [
"hadoop", "zookeeper"
]
# Additional path patterns used to find existing directories
DIRNAME_PATTERNS = [
"/tmp/hadoop-", "/tmp/hsperfdata_"
]
# Default set of directories that are checked for existence of files and folders
DEFAULT_DIRS = [
"/etc", "/var/run", "/var/log", "/usr/lib", "/var/lib", "/var/tmp", "/tmp", "/var", "/hadoop"
]
# Packages that are used to find repos (then repos are used to find other packages)
PACKAGES = [
"hadoop", "zookeeper", "webhcat", "*-manager-server-db", "*-manager-daemons"
]
# Additional packages to look for (search packages that start with these)
ADDITIONAL_PACKAGES = [
"rrdtool", "rrdtool-python", "nagios", "ganglia", "gmond", "gweb", "libconfuse", "ambari-log4j",
"hadoop", "zookeeper", "oozie", "webhcat"
]
# ignore packages from repos whose names start with these strings
IGNORE_PACKAGES_FROM_REPOS = [
"ambari", "installed"
]
# ignore required packages
IGNORE_PACKAGES = [
"epel-release"
]
# ignore repos from the list of repos to be cleaned
IGNORE_REPOS = [
"ambari", "HDP-UTILS"
]
# default timeout for async invoked processes
TIMEOUT_SECONDS = 60
RESULT_UNAVAILABLE = "unable_to_determine"
IP_TBLS_IS_NOT_RUNNING = "iptables: Firewall is not running."
event = threading.Event()
current_umask = -1
def __init__(self, config=None):
self.packages = PackagesAnalyzer()
self.reportFileHandler = HostCheckReportFileHandler(config)
def dirType(self, path):
if not os.path.exists(path):
return 'not_exist'
elif os.path.islink(path):
return 'sym_link'
elif os.path.isdir(path):
return 'directory'
elif os.path.isfile(path):
return 'file'
return 'unknown'
def hadoopVarRunCount(self):
if not os.path.exists('/var/run/hadoop'):
return 0
pids = glob.glob('/var/run/hadoop/*/*.pid')
return len(pids)
def hadoopVarLogCount(self):
if not os.path.exists('/var/log/hadoop'):
return 0
logs = glob.glob('/var/log/hadoop/*/*.log')
return len(logs)
def etcAlternativesConf(self, projects, etcResults):
if not os.path.exists('/etc/alternatives'):
return []
projectRegex = "'" + '|'.join(projects) + "'"
files = [f for f in os.listdir('/etc/alternatives') if re.match(projectRegex, f)]
for conf in files:
result = {}
filePath = os.path.join('/etc/alternatives', conf)
if os.path.islink(filePath):
realConf = os.path.realpath(filePath)
result['name'] = conf
result['target'] = realConf
etcResults.append(result)
def get_os_type(self):
os_info = platform.linux_distribution(None, None, None, ['SuSE',
'redhat' ], 0)
return os_info[0].lower()
def checkLiveServices(self, services, result):
osType = self.get_os_type()
for service in services:
svcCheckResult = {}
if isinstance(service, dict):
serviceName = service[osType]
else:
serviceName = service
svcCheckResult['name'] = serviceName
svcCheckResult['status'] = "UNKNOWN"
svcCheckResult['desc'] = ""
try:
cmd = "/sbin/service " + serviceName + " status"
osStat = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = osStat.communicate()
if 0 != osStat.returncode:
svcCheckResult['status'] = "Unhealthy"
svcCheckResult['desc'] = out
if len(out) == 0:
svcCheckResult['desc'] = err
else:
svcCheckResult['status'] = "Healthy"
except Exception, e:
svcCheckResult['status'] = "Unhealthy"
svcCheckResult['desc'] = repr(e)
result.append(svcCheckResult)
def checkUsers(self, users, results):
f = open('/etc/passwd', 'r')
for userLine in f:
fields = userLine.split(":")
if fields[0] in users:
result = {}
homeDir = fields[5]
result['name'] = fields[0]
result['homeDir'] = fields[5]
result['status'] = "Available";
if not os.path.exists(homeDir):
result['status'] = "Invalid home directory";
results.append(result)
def osdiskAvailableSpace(self, path):
diskInfo = {}
try:
df = subprocess.Popen(["df", "-kPT", path], stdout=subprocess.PIPE)
dfdata = df.communicate()[0]
return Hardware.extractMountInfo(dfdata.splitlines()[-1])
except:
pass
return diskInfo
def checkFolders(self, basePaths, projectNames, existingUsers, dirs):
foldersToIgnore = []
for user in existingUsers:
foldersToIgnore.append(user['homeDir'])
try:
for dirName in basePaths:
for project in projectNames:
path = os.path.join(dirName.strip(), project.strip())
if not path in foldersToIgnore and os.path.exists(path):
obj = {}
obj['type'] = self.dirType(path)
obj['name'] = path
dirs.append(obj)
except:
pass
def javaProcs(self, list):
try:
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
cmd = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
cmd = cmd.replace('\0', ' ')
if not 'AmbariServer' in cmd:
if 'java' in cmd:
dict = {}
dict['pid'] = int(pid)
dict['hadoop'] = False
for filter in self.PROC_FILTER:
if filter in cmd:
dict['hadoop'] = True
dict['command'] = cmd.strip()
for line in open(os.path.join('/proc', pid, 'status')):
if line.startswith('Uid:'):
uid = int(line.split()[1])
dict['user'] = pwd.getpwuid(uid).pw_name
list.append(dict)
except:
pass
pass
def getReposToRemove(self, repos, ignoreList):
reposToRemove = []
for repo in repos:
addToRemoveList = True
for ignoreRepo in ignoreList:
if self.packages.nameMatch(ignoreRepo, repo):
addToRemoveList = False
continue
if addToRemoveList:
reposToRemove.append(repo)
return reposToRemove
def getUMask(self):
if (self.current_umask == -1):
self.current_umask = os.umask(self.current_umask)
os.umask(self.current_umask)
return self.current_umask
else:
return self.current_umask
def checkIptables(self):
iptablesIsRunning = False
try:
iptables = subprocess.Popen(["/sbin/service", "iptables", "status"], stdout=subprocess.PIPE)
iptablesOut = iptables.communicate()[0]
if iptablesOut and len(iptablesOut) > 0 and not iptablesOut.strip() == self.IP_TBLS_IS_NOT_RUNNING:
iptablesIsRunning = True
except:
pass
return iptablesIsRunning
""" Return various details about the host
componentsMapped: indicates if any components are mapped to this host
commandsInProgress: indicates if any commands are in progress
"""
def register(self, dict, componentsMapped=True, commandsInProgress=True):
dict['hostHealth'] = {}
java = []
self.javaProcs(java)
dict['hostHealth']['activeJavaProcs'] = java
dict['hostHealth']['diskStatus'] = [self.osdiskAvailableSpace("/")]
liveSvcs = []
self.checkLiveServices(self.DEFAULT_LIVE_SERVICES, liveSvcs)
dict['hostHealth']['liveServices'] = liveSvcs
dict['umask'] = str(self.getUMask())
# detailed host check is not available for Suse
isSuse = 'suse' == self.get_os_type()
dict['iptablesIsRunning'] = self.checkIptables()
# If commands are in progress or components are already mapped to this host
# Then do not perform certain expensive host checks
if componentsMapped or commandsInProgress or isSuse:
dict['existingRepos'] = [self.RESULT_UNAVAILABLE]
dict['installedPackages'] = []
dict['alternatives'] = []
dict['stackFoldersAndFiles'] = []
dict['existingUsers'] = []
else:
etcs = []
self.etcAlternativesConf(self.DEFAULT_PROJECT_NAMES, etcs)
dict['alternatives'] = etcs
existingUsers = []
self.checkUsers(self.DEFAULT_USERS, existingUsers)
dict['existingUsers'] = existingUsers
dirs = []
self.checkFolders(self.DEFAULT_DIRS, self.DEFAULT_PROJECT_NAMES, existingUsers, dirs)
dict['stackFoldersAndFiles'] = dirs
installedPackages = []
availablePackages = []
self.packages.allInstalledPackages(installedPackages)
self.packages.allAvailablePackages(availablePackages)
repos = []
self.packages.getInstalledRepos(self.PACKAGES, installedPackages + availablePackages,
self.IGNORE_PACKAGES_FROM_REPOS, repos)
packagesInstalled = self.packages.getInstalledPkgsByRepo(repos, self.IGNORE_PACKAGES, installedPackages)
additionalPkgsInstalled = self.packages.getInstalledPkgsByNames(
self.ADDITIONAL_PACKAGES, installedPackages)
allPackages = list(set(packagesInstalled + additionalPkgsInstalled))
dict['installedPackages'] = self.packages.getPackageDetails(installedPackages, allPackages)
repos = self.getReposToRemove(repos, self.IGNORE_REPOS)
dict['existingRepos'] = repos
self.reportFileHandler.writeHostCheckFile(dict)
pass
# The time stamp must be recorded at the end
dict['hostHealth']['agentTimeStampAtReporting'] = int(time.time() * 1000)
pass
def main(argv=None):
h = HostInfo()
struct = {}
h.register(struct)
print struct
if __name__ == '__main__':
main()
|
|
"""Backend for distributed parameter evaluation."""
import os
import shutil
from psyrun.backend.base import Backend, JobSourceFile
from psyrun.jobs import Job, JobChain, JobArray
from psyrun.pspace import dict_concat, missing, Param
from psyrun.mapper import map_pspace_hdd_backed
from psyrun.store import DefaultStore
from psyrun.utils.doc import inherit_docs
@inherit_docs
class DistributeBackend(Backend):
"""Create subtasks for distributed parameter evaluation.
This will create one task that splits the parameter space into a number of
equal batches (at most *max_jobs*, but with at least *min_items* for each
batch). After processing all batches the results will be merged into a
single file.
This is similar to map-reduce processing.
Parameters
----------
task : `TaskDef`
Task definition to create subtasks for.
"""
@property
def resultfile(self):
"""File in which the results will be stored."""
if self.task.resultfile:
return self.task.resultfile
else:
return os.path.join(
self.workdir, 'result' + self.task.store.ext)
@property
def pspace_file(self):
"""File that will store the input parameters space."""
return os.path.join(self.workdir, 'pspace' + self.task.store.ext)
def _try_mv_to_out(self, filename):
try:
os.rename(
os.path.join(self.workdir, filename),
os.path.join(self.workdir, 'out', 'pre' + self.task.store.ext))
return True
except OSError:
return False
def create_job(self, cont=False):
if cont:
outdir = os.path.join(self.workdir, 'out')
if not self._try_mv_to_out('result' + self.task.store.ext):
Splitter.merge(
outdir, os.path.join(outdir, 'pre' + self.task.store.ext))
for filename in os.listdir(outdir):
if not filename.startswith('pre'):
os.remove(os.path.join(outdir, filename))
pspace = self.get_missing()
try:
indir = os.path.join(self.workdir, 'in')
shutil.rmtree(indir)
except OSError:
pass
else:
pspace = self.task.pspace
self.task.store.save(self.pspace_file, pspace.build())
splitter = Splitter(
self.workdir, pspace, self.task.max_jobs, self.task.min_items,
store=self.task.store)
split = self.create_split_job(splitter)
process = self.create_process_job(splitter)
merge = self.create_merge_job(splitter)
return JobChain(self.task.name, [split, process, merge])
def create_split_job(self, splitter):
code = '''
from psyrun.backend.distribute import Splitter
from psyrun.pspace import Param
pspace = Param(**task.store.load({pspace!r}))
Splitter(
{workdir!r}, pspace, {max_jobs!r}, {min_items!r},
store=task.store).split()
'''.format(
pspace=self.pspace_file,
workdir=splitter.workdir, max_jobs=self.task.max_jobs,
min_items=self.task.min_items)
file_dep = [os.path.join(os.path.dirname(self.task.path), f)
for f in self.task.file_dep]
return Job(
'split', self.submit_code, {'code': code},
[self.task.path] + file_dep,
[f for f, _ in splitter.iter_in_out_files()])
def create_process_job(self, splitter):
source_file = JobSourceFile(
os.path.join(self.workdir, self.task.name + ':process.py'),
self.task,
'''
import sys
from psyrun.backend.distribute import Worker
def execute(*args, **kwargs):
return task.execute(*args, **kwargs)
if __name__ == '__main__':
Worker(
int(sys.argv[3]), store=task.store,
exclude_from_result=task.exclude_from_result).start(
execute, sys.argv[1], sys.argv[2], pool_size={pool_size},
setup_fn=task.setup)
'''.format(pool_size=self.task.pool_size))
infile = os.path.join(splitter.indir, '%a' + splitter.store.ext)
outfile = os.path.join(splitter.outdir, '%a' + splitter.store.ext)
return JobArray(
splitter.n_splits, 'process', self.submit_array, self.submit_file,
{'job_source_file': source_file, 'args': [infile, outfile, '%a']},
[infile], [outfile])
def create_merge_job(self, splitter):
code = '''
from psyrun.backend.distribute import Splitter
Splitter.merge({outdir!r}, {filename!r}, append=False, store=task.store)
'''.format(outdir=splitter.outdir, filename=self.resultfile)
return Job(
'merge', self.submit_code, {'code': code},
[f for _, f in splitter.iter_in_out_files()], [self.resultfile])
def get_missing(self):
pspace = self.task.pspace
try:
missing_items = missing(
pspace, Param(**self.task.store.load(self.resultfile)))
except (IOError, OSError):
missing_items = pspace
try:
for filename in os.listdir(os.path.join(self.workdir, 'out')):
if os.path.splitext(filename)[1] != self.task.store.ext:
continue
outfile = os.path.join(self.workdir, 'out', filename)
try:
missing_items = missing(
missing_items,
Param(**self.task.store.load(outfile)))
except (IOError, OSError):
pass
except (IOError, OSError):
pass
return missing_items
def get_queued(self):
scheduler = self.task.scheduler
status = [scheduler.get_status(j) for j in scheduler.get_jobs()]
for s in status:
if s.status != 'D' and self.task.name + ':split' in s.name:
return Param(**self.task.store.load(self.pspace_file))
queued = Param()
for s in status:
if s.status != 'D' and self.task.name + ':process' in s.name:
num = s.name.rsplit(':', 1)[-1]
filename = os.path.join(
self.workdir, 'in', num + self.task.store.ext)
queued += Param(**self.task.store.load(filename))
return queued
def get_failed(self):
scheduler = self.task.scheduler
status = (scheduler.get_status(j) for j in scheduler.get_jobs())
queued = [s.name for s in status if s.status != 'D']
indir = os.path.join(self.workdir, 'in')
if (not os.path.exists(indir) or
self.task.name + ':split' in queued):
return []
elif not os.path.exists(indir) or len(os.listdir(indir)) == 0:
return [self.task.name + ':split']
failed = []
for filename in os.listdir(indir):
if not os.path.exists(os.path.join(self.workdir, 'out', filename)):
jobname = self.task.name + ':process:' + os.path.splitext(
filename)[0]
if jobname not in queued:
failed.append(jobname)
if len(failed) == 0:
if not os.path.exists(self.resultfile):
return [self.task.name + ':merge']
return failed
class Splitter(object):
"""Split a parameter space into multiple input files and merge results
after processing.
Parameters
----------
workdir : str
Working directory to create input files in and read output files from.
pspace : `ParameterSpace`
Parameter space to split up.
max_splits : int, optional
Maximum number of splits to perform.
min_items : int, optional
Minimum number of parameter sets in each split.
store : `Store`, optional
Input/output backend.
Attributes
----------
indir : str
Directory to store input files.
max_splits : int
Maximum number of splits to perform.
min_items : int
Minimum number of parameter sets in each split.
outdir : str
Directory to store output files.
pspace : `ParameterSpace`
Parameter space to split up.
store : `Store`
Input/output backend.
workdir : str
Working directory to create input files in and read output files from.
"""
def __init__(
self, workdir, pspace, max_splits=64, min_items=4,
store=DefaultStore()):
self.workdir = workdir
self.indir = self._get_indir(workdir)
self.outdir = self._get_outdir(workdir)
if not os.path.exists(self.indir):
os.makedirs(self.indir)
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
self.pspace = pspace
self.max_splits = max_splits
self.min_items = min_items
self.store = store
@property
def n_splits(self):
"""Number of total splits that will be generated."""
n_splits = (len(self.pspace) - 1) // self.min_items + 1
if self.max_splits is not None:
n_splits = min(self.max_splits, n_splits)
return n_splits
def split(self):
"""Perform splitting of parameters space and save input files for
processing."""
items_remaining = len(self.pspace)
param_iter = self.pspace.iterate()
for i, filename in enumerate(self._iter_filenames()):
split_size = self.min_items
if self.max_splits is not None:
split_size = max(
split_size, items_remaining // (self.max_splits - i))
items_remaining -= split_size
block = dict_concat(
[row for row in self._iter_n(param_iter, split_size)])
self.store.save(os.path.join(self.indir, filename), block)
@classmethod
def merge(cls, outdir, merged_filename, append=True, store=DefaultStore()):
"""Merge processed files together.
Parameters
----------
outdir : str
Directory with the output files.
merged_filename : str
Filename of file to save with the merged results.
append : bool, optional
If True the merged data will be appended, otherwise the file
will be overwritten with the merged data.
store : `Store`, optional
Input/output backend.
"""
if not append:
store.save(merged_filename, {})
for filename in os.listdir(outdir):
if os.path.splitext(filename)[1] != store.ext:
continue
infile = os.path.join(outdir, filename)
store.append(merged_filename, store.load(infile))
def iter_in_out_files(self):
"""Return generator returning tuples of corresponding input and output
filenames."""
return ((os.path.join(self.indir, f), os.path.join(self.outdir, f))
for f in self._iter_filenames())
def _iter_filenames(self):
return (str(i) + self.store.ext for i in range(self.n_splits))
@staticmethod
def _iter_n(it, n):
for _ in range(n):
yield next(it)
@classmethod
def _get_indir(cls, workdir):
return os.path.join(workdir, 'in')
@classmethod
def _get_outdir(cls, workdir):
return os.path.join(workdir, 'out')
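# Usage sketch (hedged; the file extension depends on the configured Store):
#
#   from psyrun.pspace import Param
#
#   splitter = Splitter('workdir', Param(x=list(range(100))),
#                       max_splits=8, min_items=4)
#   splitter.split()   # writes one input file per split into workdir/in/
#   # ...each job then maps the task function over one input file into workdir/out/...
#   Splitter.merge(splitter.outdir,
#                  'workdir/result' + splitter.store.ext, append=False)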
class Worker(object):
"""Maps a function to the parameter space loaded from a file and writes the
result to an output file.
Parameters
----------
proc_id : int
Worker ID.
store : `Store`, optional
Input/output backend.
exclude_from_result : sequence, optional
Keys of items to exclude from the result.
Attributes
----------
proc_id : int
Worker ID.
store : `Store`
Input/output backend.
exclude_from_result : sequence, optional
Keys of items to exclude from the result.
"""
def __init__(
self, proc_id, store=DefaultStore(), exclude_from_result=None):
self.proc_id = proc_id
self.store = store
if exclude_from_result is None:
exclude_from_result = []
self.exclude_from_result = exclude_from_result
def start(self, fn, infile, outfile, pool_size=1, setup_fn=None):
"""Start processing a parameter space.
Parameters
----------
fn : function
Function to evaluate on the parameter space.
infile : str
Parameter space input filename.
outfile : str
Output filename for the results.
pool_size : int, optional
Number of parallel processes.
setup_fn : function, optional
Setup function, called with the worker ID as argument before
processing of parameter sets begins. May return a dictionary of
parameters added to the invocation of *fn*.
"""
add_params = None
if setup_fn is not None:
add_params = setup_fn(self.proc_id)
if add_params is None:
add_params = {}
pspace = Param(**self.store.load(infile))
out_root, out_ext = os.path.splitext(outfile)
map_pspace_hdd_backed(
fn, Param(**add_params) * pspace, out_root + '.part' + out_ext,
store=self.store, return_data=False, pool_size=pool_size,
exclude=self.exclude_from_result)
os.rename(out_root + '.part' + out_ext, outfile)
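# Usage sketch (hedged): this mirrors what the generated ':process.py' job source in
# DistributeBackend.create_process_job does for a single split. The parameter name
# 'x' and result key 'y' are placeholders.
#
#   def evaluate(x):
#       return {'y': 2 * x}
#
#   worker = Worker(proc_id=0)
#   worker.start(evaluate,
#                'workdir/in/0' + worker.store.ext,
#                'workdir/out/0' + worker.store.ext)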
|
|
#!/usr/bin/env python3
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve.py."""
# pylint: disable=too-many-lines
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import copy
import hashlib
import unittest
import time
from os_ken.ofproto import ofproto_v1_3 as ofp
from faucet import config_parser_util
from faucet import valve_of
from clib.fakeoftable import CONTROLLER_PORT
from clib.valve_test_lib import BASE_DP1_CONFIG, CONFIG, DP1_CONFIG, FAUCET_MAC, ValveTestBases
class ValveIncludeTestCase(ValveTestBases.ValveTestNetwork):
"""Test include optional files."""
CONFIG = """
include-optional: ['/does/not/exist/']
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup config with non-existent optional include file"""
self.setup_valves(self.CONFIG)
def test_include_optional(self):
"""Test include optional files."""
self.assertEqual(1, int(self.get_prom('dp_status')))
class ValveBadConfTestCase(ValveTestBases.ValveTestNetwork):
"""Test recovery from a bad config file."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
BAD_CONFIG = """
dps: {}
"""
def setUp(self):
"""Setup invalid config"""
self.setup_valves(self.CONFIG)
def test_bad_conf(self):
"""Test various config types & config reloading"""
for config, load_error in (
(self.CONFIG, 0),
(self.BAD_CONFIG, 1),
(self.CONFIG, 0),
(self.MORE_CONFIG, 0),
(self.BAD_CONFIG, 1),
(self.CONFIG, 0)):
with open(self.config_file, 'w', encoding='utf-8') as config_file:
config_file.write(config)
self.valves_manager.request_reload_configs(self.mock_time(), self.config_file)
self.assertEqual(
load_error,
self.get_prom('faucet_config_load_error', bare=True),
msg='%u: %s' % (load_error, config))
class ValveChangeVLANACLTestCase(ValveTestBases.ValveTestNetwork):
CONFIG = """
acls:
acl1:
- rule:
eth_type: 0x0806
actions:
allow: 1
vlans:
vlan1:
acls_in:
- acl1
vid: 10
dps:
s1:
%s
interfaces:
1:
native_vlan: vlan1
""" % DP1_CONFIG
MORE_CONFIG = """
acls:
acl1:
- rule:
eth_type: 0x0806
actions:
allow: 1
- rule:
eth_type: 0x0800
actions:
allow: 0
vlans:
vlan1:
acls_in:
- acl1
vid: 10
dps:
s1:
%s
interfaces:
1:
native_vlan: vlan1
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_change_vlan_acl(self):
"""Test vlan ACL change is detected."""
self.update_and_revert_config(self.CONFIG, self.MORE_CONFIG, 'cold')
class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes to config on ports."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x200
permanent_learn: True
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x200
permanent_learn: False
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_delete_permanent_learn(self):
"""Test port permanent learn can deconfigured."""
table = self.network.tables[self.DP_ID]
before_table_state = table.table_state()
self.rcv_packet(2, 0x200, {
'eth_src': self.P2_V200_MAC,
'eth_dst': self.P3_V200_MAC,
'ipv4_src': '10.0.0.2',
'ipv4_dst': '10.0.0.3',
'vid': 0x200})
self.update_and_revert_config(
self.CONFIG, self.LESS_CONFIG,
'warm', before_table_states={self.DP_ID: before_table_state})
class ValveDeletePortTestCase(ValveTestBases.ValveTestNetwork):
"""Test deletion of a port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_port_delete(self):
"""Test port can be deleted."""
self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold')
class ValveAddPortMirrorNoDelVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of port mirroring does not cause a del VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
output_only: true
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
output_only: true
mirror: [1]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
_ = self.setup_valves(self.CONFIG)[self.DP_ID]
def test_port_mirror(self):
"""Test addition of port mirroring is a warm start."""
_ = self.update_config(self.MORE_CONFIG, reload_type='warm')[self.DP_ID]
class ValveAddPortTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of a port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
""" % DP1_CONFIG
@staticmethod
def _inport_flows(in_port, ofmsgs):
return [
ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs)
if ofmsg.match.get('in_port') == in_port]
def setUp(self):
"""Setup basic port and vlan config"""
initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID]
self.assertFalse(self._inport_flows(3, initial_ofmsgs))
def test_port_add(self):
"""Test port can be added."""
reload_ofmsgs = self.update_config(self.MORE_CONFIG, reload_type='cold')[self.DP_ID]
self.assertTrue(self._inport_flows(3, reload_ofmsgs))
class ValveAddPortTrafficTestCase(ValveTestBases.ValveTestNetwork):
"""Test addition of a port with traffic."""
# NOTE: This needs to use 'Generic' hardware,
# as GenericTFM does not support 'warm' start
REQUIRE_TFM = False
CONFIG = """
dps:
s1:
dp_id: 1
hardware: Generic
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
"""
MORE_CONFIG = """
dps:
s1:
dp_id: 1
hardware: Generic
interfaces:
p1:
number: 1
tagged_vlans: [0x100]
p2:
number: 2
tagged_vlans: [0x100]
p3:
number: 3
tagged_vlans: [0x100]
"""
@staticmethod
def _inport_flows(in_port, ofmsgs):
return [
ofmsg for ofmsg in ValveTestBases.flowmods_from_flows(ofmsgs)
if ofmsg.match.get('in_port') == in_port]
def _learn(self, in_port):
ucast_pkt = self.pkt_match(in_port, 1)
ucast_pkt['in_port'] = in_port
ucast_pkt['vlan_vid'] = self.V100
table = self.network.tables[self.DP_ID]
self.assertTrue(table.is_output(ucast_pkt, port=CONTROLLER_PORT))
self.rcv_packet(in_port, self.V100, ucast_pkt)
def _unicast_between(self, in_port, out_port, not_out=1):
ucast_match = self.pkt_match(in_port, out_port)
ucast_match['in_port'] = in_port
ucast_match['vlan_vid'] = self.V100
table = self.network.tables[self.DP_ID]
self.assertTrue(table.is_output(ucast_match, port=out_port))
self.assertFalse(table.is_output(ucast_match, port=not_out))
def setUp(self):
initial_ofmsgs = self.setup_valves(self.CONFIG)[self.DP_ID]
self.assertFalse(self._inport_flows(3, initial_ofmsgs))
def test_port_add_no_ofmsgs(self):
"""New config does not generate new flows."""
update_ofmsgs = self.update_config(self.MORE_CONFIG,
reload_type='warm')[self.DP_ID]
self.assertFalse(self._inport_flows(3, update_ofmsgs))
def test_port_add_link_state(self):
"""New port can be added in link-down state."""
self.update_config(self.MORE_CONFIG, reload_type='warm')
self.add_port(3, link_up=False)
self.port_expected_status(3, 0)
self.set_port_link_up(3)
self.port_expected_status(3, 1)
def test_port_add_traffic(self):
"""New port can be added, and pass traffic."""
self.update_config(self.MORE_CONFIG, reload_type='warm')
self.add_port(3)
self._learn(2)
self._learn(3)
self._unicast_between(2, 3)
self._unicast_between(3, 2)
class ValveWarmStartVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test change of port VLAN only is a warm start."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 9
tagged_vlans: [0x100]
p2:
number: 11
tagged_vlans: [0x100]
p3:
number: 13
tagged_vlans: [0x100]
p4:
number: 14
native_vlan: 0x200
""" % DP1_CONFIG
WARM_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 9
tagged_vlans: [0x100]
p2:
number: 11
tagged_vlans: [0x100]
p3:
number: 13
tagged_vlans: [0x100]
p4:
number: 14
native_vlan: 0x300
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_warm_start(self):
"""Test VLAN change is warm startable and metrics maintained."""
self.update_and_revert_config(self.CONFIG, self.WARM_CONFIG, 'warm')
self.rcv_packet(9, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_func():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
verify_func()
self.update_config(self.WARM_CONFIG, reload_type='warm')
verify_func()
class ValveDeleteVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test deleting VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
LESS_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x200]
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_delete_vlan(self):
"""Test VLAN can be deleted."""
self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'cold')
class ValveChangeDPTestCase(ValveTestBases.ValveTestNetwork):
"""Test changing DP."""
CONFIG = """
dps:
s1:
%s
priority_offset: 4321
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
NEW_CONFIG = """
dps:
s1:
%s
priority_offset: 1234
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config with priority offset"""
self.setup_valves(self.CONFIG)
def test_change_dp(self):
"""Test DP changed."""
self.update_and_revert_config(self.CONFIG, self.NEW_CONFIG, 'cold')
class ValveAddVLANTestCase(ValveTestBases.ValveTestNetwork):
"""Test adding VLAN."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
tagged_vlans: [0x100]
""" % DP1_CONFIG
MORE_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
tagged_vlans: [0x100, 0x200]
p2:
number: 2
tagged_vlans: [0x100, 0x300]
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_add_vlan(self):
"""Test VLAN can added."""
self.update_and_revert_config(self.CONFIG, self.MORE_CONFIG, 'cold')
class ValveChangeACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes to ACL on a port."""
CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_same_a
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
SAME_CONTENT_CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_same_b
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
DIFF_CONTENT_CONFIG = """
acls:
acl_same_a:
- rule:
actions:
allow: 1
acl_same_b:
- rule:
actions:
allow: 1
acl_diff_c:
- rule:
actions:
allow: 0
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
acl_in: acl_diff_c
p2:
number: 2
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic ACL config"""
self.setup_valves(self.CONFIG)
def test_change_port_acl(self):
"""Test port ACL can be changed."""
self.update_and_revert_config(self.CONFIG, self.SAME_CONTENT_CONFIG, 'warm')
self.update_config(self.SAME_CONTENT_CONFIG, reload_type='warm')
self.rcv_packet(1, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_func():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
verify_func()
# ACL changed but we kept the learn cache.
self.update_config(self.DIFF_CONTENT_CONFIG, reload_type='warm')
verify_func()
class ValveChangeMirrorTestCase(ValveTestBases.ValveTestNetwork):
"""Test changes mirroring port."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
output_only: True
p3:
number: 3
native_vlan: 0x200
""" % DP1_CONFIG
MIRROR_CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
p2:
number: 2
mirror: p1
p3:
number: 3
native_vlan: 0x200
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_change_mirror(self):
"""Test that the mirror port can be changed."""
self.update_and_revert_config(self.CONFIG, self.MIRROR_CONFIG, reload_type='warm')
vlan_labels = {'vlan': str(int(0x100))}
port_labels = {'port': 'p1', 'port_description': 'p1'}
port_labels.update(vlan_labels)
def verify_prom():
self.assertEqual(
1, self.get_prom('vlan_hosts_learned', labels=vlan_labels))
self.assertEqual(
1, self.get_prom('port_vlan_hosts_learned', labels=port_labels))
self.rcv_packet(1, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.UNKNOWN_MAC,
'ipv4_src': '10.0.0.1',
'ipv4_dst': '10.0.0.2'})
verify_prom()
# Now mirroring port 1 but we kept the cache.
self.update_config(self.MIRROR_CONFIG, reload_type='warm')
verify_prom()
# Now unmirror again.
self.update_config(self.CONFIG, reload_type='warm')
verify_prom()
class ValveACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test ACL drop/allow and reloading."""
def setUp(self):
self.setup_valves(CONFIG)
def test_vlan_acl_deny(self):
"""Test VLAN ACL denies a packet."""
acl_config = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
native_vlan: v300
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
acl_in: drop_non_ospf_ipv4
v300:
vid: 0x300
acls:
drop_non_ospf_ipv4:
- rule:
nw_dst: '224.0.0.5'
dl_type: 0x800
actions:
allow: 1
- rule:
dl_type: 0x800
actions:
allow: 0
""" % DP1_CONFIG
drop_match = {
'in_port': 2,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '192.0.2.1'}
accept_match = {
'in_port': 2,
'vlan_vid': 0,
'eth_type': 0x800,
'ipv4_dst': '224.0.0.5'}
table = self.network.tables[self.DP_ID]
# base case
for match in (drop_match, accept_match):
self.assertTrue(
table.is_output(match, port=3, vid=self.V200),
msg='Packet not output before adding ACL')
def verify_func():
self.flap_port(2)
self.assertFalse(
table.is_output(drop_match), msg='Packet not blocked by ACL')
self.assertTrue(
table.is_output(accept_match, port=3, vid=self.V200),
msg='Packet not allowed by ACL')
self.update_and_revert_config(
CONFIG, acl_config, reload_type='cold', verify_func=verify_func)
class ValveEgressACLTestCase(ValveTestBases.ValveTestNetwork):
"""Test ACL drop/allow and reloading."""
def setUp(self):
self.setup_valves(CONFIG)
def test_vlan_acl_deny(self):
"""Test VLAN ACL denies a packet."""
allow_host_v6 = 'fc00:200::1:1'
deny_host_v6 = 'fc00:200::1:2'
faucet_v100_vip = 'fc00:100::1'
faucet_v200_vip = 'fc00:200::1'
acl_config = """
dps:
s1:
{dp1_config}
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
vlans:
v100:
vid: 0x100
faucet_mac: '{mac}'
faucet_vips: ['{v100_vip}/64']
v200:
vid: 0x200
faucet_mac: '{mac}'
faucet_vips: ['{v200_vip}/64']
acl_out: drop_non_allow_host_v6
minimum_ip_size_check: false
routers:
r_v100_v200:
vlans: [v100, v200]
acls:
drop_non_allow_host_v6:
- rule:
ipv6_dst: '{allow_host}'
eth_type: 0x86DD
actions:
allow: 1
- rule:
eth_type: 0x86DD
actions:
allow: 0
""".format(dp1_config=DP1_CONFIG, mac=FAUCET_MAC, v100_vip=faucet_v100_vip,
v200_vip=faucet_v200_vip, allow_host=allow_host_v6)
l2_drop_match = {
'in_port': 2,
'eth_dst': self.P3_V200_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': deny_host_v6}
l2_accept_match = {
'in_port': 3,
'eth_dst': self.P2_V200_MAC,
'vlan_vid': 0x200 | ofp.OFPVID_PRESENT,
'eth_type': 0x86DD,
'ipv6_dst': allow_host_v6}
v100_accept_match = {'in_port': 1, 'vlan_vid': 0}
table = self.network.tables[self.DP_ID]
# base case
for match in (l2_drop_match, l2_accept_match):
self.assertTrue(
table.is_output(match, port=4),
msg='Packet not output before adding ACL')
def verify_func():
self.assertTrue(
table.is_output(v100_accept_match, port=3),
msg='Packet not output when on vlan with no ACL')
self.assertFalse(
table.is_output(l2_drop_match, port=3),
msg='Packet not blocked by ACL')
self.assertTrue(
table.is_output(l2_accept_match, port=2),
msg='Packet not allowed by ACL')
# unicast
self.rcv_packet(2, 0x200, {
'eth_src': self.P2_V200_MAC,
'eth_dst': self.P3_V200_MAC,
'vid': 0x200,
'ipv6_src': allow_host_v6,
'ipv6_dst': deny_host_v6,
'neighbor_advert_ip': allow_host_v6})
self.rcv_packet(3, 0x200, {
'eth_src': self.P3_V200_MAC,
'eth_dst': self.P2_V200_MAC,
'vid': 0x200,
'ipv6_src': deny_host_v6,
'ipv6_dst': allow_host_v6,
'neighbor_advert_ip': deny_host_v6})
self.assertTrue(
table.is_output(l2_accept_match, port=2),
msg='Packet not allowed by ACL')
self.assertFalse(
table.is_output(l2_drop_match, port=3),
msg='Packet not blocked by ACL')
# l3
l3_drop_match = {
'in_port': 1,
'eth_dst': FAUCET_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': deny_host_v6}
l3_accept_match = {
'in_port': 1,
'eth_dst': FAUCET_MAC,
'vlan_vid': 0,
'eth_type': 0x86DD,
'ipv6_dst': allow_host_v6}
self.assertTrue(
table.is_output(l3_accept_match, port=2),
msg='Routed packet not allowed by ACL')
self.assertFalse(
table.is_output(l3_drop_match, port=3),
msg='Routed packet not blocked by ACL')
# multicast
self.update_and_revert_config(CONFIG, acl_config, 'cold', verify_func=verify_func)
class ValveReloadConfigProfile(ValveTestBases.ValveTestNetwork):
"""Test reload processing time."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % BASE_DP1_CONFIG
NUM_PORTS = 100
baseline_total_tt = None
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(CONFIG)
def test_profile_reload(self):
"""Test reload processing time."""
orig_config = copy.copy(self.CONFIG)
def load_orig_config():
pstats_out, _ = self.profile(
partial(self.update_config, orig_config))
self.baseline_total_tt = pstats_out.total_tt # pytype: disable=attribute-error
for i in range(2, 100):
self.CONFIG += """
p%u:
number: %u
native_vlan: 0x100
""" % (i, i)
for i in range(5):
load_orig_config()
pstats_out, pstats_text = self.profile(
partial(self.update_config, self.CONFIG, reload_type='cold'))
cache_info = valve_of.output_non_output_actions.cache_info()
self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info)
total_tt_prop = (
pstats_out.total_tt / self.baseline_total_tt) # pytype: disable=attribute-error
# Must not be 20x slower to ingest config for 100 interfaces than for 1.
# TODO: This test might have to be run separately,
# since it is marginal on GitHub actions due to parallel test runs.
if total_tt_prop < 20:
for valve in self.valves_manager.valves.values():
for table in valve.dp.tables.values():
cache_info = table._trim_inst.cache_info() # pylint: disable=protected-access
self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info)
return
time.sleep(i)
self.fail('%f: %s' % (total_tt_prop, pstats_text))
class ValveTestVLANRef(ValveTestBases.ValveTestNetwork):
"""Test reference to same VLAN by name or VID."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 333
p2:
number: 2
native_vlan: threes
vlans:
threes:
vid: 333
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def test_vlan_refs(self):
"""Test same VLAN is referred to."""
vlans = self.valves_manager.valves[self.DP_ID].dp.vlans
self.assertEqual(1, len(vlans))
self.assertEqual('threes', vlans[333].name, vlans[333])
self.assertEqual(2, len(vlans[333].untagged))
class ValveTestConfigHash(ValveTestBases.ValveTestNetwork):
"""Verify faucet_config_hash_info update after config change"""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
"""Setup basic port and vlan config"""
self.setup_valves(self.CONFIG)
def _get_info(self, metric, name):
""""Return (single) info dict for metric"""
# There doesn't seem to be a nice API for this,
# so we use the prometheus client internal API
metrics = list(metric.collect())
self.assertEqual(len(metrics), 1)
samples = metrics[0].samples
self.assertEqual(len(samples), 1)
sample = samples[0]
self.assertEqual(sample.name, name)
return sample.labels
def _check_hashes(self):
"""Verify and return faucet_config_hash_info labels"""
labels = self._get_info(metric=self.metrics.faucet_config_hash,
name='faucet_config_hash_info')
files = labels['config_files'].split(',')
hashes = labels['hashes'].split(',')
self.assertTrue(len(files) == len(hashes) == 1)
self.assertEqual(files[0], self.config_file, 'wrong config file')
hash_value = config_parser_util.config_file_hash(self.config_file)
self.assertEqual(hashes[0], hash_value, 'hash validation failed')
return labels
def _change_config(self):
"""Change self.CONFIG"""
if '0x100' in self.CONFIG:
self.CONFIG = self.CONFIG.replace('0x100', '0x200')
else:
self.CONFIG = self.CONFIG.replace('0x200', '0x100')
self.update_config(self.CONFIG, reload_expected=True)
return self.CONFIG
def test_config_hash_func(self):
"""Verify that faucet_config_hash_func is set correctly"""
labels = self._get_info(metric=self.metrics.faucet_config_hash_func,
name='faucet_config_hash_func')
hash_funcs = list(labels.values())
self.assertEqual(len(hash_funcs), 1, "found multiple hash functions")
hash_func = hash_funcs[0]
# Make sure that it matches and is supported in hashlib
self.assertEqual(hash_func, config_parser_util.CONFIG_HASH_FUNC)
self.assertTrue(hash_func in hashlib.algorithms_guaranteed)
def test_config_hash_update(self):
"""Verify faucet_config_hash_info is properly updated after config"""
# Verify that hashes change after config is changed
old_config = self.CONFIG
old_hashes = self._check_hashes()
starting_hashes = old_hashes
self._change_config()
new_config = self.CONFIG
self.assertNotEqual(old_config, new_config, 'config not changed')
new_hashes = self._check_hashes()
self.assertNotEqual(old_hashes, new_hashes,
'hashes not changed after config change')
# Verify that hashes don't change after config isn't changed
old_hashes = new_hashes
self.update_config(self.CONFIG, reload_expected=False)
new_hashes = self._check_hashes()
self.assertEqual(old_hashes, new_hashes,
"hashes changed when config didn't")
# Verify that hash is restored when config is restored
self._change_config()
new_hashes = self._check_hashes()
self.assertEqual(new_hashes, starting_hashes,
'hashes should be restored to starting values')
class ValveTestConfigRevert(ValveTestBases.ValveTestNetwork):
"""Test configuration revert"""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
p1:
number: 1
native_vlan: 0x100
"""
CONFIG_AUTO_REVERT = True
def setUp(self):
"""Setup basic port and vlan config with hardware type set"""
self.setup_valves(self.CONFIG)
def test_config_revert(self):
"""Verify config is automatically reverted if bad."""
self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0)
self.update_config('***broken***', reload_expected=True, error_expected=1)
self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1)
with open(self.config_file, 'r', encoding='utf-8') as config_file:
config_content = config_file.read()
self.assertEqual(self.CONFIG, config_content)
self.update_config(self.CONFIG + '\n', reload_expected=False, error_expected=0)
more_config = self.CONFIG + """
p2:
number: 2
native_vlan: 0x100
"""
self.update_config(more_config, reload_expected=True, reload_type='warm', error_expected=0)
class ValveTestConfigRevertBootstrap(ValveTestBases.ValveTestNetwork):
"""Test configuration auto reverted if bad"""
BAD_CONFIG = """
*** busted ***
"""
GOOD_CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
p1:
number: 1
native_vlan: 0x100
"""
CONFIG_AUTO_REVERT = True
def setUp(self):
"""Setup invalid config"""
self.setup_valves(self.BAD_CONFIG, error_expected=1)
def test_config_revert(self):
"""Verify config is automatically reverted if bad."""
self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 1)
self.update_config(self.GOOD_CONFIG + '\n', reload_expected=False, error_expected=0)
self.assertEqual(self.get_prom('faucet_config_load_error', bare=True), 0)
class ValveTestConfigApplied(ValveTestBases.ValveTestNetwork):
"""Test cases for faucet_config_applied."""
CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
p1:
description: "one thing"
number: 1
native_vlan: 0x100
"""
NEW_DESCR_CONFIG = """
dps:
s1:
dp_id: 0x1
hardware: 'GenericTFM'
interfaces:
p1:
description: "another thing"
number: 1
native_vlan: 0x100
"""
def setUp(self):
"""Setup basic port and vlan config with hardware type set"""
self.setup_valves(self.CONFIG)
def test_config_applied_update(self):
"""Verify that config_applied increments after DP connect"""
# 100% for a single datapath
self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0)
# Add a second datapath, which currently isn't programmed
self.CONFIG += """
s2:
dp_id: 0x2
hardware: 'GenericTFM'
interfaces:
p1:
number: 1
native_vlan: 0x100
"""
self.update_config(self.CONFIG, reload_expected=False)
# Should be 50%
self.assertEqual(self.get_prom('faucet_config_applied', bare=True), .5)
# We don't have a way to simulate the second datapath connecting,
# so we update the statistic manually.
self.valves_manager.update_config_applied({0x2: True})
# Should be 100% now
self.assertEqual(self.get_prom('faucet_config_applied', bare=True), 1.0)
def test_description_only(self):
"""Test updating config description"""
self.update_config(self.NEW_DESCR_CONFIG, reload_expected=False)
class ValveReloadConfigTestCase(ValveTestBases.ValveTestBig): # pylint: disable=too-few-public-methods
"""Repeats the tests after a config reload."""
def setUp(self):
super().setUp()
self.flap_port(1)
self.update_config(CONFIG, reload_type='warm', reload_expected=False)
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
|
|
"""
Copyright (c) 2019 John Robinson
Author: John Robinson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# Global Imports
import logging
import unittest
import RPi.GPIO as GPIO
import Adafruit_GPIO.SPI as SPI
# Local Imports
from max31856 import MAX31856 as MAX31856
logging.basicConfig(
filename='test_MAX31856.log',
level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
_logger = logging.getLogger(__name__)
class Adafruit_MAX31856(unittest.TestCase):
def tearDown(self):
GPIO.cleanup()
#def test_software_spi_initialize(self):
#"""Checks to see if the sensor can initialize on the software SPI interface.
#Will fail if it cannot find the MAX31856 library or any dependencies.
#Test only checks to see that the sensor can be initialized in Software, does not check the
#hardware connection.
#"""
#_logger.debug('test_software_SPI_initialize()')
## Raspberry Pi software SPI configuration.
#software_spi = {"clk": 25, "cs": 8, "do": 9, "di": 10}
#sensor = MAX31856(software_spi=software_spi)
#if sensor:
#self.assertTrue(True)
#else:
#self.assertTrue(False)
def test_hardware_spi_initialize(self):
"""
Checks to see if the sensor can initialize on the hardware SPI interface.
Will fail if it cannot find the MAX31856 library or any dependencies.
Test only checks that the sensor object can be initialized; it does not
check the hardware connection.
"""
_logger.debug('test_hardware_SPI_initialize()')
# Raspberry Pi hardware SPI configuration.
spi_port = 0
spi_device = 0
sensor = MAX31856(hardware_spi=SPI.SpiDev(spi_port, spi_device))
self.assertTrue(sensor)
def test_get_register_reading(self):
"""
Checks to see if we can read a register from the device. Good test for correct
connectivity.
"""
_logger.debug('test_get_register_reading()')
# Raspberry Pi hardware SPI configuration.
spi_port = 0
spi_device = 0
sensor = MAX31856(hardware_spi=SPI.SpiDev(spi_port, spi_device))
value = sensor._read_register(MAX31856.MAX31856_REG_READ_CR0)
for ii in range(0x00, 0x10):
# Read all of the registers, will store data to log
sensor._read_register(ii) # pylint: disable-msg=protected-access
# CR0 can legitimately read back 0x00, so only check that a value was returned.
self.assertIsNotNone(value)
#def test_get_temperaure_reading_software_spi(self):
#"""Checks to see if we can read a temperature from the board, using software SPI
#"""
#_logger.debug('test_get_temperature_reading_software_spi')
## Raspberry Pi software SPI configuration.
#software_spi = {"clk": 25, "cs": 8, "do": 9, "di": 10}
#sensor = MAX31856(software_spi=software_spi)
#temp = sensor.read_temp_c()
#if temp:
#self.assertTrue(True)
#else:
#self.assertTrue(False)
def test_get_temperature_reading(self):
"""
Checks to see if we can read a temperature from the board, using Hardware SPI
"""
_logger.debug('test_get_temperature_reading()')
# Raspberry Pi hardware SPI configuration.
spi_port = 0
spi_device = 0
sensor = MAX31856(hardware_spi=SPI.SpiDev(spi_port, spi_device))
temp = sensor.read_temp_c()
# 0.0 degC is a valid reading, so only check that a value was returned.
self.assertIsNotNone(temp)
def test_get_internal_temperature_reading(self):
"""
Checks to see if we can read a temperature from the board, using Hardware SPI
"""
_logger.debug('test_get_internal_temperature_reading()')
# Raspberry Pi hardware SPI configuration.
spi_port = 0
spi_device = 0
sensor = MAX31856(hardware_spi=SPI.SpiDev(spi_port, spi_device))
temp = sensor.read_internal_temp_c()
self.assertIsNotNone(temp)
def test_get_internal_temperature_reading_k_type(self):
"""
Checks to see if we can read a temperature from the board, using Hardware SPI, and K type thermocouple
"""
_logger.debug('test_get_internal_temperature_reading_k_type()')
# Raspberry Pi hardware SPI configuration.
spi_port = 0
spi_device = 0
sensor = MAX31856(hardware_spi=SPI.SpiDev(spi_port, spi_device), tc_type=MAX31856.MAX31856_K_TYPE)
temp = sensor.read_internal_temp_c()
self.assertIsNotNone(temp)
def test_temperature_byte_conversions(self):
"""
Checks the byte conversion for various known temperature byte values.
"""
_logger.debug('test_temperature_byte_conversions()')
#-------------------------------------------#
# Test Thermocouple Temperature Conversions #
byte2 = 0x01
byte1 = 0x70
byte0 = 0x20
decimal_temp = MAX31856._thermocouple_temp_from_bytes(byte0, byte1, byte2) # pylint: disable-msg=protected-access
self.assertEqual(decimal_temp, 23.0078125)
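# Worked example of the conversion just checked, assuming the MAX31856
# linearized TC register format (19-bit signed value, 2**-7 degC per LSB,
# low 5 bits of byte0 unused):
#   0x017020 >> 5 = 2945, and 2945 * 2**-7 = 23.0078125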
# Check a couple values from the datasheet
byte2 = 0b00000001
byte1 = 0b10010000
byte0 = 0b00000000
decimal_temp = MAX31856._thermocouple_temp_from_bytes(byte0, byte1, byte2) # pylint: disable-msg=protected-access
self.assertEqual(decimal_temp, 25.0)
byte2 = 0b00000000
byte1 = 0b00000000
byte0 = 0b00000000
decimal_temp = MAX31856._thermocouple_temp_from_bytes(byte0, byte1, byte2) # pylint: disable-msg=protected-access
self.assertEqual(decimal_temp, 0.0)
byte2 = 0b11111111
byte1 = 0b11110000
byte0 = 0b00000000
decimal_temp = MAX31856._thermocouple_temp_from_bytes(byte0, byte1, byte2) # pylint: disable-msg=protected-access
self.assertEqual(decimal_temp, -1.0)
byte2 = 0b11110000
byte1 = 0b01100000
byte0 = 0b00000000
decimal_temp = MAX31856._thermocouple_temp_from_bytes(byte0, byte1, byte2) # pylint: disable-msg=protected-access
self.assertEqual(decimal_temp, -250.0)
#---------------------------------#
# Test CJ Temperature Conversions #
msb = 0x1C
lsb = 0x64
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, 28.390625)
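# Worked example, assuming the cold-junction register format (14-bit signed
# value, 2**-6 degC per LSB, low 2 bits of the LSB unused):
#   0x1C64 >> 2 = 1817, and 1817 * 2**-6 = 28.390625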
# Check a couple values from the datasheet
msb = 0b01111111
lsb = 0b11111100
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, 127.984375)
msb = 0b00011001
lsb = 0b00000000
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, 25)
msb = 0b00000000
lsb = 0b00000000
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, 0)
msb = 0b11100111
lsb = 0b00000000
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, -25)
msb = 0b11001001
lsb = 0b00000000
decimal_cj_temp = MAX31856._cj_temp_from_bytes(msb, lsb) # pylint: disable-msg=protected-access
self.assertEqual(decimal_cj_temp, -55)
if __name__ == "__main__":
unittest.main()
|
|
##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
class NameValuePlugTest( GafferTest.TestCase ) :
def assertPlugSerialises( self, plug ):
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = plug
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["p"].getName(), plug.getName() )
self.assertEqual( s2["n"]["p"].direction(), plug.direction() )
self.assertEqual( s2["n"]["p"].getFlags(), plug.getFlags() )
self.assertEqual( s2["n"]["p"].keys(), plug.keys() )
self.assertEqual( s2["n"]["p"]["value"].getValue(), plug["value"].getValue() )
self.assertEqual( s2["n"]["p"]["value"].defaultValue(), plug["value"].defaultValue() )
self.assertEqual( s2["n"]["p"]["name"].getValue(), plug["name"].getValue() )
self.assertEqual( s2["n"]["p"]["name"].defaultValue(), plug["name"].defaultValue() )
if "enable" in plug.keys():
self.assertEqual( s2["n"]["p"]["enable"].getValue(), plug["enable"].getValue() )
self.assertEqual( s2["n"]["p"]["enable"].defaultValue(), plug["enable"].defaultValue() )
if isinstance( plug, Gaffer.IntPlug ):
self.assertEqual( s2["n"]["p"]["value"].minValue(), plug.minValue() )
self.assertEqual( s2["n"]["p"]["value"].maxValue(), plug.maxValue() )
def assertCounterpart( self, plug ):
p2 = plug.createCounterpart( "testName", Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getName(), "testName" )
self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getFlags(), plug.getFlags() )
self.assertEqual( p2.keys(), plug.keys() )
if "value" in plug.keys():
self.assertEqual( p2["value"].getValue(), plug["value"].getValue() )
self.assertEqual( p2["value"].defaultValue(), plug["value"].defaultValue() )
if "name" in plug.keys():
self.assertEqual( p2["name"].getValue(), plug["name"].getValue() )
self.assertEqual( p2["name"].defaultValue(), plug["name"].defaultValue() )
if "enable" in plug.keys():
self.assertEqual( p2["enable"].getValue(), plug["enable"].getValue() )
self.assertEqual( p2["enable"].defaultValue(), plug["enable"].defaultValue() )
if isinstance( plug, Gaffer.IntPlug ):
self.assertEqual( p2.minValue(), plug.minValue() )
self.assertEqual( p2.maxValue(), plug.maxValue() )
def test( self ) :
constructed = {}
constructed["defaults"] = {}
constructed["specified"] = {}
constructed["defaults"]["empty"] = Gaffer.NameValuePlug()
constructed["defaults"]["partialEmpty"] = Gaffer.NameValuePlug()
constructed["defaults"]["partialEmpty"].addChild( Gaffer.StringPlug( "name", defaultValue = "key") )
# Note that if we specify the direction and flags without specifying argument names, this is ambiguous
# with the later forms of the constructor. I guess this is OK since the old serialised forms
# of MemberPlug do include the argument names, and we want to deprecate this form anyway
constructed["specified"]["empty"] = Gaffer.NameValuePlug( "foo", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
constructed["specified"]["partialEmpty"] = Gaffer.NameValuePlug( "foo", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
constructed["specified"]["partialEmpty"].addChild( Gaffer.StringPlug( "name", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic, defaultValue = "key" ) )
constructed["defaults"]["fromData"] = Gaffer.NameValuePlug( "key", IECore.IntData(42) )
constructed["specified"]["fromData"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), "foo", Gaffer.Plug.Direction.Out, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
constructed["defaults"]["fromPlug"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( minValue = -3, maxValue = 5) )
constructed["specified"]["fromPlug"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ), "foo" )
constructed["defaults"]["fromDataEnable"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), True )
constructed["specified"]["fromDataEnable"] = Gaffer.NameValuePlug( "key", IECore.IntData(42), True, "foo", Gaffer.Plug.Direction.Out, Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
constructed["defaults"]["fromPlugEnable"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug(), True )
constructed["specified"]["fromPlugEnable"] = Gaffer.NameValuePlug( "key", Gaffer.IntPlug( minValue = -7, maxValue = 15, direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) , True, "foo" )
for k in [ "empty", "fromData", "fromPlug", "fromDataEnable", "fromPlugEnable" ]:
defa = constructed["defaults"][k]
spec = constructed["specified"][k]
numChildren = 3 if "Enable" in k else 2
if k == "empty":
numChildren = 0
self.assertEqual( len( spec.children() ), numChildren )
self.assertEqual( len( defa.children() ), numChildren )
self.assertEqual( defa.getName(), "NameValuePlug" )
self.assertEqual( spec.getName(), "foo" )
self.assertEqual( defa.direction(), Gaffer.Plug.Direction.In )
self.assertEqual( spec.direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( defa.getFlags(), Gaffer.Plug.Flags.Default )
self.assertEqual( spec.getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
if k == "empty":
self.assertNotIn( "name", defa )
self.assertNotIn( "name", spec )
self.assertNotIn( "value", defa )
self.assertNotIn( "value", spec )
elif k == "partialEmpty":
self.assertEqual( defa["name"].getValue(), "key" )
self.assertEqual( spec["name"].getValue(), "key" )
self.assertNotIn( "value", defa )
self.assertNotIn( "value", spec )
else:
self.assertEqual( defa["name"].getValue(), "key" )
self.assertEqual( spec["name"].getValue(), "key" )
if "fromPlug" in k:
self.assertEqual( defa["value"].getValue(), 0 )
self.assertEqual( spec["value"].getValue(), 0 )
else:
self.assertEqual( defa["value"].getValue(), 42 )
self.assertEqual( spec["value"].getValue(), 42 )
if k == "empty":
# A completely empty NameValuePlug is invalid, but we have to partially
# support it because old serialisation code will create these before
# the addChild's run to create name and value
self.assertCounterpart( defa )
self.assertCounterpart( spec )
# We shouldn't ever serialise invalid plugs though - if the children
# haven't been created by the time we try to serialise, that's a bug
self.assertRaises( RuntimeError, self.assertPlugSerialises, spec )
elif k == "partialEmpty":
# A NameValuePlug with a name but no value, on the other hand, is just
# broken
self.assertRaises( RuntimeError, self.assertPlugSerialises, spec )
self.assertRaises( RuntimeError, self.assertCounterpart, defa )
self.assertRaises( RuntimeError, self.assertCounterpart, spec )
else:
self.assertPlugSerialises( spec )
self.assertCounterpart( defa )
self.assertCounterpart( spec )
def testBasicRepr( self ) :
p = Gaffer.NameValuePlug( "key", IECore.StringData( "value" ) )
self.assertEqual(
repr( p ),
'Gaffer.NameValuePlug( "key", Gaffer.StringPlug( "value", defaultValue = \'value\', ), "NameValuePlug", Gaffer.Plug.Flags.Default )'
)
def testEmptyPlugRepr( self ) :
# Use the deprecated constructor to create a NameValuePlug without name or value
p = Gaffer.NameValuePlug( "mm", direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertRaises( RuntimeError, repr, p )
def testValueTypes( self ) :
for v in [
IECore.FloatVectorData( [ 1, 2, 3 ] ),
IECore.IntVectorData( [ 1, 2, 3 ] ),
IECore.StringVectorData( [ "1", "2", "3" ] ),
IECore.V3fVectorData( [ imath.V3f( x ) for x in range( 1, 5 ) ] ),
IECore.Color3fVectorData( [ imath.Color3f( x ) for x in range( 1, 5 ) ] ),
IECore.M44fVectorData( [ imath.M44f() * x for x in range( 1, 5 ) ] ),
IECore.V2iVectorData( [ imath.V2i( x ) for x in range( 1, 5 ) ] ),
IECore.V3fData( imath.V3f( 1, 2, 3 ) ),
IECore.V2fData( imath.V2f( 1, 2 ) ),
IECore.M44fData( imath.M44f( *range(16) ) ),
IECore.Box2fData( imath.Box2f( imath.V2f( 0, 1 ), imath.V2f( 1, 2 ) ) ),
IECore.Box2iData( imath.Box2i( imath.V2i( -1, 10 ), imath.V2i( 11, 20 ) ) ),
IECore.Box3fData( imath.Box3f( imath.V3f( 0, 1, 2 ), imath.V3f( 3, 4, 5 ) ) ),
IECore.Box3iData( imath.Box3i( imath.V3i( 0, 1, 2 ), imath.V3i( 3, 4, 5 ) ) ),
IECore.InternedStringVectorData( [ "a", "b" ] )
]:
if 'value' in dir( v ):
expected = v.value
else:
expected = v
self.assertEqual( expected, Gaffer.NameValuePlug( "test", v )["value"].getValue() )
def testTransformPlug( self ) :
p = Gaffer.NameValuePlug( "a", Gaffer.TransformPlug() )
self.assertEqual( p["value"].matrix(), imath.M44f() )
def testAdditionalChildrenRejected( self ) :
m = Gaffer.NameValuePlug( "a", IECore.IntData( 10 ) )
self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug() )
self.assertRaises( RuntimeError, m.addChild, Gaffer.StringPlug( "name" ) )
self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "name" ) )
self.assertRaises( RuntimeError, m.addChild, Gaffer.IntPlug( "value" ) )
def testDefaultValues( self ) :
m = Gaffer.NameValuePlug( "a", IECore.IntData( 10 ) )
self.assertTrue( m["value"].defaultValue(), 10 )
self.assertTrue( m["value"].getValue(), 10 )
self.assertTrue( m["name"].defaultValue(), "a" )
self.assertTrue( m["name"].getValue(), "a" )
m = Gaffer.NameValuePlug( "b", IECore.FloatData( 20 ) )
self.assertTrue( m["value"].defaultValue(), 20 )
self.assertTrue( m["value"].getValue(), 20 )
self.assertTrue( m["name"].defaultValue(), "b" )
self.assertTrue( m["name"].getValue(), "b" )
m = Gaffer.NameValuePlug( "c", IECore.StringData( "abc" ) )
self.assertTrue( m["value"].defaultValue(), "abc" )
self.assertTrue( m["value"].getValue(), "abc" )
self.assertTrue( m["name"].defaultValue(), "c" )
self.assertTrue( m["name"].getValue(), "c" )
def testNonValuePlugs( self ) :
p1 = Gaffer.NameValuePlug( "name", Gaffer.Plug(), name = "p1", defaultEnabled = False )
p2 = p1.createCounterpart( "p2", Gaffer.Plug.Direction.In )
self.assertTrue( p1.settable() )
self.assertTrue( p2.settable() )
p2.setInput( p1 )
self.assertEqual( p2["name"].getInput(), p1["name"] )
self.assertEqual( p2["value"].getInput(), p1["value"] )
self.assertTrue( p1.settable() )
self.assertFalse( p2.settable() )
p2.setInput( None )
self.assertTrue( p2.settable() )
self.assertTrue( p1.isSetToDefault() )
p1["name"].setValue( "nonDefault" )
self.assertFalse( p1.isSetToDefault() )
p1.setToDefault()
self.assertTrue( p1.isSetToDefault() )
p1["name"].setValue( "nonDefault" )
p1["enabled"].setValue( True )
p2.setFrom( p1 )
self.assertEqual( p2["name"].getValue(), p1["name"].getValue() )
self.assertEqual( p2["enabled"].getValue(), p1["enabled"].getValue() )
self.assertEqual( p1.hash(), p2.hash() )
p2["enabled"].setValue( False )
self.assertNotEqual( p1.hash(), p2.hash() )
def testDynamicFlags( self ) :
def assertFlags( script ) :
self.assertEqual( script["n"]["user"]["p1"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( script["n"]["user"]["p1"]["name"].getFlags(), Gaffer.Plug.Flags.Default )
self.assertEqual( script["n"]["user"]["p1"]["value"].getFlags(), Gaffer.Plug.Flags.Default )
c = script["n"]["user"]["p1"].createCounterpart( "c", Gaffer.Plug.Direction.In )
self.assertEqual( c.getFlags(), script["n"]["user"]["p1"].getFlags() )
self.assertEqual( script["n"]["user"]["p2"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
self.assertEqual( script["n"]["user"]["p2"]["name"].getFlags(), Gaffer.Plug.Flags.Default )
self.assertEqual( script["n"]["user"]["p2"]["value"].getFlags(), Gaffer.Plug.Flags.Default )
self.assertEqual( script["n"]["user"]["p2"]["enabled"].getFlags(), Gaffer.Plug.Flags.Default )
c = script["n"]["user"]["p2"].createCounterpart( "c", Gaffer.Plug.Direction.In )
self.assertEqual( c.getFlags(), script["n"]["user"]["p2"].getFlags() )
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["p1"] = Gaffer.NameValuePlug( "name1", Gaffer.IntPlug( defaultValue = 1 ), flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["user"]["p2"] = Gaffer.NameValuePlug( "name2", Gaffer.IntPlug( defaultValue = 1 ), defaultEnabled = False, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
assertFlags( s )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
assertFlags( s2 )
s3 = Gaffer.ScriptNode()
s3.execute( s2.serialise() )
assertFlags( s3 )
if __name__ == "__main__":
unittest.main()
|
|
import rospy
import roslib
import rosgraph_msgs
import std_msgs.msg
import navigation
import heapq
import random
import database
class Node(object):
IDLE = 0
RESPONDING = 1
BUSY = 2
def __init__(self, name):
self.name = name
self.status = Node.IDLE
self.idled = True
self.navigator = navigation.Navigation(name)
self.jobs = []
self.current_job = None
self.publisher = rospy.Publisher(name, std_msgs.msg.String, queue_size=10)
self.rate = rospy.Rate(10)
rospy.Subscriber('clock', rosgraph_msgs.msg.Clock, self._clock_tick_callback)
rospy.Subscriber('scheduler', std_msgs.msg.String, self._scheduler_event_callback)
self._loop()
def _get_status(self):
pass
def _assign_job(self, job):
self.current_job = job
print "Assigning job %s" % str(job)
# request the node to move to new target coordinates
self.navigator.move(job[3])
def _assign_next_job_if_available(self):
if len(self.jobs):
self._assign_job(heapq.heappop(self.jobs))
else:
self.current_job = None
self.status = Node.IDLE
def _loop(self):
while not rospy.is_shutdown():
self.rate.sleep()
curr_job_priority, curr_job_description, curr_job_time, curr_job_destination = self.current_job if self.current_job else (None, None, None, None)
try:
# get next job without altering the queue to check if there is a job requiring immediate attention
next_job_priority, next_job_description, next_job_time, next_job_destination = heapq.heappop(list(self.jobs)) if self.jobs else (None, None, None, None)
except ValueError:
raise Exception("Bogus job: %s" % heapq.heappop(list(self.jobs)))
# if there is a job we are attending to
if self.current_job:
# if the priority is 0, we need to stop everything and attend to it
# if the priority is lower (1+) then that job will be processed only once the first completes
if next_job_priority == 0 and next_job_priority < curr_job_priority:
# stop everything, this is an emergency
self._assign_next_job_if_available()
continue
# check whether the node has reached the location where the job takes place
self.navigator.movement_publisher.publish(self.navigator.move_cmd)
if self.navigator.has_arrived():
print self.current_job
# if the job has not yet completed
if curr_job_time > 0:
# reassign the job with less time to finish it, so to eventually complete
self.current_job = (curr_job_priority, curr_job_description, int(curr_job_time)-1, curr_job_destination)
# change the level decay rates while the event is in progress (negative rates replenish)
if 'eat' in curr_job_description:
self.levels['Fullness'][1] = -4
self.levels['Hydration'][1] = -2
elif 'meds' in curr_job_description:
self.levels['Health'][1] = -0.5
elif 'Resident.idle' in curr_job_description:
self.levels['Entertainment'][1] = -2
elif 'gym' in curr_job_description:
self.levels['Fitness'][1] = -10
self.levels['Hydration'][1] = 2
self.levels['Entertainment'][1] = -4
self.levels['Sanity'][1] = -2
elif 'toilet' in curr_job_description:
self.levels['Relief'][1] = -15
elif 'bath' in curr_job_description:
self.levels['Hygiene'][1] = -10
self.levels['Sanity'][1] = -2
elif 'sleep' in curr_job_description:
self.levels['Health'][1] = -0.2
self.levels['Sanity'][1] = -0.2
elif 'heart_attack' in curr_job_description:
self.levels['Health'][1] = 15
self.status = Node.BUSY
self.idled = False
continue
else:
# out of time, job effectively completed
# if there is another job in the queue, process it now
if 'eat' in curr_job_description:
self.levels['Fullness'][1] = 1
self.levels['Hydration'][1] = 0.5
elif 'meds' in curr_job_description:
self.levels['Health'][1] = 0.1
elif 'Resident.idle' in curr_job_description:
self.levels['Entertainment'][1] = 0.5
elif 'gym' in curr_job_description:
self.levels['Fitness'][1] = 0.5
self.levels['Hydration'][1] = 0.5
self.levels['Entertainment'][1] = 0.5
self.levels['Sanity'][1] = 0.1
elif 'toilet' in curr_job_description:
self.levels['Relief'][1] = 0.5
elif 'bath' in curr_job_description:
self.levels['Hygiene'][1] = 0.5
self.levels['Sanity'][1] = 0.1
elif 'sleep' in curr_job_description:
self.levels['Health'][1] = 0.1
self.levels['Sanity'][1] = 0.1
elif 'heart_attack' in curr_job_description:
self.levels['Health'][1] = 0.1
if self.type == "Robot" and self.navigator.has_arrived() and not(self.idled):
# return the robot to its idle position
heapq.heappush(self.jobs, (0, 'robot.returning', 0, self.idle_position))
if self.navigator.has_arrived():
self.idled = True
# print("Self.idled in loop = ", self.idled)
self._assign_next_job_if_available()
continue
self.status = Node.RESPONDING
continue
self._assign_next_job_if_available()
continue
def _clock_tick_callback(self, msg):
pass
def _scheduler_event_callback(self, msg):
pass
class Robot(Node):
def __init__(self, name):
self.type = "Robot"
self.idle_position = database.Database.ROBOT_IDLES.get(name)
super(Robot, self).__init__(name)
# #!/usr/bin/python
# import rospy
# import node
# import database
# class Resident(node.Human):
# def __init__(self, name):
# super(Resident, self).__init__(name)
# def _scheduler_event_callback(self, msg):
# if msg.data.split()[1].startswith(self.__class__.__name__):
# self.jobs.put(tuple(msg.data.split()))
# if __name__ == '__main__':
# rospy.init_node('robot_1')
# resident = Resident('robot_1')
class Human(Node):
levels = {}
def __init__(self, name):
for level in database.Database.LEVELS:
self.levels[level] = [100, 0.5] #status = (value,rate)
if level == 'Sanity' or level == 'Health':
self.levels[level][1] = 0.1
elif level == 'Fullness':
self.levels[level][1] = 1
print self.levels[level]
self.type = "Human"
self.human_pub = rospy.Publisher("human", std_msgs.msg.String, queue_size=10)
super(Human, self).__init__(name)
def _clock_tick_callback(self, msg):
# if this isn't the very first clock tick
if int(msg.clock.secs) > 0:
# if the clock tick is divisible evenly by 3
if ((int(msg.clock.secs) % 3 == 0) and (int(msg.clock.nsecs)==0)):
# reduce each attribute level by its current decay rate
self.levels['Fullness'][0] -= self.levels['Fullness'][1]
self.levels['Health'][0] -= self.levels['Health'][1]
self.levels['Entertainment'][0] -= self.levels['Entertainment'][1]
self.levels['Sanity'][0] -= self.levels['Sanity'][1]
self.levels['Fitness'][0] -= self.levels['Fitness'][1]
self.levels['Hydration'][0] -= self.levels['Hydration'][1]
self.levels['Hygiene'][0] -= self.levels['Hygiene'][1]
self.levels['Relief'][0] -= self.levels['Relief'][1]
for levels in self.levels:
if (self.levels[levels][0] > 100):
self.levels[levels][0] = 100
if (self.levels[levels][0] <= 0):
self.levels[levels][0] = 0
publish_string = ("%s: %s"%(levels,self.levels[levels][0]))
self.human_pub.publish(publish_string)
# loop through all attributes
for attribute, value in self.levels.iteritems():
# publish them
self.human_pub.publish("%s: %d" % (attribute, value[0]))
#print attribute, value[0]
|
|
# Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import requests
import socket
import testresources
import testtools
import time
import uuid
from heatclient import client as heatclient
from keystoneclient.v2_0 import client as ksclient
import murano.tests.functional.engine.config as cfg
CONF = cfg.cfg.CONF
class Client(object):
def __init__(self, user, password, tenant, auth_url, murano_url):
self.auth = ksclient.Client(username=user, password=password,
tenant_name=tenant, auth_url=auth_url)
self.endpoint = murano_url
self.headers = {
'X-Auth-Token': self.auth.auth_token,
'Content-type': 'application/json'
}
def create_environment(self, name):
post_body = {'name': name}
resp = requests.post(self.endpoint + 'environments',
data=json.dumps(post_body),
headers=self.headers)
return resp.json()
def delete_environment(self, environment_id):
endpoint = '{0}environments/{1}'.format(self.endpoint, environment_id)
return requests.delete(endpoint, headers=self.headers)
def get_environment(self, environment_id):
return requests.get('{0}environments/{1}'.format(self.endpoint,
environment_id),
headers=self.headers).json()
def create_session(self, environment_id):
post_body = None
endpoint = '{0}environments/{1}/configure'.format(self.endpoint,
environment_id)
return requests.post(endpoint, data=post_body,
headers=self.headers).json()
def deploy_session(self, environment_id, session_id):
endpoint = '{0}environments/{1}/sessions/{2}/deploy'.format(
self.endpoint, environment_id, session_id)
return requests.post(endpoint, data=None, headers=self.headers)
def create_service(self, environment_id, session_id, json_data):
headers = self.headers.copy()
headers.update({'x-configuration-session': session_id})
endpoint = '{0}environments/{1}/services'.format(self.endpoint,
environment_id)
return requests.post(endpoint, data=json.dumps(json_data),
headers=headers).json()
def delete_service(self, environment_id, session_id, service_id):
headers = self.headers.copy()
headers.update({'x-configuration-session': session_id})
endpoint = '{0}environments/{1}/services/{2}'.format(
self.endpoint, environment_id, service_id)
requests.delete(endpoint, headers=headers)
def wait_for_environment_deploy(self, environment_id):
environment = self.get_environment(environment_id)
start_time = time.time()
while environment['status'] != 'ready':
if time.time() - start_time > 1200:
return
time.sleep(5)
environment = self.get_environment(environment_id)
return environment
def get_ip_list(self, environment):
return [service['instance']['ipAddresses']
for service in environment['services']]
def deployments_list(self, environment_id):
endpoint = '{0}environments/{1}/deployments'.format(self.endpoint,
environment_id)
return requests.get(endpoint,
headers=self.headers).json()['deployments']
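# --- Usage sketch (editor's addition) ----------------------------------------
# A minimal, hypothetical walkthrough of the Client API above, showing the
# create -> configure -> deploy -> wait flow that the tests below rely on.
# The credentials, URLs and the Telnet service body are placeholders, not
# values taken from this test suite, and the helper is never invoked here.
def _example_client_usage():
    client = Client(user='demo', password='secret', tenant='demo',
                    auth_url='http://127.0.0.1:5000/v2.0',
                    murano_url='http://127.0.0.1:8082/v1/')
    environment = client.create_environment('example-env')
    session = client.create_session(environment['id'])
    telnet_app = {
        "instance": {
            "flavor": "m1.medium",
            "image": "example-linux-image",
            "assignFloatingIp": True,
            "?": {"type": "io.murano.resources.LinuxMuranoInstance",
                  "id": str(uuid.uuid4())},
            "name": "exampleInstance",
        },
        "name": "exampleApp",
        "?": {"type": "io.murano.apps.linux.Telnet", "id": str(uuid.uuid4())},
    }
    client.create_service(environment['id'], session['id'], telnet_app)
    client.deploy_session(environment['id'], session['id'])
    return client.wait_for_environment_deploy(environment['id'])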
class MuranoBase(testtools.TestCase, testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
@classmethod
def setUpClass(cls):
super(MuranoBase, cls).setUpClass()
cfg.load_config()
cls.client = Client(user=CONF.murano.user,
password=CONF.murano.password,
tenant=CONF.murano.tenant,
auth_url=CONF.murano.auth_url,
murano_url=CONF.murano.murano_url)
cls.linux = CONF.murano.linux_image
cls.windows = CONF.murano.windows_image
heat_url = cls.client.auth.service_catalog.url_for(
service_type='orchestration', endpoint_type='publicURL')
cls.heat_client = heatclient.Client('1', endpoint=heat_url,
token=cls.client.auth.auth_token)
cls.location = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
cls.packages_path = '/'.join(cls.location.split('/')[:-1:])
def upload_package(package_name, body, app):
#TODO(efedorova): Use muranoclient to upload packages
files = {'%s' % package_name: open(
os.path.join(cls.packages_path, app), 'rb')}
post_body = {'JsonString': json.dumps(body)}
request_url = '{endpoint}{url}'.format(
endpoint=CONF.murano.murano_url,
url='catalog/packages')
headers = cls.client.headers.copy()
del headers['Content-type']
return requests.post(request_url,
files=files,
data=post_body,
headers=headers).json()['id']
cls.postgre_id = upload_package(
'PostgreSQL',
{"categories": ["Databases"], "tags": ["tag"]},
'murano-app-incubator/io.murano.apps.PostgreSql.zip')
cls.apache_id = upload_package(
'Apache',
{"categories": ["Application Servers"], "tags": ["tag"]},
'murano-app-incubator/io.murano.apps.apache.Apache.zip')
cls.tomcat_id = upload_package(
'Tomcat',
{"categories": ["Application Servers"], "tags": ["tag"]},
'murano-app-incubator/io.murano.apps.apache.Tomcat.zip')
cls.telnet_id = upload_package(
'Telnet',
{"categories": ["Web"], "tags": ["tag"]},
'murano-app-incubator/io.murano.apps.linux.Telnet.zip')
cls.ad_id = upload_package(
'Active Directory',
{"categories": ["Microsoft Services"], "tags": ["tag"]},
'murano-app-incubator/io.murano.windows.ActiveDirectory.zip')
def setUp(self):
super(MuranoBase, self).setUp()
self.environments = []
self.stack_names = []
def tearDown(self):
super(MuranoBase, self).tearDown()
for env in self.environments:
try:
self.client.delete_environment(env)
except Exception:
pass
def check_port_access(self, ip, port):
start_time = time.time()
while time.time() - start_time < 300:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), port))
sock.close()
if result == 0:
break
time.sleep(5)
self.assertEqual(0, result,
'{0} port is closed on instance'.format(port))
def deployment_success_check(self, environment, port):
deployments = self.client.deployments_list(environment['id'])
for deployment in deployments:
self.assertEqual('success', deployment['state'],
'Deployment status is {0}'.format(
deployment['state']))
ip = self.client.get_ip_list(environment)
if ip:
self.assertEqual(2, len(ip[0]),
'Instance does not have floatingIP')
self.check_port_access(ip[0][1], port)
else:
self.fail('Instance does not have IPs')
def test_deploy_telnet(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
environment_name = 'Telnetenv' + uuid.uuid4().hex[:5]
environment = self.client.create_environment(environment_name)
self.environments.append(environment['id'])
session = self.client.create_session(environment['id'])
self.client.create_service(environment['id'], session['id'], post_body)
self.client.deploy_session(environment['id'], session['id'])
env = self.client.wait_for_environment_deploy(environment['id'])
self.assertIsNotNone(env)
self.deployment_success_check(env, 23)
def test_deploy_apache(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.Apache",
"id": str(uuid.uuid4())
}
}
environment_name = 'Apacheenv' + uuid.uuid4().hex[:5]
environment = self.client.create_environment(environment_name)
self.environments.append(environment['id'])
self.stack_names.append(environment_name)
session = self.client.create_session(environment['id'])
self.client.create_service(environment['id'], session['id'], post_body)
self.client.deploy_session(environment['id'], session['id'])
env = self.client.wait_for_environment_deploy(environment['id'])
self.assertIsNotNone(env)
self.deployment_success_check(env, 80)
def test_deploy_postgresql(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.PostgreSql",
"id": str(uuid.uuid4())
}
}
environment_name = 'Postgreenv' + uuid.uuid4().hex[:5]
environment = self.client.create_environment(environment_name)
self.environments.append(environment['id'])
self.stack_names.append(environment_name)
session = self.client.create_session(environment['id'])
self.client.create_service(environment['id'], session['id'], post_body)
self.client.deploy_session(environment['id'], session['id'])
env = self.client.wait_for_environment_deploy(environment['id'])
self.assertIsNotNone(env)
self.deployment_success_check(env, 5432)
def test_deploy_tomcat(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.Tomcat",
"id": str(uuid.uuid4())
}
}
environment_name = 'Tomcatenv' + uuid.uuid4().hex[:5]
environment = self.client.create_environment(environment_name)
self.environments.append(environment['id'])
session = self.client.create_session(environment['id'])
self.client.create_service(environment['id'], session['id'], post_body)
self.client.deploy_session(environment['id'], session['id'])
env = self.client.wait_for_environment_deploy(environment['id'])
self.assertIsNotNone(env)
self.deployment_success_check(env, 8080)
def _get_telnet_app(self):
return {
"instance": {
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"flavor": "m1.medium",
"image": self.linux,
"name": "instance{0}".format(uuid.uuid4().hex[:5]),
},
"name": "app{0}".format(uuid.uuid4().hex[:5]),
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
def _quick_deploy(self, name, *apps):
environment = self.client.create_environment(name)
session = self.client.create_session(environment['id'])
environment_id, session_id = environment['id'], session['id']
for app in apps:
self.client.create_service(environment_id, session_id, app)
self.client.deploy_session(environment_id, session_id)
return self.client.wait_for_environment_deploy(environment_id)
def _get_stack(self, environment_id):
for stack in self.heat_client.stacks.list():
if environment_id in stack.description:
return stack
def test_instance_refs_are_removed_after_application_is_removed(self):
name = 'e' + uuid.uuid4().hex
# create environment with telnet application
application1 = self._get_telnet_app()
application2 = self._get_telnet_app()
application_id = application1['?']['id']
instance_name = application1['instance']['name']
apps = [application1, application2]
environment_id = self._quick_deploy(name, *apps)['id']
# add environment to the list for tear-down clean-up
self.environments.append(environment_id)
# delete telnet application
session_id = self.client.create_session(environment_id)['id']
self.client.delete_service(environment_id, session_id, application_id)
self.client.deploy_session(environment_id, session_id)
self.client.wait_for_environment_deploy(environment_id)
stack_name = self._get_stack(environment_id).stack_name
template = self.heat_client.stacks.template(stack_name)
ip_addresses = '{0}-assigned-ip'.format(instance_name)
floating_ip = '{0}-FloatingIPaddress'.format(instance_name)
self.assertNotIn(ip_addresses, template['outputs'])
self.assertNotIn(floating_ip, template['outputs'])
self.assertNotIn(instance_name, template['resources'])
def test_stack_deletion_after_env_is_deleted(self):
name = 'e' + uuid.uuid4().hex
application = self._get_telnet_app()
environment = self._quick_deploy(name, application)
self.assertIsNotNone(environment)
stack = self._get_stack(environment['id'])
self.assertIsNotNone(stack)
self.client.delete_environment(environment['id'])
start_time = time.time()
while stack is not None:
if time.time() - start_time > 300:
break
time.sleep(5)
stack = self._get_stack(environment['id'])
self.assertIsNone(stack, 'stack is not deleted')
|
|
import os
import sys
import argparse
import datetime
import logging
import multiprocessing as mp
import json
import dateutil.relativedelta
import numpy as np
import pyfolio as pf
import pandas as pd
from pyfolio import timeseries
try:
    from . import module_loader
except ImportError:
    import module_loader
try:
    from . import algo_runner
except ImportError:
    import algo_runner
from learn.context import LearnContext
from learn.feature.rsi import RsiFeature
from learn.feature.macd import MacdFeature
sys.dont_write_bytecode = True
data_path = os.path.join(os.path.dirname(__file__), "..", "data")
def valid_date(s):
try:
return datetime.datetime.strptime(s, "%Y%m%d").date()
except ValueError:
msg = "Not a valid date: '{0}'.".format(s)
raise argparse.ArgumentTypeError(msg)
def valid_relative_date(s):
if s.endswith('y'):
return dateutil.relativedelta.relativedelta(years=+int(s[:-1]))
elif s.endswith('m'):
return dateutil.relativedelta.relativedelta(months=+int(s[:-1]))
elif s.endswith('d'):
return dateutil.relativedelta.relativedelta(days=+int(s[:-1]))
else:
raise argparse.ArgumentTypeError("Not a valid relative date: '{0}'.".format(s))
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--debug", help="print debug information", action="count")
parser.add_argument("-v", "--version", action="version", version='%(prog)s 1.0')
parser.add_argument("-s", "--stock_id", help="stock ids to process",
action="append", required=True, dest="stock_ids")
parser.add_argument("--capital_base", help="initial capital base value, default 100000.0",
type=float, default=100000.0)
parser.add_argument("--data_range", help="stock data range", nargs=2, required=True,
type=valid_date,
metavar=('<begin date>', '<end date>'))
parser.add_argument("--optimize_range", help="data range using to do optimize", nargs=2, required=True,
type=valid_date,
metavar=('<begin date>', '<end date>'))
parser.add_argument("--wfa_size", help="walking forward analysis window size, such as 1y or 6m", required=True,
type=valid_relative_date, metavar='<size>')
parser.add_argument("--strategy", help="trading strategy to evaluate", required=True,
type=str, metavar='<name>')
parser.add_argument("--object_function", help="object function to evaluate trading strategy", required=True,
type=str, metavar='<name>')
parser.add_argument("--stock_data_provider", help="data provider for stock data", required=True,
type=str, metavar='<name>')
parser.add_argument("--no_pool", help="do not run with pool",
action='store_true')
parser.add_argument("--load_optimized_parameter", help="load optimized parameter",
action='store_true')
parser.add_argument("--skip_wfa", help="skip wfa running stage",
action='store_true')
parser.add_argument("--use_default_parameter", help="use strategy default parameter",
action='store_true')
parser.add_argument("--learn_data_file", help="append learn data to file",
type=str, metavar='<learn data file>')
return parser.parse_args()
def validate_args(args):
if args.data_range[0] >= args.data_range[1]:
raise argparse.ArgumentTypeError('invalid data range:{}'.format(args.data_range))
if args.optimize_range[0] >= args.optimize_range[1]:
raise argparse.ArgumentTypeError('invalid optimize range:{}'.format(args.optimize_range))
if (args.optimize_range[0] < args.data_range[0] or
    args.optimize_range[0] >= args.data_range[1] or
    args.optimize_range[1] <= args.data_range[0] or
    args.optimize_range[1] > args.data_range[1]):
raise argparse.ArgumentTypeError('invalid optimize range:{}, data range:{}'.format(args.optimize_range, args.data_range))
if (args.optimize_range[1] + args.wfa_size <= args.optimize_range[1]):
raise argparse.ArgumentTypeError('invalid wfa size:{}'.format(args.wfa_size))
if (args.optimize_range[1] + args.wfa_size > args.data_range[1]):
raise argparse.ArgumentTypeError('invalid wfa size:{}'.format(args.wfa_size))
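# --- Walk-forward windowing sketch (editor's addition) ------------------------
# Illustrates, under the same date conventions validate_args enforces above,
# how the out-of-sample windows stepped through at the end of wfa_runner_main
# line up: the first window starts at optimize_range[1], each window spans
# wfa_size, and the last one is clipped to data_range[1]. The helper is
# illustrative only and never called by this script.
def _example_wfa_windows(optimize_end, data_end, wfa_size):
    windows = []
    begin = optimize_end
    end = begin + wfa_size
    while begin < data_end:
        if end > data_end:
            end = data_end
        windows.append((begin, end))
        begin = end
        end = begin + wfa_size
    return windows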
class Analyzer(object):
def __init__(self, obj_func_module):
super().__init__()
self.parameter_ = None
self.obj_func_module_ = obj_func_module
self.object_function_accept_ = module_loader.load_module_func(obj_func_module, 'accept')
self.object_function_better_ = module_loader.load_module_func(obj_func_module, 'better_results')
self.best_results_ = None
self.learn_data_ = None
def analyze(self, result):
for parameter, results, learn_data in result:
if self.learn_data_ is not None and learn_data is not None:
self.learn_data_ = pd.concat([self.learn_data_, learn_data])
else:
self.learn_data_ = learn_data
if results is None:
continue
returns, positions, transactions = pf.utils.extract_rets_pos_txn_from_zipline(results)
perf_stats = timeseries.perf_stats(returns,
positions=positions,
transactions=transactions)
logging.info("Parameter:%s", parameter)
logging.info(perf_stats)
logging.info("Sharpe Ratio:{}%".format(np.round(perf_stats.loc['Sharpe ratio'] * 100)))
logging.info("")
cur_results = (results, perf_stats)
if self.object_function_accept_(cur_results) and self.object_function_better_(cur_results, self.best_results_):
self.best_results_ = cur_results
self.parameter_ = parameter
def algo_running_worker(args, parameter):
strategy_module = module_loader.load_module_from_file(args.strategy)
stock_data_provider = module_loader.load_module_from_file(args.stock_data_provider)
create_strategy = module_loader.load_module_func(strategy_module, 'create_strategy')
strategy = create_strategy(args)
learn_context = None
if args.load_optimized_parameter or args.use_default_parameter:
learn_context = LearnContext()
learn_context.add_feature(RsiFeature())
learn_context.add_feature(MacdFeature())
strategy.learn_context = learn_context
runner = algo_runner.AlgoRunner(stock_data_provider, args.capital_base, args)
symbols = args.stock_ids
start_date = args.optimize_range[0]
end_date = args.optimize_range[1]
def tmp_analyze_func(context=None, results=None):
pass
strategy.current_parameter = parameter
logging.info('running strategy:%s', strategy)
try:
perf_data = runner.run(strategy,
symbols,
start_date=start_date,
end_date=end_date,
analyze_func=tmp_analyze_func)
return (parameter, perf_data, learn_context.get_learning_data() if learn_context is not None else None)
except Exception:
logging.exception('running strategy:%s failed', strategy)
return (parameter, None, None)
def __get_optimized_parameter_file_path(stock_ids, strategy):
file_name = '_'.join(stock_ids) + '-' + strategy.name + '.json'
parameters_dir = os.path.abspath(os.path.join(data_path, 'parameters'))
if not os.path.exists(parameters_dir):
os.makedirs(parameters_dir)
return os.path.join(parameters_dir, file_name)
def __save_optimized_parameter(stock_ids, strategy, perf_stats):
with open(__get_optimized_parameter_file_path(stock_ids, strategy), 'w') as f:
json.dump({'stock_ids': stock_ids,
'strategy' : {
'name': strategy.name,
'parameter': strategy.current_parameter
},
'perf_stats': perf_stats.to_dict()
}, f)
def __load_optimized_parameter(stock_ids, strategy):
fn = __get_optimized_parameter_file_path(stock_ids, strategy)
if not os.path.exists(fn):
return None
with open(fn, 'r') as f:
obj = json.load(f)
return obj['strategy']['parameter']
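# --- Saved parameter file shape (editor's note) --------------------------------
# __save_optimized_parameter above writes roughly the following JSON; the field
# values shown here are made-up placeholders:
#   {"stock_ids": ["000001"],
#    "strategy": {"name": "example_strategy", "parameter": {"window": 20}},
#    "perf_stats": {"Sharpe ratio": 1.2, ...}}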
def wfa_runner_main():
args = parse_arguments()
if args.debug is None:
args.debug = 0
if args.debug > 0:
logging.getLogger('').setLevel(logging.DEBUG)
else:
logging.getLogger('').setLevel(logging.INFO)
logging.debug('debug level:{}'.format(args.debug))
logging.debug('stock_ids:{}'.format(args.stock_ids))
logging.debug('data_range:{}'.format(args.data_range))
logging.debug('optimize_range:{}'.format(args.optimize_range))
logging.debug('wfa_size:{}'.format(args.wfa_size))
logging.debug('wfa range:{}'.format([args.optimize_range[1], args.optimize_range[1] + args.wfa_size]))
logging.debug('strategy:{}'.format(args.strategy))
logging.debug('object_function:{}'.format(args.object_function))
logging.debug('data_provider:{}'.format(args.stock_data_provider))
validate_args(args)
if args.load_optimized_parameter or args.use_default_parameter:
args.no_pool = True
strategy_module = module_loader.load_module_from_file(args.strategy)
object_function_module = module_loader.load_module_from_file(args.object_function)
stock_data_provider = module_loader.load_module_from_file(args.stock_data_provider)
create_strategy = module_loader.load_module_func(strategy_module, 'create_strategy')
strategy = create_strategy(args)
analyzer = Analyzer(object_function_module)
runner = algo_runner.AlgoRunner(stock_data_provider, args.capital_base, args)
runner.ensure_stock_data(args.stock_ids)
pool = mp.Pool(mp.cpu_count())
parameter_set = strategy.parameter_set()
if args.load_optimized_parameter:
value = __load_optimized_parameter(args.stock_ids,
strategy)
if value is None:
value = strategy.get_default_parameter()
if value is None:
logging.error('unable to find optimized parameter')
return
parameter_set = list([value])
if args.use_default_parameter:
value = strategy.get_default_parameter()
if value is None:
logging.error('unable to find default parameter')
return
parameter_set = list([value])
if args.no_pool:
for parameter in parameter_set:
analyzer.analyze([algo_running_worker(args, parameter)])
else:
pool.starmap_async(algo_running_worker,
list((args, parameter) for parameter in parameter_set),
callback=analyzer.analyze,
error_callback=lambda x:logging.error('starmap async failed:%s', x))
pool.close()
pool.join()
if analyzer.best_results_ is None:
logging.error('no parameter of strategy:[%s] is suitable for the stocks:%s', strategy, args.stock_ids)
return
_, perf_stats = analyzer.best_results_
logging.info('Best results:%s', perf_stats)
strategy.current_parameter = analyzer.parameter_
logging.info('optimized strategy:%s', strategy)
if not args.load_optimized_parameter:
__save_optimized_parameter(args.stock_ids, strategy, perf_stats)
if analyzer.learn_data_ is not None:
logging.info('learn data:%s', analyzer.learn_data_)
if args.learn_data_file:
with open(args.learn_data_file, 'a+') as f:
analyzer.learn_data_.to_csv(f)
if args.skip_wfa:
return
# do wfa analyze
wfa_begin = args.optimize_range[1]
wfa_end = wfa_begin + args.wfa_size
while True:
logging.info('running wfa on out of sample data:%s=>%s', wfa_begin, wfa_end)
try:
runner.run(strategy,
args.stock_ids,
start_date=wfa_begin,
end_date=wfa_end,
analyze_func=None)
except Exception:
logging.exception('failed running wfa on out of sample data:%s=>%s', wfa_begin, wfa_end)
wfa_begin = wfa_end
wfa_end = wfa_begin + args.wfa_size
if wfa_begin >= args.data_range[1]:
break
if wfa_end > args.data_range[1]:
wfa_end = args.data_range[1]
logging.info('next wfa:%s=>%s', wfa_begin, wfa_end)
if __name__ == '__main__':
mp.set_start_method('forkserver')
wfa_runner_main()
|
|
import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot is not None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), 'Darren S')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), 'Darren M')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test: old-style score keys should still appear in the lifetime loserboard
r.set("user_score:{0}".format(14), 5)
# Switch the bot to the (fake) next month and seed a second month of scores,
# so that lifetime totals span multiple months.
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) is None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "Matt Damon & Ben Affleck"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "James A")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("James A", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li>Darren S: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered James A. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, James A. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, James A. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct James A, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.assertEqual("$0", self.trebek_bot.get_user_score())
def test_lifetimescore_includes_multiple_months(self):
# Seed other user's data (to reproduce bug)
self.create_user_scores()
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.trebek_bot.update_score(200)
self.assertEqual("$400", self.trebek_bot.get_user_score(True))
def test_user_lifetime_loserboard_value_includes_multiple_months(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the lifetime loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Legacy Score: $5</li>"
expected += "<li>Allen: $40</li>"
expected += "<li>Mark: $60</li>"
expected += "<li>Melvin: $100</li>"
expected += "<li>Cordarrell: $140</li></ol>"
self.assertEqual(expected, response)
def test_user_lifetime_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek lifetime leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Arian: $10,860</li>"
expected += "<li>Darren S: $1,000</li>"
expected += "<li>Zach: $824</li>"
expected += "<li>Alex: $450</li>"
expected += "<li>Richard: $400</li></ol>"
self.assertEqual(expected, response)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
|
#<!--------------------------------------------------------------------------->
#<!-- ITU - IT University of Copenhagen -->
#<!-- SSS - Software and Systems Section -->
#<!-- File : OpenCV3D.py -->
#<!-- Description: Main class of this project -->
#<!-- Author : Fabricio Batista Narcizo -->
#<!-- : Rued Langgaards Vej 7 - 4D06 - DK-2300 - Copenhagen S -->
#<!-- : fabn[at]itu[dot]dk -->
#<!-- Responsable: Dan Witzner Hansen (witzner[at]itu[dot]dk) -->
#<!-- Fabricio Batista Narcizo (fabn[at]itu[dot]dk) -->
#<!-- Information: No additional information -->
#<!-- Date : 05/04/2015 -->
#<!-- Change : 05/04/2015 - Creation of these classes -->
#<!-- : 06/04/2015 - Comentaries -->
#<!-- Review : 06/04/2015 - Finalized -->
#<!--------------------------------------------------------------------------->
__version__ = '$Revision: 2015040601 $'
########################################################################
import cv2
import sys
import numpy as np
from pylab import *
from collections import deque
from threading import Thread
from Cameras.CameraEnum import CameraEnum
from Cameras.StereoCameras import StereoCameras
from Processing.Configuration import Configuration
from Cameras import CamerasParameters
from Processing.Calibration import Calibration
from Processing.CalibrationEnum import CalibrationEnum
from Processing.Augmented import Augmented
from Processing import Utils
from Settings import Constant as C
########################################################################
class OpenCV3D(object):
"""OpenCV3D class is the main class of this project."""
#----------------------------------------------------------------------#
# Class Attributes #
#----------------------------------------------------------------------#
___plyHeader = '''ply
format ascii 1.0
element vertex %(num)d
property float x
property float y
property float z
end_header
'''
#----------------------------------------------------------------------#
# Class Properties #
#----------------------------------------------------------------------#
@property
def Image(self):
"""Get the last processed image."""
return self.__image
@Image.setter
def Image(self, value):
"""Set the last processed image."""
self.__image = value
@property
def PointsQueue(self):
"""Get the queue with selected points."""
return self.__pointsQueue
@PointsQueue.setter
def PointsQueue(self, value):
"""Set the queue with selected points."""
self.__pointsQueue = value
@property
def IsCalibrating(self):
"""Check if the calibration process is running."""
return self.__isCalibrating
@IsCalibrating.setter
def IsCalibrating(self, value):
"""Set that the calibration process starts."""
self.__isCalibrating = value
@property
def IsSaving(self):
"""Check if the PLY save file process is running."""
return self.__isSaving
@IsSaving.setter
def IsSaving(self, value):
"""Set that the PLY save process starts."""
self.__isSaving = value
@property
def IsFrozen(self):
"""Check if the fundamental matrix process is running."""
return self.__isFrozen
@IsFrozen.setter
def IsFrozen(self, value):
"""Set that the fundamental matrix process is running."""
self.__isFrozen = value
@property
def IsDrawing(self):
"""Check if the system is drawing some object."""
return self.__isDrawing
@IsDrawing.setter
def IsDrawing(self, value):
"""Set that the system will draw objects."""
self.__isDrawing = value
#----------------------------------------------------------------------#
# OpenCV3D Class Constructor #
#----------------------------------------------------------------------#
def __init__(self):
"""OpenCV3D Class Constructor."""
self.Clear()
def __del__(self):
"""OpenCV3D Class Destructor."""
# Stops the main thread system.
self.Stop()
#----------------------------------------------------------------------#
# Public Class Methods #
#----------------------------------------------------------------------#
def Start(self):
"""Start a new thread for managering the system."""
self.__isRunning = True
self.__thread = Thread(target=self.__CaptureThread)
self.__thread.start()
return True
def Stop(self):
"""Stop the main thread."""
if self.__isRunning is not True:
return False
self.__isRunning = False
self.__thread.join(1000)
return True
def Clear(self):
"""Empty all internal parameters used for this class."""
self.hasFundamentalMatrix = self.IsCalibrating = self.IsSaving = self.IsFrozen = self.IsDrawing = False
self.PointsQueue = deque(maxlen=16)
#----------------------------------------------------------------------#
# Private Class Methods #
#----------------------------------------------------------------------#
def __CaptureThread(self):
"""Main thread of this system."""
# Creates a window to show the original images.
cv2.namedWindow("Original", cv2.WINDOW_AUTOSIZE)
cv2.setMouseCallback("Original", self.mouseEvent)
# Creates a window to show the stereo images.
cv2.namedWindow("Stereo", cv2.WINDOW_AUTOSIZE)
# Creates a window to show the depth map.
cv2.namedWindow("DepthMap", cv2.WINDOW_AUTOSIZE)
cv2.createTrackbar("minDisparity", "DepthMap", 1, 32, self.__SetMinDisparity)
cv2.createTrackbar("blockSize", "DepthMap", 1, 5, self.__SetNothing)
# Main loop: analyze each captured image.
while True:
# Check if the fundamental matrix process is running.
if not self.IsFrozen:
# Grabs the next frame from capturing device.
StereoCameras.Instance.Grab()
# Decodes and returns the grabbed video frames.
leftImage, rightImage = StereoCameras.Instance.Retrieve()
# TODO: Uncomment
# leftImage = Utils.get_frame_from_video(C.VIDEO_LEFT_4, 55) # Fail to find chessboard pattern
# leftImage = Utils.get_frame_from_video(C.VIDEO_LEFT_4, 30)
# rightImage = Utils.get_frame_from_video(C.VIDEO_RIGHT_4, 30)
# leftImage = cv2.imread(C.II_1)
# rightImage = cv2.imread(C.II_1)
# Find the pattern in the image.
leftCorners = Configuration.Instance.Pattern.FindCorners(leftImage, not self.IsDrawing)
rightCorners = Configuration.Instance.Pattern.FindCorners(rightImage, not self.IsDrawing)
# leftCorners = Configuration.Instance.Pattern.FindCorners(leftImage)
# rightCorners = Configuration.Instance.Pattern.FindCorners(rightImage)
# Check if the calibration process is running.
if self.IsCalibrating:
print "Start calibration"
# If both patterns have been recognized, start the calibration process.
if leftCorners is not None and rightCorners is not None:
self.__Calibrate(leftCorners, rightCorners)
# Otherwise, stop the calibration process.
else:
self.IsCalibrating = False
print "Done Calibrating"
# Check if the system is calibrated.
elif Configuration.Instance.Calibration.IsCalibrated:
# Check if the system is drawing some object.
if self.IsDrawing:
print "Start drawing"
if leftCorners is not None and rightCorners is not None:
leftImage = self.__Augmentation(leftCorners, leftImage)
# rightImage = self.__Augmentation(rightCorners, rightImage, CameraEnum.RIGHT)
# TODO: Uncomment or delete
print "Done drawing"
# Otherwise, estimates the depth map from two stereo images.
else:
self.__DepthMap(leftImage, rightImage)
# Combine two stereo images in only one window.
self.Image = self.__CombineImages(leftImage, rightImage, 0.5)
cv2.imshow("Original", self.Image)
# Check what the user wants to do.
inputKey = cv2.waitKey(1)
# Esc or letter "q" key.
if inputKey == 27 or inputKey == ord("q"):
break
# Space key.
elif inputKey == 32:
self.IsCalibrating = True
# Letter "s" key.
elif inputKey == ord("s"):
self.IsSaving = True
# Letter "f" key.
elif inputKey == ord("f"):
self.IsFrozen = not self.IsFrozen
# Letter "d" key.
elif inputKey == ord("d"):
self.IsDrawing = not self.IsDrawing
# elif inputKey == ord("j"):
# lImg = Utils.get_frame_from_video(C.VIDEO_LEFT_3, 25)
# rImg = Utils.get_frame_from_video(C.VIDEO_RIGHT_3, 25)
# combImg = self.__CombineImages(lImg, rImg, 0.5)
# cv2.imshow("TESTING", combImg)
# Closes all video capturing devices.
StereoCameras.Instance.Release()
# Close all OpenCV windows.
cv2.destroyAllWindows()
def estimateFundamental(self, x1, x2):
n = x1.shape[1]
if x2.shape[1] != n:
raise ValueError("Number of points do not match.")
# Build matrix for equation
A = np.zeros((n, 9))
for i in range(n):
A[i] = [
x1[0, i] * x2[0, i], x1[0, i] * x2[1, i], x1[0, i] * x2[2, i],
x1[1, i] * x2[0, i], x1[1, i] * x2[1, i], x1[1, i] * x2[2, i],
x1[2, i] * x2[0, i], x1[2, i] * x2[1, i], x1[2, i] * x2[2, i],
]
# Compute linear least square solution
U, S, V = linalg.svd(A)
F = V[-1].reshape(3, 3)
# Constrain F
# Make rank 2 by zeroing out last singular value
U, S, V = linalg.svd(F)
S[2] = 0
F = dot(U, dot(diag(S), V))
return F
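# Editor's note on estimateFundamental above: it is the classic (unnormalized)
# 8-point algorithm. Each correspondence contributes one row of A built from
# the epipolar constraint (here in the form x1^T F x2 = 0, given the row
# ordering); the least-squares F is the right singular vector of A for the
# smallest singular value (V[-1] as returned by numpy's svd), and the final SVD
# step enforces rank 2 by zeroing the smallest singular value of F. For
# numerical stability the points are usually normalized (Hartley) first, which
# this method does not do.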
def getSelectedPoints(self, points):
print points
x1 = points[::2]
x2 = points[1::2]
return x1, x2
def computeEpipole(self, F):
U, S, V = linalg.svd(F)
e = V[-1]
return e / e[2]
def getHardCodedPoints(self):
p = array([
[3.64000000e+02, 1.80000000e+02, 1.00000000e+00],
[9.14000000e+02, 2.52000000e+02, 1.00000000e+00],
[3.64000000e+02, 6.82000000e+02, 1.00000000e+00],
[8.96000000e+02, 7.60000000e+02, 1.00000000e+00],
[1.09800000e+03, 6.96000000e+02, 1.00000000e+00],
[1.64600000e+03, 7.82000000e+02, 1.00000000e+00],
[2.25000000e+03, 4.06000000e+02, 1.00000000e+00],
[4.10000000e+02, 3.20000000e+02, 1.00000000e+00],
[6.92000000e+02, 4.00000000e+01, 1.00000000e+00],
[1.24400000e+03, 1.18000000e+02, 1.00000000e+00],
[4.12000000e+02, 2.22000000e+02, 1.00000000e+00],
[9.62000000e+02, 2.90000000e+02, 1.00000000e+00],
[8.96000000e+02, 8.40000000e+01, 1.00000000e+00],
[1.44800000e+03, 1.72000000e+02, 1.00000000e+00],
[8.84000000e+02, 2.84000000e+02, 1.00000000e+00],
[1.43800000e+03, 3.68000000e+02, 1.00000000e+00]
])
return p
def plotEpipolarLine(self, im, F, x, epipole=None, showEpipole=True):
m,n = im.shape[:2]
line = dot(F, x)
t = linspace(0, n, 100)
lt = array([(line[2] + line[0] * tt) / (-line[1]) for tt in t])
# take only line points inside the image
ndx = (lt >= 0) & (lt < m)
plot(t[ndx], lt[ndx], linewidth=2)
if showEpipole:
if epipole is None:
epipole = self.computeEpipole(F)
# plot(epipole[0] / epipole[2], epipole[1] / epipole[2], 'r*')
# return epipole[0] / epipole[2], epipole[1] / epipole[2]
return t[ndx], lt[ndx]
def __FundamentalMatrix(self, point):
# Check if the image is frozen.
# SIGB: The user can freeze the input image by pressing the "f" key.
if self.IsFrozen:
# Insert the new selected point in the queue.
if self.__UpdateQueue(point):
# Get all points selected by the user.
points = np.asarray(self.PointsQueue, dtype=np.float32)
points = self.getHardCodedPoints()
# <000> Get the selected points from the left and right images.
left, right = self.getSelectedPoints(points)
left = np.array(left, dtype = np.float32)
right = np.array(right, dtype = np.float32)
# TODO: Remove ?
left = np.float32(left)
right = np.float32(right)
left = np.delete(left, 2, 1)
right = np.delete(right, 2, 1)
# <001> Estimate the Fundamental Matrix.
F, mask = cv2.findFundamentalMat(left, right)
# <002> Save the Fundamental Matrix in the F attribute of the CamerasParameters class.
CamerasParameters.CamerasParameters.F = F
# self.build_epipolar_lines(left, F, False)
# self.build_epipolar_lines(right, F, True)
# Update the fundamental matrix flag and release the system
e = self.computeEpipole(F)
# Update the fundamental matrix flag and release the system
self.hasFundamentalMatrix = True
def build_epipolar_lines(self, points, fundamental_matrix, is_right, show_lines=True):
lines = cv2.computeCorrespondEpilines(points, 2 if is_right else 1, fundamental_matrix)
lines = lines.reshape(-1, 3)
if show_lines:
self.draw_lines(self.Image, lines, points, is_right)
def draw_lines(self, img, lines, points, is_right):
height, width, layers = img.shape
color = (0, 0, 255) if not is_right else (255, 0, 0)
x_gap_point = 0 if not is_right else width / 2
x_gap_line = 0 if is_right else width / 2
for line, point in zip(lines, points):
    x_start, y_start = map(int, [0, -line[2] / line[1]])
    x_end, y_end = map(int, [width / 2, -(line[2] + line[0] * (width / 2)) / line[1]])
    cv2.line(img, (x_start + x_gap_line, y_start), (x_end + x_gap_line, y_end), color, 1)
    cv2.circle(img, (int(point[0] + x_gap_point), int(point[1])), 3, color)
return img
def __Calibrate(self, leftCorners, rightCorners):
"""Calibrate the stereo camera for each new detected pattern."""
# Get the object points; the outer vector contains as many elements as there are pattern views.
objectPoints = Configuration.Instance.Pattern.CalculatePattern()
# <007> Insert the pattern detection results in three vectors.
Configuration.Instance.Pattern.LeftCorners.append(leftCorners)
Configuration.Instance.Pattern.RightCorners.append(rightCorners)
Configuration.Instance.Pattern.ObjectPoints.append(objectPoints)
# Get the parameters used for calibrating each stereo camera.
leftCorners = Configuration.Instance.Pattern.LeftCorners
rightCorners = Configuration.Instance.Pattern.RightCorners
objectPoints = Configuration.Instance.Pattern.ObjectPoints
# <008> Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
calibration = Calibration()
cameraMatrixLeft, distCoeffsLeft = calibration.CalibrateCamera(leftCorners, objectPoints, CameraEnum.LEFT)
cameraMatrixRight, distCoeffsRight = calibration.CalibrateCamera(rightCorners, objectPoints, CameraEnum.RIGHT)
# <008> Write the camera intrinsic and extrinsic parameters.
# camParam = CamerasParameters.CamerasParameters()
StereoCameras.Instance.Parameters.CameraMatrix1 = cameraMatrixLeft
StereoCameras.Instance.Parameters.CameraMatrix2 = cameraMatrixRight
StereoCameras.Instance.Parameters.DistCoeffs1 = distCoeffsLeft
StereoCameras.Instance.Parameters.DistCoeffs2 = distCoeffsRight
# Calibrates the stereo camera.
R, T = Configuration.Instance.Calibration.StereoCalibrate(leftCorners, rightCorners, objectPoints)
# <015> Computes rectification transforms for each head of a calibrated stereo camera.
Configuration.Instance.Calibration.StereoRectify(R, T)
# <016> Computes the undistortion and rectification transformation maps.
Configuration.Instance.Calibration.UndistortRectifyMap()
# End the calibration process.
self.IsCalibrating = False
# Pause for 1 second so the user can see the processed images.
cv2.waitKey(1000)
def __Epipolar(self, point):
"""Define the points used during the fundamental matrix process."""
pass
def __DepthMap(self, leftImage, rightImage):
"""Estimate the depth map from two stereo images."""
# <017> Create the stereo image.
# <018> Get the attributes for the block matching algorithm.
# SIGB: the number of disparities needs to be divisible by 16 and the block size needs to be an odd number.
# <019> Computing a stereo correspondence using the block matching algorithm.
# Check if it is necessary to save the PLY file.
# TODO: Uncomment
# if self.IsSaving:
# self.__SavePLY(disparity, leftStereo)
# <020> Normalizes the disparity image for a valid output OpenCV image.
# Shows the disparity image.
# TODO: Uncomment
# cv2.imshow("DepthMap", disparity)
# Combine two stereo images in only one window.
# stereo = self.__CombineImages(leftStereo, rightStereo, 0.5)
stereo = self.__CombineImages(leftImage, rightImage, 0.5)
cv2.imshow("Stereo", stereo)
def __Augmentation(self, corners, image, camera=CameraEnum.LEFT):
"""Draws some augmentated object in the input image."""
# Get the object points; the outer vector contains as many elements as there are pattern views.
objectPoints = Configuration.Instance.Pattern.CalculatePattern()
# <021> Prepares the external parameters.
if camera == CameraEnum.LEFT:
cameraMatrix = StereoCameras.Instance.Parameters.CameraMatrix1
distCoeffs = StereoCameras.Instance.Parameters.DistCoeffs1
else:
cameraMatrix = StereoCameras.Instance.Parameters.CameraMatrix2
distCoeffs = StereoCameras.Instance.Parameters.DistCoeffs2
# <022> Get the points of the coordinate system.
points = Configuration.Instance.Augmented.CoordinateSystem
# Defines the pose estimation of the coordinate system.
coordEstimation = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, points, cameraMatrix, distCoeffs)
# <025> Draws the coordinate system over the chessboard pattern.
corner = tuple(corners[0].ravel())
cv2.line(image, corner, tuple(coordEstimation[0].ravel()), (255,0,0), 5)
cv2.line(image, corner, tuple(coordEstimation[1].ravel()), (0,255,0), 5)
cv2.line(image, corner, tuple(coordEstimation[2].ravel()), (0,0,255), 5)
# <026> Get the points of the cube.
cube = Configuration.Instance.Augmented.Cube
# <027> Defines the pose estimation of the cube.
cubeEstimation = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, cube, cameraMatrix, distCoeffs)
cubeEstimation = np.int32(cubeEstimation).reshape(-1,2)
# <028> Draws ground floor in green color.
# SIGB: Uses the last four points to do this.
cv2.drawContours(image, [cubeEstimation[:4]],-1,(0,255,0),-3)
# <029> Draw pillars in blue color.
# SIGB: Uses the intersections between the first four points and the last four points.
for i,j in zip(range(4),range(4,8)):
cv2.line(image, tuple(cubeEstimation[i]), tuple(cubeEstimation[j]), (255, 0, 0), 3)
# <030> Draw top layer in red color.
# SIGB: Uses the first four points to do this.
cv2.drawContours(image, [cubeEstimation[4:]],-1,(0,0,255),3)
# Check if it is necessary to apply a texture mapping.
if camera == CameraEnum.RIGHT:
return
# Define each corresponding cube face.
cube = Configuration.Instance.Augmented.Cube
TopFace = cube[4:]
UpFace = np.vstack([cube[5], cube[1:3], cube[6]])
DownFace = np.vstack([cube[7], cube[3], cube[0], cube[4]])
LeftFace = np.vstack([cube[4], cube[0:2], cube[5]])
RightFace = np.vstack([cube[6], cube[2:4], cube[7]])
# Threshold used for selecting which cube faces will be drawn.
threshold = 88
textures = []
image2 = image
# Calculate normals for all corners. Used in shading
top, right, left, up, down = Configuration.Instance.Augmented.CalculateFaceCornerNormals(TopFace, RightFace, LeftFace, UpFace, DownFace)
cubeFaces = []
cubeFaces.append((TopFace, top))
cubeFaces.append((UpFace, up))
cubeFaces.append((DownFace, down))
cubeFaces.append((LeftFace, left))
cubeFaces.append((RightFace, right))
t1, b1, d1 = self.calculateTexture(TopFace, objectPoints, corners, cameraMatrix, distCoeffs, image, C.TEXTURE_TOP, threshold)
t2, b2, d2 = self.calculateTexture(UpFace, objectPoints, corners, cameraMatrix, distCoeffs, image, C.TEXTURE_UP, threshold)
t3, b3, d3 = self.calculateTexture(RightFace, objectPoints, corners, cameraMatrix, distCoeffs, image, C.TEXTURE_RIGHT, threshold)
t4, b4, d4 = self.calculateTexture(LeftFace, objectPoints, corners, cameraMatrix, distCoeffs, image, C.TEXTURE_LEFT, threshold)
t5, b5, d5 = self.calculateTexture(DownFace, objectPoints, corners, cameraMatrix, distCoeffs, image, C.TEXTURE_DOWN, threshold)
# <035> Applies the texture mapping over all cube sides.
# Get / calculate the texture for all sides of the cube
if d1: textures.append((t1, b1))
if d2: textures.append((t2, b2))
if d3: textures.append((t3, b3))
if d4: textures.append((t4, b4))
if d5: textures.append((t5, b5))
# Apply texture to cube
for t in textures:
maskInv = cv2.bitwise_not(t[1])
tmpImg = cv2.bitwise_and(image2, cv2.cvtColor(maskInv, cv2.COLOR_GRAY2BGR))
image2 = cv2.bitwise_or(tmpImg, t[0])
for f in cubeFaces:
self.applyShading(image2, f[0], f[1], corners, threshold, objectPoints, cameraMatrix, distCoeffs)
self.writeImage(image2)
# Apply the shading
# tf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, TopFace, cameraMatrix, distCoeffs)
# rf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, RightFace, cameraMatrix, distCoeffs)
# lf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, LeftFace, cameraMatrix, distCoeffs)
# uf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, UpFace, cameraMatrix, distCoeffs)
# df = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, DownFace, cameraMatrix, distCoeffs)
#
# Configuration.Instance.Augmented.ShadeFace(image2, TopFace, top, tf, corners)
# Configuration.Instance.Augmented.ShadeFace(image2, RightFace, right, rf, corners)
# Configuration.Instance.Augmented.ShadeFace(image2, LeftFace, left, lf, corners)
# Configuration.Instance.Augmented.ShadeFace(image2, UpFace, up, uf, corners)
# Configuration.Instance.Augmented.ShadeFace(image2, DownFace, down, df, corners)
return image2
def applyShading(self, image, cubeFace, shadeFace, corners, threshold, objectPoints, cameraMatrix, distCoeffs):
normal, center, angle = Configuration.Instance.Augmented.GetFaceNormal(cubeFace)
draw = angle[0] < threshold
if draw:
cf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, cubeFace, cameraMatrix, distCoeffs)
Configuration.Instance.Augmented.ShadeFace(image, cubeFace, shadeFace, cf, corners)
def calculateTexture(self, cubeFace, objectPoints, corners, cameraMatrix, distCoeffs, image, texture, threshold):
# Calculate normal vector of cube face to determine if it should be drawn
normal, center, angle = Configuration.Instance.Augmented.GetFaceNormal(cubeFace)
draw = angle[0] < threshold
if draw:
cf = Configuration.Instance.Augmented.PoseEstimation(objectPoints, corners, cubeFace, cameraMatrix, distCoeffs)
t, b = Configuration.Instance.Augmented.ApplyTexture(image, texture, cf)
else:
t = 0
b = 0
return t, b, draw
def writeImage(self, image):
if C.WRITE_LEFT_IMAGE:
cv2.imwrite(C.WRITE_LOCATION, image)
def draw(self, img, corners, imgpts):
corner = tuple(corners[0].ravel())
print tuple(imgpts[0].ravel())
print tuple(imgpts[1].ravel())
print tuple(imgpts[2].ravel())
img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
return img
def __SavePLY(self, disparity, image):
# Check if the system is calibrated.
if Configuration.Instance.Calibration.IsCalibrated:
# Get a 4x4 disparity-to-depth mapping matrix.
Q = StereoCameras.Instance.Parameters.Q
# Reprojects a disparity image to 3D space.
points = cv2.reprojectImageTo3D(disparity, Q)
# Creates a mask of the depth mapping matrix.
mask = disparity > disparity.min()
points = points[mask].reshape(-1, 3)
# Defines the output numpy array.
output = points
# Save the output file.
with open("OpenCV3D.ply", "w") as filename:
filename.write(OpenCV3D.___plyHeader % dict(num = len(output)))
np.savetxt(filename, output, "%f %f %f", newline="\n")
# End the PLY save process.
self.IsSaving = False
def __CombineImages(self, image1, image2, scale=1):
"""Combine two image in only one visualization."""
# Define the final size.
height, width = image1.shape[:2]
width = int(width * scale)
height = int(height * scale)
# Define the new size to input images.
image1 = cv2.resize(image1, (width, height))
image2 = cv2.resize(image2, (width, height))
# Create the final image.
image = np.zeros((height, width * 2, 3), dtype=np.uint8)
image[:height, :width ] = image1
image[:height, width:width * 2] = image2
# Return the combined image.
return image
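# Hedged illustration (not part of the original class): the same side-by-side
# composition can be expressed with np.hstack, assuming both inputs are
# 3-channel uint8 images. The helper below is a standalone sketch, independent
# of the OpenCV3D pipeline.
def _combine_images_sketch(image1, image2, scale=1.0):
    import cv2
    import numpy as np
    height, width = image1.shape[:2]
    size = (int(width * scale), int(height * scale))
    # Resize both inputs to the same target size, then stack them horizontally.
    left = cv2.resize(image1, size)
    right = cv2.resize(image2, size)
    return np.hstack((left, right))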
#----------------------------------------------------------------------#
# Class Action Events Methods #
#----------------------------------------------------------------------#
def mouseEvent(self, event, x, y, flag, param):
"""This is an example of a calibration process using the mouse clicks."""
# Starts the PLY save process.
if event == cv2.EVENT_LBUTTONDOWN:
if self.hasFundamentalMatrix:
self.__Epipolar((x, y))
else:
self.__FundamentalMatrix((x, y))
# Reset all configuration variables.
elif event == cv2.EVENT_MBUTTONDOWN:
self.Clear()
Configuration.Instance.Clear()
# Starts the calibration process.
elif event == cv2.EVENT_RBUTTONUP:
self.IsCalibrating = True
def __UpdateQueue(self, point):
"""Insert a new point in the queue."""
# Get the current queue size.
size = len(self.PointsQueue)
# Check if the queue is full.
if size == self.PointsQueue.maxlen:
return True
# Defines the color used to draw the circle and the line.
color = (0, 0, 255) if size % 2 == 0 else (255, 0, 0)
# Draw a circle in the selected point.
cv2.circle(self.Image, point, 3, color, thickness=-1)
cv2.imshow("Original", self.Image)
# Adjust clicks on the right image to the correct position.
if size % 2 != 0:
point = (point[0] - 320, point[1])
# It is necessary to update the selected point, because the system shows a resized input image.
# SIGB: You can use the original size, if you call the __CombineImages() method with a scale factor of 1.0.
point = (point[0] * 2, point[1] * 2, 1)
# Insert the new point in the queue.
self.PointsQueue.append(point)
# Check if the queue is full now.
if size + 1 == self.PointsQueue.maxlen:
return True
# It is necessary to add more points.
return False
def __SetMinDisparity(self, value):
"""Masks the minDisparity variable."""
if value == 0:
cv2.setTrackbarPos("minDisparity", "DepthMap", int(1))
def __SetNothing(self, value):
"""Standard mask."""
pass
#----------------------------------------------------------------------#
# Main Methods #
#----------------------------------------------------------------------#
def main(argv):
OpenCV3D().Start()
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
|
#!/usr/bin/env python
#
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import argparse
import collections
import contextlib
import json
import logging
import tempfile
import os
import sys
import urllib
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.abspath(os.path.join(
CURRENT_DIR, '..', '..', '..', '..', '..'))
sys.path.append(os.path.join(BASE_DIR, 'build', 'android'))
from pylib.results.presentation import standard_gtest_merge
from pylib.utils import google_storage_helper # pylint: disable=import-error
sys.path.append(os.path.join(BASE_DIR, 'third_party'))
import jinja2 # pylint: disable=import-error
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True)
def cell(data, html_class='center'):
"""Formats table cell data for processing in jinja template."""
return {
'data': data,
'class': html_class,
}
def pre_cell(data, html_class='center'):
"""Formats table <pre> cell data for processing in jinja template."""
return {
'cell_type': 'pre',
'data': data,
'class': html_class,
}
class LinkTarget(object):
# Opens the linked document in a new window or tab.
NEW_TAB = '_blank'
# Opens the linked document in the same frame as it was clicked.
CURRENT_TAB = '_self'
def link(data, href, target=LinkTarget.CURRENT_TAB):
"""Formats <a> tag data for processing in jinja template.
Args:
data: String link appears as on HTML page.
href: URL where link goes.
target: Where link should be opened (e.g. current tab or new tab).
"""
return {
'data': data,
'href': href,
'target': target,
}
def links_cell(links, html_class='center', rowspan=None):
"""Formats table cell with links for processing in jinja template.
Args:
links: List of link dictionaries. Use |link| function to generate them.
html_class: Class for table cell.
rowspan: Rowspan HTML attribute.
"""
return {
'cell_type': 'links',
'class': html_class,
'links': links,
'rowspan': rowspan,
}
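# Hedged illustration (not part of the original script): how cell(), link() and
# links_cell() compose into a single table row consumed by the jinja template.
# The test name and URL below are made up for the example.
def _example_row():
    return [
        links_cell([link(data='FooTest#testBar',
                         href='https://cs.chromium.org/search/?q=FooTest.testBar&type=cs',
                         target=LinkTarget.NEW_TAB)],
                   html_class='left'),
        cell(data='SUCCESS', html_class='center success'),
        cell(data=1234),  # elapsed_time_ms
    ]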
def action_cell(action, data, html_class):
"""Formats table cell with javascript actions.
Args:
action: Javascript action.
data: Data in cell.
html_class: Class for table cell.
"""
return {
'cell_type': 'action',
'action': action,
'data': data,
'class': html_class,
}
def flakiness_dashboard_link(test_name, suite_name):
url_args = urllib.urlencode([
('testType', suite_name),
('tests', test_name)])
return ('https://test-results.appspot.com/'
'dashboards/flakiness_dashboard.html#%s' % url_args)
def logs_cell(result, test_name, suite_name):
"""Formats result logs data for processing in jinja template."""
link_list = []
result_link_dict = result.get('links', {})
result_link_dict['flakiness'] = flakiness_dashboard_link(
test_name, suite_name)
for name, href in sorted(result_link_dict.items()):
link_list.append(link(
data=name,
href=href,
target=LinkTarget.NEW_TAB))
if link_list:
return links_cell(link_list)
else:
return cell('(no logs)')
def code_search(test, cs_base_url):
"""Returns URL for test on codesearch."""
search = test.replace('#', '.')
return '%s/search/?q=%s&type=cs' % (cs_base_url, search)
def status_class(status):
"""Returns HTML class for test status."""
if not status:
return 'failure unknown'
status = status.lower()
if status not in ('success', 'skipped'):
return 'failure %s' % status
return status
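# Hedged examples of the mapping above (illustrative, not exhaustive):
#   status_class('SUCCESS') -> 'success'
#   status_class('FAILURE') -> 'failure failure'
#   status_class(None)      -> 'failure unknown'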
def create_test_table(results_dict, cs_base_url, suite_name):
"""Format test data for injecting into HTML table."""
header_row = [
cell(data='test_name', html_class='text'),
cell(data='status', html_class='flaky'),
cell(data='elapsed_time_ms', html_class='number'),
cell(data='logs', html_class='text'),
cell(data='output_snippet', html_class='text'),
]
test_row_blocks = []
for test_name, test_results in results_dict.iteritems():
test_runs = []
for index, result in enumerate(test_results):
if index == 0:
test_run = [links_cell(
links=[
link(href=code_search(test_name, cs_base_url),
target=LinkTarget.NEW_TAB,
data=test_name)],
rowspan=len(test_results),
html_class='left %s' % test_name
)] # test_name
else:
test_run = []
test_run.extend([
cell(data=result['status'] or 'UNKNOWN',
# status
html_class=('center %s' %
status_class(result['status']))),
cell(data=result['elapsed_time_ms']), # elapsed_time_ms
logs_cell(result, test_name, suite_name), # logs
pre_cell(data=result['output_snippet'], # output_snippet
html_class='left'),
])
test_runs.append(test_run)
test_row_blocks.append(test_runs)
return header_row, test_row_blocks
def create_suite_table(results_dict):
"""Format test suite data for injecting into HTML table."""
SUCCESS_COUNT_INDEX = 1
FAIL_COUNT_INDEX = 2
ALL_COUNT_INDEX = 3
TIME_INDEX = 4
header_row = [
cell(data='suite_name', html_class='text'),
cell(data='number_success_tests', html_class='number'),
cell(data='number_fail_tests', html_class='number'),
cell(data='all_tests', html_class='number'),
cell(data='elapsed_time_ms', html_class='number'),
]
footer_row = [
action_cell(
'showTestsOfOneSuiteOnlyWithNewState("TOTAL")',
'TOTAL',
'center'
), # TOTAL
cell(data=0), # number_success_tests
cell(data=0), # number_fail_tests
cell(data=0), # all_tests
cell(data=0), # elapsed_time_ms
]
suite_row_dict = {}
for test_name, test_results in results_dict.iteritems():
# TODO(mikecase): This logic doesn't work if there are multiple test runs,
# i.e. if 'per_iteration_data' has multiple entries, since we only care
# about the result of the last test run.
result = test_results[-1]
suite_name = (test_name.split('#')[0] if '#' in test_name
else test_name.split('.')[0])
if suite_name in suite_row_dict:
suite_row = suite_row_dict[suite_name]
else:
suite_row = [
action_cell(
'showTestsOfOneSuiteOnlyWithNewState("%s")' % suite_name,
suite_name,
'left'
), # suite_name
cell(data=0), # number_success_tests
cell(data=0), # number_fail_tests
cell(data=0), # all_tests
cell(data=0), # elapsed_time_ms
]
suite_row_dict[suite_name] = suite_row
suite_row[ALL_COUNT_INDEX]['data'] += 1
footer_row[ALL_COUNT_INDEX]['data'] += 1
if result['status'] == 'SUCCESS':
suite_row[SUCCESS_COUNT_INDEX]['data'] += 1
footer_row[SUCCESS_COUNT_INDEX]['data'] += 1
elif result['status'] != 'SKIPPED':
suite_row[FAIL_COUNT_INDEX]['data'] += 1
footer_row[FAIL_COUNT_INDEX]['data'] += 1
# Some types of crashes can have 'null' values for elapsed_time_ms.
if result['elapsed_time_ms'] is not None:
suite_row[TIME_INDEX]['data'] += result['elapsed_time_ms']
footer_row[TIME_INDEX]['data'] += result['elapsed_time_ms']
for suite in suite_row_dict.values():
if suite[FAIL_COUNT_INDEX]['data'] > 0:
suite[FAIL_COUNT_INDEX]['class'] += ' failure'
else:
suite[FAIL_COUNT_INDEX]['class'] += ' success'
if footer_row[FAIL_COUNT_INDEX]['data'] > 0:
footer_row[FAIL_COUNT_INDEX]['class'] += ' failure'
else:
footer_row[FAIL_COUNT_INDEX]['class'] += ' success'
return (header_row,
[[suite_row] for suite_row in suite_row_dict.values()],
footer_row)
def feedback_url(result_details_link):
# pylint: disable=redefined-variable-type
url_args = [
('labels', 'Pri-2,Type-Bug,Restrict-View-Google'),
('summary', 'Result Details Feedback:'),
('components', 'Test>Android'),
]
if result_details_link:
url_args.append(('comment', 'Please check out: %s' % result_details_link))
url_args = urllib.urlencode(url_args)
# pylint: enable=redefined-variable-type
return 'https://bugs.chromium.org/p/chromium/issues/entry?%s' % url_args
def results_to_html(results_dict, cs_base_url, bucket, test_name,
builder_name, build_number, local_output):
"""Convert list of test results into html format.
Args:
local_output: Whether this results file is uploaded to Google Storage or
just a local file.
"""
test_rows_header, test_rows = create_test_table(
results_dict, cs_base_url, test_name)
suite_rows_header, suite_rows, suite_row_footer = create_suite_table(
results_dict)
suite_table_values = {
'table_id': 'suite-table',
'table_headers': suite_rows_header,
'table_row_blocks': suite_rows,
'table_footer': suite_row_footer,
}
test_table_values = {
'table_id': 'test-table',
'table_headers': test_rows_header,
'table_row_blocks': test_rows,
}
main_template = JINJA_ENVIRONMENT.get_template(
os.path.join('template', 'main.html'))
if local_output:
html_render = main_template.render( # pylint: disable=no-member
{
'tb_values': [suite_table_values, test_table_values],
'feedback_url': feedback_url(None),
})
return (html_render, None, None)
else:
dest = google_storage_helper.unique_name(
'%s_%s_%s' % (test_name, builder_name, build_number))
result_details_link = google_storage_helper.get_url_link(
dest, '%s/html' % bucket)
html_render = main_template.render( # pylint: disable=no-member
{
'tb_values': [suite_table_values, test_table_values],
'feedback_url': feedback_url(result_details_link),
})
return (html_render, dest, result_details_link)
def result_details(json_path, test_name, cs_base_url, bucket=None,
builder_name=None, build_number=None, local_output=False):
"""Get result details from json path and then convert results to html.
Args:
local_output: Whether this results file is uploaded to Google Storage or
just a local file.
"""
with open(json_path) as json_file:
json_object = json.loads(json_file.read())
if 'per_iteration_data' not in json_object:
return 'Error: json file missing per_iteration_data.'
results_dict = collections.defaultdict(list)
for testsuite_run in json_object['per_iteration_data']:
for test, test_runs in testsuite_run.iteritems():
results_dict[test].extend(test_runs)
return results_to_html(results_dict, cs_base_url, bucket, test_name,
builder_name, build_number, local_output)
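# Hedged sketch of the 'per_iteration_data' shape consumed above, based only on
# the fields this script reads (status, elapsed_time_ms, output_snippet, links).
# The concrete values are illustrative, not taken from a real run.
_EXAMPLE_PER_ITERATION_DATA = {
    'per_iteration_data': [
        {
            'FooTest#testBar': [
                {
                    'status': 'SUCCESS',
                    'elapsed_time_ms': 1234,
                    'output_snippet': '',
                    'links': {},
                },
            ],
        },
    ],
}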
def upload_to_google_bucket(html, bucket, dest):
with tempfile.NamedTemporaryFile(suffix='.html') as temp_file:
temp_file.write(html)
temp_file.flush()
return google_storage_helper.upload(
name=dest,
filepath=temp_file.name,
bucket='%s/html' % bucket,
content_type='text/html',
authenticated_link=True)
def ui_screenshot_set(json_path):
with open(json_path) as json_file:
json_object = json.loads(json_file.read())
if 'per_iteration_data' not in json_object:
# This will be reported as an error by result_details, no need to duplicate.
return None
ui_screenshots = []
# pylint: disable=too-many-nested-blocks
for testsuite_run in json_object['per_iteration_data']:
for _, test_runs in testsuite_run.iteritems():
for test_run in test_runs:
if 'ui screenshot' in test_run['links']:
screenshot_link = test_run['links']['ui screenshot']
if screenshot_link.startswith('file:'):
with contextlib.closing(urllib.urlopen(screenshot_link)) as f:
test_screenshots = json.load(f)
else:
# Assume anything that isn't a file link is a google storage link
screenshot_string = google_storage_helper.read_from_link(
screenshot_link)
if not screenshot_string:
logging.error('Bad screenshot link %s', screenshot_link)
continue
test_screenshots = json.loads(
screenshot_string)
ui_screenshots.extend(test_screenshots)
# pylint: enable=too-many-nested-blocks
if ui_screenshots:
return json.dumps(ui_screenshots)
return None
def upload_screenshot_set(json_path, test_name, bucket, builder_name,
build_number):
screenshot_set = ui_screenshot_set(json_path)
if not screenshot_set:
return None
dest = google_storage_helper.unique_name(
'screenshots_%s_%s_%s' % (test_name, builder_name, build_number),
suffix='.json')
with tempfile.NamedTemporaryFile(suffix='.json') as temp_file:
temp_file.write(screenshot_set)
temp_file.flush()
return google_storage_helper.upload(
name=dest,
filepath=temp_file.name,
bucket='%s/json' % bucket,
content_type='application/json',
authenticated_link=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--json-file', help='Path of json file.')
parser.add_argument('--cs-base-url', help='Base url for code search.',
default='http://cs.chromium.org')
parser.add_argument('--bucket', help='Google storage bucket.', required=True)
parser.add_argument('--builder-name', help='Builder name.')
parser.add_argument('--build-number', help='Build number.')
parser.add_argument('--test-name', help='The name of the test.',
required=True)
parser.add_argument(
'-o', '--output-json',
help='(Swarming Merge Script API) '
'Output JSON file to create.')
parser.add_argument(
'--build-properties',
help='(Swarming Merge Script API) '
'Build property JSON file provided by recipes.')
parser.add_argument(
'--summary-json',
help='(Swarming Merge Script API) '
'Summary of shard state running on swarming. '
'(Output of the swarming.py collect '
'--task-summary-json=XXX command.)')
parser.add_argument(
'--task-output-dir',
help='(Swarming Merge Script API) '
'Directory containing all swarming task results.')
parser.add_argument(
'positional', nargs='*',
help='output.json from shards.')
args = parser.parse_args()
if ((args.build_properties is None) ==
(args.build_number is None or args.builder_name is None)):
raise parser.error('Exactly one of build_properties or '
'(build_number and builder_name) should be given.')
if (args.build_number is None) != (args.builder_name is None):
raise parser.error('args.build_number and args.builder_name '
'have to be given together '
'or not given at all.')
if len(args.positional) == 0 and args.json_file is None:
if args.output_json:
with open(args.output_json, 'w') as f:
json.dump({}, f)
return
elif len(args.positional) != 0 and args.json_file:
raise parser.error('Exactly one of args.positional and '
'args.json_file should be given.')
if args.build_properties:
build_properties = json.loads(args.build_properties)
if (('buildnumber' not in build_properties) or
('buildername' not in build_properties)):
raise parser.error('Build number/builder name not specified.')
build_number = build_properties['buildnumber']
builder_name = build_properties['buildername']
elif args.build_number and args.builder_name:
build_number = args.build_number
builder_name = args.builder_name
if args.positional:
if len(args.positional) == 1:
json_file = args.positional[0]
else:
if args.output_json and args.summary_json:
standard_gtest_merge.standard_gtest_merge(
args.output_json, args.summary_json, args.positional)
json_file = args.output_json
elif not args.output_json:
raise Exception('output_json required by merge API is missing.')
else:
raise Exception('summary_json required by merge API is missing.')
elif args.json_file:
json_file = args.json_file
if not os.path.exists(json_file):
raise IOError('--json-file %s not found.' % json_file)
# The link to the result details presentation page is embedded in the page itself.
result_html_string, dest, result_details_link = result_details(
json_file, args.test_name, args.cs_base_url, args.bucket,
builder_name, build_number)
result_details_link_2 = upload_to_google_bucket(
result_html_string.encode('UTF-8'),
args.bucket, dest)
assert result_details_link == result_details_link_2, (
'Result details links do not match. The link returned by get_url_link'
' should be the same as that returned by upload.')
ui_screenshot_set_link = upload_screenshot_set(json_file, args.test_name,
args.bucket, builder_name, build_number)
if ui_screenshot_set_link:
ui_catalog_url = 'https://chrome-ui-catalog.appspot.com/'
ui_catalog_query = urllib.urlencode(
{'screenshot_source': ui_screenshot_set_link})
ui_screenshot_link = '%s?%s' % (ui_catalog_url, ui_catalog_query)
if args.output_json:
with open(json_file) as original_json_file:
json_object = json.load(original_json_file)
json_object['links'] = {
'result_details (logcats, flakiness links)': result_details_link
}
if ui_screenshot_set_link:
json_object['links']['ui screenshots'] = ui_screenshot_link
with open(args.output_json, 'w') as f:
json.dump(json_object, f)
else:
print('Result Details: %s' % result_details_link)
if ui_screenshot_set_link:
print('UI Screenshots %s' % ui_screenshot_link)
if __name__ == '__main__':
sys.exit(main())
|
|
#!/usr/bin/env python
#Copyright (C) 2013 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
import argparse
import logging
import random
import numpy as np
from teHmm.track import TrackData
from teHmm.trackIO import readBedIntervals, getMergedBedIntervals
from teHmm.hmm import MultitrackHmm
from teHmm.emission import IndependentMultinomialAndGaussianEmissionModel
from teHmm.emission import PairEmissionModel
from teHmm.track import CategoryMap, BinaryMap
from teHmm.cfg import MultitrackCfg
from teHmm.modelIO import saveModel
from teHmm.common import myLog, EPSILON, initBedTool, cleanBedTool, LOGZERO
from teHmm.common import addLoggingOptions, setLoggingFromOptions, logger
from teHmm.common import runParallelShellCommands
from teHmm.bin.compareBedStates import checkExactOverlap
def main(argv=None):
if argv is None:
argv = sys.argv
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create a teHMM")
parser.add_argument("tracksInfo", help="Path of Tracks Info file "
"containing paths to genome annotation tracks")
parser.add_argument("trainingBed", help="Path of BED file containing"
" genome regions to train model on. If --supervised "
"is used, the names in this bed file will be treated "
"as the true annotation (otherwise it is only used for "
"interval coordinates)")
parser.add_argument("outputModel", help="Path of output hmm")
parser.add_argument("--numStates", help="Number of states in model",
type = int, default=2)
parser.add_argument("--iter", help="Number of EM iterations",
type = int, default=100)
parser.add_argument("--supervised", help="Use name (4th) column of "
"<traingingBed> for the true hidden states of the"
" model. Transition parameters will be estimated"
" directly from this information rather than EM."
" NOTE: The number of states will be determined "
"from the bed.",
action = "store_true", default = False)
parser.add_argument("--cfg", help="Use Context Free Grammar insead of "
"HMM. Only works with --supervised for now",
action = "store_true", default = False)
parser.add_argument("--saPrior", help="Confidence in self alignment "
"track for CFG. Probability of pair emission "
"is multiplied by this number if the bases are aligned"
" and its complement if bases are not aligned. Must"
" be between [0,1].", default=0.95, type=float)
parser.add_argument("--pairStates", help="Comma-separated list of states"
" (from trainingBed) that are treated as pair-emitors"
" for the CFG", default=None)
parser.add_argument("--emFac", help="Normalization factor for weighting"
" emission probabilities because when there are "
"many tracks, the transition probabilities can get "
"totally lost. 0 = no normalization. 1 ="
" divide by number of tracks. k = divide by number "
"of tracks / k", type=int, default=0)
parser.add_argument("--initTransProbs", help="Path of text file where each "
"line has three entries: FromState ToState Probability"
". This file (all other transitions get probability 0)"
" is used to specifiy the initial transition model."
" The names and number of states will be initialized "
"according to this file (overriding --numStates)",
default = None)
parser.add_argument("--fixTrans", help="Do not learn transition parameters"
" (best used with --initTransProbs)",
action="store_true", default=False)
parser.add_argument("--initEmProbs", help="Path of text file where each "
"line has four entries: State Track Symbol Probability"
". This file (all other emissions get probability 0)"
" is used to specifiy the initial emission model. All "
"states specified in this file must appear in the file"
" specified with --initTransProbs (but not vice versa).",
default = None)
parser.add_argument("--fixEm", help="Do not learn emission parameters"
" (best used with --initEmProbs)",
action="store_true", default=False)
parser.add_argument("--initStartProbs", help="Path of text file where each "
"line has two entries: State Probability"
". This file (all other start probs get probability 0)"
" is used to specifiy the initial start dist. All "
"states specified in this file must appear in the file"
" specified with --initTransProbs (but not vice versa).",
default = None)
parser.add_argument("--fixStart", help="Do not learn start parameters"
" (best used with --initStartProbs)",
action="store_true", default=False)
parser.add_argument("--forceTransProbs",
help="Path of text file where each "
"line has three entries: FromState ToState Probability"
". These transition probabilities will override any "
" learned probabilities after each training iteration"
" (unspecified "
"will not be set to 0 in this case. the learned values"
" will be kept, but normalized as needed)" ,
default=None)
parser.add_argument("--forceEmProbs", help="Path of text file where each "
"line has four entries: State Track Symbol Probability"
". These "
"emission probabilities will override any learned"
" probabilities after each training iteration "
"(unspecified "
"will not be set to 0 in this case. the learned values"
" will be kept, but normalized as needed.)" ,
default = None)
parser.add_argument("--flatEm", help="Use a flat emission distribution as "
"a baseline. If not specified, the initial emission "
"distribution will be randomized by default. Emission"
" probabilities specified with --initEmpProbs or "
"--forceEmProbs will never be affected by randomizaiton"
". The randomization is important for Baum Welch "
"training, since if two states dont have at least one"
" different emission or transition probability to begin"
" with, they will never learn to be different.",
action="store_true", default=False)
parser.add_argument("--emRandRange", help="When randomly initialzing an"
" emission distribution, constrain"
" the values to the given range (pair of "
"comma-separated numbers). Overridden by "
"--initEmProbs and --forceEmProbs when applicable."
" Completely overridden by --flatEm (which is equivalent"
" to --emRandRange .5,.5.). Actual values used will"
" always be normalized.", default="0.2,0.8")
parser.add_argument("--segment", help="Bed file of segments to treat as "
"single columns for HMM (ie as created with "
"segmentTracks.py). IMPORTANT: this file must cover "
"the same regions as the traininBed file. Unless in "
"supervised mode, probably best to use same bed file "
" as both traingBed and --segment argument. Otherwise"
" use intersectBed to make sure the overlap is exact",
default=None)
parser.add_argument("--segLen", help="Effective segment length used for"
" normalizing input segments (specifying 0 means no"
" normalization applied)", type=int, default=0)
parser.add_argument("--seed", help="Seed for random number generator"
" which will be used to initialize emissions "
"(if --flatEM and --supervised not specified)",
default=None, type=int)
parser.add_argument("--reps", help="Number of replicates (with different"
" random initializations) to run. The replicate"
" with the highest likelihood will be chosen for the"
" output", default=1, type=int)
parser.add_argument("--numThreads", help="Number of threads to use when"
" running replicates (see --rep) in parallel.",
type=int, default=1)
parser.add_argument("--emThresh", help="Threshold used for convergence"
" in baum welch training. IE delta log likelihood"
" must be bigger than this number (which should be"
" positive) for convergence", type=float,
default=0.001)
parser.add_argument("--saveAllReps", help="Save all replicates (--reps)"
" models to disk, instead of just the best one"
". Format is <outputModel>.repN. There will be "
" --reps -1 such models saved as the best output"
" counts as a replicate",
action="store_true", default=False)
parser.add_argument("--maxProb", help="Gaussian distributions and/or"
" segment length corrections can cause probability"
" to *decrease* during BW iteration. Use this option"
" to remember the parameters with the highest probability"
" rather than returning the parameters after the final "
"iteration.", action="store_true", default=False)
parser.add_argument("--maxProbCut", help="Use with --maxProb option to stop"
" training if a given number of iterations go by without"
" hitting a new maxProb", default=None, type=int)
parser.add_argument("--transMatEpsilons", help="By default, epsilons are"
" added to all transition probabilities to prevent "
"converging on 0 due to rounding error only for fully"
" unsupervised training. Use this option to force this"
" behaviour for supervised and semisupervised modes",
action="store_true", default=False)
addLoggingOptions(parser)
args = parser.parse_args()
if args.cfg is True:
assert args.supervised is True
assert args.saPrior >= 0. and args.saPrior <= 1.
if args.pairStates is not None:
assert args.cfg is True
if args.initTransProbs is not None or args.fixTrans is True or\
args.initEmProbs is not None or args.fixEm is True:
if args.cfg is True:
raise RuntimeError("--transProbs, --fixTrans, --emProbs, --fixEm "
"are not currently compatible with --cfg.")
if args.fixTrans is True and args.supervised is True:
raise RuntimeError("--fixTrans option not compatible with --supervised")
if args.fixEm is True and args.supervised is True:
raise RuntimeError("--fixEm option not compatible with --supervised")
if (args.forceTransProbs is not None or args.forceEmProbs is not None) \
and args.cfg is True:
raise RuntimeError("--forceTransProbs and --forceEmProbs are not "
"currently compatible with --cfg")
if args.flatEm is True and args.supervised is False and\
args.initEmProbs is None and args.initTransProbs is None:
raise RuntimeError("--flatEm must be used with --initEmProbs and or"
" --initTransProbs")
if args.initEmProbs is not None and args.initTransProbs is None:
raise RuntimeError("--initEmProbs can only be used in conjunction with"
" --initTransProbs")
if args.emRandRange is not None:
args.emRandRange = args.emRandRange.split(",")
try:
assert len(args.emRandRange) == 2
args.emRandRange = (float(args.emRandRange[0]),
float(args.emRandRange[1]))
except:
raise RuntimeError("Invalid --emRandRange specified")
if args.transMatEpsilons is False:
# Old logic here; now overridden by the options above.
args.transMatEpsilons = (args.supervised is False and
args.initTransProbs is None and
args.forceTransProbs is None)
setLoggingFromOptions(args)
tempBedToolPath = initBedTool()
# read training intervals from the bed file
logger.info("loading training intervals from %s" % args.trainingBed)
mergedIntervals = getMergedBedIntervals(args.trainingBed, ncol=4)
if mergedIntervals is None or len(mergedIntervals) < 1:
raise RuntimeError("Could not read any intervals from %s" %
args.trainingBed)
# read segment intervals
segIntervals = None
if args.segment is not None:
logger.info("loading segment intervals from %s" % args.segment)
try:
checkExactOverlap(args.trainingBed, args.segment)
except:
raise RuntimeError("bed file passed with --segments option"
" must exactly overlap trainingBed")
segIntervals = readBedIntervals(args.segment, sort=True)
elif args.segLen > 0:
raise RuntimeError("--segLen can only be used with --segment")
if args.segLen <= 0:
args.segLen = None
if args.segLen > 0 and args.segLen != 1:
logger.warning("--segLen should be 0 (no correction) or 1 (base"
" correction). Values > 1 may cause bias.")
# read the tracks, while intersecting them with the training intervals
logger.info("loading tracks %s" % args.tracksInfo)
trackData = TrackData()
trackData.loadTrackData(args.tracksInfo, mergedIntervals,
segmentIntervals=segIntervals)
catMap = None
userTrans = None
if args.supervised is False and args.initTransProbs is not None:
logger.debug("initializing transition model with user data")
catMap = stateNamesFromUserTrans(args.initTransProbs)
# state number is overridden by the transProbs file
args.numStates = len(catMap)
truthIntervals = None
# state number is overridden by the input bed file in supervised mode
if args.supervised is True:
logger.info("processing supervised state names")
# we reload because we don't want to be merging them here
truthIntervals = readBedIntervals(args.trainingBed, ncol=4)
catMap = mapStateNames(truthIntervals)
args.numStates = len(catMap)
# train the model
seeds = [random.randint(0, 4294967294)]
if args.seed is not None:
seeds = [args.seed]
random.seed(args.seed)
seeds += [random.randint(0, sys.maxint) for x in xrange(1, args.reps)]
def trainClosure(randomSeed):
return trainModel(randomSeed, trackData=trackData, catMap=catMap,
userTrans=userTrans, truthIntervals=truthIntervals,
args=args)
modelList = runParallelShellCommands(argList=seeds, numProc = args.numThreads,
execFunction = trainClosure,
useThreads = True)
# select best model
logmsg = ""
bestModel = (-1, LOGZERO)
for i in xrange(len(modelList)):
curModel = (i, modelList[i].getLastLogProb())
if curModel[1] > bestModel[1]:
bestModel = curModel
if curModel[1] is not None:
logmsg += "Rep %i: TotalProb: %f\n" % curModel
if len(modelList) > 1:
logging.info("Training Replicates Statistics:\n%s" % logmsg)
logging.info("Selecting best replicate (%d, %f)" % bestModel)
model = modelList[bestModel[0]]
# write the model to a pickle
logger.info("saving trained model to %s" % args.outputModel)
saveModel(args.outputModel, model)
# write all replicates
writtenCount = 0
if args.saveAllReps is True:
for i, repModel in enumerate(modelList):
if i != bestModel[0]:
repPath = "%s.rep%d" % (args.outputModel, writtenCount)
logger.info("saving replicate model to %s" % repPath)
saveModel(repPath, repModel)
writtenCount += 1
cleanBedTool(tempBedToolPath)
###########################################################################
def trainModel(randomSeed, trackData, catMap, userTrans, truthIntervals,
args):
""" Run the whole training pipeline
"""
# activate the random seed
randGen = np.random.RandomState(randomSeed)
# create the independent emission model
logger.info("creating emission model")
numSymbolsPerTrack = trackData.getNumSymbolsPerTrack()
logger.debug("numSymbolsPerTrack=%s" % numSymbolsPerTrack)
# only randomize model if using Baum-Welch
randomize = args.supervised is False and args.flatEm is False
emissionModel = IndependentMultinomialAndGaussianEmissionModel(
args.numStates,
numSymbolsPerTrack,
trackData.getTrackList(),
normalizeFac=args.emFac,
randomize=randomize,
effectiveSegmentLength = args.segLen,
random_state = randGen,
randRange = args.emRandRange)
# create the model
if not args.cfg:
logger.info("creating hmm model")
model = MultitrackHmm(emissionModel, n_iter=args.iter,
state_name_map=catMap,
fixTrans = args.fixTrans,
fixEmission = args.fixEm,
fixStart = args.fixStart,
forceUserEmissions = args.forceEmProbs,
forceUserTrans = args.forceTransProbs,
random_state = randGen,
thresh = args.emThresh,
transMatEpsilons = args.transMatEpsilons,
maxProb = args.maxProb,
maxProbCut = args.maxProbCut)
else:
pairEM = PairEmissionModel(emissionModel, [args.saPrior] *
emissionModel.getNumStates())
assert args.supervised is True
nestStates = []
if args.pairStates is not None:
pairStates = args.pairStates.split(",")
nestStates = map(lambda x: catMap.getMap(x), pairStates)
logger.info("Creating cfg model")
model = MultitrackCfg(emissionModel, pairEM, nestStates,
state_name_map=catMap)
# initialize the user specified transition probabilities now if necessary
if args.initTransProbs is not None:
with open(args.initTransProbs) as f:
model.applyUserTrans(f.readlines())
# initialize the user specified emission probabilities now if necessary
if args.initEmProbs is not None:
with open(args.initEmProbs) as f:
# can't apply emissions without a track list!
model.trackList = trackData.getTrackList()
model.applyUserEmissions(f.readlines())
# initialize the user specified start probabilities now if necessary
if args.initStartProbs is not None:
with open(args.initStartProbs) as f:
model.applyUserStarts(f.readlines())
# make sure initialization didn't screw up
model.validate()
# do the training
if args.supervised is False:
logger.info("training via EM")
model.train(trackData)
else:
logger.info("training from input bed states")
model.supervisedTrain(trackData, truthIntervals)
# reset the user specified transition probabilities now if necessary
if args.forceTransProbs is not None:
with open(args.forceTransProbs) as f:
model.applyUserTrans(f.readlines())
# reset the user specified emission probabilities now if necessary
if args.forceEmProbs is not None:
with open(args.forceEmProbs) as f:
model.applyUserEmissions(f.readlines())
return model
###########################################################################
def stateNamesFromUserTrans(userTransPath):
""" Scan the user transitions to determine all state names. """
catMap = CategoryMap(reserved=0)
f = open(userTransPath, "r")
for line in f:
if len(line.lstrip()) > 0 and line.lstrip()[0] != "#":
toks = line.split()
assert len(toks) == 3
float(toks[2])
catMap.getMap(toks[0], update=True)
catMap.getMap(toks[1], update=True)
f.close()
return catMap
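# Hedged example of the text format implied by the parser above: one
# "FromState ToState Probability" triple per line, with '#' comment lines
# skipped. The state names below are made up for illustration.
_EXAMPLE_USER_TRANS = """\
# FromState ToState Probability
Inside Inside 0.9
Inside Outside 0.1
Outside Inside 0.1
Outside Outside 0.9
"""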
###########################################################################
def mapStateNames(bedIntervals):
""" sanitize the states (column 4) of each bed interval, mapping to unique
integer in place. return the map"""
catMap = CategoryMap(reserved=0)
for idx, interval in enumerate(bedIntervals):
if len(interval) < 4 or interval[3] is None:
raise RuntimeError("Could not read state from 4th column" %
str(interval))
bedIntervals[idx] = (interval[0], interval[1], interval[2],
catMap.getMap(interval[3], update=True))
return catMap
###########################################################################
if __name__ == "__main__":
sys.exit(main())
|
|
"""Beam search parameters tuning for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import numpy as np
import argparse
import functools
import gzip
import logging
import paddle.v2 as paddle
import _init_paths
from data_utils.data import DataGenerator
from decoders.swig_wrapper import Scorer
from decoders.swig_wrapper import ctc_beam_search_decoder_batch
from model_utils.model import deep_speech_v2_network
from utils.error_rate import wer, cer
from utils.utility import add_arguments, print_arguments
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('num_batches', int, -1, "# of batches tuning on. "
"Default -1, on whole dev set.")
add_arg('batch_size', int, 256, "# of samples per batch.")
add_arg('trainer_count', int, 8, "# of Trainers (CPUs or GPUs).")
add_arg('beam_size', int, 500, "Beam search width.")
add_arg('num_proc_bsearch', int, 12, "# of CPUs for beam search.")
add_arg('num_conv_layers', int, 2, "# of convolution layers.")
add_arg('num_rnn_layers', int, 3, "# of recurrent layers.")
add_arg('rnn_layer_size', int, 2048, "# of recurrent cells per layer.")
add_arg('num_alphas', int, 45, "# of alpha candidates for tuning.")
add_arg('num_betas', int, 8, "# of beta candidates for tuning.")
add_arg('alpha_from', float, 1.0, "Where alpha starts tuning from.")
add_arg('alpha_to', float, 3.2, "Where alpha ends tuning with.")
add_arg('beta_from', float, 0.1, "Where beta starts tuning from.")
add_arg('beta_to', float, 0.45, "Where beta ends tuning with.")
add_arg('cutoff_prob', float, 1.0, "Cutoff probability for pruning.")
add_arg('cutoff_top_n', int, 40, "Cutoff number for pruning.")
add_arg('use_gru', bool, False, "Use GRUs instead of simple RNNs.")
add_arg('use_gpu', bool, True, "Use GPU or not.")
add_arg('share_rnn_weights',bool, True, "Share input-hidden weights across "
"bi-directional RNNs. Not for GRU.")
add_arg('tune_manifest', str,
'data/librispeech/manifest.dev-clean',
"Filepath of manifest to tune.")
add_arg('mean_std_path', str,
'data/librispeech/mean_std.npz',
"Filepath of normalizer's mean & std.")
add_arg('vocab_path', str,
'data/librispeech/vocab.txt',
"Filepath of vocabulary.")
add_arg('lang_model_path', str,
'models/lm/common_crawl_00.prune01111.trie.klm',
"Filepath for language model.")
add_arg('model_path', str,
'./checkpoints/libri/params.latest.tar.gz',
"If None, the training starts from scratch, "
"otherwise, it resumes from the pre-trained model.")
add_arg('error_rate_type', str,
'wer',
"Error rate type for evaluation.",
choices=['wer', 'cer'])
add_arg('specgram_type', str,
'linear',
"Audio feature type. Options: linear, mfcc.",
choices=['linear', 'mfcc'])
# yapf: enable
args = parser.parse_args()
logging.basicConfig(
format='[%(levelname)s %(asctime)s %(filename)s:%(lineno)d] %(message)s')
def tune():
"""Tune parameters alpha and beta incrementally."""
if not args.num_alphas >= 0:
raise ValueError("num_alphas must be non-negative!")
if not args.num_betas >= 0:
raise ValueError("num_betas must be non-negative!")
data_generator = DataGenerator(
vocab_filepath=args.vocab_path,
mean_std_filepath=args.mean_std_path,
augmentation_config='{}',
specgram_type=args.specgram_type,
num_threads=1)
audio_data = paddle.layer.data(
name="audio_spectrogram",
type=paddle.data_type.dense_array(161 * 161))
text_data = paddle.layer.data(
name="transcript_text",
type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
output_probs, _ = deep_speech_v2_network(
audio_data=audio_data,
text_data=text_data,
dict_size=data_generator.vocab_size,
num_conv_layers=args.num_conv_layers,
num_rnn_layers=args.num_rnn_layers,
rnn_size=args.rnn_layer_size,
use_gru=args.use_gru,
share_rnn_weights=args.share_rnn_weights)
batch_reader = data_generator.batch_reader_creator(
manifest_path=args.tune_manifest,
batch_size=args.batch_size,
sortagrad=False,
shuffle_method=None)
# load parameters
if not os.path.isfile(args.model_path):
raise IOError("Invaid model path: %s" % args.model_path)
parameters = paddle.parameters.Parameters.from_tar(
gzip.open(args.model_path))
inferer = paddle.inference.Inference(
output_layer=output_probs, parameters=parameters)
# decoders only accept strings encoded in utf-8
vocab_list = [chars.encode("utf-8") for chars in data_generator.vocab_list]
# init logger
logger = logging.getLogger("")
logger.setLevel(level=logging.INFO)
# init external scorer
logger.info("begin to initialize the external scorer for tuning")
if not os.path.isfile(args.lang_model_path):
raise IOError("Invaid language model path: %s" % args.lang_model_path)
ext_scorer = Scorer(
alpha=args.alpha_from,
beta=args.beta_from,
model_path=args.lang_model_path,
vocabulary=vocab_list)
logger.info("language model: "
"is_character_based = %d," % ext_scorer.is_character_based() +
" max_order = %d," % ext_scorer.get_max_order() +
" dict_size = %d" % ext_scorer.get_dict_size())
logger.info("end initializing scorer. Start tuning ...")
error_rate_func = cer if args.error_rate_type == 'cer' else wer
# create grid for search
cand_alphas = np.linspace(args.alpha_from, args.alpha_to, args.num_alphas)
cand_betas = np.linspace(args.beta_from, args.beta_to, args.num_betas)
params_grid = [(alpha, beta) for alpha in cand_alphas
for beta in cand_betas]
err_sum = [0.0 for i in xrange(len(params_grid))]
err_ave = [0.0 for i in xrange(len(params_grid))]
num_ins, cur_batch = 0, 0
# incrementally tune parameters over multiple batches
for infer_data in batch_reader():
if (args.num_batches >= 0) and (cur_batch >= args.num_batches):
break
infer_results = inferer.infer(input=infer_data)
num_steps = len(infer_results) // len(infer_data)
probs_split = [
infer_results[i * num_steps:(i + 1) * num_steps]
for i in xrange(len(infer_data))
]
target_transcripts = [
''.join([data_generator.vocab_list[token] for token in transcript])
for _, transcript in infer_data
]
num_ins += len(target_transcripts)
# grid search
for index, (alpha, beta) in enumerate(params_grid):
# reset alpha & beta
ext_scorer.reset_params(alpha, beta)
beam_search_results = ctc_beam_search_decoder_batch(
probs_split=probs_split,
vocabulary=vocab_list,
beam_size=args.beam_size,
num_processes=args.num_proc_bsearch,
cutoff_prob=args.cutoff_prob,
cutoff_top_n=args.cutoff_top_n,
ext_scoring_func=ext_scorer, )
result_transcripts = [res[0][1] for res in beam_search_results]
for target, result in zip(target_transcripts, result_transcripts):
err_sum[index] += error_rate_func(target, result)
err_ave[index] = err_sum[index] / num_ins
if index % 2 == 0:
sys.stdout.write('.')
sys.stdout.flush()
# output on-line tuning result at the end of current batch
err_ave_min = min(err_ave)
min_index = err_ave.index(err_ave_min)
print("\nBatch %d [%d/?], current opt (alpha, beta) = (%s, %s), "
" min [%s] = %f" %(cur_batch, num_ins,
"%.3f" % params_grid[min_index][0],
"%.3f" % params_grid[min_index][1],
args.error_rate_type, err_ave_min))
cur_batch += 1
# output WER/CER at every (alpha, beta)
print("\nFinal %s:\n" % args.error_rate_type)
for index in xrange(len(params_grid)):
print("(alpha, beta) = (%s, %s), [%s] = %f"
% ("%.3f" % params_grid[index][0], "%.3f" % params_grid[index][1],
args.error_rate_type, err_ave[index]))
err_ave_min = min(err_ave)
min_index = err_ave.index(err_ave_min)
print("\nFinish tuning on %d batches, final opt (alpha, beta) = (%s, %s)"
% (args.num_batches, "%.3f" % params_grid[min_index][0],
"%.3f" % params_grid[min_index][1]))
logger.info("finish tuning")
def main():
print_arguments(args)
paddle.init(use_gpu=args.use_gpu, trainer_count=args.trainer_count)
tune()
if __name__ == '__main__':
main()
|
|
data = (
'Po ', # 0x00
'Feng ', # 0x01
'Zhuan ', # 0x02
'Fu ', # 0x03
'She ', # 0x04
'Ke ', # 0x05
'Jiang ', # 0x06
'Jiang ', # 0x07
'Zhuan ', # 0x08
'Wei ', # 0x09
'Zun ', # 0x0a
'Xun ', # 0x0b
'Shu ', # 0x0c
'Dui ', # 0x0d
'Dao ', # 0x0e
'Xiao ', # 0x0f
'Ji ', # 0x10
'Shao ', # 0x11
'Er ', # 0x12
'Er ', # 0x13
'Er ', # 0x14
'Ga ', # 0x15
'Jian ', # 0x16
'Shu ', # 0x17
'Chen ', # 0x18
'Shang ', # 0x19
'Shang ', # 0x1a
'Mo ', # 0x1b
'Ga ', # 0x1c
'Chang ', # 0x1d
'Liao ', # 0x1e
'Xian ', # 0x1f
'Xian ', # 0x20
'[?] ', # 0x21
'Wang ', # 0x22
'Wang ', # 0x23
'You ', # 0x24
'Liao ', # 0x25
'Liao ', # 0x26
'Yao ', # 0x27
'Mang ', # 0x28
'Wang ', # 0x29
'Wang ', # 0x2a
'Wang ', # 0x2b
'Ga ', # 0x2c
'Yao ', # 0x2d
'Duo ', # 0x2e
'Kui ', # 0x2f
'Zhong ', # 0x30
'Jiu ', # 0x31
'Gan ', # 0x32
'Gu ', # 0x33
'Gan ', # 0x34
'Tui ', # 0x35
'Gan ', # 0x36
'Gan ', # 0x37
'Shi ', # 0x38
'Yin ', # 0x39
'Chi ', # 0x3a
'Kao ', # 0x3b
'Ni ', # 0x3c
'Jin ', # 0x3d
'Wei ', # 0x3e
'Niao ', # 0x3f
'Ju ', # 0x40
'Pi ', # 0x41
'Ceng ', # 0x42
'Xi ', # 0x43
'Bi ', # 0x44
'Ju ', # 0x45
'Jie ', # 0x46
'Tian ', # 0x47
'Qu ', # 0x48
'Ti ', # 0x49
'Jie ', # 0x4a
'Wu ', # 0x4b
'Diao ', # 0x4c
'Shi ', # 0x4d
'Shi ', # 0x4e
'Ping ', # 0x4f
'Ji ', # 0x50
'Xie ', # 0x51
'Chen ', # 0x52
'Xi ', # 0x53
'Ni ', # 0x54
'Zhan ', # 0x55
'Xi ', # 0x56
'[?] ', # 0x57
'Man ', # 0x58
'E ', # 0x59
'Lou ', # 0x5a
'Ping ', # 0x5b
'Ti ', # 0x5c
'Fei ', # 0x5d
'Shu ', # 0x5e
'Xie ', # 0x5f
'Tu ', # 0x60
'Lu ', # 0x61
'Lu ', # 0x62
'Xi ', # 0x63
'Ceng ', # 0x64
'Lu ', # 0x65
'Ju ', # 0x66
'Xie ', # 0x67
'Ju ', # 0x68
'Jue ', # 0x69
'Liao ', # 0x6a
'Jue ', # 0x6b
'Shu ', # 0x6c
'Xi ', # 0x6d
'Che ', # 0x6e
'Tun ', # 0x6f
'Ni ', # 0x70
'Shan ', # 0x71
'[?] ', # 0x72
'Xian ', # 0x73
'Li ', # 0x74
'Xue ', # 0x75
'Nata ', # 0x76
'[?] ', # 0x77
'Long ', # 0x78
'Yi ', # 0x79
'Qi ', # 0x7a
'Ren ', # 0x7b
'Wu ', # 0x7c
'Han ', # 0x7d
'Shen ', # 0x7e
'Yu ', # 0x7f
'Chu ', # 0x80
'Sui ', # 0x81
'Qi ', # 0x82
'[?] ', # 0x83
'Yue ', # 0x84
'Ban ', # 0x85
'Yao ', # 0x86
'Ang ', # 0x87
'Ya ', # 0x88
'Wu ', # 0x89
'Jie ', # 0x8a
'E ', # 0x8b
'Ji ', # 0x8c
'Qian ', # 0x8d
'Fen ', # 0x8e
'Yuan ', # 0x8f
'Qi ', # 0x90
'Cen ', # 0x91
'Qian ', # 0x92
'Qi ', # 0x93
'Cha ', # 0x94
'Jie ', # 0x95
'Qu ', # 0x96
'Gang ', # 0x97
'Xian ', # 0x98
'Ao ', # 0x99
'Lan ', # 0x9a
'Dao ', # 0x9b
'Ba ', # 0x9c
'Zuo ', # 0x9d
'Zuo ', # 0x9e
'Yang ', # 0x9f
'Ju ', # 0xa0
'Gang ', # 0xa1
'Ke ', # 0xa2
'Gou ', # 0xa3
'Xue ', # 0xa4
'Bei ', # 0xa5
'Li ', # 0xa6
'Tiao ', # 0xa7
'Ju ', # 0xa8
'Yan ', # 0xa9
'Fu ', # 0xaa
'Xiu ', # 0xab
'Jia ', # 0xac
'Ling ', # 0xad
'Tuo ', # 0xae
'Pei ', # 0xaf
'You ', # 0xb0
'Dai ', # 0xb1
'Kuang ', # 0xb2
'Yue ', # 0xb3
'Qu ', # 0xb4
'Hu ', # 0xb5
'Po ', # 0xb6
'Min ', # 0xb7
'An ', # 0xb8
'Tiao ', # 0xb9
'Ling ', # 0xba
'Chi ', # 0xbb
'Yuri ', # 0xbc
'Dong ', # 0xbd
'Cem ', # 0xbe
'Kui ', # 0xbf
'Xiu ', # 0xc0
'Mao ', # 0xc1
'Tong ', # 0xc2
'Xue ', # 0xc3
'Yi ', # 0xc4
'Kura ', # 0xc5
'He ', # 0xc6
'Ke ', # 0xc7
'Luo ', # 0xc8
'E ', # 0xc9
'Fu ', # 0xca
'Xun ', # 0xcb
'Die ', # 0xcc
'Lu ', # 0xcd
'An ', # 0xce
'Er ', # 0xcf
'Gai ', # 0xd0
'Quan ', # 0xd1
'Tong ', # 0xd2
'Yi ', # 0xd3
'Mu ', # 0xd4
'Shi ', # 0xd5
'An ', # 0xd6
'Wei ', # 0xd7
'Hu ', # 0xd8
'Zhi ', # 0xd9
'Mi ', # 0xda
'Li ', # 0xdb
'Ji ', # 0xdc
'Tong ', # 0xdd
'Wei ', # 0xde
'You ', # 0xdf
'Sang ', # 0xe0
'Xia ', # 0xe1
'Li ', # 0xe2
'Yao ', # 0xe3
'Jiao ', # 0xe4
'Zheng ', # 0xe5
'Luan ', # 0xe6
'Jiao ', # 0xe7
'E ', # 0xe8
'E ', # 0xe9
'Yu ', # 0xea
'Ye ', # 0xeb
'Bu ', # 0xec
'Qiao ', # 0xed
'Qun ', # 0xee
'Feng ', # 0xef
'Feng ', # 0xf0
'Nao ', # 0xf1
'Li ', # 0xf2
'You ', # 0xf3
'Xian ', # 0xf4
'Hong ', # 0xf5
'Dao ', # 0xf6
'Shen ', # 0xf7
'Cheng ', # 0xf8
'Tu ', # 0xf9
'Geng ', # 0xfa
'Jun ', # 0xfb
'Hao ', # 0xfc
'Xia ', # 0xfd
'Yin ', # 0xfe
'Yu ', # 0xff
)
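# Hedged usage sketch (an assumption, not part of the original module): tables
# like this one typically hold 256 transliterations for a single Unicode block
# and are indexed by the low byte of the code point.
def _lookup_sketch(code_point, block_table=data):
    # The 0x00-0xff offset within the block selects the transliteration string.
    return block_table[code_point & 0xff]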
|
|
#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
import os
import itertools
import sys
import weakref
import atexit
import threading # we want threading to install its
# cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags
from . import process
__all__ = [
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
]
#
# Logging
#
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25
LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
_logger = None
_log_to_stderr = False
def sub_debug(msg, *args):
if _logger:
_logger.log(SUBDEBUG, msg, *args)
def debug(msg, *args):
if _logger:
_logger.log(DEBUG, msg, *args)
def info(msg, *args):
if _logger:
_logger.log(INFO, msg, *args)
def sub_warning(msg, *args):
if _logger:
_logger.log(SUBWARNING, msg, *args)
def get_logger():
'''
Returns logger used by multiprocessing
'''
global _logger
import logging
logging._acquireLock()
try:
if not _logger:
_logger = logging.getLogger(LOGGER_NAME)
_logger.propagate = 0
# XXX multiprocessing should cleanup before logging
if hasattr(atexit, 'unregister'):
atexit.unregister(_exit_function)
atexit.register(_exit_function)
else:
atexit._exithandlers.remove((_exit_function, (), {}))
atexit._exithandlers.append((_exit_function, (), {}))
finally:
logging._releaseLock()
return _logger
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
global _log_to_stderr
import logging
logger = get_logger()
formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
if level:
logger.setLevel(level)
_log_to_stderr = True
return _logger
#
# Function returning a temp directory which will be removed on exit
#
def _remove_temp_dir(rmtree, tempdir):
rmtree(tempdir)
current_process = process.current_process()
# current_process() can be None if the finalizer is called
# late during Python finalization
if current_process is not None:
current_process._config['tempdir'] = None
def get_temp_dir():
# get name of a temp directory which will be automatically cleaned up
tempdir = process.current_process()._config.get('tempdir')
if tempdir is None:
import shutil, tempfile
tempdir = tempfile.mkdtemp(prefix='pymp-')
info('created temp directory %s', tempdir)
# keep a strong reference to shutil.rmtree(), since the finalizer
# can be called late during Python shutdown
Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
exitpriority=-100)
process.current_process()._config['tempdir'] = tempdir
return tempdir
#
# Support for reinitialization of objects when bootstrapping a child process
#
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()
def _run_after_forkers():
items = list(_afterfork_registry.items())
items.sort()
for (index, ident, func), obj in items:
try:
func(obj)
except Exception as e:
info('after forker raised exception %s', e)
def register_after_fork(obj, func):
_afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
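# Hedged usage sketch (illustrative only): objects that cache per-process state
# usually register a callback so the state is rebuilt in the child after a
# fork, much like the fork-aware helpers at the bottom of this module.
class _ForkAwareCounterSketch(object):
    def __init__(self):
        self.value = 0
        register_after_fork(self, _ForkAwareCounterSketch._reset)
    def _reset(self):
        # Called in the child process with the surviving object.
        self.value = 0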
#
# Finalization using weakrefs
#
_finalizer_registry = {}
_finalizer_counter = itertools.count()
class Finalize(object):
'''
Class which supports object finalization using weakrefs
'''
def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
if (exitpriority is not None) and not isinstance(exitpriority,int):
raise TypeError(
"Exitpriority ({0!r}) must be None or int, not {1!s}".format(
exitpriority, type(exitpriority)))
if obj is not None:
self._weakref = weakref.ref(obj, self)
elif exitpriority is None:
raise ValueError("Without object, exitpriority cannot be None")
self._callback = callback
self._args = args
self._kwargs = kwargs or {}
self._key = (exitpriority, next(_finalizer_counter))
self._pid = os.getpid()
_finalizer_registry[self._key] = self
def __call__(self, wr=None,
# Need to bind these locally because the globals can have
# been cleared at shutdown
_finalizer_registry=_finalizer_registry,
sub_debug=sub_debug, getpid=os.getpid):
'''
Run the callback unless it has already been called or cancelled
'''
try:
del _finalizer_registry[self._key]
except KeyError:
sub_debug('finalizer no longer registered')
else:
if self._pid != getpid():
sub_debug('finalizer ignored because different process')
res = None
else:
sub_debug('finalizer calling %s with args %s and kwargs %s',
self._callback, self._args, self._kwargs)
res = self._callback(*self._args, **self._kwargs)
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
return res
def cancel(self):
'''
Cancel finalization of the object
'''
try:
del _finalizer_registry[self._key]
except KeyError:
pass
else:
self._weakref = self._callback = self._args = \
self._kwargs = self._key = None
def still_active(self):
'''
Return whether this finalizer is still waiting to invoke callback
'''
return self._key in _finalizer_registry
def __repr__(self):
try:
obj = self._weakref()
except (AttributeError, TypeError):
obj = None
if obj is None:
return '<%s object, dead>' % self.__class__.__name__
x = '<%s object, callback=%s' % (
self.__class__.__name__,
getattr(self._callback, '__name__', self._callback))
if self._args:
x += ', args=' + str(self._args)
if self._kwargs:
x += ', kwargs=' + str(self._kwargs)
if self._key[0] is not None:
x += ', exitpriority=' + str(self._key[0])
return x + '>'
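# Illustrative sketch (not part of the original module): registering a
# Finalize callback so a temporary file is deleted either when its owner is
# garbage collected or during _exit_function(), whichever happens first.
# The Workspace class and _example_finalize_usage() are hypothetical.
def _example_finalize_usage():
    import tempfile

    class Workspace(object):
        def __init__(self):
            fd, self.path = tempfile.mkstemp(prefix='example-')
            os.close(fd)
            # Weakref-based finalizer: runs os.unlink(self.path) when this
            # Workspace dies, or at shutdown because exitpriority is set.
            self._finalizer = Finalize(self, os.unlink, args=(self.path,),
                                       exitpriority=0)

    return Workspace()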
def _run_finalizers(minpriority=None):
'''
    Run all finalizers whose exit priority is not None and at least minpriority.
Finalizers with highest priority are called first; finalizers with
the same priority will be called in reverse order of creation.
'''
if _finalizer_registry is None:
# This function may be called after this module's globals are
# destroyed. See the _exit_function function in this module for more
# notes.
return
if minpriority is None:
f = lambda p : p[0] is not None
else:
f = lambda p : p[0] is not None and p[0] >= minpriority
# Careful: _finalizer_registry may be mutated while this function
# is running (either by a GC run or by another thread).
# list(_finalizer_registry) should be atomic, while
# list(_finalizer_registry.items()) is not.
keys = [key for key in list(_finalizer_registry) if f(key)]
keys.sort(reverse=True)
for key in keys:
finalizer = _finalizer_registry.get(key)
# key may have been removed from the registry
if finalizer is not None:
sub_debug('calling %s', finalizer)
try:
finalizer()
except Exception:
import traceback
traceback.print_exc()
if minpriority is None:
_finalizer_registry.clear()
#
# Clean up on exit
#
def is_exiting():
'''
Returns true if the process is shutting down
'''
return _exiting or _exiting is None
_exiting = False
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
active_children=process.active_children,
current_process=process.current_process):
# We hold on to references to functions in the arglist due to the
# situation described below, where this function is called after this
# module's globals are destroyed.
global _exiting
if not _exiting:
_exiting = True
info('process shutting down')
debug('running all "atexit" finalizers with priority >= 0')
_run_finalizers(0)
if current_process() is not None:
# We check if the current process is None here because if
# it's None, any call to ``active_children()`` will raise
# an AttributeError (active_children winds up trying to
# get attributes from util._current_process). One
# situation where this can happen is if someone has
# manipulated sys.modules, causing this module to be
# garbage collected. The destructor for the module type
# then replaces all values in the module dict with None.
# For instance, after setuptools runs a test it replaces
# sys.modules with a copy created earlier. See issues
# #9775 and #15881. Also related: #4106, #9205, and
# #9207.
for p in active_children():
if p.daemon:
info('calling terminate() for daemon %s', p.name)
p._popen.terminate()
for p in active_children():
info('calling join() for process %s', p.name)
p.join()
debug('running the remaining "atexit" finalizers')
_run_finalizers()
atexit.register(_exit_function)
#
# Some fork aware types
#
class ForkAwareThreadLock(object):
def __init__(self):
self._reset()
register_after_fork(self, ForkAwareThreadLock._reset)
def _reset(self):
self._lock = threading.Lock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __enter__(self):
return self._lock.__enter__()
def __exit__(self, *args):
return self._lock.__exit__(*args)
class ForkAwareLocal(threading.local):
def __init__(self):
register_after_fork(self, lambda obj : obj.__dict__.clear())
def __reduce__(self):
return type(self), ()
#
# Close fds except those specified
#
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
MAXFD = 256
def close_all_fds_except(fds):
fds = list(fds) + [-1, MAXFD]
fds.sort()
assert fds[-1] == MAXFD, 'fd too large'
for i in range(len(fds) - 1):
os.closerange(fds[i]+1, fds[i+1])
#
# Close sys.stdin and replace stdin with os.devnull
#
def _close_stdin():
if sys.stdin is None:
return
try:
sys.stdin.close()
except (OSError, ValueError):
pass
try:
fd = os.open(os.devnull, os.O_RDONLY)
try:
sys.stdin = open(fd, closefd=False)
except:
os.close(fd)
raise
except (OSError, ValueError):
pass
#
# Flush standard streams, if any
#
def _flush_std_streams():
try:
sys.stdout.flush()
except (AttributeError, ValueError):
pass
try:
sys.stderr.flush()
except (AttributeError, ValueError):
pass
#
# Start a program with only specified fds kept open
#
def spawnv_passfds(path, args, passfds):
import _posixsubprocess
passfds = tuple(sorted(map(int, passfds)))
errpipe_read, errpipe_write = os.pipe()
try:
return _posixsubprocess.fork_exec(
args, [os.fsencode(path)], True, passfds, None, None,
-1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
False, False, None)
finally:
os.close(errpipe_read)
os.close(errpipe_write)
def close_fds(*fds):
"""Close each file descriptor given as an argument"""
for fd in fds:
os.close(fd)
|
|
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <[email protected]>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
import sys
from functools import reduce
from . import numerictypes as _nt
from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
from .multiarray import (array, format_longfloat, datetime_as_string,
datetime_data, dtype)
from .fromnumeric import ravel
from .numeric import asarray
if sys.version_info[0] >= 3:
_MAXINT = sys.maxsize
_MININT = -sys.maxsize - 1
else:
_MAXINT = sys.maxint
_MININT = -sys.maxint - 1
def product(x, y):
return x*y
_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension
_summaryThreshold = 1000 # total items > triggers array summarization
_float_output_precision = 8
_float_output_suppress_small = False
_line_width = 75
_nan_str = 'nan'
_inf_str = 'inf'
_formatter = None # formatting function for array elements
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None,
nanstr=None, infstr=None,
formatter=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int, optional
Number of digits of precision for floating point output (default 8).
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
        Whether or not to suppress printing of small floating point values
using scientific notation (default False).
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
See Also
--------
get_printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> print(np.array([1.123456789]))
[ 1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> print(np.arange(10))
[0 1 2 ..., 7 8 9]
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
A custom formatter can be used to display array elements as desired:
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
>>> x = np.arange(3)
>>> x
array([int: 0, int: -1, int: -2])
>>> np.set_printoptions() # formatter gets reset
>>> x
array([0, 1, 2])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3,infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
"""
global _summaryThreshold, _summaryEdgeItems, _float_output_precision
global _line_width, _float_output_suppress_small, _nan_str, _inf_str
global _formatter
if linewidth is not None:
_line_width = linewidth
if threshold is not None:
_summaryThreshold = threshold
if edgeitems is not None:
_summaryEdgeItems = edgeitems
if precision is not None:
_float_output_precision = precision
if suppress is not None:
_float_output_suppress_small = not not suppress
if nanstr is not None:
_nan_str = nanstr
if infstr is not None:
_inf_str = infstr
_formatter = formatter
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
- formatter : dict of callables
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, set_string_function
"""
d = dict(precision=_float_output_precision,
threshold=_summaryThreshold,
edgeitems=_summaryEdgeItems,
linewidth=_line_width,
suppress=_float_output_suppress_small,
nanstr=_nan_str,
infstr=_inf_str,
formatter=_formatter)
return d
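# Illustrative sketch (not part of the original module): saving the current
# print options, applying a temporary change, and restoring the previous
# state. The helper name is hypothetical.
def _example_printoptions_roundtrip():
    saved = get_printoptions()
    try:
        set_printoptions(precision=3, suppress=True)
        temporary = get_printoptions()
    finally:
        # The keys returned by get_printoptions() match the keyword arguments
        # accepted by set_printoptions(), so this restores the old state.
        set_printoptions(**saved)
    return temporary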
def _leading_trailing(a):
from . import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
b = _nc.concatenate((a[:_summaryEdgeItems],
a[-_summaryEdgeItems:]))
else:
b = a
else:
if len(a) > 2*_summaryEdgeItems:
l = [_leading_trailing(a[i]) for i in range(
min(len(a), _summaryEdgeItems))]
l.extend([_leading_trailing(a[-i]) for i in range(
min(len(a), _summaryEdgeItems), 0, -1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
b = _nc.concatenate(tuple(l))
return b
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
return repr(x)
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
prefix="", formatter=None):
if max_line_width is None:
max_line_width = _line_width
if precision is None:
precision = _float_output_precision
if suppress_small is None:
suppress_small = _float_output_suppress_small
if formatter is None:
formatter = _formatter
if a.size > _summaryThreshold:
summary_insert = "..., "
data = _leading_trailing(a)
else:
summary_insert = ""
data = ravel(asarray(a))
formatdict = {'bool': _boolFormatter,
'int': IntegerFormat(data),
'float': FloatFormat(data, precision, suppress_small),
'longfloat': LongFloatFormat(precision),
'complexfloat': ComplexFormat(data, precision,
suppress_small),
'longcomplexfloat': LongComplexFormat(precision),
'datetime': DatetimeFormat(data),
'timedelta': TimedeltaFormat(data),
'numpystr': repr_format,
'str': str}
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = formatter['all']
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = formatter['int_kind']
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = formatter['float_kind']
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = formatter['complex_kind']
if 'str_kind' in fkeys:
for key in ['numpystr', 'str']:
formatdict[key] = formatter['str_kind']
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = formatter[key]
# find the right formatting function for the array
dtypeobj = a.dtype.type
if issubclass(dtypeobj, _nt.bool_):
format_function = formatdict['bool']
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
format_function = formatdict['timedelta']
else:
format_function = formatdict['int']
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
format_function = formatdict['longfloat']
else:
format_function = formatdict['float']
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
format_function = formatdict['longcomplexfloat']
else:
format_function = formatdict['complexfloat']
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
format_function = formatdict['numpystr']
elif issubclass(dtypeobj, _nt.datetime64):
format_function = formatdict['datetime']
else:
format_function = formatdict['numpystr']
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, len(a.shape), max_line_width,
next_line_prefix, separator,
_summaryEdgeItems, summary_insert)[:-1]
return lst
def _convert_arrays(obj):
from . import numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=repr, formatter=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters split the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
An array is typically printed as::
'prefix(' + array2string(a) + ')'
The length of the prefix string is used to align the
output correctly.
style : function, optional
A function that accepts an ndarray and returns a string. Used only
when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
            - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
    This is a very flexible function; `array_repr` and `array_str` use
    `array2string` internally, so keywords with the same name should work
    identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print(np.array2string(x, precision=2, separator=',',
... suppress_small=True))
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
if a.shape == ():
x = a.item()
if isinstance(x, tuple):
x = _convert_arrays(x)
lst = style(x)
elif reduce(product, a.shape) == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
lst = _array2string(a, max_line_width, precision, suppress_small,
separator, prefix, formatter=formatter)
return lst
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
def _formatArray(a, format_function, rank, max_line_len,
next_line_prefix, separator, edge_items, summary_insert):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
if rank == 0:
obj = a.item()
if isinstance(obj, tuple):
obj = _convert_arrays(obj)
return str(obj)
if summary_insert and 2*edge_items < len(a):
leading_items = edge_items
trailing_items = edge_items
summary_insert1 = summary_insert
else:
leading_items = 0
trailing_items = len(a)
summary_insert1 = ""
if rank == 1:
s = ""
line = next_line_prefix
for i in range(leading_items):
word = format_function(a[i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
if summary_insert1:
s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)
for i in range(trailing_items, 1, -1):
word = format_function(a[-i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
word = format_function(a[-1])
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
s += line + "]\n"
s = '[' + s[len(next_line_prefix):]
else:
s = '['
sep = separator.rstrip()
for i in range(leading_items):
if i > 0:
s += next_line_prefix
s += _formatArray(a[i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
if summary_insert1:
s += next_line_prefix + summary_insert1 + "\n"
for i in range(trailing_items, 1, -1):
if leading_items or i != trailing_items:
s += next_line_prefix
s += _formatArray(a[-i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
if leading_items or trailing_items > 1:
s += next_line_prefix
s += _formatArray(a[-1], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert).rstrip()+']\n'
return s
class FloatFormat(object):
def __init__(self, data, precision, suppress_small, sign=False):
self.precision = precision
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.max_str_len = 0
try:
self.fillFormat(data)
except (TypeError, NotImplementedError):
# if reduce(data) fails, this instance will not be called, just
# instantiated in formatdict.
pass
def fillFormat(self, data):
from . import numeric as _nc
with _nc.errstate(all='ignore'):
special = isnan(data) | isinf(data)
valid = not_equal(data, 0) & ~special
non_zero = absolute(data.compress(valid))
if len(non_zero) == 0:
max_val = 0.
min_val = 0.
else:
max_val = maximum.reduce(non_zero)
min_val = minimum.reduce(non_zero)
if max_val >= 1.e8:
self.exp_format = True
if not self.suppress_small and (min_val < 0.0001
or max_val/min_val > 1000.):
self.exp_format = True
if self.exp_format:
self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
self.max_str_len = 8 + self.precision
if self.large_exponent:
self.max_str_len += 1
if self.sign:
format = '%+'
else:
format = '%'
format = format + '%d.%de' % (self.max_str_len, self.precision)
else:
format = '%%.%df' % (self.precision,)
if len(non_zero):
precision = max([_digits(x, self.precision, format)
for x in non_zero])
else:
precision = 0
precision = min(self.precision, precision)
self.max_str_len = len(str(int(max_val))) + precision + 2
if _nc.any(special):
self.max_str_len = max(self.max_str_len,
len(_nan_str),
len(_inf_str)+1)
if self.sign:
format = '%#+'
else:
format = '%#'
format = format + '%d.%df' % (self.max_str_len, precision)
self.special_fmt = '%%%ds' % (self.max_str_len,)
self.format = format
def __call__(self, x, strip_zeros=True):
from . import numeric as _nc
with _nc.errstate(invalid='ignore'):
if isnan(x):
if self.sign:
return self.special_fmt % ('+' + _nan_str,)
else:
return self.special_fmt % (_nan_str,)
elif isinf(x):
if x > 0:
if self.sign:
return self.special_fmt % ('+' + _inf_str,)
else:
return self.special_fmt % (_inf_str,)
else:
return self.special_fmt % ('-' + _inf_str,)
s = self.format % x
if self.large_exponent:
# 3-digit exponent
expsign = s[-3]
if expsign == '+' or expsign == '-':
s = s[1:-2] + '0' + s[-2:]
elif self.exp_format:
# 2-digit exponent
if s[-3] == '0':
s = ' ' + s[:-3] + s[-2:]
elif strip_zeros:
z = s.rstrip('0')
s = z + ' '*(len(s)-len(z))
return s
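# Illustrative sketch (not part of the original module): using FloatFormat the
# same way _array2string() above does, as a per-element formatter. The sample
# values are hypothetical.
def _example_float_format():
    data = ravel(asarray(array([0.0001, 1.5, 2500.0])))
    fmt = FloatFormat(data, precision=4, suppress_small=False)
    # Each element comes back as a fixed-width string; scientific notation is
    # used here because the values span several orders of magnitude.
    return [fmt(x) for x in data]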
def _digits(x, precision, format):
if precision > 0:
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
else:
return 0
class IntegerFormat(object):
def __init__(self, data):
try:
max_str_len = max(len(str(maximum.reduce(data))),
len(str(minimum.reduce(data))))
self.format = '%' + str(max_str_len) + 'd'
except (TypeError, NotImplementedError):
# if reduce(data) fails, this instance will not be called, just
# instantiated in formatdict.
pass
except ValueError:
# this occurs when everything is NA
pass
def __call__(self, x):
if _MININT < x < _MAXINT:
return self.format % x
else:
return "%s" % x
class LongFloatFormat(object):
# XXX Have to add something to determine the width to use a la FloatFormat
# Right now, things won't line up properly
def __init__(self, precision, sign=False):
self.precision = precision
self.sign = sign
def __call__(self, x):
if isnan(x):
if self.sign:
return '+' + _nan_str
else:
return ' ' + _nan_str
elif isinf(x):
if x > 0:
if self.sign:
return '+' + _inf_str
else:
return ' ' + _inf_str
else:
return '-' + _inf_str
elif x >= 0:
if self.sign:
return '+' + format_longfloat(x, self.precision)
else:
return ' ' + format_longfloat(x, self.precision)
else:
return format_longfloat(x, self.precision)
class LongComplexFormat(object):
def __init__(self, precision):
self.real_format = LongFloatFormat(precision)
self.imag_format = LongFloatFormat(precision, sign=True)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
return r + i + 'j'
class ComplexFormat(object):
def __init__(self, x, precision, suppress_small):
self.real_format = FloatFormat(x.real, precision, suppress_small)
self.imag_format = FloatFormat(x.imag, precision, suppress_small,
sign=True)
def __call__(self, x):
r = self.real_format(x.real, strip_zeros=False)
i = self.imag_format(x.imag, strip_zeros=False)
if not self.imag_format.exp_format:
z = i.rstrip('0')
i = z + 'j' + ' '*(len(i)-len(z))
else:
i = i + 'j'
return r + i
class DatetimeFormat(object):
def __init__(self, x, unit=None, timezone=None, casting='same_kind'):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
if timezone is None:
timezone = 'naive'
self.timezone = timezone
self.unit = unit
self.casting = casting
def __call__(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
class TimedeltaFormat(object):
def __init__(self, data):
if data.dtype.kind == 'm':
nat_value = array(['NaT'], dtype=data.dtype)[0]
int_dtype = dtype(data.dtype.byteorder + 'i8')
int_view = data.view(int_dtype)
v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
if len(v) > 0:
# Max str length of non-NaT elements
max_str_len = max(len(str(maximum.reduce(v))),
len(str(minimum.reduce(v))))
else:
max_str_len = 0
if len(v) < len(data):
# data contains a NaT
max_str_len = max(max_str_len, 5)
self.format = '%' + str(max_str_len) + 'd'
self._nat = "'NaT'".rjust(max_str_len)
def __call__(self, x):
# TODO: After NAT == NAT deprecation should be simplified:
if (x + 1).view('i8') == x.view('i8'):
return self._nat
else:
return self.format % x.astype('i8')
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Pytest plugin for using tvm testing extensions.
TVM provides utilities for testing across all supported targets, and
for parametrizing more easily across many inputs. For more information
on usage of these features, see documentation in the tvm.testing
module.
These are enabled by default in all pytests provided by tvm, but may
be useful externally for one-off testing. To enable, add the
following line to the test script, or to the conftest.py in the same
directory as the test scripts.
pytest_plugins = ['tvm.testing.plugin']
"""
import pytest
import _pytest
import tvm
from tvm.testing import utils
MARKERS = {
"gpu": "mark a test as requiring a gpu",
"tensorcore": "mark a test as requiring a tensorcore",
"cuda": "mark a test as requiring cuda",
"opencl": "mark a test as requiring opencl",
"rocm": "mark a test as requiring rocm",
"vulkan": "mark a test as requiring vulkan",
"metal": "mark a test as requiring metal",
"llvm": "mark a test as requiring llvm",
"ethosn": "mark a test as requiring ethosn",
"hexagon": "mark a test as requiring hexagon",
"corstone300": "mark a test as requiring Corstone300 FVP",
}
def pytest_configure(config):
"""Runs at pytest configure time, defines marks to be used later."""
for markername, desc in MARKERS.items():
config.addinivalue_line("markers", "{}: {}".format(markername, desc))
print("enabled targets:", "; ".join(map(lambda x: x[0], utils.enabled_targets())))
print("pytest marker:", config.option.markexpr)
def pytest_generate_tests(metafunc):
"""Called once per unit test, modifies/parametrizes it as needed."""
_parametrize_correlated_parameters(metafunc)
_auto_parametrize_target(metafunc)
_add_target_specific_marks(metafunc)
def pytest_collection_modifyitems(config, items):
"""Called after all tests are chosen, currently used for bookkeeping."""
# pylint: disable=unused-argument
_count_num_fixture_uses(items)
_remove_global_fixture_definitions(items)
_sort_tests(items)
@pytest.fixture
def dev(target):
"""Give access to the device to tests that need it."""
return tvm.device(target)
def pytest_sessionfinish(session, exitstatus):
# Don't exit with an error if we select a subset of tests that doesn't
# include anything
if session.config.option.markexpr != "":
if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
session.exitstatus = pytest.ExitCode.OK
def _auto_parametrize_target(metafunc):
"""Automatically applies parametrize_targets
Used if a test function uses the "target" fixture, but isn't
already marked with @tvm.testing.parametrize_targets. Intended
for use in the pytest_generate_tests() handler of a conftest.py
file.
"""
if "target" in metafunc.fixturenames:
# Check if any explicit parametrizations exist, and apply one
# if they do not. If the function is marked with either
# excluded or known failing targets, use these to determine
# the targets to be used.
parametrized_args = [
arg.strip()
for mark in metafunc.definition.iter_markers("parametrize")
for arg in mark.args[0].split(",")
]
if "target" not in parametrized_args:
excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", [])
# Add a parametrize marker instead of calling
# metafunc.parametrize so that the parametrize rewriting
# can still occur.
mark = pytest.mark.parametrize(
"target",
[
t["target"]
for t in utils._get_targets()
if t["target_kind"] not in excluded_targets
],
scope="session",
)
metafunc.definition.add_marker(mark)
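# Illustrative sketch (not part of this plugin): a test that only requests the
# "target" fixture. With the logic above, it gains a parametrize mark over
# every enabled target and runs once per target string (e.g. "llvm", "cuda").
# The function name is hypothetical.
def _example_test_runs_per_target(target):
    assert isinstance(target, str)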
def _add_target_specific_marks(metafunc):
"""Add any target-specific marks to parametrizations over target"""
def update_parametrize_target_arg(
mark,
argnames,
argvalues,
*args,
**kwargs,
):
args = [arg.strip() for arg in argnames.split(",") if arg.strip()]
if "target" in args:
target_i = args.index("target")
new_argvalues = []
for argvalue in argvalues:
if isinstance(argvalue, _pytest.mark.structures.ParameterSet):
# The parametrized value is already a
# pytest.param, so track any marks already
# defined.
param_set = argvalue.values
target = param_set[target_i]
additional_marks = argvalue.marks
elif len(args) == 1:
# Single value parametrization, argvalue is a list of values.
target = argvalue
param_set = (target,)
additional_marks = []
else:
# Multiple correlated parameters, argvalue is a list of tuple of values.
param_set = argvalue
target = param_set[target_i]
additional_marks = []
if mark in metafunc.definition.own_markers:
xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", [])
target_kind = target.split()[0] if isinstance(target, str) else target.kind.name
if target_kind in xfail_targets:
additional_marks.append(
pytest.mark.xfail(
reason=f'Known failing test for target "{target_kind}"'
)
)
new_argvalues.append(
pytest.param(
*param_set, marks=_target_to_requirement(target) + additional_marks
)
)
try:
argvalues[:] = new_argvalues
except TypeError as err:
pyfunc = metafunc.definition.function
filename = pyfunc.__code__.co_filename
line_number = pyfunc.__code__.co_firstlineno
msg = (
f"Unit test {metafunc.function.__name__} ({filename}:{line_number}) "
"is parametrized using a tuple of parameters instead of a list "
"of parameters."
)
raise TypeError(msg) from err
if "target" in metafunc.fixturenames:
        # Update any explicit use of @pytest.mark.parametrize to
# parametrize over targets. This adds the appropriate
# @tvm.testing.requires_* markers for each target.
for mark in metafunc.definition.iter_markers("parametrize"):
update_parametrize_target_arg(mark, *mark.args, **mark.kwargs)
def _count_num_fixture_uses(items):
# Helper function, counts the number of tests that use each cached
# fixture. Should be called from pytest_collection_modifyitems().
for item in items:
is_skipped = item.get_closest_marker("skip") or any(
mark.args[0] for mark in item.iter_markers("skipif")
)
if is_skipped:
continue
for fixturedefs in item._fixtureinfo.name2fixturedefs.values():
            # Only increment the active fixturedef, in case a name has been overridden.
fixturedef = fixturedefs[-1]
if hasattr(fixturedef.func, "num_tests_use_this_fixture"):
fixturedef.func.num_tests_use_this_fixture[0] += 1
def _remove_global_fixture_definitions(items):
# Helper function, removes fixture definitions from the global
# variables of the modules they were defined in. This is intended
# to improve readability of error messages by giving a NameError
# if a test function accesses a pytest fixture but doesn't include
# it as an argument. Should be called from
# pytest_collection_modifyitems().
modules = set(item.module for item in items)
for module in modules:
for name in dir(module):
obj = getattr(module, name)
if hasattr(obj, "_pytestfixturefunction") and isinstance(
obj._pytestfixturefunction, _pytest.fixtures.FixtureFunctionMarker
):
delattr(module, name)
def _sort_tests(items):
"""Sort tests by file/function.
By default, pytest will sort tests to maximize the re-use of
fixtures. However, this assumes that all fixtures have an equal
cost to generate, and no caches outside of those managed by
pytest. A tvm.testing.parameter is effectively free, while
reference data for testing may be quite large. Since most of the
TVM fixtures are specific to a python function, sort the test
ordering by python function, so that
tvm.testing.utils._fixture_cache can be cleared sooner rather than
later.
Should be called from pytest_collection_modifyitems.
"""
def sort_key(item):
filename, lineno, test_name = item.location
test_name = test_name.split("[")[0]
return filename, lineno, test_name
items.sort(key=sort_key)
def _target_to_requirement(target):
if isinstance(target, str):
target = tvm.target.Target(target)
# mapping from target to decorator
if target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", []):
return utils.requires_cudnn()
if target.kind.name == "cuda" and "cublas" in target.attrs.get("libs", []):
return utils.requires_cublas()
if target.kind.name == "cuda":
return utils.requires_cuda()
if target.kind.name == "rocm":
return utils.requires_rocm()
if target.kind.name == "vulkan":
return utils.requires_vulkan()
if target.kind.name == "nvptx":
return utils.requires_nvptx()
if target.kind.name == "metal":
return utils.requires_metal()
if target.kind.name == "opencl":
return utils.requires_opencl()
if target.kind.name == "llvm":
return utils.requires_llvm()
if target.kind.name == "hexagon":
return utils.requires_hexagon()
return []
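# Illustrative sketch (not part of this plugin): the requirement marks the
# function above would return for two target strings, assuming those backends
# are available. The helper name is hypothetical.
def _example_target_requirements():
    plain_cuda = _target_to_requirement("cuda")               # requires_cuda()
    cudnn_cuda = _target_to_requirement("cuda -libs=cudnn")   # requires_cudnn()
    return plain_cuda, cudnn_cuda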
def _parametrize_correlated_parameters(metafunc):
parametrize_needed = {}
for name, fixturedefs in metafunc.definition._fixtureinfo.name2fixturedefs.items():
fixturedef = fixturedefs[-1]
if hasattr(fixturedef.func, "parametrize_group") and hasattr(
fixturedef.func, "parametrize_values"
):
group = fixturedef.func.parametrize_group
values = fixturedef.func.parametrize_values
ids = fixturedef.func.parametrize_ids
if group in parametrize_needed:
assert ids == parametrize_needed[group]["ids"]
else:
parametrize_needed[group] = {"ids": ids, "params": []}
parametrize_needed[group]["params"].append((name, values))
for parametrize_group in parametrize_needed.values():
params = parametrize_group["params"]
ids = parametrize_group["ids"]
if len(params) == 1:
name, values = params[0]
metafunc.parametrize(name, values, indirect=True, ids=ids)
else:
names = ",".join(name for name, values in params)
value_sets = zip(*[values for name, values in params])
metafunc.parametrize(names, value_sets, indirect=True, ids=ids)
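# Illustrative sketch (not part of this plugin): how a test module would
# declare correlated parameters. tvm.testing.parameters() creates fixtures
# carrying the parametrize_group/parametrize_values attributes consumed by
# _parametrize_correlated_parameters() above.
#
#     size, dtype = tvm.testing.parameters((8, "float32"), (16, "int8"))
#
#     def test_example(size, dtype):
#         assert size in (8, 16)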
|
|
import director.applogic as app
from director import lcmUtils
from director import transformUtils
from director import visualization as vis
from director import filterUtils
from director import drcargs
from director.shallowCopy import shallowCopy
from director.timercallback import TimerCallback
from director import vtkNumpy
from director import objectmodel as om
import director.vtkAll as vtk
from director.debugVis import DebugData
import PythonQt
from PythonQt import QtCore, QtGui
import bot_core as lcmbotcore
import numpy as np
from director.simpletimer import SimpleTimer
from director import ioUtils
import sys
import drc as lcmdrc
import maps as lcmmaps
import multisense as lcmmultisense
def clipRange(dataObj, arrayName, thresholdRange):
if not dataObj.GetPointData().GetArray(arrayName):
raise Exception('clipRange: could not locate array: %s' % arrayName)
dataObj.GetPointData().SetScalars(dataObj.GetPointData().GetArray(arrayName))
f = vtk.vtkClipPolyData()
f.SetInput(dataObj)
f.SetValue(thresholdRange[0])
f.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
f2 = vtk.vtkClipPolyData()
f2.AddInputConnection(f.GetOutputPort())
f2.SetValue(thresholdRange[1])
f2.InsideOutOn()
f2.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, arrayName)
f2.Update()
return shallowCopy(f2.GetOutput())
def makeSphere(radius, resolution):
s = vtk.vtkSphereSource()
s.SetThetaResolution(resolution)
s.SetPhiResolution(resolution)
s.SetRadius(radius)
s.SetEndPhi(85)
s.Update()
return shallowCopy(s.GetOutput())
def colorizePoints(polyData, cameraName='CAMERA_LEFT'):
imageManager.queue.colorizePoints(cameraName, polyData)
def sendFOVRequest(channel, imagePoints):
channelToImageType = {
'CAMERA_LEFT' : lcmmaps.data_request_t.CAMERA_IMAGE_HEAD_LEFT,
'CAMERACHEST_LEFT' : lcmmaps.data_request_t.CAMERA_IMAGE_LCHEST,
'CAMERACHEST_RIGHT' : lcmmaps.data_request_t.CAMERA_IMAGE_RCHEST,
}
dataRequest = lcmmaps.data_request_t()
dataRequest.type = channelToImageType[channel]
message = lcmdrc.subimage_request_t()
message.data_request = dataRequest
imagePoints = np.array([[pt[0], pt[1]] for pt in imagePoints])
minX, maxX = imagePoints[:,0].min(), imagePoints[:,0].max()
minY, maxY = imagePoints[:,1].min(), imagePoints[:,1].max()
message.x = minX
message.y = minY
message.w = maxX - minX
message.h = maxY - minY
#print message.x, message.y, message.w, message.h
requestChannel = 'SUBIMAGE_REQUEST'
lcmUtils.publish(requestChannel, message)
def testColorize():
    radius = 10
    resolution = 400
    s = makeSphere(radius, resolution)
    # Paint the sphere vertices with the camera image via the global
    # imageManager queue, then show the result.
    colorizePoints(s)
    vis.showPolyData(s, 'sphere', colorByName='rgb')
def rayDebug(position, ray):
d = DebugData()
d.addLine(position, position+ray*5.0)
drcView = app.getViewManager().findView('DRC View')
obj = vis.updatePolyData(d.getPolyData(), 'camera ray', view=drcView, color=[0,1,0])
obj.actor.GetProperty().SetLineWidth(2)
class ImageManager(object):
def __init__(self):
self.images = {}
self.imageUtimes = {}
self.textures = {}
self.imageRotations180 = {}
self.queue = PythonQt.dd.ddBotImageQueue(lcmUtils.getGlobalLCMThread())
self.queue.init(lcmUtils.getGlobalLCMThread(), drcargs.args().config_file)
def addImage(self, name):
if name in self.images:
return
image = vtk.vtkImageData()
tex = vtk.vtkTexture()
tex.SetInput(image)
tex.EdgeClampOn()
tex.RepeatOff()
self.imageUtimes[name] = 0
self.images[name] = image
self.textures[name] = tex
self.imageRotations180[name] = False
def writeImage(self, imageName, outFile):
writer = vtk.vtkPNGWriter()
writer.SetInput(self.images[imageName])
writer.SetFileName(outFile)
writer.Write()
def updateImage(self, imageName):
imageUtime = self.queue.getCurrentImageTime(imageName)
if imageUtime != self.imageUtimes[imageName]:
image = self.images[imageName]
self.imageUtimes[imageName] = self.queue.getImage(imageName, image)
if self.imageRotations180[imageName]:
self.images[imageName].ShallowCopy(filterUtils.rotateImage180(image))
return imageUtime
def updateImages(self):
for imageName in self.images.keys():
self.updateImage(imageName)
def setImageRotation180(self, imageName):
assert imageName in self.images
self.imageRotations180[imageName] = True
def hasImage(self, imageName):
return imageName in self.images
def getImage(self, imageName):
return self.images[imageName]
def getUtime(self, imageName):
return self.imageUtimes[imageName]
def getTexture(self, imageName):
return self.textures[imageName]
def disableCameraTexture(obj):
obj.actor.SetTexture(None)
obj.actor.GetProperty().LightingOn()
obj.actor.GetProperty().SetColor(obj.getProperty('Color'))
def applyCameraTexture(obj, imageManager, imageName='CAMERA_LEFT'):
imageUtime = imageManager.getUtime(imageName)
if not imageUtime:
return
cameraToLocal = vtk.vtkTransform()
imageManager.queue.getTransform(imageName, 'local', imageUtime, cameraToLocal)
pd = filterUtils.transformPolyData(obj.polyData, obj.actor.GetUserTransform())
pd = filterUtils.transformPolyData(pd, cameraToLocal.GetLinearInverse())
imageManager.queue.computeTextureCoords(imageName, pd)
tcoordsArrayName = 'tcoords_%s' % imageName
tcoords = pd.GetPointData().GetArray(tcoordsArrayName)
assert tcoords
obj.polyData.GetPointData().SetTCoords(None)
obj.polyData.GetPointData().SetTCoords(tcoords)
obj._updateColorByProperty()
obj.actor.SetTexture(imageManager.getTexture(imageName))
obj.actor.GetProperty().LightingOff()
obj.actor.GetProperty().SetColor([1,1,1])
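# Illustrative sketch (not part of the original module): texturing an existing
# object-model item with the head camera image and later reverting it. "obj"
# is assumed to be a polydata item from director.visualization, and init()
# must have been called so the global imageManager exists.
def _exampleToggleCameraTexture(obj):
    applyCameraTexture(obj, imageManager, imageName='CAMERA_LEFT')
    # ... later, restore normal lighting and the object's own color:
    disableCameraTexture(obj)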
class CameraView(object):
def __init__(self, imageManager, view=None):
self.imageManager = imageManager
self.updateUtimes = {}
self.robotModel = None
self.sphereObjects = {}
self.sphereImages = [
'CAMERA_LEFT',
'CAMERACHEST_RIGHT',
'CAMERACHEST_LEFT']
for name in self.sphereImages:
imageManager.addImage(name)
self.updateUtimes[name] = 0
self.initView(view)
self.initEventFilter()
self.rayCallback = rayDebug
self.timerCallback = TimerCallback()
self.timerCallback.targetFps = 60
self.timerCallback.callback = self.updateView
self.timerCallback.start()
def onViewDoubleClicked(self, displayPoint):
obj, pickedPoint = vis.findPickedObject(displayPoint, self.view)
if pickedPoint is None or not obj:
return
imageName = obj.getProperty('Name')
imageUtime = self.imageManager.getUtime(imageName)
cameraToLocal = vtk.vtkTransform()
self.imageManager.queue.getTransform(imageName, 'local', imageUtime, cameraToLocal)
utorsoToLocal = vtk.vtkTransform()
self.imageManager.queue.getTransform('utorso', 'local', imageUtime, utorsoToLocal)
p = range(3)
utorsoToLocal.TransformPoint(pickedPoint, p)
ray = np.array(p) - np.array(cameraToLocal.GetPosition())
ray /= np.linalg.norm(ray)
if self.rayCallback:
self.rayCallback(np.array(cameraToLocal.GetPosition()), ray)
def filterEvent(self, obj, event):
if event.type() == QtCore.QEvent.MouseButtonDblClick:
self.eventFilter.setEventHandlerResult(True)
self.onViewDoubleClicked(vis.mapMousePosition(obj, event))
elif event.type() == QtCore.QEvent.KeyPress:
if str(event.text()).lower() == 'p':
self.eventFilter.setEventHandlerResult(True)
elif str(event.text()).lower() == 'r':
self.eventFilter.setEventHandlerResult(True)
self.resetCamera()
def initEventFilter(self):
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
qvtkwidget = self.view.vtkWidget()
qvtkwidget.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
def initImageRotations(self, robotModel):
self.robotModel = robotModel
# Rotate Multisense image/CAMERA_LEFT if the camera frame is rotated (e.g. for Valkyrie)
if robotModel.getHeadLink():
tf = robotModel.getLinkFrame(robotModel.getHeadLink())
roll = transformUtils.rollPitchYawFromTransform(tf)[0]
if np.isclose(np.abs(roll), np.pi, atol=1e-1):
self.imageManager.setImageRotation180('CAMERA_LEFT')
def initView(self, view):
self.view = view or app.getViewManager().createView('Camera View', 'VTK View')
self.renderers = [self.view.renderer()]
renWin = self.view.renderWindow()
renWin.SetNumberOfLayers(3)
for i in [1, 2]:
ren = vtk.vtkRenderer()
ren.SetLayer(2)
ren.SetActiveCamera(self.view.camera())
renWin.AddRenderer(ren)
self.renderers.append(ren)
def applyCustomBounds():
self.view.addCustomBounds([-100, 100, -100, 100, -100, 100])
self.view.connect('computeBoundsRequest(ddQVTKWidgetView*)', applyCustomBounds)
app.setCameraTerrainModeEnabled(self.view, True)
self.resetCamera()
def resetCamera(self):
self.view.camera().SetViewAngle(90)
self.view.camera().SetPosition(-7.5, 0.0, 5.0)
self.view.camera().SetFocalPoint(0.0, 0.0, 0.0)
self.view.camera().SetViewUp(0.0, 0.0, 1.0)
self.view.render()
def getSphereGeometry(self, imageName):
sphereObj = self.sphereObjects.get(imageName)
if sphereObj:
return sphereObj
if not self.imageManager.getImage(imageName).GetDimensions()[0]:
return None
sphereResolution = 50
sphereRadii = {
'CAMERA_LEFT' : 20,
'CAMERACHEST_LEFT' : 20,
'CAMERACHEST_RIGHT' : 20
}
geometry = makeSphere(sphereRadii[imageName], sphereResolution)
self.imageManager.queue.computeTextureCoords(imageName, geometry)
tcoordsArrayName = 'tcoords_%s' % imageName
vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:,0].copy(), 'tcoords_U')
vtkNumpy.addNumpyToVtk(geometry, vtkNumpy.getNumpyFromVtk(geometry, tcoordsArrayName)[:,1].copy(), 'tcoords_V')
geometry = clipRange(geometry, 'tcoords_U', [0.0, 1.0])
geometry = clipRange(geometry, 'tcoords_V', [0.0, 1.0])
geometry.GetPointData().SetTCoords(geometry.GetPointData().GetArray(tcoordsArrayName))
sphereObj = vis.showPolyData(geometry, imageName, view=self.view, parent='cameras')
sphereObj.actor.SetTexture(self.imageManager.getTexture(imageName))
sphereObj.actor.GetProperty().LightingOff()
self.view.renderer().RemoveActor(sphereObj.actor)
rendererId = 2 - self.sphereImages.index(imageName)
self.renderers[rendererId].AddActor(sphereObj.actor)
self.sphereObjects[imageName] = sphereObj
return sphereObj
def updateSphereGeometry(self):
for imageName in self.sphereImages:
sphereObj = self.getSphereGeometry(imageName)
if not sphereObj:
continue
transform = vtk.vtkTransform()
self.imageManager.queue.getBodyToCameraTransform(imageName, transform)
sphereObj.actor.SetUserTransform(transform.GetLinearInverse())
def updateImages(self):
updated = False
for imageName, lastUtime in self.updateUtimes.iteritems():
currentUtime = self.imageManager.updateImage(imageName)
if currentUtime != lastUtime:
self.updateUtimes[imageName] = currentUtime
updated = True
return updated
def updateView(self):
if not self.view.isVisible():
return
if not self.updateImages():
return
self.updateSphereGeometry()
self.view.render()
class ImageWidget(object):
def __init__(self, imageManager, imageName, view, visible=True):
self.view = view
self.imageManager = imageManager
self.imageName = imageName
self.visible = visible
self.updateUtime = 0
self.initialized = False
self.imageWidget = vtk.vtkLogoWidget()
imageRep = self.imageWidget.GetRepresentation()
self.imageWidget.ResizableOff()
self.imageWidget.SelectableOn()
imageRep.GetImageProperty().SetOpacity(1.0)
self.imageWidget.SetInteractor(self.view.renderWindow().GetInteractor())
self.flip = vtk.vtkImageFlip()
self.flip.SetFilteredAxis(1)
self.flip.SetInput(imageManager.getImage(imageName))
imageRep.SetImage(self.flip.GetOutput())
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
self.view.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.Resize)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.onResizeEvent)
self.timerCallback = TimerCallback()
self.timerCallback.targetFps = 60
self.timerCallback.callback = self.updateView
self.timerCallback.start()
def setWidgetSize(self, desiredWidth=400):
image = self.imageManager.getImage(self.imageName)
dims = image.GetDimensions()
if 0.0 in dims:
return
aspectRatio = float(dims[0])/dims[1]
imageWidth, imageHeight = desiredWidth, desiredWidth/aspectRatio
viewWidth, viewHeight = self.view.width, self.view.height
rep = self.imageWidget.GetBorderRepresentation()
rep.SetShowBorderToOff()
coord = rep.GetPositionCoordinate()
coord2 = rep.GetPosition2Coordinate()
coord.SetCoordinateSystemToDisplay()
coord2.SetCoordinateSystemToDisplay()
coord.SetValue(0, viewHeight-imageHeight)
coord2.SetValue(imageWidth, imageHeight)
self.view.render()
def onResizeEvent(self):
self.setWidgetSize(400)
def setImageName(self, imageName):
self.imageName = imageName
        self.flip.SetInput(self.imageManager.getImage(imageName))
def setOpacity(self, opacity=1.0):
self.imageWidget.GetRepresentation().GetImageProperty().SetOpacity(opacity)
def hide(self):
self.visible = False
self.imageWidget.Off()
self.view.render()
def show(self):
self.visible = True
if self.haveImage():
self.imageWidget.On()
self.view.render()
def haveImage(self):
image = self.imageManager.getImage(self.imageName)
dims = image.GetDimensions()
return 0.0 not in dims
def updateView(self):
if not self.visible or not self.view.isVisible():
return
currentUtime = self.imageManager.updateImage(self.imageName)
if currentUtime != self.updateUtime:
self.updateUtime = currentUtime
self.flip.Update()
self.view.render()
if not self.initialized and self.visible and self.haveImage():
self.show()
self.setWidgetSize(400)
self.initialized = True
class CameraImageView(object):
def __init__(self, imageManager, imageName, viewName=None, view=None):
imageManager.addImage(imageName)
self.cameraRoll = None
self.imageManager = imageManager
self.viewName = viewName or imageName
self.imageName = imageName
self.imageInitialized = False
self.updateUtime = 0
self.initView(view)
self.initEventFilter()
def getImagePixel(self, displayPoint, restrictToImageDimensions=True):
worldPoint = [0.0, 0.0, 0.0, 0.0]
vtk.vtkInteractorObserver.ComputeDisplayToWorld(self.view.renderer(), displayPoint[0], displayPoint[1], 0, worldPoint)
imageDimensions = self.getImage().GetDimensions()
if 0.0 <= worldPoint[0] <= imageDimensions[0] and 0.0 <= worldPoint[1] <= imageDimensions[1] or not restrictToImageDimensions:
return [worldPoint[0], worldPoint[1], 0.0]
else:
return None
def getWorldPositionAndRay(self, imagePixel, imageUtime=None):
'''
Given an XY image pixel, computes an equivalent ray in the world
coordinate system using the camera to local transform at the given
imageUtime. If imageUtime is None, then the utime of the most recent
image is used.
Returns the camera xyz position in world, and a ray unit vector.
'''
if imageUtime is None:
imageUtime = self.imageManager.getUtime(self.imageName)
# input is pixel u,v, output is unit x,y,z in camera coordinates
cameraPoint = self.imageManager.queue.unprojectPixel(self.imageName, imagePixel[0], imagePixel[1])
cameraToLocal = vtk.vtkTransform()
self.imageManager.queue.getTransform(self.imageName, 'local', imageUtime, cameraToLocal)
p = np.array(cameraToLocal.TransformPoint(cameraPoint))
cameraPosition = np.array(cameraToLocal.GetPosition())
ray = p - cameraPosition
ray /= np.linalg.norm(ray)
return cameraPosition, ray
def filterEvent(self, obj, event):
if self.eventFilterEnabled and event.type() == QtCore.QEvent.MouseButtonDblClick:
self.eventFilter.setEventHandlerResult(True)
elif event.type() == QtCore.QEvent.KeyPress:
if str(event.text()).lower() == 'p':
self.eventFilter.setEventHandlerResult(True)
elif str(event.text()).lower() == 'r':
self.eventFilter.setEventHandlerResult(True)
self.resetCamera()
def onRubberBandPick(self, obj, event):
displayPoints = self.interactorStyle.GetStartPosition(), self.interactorStyle.GetEndPosition()
imagePoints = [vis.pickImage(point, self.view)[1] for point in displayPoints]
sendFOVRequest(self.imageName, imagePoints)
def getImage(self):
return self.imageManager.getImage(self.imageName)
def initView(self, view):
self.view = view or app.getViewManager().createView(self.viewName, 'VTK View')
self.view.installImageInteractor()
self.interactorStyle = self.view.renderWindow().GetInteractor().GetInteractorStyle()
self.interactorStyle.AddObserver('SelectionChangedEvent', self.onRubberBandPick)
self.imageActor = vtk.vtkImageActor()
self.imageActor.SetInput(self.getImage())
self.imageActor.SetVisibility(False)
self.view.renderer().AddActor(self.imageActor)
self.view.orientationMarkerWidget().Off()
self.view.backgroundRenderer().SetBackground(0,0,0)
self.view.backgroundRenderer().SetBackground2(0,0,0)
self.timerCallback = TimerCallback()
self.timerCallback.targetFps = 60
self.timerCallback.callback = self.updateView
self.timerCallback.start()
def initEventFilter(self):
self.eventFilter = PythonQt.dd.ddPythonEventFilter()
qvtkwidget = self.view.vtkWidget()
qvtkwidget.installEventFilter(self.eventFilter)
self.eventFilter.addFilteredEventType(QtCore.QEvent.MouseButtonDblClick)
self.eventFilter.addFilteredEventType(QtCore.QEvent.KeyPress)
self.eventFilter.connect('handleEvent(QObject*, QEvent*)', self.filterEvent)
self.eventFilterEnabled = True
def setCameraRoll(self, roll):
self.cameraRoll = roll
self.resetCamera()
def resetCamera(self):
camera = self.view.camera()
camera.ParallelProjectionOn()
camera.SetFocalPoint(0,0,0)
camera.SetPosition(0,0,-1)
camera.SetViewUp(0,-1, 0)
if self.cameraRoll is not None:
camera.SetRoll(self.cameraRoll)
self.view.resetCamera()
self.fitImageToView()
self.view.render()
def fitImageToView(self):
camera = self.view.camera()
image = self.getImage()
imageWidth, imageHeight, _ = image.GetDimensions()
viewWidth, viewHeight = self.view.renderWindow().GetSize()
aspectRatio = float(viewWidth)/viewHeight
parallelScale = max(imageWidth/aspectRatio, imageHeight) / 2.0
camera.SetParallelScale(parallelScale)
def setImageName(self, imageName):
if imageName == self.imageName:
return
assert self.imageManager.hasImage(imageName)
self.imageName = imageName
self.imageInitialized = False
self.updateUtime = 0
self.imageActor.SetInput(self.imageManager.getImage(self.imageName))
self.imageActor.SetVisibility(False)
self.view.render()
def updateView(self):
if not self.view.isVisible():
return
currentUtime = self.imageManager.updateImage(self.imageName)
if currentUtime != self.updateUtime:
self.updateUtime = currentUtime
self.view.render()
if not self.imageInitialized and self.imageActor.GetInput().GetDimensions()[0]:
self.imageActor.SetVisibility(True)
self.resetCamera()
self.imageInitialized = True
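# Illustrative sketch (not part of the original module): turning a display
# point from a CameraImageView into a world-frame camera position and ray,
# using the two helpers defined on the class above. The function name is
# hypothetical.
def _examplePixelToRay(cameraImageView, displayPoint):
    pixel = cameraImageView.getImagePixel(displayPoint)
    if pixel is None:
        return None, None
    return cameraImageView.getWorldPositionAndRay(pixel)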
class CameraFrustumVisualizer(object):
def __init__(self, robotModel, imageManager, cameraName):
self.robotModel = robotModel
self.cameraName = cameraName
self.imageManager = imageManager
self.rayLength = 2.0
robotModel.connectModelChanged(self.update)
self.update(robotModel)
@staticmethod
def isCompatibleWithConfig():
return 'headLink' in drcargs.getDirectorConfig()
def getCameraToLocal(self):
'''
Returns cameraToLocal. cameraToHead is pulled from bot frames while
headToLocal is pulled from the robot model forward kinematics.
'''
headToLocal = self.robotModel.getLinkFrame( self.robotModel.getHeadLink() )
cameraToHead = vtk.vtkTransform()
self.imageManager.queue.getTransform(self.cameraName, self.robotModel.getHeadLink(), 0, cameraToHead)
return transformUtils.concatenateTransforms([cameraToHead, headToLocal])
def getCameraFrustumRays(self):
'''
        Returns (cameraPosition, rays)
cameraPosition is in world frame.
rays are four unit length vectors in world frame that point in the
direction of the camera frustum edges
'''
cameraToLocal = self.getCameraToLocal()
cameraPos = np.array(cameraToLocal.GetPosition())
camRays = []
rays = np.array(self.imageManager.queue.getCameraFrustumBounds(self.cameraName))
for i in xrange(4):
ray = np.array(cameraToLocal.TransformVector(rays[i*3:i*3+3]))
ray /= np.linalg.norm(ray)
camRays.append(ray)
return cameraPos, camRays
def getCameraFrustumGeometry(self, rayLength):
camPos, rays = self.getCameraFrustumRays()
rays = [rayLength*r for r in rays]
d = DebugData()
d.addLine(camPos, camPos+rays[0])
d.addLine(camPos, camPos+rays[1])
d.addLine(camPos, camPos+rays[2])
d.addLine(camPos, camPos+rays[3])
d.addLine(camPos+rays[0], camPos+rays[1])
d.addLine(camPos+rays[1], camPos+rays[2])
d.addLine(camPos+rays[2], camPos+rays[3])
d.addLine(camPos+rays[3], camPos+rays[0])
return d.getPolyData()
def update(self, robotModel):
name = 'camera frustum %s' % self.robotModel.getProperty('Name')
obj = om.findObjectByName(name)
if obj and not obj.getProperty('Visible'):
return
vis.updatePolyData(self.getCameraFrustumGeometry(self.rayLength), name, parent=self.robotModel, visible=False)
views = {}
def addCameraView(channel, viewName=None, cameraName=None, imageType=-1):
cameraName = cameraName or channel
imageManager.queue.addCameraStream(channel, cameraName, imageType)
imageManager.addImage(cameraName)
view = CameraImageView(imageManager, cameraName, viewName)
global views
views[channel] = view
return view
def getStereoPointCloud(decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0, rangeThreshold = -1):
q = imageManager.queue
utime = q.getCurrentImageTime(cameraName)
if utime == 0:
return None
p = vtk.vtkPolyData()
cameraToLocal = vtk.vtkTransform()
q.getPointCloudFromImages(imagesChannel, p, decimation, removeSize, rangeThreshold)
if (p.GetNumberOfPoints() > 0):
q.getTransform(cameraName, 'local', utime, cameraToLocal)
p = filterUtils.transformPolyData(p, cameraToLocal)
return p
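# Illustrative sketch (not part of the original module): grabbing a single
# stereo point cloud and showing it, assuming init() has been called so the
# global imageManager exists. The function name is hypothetical.
def _exampleShowStereoCloud():
    polyData = getStereoPointCloud(decimation=4)
    if polyData is not None:
        vis.showPolyData(polyData, 'stereo point cloud')
    return polyData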
class KintinuousMapping(object):
def __init__(self):
self.lastUtime = 0
self.lastCameraToLocal = vtk.vtkTransform()
self.cameraToLocalFusedTransforms = []
self.cameraToLocalTransforms = []
self.pointClouds = []
def getStereoPointCloudElapsed(self,decimation=4, imagesChannel='CAMERA', cameraName='CAMERA_LEFT', removeSize=0):
q = imageManager.queue
utime = q.getCurrentImageTime(cameraName)
if utime == 0:
return None, None, None
if (utime - self.lastUtime < 1E6):
return None, None, None
p = vtk.vtkPolyData()
cameraToLocalFused = vtk.vtkTransform()
q.getTransform('CAMERA_LEFT_ALT', 'local', utime, cameraToLocalFused)
cameraToLocal = vtk.vtkTransform()
q.getTransform('CAMERA_LEFT', 'local', utime, cameraToLocal)
prevToCurrentCameraTransform = vtk.vtkTransform()
prevToCurrentCameraTransform.PostMultiply()
prevToCurrentCameraTransform.Concatenate( cameraToLocal )
prevToCurrentCameraTransform.Concatenate( self.lastCameraToLocal.GetLinearInverse() )
distTravelled = np.linalg.norm( prevToCurrentCameraTransform.GetPosition() )
# 0.2 heavy overlap
# 0.5 quite a bit of overlap
# 1.0 is good
if (distTravelled < 0.2 ):
return None, None, None
q.getPointCloudFromImages(imagesChannel, p, decimation, removeSize, -1)
self.lastCameraToLocal = cameraToLocal
self.lastUtime = utime
return p, cameraToLocalFused, cameraToLocal
def showFusedMaps(self):
om.removeFromObjectModel(om.findObjectByName('stereo'))
om.getOrCreateContainer('stereo')
q = imageManager.queue
cameraToLocalNow = vtk.vtkTransform()
utime = q.getCurrentImageTime('CAMERA_TSDF')
q.getTransform('CAMERA_LEFT','local', utime,cameraToLocalNow)
cameraToLocalFusedNow = vtk.vtkTransform()
q.getTransform('CAMERA_LEFT_ALT','local', utime,cameraToLocalFusedNow)
for i in range(len(self.pointClouds)):
fusedNowToLocalNow = vtk.vtkTransform()
fusedNowToLocalNow.PreMultiply()
fusedNowToLocalNow.Concatenate( cameraToLocalNow)
fusedNowToLocalNow.Concatenate( cameraToLocalFusedNow.GetLinearInverse() )
fusedTransform = vtk.vtkTransform()
fusedTransform.PreMultiply()
fusedTransform.Concatenate( fusedNowToLocalNow)
fusedTransform.Concatenate( self.cameraToLocalFusedTransforms[i] )
pd = filterUtils.transformPolyData(self.pointClouds[i], fusedTransform)
vis.showFrame(fusedTransform, ('cloud frame ' + str(i)), visible=True, scale=0.2, parent='stereo')
vis.showPolyData(pd, ('stereo ' + str(i)), parent='stereo', colorByName='rgb_colors')
# Without compensation for fusion motion estimation:
#pd = filterUtils.transformPolyData(self.pointClouds[i], self.cameraToLocalTransforms[i])
#vis.showFrame(self.cameraToLocalTransforms[i], ('cloud frame ' + str(i)), visible=True, scale=0.2)
#vis.showPolyData(pd, ('stereo ' + str(i)) )
# in fusion coordinate frame:
#pd = filterUtils.transformPolyData(self.pointClouds[i], self.cameraToLocalFusedTransforms[i])
#vis.showFrame(self.cameraToLocalFusedTransforms[i], ('cloud frame ' + str(i)), visible=True, scale=0.2)
#vis.showPolyData(pd, ('stereo ' + str(i)) )
def cameraFusedCallback(self):
#pd = cameraview.getStereoPointCloud(2,"CAMERA_FUSED")
pd, cameraToLocalFused, cameraToLocal = self.getStereoPointCloudElapsed(2,"CAMERA_FUSED")
#vis.updateFrame(cameraToLocal, 'cloud frame now', visible=True, scale=0.2)
if (pd is None):
return
self.pointClouds.append(pd)
self.cameraToLocalFusedTransforms.append( cameraToLocalFused )
self.cameraToLocalTransforms.append( cameraToLocal )
#pdCopy = vtk.vtkPolyData()
#pdCopy.DeepCopy(pd)
#cameraToLocalCopy = transformUtils.copyFrame(cameraToLocalFused)
#pdCopy = filterUtils.transformPolyData(pdCopy, cameraToLocalCopy)
#vis.showFrame(cameraToLocalCopy, 'cloud frame', visible=True, scale=0.2)
#vis.showPolyData(pdCopy,'stereo')
self.showFusedMaps()
def init():
global imageManager
imageManager = ImageManager()
global cameraView
cameraView = CameraView(imageManager)
addCameraView('CAMERA_LEFT', 'Head camera')
#addCameraView('CAMERA', 'Head camera right', 'CAMERA_RIGHT', lcmmultisense.images_t.RIGHT)
#addCameraView('CAMERA', 'Head camera depth', 'CAMERA_DISPARITY', lcmmultisense.images_t.DISPARITY_ZIPPED)
addCameraView('CAMERACHEST_LEFT', 'Chest left')
addCameraView('CAMERACHEST_RIGHT', 'Chest right')
addCameraView('CAMERALHAND')
addCameraView('CAMERARHAND')
addCameraView('KINECT_RGB', 'Kinect RGB')
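# Hedged usage sketch (not in the original module): after init(), each
# CameraImageView can be looked up by its LCM channel name in the module-level
# 'views' dict populated by addCameraView() above.
def _exampleGetHeadCameraView():
    # assumes init() has already been called by the application
    return views['CAMERA_LEFT']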
|
|
from __future__ import unicode_literals, division, absolute_import
import datetime
from math import ceil
from flask import jsonify, request
from flask_restplus import inputs
from flexget.api import api, APIResource
from flexget.plugins.api.series import NoResultFound
from flexget.plugins.filter import movie_queue as mq
from flexget.utils import qualities
movie_queue_api = api.namespace('movie_queue', description='Movie Queue operations')
default_error_schema = {
'type': 'object',
'properties': {
'status': {'type': 'string'},
'message': {'type': 'string'}
}
}
default_error_schema = api.schema('default_error_schema', default_error_schema)
empty_response = api.schema('empty', {'type': 'object'})
movie_object = {
'type': 'object',
'properties': {
'added_date': {'type': 'string'},
'is_downloaded': {'type': 'boolean'},
'download_date': {'type': 'string'},
'entry_original_url': {'type': 'string'},
'entry_title': {'type': 'string'},
'entry_url': {'type': 'string'},
'id': {'type': 'integer'},
'imdb_id': {'type': 'string'},
'quality': {'type': 'string'},
'title': {'type': 'string'},
'tmdb_id': {'type': 'string'},
'queue_name': {'type': 'string'}
}
}
movie_object_schema = api.schema('movie_object', movie_object)
movie_queue_schema = {
'type': 'object',
'properties': {
'movies': {
'type': 'array',
'items': movie_object
},
'number_of_movies': {'type': 'integer'},
'total_number_of_pages': {'type': 'integer'},
'page_number': {'type': 'integer'}
}
}
movie_queue_schema = api.schema('list_movie_queue', movie_queue_schema)
movie_queue_parser = api.parser()
movie_queue_parser.add_argument('page', type=int, default=1, help='Page number')
movie_queue_parser.add_argument('max', type=int, default=100, help='Movies per page')
movie_queue_parser.add_argument('queue_name', default='default', help='Filter by movie queue name')
movie_queue_parser.add_argument('is_downloaded', type=inputs.boolean, help='Filter list by movies download status')
movie_queue_parser.add_argument('sort_by', choices=('added', 'is_downloaded', 'id', 'title', 'download_date'),
default='added', help="Sort response by attribute")
movie_queue_parser.add_argument('order', choices=('asc', 'desc'), default='desc', help="Sorting order")
movie_add_input_schema = {
'type': 'object',
'properties': {
'title': {'type': 'string'},
'imdb_id': {'type': 'string', 'pattern': r'tt\d{7}'},
'tmdb_id': {'type': 'integer'},
'quality': {'type': 'string', 'format': 'quality_requirements', 'default': 'any'},
'queue_name': {'type': 'string', 'default': 'default'}
},
'anyOf': [
{'required': ['title']},
{'required': ['imdb_id']},
{'required': ['tmdb_id']}
]
}
movie_add_input_schema = api.schema('movie_add_input_schema', movie_add_input_schema)
movie_edit_input_schema = {
'type': 'object',
'properties': {
'quality': {'type': 'string', 'format': 'quality_requirements'},
'reset_downloaded': {'type': 'boolean', 'default': True}
},
'anyOf': [
{'required': ['quality']},
{'required': ['reset_downloaded']}
]
}
movie_edit_input_schema = api.schema('movie_edit_input_schema', movie_edit_input_schema)
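# Illustrative request bodies (not part of the original module; values are made
# up). The add schema's 'anyOf' clause requires at least one of title, imdb_id
# or tmdb_id, while quality and queue_name fall back to their declared defaults.
_example_movie_add_payload = {'imdb_id': 'tt0111161', 'quality': '720p-1080p'}
# The edit schema requires quality and/or reset_downloaded:
_example_movie_edit_payload = {'quality': '1080p', 'reset_downloaded': True}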
@movie_queue_api.route('/')
class MovieQueueAPI(APIResource):
@api.response(404, 'Page does not exist', model=default_error_schema)
@api.response(code_or_apierror=200, model=movie_queue_schema)
@api.doc(parser=movie_queue_parser, description="Get flexget's queued movies")
def get(self, session=None):
""" List queued movies """
args = movie_queue_parser.parse_args()
page = args['page']
max_results = args['max']
downloaded = args['is_downloaded']
sort_by = args['sort_by']
order = args['order']
queue_name = args['queue_name']
# Convert the sort order (including the explicit default 'desc') to the boolean expected by sorted()
if order == 'desc':
order = True
else:
order = False
raw_movie_queue = mq.queue_get(session=session, downloaded=downloaded, queue_name=queue_name)
converted_movie_queue = [movie.to_dict() for movie in raw_movie_queue]
sorted_movie_list = sorted(converted_movie_queue,
key=lambda movie: movie[sort_by] if movie[sort_by] else datetime.datetime,
reverse=order)
count = len(sorted_movie_list)
pages = int(ceil(count / float(max_results)))
if page > pages and pages != 0:
return {'status': 'error',
'message': 'page %s does not exist' % page}, 404
start = (page - 1) * max_results
finish = start + max_results
if finish > count:
finish = count
movie_items = []
for movie_number in range(start, finish):
movie_items.append(sorted_movie_list[movie_number])
return jsonify({
'movies': movie_items,
'number_of_movies': count,
'page_number': page,
'total_number_of_pages': pages
})
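# Worked example of the pagination arithmetic above (comment-only sketch): with
# count=42 queued movies and max=10 per page, pages = int(ceil(42 / 10.0)) = 5.
# Requesting page=3 gives start=20, finish=30, so items 20..29 of the sorted
# list are returned; requesting page=6 yields the 404 'page does not exist' error.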
@api.response(500, 'Movie already in queue', model=default_error_schema)
@api.response(201, 'Movie successfully added', model=movie_object_schema)
@api.validate(movie_add_input_schema)
@api.doc(description="Add a movie to flexget's queued movies")
def post(self, session=None):
""" Add movies to movie queue """
kwargs = request.json
kwargs['quality'] = qualities.Requirements(kwargs.get('quality'))
kwargs['session'] = session
try:
movie = mq.queue_add(**kwargs)
except mq.QueueError as e:
reply = {
'status': 'error',
'message': e.message
}
return reply, 500
reply = jsonify(movie)
reply.status_code = 201
return reply
@api.response(404, 'ID not found', model=default_error_schema)
@movie_queue_api.route('/<id>/')
@api.doc(params={'id': 'ID of Queued Movie'})
class MovieQueueManageAPI(APIResource):
@api.response(200, 'Movie successfully retrieved', movie_object_schema)
@api.doc(description="Get a specific movie")
def get(self, id, session=None):
""" Returns a movie from queue by ID """
try:
movie = mq.get_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return jsonify(movie)
@api.response(200, 'Movie successfully deleted', model=empty_response)
@api.doc(description="Delete a specific movie")
def delete(self, id, session=None):
""" Delete movies from movie queue """
try:
mq.delete_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
return {}
@api.response(405, 'Movie not marked as downloaded', model=default_error_schema)
@api.response(200, 'Movie successfully updated', movie_object_schema)
@api.validate(model=movie_edit_input_schema,
description='Values to use when editing existing movie. At least one value should be used')
@api.doc(description="Update a specific movie")
def put(self, id, session=None):
""" Updates movie quality or downloaded state in movie queue """
data = request.json
try:
movie = mq.get_movie_by_id(movie_id=id)
except NoResultFound:
return {'status': 'error',
'message': 'movie with ID {0} was not found'.format(id)}, 404
queue_name = movie.get('queue_name')
if data.get('reset_downloaded'):
try:
movie = mq.queue_forget(movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
if e.errno == 1:
reply = {
'status': 'error',
'message': e.message
}
return reply, 405
else:
reply = {
'status': 'error',
'message': e.message
}
return reply, 404
if data.get('quality'):
try:
movie = mq.queue_edit(quality=data['quality'], movie_id=id, queue_name=queue_name)
except mq.QueueError as e:
reply = {'status': 'error',
'message': e.message}
return reply, 404
if not movie:
return {'status': 'error',
'message': 'Not enough parameters to edit movie data'}, 400
return jsonify(movie)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corporation
# Copyright 2017-2018 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is the common console support for confluent. It takes over
# whatever filehandle is conversing with the client and starts
# relaying data. It uses Ctrl-] like telnet for escape back to prompt
# we track nodes that are actively being logged, watched, or have attached
# there should be no more than one handler per node
import codecs
import collections
import confluent.collective.manager as collective
import confluent.config.configmanager as configmodule
import confluent.exceptions as exc
import confluent.interface.console as conapi
import confluent.log as log
import confluent.core as plugin
import confluent.tlvdata as tlvdata
import confluent.util as util
import eventlet
import eventlet.event
import eventlet.green.socket as socket
import eventlet.green.ssl as ssl
import pyte
import random
import time
import traceback
_handled_consoles = {}
_tracelog = None
try:
range = xrange
except NameError:
pass
pytecolors2ansi = {
'black': 0,
'red': 1,
'green': 2,
'brown': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
'default': 9,
}
# might be able to use IBMPC map from pyte charsets,
# in that case, would have to mask out certain things (like ESC)
# in the same way that Screen's draw method would do
# for now at least get some of the arrows in there (note ESC is one
# of those arrows... so skip it)
ansichars = dict(zip((0x18, 0x19), u'\u2191\u2193'))
def _utf8_normalize(data, shiftin, decoder):
# first we give the stateful decoder a crack at the byte stream,
# we may come up empty in the event of a partial multibyte
try:
data = decoder.decode(data)
except UnicodeDecodeError:
# first order of business is to reset the state of
# the decoder to a clean one, so we can switch back to utf-8
# when things change, for example going from an F1 setup menu stuck
# in the old days to a modern platform using utf-8
decoder.setstate(codecs.getincrementaldecoder('utf-8')().getstate())
# Ok, so we have something that is not valid UTF-8,
# our next stop is to try CP437. We don't try incremental
# decode, since cp437 is single byte
# 'replace' is silly here, since there is no such thing as invalid cp437,
# but just in case
data = data.decode('cp437', 'replace')
# Finally, the low part of ascii is valid utf-8, but we are going to be
# more interested in the cp437 versions (since this is console *output*
# not input)
if shiftin is None:
data = data.translate(ansichars)
return data.encode('utf-8')
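# Hedged, standalone illustration of the fallback behaviour above (not used by
# confluent itself): valid UTF-8 passes straight through the stateful decoder,
# while bytes that are not valid UTF-8 (e.g. CP437 box-drawing characters) fall
# back to the cp437 decode path and come back re-encoded as UTF-8.
def _utf8_normalize_demo():
    dec = codecs.getincrementaldecoder('utf-8')()
    plain = _utf8_normalize(b'plain ascii', None, dec)   # passes through unchanged
    boxes = _utf8_normalize(b'\xc9\xcd\xbb', None, dec)  # decoded as cp437, returned as UTF-8 bytes
    return plain, boxes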
def pytechars2line(chars, maxlen=None):
line = '\x1b[m' # start at default params
lb = False # last bold
li = False # last italic
lu = False # last underline
ls = False # last strikethrough
lr = False # last reverse
lfg = 'default' # last fg color
lbg = 'default' # last bg color
hasdata = False
length = 1
for charidx in range(maxlen if maxlen is not None else len(chars)):
char = chars[charidx]
csi = []
if char.fg != lfg:
csi.append(30 + pytecolors2ansi[char.fg])
lfg = char.fg
if char.bg != lbg:
csi.append(40 + pytecolors2ansi[char.bg])
lbg = char.bg
if char.bold != lb:
lb = char.bold
csi.append(1 if lb else 22)
if char.italics != li:
li = char.italics
csi.append(3 if li else 23)
if char.underscore != lu:
lu = char.underscore
csi.append(4 if lu else 24)
if char.strikethrough != ls:
ls = char.strikethrough
csi.append(9 if ls else 29)
if char.reverse != lr:
lr = char.reverse
csi.append(7 if lr else 27)
if csi:
line += b'\x1b[' + b';'.join(['{0}'.format(x) for x in csi]) + b'm'
if not hasdata and char.data.encode('utf-8').rstrip():
hasdata = True
line += char.data.encode('utf-8')
if maxlen and length >= maxlen:
break
length += 1
return line, hasdata
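# Hedged demo of pytechars2line (assumes the pre-0.6 pyte API this module already
# targets, i.e. pyte.ByteStream().attach(screen) and list-style screen.buffer):
def _pytechars2line_demo():
    screen = pyte.Screen(20, 2)
    stream = pyte.ByteStream()
    stream.attach(screen)
    stream.feed(b'\x1b[1;31mhi\x1b[m there')
    return pytechars2line(screen.buffer[0], maxlen=20)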
class ConsoleHandler(object):
_plugin_path = '/nodes/{0}/_console/session'
_logtobuffer = True
_genwatchattribs = frozenset(('console.method', 'console.logging',
'collective.manager'))
def __init__(self, node, configmanager, width=80, height=24):
self.clearpending = False
self.clearerror = False
self.initsize = (width, height)
self._dologging = True
self._is_local = True
self._isondemand = False
self.error = None
self._retrytime = 0
self.cfgmgr = configmanager
self.node = node
self.connectstate = 'unconnected'
self._isalive = True
self.buffer = pyte.Screen(100, 31)
self.termstream = pyte.ByteStream()
self.termstream.attach(self.buffer)
self.livesessions = set([])
self.utf8decoder = codecs.getincrementaldecoder('utf-8')()
if self._logtobuffer:
self.logger = log.Logger(node, console=True,
tenant=configmanager.tenant)
(text, termstate, timestamp) = (b'', 0, False)
# when reading from log file, we will use wall clock
# it should usually match walltime.
self.lasttime = 0
if timestamp:
timediff = time.time() - timestamp
if timediff > 0:
self.lasttime = util.monotonic_time() - timediff
else:
# wall clock has gone backwards, use current time as best
# guess
self.lasttime = util.monotonic_time()
self.clearbuffer()
self.appmodedetected = False
self.shiftin = None
self.reconnect = None
if termstate & 1:
self.appmodedetected = True
if termstate & 2:
self.shiftin = '0'
self.users = {}
self._attribwatcher = None
self._console = None
self.connectionthread = None
self.send_break = None
if self._genwatchattribs:
self._attribwatcher = self.cfgmgr.watch_attributes(
(self.node,), self._genwatchattribs, self._attribschanged)
self.check_isondemand()
if not self._isondemand:
self.connectstate = 'connecting'
eventlet.spawn(self._connect)
def _get_retry_time(self):
clustsize = len(self.cfgmgr._cfgstore['nodes'])
self._retrytime = self._retrytime * 2 + 1
if self._retrytime > 120:
self._retrytime = 120
retrytime = clustsize * 0.05 * self._retrytime
if retrytime > 120:
retrytime = 120
return retrytime + (retrytime * random.random())
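# Worked example of the backoff above (illustrative): for a 100-node cluster,
# consecutive failures give _retrytime = 1, 3, 7, 15, 31, ... (capped at 120),
# so the base delay clustsize * 0.05 * _retrytime grows 5s, 15s, 35s, 75s, then
# caps at 120s, with up to that much again added as random jitter.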
def feedbuffer(self, data):
try:
self.termstream.feed(data)
except StopIteration: # corrupt parser state, start over
self.termstream = pyte.ByteStream()
self.termstream.attach(self.buffer)
except Exception:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
def check_isondemand(self):
self._dologging = True
attrvalue = self.cfgmgr.get_node_attributes(
(self.node,), ('console.logging', 'collective.manager'))
if self.node not in attrvalue:
self._isondemand = False
elif 'console.logging' not in attrvalue[self.node]:
self._isondemand = False
else:
if (attrvalue[self.node]['console.logging']['value'] not in (
'full', '', 'buffer')):
self._isondemand = True
if (attrvalue[self.node]['console.logging']['value']) in ('none', 'memory'):
self._dologging = False
self.check_collective(attrvalue)
def check_collective(self, attrvalue):
myc = attrvalue.get(self.node, {}).get('collective.manager', {}).get(
'value', None)
if configmodule.list_collective() and not myc:
self._is_local = False
self._detach()
self._disconnect()
if myc and myc != collective.get_myname():
# Do not do console connect for nodes managed by another
# confluent collective member
self._is_local = False
self._detach()
self._disconnect()
else:
self._is_local = True
def get_buffer_age(self):
"""Return age of buffered data
Returns age in seconds of the buffered data, or
False if no data has been buffered yet."""
if self.lasttime:
return util.monotonic_time() - self.lasttime
return False
def _attribschanged(self, nodeattribs, configmanager, **kwargs):
if 'collective.manager' in nodeattribs[self.node]:
attrval = configmanager.get_node_attributes(self.node,
'collective.manager')
self.check_collective(attrval)
if 'console.logging' in nodeattribs[self.node]:
# decide whether logging changes how we react or not
self._dologging = True
logvalue = 'full'
attributevalue = configmanager.get_node_attributes(
(self.node,), ('console.logging',))
try:
logvalue = \
attributevalue[self.node]['console.logging']['value']
except KeyError:
pass
if logvalue in ('full', ''):
# if the *only* thing to change is the log,
# then let always on handle reconnect if needed,
# since we want to avoid a senseless disconnect
# if already connected
# if other things change, then unconditionally reconnect
onlylogging = len(nodeattribs[self.node]) == 1
self._alwayson(doconnect=onlylogging)
if onlylogging:
return
else:
self._ondemand()
if logvalue in ('none', 'memory'):
self._dologging = False
if not self._isondemand or self.livesessions:
eventlet.spawn(self._connect)
def log(self, *args, **kwargs):
if not self._dologging:
return
self.logger.log(*args, **kwargs)
def _alwayson(self, doconnect=True):
self._isondemand = False
if not doconnect:
return
if not self._console and not self.connectionthread:
self._connect()
else:
self._console.ping()
def clearbuffer(self):
self.feedbuffer(
'\x1bc[No data has been received from the remote console since ' \
'connecting. This could\r\nbe due to having the console.logging ' \
'attribute set to none or interactive,\r\nserial console not ' \
'being enabled or incorrectly configured in the OS or\r\nfirmware, ' \
'or the console simply not having any output since last connection]')
self.clearpending = True
def _detach(self):
for ses in list(self.livesessions):
ses.detach()
def _disconnect(self):
if self.connectionthread:
self.connectionthread.kill()
self.connectionthread = None
# clear the terminal buffer when disconnected
self.clearbuffer()
if self._console:
self.log(
logdata='console disconnected', ltype=log.DataTypes.event,
event=log.Events.consoledisconnect)
self._console.close()
self._console = None
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate})
def _ondemand(self):
self._isondemand = True
if not self.livesessions and self._console:
self._disconnect()
def _connect(self):
if not self._is_local:
return
if self.connectionthread:
self.connectionthread.kill()
self.connectionthread = None
self.connectionthread = eventlet.spawn(self._connect_backend)
def _connect_backend(self):
if self._console:
self._console.close()
self._console = None
self.connectstate = 'connecting'
self._send_rcpts({'connectstate': self.connectstate})
if self.reconnect:
self.reconnect.cancel()
self.reconnect = None
try:
self._console = list(plugin.handle_path(
self._plugin_path.format(self.node),
"create", self.cfgmgr))[0]
except (exc.NotImplementedException, exc.NotFoundException):
self._console = None
except:
if _tracelog:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
else:
print(traceback.format_exc())
if not isinstance(self._console, conapi.Console):
self.clearbuffer()
self.connectstate = 'unconnected'
self.error = 'misconfigured'
self._send_rcpts({'connectstate': self.connectstate,
'error': self.error})
self.feedbuffer(
'\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is '
'not configured,\r\nset it to a valid value for console '
'function]')
self._send_rcpts(
'\x1bc\x1b[2J\x1b[1;1H[The console.method attribute for this node is '
'not configured,\r\nset it to a valid value for console '
'function]')
self.clearerror = True
return
if self.clearerror:
self.clearerror = False
self.clearbuffer()
self._send_rcpts(b'\x1bc\x1b[2J\x1b[1;1H')
self.send_break = self._console.send_break
self.resize = self._console.resize
if self._attribwatcher:
self.cfgmgr.remove_watcher(self._attribwatcher)
self._attribwatcher = None
if hasattr(self._console, "configattributes"):
attribstowatch = self._console.configattributes | self._genwatchattribs
else:
attribstowatch = self._genwatchattribs
if self._genwatchattribs:
self._attribwatcher = self.cfgmgr.watch_attributes(
(self.node,), attribstowatch, self._attribschanged)
try:
self.resize(width=self.initsize[0], height=self.initsize[1])
self._console.connect(self.get_console_output)
except exc.TargetEndpointBadCredentials:
self.clearbuffer()
self.error = 'badcredentials'
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate,
'error': self.error})
retrytime = self._get_retry_time()
if not self.reconnect:
self.reconnect = eventlet.spawn_after(retrytime, self._connect)
return
except exc.TargetEndpointUnreachable:
self.clearbuffer()
self.error = 'unreachable'
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate,
'error': self.error})
retrytime = self._get_retry_time()
if not self.reconnect:
self.reconnect = eventlet.spawn_after(retrytime, self._connect)
return
except Exception:
self.clearbuffer()
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
self.error = 'unknown'
self.connectstate = 'unconnected'
self._send_rcpts({'connectstate': self.connectstate,
'error': self.error})
retrytime = self._get_retry_time()
if not self.reconnect:
self.reconnect = eventlet.spawn_after(retrytime, self._connect)
return
self._got_connected()
def _got_connected(self):
self.connectstate = 'connected'
self._retrytime = 0
self.log(
logdata='console connected', ltype=log.DataTypes.event,
event=log.Events.consoleconnect)
self._send_rcpts({'connectstate': self.connectstate})
def _got_disconnected(self):
if self.connectstate != 'unconnected':
self.connectstate = 'unconnected'
self.log(
logdata='console disconnected', ltype=log.DataTypes.event,
event=log.Events.consoledisconnect)
self._send_rcpts({'connectstate': self.connectstate})
if self._isalive:
self._connect()
else:
self.clearbuffer()
def close(self):
self._isalive = False
self._send_rcpts({'deleting': True})
self._disconnect()
if self._console:
self._console.close()
self._console = None
if self.connectionthread:
self.connectionthread.kill()
self.connectionthread = None
if self._attribwatcher:
self.cfgmgr.remove_watcher(self._attribwatcher)
self._attribwatcher = None
def get_console_output(self, data):
# Spawn as a greenthread, return control as soon as possible
# to the console object
eventlet.spawn(self._handle_console_output, data)
def attachsession(self, session):
edata = 1
for currsession in self.livesessions:
if currsession.username == session.username:
# indicate that user has multiple connections
edata = 2
self.livesessions.add(session)
self.log(
logdata=session.username, ltype=log.DataTypes.event,
event=log.Events.clientconnect, eventdata=edata)
self._send_rcpts({'clientcount': len(self.livesessions)})
if self.connectstate == 'unconnected':
# if console is not connected, take time to try to assert
# connectivity now.
if self.reconnect:
# cancel an automated retry if one is pending
self.reconnect.cancel()
self.reconnect = None
self.connectstate = 'connecting'
eventlet.spawn(self._connect)
def detachsession(self, session):
edata = 0
self.livesessions.discard(session)
for currsession in self.livesessions:
if currsession.username == session.username:
edata += 1
if edata > 1: # don't bother counting beyond 2 in the log
break
self.log(
logdata=session.username, ltype=log.DataTypes.event,
event=log.Events.clientdisconnect, eventdata=edata)
self._send_rcpts({'clientcount': len(self.livesessions)})
if self._isondemand and not self.livesessions:
self._disconnect()
def reopen(self):
self._got_disconnected()
def _handle_console_output(self, data):
if type(data) == int:
if data == conapi.ConsoleEvent.Disconnect:
self._got_disconnected()
return
elif data == '':
# ignore empty strings from a console provider
return
if '\x1b[?1l' in data: # request for ansi mode cursor keys
self.appmodedetected = False
if '\x1b[?1h' in data: # remember the session wants the client to use
# 'application mode'. Thus far this has only been observed on ESXi.
self.appmodedetected = True
if '\x1b)0' in data:
# console indicates it wants access to special drawing characters
self.shiftin = '0'
eventdata = 0
if self.appmodedetected:
eventdata |= 1
if self.shiftin is not None:
eventdata |= 2
# TODO: analyze buffer for registered events, examples:
# panics
# certificate signing request
if self.clearpending or self.clearerror:
self.clearpending = False
self.clearerror = False
self.feedbuffer(b'\x1bc\x1b[2J\x1b[1;1H')
self._send_rcpts(b'\x1bc\x1b[2J\x1b[1;1H')
self._send_rcpts(_utf8_normalize(data, self.shiftin, self.utf8decoder))
self.log(data, eventdata=eventdata)
self.lasttime = util.monotonic_time()
self.feedbuffer(data)
def _send_rcpts(self, data):
for rcpt in list(self.livesessions):
try:
rcpt.data_handler(data)
except: # No matter the reason, advance to next recipient
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
def get_recent(self):
"""Retrieve 'recent' data
Replay buffered data with the intent of approximately reproducing the display.
"""
# For now, just try to seek back in buffer to find a clear screen
# If that fails, just return buffer
# a scheme always tracking the last clear screen would be too costly
connstate = {
'connectstate': self.connectstate,
'clientcount': len(self.livesessions),
}
retdata = b'\x1b[H\x1b[J' # clear screen
pendingbl = b'' # pending blank lines
maxlen = 0
for line in self.buffer.display:
line = line.rstrip()
if len(line) > maxlen:
maxlen = len(line)
for line in range(self.buffer.lines):
nline, notblank = pytechars2line(self.buffer.buffer[line], maxlen)
if notblank:
if pendingbl:
retdata += pendingbl
pendingbl = b''
retdata += nline + '\r\n'
else:
pendingbl += nline + '\r\n'
if len(retdata) > 6:
retdata = retdata[:-2] # remove the last \r\n
retdata += b'\x1b[{0};{1}H'.format(self.buffer.cursor.y + 1,
self.buffer.cursor.x + 1)
if self.shiftin is not None: # detected that terminal requested a
# shiftin character set, relay that to the terminal that connected
retdata += '\x1b)' + self.shiftin
if self.appmodedetected:
retdata += '\x1b[?1h'
else:
retdata += '\x1b[?1l'
return retdata, connstate
def write(self, data):
if self.connectstate == 'connected':
try:
self._console.write(data)
except Exception:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
self._got_disconnected()
def disconnect_node(node, configmanager):
consk = (node, configmanager.tenant)
if consk in _handled_consoles:
_handled_consoles[consk].close()
del _handled_consoles[consk]
def _nodechange(added, deleting, configmanager):
for node in added:
connect_node(node, configmanager)
for node in deleting:
disconnect_node(node, configmanager)
def _start_tenant_sessions(cfm):
nodeattrs = cfm.get_node_attributes(cfm.list_nodes(), 'collective.manager')
for node in nodeattrs:
manager = nodeattrs[node].get('collective.manager', {}).get('value',
None)
if manager and collective.get_myname() != manager:
continue
try:
connect_node(node, cfm)
except:
_tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event,
event=log.Events.stacktrace)
cfm.watch_nodecollection(_nodechange)
def start_console_sessions():
global _tracelog
_tracelog = log.Logger('trace')
configmodule.hook_new_configmanagers(_start_tenant_sessions)
def connect_node(node, configmanager, username=None, direct=True, width=80,
height=24):
attrval = configmanager.get_node_attributes(node, 'collective.manager')
myc = attrval.get(node, {}).get('collective.manager', {}).get(
'value', None)
myname = collective.get_myname()
if myc and myc != collective.get_myname() and direct:
minfo = configmodule.get_collective_member(myc)
return ProxyConsole(node, minfo, myname, configmanager, username,
width, height)
consk = (node, configmanager.tenant)
if consk not in _handled_consoles:
_handled_consoles[consk] = ConsoleHandler(node, configmanager, width,
height)
return _handled_consoles[consk]
# A stub console handler that just passes through to a remote confluent
# collective member. It can skip the multi-session sharing as that is handled
# remotely
class ProxyConsole(object):
_genwatchattribs = frozenset(('collective.manager',))
def __init__(self, node, managerinfo, myname, configmanager, user, width,
height):
self.skipreplay = True
self.initsize = (width, height)
self.managerinfo = managerinfo
self.myname = myname
self.cfm = configmanager
self.node = node
self.user = user
self.remote = None
self.clisession = None
self._attribwatcher = configmanager.watch_attributes(
(self.node,), self._genwatchattribs, self._attribschanged)
def _attribschanged(self, nodeattribs, configmanager, **kwargs):
if self.clisession:
self.clisession.detach()
self.clisession = None
def relay_data(self):
data = tlvdata.recv(self.remote)
while data:
self.data_handler(data)
data = tlvdata.recv(self.remote)
def get_buffer_age(self):
# the server sends a buffer age if appropriate, no need to handle
# it explicitly in the proxy instance
return False
def get_recent(self):
# Again, delegate this to the remote collective member
self.skipreplay = False
return b''
def write(self, data):
# Relay data to the collective manager
try:
tlvdata.send(self.remote, data)
except Exception:
if self.clisession:
self.clisession.detach()
self.clisession = None
def attachsession(self, session):
self.clisession = session
self.data_handler = session.data_handler
termreq = {
'proxyconsole': {
'name': self.myname,
'user': self.user,
'tenant': self.cfm.tenant,
'node': self.node,
'skipreplay': self.skipreplay,
'width': self.initsize[0],
'height': self.initsize[1],
#TODO(jjohnson2): declare myself as a proxy,
#facilitate redirect rather than relay on manager change
},
}
try:
remote = socket.create_connection((self.managerinfo['address'], 13001))
remote = ssl.wrap_socket(remote, cert_reqs=ssl.CERT_NONE,
keyfile='/etc/confluent/privkey.pem',
certfile='/etc/confluent/srvcert.pem')
if not util.cert_matches(self.managerinfo['fingerprint'],
remote.getpeercert(binary_form=True)):
raise Exception('Invalid peer certificate')
except Exception:
eventlet.sleep(3)
if self.clisession:
self.clisession.detach()
self.detachsession(None)
return
tlvdata.recv(remote)
tlvdata.recv(remote)
tlvdata.send(remote, termreq)
self.remote = remote
eventlet.spawn(self.relay_data)
def detachsession(self, session):
# we will disappear, so just let that happen...
if self.remote:
try:
tlvdata.send(self.remote, {'operation': 'stop'})
except Exception:
pass
self.clisession = None
def send_break(self):
tlvdata.send(self.remote, {'operation': 'break'})
def reopen(self):
tlvdata.send(self.remote, {'operation': 'reopen'})
def resize(self, width, height):
tlvdata.send(self.remote, {'operation': 'resize', 'width': width,
'height': height})
# This represents an API view of a console handler. It handles things like
# holding caller-specific queue data: for example, when the HTTP API should be
# sending data but there is no outstanding POST request to receive it,
# this object has the job of holding the data.
class ConsoleSession(object):
"""Create a new socket to converse with node console
This object provides a filehandle that can be read/written
too in a normal fashion and the concurrency, logging, and
event watching will all be handled seamlessly
:param node: Name of the node for which this session will be created
:param configmanager: A configuration manager object for current context
:param username: Username for which this session object will operate
:param datacallback: An asynchronous data handler, to be called when data
is available. Note that if passed, it makes
'get_next_output' non-functional
:param skipreplay: If true, will skip the attempt to redraw the screen
"""
def __init__(self, node, configmanager, username, datacallback=None,
skipreplay=False, direct=True, width=80, height=24):
self.registered = False
self.tenant = configmanager.tenant
if not configmanager.is_node(node):
raise exc.NotFoundException("Invalid node")
self.username = username
self.node = node
self.configmanager = configmanager
self.direct = direct # true if client is directly connected versus
# relay
self.width = width
self.height = height
self.connect_session()
self.registered = True
self._evt = None
self.node = node
self.write = self.conshdl.write
if datacallback is None:
self.reaper = eventlet.spawn_after(15, self.destroy)
self.databuffer = collections.deque([])
self.data_handler = self.got_data
if not skipreplay:
self.databuffer.extend(self.conshdl.get_recent())
else:
self.data_handler = datacallback
if not skipreplay:
for recdata in self.conshdl.get_recent():
if recdata:
datacallback(recdata)
self.conshdl.attachsession(self)
def connect_session(self):
"""Connect to the appropriate backend handler
This is not intended to be called by your usual consumer,
it is a hook for confluent to abstract the concept of a terminal
between console and shell.
"""
self.conshdl = connect_node(self.node, self.configmanager,
self.username, self.direct, self.width,
self.height)
def send_break(self):
"""Send break to remote system
"""
self.conshdl.send_break()
def resize(self, width, height):
self.conshdl.resize(width, height)
def get_buffer_age(self):
"""Get the age in seconds of the buffered data
Returns False if no data buffered yet"""
return self.conshdl.get_buffer_age()
def reopen(self):
"""Reopen the session
This can be useful if there is suspicion that the remote console is
dead. Note that developers should consider the need for this a bug unless
there really is some fundamental, unavoidable limitation regarding
automatically detecting an unusable console in the underlying
technology that cannot be unambiguously autodetected.
"""
self.conshdl.reopen()
def destroy(self):
if self.registered:
self.conshdl.detachsession(self)
if self._evt:
self._evt.send()
self._evt = None
self.reghdl = None
def detach(self):
"""Handler for the console handler to detach so it can reattach,
currently to facilitate changing from one collective.manager to
another
:return:
"""
self.conshdl.detachsession(self)
self.connect_session()
self.conshdl.attachsession(self)
self.write = self.conshdl.write
def got_data(self, data):
"""Receive data from console and buffer
If the caller does not provide a callback and instead will be polling
for data, we must maintain data in a buffer until retrieved. This is
an internal function used as a means to convert the async behavior to
polling for consumers that cannot do the async behavior.
"""
self.databuffer.append(data)
if self._evt:
self._evt.send()
self._evt = None
def get_next_output(self, timeout=45):
"""Poll for next available output on this console.
Ideally a purely event-driven scheme would be used. AJAX over HTTP is
at least one case where we don't have that luxury. This function
will not work if the session was initialized with a data callback
instead of polling mode.
"""
self.reaper.cancel()
# postpone death to be 15 seconds after this would timeout
self.reaper = eventlet.spawn_after(timeout + 15, self.destroy)
if self._evt:
raise Exception('get_next_output is not re-entrant')
if not self.databuffer:
self._evt = eventlet.event.Event()
with eventlet.Timeout(timeout, False):
self._evt.wait()
self._evt = None
if not self.databuffer:
return ""
currdata = self.databuffer.popleft()
if isinstance(currdata, dict):
return currdata
retval = currdata
while self.databuffer and not isinstance(self.databuffer[0], dict):
retval += self.databuffer.popleft()
return retval
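# Hedged usage sketch (not part of confluent itself): a polling consumer creates
# a ConsoleSession without a datacallback and then loops on get_next_output(),
# which blocks for up to 'timeout' seconds per call. The names below (cfgmgr,
# handle_output) are placeholders.
#
#   session = ConsoleSession('somenode', cfgmgr, 'admin')
#   while True:
#       output = session.get_next_output(timeout=45)
#       if output:
#           handle_output(output)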
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import argparse
import datetime
import warnings
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import task_register
from luigi import six
from luigi import configuration
from luigi.deprecate_kwarg import deprecate_kwarg
from datetime import timedelta
from luigi.cmdline_parser import CmdlineParser
_no_value = object()
class ParameterException(Exception):
"""
Base exception.
"""
pass
class MissingParameterException(ParameterException):
"""
Exception signifying that there was a missing Parameter.
"""
pass
class UnknownParameterException(ParameterException):
"""
Exception signifying that an unknown Parameter was supplied.
"""
pass
class DuplicateParameterException(ParameterException):
"""
Exception signifying that a Parameter was specified multiple times.
"""
pass
class Parameter(object):
"""
An untyped Parameter
Parameters are objects set on the Task class level to make it possible to parameterize tasks.
For instance:
.. code:: python
class MyTask(luigi.Task):
foo = luigi.Parameter()
class RequiringTask(luigi.Task):
def requires(self):
return MyTask(foo="hello")
def run(self):
print(self.requires().foo) # prints "hello"
This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.
When a task is instantiated, it will first use any argument as the value of the parameter, eg.
if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the
value will be resolved in this order of falling priority:
* Any value provided on the command line:
- With qualified task name (eg. ``--TaskA-param xyz``)
- Without (eg. ``--param xyz``)
* With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion`
* Any default value set using the ``default`` flag.
There are subclasses of ``Parameter`` that define what type the parameter has. This is not
enforced within Python, but is used for command line interaction.
Parameter objects may be reused, but you must then set the ``positional=False`` flag.
"""
_counter = 0 # non-atomically increasing counter used for ordering parameters.
@deprecate_kwarg('is_boolean', 'is_bool', False)
def __init__(self, default=_no_value, is_boolean=False, is_global=False, significant=True, description=None,
config_path=None, positional=True, always_in_help=False):
"""
:param default: the default value for this parameter. This should match the type of the
Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
``IntParameter``. By default, no default is stored and
the value must be specified at runtime.
:param bool is_bool: specify ``True`` if the parameter is a bool value. Default:
``False``. Bools have an implicit default value of ``False``.
:param bool significant: specify ``False`` if the parameter should not be treated as part of
the unique identifier for a Task. An insignificant Parameter might
also be used to specify a password or other sensitive information
that should not be made public via the scheduler. Default:
``True``.
:param str description: A human-readable string describing the purpose of this Parameter.
For command-line invocations, this will be used as the `help` string
shown to users. Default: ``None``.
:param dict config_path: a dictionary with entries ``section`` and ``name``
specifying a config file entry from which to read the
default value for this parameter. DEPRECATED.
Default: ``None``.
:param bool positional: If true, you can set the argument as a
positional argument. Generally we recommend ``positional=False``
as positional arguments become very tricky when
you have inheritance and whatnot.
:param bool always_in_help: For the --help option in the command line
parsing. Set true to always show in --help.
"""
if is_boolean:
self.__default = False
assert default is not True
else:
self.__default = default
self.is_bool = is_boolean # Only BoolParameter should ever use this. TODO(erikbern): should we raise some kind of exception?
if is_global:
warnings.warn("is_global support is removed. Assuming positional=False",
DeprecationWarning,
stacklevel=2)
positional = False
self.significant = significant # Whether different values for this parameter will differentiate otherwise equal tasks
self.positional = positional
self.description = description
self.always_in_help = always_in_help
if config_path is not None and ('section' not in config_path or 'name' not in config_path):
raise ParameterException('config_path must be a hash containing entries for section and name')
self.__config = config_path
self._counter = Parameter._counter # We need to keep track of this to get the order right (see Task class)
Parameter._counter += 1
def _get_value_from_config(self, section, name):
"""Loads the default from the config. Returns _no_value if it doesn't exist"""
conf = configuration.get_config()
try:
value = conf.get(section, name)
except (NoSectionError, NoOptionError):
return _no_value
return self.parse(value)
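# For example (illustrative), with a client configuration containing
#   [MyTask]
#   foo=bar
# _get_value_from_config('MyTask', 'foo') returns self.parse('bar'), while a
# missing section or option yields the _no_value sentinel.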
def _get_value(self, task_name, param_name):
for value, warn in self._value_iterator(task_name, param_name):
if value != _no_value:
if warn:
warnings.warn(warn, DeprecationWarning)
return value
return _no_value
def _value_iterator(self, task_name, param_name):
"""
Yield the parameter values, with optional deprecation warning as second tuple value.
The parameter value will be the first non-``_no_value`` value that is yielded.
"""
cp_parser = CmdlineParser.get_instance()
if cp_parser:
dest = self._parser_dest(param_name, task_name)
found = getattr(cp_parser.known_args, dest, None)
yield (self._parse_or_no_value(found), None)
yield (self._get_value_from_config(task_name, param_name), None)
yield (self._get_value_from_config(task_name, param_name.replace('_', '-')),
'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format(
task_name, param_name))
if self.__config:
yield (self._get_value_from_config(self.__config['section'], self.__config['name']),
'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
self.__config['section'], self.__config['name'], task_name, param_name))
yield (self.__default, None)
def has_task_value(self, task_name, param_name):
return self._get_value(task_name, param_name) != _no_value
def task_value(self, task_name, param_name):
value = self._get_value(task_name, param_name)
if value == _no_value:
raise MissingParameterException("No default specified")
else:
return value
def parse(self, x):
"""
Parse an individual value from the input.
The default implementation is the identity function, but subclasses should override
this method for specialized parsing.
:param str x: the value to parse.
:return: the parsed value.
"""
return x # default impl
def serialize(self, x):
"""
Opposite of :py:meth:`parse`.
Converts the value ``x`` to a string.
:param x: the value to serialize.
"""
return str(x)
@classmethod
def next_in_enumeration(_cls, _value):
"""
If your Parameter type has an enumerable ordering of values, you can
choose to override this method. This method is used by the
:py:mod:`luigi.execution_summary` module for pretty printing
purposes, enabling it to pretty-print tasks like ``MyTask(num=1),
MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``.
:param value: The value
:return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
"""
return None
def _parse_or_no_value(self, x):
if not x:
return _no_value
else:
return self.parse(x)
@staticmethod
def _parser_dest(param_name, task_name):
return task_name + '_' + param_name
@staticmethod
def _parser_flag_names(param_name, task_name, is_without_section, as_active):
if is_without_section:
yield param_name
else:
if as_active:
yield param_name
yield task_name + '_' + param_name
def _add_to_cmdline_parser(self, parser, param_name, task_name, is_without_section, as_active, help_all):
dest = self._parser_dest(param_name, task_name)
flag_names = self._parser_flag_names(param_name, task_name, is_without_section, as_active)
flags = ['--' + flag_name.replace('_', '-') for flag_name in flag_names]
if self.is_bool:
action = "store_true"
else:
action = "store"
help = self.description if as_active or help_all or self.always_in_help else argparse.SUPPRESS
parser.add_argument(*flags,
help=help,
action=action,
dest=dest
)
class DateParameterBase(Parameter):
"""
Base class Parameter for dates. Code reuse is made possible since all date
parameters are serialized in the same way.
"""
@abc.abstractproperty
def date_format(self):
"""
Override me with a :py:meth:`~datetime.date.strftime` string.
"""
pass
@abc.abstractproperty
def _timedelta(self):
"""
Either override me with a :py:class:`~datetime.timedelta` value or
implement :py:meth:`~Parameter.next_in_enumeration` to return ``None``.
"""
pass
def serialize(self, dt):
"""
Converts the date to a string using the :py:attr:`~DateParameterBase.date_format`.
"""
if dt is None:
return str(dt)
return dt.strftime(self.date_format)
@classmethod
def next_in_enumeration(cls, value):
return value + cls._timedelta
class DateParameter(DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.date`.
A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
July 10, 2013.
"""
date_format = '%Y-%m-%d'
_timedelta = timedelta(days=1)
def parse(self, s):
"""
Parses a date string formatted as ``YYYY-MM-DD``.
"""
return datetime.datetime.strptime(s, self.date_format).date()
class MonthParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the month
(day of :py:class:`~datetime.date` is "rounded" to first of the month).
A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
July of 2013.
"""
date_format = '%Y-%m'
@staticmethod
def next_in_enumeration(_value):
return None
class YearParameter(DateParameter):
"""
Parameter whose value is a :py:class:`~datetime.date`, specified to the year
(day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).
A YearParameter is a Date string formatted ``YYYY``.
"""
date_format = '%Y'
@staticmethod
def next_in_enumeration(_value):
return None
class DateHourParameter(DateParameterBase):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.
A DateHourParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
19:00.
"""
date_format = '%Y-%m-%dT%H' # ISO 8601 is to use 'T'
_timedelta = timedelta(hours=1)
def parse(self, s):
"""
Parses a string to a :py:class:`~datetime.datetime` using the format string ``%Y-%m-%dT%H``.
"""
return datetime.datetime.strptime(s, self.date_format)
class DateMinuteParameter(DateHourParameter):
"""
Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.
A DateMinuteParameter is an `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
19:07.
"""
date_format = '%Y-%m-%dT%H%M'
_timedelta = timedelta(minutes=1)
deprecated_date_format = '%Y-%m-%dT%HH%M'
def parse(self, s):
try:
value = datetime.datetime.strptime(s, self.deprecated_date_format)
warnings.warn(
'Using "H" between hours and minutes is deprecated, omit it instead.',
DeprecationWarning,
stacklevel=2
)
return value
except ValueError:
return super(DateMinuteParameter, self).parse(s)
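# For example (illustrative): DateMinuteParameter().parse('2013-07-10T1907') and
# the deprecated form '2013-07-10T19H07' both yield datetime(2013, 7, 10, 19, 7);
# the deprecated form additionally emits a DeprecationWarning.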
class IntParameter(Parameter):
"""
Parameter whose value is an ``int``.
"""
def parse(self, s):
"""
Parses an ``int`` from the string using ``int()``.
"""
return int(s)
@staticmethod
def next_in_enumeration(value):
return value + 1
class FloatParameter(Parameter):
"""
Parameter whose value is a ``float``.
"""
def parse(self, s):
"""
Parses a ``float`` from the string using ``float()``.
"""
return float(s)
class BoolParameter(Parameter):
"""
A Parameter whose value is a ``bool``.
"""
def __init__(self, *args, **kwargs):
"""
This constructor passes along args and kwargs to ctor for :py:class:`Parameter` but
specifies ``is_bool=True``.
"""
super(BoolParameter, self).__init__(*args, is_bool=True, **kwargs)
def parse(self, s):
"""
Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
"""
return {'true': True, 'false': False}[str(s).lower()]
class BooleanParameter(BoolParameter):
def __init__(self, *args, **kwargs):
warnings.warn(
'BooleanParameter is deprecated, use BoolParameter instead',
DeprecationWarning,
stacklevel=2
)
super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
"""
A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.
Date Intervals are specified using the ISO 8601 `Time Interval
<http://en.wikipedia.org/wiki/ISO_8601#Time_intervals>`_ notation.
"""
# Class that maps to/from dates using ISO 8601 standard
# Also gives some helpful interval algebra
def parse(self, s):
"""
Parses a :py:class:`~luigi.date_interval.DateInterval` from the input.
see :py:mod:`luigi.date_interval`
for details on the parsing of DateIntervals.
"""
# TODO: can we use xml.utils.iso8601 or something similar?
from luigi import date_interval as d
for cls in [d.Year, d.Month, d.Week, d.Date, d.Custom]:
i = cls.parse(s)
if i:
return i
else:
raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
"""
Class that maps to timedelta using strings in any of the following forms:
* ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]]|s[econd[s]]}`` (e.g. "1 week 2 days" or "1 h")
Note: multiple arguments must be supplied in longest to shortest unit order
* ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
* ISO 8601 duration ``PnW``
See https://en.wikipedia.org/wiki/ISO_8601#Durations
"""
def _apply_regex(self, regex, input):
import re
re_match = re.match(regex, input)
if re_match:
kwargs = {}
has_val = False
for k, v in six.iteritems(re_match.groupdict(default="0")):
val = int(v)
has_val = has_val or val != 0
kwargs[k] = val
if has_val:
return timedelta(**kwargs)
def _parseIso8601(self, input):
def field(key):
return "(?P<%s>\d+)%s" % (key, key[0].upper())
def optional_field(key):
return "(%s)?" % field(key)
# A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
return self._apply_regex(regex, input)
def _parseSimple(self, input):
keys = ["weeks", "days", "hours", "minutes", "seconds"]
# Give the digits a regex group name from the keys, then look for text with the first letter of the key,
# optionally followed by the rest of the word, with final char (the "s") optional
regex = "".join(["((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
return self._apply_regex(regex, input)
def parse(self, input):
"""
Parses a time delta from the input.
See :py:class:`TimeDeltaParameter` for details on supported formats.
"""
result = self._parseIso8601(input)
if not result:
result = self._parseSimple(input)
if result:
return result
else:
raise ParameterException("Invalid time delta - could not parse %s" % input)
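# Illustrative parses matching the formats documented in the class docstring:
#   TimeDeltaParameter().parse('1 w 2 d') == timedelta(weeks=1, days=2)
#   TimeDeltaParameter().parse('P1DT2H')  == timedelta(days=1, hours=2)
#   TimeDeltaParameter().parse('P2W')     == timedelta(weeks=2)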
class TaskParameter(Parameter):
"""
A parameter that takes another luigi task class.
When used programmatically, the parameter should be specified
directly with the :py:class:`luigi.task.Task` (sub) class. Like
``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line,
you specify the :py:attr:`luigi.task.Task.task_family`. Like
.. code:: console
$ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask
Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.
When the :py:class:`luigi.task.Task` class is instantiated to an object,
the value will always be a task class (and not a string).
"""
def parse(self, input):
"""
Parse a task_family using the :class:`~luigi.task_register.Register`
"""
return task_register.Register.get_task_cls(input)
|
|
""" Utilities for transforming the syntax/style/grammar of a document, usually asciidoc or markdown
Instantiates Objects derived from the `_sre.SRE_Pattern` class (compiled regular expressions) so they work with regex.sub()
"""
import logging
# import regex
from copy import copy
from nlpia.regexes import Pattern, RE_HYPERLINK
# from nlpia.constants import DATA_PATH
log = logging.getLogger(__name__)
def looks_like_name(s):
if len(s) < 3:
return None
if ' ' in s or '.' not in s or '/' not in s:
return s
class Matcher(Pattern):
""" Pattern with additional .ismatch() that returns bool(re.match())
ismatch is designed to be overridden by a custom function that returns a bool
>>> chars = list('Hello World!')
>>> m = Matcher('[a-z]')
>>> [m.ismatch(s) for s in chars]
[False, True, True, True, True, False, False, True, True, True, True, False]
>>> m = Matcher('^[A-Z][a-z]+$')
>>> tokens = 'A BIG Hello World to You!'.split()
>>> m = Matcher(lambda s: len(s) <= 3)
>>> [m.ismatch(s) for s in tokens]
[True, True, False, False, True, False]
>>> m = Matcher(None)
>>> [m.ismatch(s) for s in tokens]
[True, True, True, True, True, True]
"""
def __init__(self, pattern='', ismatchfun=None):
if callable(pattern):
ismatchfun = pattern
pattern = ''
self._ismatchfun = ismatchfun or self._return_true
super().__init__(pattern or '')
def _return_true(self, *args, **kwargs):
return True
def _return_false(self, *args, **kwargs):
return False
def _ismatchfun(self, s):
return self._return_true()
def ismatch(self, s):
""" like compiled_re.match() but returns True or False """
if self._ismatchfun(s) and self._compiled_pattern.match(s):
return True
return False
def match(self, s, *args, **kwargs):
if self._ismatchfun(s):
return super().match(s, *args, **kwargs)
class Filter(Matcher):
""" Pattern with additional .ismatch() and .filter() methods
>>> chars = list('Hello World!')
>>> m = Filter('[a-z]')
>>> [m.filter(c) for c in chars]
['', 'e', 'l', 'l', 'o', '', '', 'o', 'r', 'l', 'd', '']
>>> m = Filter('^[A-Z][a-z]+$')
>>> tokens = 'A BIG Hello World to You!'.split()
>>> [m.filter(s) for s in tokens]
['', '', 'Hello', 'World', '', '']
>>> m = Filter(None)
>>> [m.filter(s) for s in tokens]
['A', 'BIG', 'Hello', 'World', 'to', 'You!']
"""
def filter(self, s):
""" like compiled_re.match() but returns the entire string if it matches or empty string otherwise """
if self.ismatch(s):
return s
return ''
class Translator(Pattern):
r""" A pattern for translating a diff file into a more human (non-programmer) readable form
This is the start of a translator demo that turns diff patch files into human-readable email.
>> from nlpia.loaders import get_data
>> difftxt = get_data('forum_user_557658.patch')
>>> tran = Translator()
"""
def __init__(self, pattern=r'^\-(?P<text>.*)', template=' was: {text}'):
    super().__init__(pattern=pattern)
    self.template = template
def replace(self, text, to_template='{name} ({url})', from_template=None,
name_matcher=Matcher(looks_like_name), url_matcher=Matcher(r'.*[^:]+$')):
""" Replace all occurrences of rendered from_template in text with `template` rendered from each match.groupdict()
TODO: from_template
>>> translator = HyperlinkStyleCorrector()
>>> adoc = 'See http://totalgood.com[Total Good] about that.'
>>> translator.replace(adoc, '{scheme_type}s://', '{scheme}://')
'See http://totalgood.com[Total Good] about that.'
>>> adoc = "Nada here:// Only a .com & no (parens.symbol) or http/[hyperlinks] or anything!"
>>> translator.translate(adoc)
'Nada here:// Only a .com & no (parens.symbol) or http/[hyperlinks] or anything!'
>>> adoc = "Two http://what.com[WAT] with https://another.com/api?q=1&a=2[longer url]."
>>> translator.translate(adoc)
'Two WAT (http://what.com) with longer url (https://another.com/api?q=1&a=2).'
"""
self.name_matcher = name_matcher or Matcher()
self.url_matcher = url_matcher or Matcher()
matches = self.finditer(text)
newdoc = copy(text)
log.debug('before translate: {}'.format(newdoc))
for m in matches:
# this outer m.captures() loop is overkill:
# overlapping pattern matches probably won't match after the first replace
log.debug('match: {}'.format(m))
log.debug('match.captures(): {}'.format(m.captures()))
for i, captured_str in enumerate(m.captures()):
captureddict = {'name': None, 'scheme': None, 'url': None}
for k, v in m.capturesdict().items():
if len(v) > i:
captureddict[k] = v[i]
else:
captureddict[k] = None
log.warning('Overlapping captured matches were mishandled: {}'.format(m.capturesdict()))
# need to check for optional args:
name = captureddict.get('name', None)
url = captureddict.get('url', None)
scheme = captureddict.get('scheme', None)
if (not scheme or not name or not self.name_matcher.ismatch(name) or
not url or not self.url_matcher.ismatch(url)):
continue
if from_template:
rendered_from_template = from_template.format(**captureddict)
else:
rendered_from_template = captured_str
# TODO: render numbered references like r'\1' before rendering named references
# or do them together in one `.format(**kwargs)` after translating \1 to {1} and groupsdict().update({1: ...})
rendered_to_template = to_template.format(**m.groupdict())
newdoc = newdoc.replace(rendered_from_template, rendered_to_template)
return newdoc
class HyperlinkStyleCorrector(Pattern):
""" A pattern for matching asciidoc hyperlinks for transforming them to print-book version (Manning Style)
>>> adoc = 'See http://totalgood.com[Total Good] about that.'
>>> translator = HyperlinkStyleCorrector()
>>> matches = list(translator.finditer(adoc))
>>> m = matches[0]
>>> m
<regex.Match object; span=(4, 36), match='http://totalgood.com[Total Good]'>
>>> for m in matches:
... newdoc = adoc.replace(
... '{scheme}'.format(**m.groupdict()),
... ''.format(**m.groupdict()))
>>> newdoc
'See totalgood.com[Total Good] about that.'
>>> translator.replace(adoc, '{scheme}', '{scheme_type}s://')
'See http://totalgood.com[Total Good] about that.'
"""
def __init__(self, pattern=RE_HYPERLINK):
super().__init__(pattern=pattern)
def name_matcher(s):
return s
def replace(self, text, to_template='{name} ({url})', from_template=None,
name_matcher=Matcher(looks_like_name), url_matcher=Matcher(r'.*[^:]+$')):
""" Replace all occurrences of rendered from_template in text with `template` rendered from each match.groupdict()
TODO: from_template
>>> translator = HyperlinkStyleCorrector()
>>> adoc = 'See http://totalgood.com[Total Good] about that.'
>>> translator.replace(adoc, '{scheme_type}s://', '{scheme}')
'See https://totalgood.com[Total Good] about that.'
>>> adoc = "Nada here:// Only a .com & no (parens.symbol) or http/[hyperlinks] or anything!"
>>> translator.translate(adoc)
'Nada here:// Only a .com & no (parens.symbol) or http/[hyperlinks] or anything!'
>>> adoc = "Two http://what.com[WAT] with https://another.com/api?q=1&a=2[longer url]."
>>> translator.translate(adoc)
'Two WAT (http://what.com) with longer url (https://another.com/api?q=1&a=2).'
"""
self.name_matcher = name_matcher or Matcher()
self.url_matcher = url_matcher or Matcher()
matches = self.finditer(text)
newdoc = copy(text)
log.debug('before translate: {}'.format(newdoc))
for m in matches:
# this outer m.captures() loop is overkill:
# overlapping pattern matches probably won't match after the first replace
log.debug('match: {}'.format(m))
log.debug('match.captures(): {}'.format(m.captures()))
for i, captured_str in enumerate(m.captures()):
captureddict = {'name': None, 'scheme': None, 'url': None}
for k, v in m.capturesdict().items():
if len(v) > i:
captureddict[k] = v[i]
else:
captureddict[k] = None
log.warning('Overlapping captured matches were mishandled: {}'.format(m.capturesdict()))
# need to check for optional args:
name = captureddict.get('name', None)
url = captureddict.get('url', None)
scheme = captureddict.get('scheme', None)
if (not scheme or not name or not self.name_matcher.ismatch(name) or
not url or not self.url_matcher.ismatch(url)):
continue
if from_template:
rendered_from_template = from_template.format(**captureddict)
else:
rendered_from_template = captured_str
# TODO: render numbered references like r'\1' before rendering named references
# or do them together in one `.format(**kwargs)` after translating \1 to {1} and groupsdict().update({1: ...})
rendered_to_template = to_template.format(**m.groupdict())
newdoc = newdoc.replace(rendered_from_template, rendered_to_template)
return newdoc
def translate(self, text, to_template='{name} ({url})', from_template=None, name_matcher=None, url_matcher=None):
""" Translate hyperinks into printable book style for Manning Publishing
>>> translator = HyperlinkStyleCorrector()
>>> adoc = 'See http://totalgood.com[Total Good] about that.'
>>> translator.translate(adoc)
'See Total Good (http://totalgood.com) about that.'
"""
return self.replace(text, to_template=to_template, from_template=from_template,
name_matcher=name_matcher, url_matcher=url_matcher)
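# Illustrative sketch, not part of the original module: plugging a custom
# Matcher into the corrector so only short link names are rewritten.  The
# asciidoc string and the length threshold are invented for this example.
def _example_custom_matchers():  # pragma: no cover - documentation aid
    corrector = HyperlinkStyleCorrector()
    short_names_only = Matcher(lambda s: len(s) <= 10)
    adoc = 'See http://totalgood.com[Total Good] about that.'
    # With the default to_template this should yield
    # 'See Total Good (http://totalgood.com) about that.'
    return corrector.replace(adoc, to_template='{name} ({url})',
                             name_matcher=short_names_only)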
|
|
import json
import pytest
import jsonschema
from framework.auth.core import Auth
from framework.exceptions import PermissionsError
from website.settings import DOI_FORMAT, DATACITE_PREFIX
from website.project.licenses import set_license
from osf.models import FileMetadataSchema, NodeLicense, NodeLog
from osf_tests.factories import ProjectFactory, SubjectFactory, AuthUserFactory
from api_tests.utils import create_test_file
@pytest.fixture()
def node():
return ProjectFactory()
@pytest.fixture()
def osf_file(node):
return create_test_file(target=node, user=node.creator)
def inject_placeholder_doi(json_data):
# The OSF cannot currently issue DOIs for a file, but a DOI is required for datacite schema validation.
# Manually add a placeholder in tests for validation until we handle this better.
placeholder = DOI_FORMAT.format(prefix=DATACITE_PREFIX, guid='placeholder')
json_data['identifier'] = {'identifierType': 'DOI', 'identifier': placeholder}
return json_data
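# Minimal sketch of how the helper above is used on serialized record data;
# the exact shape of DOI_FORMAT is defined in website.settings and is not
# assumed here beyond producing a DOI-like string.
def _example_inject_placeholder_doi():
    data = inject_placeholder_doi({'titles': []})
    assert data['identifier']['identifierType'] == 'DOI'
    return data['identifier']['identifier']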
@pytest.mark.django_db
class TestFileMetadataRecordSerializer:
def test_record_created_post_save(self, node, osf_file):
# check there's a record for every FileMetadataSchema
assert FileMetadataSchema.objects.count() > 0
assert osf_file.records.count() == FileMetadataSchema.objects.count()
for record in osf_file.records.all().select_related('file'):
assert record.file == osf_file
def test_serialize_record_datacite(self, node, osf_file):
# Test all of the parts of serialize_json that are auto-generated
# from relationships and properties on the node and file
# add a contributor with an ORCID
contributor = AuthUserFactory()
contributor.external_identity = {
'ORCID': {
'0000-0001-9143-4653': 'VERIFIED'
}
}
contributor.save()
node.add_contributor(contributor, save=False)
# add subjects, tags, license, and guid
tags = ['fish', 'scale']
[osf_file.add_tag(tag, auth=Auth(node.creator), save=False) for tag in tags]
bepress_subject = SubjectFactory(text='BePress Text')
new_subject = SubjectFactory(bepress_subject=bepress_subject)
node.subjects.add(new_subject)
no_license = NodeLicense.objects.get(name='CC0 1.0 Universal')
license_detail = {
'id': no_license.license_id,
'year': '2018',
'copyrightHolders': ['Woop', 'Yeah']
}
set_license(node, license_detail, Auth(node.creator))
osf_file.save()
node.save()
osf_file.target.reload()
record = osf_file.records.get(schema___id='datacite')
serialized_record = json.loads(record.serialize())
# test titles
titles = [title['title'] for title in serialized_record['titles']]
assert osf_file.name in titles
assert node.title in titles
# test dates
dates = [date['date'] for date in serialized_record['dates']]
assert str(osf_file.created) in dates
assert str(osf_file.modified) in dates
assert str(osf_file.created.year) == serialized_record['publicationYear']
# no resource type provided
assert serialized_record['resourceType']['resourceType'] == '(:unas)'
assert serialized_record['resourceType']['resourceTypeGeneral'] == 'Other'
# guid in alternate identifiers
file_guid = osf_file.guids.first()._id
alternate_identifier = serialized_record['alternateIdentifiers'][0]
assert file_guid in alternate_identifier['alternateIdentifier']
# check for tags and subjects
subjects_in_record = [sub['subject'] for sub in serialized_record['subjects']]
assert bepress_subject.text in subjects_in_record
for tag in tags:
assert tag in subjects_in_record
# node license
rights = serialized_record['rightsList'][0]
assert rights['rights'] == no_license.name
assert rights['rightsURI'] == no_license.url
# test most recent version
assert serialized_record['version'] == osf_file.versions.first().identifier
def test_validate(self, node, osf_file):
record = osf_file.records.get(schema___id='datacite')
json_data = json.loads(record.serialize())
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
) is None
@pytest.mark.django_db
class TestFileMetadataRecord:
@pytest.fixture()
def initial_metadata(self):
return {
'file_description': 'Hello this is a description',
'resource_type': 'Book',
'related_publication_doi': '10.123/fkosf/hello'
}
@pytest.fixture()
def record(self, osf_file):
return osf_file.records.first()
def test_update_record(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
partial_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
]
}
record.update(partial_metadata, user=node.creator)
# Make sure an update creates a node log
assert node.logs.latest().action == NodeLog.FILE_METADATA_UPDATED
# Make sure old fields are cleared
assert not set(initial_metadata) & set(record.metadata)
full_metadata = {
'funders': [
{'funding_agency': 'Hello'},
{'funding_agency': 'Ric Flair', 'grant_number': 'Woooooo'},
],
'file_description': 'Hey this is a great interesting important file',
'resource_type': 'Funding Submission',
'related_publication_doi': '10.12345/fk2osf.io/hello/'
}
record.update(full_metadata, user=node.creator)
json_data = json.loads(record.serialize())
datacite_user_entered_fields = ['fundingReferences', 'resourceType', 'descriptions', 'relatedIdentifiers']
for field in datacite_user_entered_fields:
assert field in json_data.keys()
# validate record with all user entered metadata
assert jsonschema.validate(
inject_placeholder_doi(json_data),
record.schema.schema
) is None
def test_update_fails_with_incorrect_metadata(self, node, record):
# metadata not in schema fails
wrong_metadata = {
'favorite_schema': 'crossref'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_metadata, user=node.creator)
record.reload()
assert record.metadata == {}
assert node.logs.latest().action != NodeLog.FILE_METADATA_UPDATED
# metadata not matching schema pattern fails
wrong_doi = {
'related_publication_doi': 'whatever'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_doi, user=node.creator)
# resource_type not in specified options fails
wrong_resource_type = {
'resource_type': 'Scrap Book'
}
with pytest.raises(jsonschema.ValidationError):
record.update(wrong_resource_type, user=node.creator)
# funders but no funding agency
no_funding_agency_metadata = {
'funders': [
{'grant_number': 'Woooo'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(no_funding_agency_metadata, user=node.creator)
# additional properties for funders fails
more_funders_metadata = {
'funders': [
{'funding_agency': 'Woop', 'there_it': 'is'}
]
}
with pytest.raises(jsonschema.ValidationError):
record.update(more_funders_metadata, user=node.creator)
def test_update_permissions(self, node, record, initial_metadata):
# Can't update with non-contributor auth
rando = AuthUserFactory()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=rando)
# Can't update with read-only auth
read_contrib = AuthUserFactory()
node.add_contributor(read_contrib, permissions=['read'])
node.save()
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=read_contrib)
# Can't update with no auth
with pytest.raises(PermissionsError):
record.update(initial_metadata, user=None)
def test_forked_file_has_metadata_copied(self, node, record, initial_metadata):
record.metadata = initial_metadata
record.save()
fork = node.fork_node(auth=Auth(node.creator))
forked_record = fork.files.first().records.first()
assert forked_record.metadata == record.metadata
|
|
#!/usr/bin/env python
"""
Try out different partition assignment algorithms on a simulation trace, and
evaluate their performance.
"""
import argparse
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
def ParseArguments():
parser = argparse.ArgumentParser(description="Plot times")
parser.add_argument('--input', type=str, required=True,
help="Directory for input files")
parser.add_argument('--output', type=str, required=True,
help="Directory for output files")
parser.add_argument('--window', type=int, default=10,
help="Window (horizon) for computing assignments")
parser.add_argument('--cores', type=int, default=-1,
help="Compute quantities per core, not per worker")
return parser.parse_args()
"""
Generic assignment algorithm, parent class for all assignment algorithms.
"""
class AssignmentAlgorithm(object):
def __init__(self, num_workers, num_cores_per_worker, partition_time,
cores):
if cores != -1:
cores_per_worker = cores
self.num_workers = (num_workers * num_cores_per_worker) // cores_per_worker
self.num_cores_per_worker = cores_per_worker
else:
self.num_workers = num_workers
self.num_cores_per_worker = num_cores_per_worker
self.num_partitions = partition_time.shape[0]
self.num_iterations = partition_time.shape[1]
self.partition_time = partition_time
self.partition_to_worker = np.zeros(
shape=(self.num_partitions, self.num_iterations), dtype=int)
self.worker_time = np.zeros(
shape=(self.num_workers, self.num_iterations), dtype=float)
"""
Compute partition_to_worker for every partition and iteration.
"""
def AssignPartitions(self):
raise Exception("Unimplemented!")
"""
Compute worker time.
"""
def ComputeWorkerTimes(self):
for w in range(self.num_workers):
partitions = (self.partition_to_worker == w)
partition_time = self.partition_time.copy()
partition_time[~partitions] = 0
self.worker_time[w,:] = partition_time.sum(axis=0)/self.num_cores_per_worker
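# Worked example of the masking above (numbers invented, not from a trace):
# with partition_time = [[4, 6], [2, 2], [3, 1]] (3 partitions x 2
# iterations), partitions 0 and 2 assigned to worker w, and 2 cores per
# worker, the mask zeroes out partition 1 and
# worker_time[w, :] = [(4 + 3) / 2, (6 + 1) / 2] = [3.5, 3.5].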
"""
Plot worker time.
"""
def PlotWorkerTimes(self, file_name):
start_iter=0
ave_worker_time = self.worker_time.mean(axis=0)[start_iter:]/1000
max_worker_time = self.worker_time.max(axis=0)[start_iter:]/1000
iterations = np.arange(start_iter, self.num_iterations, 1)
fig = plt.figure()
ax = fig.add_subplot(111)
min_time = ave_worker_time.min()
plt.plot(iterations, ave_worker_time, label='Average busy time')
plt.plot(iterations, max_worker_time, label='Maximum busy time')
plt.plot(iterations, 10*max_worker_time/ave_worker_time, label='LIF')
plt.legend(loc='lower right')
ax.set_ylabel('Time (seconds)')
ax.set_xlabel('Iteration Number')
fig.savefig(file_name)
"""
Plot iteration completion time -- determined by slowest worker.
"""
def PlotCompletionTimes(self, file_name):
start_iter=0
max_worker_time = self.worker_time.max(axis=0)[start_iter:]/1000
ave_worker_time = self.worker_time.mean(axis=0)[start_iter:]/1000
iterations = np.arange(start_iter, self.num_iterations, 1)
fig = plt.figure()
ax = fig.add_subplot(111)
cum_max_worker_time = np.cumsum(max_worker_time)
cum_ave_worker_time = np.cumsum(ave_worker_time)
plt.plot(iterations, cum_max_worker_time,
label='Completion Time')
plt.plot(iterations, cum_ave_worker_time,
label='Best Case Completion Time')
plt.grid(True)
plt.legend(loc='lower right')
ax.set_ylabel('Time (seconds)')
ax.set_xlabel('Iteration Number')
fig.savefig(file_name)
print("Total time = %f" % (cum_max_worker_time[-1]))
print("Max time ratio = %f" % (cum_max_worker_time[-1]/cum_ave_worker_time[-1]))
"""
Plot aggregate times at worker.
"""
def PlotAllWorkerTimes(self, file_name):
num_iterations = self.worker_time.shape[1]
iterations = np.arange(0, num_iterations, 1)
fig = plt.figure()
ax = fig.add_subplot(111)
for w in range(self.num_workers):
c = 0.5*w/self.num_workers
plt.plot(iterations, self.worker_time[w,:].reshape((num_iterations)),
color=(c,c,c), linestyle='--', linewidth=0.5,
marker='*', markersize=2)
plt.plot(iterations, self.worker_time.max(axis=0),
color='b', linestyle='--', linewidth=0.5,
marker='*', markersize=2)
plt.plot(iterations, self.worker_time.mean(axis=0),
color='r', linestyle='--', linewidth=0.5,
marker='*', markersize=2)
ax.set_ylim(bottom=0)
ax.set_ylabel('Time (seconds)')
ax.set_xlabel('Iteration Number')
fig.savefig(file_name, dpi=300)
"""
Run evaluation.
"""
def RunEvaluation(self, file_prefix=""):
self.AssignPartitions()
self.ComputeWorkerTimes()
if file_prefix != "":
self.PlotWorkerTimes(file_prefix + '_mean_max.png')
self.PlotCompletionTimes(file_prefix + '_completion.png')
self.PlotAllWorkerTimes(file_prefix+'_all.png')
"""
Perform a separate random assignment of partitions for consecutive "window"
frames.
"""
class AssignRandom(AssignmentAlgorithm):
def __init__(self, num_workers, num_cores_per_worker, partition_time,
per_core, assignment_window):
super(AssignRandom, self).__init__(num_workers, num_cores_per_worker,
partition_time, per_core)
self.assignment_window = assignment_window
"""
Compute partition_to_worker for every partition and iteration.
"""
def AssignPartitions(self):
print("Applying random assignments ...")
num_windows = int(math.ceil(float(self.num_iterations)/self.assignment_window))
partitions_per_worker = self.num_partitions // self.num_workers
for i in range(num_windows):
start_iter = i*self.assignment_window
end_iter = min(start_iter + self.assignment_window, self.num_iterations)
assignment = np.zeros((self.num_partitions,1), dtype=int)
for w in range(self.num_workers):
start = w*partitions_per_worker
end = start+partitions_per_worker
assignment[start:end] = w
np.random.shuffle(assignment)
self.partition_to_worker[:,start_iter:end_iter] = assignment
class DynamicGreedyForWindow(object):
def __init__(self, data, start_iter, end_iter):
# Initialize data.
self.data = data
self.num_workers = data.num_workers
self.num_cores_per_worker = data.num_cores_per_worker
self.num_partitions = data.num_partitions
self.start_iter = start_iter
self.end_iter = end_iter
self.window = end_iter - start_iter
self.partition_time = data.partition_time[:, start_iter:end_iter]
# Assignment for this window.
self.assignment = np.zeros((self.num_partitions), dtype=int) - 1
# Maximum and minimum for each partition
self.max_load_time = self.partition_time.argmax(axis=1).flatten()
self.max_load = self.partition_time.max(axis=1).flatten()
self.min_load = self.partition_time.min(axis=1).flatten()
# Compute variation and group partitions based on variation
variation = np.abs(self.max_load - self.min_load)
variation_nbins = 4
variation_min = variation.min().flatten()
variation_max = variation.max().flatten()
variation_bin_size = (variation_max - variation_min)/variation_nbins
variation_group = np.zeros(variation.shape, dtype=int) + variation_nbins - 1
for j in range(variation.shape[0]):
for b in range(variation_nbins):
if variation[j] < variation_min + b*variation_bin_size:
variation_group[j] = b
break
self.num_groups = variation_nbins
self.group = variation_group
# Store work assigned so far, and other variables to compute assignment.
self.work_assigned = np.zeros(
shape=(self.num_workers, self.window), dtype=float)
self.work_average = self.partition_time.sum(axis=0)/self.num_workers
self.next_worker = 0
def ComputeDynamicAssignment(self, g):
# Get indices for partitions in this group.
partitions_to_assign = np.where(self.group == g)[0]
group_mask = self.group == g
# Sort them by when they reach the maxima.
max_at = self.max_load_time[group_mask]
partition_order = np.argsort(max_at)
partitions_to_assign = partitions_to_assign[partition_order]
num_to_assign = partitions_to_assign.shape[0]
# Now go over each partition in this group, and assign it to the next
# worker that can accommodate this partition, or the worker with least
# amount of work.
for p in partitions_to_assign:
p_max_t = self.max_load_time[p]
p_max_load = self.max_load[p]
left_t = self.work_average[p_max_t] - self.work_assigned[:, p_max_t].flatten()
available_t = left_t > p_max_load
assignment = -1
if sum(available_t) > 0:
# Some worker can accommodate this partition
nw = self.next_worker
while not available_t[nw]:
nw = (nw+1)%self.num_workers
assignment = nw
self.next_worker = (nw+1)%self.num_workers
else:
# Just do a greedy assignment. Since no worker can fit this
# partition, pick the worker with least amount of work at t.
assignment = np.argmax(left_t)
self.assignment[p] = assignment
self.work_assigned[assignment, :] += self.partition_time[p, :]
def ComputeStaticAssignment(self, g):
# Get indices for partitions in this group.
partitions_to_assign = np.where(self.group == g)[0]
group_mask = self.group == g
# Sort them by average amount of load.
partition_load = self.partition_time[group_mask, :].mean(axis=1).flatten()
partition_order = np.argsort(partition_load)
partition_order = np.flip(partition_order, axis=0)
partitions_to_assign = partitions_to_assign[partition_order]
num_to_assign = partitions_to_assign.shape[0]
# Assign each partition to least loaded worker.
for p in partitions_to_assign:
assigned_average = self.work_assigned.mean(axis=1).flatten()
assignment = np.argmin(assigned_average)
self.assignment[p] = assignment
self.work_assigned[assignment, :] += self.partition_time[p, :]
def AssignPartitions(self):
for g in range(self.num_groups-1, -1, -1):
num_in_group = sum(self.group == g)
#print("Number of partitions in group %d = %d" % (g, num_in_group))
if g > self.num_groups/2:
self.ComputeDynamicAssignment(g)
#self.ComputeStaticAssignment(g)
else:
self.ComputeStaticAssignment(g)
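# Hedged sketch of the variation binning used above, on invented numbers, to
# make the grouping rule concrete: partitions whose load varies a lot over
# the window land in the top bins (dynamic, time-aware placement), flat ones
# in the low bins (static least-loaded placement).
def _toy_variation_groups(max_load=(9.0, 5.0, 5.1), min_load=(1.0, 4.0, 5.0),
                          nbins=4):
    variation = np.abs(np.asarray(max_load) - np.asarray(min_load))
    vmin, vmax = variation.min(), variation.max()
    bin_size = (vmax - vmin) / nbins
    group = np.zeros(variation.shape, dtype=int) + nbins - 1
    for j in range(variation.shape[0]):
        for b in range(nbins):
            if variation[j] < vmin + b * bin_size:
                group[j] = b
                break
    return group  # array([3, 1, 1]) for the defaults above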
"""
Perform dynamic greedy assignment from scratch for every window.
"""
class AssignDynamicGreedyFromScratch(AssignmentAlgorithm):
def __init__(self, num_workers, num_cores_per_worker, partition_time,
per_core, assignment_window):
super(AssignDynamicGreedyFromScratch, self).__init__(
num_workers, num_cores_per_worker, partition_time, per_core)
self.assignment_window = assignment_window
"""
Compute partition_to_worker for every partition and iteration.
"""
def AssignPartitions(self):
print("Applying assignments using dynamic greedy from scratch ...")
num_windows = int(math.ceil(float(self.num_iterations)/self.assignment_window))
for i in range(num_windows):
start_iter = i*self.assignment_window
end_iter = min(start_iter + self.assignment_window, self.num_iterations)
helper = DynamicGreedyForWindow(self, start_iter, end_iter)
helper.AssignPartitions()
self.partition_to_worker[:,start_iter:end_iter] = helper.assignment.reshape((-1,1))
"""
Perform greedy assignment from scratch for every window, using just the current information.
"""
class AssignSimpleGreedyFromScratch(AssignmentAlgorithm):
def __init__(self, num_workers, num_cores_per_worker, partition_time,
per_core, assignment_window):
super(AssignSimpleGreedyFromScratch, self).__init__(
num_workers, num_cores_per_worker, partition_time, per_core)
self.assignment_window = assignment_window
"""
Compute partition_to_worker for every partition and iteration.
"""
def AssignPartitions(self):
print("Applying assignments using simple greedy from scratch ...")
num_windows = int(math.ceil(float(self.num_iterations)/self.assignment_window))
for i in range(num_windows):
start_iter = i*self.assignment_window
end_iter = min(start_iter + self.assignment_window, self.num_iterations)
# Get the load at the beginning of the window and sort in descending order.
load_start = self.partition_time[:,start_iter].flatten()
partition_order = np.argsort(load_start)
partition_order = np.flip(partition_order, axis=0)
# Compute greedy assignment based on the load at start.
assignment = np.zeros(self.num_partitions, dtype=int)
assigned_work = np.zeros(self.num_workers, dtype=float)
for p in partition_order:
w = np.argmin(assigned_work)
assignment[p] = w
assigned_work[w] += load_start[p]
self.partition_to_worker[:,start_iter:end_iter] = assignment.reshape((-1,1))
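# Hedged sketch (not used by the evaluation above): the same greedy rule on a
# toy example, to make the argsort/argmin logic concrete.  Five partition
# loads are packed onto two workers by always giving the next-heaviest load
# to the currently least-loaded worker.
def _toy_greedy_assignment(loads=(5.0, 4.0, 3.0, 2.0, 1.0), num_workers=2):
    loads = np.asarray(loads, dtype=float)
    assignment = np.zeros(loads.shape[0], dtype=int)
    assigned_work = np.zeros(num_workers, dtype=float)
    for p in np.flip(np.argsort(loads), axis=0):  # heaviest partition first
        w = np.argmin(assigned_work)              # least-loaded worker
        assignment[p] = w
        assigned_work[w] += loads[p]
    return assignment, assigned_work              # loads split 8.0 / 7.0 here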
if __name__ == '__main__':
args = ParseArguments()
# Load data.
partition_time = np.load(args.input+'partition_time.npy')
parameters = pickle.load(open(args.input + 'parameters.txt', 'rb'))
# Evaluate random partitioning.
random = AssignRandom(parameters['num_workers'], parameters['num_cores'],
partition_time, args.cores, args.window)
random.RunEvaluation(os.path.join(args.output, 'random_assignment'))
# Evaluate dynamic greedy partitioning.
dynamic_greedy_scratch = AssignDynamicGreedyFromScratch(
parameters['num_workers'], parameters['num_cores'],
partition_time, args.cores, args.window)
dynamic_greedy_scratch.RunEvaluation(
os.path.join(args.output, 'dynamic_greedy_scratch_assignment'))
# Evaluate greedy partitioning based on just the current information.
greedy_scratch = AssignSimpleGreedyFromScratch(
parameters['num_workers'], parameters['num_cores'],
partition_time, args.cores, args.window)
greedy_scratch.RunEvaluation(
os.path.join(args.output, 'greedy_scratch_assignment'))
|
|
# coding: utf-8
"""Website building routines."""
import os
import shutil
import traceback
from publicstatic import conf
from publicstatic import const
from publicstatic import logger
from publicstatic import helpers
from publicstatic import templates
def order():
"""Returns a sequence of builder functions."""
return [
css,
js,
less,
robots,
humans,
static,
pages,
posts,
archive,
tags,
atom,
sitemap,
]
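# Minimal usage sketch (an assumption, not part of the build pipeline): a
# driver is expected to call each builder in this order with a cache object
# exposing the assets()/pages()/posts()/index() interface used below.
def _example_run_builders(source_cache):  # pragma: no cover - documentation aid
    for build in order():
        build(source_cache)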
def css(cache):
"""Minify CSS files to the build path."""
for source in cache.assets(ext='.css'):
helpers.makedirs(source.dest_dir())
command = conf.get('min_css_cmd')
if conf.get('min_css') and command:
logger.info('minifying CSS: ' + source.rel_path())
helpers.execute(command, source.path(), source.dest())
else:
logger.info('copying: ' + source.rel_path())
shutil.copyfile(source.path(), source.dest())
helpers.utime(source.dest(), source.updated())
source.processed(True)
def js(cache):
"""Minify JavaScript files to the build path."""
for source in cache.assets(ext='.js'):
helpers.makedirs(source.dest_dir())
command = conf.get('min_js_cmd')
if conf.get('min_js') and command:
logger.info('minifying JavaScript: ' + source.rel_path())
helpers.execute(command, source.path(), source.dest())
else:
logger.info('copying: ' + source.rel_path())
shutil.copyfile(source.path(), source.dest())
helpers.utime(source.dest(), source.updated())
source.processed(True)
def less(cache):
"""Compile and minify less files."""
for source in cache.assets(ext='.less'):
helpers.makedirs(source.dest_dir())
logger.info('compiling LESS: ' + source.rel_path())
if conf.get('min_css') and conf.get('min_css_cmd'):
tmp_file = os.path.join(source.dest_dir(), '_' + source.basename())
helpers.execute(conf.get('less_cmd'), source.path(), tmp_file)
logger.info('minifying CSS: ' + source.rel_path())
helpers.execute(conf.get('min_css_cmd'), tmp_file, source.dest())
os.remove(tmp_file)
else:
helpers.execute(conf.get('less_cmd'), source.path(), source.dest())
helpers.utime(source.dest(), source.updated())
source.processed(True)
def robots(cache):
"""Build robots.txt."""
for source in cache.assets(basename='robots.txt'):
logger.info('processing ' + source.rel_path())
helpers.makedirs(source.dest_dir())
try:
data = _complement({})
templates.render_file(source.path(), data, source.dest())
except Exception as ex:
logger.error('robots.txt processing failed: ' + str(ex))
logger.debug(traceback.format_exc())
finally:
source.processed(True)
def humans(cache):
"""Build humans.txt."""
for source in cache.assets(basename='humans.txt'):
logger.info('processing ' + source.rel_path())
helpers.makedirs(source.dest_dir())
try:
data = _complement({})
templates.render_file(source.path(), data, source.dest())
except Exception as ex:
logger.error('humans.txt processing failed: ' + str(ex))
logger.debug(traceback.format_exc())
finally:
source.processed(True)
def static(cache):
"""Copy other assets as is to the build path."""
for source in cache.assets(processed=False):
logger.info('copying: ' + source.rel_path())
helpers.makedirs(source.dest_dir())
shutil.copyfile(source.path(), source.dest())
helpers.utime(source.dest(), source.updated())
source.processed(True)
def pages(cache):
"""Build site pages."""
for source in cache.pages():
logger.info(_to('page', source.rel_path(), source.rel_dest()))
helpers.makedirs(source.dest_dir())
try:
data = _complement(source.data(), index=cache.index())
templates.render_page(data, source.dest())
except Exception as ex:
logger.error('page building error: ' + str(ex))
logger.debug(traceback.format_exc())
def posts(cache):
"""Build blog posts and copy the latest post to the site root."""
for source in cache.posts():
logger.info(_to('post', source.rel_path(), source.rel_dest()))
helpers.makedirs(source.dest_dir())
try:
data = _complement(source.data())
templates.render_page(data, source.dest())
except Exception as ex:
logger.error('post building error: ' + str(ex))
logger.debug(traceback.format_exc())
if conf.get('post_at_root_url'): # put the latest post at site root url
last = cache.posts()[0]
path = os.path.join(conf.get('build_path'), conf.get('index_page'))
logger.info(_to('root', last.rel_dest(), conf.get('index_page')))
if any(cache.pages(dest=conf.get('index_page'))):
logger.warn('root page will be overwritten by the latest post')
try:
shutil.copyfile(last.dest(), path)
except FileNotFoundError:
logger.error("latest post was not generated and can't be copied")
def archive(cache):
"""Build blog archive page."""
dest = os.path.join(conf.get('build_path'), conf.get('archive_location'))
logger.info('archive: ' + conf.get('archive_location'))
helpers.makedirs(os.path.dirname(dest))
page_data = {'title': 'Archive', 'tags': cache.tags()}
data = _complement(page_data, index=cache.index())
templates.render(data, 'archive.html', dest)
def tags(cache):
"""Build blog tag pages."""
for tag in cache.tags():
tag = tag['name']
dest = helpers.tag_path(tag)
logger.info(_to('tag', tag, dest))
helpers.makedirs(os.path.dirname(dest))
data = _complement({'title': tag}, index=cache.index(tag=tag))
templates.render(data, 'tag.html', dest)
def atom(cache):
"""Build atom feed."""
data = _complement(index=cache.index())
dest = os.path.join(conf.get('build_path'), conf.get('atom_location'))
logger.info(_to('atom feed', dest))
helpers.makedirs(os.path.dirname(dest))
templates.render(data, 'atom.xml', dest)
def sitemap(cache):
"""Build sitemap.xml."""
data = _complement(index=cache.full_index())
dest = os.path.join(conf.get('build_path'), const.SITEMAP)
logger.info(_to('sitemap', dest))
helpers.makedirs(os.path.dirname(dest))
templates.render(data, 'sitemap.xml', dest)
def _complement(page_data=None, index=None):
"""Complement individual page data with common variables and site index."""
return {
'commons': conf.commons(),
'page': page_data,
'index': index if index is not None else [],
}
def _rel(path):
build_path = conf.get('build_path')
use_rel = path.startswith(build_path)
return os.path.relpath(path, build_path) if use_rel else path
def _to(subj, a, b=None):
"""Generates a log message like '<something>: <from> -> <to>'."""
message = ("%s -> %s" % (a, _rel(b))) if b is not None else _rel(a)
return "%s: %s" % (subj, message)
|
|
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
import pypowervm.exceptions as exc
import pypowervm.tasks.power_opts as popts
import pypowervm.wrappers.base_partition as bp
class TestPowerOpts(testtools.TestCase):
def _test_enum(self, enum):
"""Validate that an enum class has a KEY and proper ALL_VALUES.
:param enum: Enumeration class.
"""
# Get the public symbols in the enum
syms = {sym for sym in dir(enum) if not sym.startswith('_')}
# Must have a KEY
self.assertIn('KEY', syms)
self.assertIsNotNone(getattr(enum, 'KEY'))
syms.remove('KEY')
# Must have ALL_VALUES
self.assertIn('ALL_VALUES', syms)
syms.remove('ALL_VALUES')
# ALL_VALUES must include all the values that aren't KEY or ALL_VALUES
self.assertEqual({getattr(enum, sym) for sym in syms},
set(enum.ALL_VALUES))
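# A hypothetical enum with the shape this helper expects (illustration only,
# not a pypowervm class):
#
#     class FakeBootFlag(object):
#         KEY = 'BootFlag'
#         ON = 'on'
#         OFF = 'off'
#         ALL_VALUES = (ON, OFF)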
def test_enums(self):
self._test_enum(popts.IPLSrc)
self._test_enum(popts.BootMode)
self._test_enum(popts.KeylockPos)
self._test_enum(popts.IBMiOperationType)
self._test_enum(popts.PowerOffOperation)
def test_remove_optical(self):
knm = popts.RemoveOptical.KEY_NAME
ktm = popts.RemoveOptical.KEY_TIME
# Default time
self.assertEqual({knm: 'name', ktm: 0},
popts.RemoveOptical.bld_map('name'))
# Explicit time
self.assertEqual({knm: 'name', ktm: 10},
popts.RemoveOptical.bld_map('name', time=10))
def test_power_on_opts(self):
# Default init
poo = popts.PowerOnOpts()
self.assertEqual('PowerOn()', str(poo))
self.assertEqual('PowerOn', poo.JOB_SUFFIX)
# Legacy add_parms init
poo = popts.PowerOnOpts(legacy_add_parms=dict(foo=1, bar=2))
self.assertEqual('PowerOn(bar=2, foo=1)', str(poo))
# Carry those additional params forward to make sure they don't vanish
# Enum validation
for meth in ('bootmode', 'keylock_pos', 'ibmi_ipl_source',
'ibmi_op_type'):
self.assertRaises(exc.InvalidEnumValue, getattr(poo, meth), 'foo')
# Set specific (valid) values
# Setter method returns the instance
self.assertIs(poo, poo.bootmode(popts.BootMode.NORM))
self.assertEqual('PowerOn(bar=2, bootmode=norm, foo=1)', str(poo))
self.assertIs(poo, poo.keylock_pos(popts.KeylockPos.MANUAL))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, foo=1, keylock=manual)', str(poo))
self.assertIs(poo, poo.bootstring('canvas cord with aglet'))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=canvas cord with aglet, '
'foo=1, keylock=manual)', str(poo))
# Make sure overwrite works
self.assertIs(poo, poo.bootstring('sturdy shoelace'))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'keylock=manual)', str(poo))
self.assertIs(poo, poo.force())
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'force=true, keylock=manual)', str(poo))
# Turning off force gets rid of the key
self.assertIs(poo, poo.force(value=False))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'keylock=manual)', str(poo))
# Remove optical with default time
self.assertIs(poo, poo.remove_optical('vopt'))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'keylock=manual, remove_optical_name=vopt, remove_optical_time=0)',
str(poo))
# Remove optical with explicit time. Values are overwritten.
self.assertIs(poo, poo.remove_optical('VOPT', time=5))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'keylock=manual, remove_optical_name=VOPT, remove_optical_time=5)',
str(poo))
self.assertIs(poo, poo.ibmi_ipl_source(popts.IPLSrc.A))
self.assertEqual(
'PowerOn(bar=2, bootmode=norm, bootstring=sturdy shoelace, foo=1, '
'iIPLsource=a, keylock=manual, remove_optical_name=VOPT, '
'remove_optical_time=5)', str(poo))
self.assertIs(poo, poo.ibmi_op_type(popts.IBMiOperationType.NETBOOT))
self.assertEqual(
'PowerOn(OperationType=netboot, bar=2, bootmode=norm, '
'bootstring=sturdy shoelace, foo=1, iIPLsource=a, keylock=manual, '
'remove_optical_name=VOPT, remove_optical_time=5)', str(poo))
# Netboot params.
poo = popts.PowerOnOpts().ibmi_netboot_params(
'ip', 'serverip', 'gateway', 'serverdir')
self.assertEqual(
'PowerOn(Gateway=gateway, IBMiImageServerDirectory=serverdir, '
'IPAddress=ip, ServerIPAddress=serverip)', str(poo))
# Optional netboot params, and overwrites
self.assertIs(poo, poo.ibmi_netboot_params(
'IP', 'ServerIP', 'Gateway', 'ServerDir', vlanid=2, mtu='mtu',
duplex='duplex', connspeed=100, subnet='subnet'))
self.assertEqual(
'PowerOn(ConnectionSpeed=100, DuplexMode=duplex, Gateway=Gateway, '
'IBMiImageServerDirectory=ServerDir, IPAddress=IP, '
'MaximumTransmissionUnit=mtu, ServerIPAddress=ServerIP, '
'SubnetMask=subnet, VLANID=2)', str(poo))
def test_power_off_opts(self):
# Can OS shutdown?
ltyp = bp.LPARType
rmcs = bp.RMCState
for env, rmc, exp in ((ltyp.AIXLINUX, rmcs.ACTIVE, True),
(ltyp.AIXLINUX, rmcs.BUSY, False),
(ltyp.AIXLINUX, rmcs.INACTIVE, False),
(ltyp.OS400, rmcs.ACTIVE, True),
(ltyp.OS400, rmcs.BUSY, True),
(ltyp.OS400, rmcs.INACTIVE, True),
(ltyp.VIOS, rmcs.ACTIVE, True),
(ltyp.VIOS, rmcs.BUSY, False),
(ltyp.VIOS, rmcs.INACTIVE, False)):
self.assertEqual(exp, popts.PowerOffOpts.can_os_shutdown(
mock.Mock(env=env, rmc_state=rmc)))
# Default init
poo = popts.PowerOffOpts()
self.assertEqual('PowerOff()', str(poo))
self.assertEqual('PowerOff', poo.JOB_SUFFIX)
self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY))
# Legacy add_parms init. Unknown keys are ignored.
poo = popts.PowerOffOpts(
legacy_add_parms=dict(operation='shutdown', foo=1, restart='true',
bar=2, immediate='true'))
self.assertEqual(
'PowerOff(immediate=true, operation=shutdown, restart=true)',
str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Now an "empty" one
poo = popts.PowerOffOpts(legacy_add_parms=dict(foo=1, bar=2))
self.assertEqual('PowerOff()', str(poo))
self.assertFalse(poo.is_immediate)
self.assertFalse(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY))
# Immediate
self.assertIs(poo, poo.immediate())
self.assertEqual('PowerOff(immediate=true)', str(poo))
self.assertTrue(poo.is_immediate)
self.assertFalse(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY))
# Restart
self.assertIs(poo, poo.restart())
self.assertEqual(
'PowerOff(immediate=true, restart=true)', str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_param_set(popts.PowerOffOperation.KEY))
# Operation
self.assertIs(poo, poo.operation(popts.PowerOffOperation.DUMPRESTART))
self.assertEqual(
'PowerOff(immediate=true, operation=dumprestart, restart=true)',
str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# OS shutdown
self.assertIs(poo, poo.operation(popts.PowerOffOperation.OS))
self.assertEqual(
'PowerOff(immediate=true, operation=osshutdown, restart=true)',
str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Booleans can be shut off
self.assertIs(poo, poo.immediate(value=False))
self.assertEqual('PowerOff(operation=osshutdown, restart=true)',
str(poo))
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_os)
self.assertIs(poo, poo.restart(value=False))
self.assertEqual('PowerOff(operation=osshutdown)', str(poo))
self.assertFalse(poo.is_immediate)
self.assertFalse(poo.is_restart)
self.assertTrue(poo.is_os)
# "Smart" methods. Make sure restart is preserved every time we change
poo.restart()
# OS immediate
self.assertIs(poo, poo.os_immediate())
self.assertEqual('PowerOff(immediate=true, operation=osshutdown, '
'restart=true)', str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# OS normal (wipes out immediate)
self.assertIs(poo, poo.os_normal())
self.assertEqual('PowerOff(operation=osshutdown, restart=true)',
str(poo))
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# VSP hard
self.assertIs(poo, poo.vsp_hard())
self.assertEqual('PowerOff(immediate=true, operation=shutdown, '
'restart=true)', str(poo))
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# VSP normal (wipes out immediate)
self.assertIs(poo, poo.vsp_normal())
self.assertEqual('PowerOff(operation=shutdown, restart=true)',
str(poo))
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertFalse(poo.is_os)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Soft detect
part = mock.Mock(env=ltyp.AIXLINUX, rmc_state=rmcs.ACTIVE)
self.assertIs(poo, poo.soft_detect(part))
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Explicit normal shutdown
self.assertIs(poo, poo.soft_detect(part, immed_if_os=False))
self.assertTrue(poo.is_os)
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Explicit immediate OS shutdown
self.assertIs(poo, poo.soft_detect(part, immed_if_os=True))
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Can't OS shutdown
part = mock.Mock(env=ltyp.VIOS, rmc_state=rmcs.BUSY)
self.assertIs(poo, poo.soft_detect(part))
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# immed_if_os ignored
self.assertIs(poo, poo.soft_detect(part, immed_if_os=True))
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertIs(poo, poo.soft_detect(part, immed_if_os=False))
self.assertFalse(poo.is_os)
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertEqual('PowerOff(operation=shutdown, restart=true)',
str(poo))
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# IBMi defaults to OS normal
part = mock.Mock(env=ltyp.OS400, rmc_state=rmcs.INACTIVE)
self.assertIs(poo, poo.soft_detect(part))
self.assertTrue(poo.is_os)
self.assertFalse(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
# Explicit immediate
self.assertIs(poo, poo.soft_detect(part, immed_if_os=True))
self.assertTrue(poo.is_os)
self.assertTrue(poo.is_immediate)
self.assertTrue(poo.is_restart)
self.assertTrue(poo.is_param_set(popts.PowerOffOperation.KEY))
|
|
# Django settings for testdjango project.
import os
try:
from local_settings import APP_NAME, COMMENTS_APP
except ImportError:
APP_NAME = "tb"
COMMENTS_APP = "CommentRecaptcha"
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
ADMINS = (
('Administrator', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, 'tbonline.sqlite'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
EMAIL_HOST = 'smtp.webfaction.com'
EMAIL_HOST_USER = 'tbcab'
EMAIL_HOST_PASSWORD = '12345'
DEFAULT_FROM_EMAIL = '[email protected]'
# SERVER_EMAIL = '[email protected]'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Africa/Johannesburg'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
THUMBNAIL_DEBUG = True
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(SITE_ROOT, 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = STATIC_URL + "grappelli/"
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
#'/usr/local/lib/python2.6/dist-packages/filebrowser/media',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_)(+!6a28nsv%+f$ktmeo1a21&v=%bp=yk-tyxnh_1k*(3$0%#'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'accounts.middleware.AutoLogoutMiddleware',
'accounts.middleware.WebfactionFixMiddleware',
)
ROOT_URLCONF = 'tbonlineproject.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.join(SITE_ROOT, APP_NAME),'templates'),
os.path.join(SITE_ROOT, 'templates'),
os.path.join(SITE_ROOT, 'post/templates'),
os.path.join(SITE_ROOT, 'gallery/templates'),
os.path.join(SITE_ROOT, 'feeder/templates'),
os.path.join(SITE_ROOT, 'copyright/templates'),
os.path.join(SITE_ROOT, 'notifications/templates'),
os.path.join(SITE_ROOT, 'tweets/templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.markup',
'django.contrib.comments',
'django.contrib.flatpages',
'sorl.thumbnail',
'contact_form',
'grappelli',
'filebrowser',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.sitemaps',
'south',
'faq',
'registration',
'tagging',
'copyright',
'credit',
'relatedcontent',
'gallery',
'feeder',
'archive',
'enhancedtext',
'categories',
'post',
'story',
'tagviews',
'notifications',
'tweets',
'RegistrationRecaptcha',
'userprofiles',
COMMENTS_APP,
APP_NAME,
)
AUTHENTICATION_BACKENDS = (
'accounts.backends.EmailOrUsernameModelBackend',
'django.contrib.auth.backends.ModelBackend'
)
TWEETS_ACTIVATED = True
GRAPPELLI_ADMIN_TITLE = '<a href="/">TB Online</a> Administration'
ACCOUNT_ACTIVATION_DAYS = 2
LOGIN_REDIRECT_URL = '/'
HAYSTACK_SITECONF = 'tbonlineproject.search_sites'
HAYSTACK_SEARCH_ENGINE = 'xapian'
#HAYSTACK_WHOOSH_PATH = os.path.join(SITE_ROOT, 'whoosh_search_index')
HAYSTACK_XAPIAN_PATH = os.path.join(SITE_ROOT, 'xapian_search_index')
RECAPTCHA_PUBLIC_KEY = '6LeMQsoSAAAAAP5BQhOF0kuPCPvtwwu_9AYshPMA'
RECAPTCHA_PRIVATE_KEY = '6LeMQsoSAAAAAGrrDr05Uyhoh7DJHHsArD4BNXmA'
MAX_NUM_IMAGES = 10
#The time in MINUTES that the homepage and posts views will be cached
CACHE_TIME = 0
CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': 'unix:/home/tbcab/memcached.sock'
# }
}
TEMPLATE_CONTEXT_PROCESSORS = ("django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"post.context_processors.current_site")
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Mailchimp settings
MAILCHIMP_KEY = 'ba62b7f0b188051d29344063caba744b-us11'
LIST_ID = 'ae7bcd30bb'
try:
import local_settings
from local_settings import *
except ImportError:
pass
else:
PROJECT_TEMPLATE_DIR = getattr(local_settings, "PROJECT_TEMPLATE_DIR", None)
if PROJECT_TEMPLATE_DIR:
TEMPLATE_DIRS = (PROJECT_TEMPLATE_DIR,) + TEMPLATE_DIRS
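# Hedged sketch of the optional local_settings.py consumed above; every value
# is a placeholder, not the project's real configuration:
#
#     APP_NAME = 'tb'
#     COMMENTS_APP = 'CommentRecaptcha'
#     DEBUG = True
#     PROJECT_TEMPLATE_DIR = '/path/to/overriding/templates'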
|
|
# coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
# python 2 and python 3 compatibility library
import six
from onshape_client.oas.api_client import ApiClient
from onshape_client.oas.exceptions import ApiTypeError, ApiValueError
from onshape_client.oas.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
int,
none_type,
str,
validate_and_convert_types,
)
from onshape_client.oas.models import bt_webhook_params
from onshape_client.oas.models import bt_webhook_info
from onshape_client.oas.models import bt_list_response_bt_webhook_info
class WebhooksApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def __create_webhook(self, **kwargs):
"""create_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_webhook(async_req=True)
>>> result = thread.get()
Keyword Args:
bt_webhook_params (bt_webhook_params.BTWebhookParams): [optional]
_return_http_data_only (bool): return the response data only, without status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_webhook_info.BTWebhookInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
return self.call_with_http_info(**kwargs)
self.create_webhook = Endpoint(
settings={
"response_type": (bt_webhook_info.BTWebhookInfo,),
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks",
"operation_id": "create_webhook",
"http_method": "POST",
"servers": [],
},
params_map={
"all": ["bt_webhook_params",],
"required": [],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"bt_webhook_params": (bt_webhook_params.BTWebhookParams,),
},
"attribute_map": {},
"location_map": {"bt_webhook_params": "body",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": ["application/json;charset=UTF-8; qs=0.09"],
},
api_client=api_client,
callable=__create_webhook,
)
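# Hedged usage sketch (not generated code): how the endpoint defined above is
# typically invoked.  The BTWebhookParams fields are placeholders; consult
# onshape_client.oas.models.bt_webhook_params for the actual attributes.
#
#     api = WebhooksApi(api_client)
#     params = bt_webhook_params.BTWebhookParams()  # fill in real fields
#     info = api.create_webhook(bt_webhook_params=params)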
def __get_webhook(self, webhookid, **kwargs):
"""get_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_webhook(webhookid, async_req=True)
>>> result = thread.get()
Args:
webhookid (str):
Keyword Args:
_return_http_data_only (bool): return the response data only, without status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_webhook_info.BTWebhookInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["webhookid"] = webhookid
return self.call_with_http_info(**kwargs)
self.get_webhook = Endpoint(
settings={
"response_type": (bt_webhook_info.BTWebhookInfo,),
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks/{webhookid}",
"operation_id": "get_webhook",
"http_method": "GET",
"servers": [],
},
params_map={
"all": ["webhookid",],
"required": ["webhookid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {"webhookid": (str,),},
"attribute_map": {"webhookid": "webhookid",},
"location_map": {"webhookid": "path",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__get_webhook,
)
def __get_webhooks(self, **kwargs):
"""get_webhooks # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_webhooks(async_req=True)
>>> result = thread.get()
Keyword Args:
company (str): [optional] if omitted the server will use the default value of ''
user (str): [optional]
offset (int): [optional] if omitted the server will use the default value of 0
limit (int): [optional] if omitted the server will use the default value of 20
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_list_response_bt_webhook_info.BTListResponseBTWebhookInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
return self.call_with_http_info(**kwargs)
self.get_webhooks = Endpoint(
settings={
"response_type": (
bt_list_response_bt_webhook_info.BTListResponseBTWebhookInfo,
),
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks",
"operation_id": "get_webhooks",
"http_method": "GET",
"servers": [],
},
params_map={
"all": ["company", "user", "offset", "limit",],
"required": [],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"company": (str,),
"user": (str,),
"offset": (int,),
"limit": (int,),
},
"attribute_map": {
"company": "company",
"user": "user",
"offset": "offset",
"limit": "limit",
},
"location_map": {
"company": "query",
"user": "query",
"offset": "query",
"limit": "query",
},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__get_webhooks,
)
def __ping_webhook(self, webhookid, **kwargs):
"""ping_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.ping_webhook(webhookid, async_req=True)
>>> result = thread.get()
Args:
webhookid (str):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["webhookid"] = webhookid
return self.call_with_http_info(**kwargs)
self.ping_webhook = Endpoint(
settings={
"response_type": None,
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks/{webhookid}/ping",
"operation_id": "ping_webhook",
"http_method": "POST",
"servers": [],
},
params_map={
"all": ["webhookid",],
"required": ["webhookid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {"webhookid": (str,),},
"attribute_map": {"webhookid": "webhookid",},
"location_map": {"webhookid": "path",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__ping_webhook,
)
def __unregister_webhook(self, webhookid, **kwargs):
"""unregister_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.unregister_webhook(webhookid, async_req=True)
>>> result = thread.get()
Args:
webhookid (str):
Keyword Args:
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
None
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["webhookid"] = webhookid
return self.call_with_http_info(**kwargs)
self.unregister_webhook = Endpoint(
settings={
"response_type": None,
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks/{webhookid}",
"operation_id": "unregister_webhook",
"http_method": "DELETE",
"servers": [],
},
params_map={
"all": ["webhookid",],
"required": ["webhookid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {"webhookid": (str,),},
"attribute_map": {"webhookid": "webhookid",},
"location_map": {"webhookid": "path",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": [],
},
api_client=api_client,
callable=__unregister_webhook,
)
def __update_webhook(self, webhookid, **kwargs):
"""update_webhook # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_webhook(webhookid, async_req=True)
>>> result = thread.get()
Args:
webhookid (str):
Keyword Args:
bt_webhook_params (bt_webhook_params.BTWebhookParams): [optional]
_return_http_data_only (bool): return the response data only, without
the HTTP status code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If a
single number is provided, it is the total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done on the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done on the data received from the server.
Default is True.
_host_index (int): specifies the index of the server
that we want to use.
Default is 0.
async_req (bool): execute request asynchronously
Returns:
bt_webhook_info.BTWebhookInfo
If the method is called asynchronously, returns the request
thread.
"""
kwargs["async_req"] = kwargs.get("async_req", False)
kwargs["_return_http_data_only"] = kwargs.get(
"_return_http_data_only", True
)
kwargs["_preload_content"] = kwargs.get("_preload_content", True)
kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
kwargs["_host_index"] = kwargs.get("_host_index", 0)
kwargs["webhookid"] = webhookid
return self.call_with_http_info(**kwargs)
self.update_webhook = Endpoint(
settings={
"response_type": (bt_webhook_info.BTWebhookInfo,),
"auth": ["OAuth2"],
"endpoint_path": "/api/webhooks/{webhookid}",
"operation_id": "update_webhook",
"http_method": "POST",
"servers": [],
},
params_map={
"all": ["webhookid", "bt_webhook_params",],
"required": ["webhookid",],
"nullable": [],
"enum": [],
"validation": [],
},
root_map={
"validations": {},
"allowed_values": {},
"openapi_types": {
"webhookid": (str,),
"bt_webhook_params": (bt_webhook_params.BTWebhookParams,),
},
"attribute_map": {"webhookid": "webhookid",},
"location_map": {"webhookid": "path", "bt_webhook_params": "body",},
"collection_format_map": {},
},
headers_map={
"accept": ["application/vnd.onshape.v1+json;charset=UTF-8;qs=0.1"],
"content_type": ["application/json;charset=UTF-8; qs=0.09"],
},
api_client=api_client,
callable=__update_webhook,
)
class Endpoint(object):
def __init__(
self,
settings=None,
params_map=None,
root_map=None,
headers_map=None,
api_client=None,
callable=None,
):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (tuple/None): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
'collection_format_map' (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient): api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map["all"].extend(
[
"async_req",
"_host_index",
"_preload_content",
"_request_timeout",
"_return_http_data_only",
"_check_input_type",
"_check_return_type",
]
)
self.params_map["nullable"].extend(["_request_timeout"])
self.validations = root_map["validations"]
self.allowed_values = root_map["allowed_values"]
self.openapi_types = root_map["openapi_types"]
extra_types = {
"async_req": (bool,),
"_host_index": (int,),
"_preload_content": (bool,),
"_request_timeout": (none_type, int, (int,), [int]),
"_return_http_data_only": (bool,),
"_check_input_type": (bool,),
"_check_return_type": (bool,),
}
self.openapi_types.update(extra_types)
self.attribute_map = root_map["attribute_map"]
self.location_map = root_map["location_map"]
self.collection_format_map = root_map["collection_format_map"]
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map["enum"]:
if param in kwargs:
check_allowed_values(self.allowed_values, (param,), kwargs[param])
for param in self.params_map["validation"]:
if param in kwargs:
check_validations(self.validations, (param,), kwargs[param])
if kwargs["_check_input_type"] is False:
return
for key, value in six.iteritems(kwargs):
fixed_val = validate_and_convert_types(
value,
self.openapi_types[key],
[key],
False,
kwargs["_check_input_type"],
configuration=self.api_client.configuration,
)
kwargs[key] = fixed_val
def __gather_params(self, kwargs):
params = {
"body": None,
"collection_format": {},
"file": {},
"form": [],
"header": {},
"path": {},
"query": [],
}
for param_name, param_value in six.iteritems(kwargs):
param_location = self.location_map.get(param_name)
if param_location is None:
continue
if param_location:
if param_location == "body":
params["body"] = param_value
continue
base_name = self.attribute_map[param_name]
if param_location == "form" and self.openapi_types[param_name] == (
file_type,
):
params["file"][param_name] = [param_value]
elif param_location == "form" and self.openapi_types[param_name] == (
[file_type],
):
# param_value is already a list
params["file"][param_name] = param_value
elif param_location in {"form", "query"}:
param_value_full = (base_name, param_value)
params[param_location].append(param_value_full)
if param_location not in {"form", "query"}:
params[param_location][base_name] = param_value
collection_format = self.collection_format_map.get(param_name)
if collection_format:
params["collection_format"][base_name] = collection_format
return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
pet_api = PetApi()
pet_api.add_pet # this is an instance of the class Endpoint
pet_api.add_pet() # this invokes pet_api.add_pet.__call__()
which then invokes the callable functions stored in that endpoint at
pet_api.add_pet.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
def call_with_http_info(self, **kwargs):
try:
_host = self.settings["servers"][kwargs["_host_index"]]
except IndexError:
if self.settings["servers"]:
raise ApiValueError(
"Invalid host index. Must be 0 <= index < %s"
% len(self.settings["servers"])
)
_host = None
for key, value in six.iteritems(kwargs):
if key not in self.params_map["all"]:
raise ApiTypeError(
"Got an unexpected parameter '%s'"
" to method `%s`" % (key, self.settings["operation_id"])
)
# only throw this nullable ApiValueError if _check_input_type
# is False, if _check_input_type==True we catch this case
# in self.__validate_inputs
if (
key not in self.params_map["nullable"]
and value is None
and kwargs["_check_input_type"] is False
):
raise ApiValueError(
"Value may not be None for non-nullable parameter `%s`"
" when calling `%s`" % (key, self.settings["operation_id"])
)
for key in self.params_map["required"]:
if key not in kwargs.keys():
raise ApiValueError(
"Missing the required parameter `%s` when calling "
"`%s`" % (key, self.settings["operation_id"])
)
self.__validate_inputs(kwargs)
params = self.__gather_params(kwargs)
accept_headers_list = self.headers_map["accept"]
if accept_headers_list:
params["header"]["Accept"] = self.api_client.select_header_accept(
accept_headers_list
)
content_type_headers_list = self.headers_map["content_type"]
if content_type_headers_list:
header_list = self.api_client.select_header_content_type(
content_type_headers_list
)
params["header"]["Content-Type"] = header_list
return self.api_client.call_api(
self.settings["endpoint_path"],
self.settings["http_method"],
params["path"],
params["query"],
params["header"],
body=params["body"],
post_params=params["form"],
files=params["file"],
response_type=self.settings["response_type"],
auth_settings=self.settings["auth"],
async_req=kwargs["async_req"],
_check_type=kwargs["_check_return_type"],
_return_http_data_only=kwargs["_return_http_data_only"],
_preload_content=kwargs["_preload_content"],
_request_timeout=kwargs["_request_timeout"],
_host=_host,
collection_formats=params["collection_format"],
)
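# Usage sketch (illustrative only): how the Endpoint callables defined above are
# typically invoked from client code. The wrapping class name `WebhooksApi`, the
# `api_client`/`params` objects and the `hook.id` attribute are assumptions based
# on the usual openapi-generator layout, not taken from this file.
#
#   api = WebhooksApi(api_client)                              # hypothetical wiring
#   hook = api.create_webhook(bt_webhook_params=params)        # synchronous, returns BTWebhookInfo
#   thread = api.get_webhooks(limit=10, async_req=True)        # asynchronous request
#   hooks = thread.get()                                       # block for the result
#   api.ping_webhook(hook.id, _request_timeout=(3.05, 10))     # (connect, read) timeouts
#   api.unregister_webhook(hook.id)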
|
|
#!/usr/bin/env python
#
# nginx-updater.py - updates nginx proxy based on udp requests
from __future__ import print_function
import socket
import re
import time
import os
from select import select
import itertools
import StringIO
import hashlib
import sys
import subprocess
class Request:
"""Represents a proxy request that has been received by the program"""
def __init__(self):
self.service = ""
self.host = ""
self.port = 0
self.timestamp = time.time()
class State:
"""Represents the current request state which is used to update nginx"""
def __init__(self):
self.requestCt = 0
self.requests = []
self.last_modified = time.time()
self.last_processed = time.time()
self.last_gc = time.time()
self.gc_interval = 5.0
self.max_request_refresh_interval = 60.0
def add_request(self, request):
"""Add a request to the application state"""
self.requestCt = self.requestCt + 1
found = False
for req in self.requests:
if req.service == request.service and \
req.host == request.host and \
req.port == request.port:
req.timestamp = request.timestamp
found = True
break
if not found:
self.requests.append(request)
self.last_modified = time.time()
def get_requests(self):
"""Get the requests and reset last_process timestamp"""
self.last_processed = self.last_modified
return self.requests
def gc_requests(self):
"""Prune the request list of unrefreshed items"""
now = time.time()
if (now - self.last_gc) > self.gc_interval:
print('now: {0}'.format(now))
for req in self.requests:
print('req.timestamp: {0}'.format(req.timestamp))
print('diff: {0}'.format(now - req.timestamp))
live = [req for req in self.requests if (now - req.timestamp) < self.max_request_refresh_interval]
dead = [req for req in self.requests if (now - req.timestamp) >= self.max_request_refresh_interval]
if len(dead):
self.requests = live
self.last_modified = now
self.last_gc = time.time()
def modified(self):
"""Have the requests been modified since they were last retrieved"""
return self.last_modified > self.last_processed
def parse_buffer(data):
"""Returns a sucessfully parsed and timestamped request or None"""
m = re.match(r"([^;]+);([\d.]+);(\d+)", data)
if not m:
print("Couldn't grok request '{0}'".format(data), file=sys.stderr)
return None
req = Request()
req.service = m.group(1)
req.host = m.group(2)
req.port = int(m.group(3))
req.timestamp = time.time()
return req
def canonical_request_dict(requests):
"""A stable sorted set of current requirements"""
key_func = lambda req: req.service
service_key_func = lambda req: '{0}-{1}'.format(req.host,req.port)
grouped_requests = sorted(requests, key=key_func)
result = {}
for k, g in itertools.groupby(grouped_requests, key_func):
result[k] = sorted(list(g), key=service_key_func)
return result
def upstream_name(uri):
"""Takes a service/uri location string and changes it into a upstream name"""
return uri.strip("/").replace("/", "-")
def prepare_upstreams(requests):
"""Prepare a nginx file with upstream directives - Expects a canonical request dict"""
upstreams = StringIO.StringIO()
upstreams.write('# Do not modify - this file is maintained by nginx_updater.py\n')
for service, providers in requests.iteritems():
upstreams.write('\nupstream {0} {{\n'.format(upstream_name(service)))
for provider in providers:
upstreams.write('\tserver {0}:{1} fail_timeout=10s;\n'.format(provider.host,provider.port))
upstreams.write('}\n')
return upstreams
def prepare_locations(requests):
"""Prepare a nginx file with location directives - Expects a canonical request dict"""
location = StringIO.StringIO()
location.write('# Do not modify - this file is maintained by nginx_updater.py\n')
for service in requests:
location.write('\nlocation {0} {{\n'.format(service))
location.write('\tproxy_pass http://{0}/;\n'.format(upstream_name(service)))
location.write('\tproxy_redirect off;\n')
location.write('\tproxy_next_upstream error timeout invalid_header http_500;\n')
location.write('\tproxy_connect_timeout 1s;\n')
location.write('}\n')
return location
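# Illustrative output of the two writers above for a single announced service
# ('/myapp/', '10.0.0.5', 8080) -- the values are placeholders, the directives
# mirror prepare_upstreams()/prepare_locations() exactly:
#
#   upstream myapp {
#       server 10.0.0.5:8080 fail_timeout=10s;
#   }
#
#   location /myapp/ {
#       proxy_pass http://myapp/;
#       proxy_redirect off;
#       proxy_next_upstream error timeout invalid_header http_500;
#       proxy_connect_timeout 1s;
#   }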
def should_replace(new_content, current_location):
"""Test to see if current content should be updated"""
new_hash = hashlib.sha1()
new_hash.update(new_content.getvalue())
new_hash = new_hash.hexdigest()
replace = False
try:
with open(current_location) as current:
current_hash = hashlib.sha1()
current_hash.update(current.read())
current_hash = current_hash.hexdigest()
if current_hash != new_hash:
replace = True
except IOError:
print("Exception while checking '{0}' content".format(current_location), file=sys.stderr)
replace = True
return replace
def replace_file(new_content, current_location):
"""Atomically replaces file if new_content differs from
content at current_location"""
if should_replace(new_content, current_location):
abs_path = os.path.abspath(current_location)
current_dir, filename = os.path.split(abs_path)
tmp_filename = '{0}.{1}'.format(filename, time.time())
tmp_path = os.path.join(current_dir, tmp_filename)
try:
with open(tmp_path, 'w') as tmp:
tmp.write(new_content.getvalue())
os.rename(tmp_path, abs_path)
except IOError:
print("Failed to replace '{0}'".format(abs_path), file=sys.stderr)
return False
return True
return False
def update_conf(requests, conf_dir):
"""Updates configuration files that are intended for
nginx"""
canonical = canonical_request_dict(requests)
locations = prepare_locations(canonical)
upstreams = prepare_upstreams(canonical)
locations_path = os.path.join(conf_dir, 'zorrillo-locations.conf')
upstreams_path = os.path.join(conf_dir, 'zorrillo-upstreams.conf')
locations_replaced = replace_file(locations, locations_path)
upstreams_replaced = replace_file(upstreams, upstreams_path)
if locations_replaced or upstreams_replaced:
try:
subprocess.check_call(['sudo', '/usr/sbin/nginx', '-s', 'reload'])
print('NGINX reloaded')
except (subprocess.CalledProcessError, OSError):
print('Reloading NGINX failed', file=sys.stderr)
def nginx_updater(requests, conf_dir):
"""Forked process that is responsible for updating nginx
with the current requests"""
newpid = os.fork()
if newpid == 0:
print('Contemplating {0} requests'.format(len(requests)))
update_conf(requests, conf_dir)
os._exit(0)
else:
return newpid
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='udp listener that updates nginx configuration')
parser.add_argument('-d', '--directory', help='Directory to maintain nginx configuration files in', required=True)
parser.add_argument('-p', '--port', help='Port to listen for UDP requests on', required=True)
args = parser.parse_args()
args.port = int(args.port)
udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_sock.bind(("0.0.0.0", args.port))
updater_pid = 0
state = State()
start_time = time.time()
print('Starting up, observing requests for 60s before applying updates...')
# Sit in a loop aggregating requests and forking a process
# to update the nginx configuration
while True:
input_ready,_,__ = select([udp_sock], [], [], 1.0)
if input_ready:
data, addr = udp_sock.recvfrom(1024)
request = parse_buffer(data)
if request:
state.add_request(request)
state.gc_requests()
if updater_pid == 0 and state.modified() and (time.time() - start_time > 60):
updater_pid = nginx_updater(state.get_requests(), args.directory)
if updater_pid:
pid, err_code = os.waitpid(updater_pid, os.WNOHANG)
if pid != 0:
if err_code != 0:
print('updater exited with {0}'.format(err_code), file=sys.stderr)
updater_pid = 0
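# Sketch of the datagram a service would send so this updater registers it. The
# "<service>;<host>;<port>" payload format comes from parse_buffer() above; the
# listener host/port used here are placeholders:
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto("/myapp/;10.0.0.5;8080", ("proxy-host.example", 9000))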
|
|
import json
import os
from datetime import datetime
from tempfile import NamedTemporaryFile
from time import strftime, strptime
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.core.files.storage import get_storage_class
from django.core.urlresolvers import reverse
from django.http import HttpResponseForbidden,\
HttpResponseRedirect, HttpResponseNotFound, HttpResponseBadRequest,\
HttpResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.template import RequestContext
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from onadata.apps.main.models import UserProfile, MetaData, TokenStorageModel
from onadata.apps.logger.models import XForm, Attachment
from onadata.apps.logger.views import download_jsonform
from onadata.apps.viewer.models.data_dictionary import DataDictionary
from onadata.apps.viewer.models.export import Export
from onadata.apps.viewer.pandas_mongo_bridge import NoRecordsFoundError
from onadata.apps.viewer.tasks import create_async_export
from onadata.libs.utils.common_tags import SUBMISSION_TIME
from onadata.libs.utils.export_tools import generate_export,\
should_create_new_export
from onadata.libs.utils.export_tools import kml_export_data
from onadata.libs.utils.export_tools import newset_export_for
from onadata.libs.utils.image_tools import image_url
from onadata.libs.utils.google import google_export_xls, redirect_uri
from onadata.libs.utils.log import audit_log, Actions
from onadata.libs.utils.logger_tools import response_with_mimetype_and_name,\
disposition_ext_and_date
from onadata.libs.utils.viewer_tools import create_attachments_zipfile,\
export_def_from_filename
from onadata.libs.utils.user_auth import has_permission, get_xform_and_perms,\
helper_auth_helper
from xls_writer import XlsWriter
from onadata.libs.utils.chart_tools import build_chart_data
def encode(time_str):
time = strptime(time_str, "%Y_%m_%d_%H_%M_%S")
return strftime("%Y-%m-%d %H:%M:%S", time)
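# Example: encode("2014_03_01_13_45_00") -> "2014-03-01 13:45:00"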
def dd_for_params(id_string, owner, request):
start = end = None
dd = DataDictionary.objects.get(id_string=id_string,
user=owner)
if request.GET.get('start'):
try:
start = encode(request.GET['start'])
except ValueError:
# bad format
return [False,
HttpResponseBadRequest(
_(u'Start time format must be YY_MM_DD_hh_mm_ss'))
]
dd.instances_for_export = \
lambda d: d.instances.filter(date_created__gte=start)
if request.GET.get('end'):
try:
end = encode(request.GET['end'])
except ValueError:
# bad format
return [False,
HttpResponseBadRequest(
_(u'End time format must be YY_MM_DD_hh_mm_ss'))
]
dd.instances_for_export = \
lambda d: d.instances.filter(date_created__lte=end)
if start and end:
dd.instances_for_export = \
lambda d: d.instances.filter(date_created__lte=end,
date_created__gte=start)
return [True, dd]
def parse_label_for_display(pi, xpath):
label = pi.data_dictionary.get_label(xpath)
if not type(label) == dict:
label = {'Unknown': label}
return label.items()
def average(values):
if len(values):
return sum(values, 0.0) / len(values)
return None
def map_view(request, username, id_string, template='map.html'):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
context.content_user = owner
context.xform = xform
context.profile, created = UserProfile.objects.get_or_create(user=owner)
context.form_view = True
context.jsonform_url = reverse(download_jsonform,
kwargs={"username": username,
"id_string": id_string})
context.enketo_edit_url = reverse('edit_data',
kwargs={"username": username,
"id_string": id_string,
"data_id": 0})
context.enketo_add_url = reverse('enter_data',
kwargs={"username": username,
"id_string": id_string})
context.enketo_add_with_url = reverse('add_submission_with',
kwargs={"username": username,
"id_string": id_string})
context.mongo_api_url = reverse('mongo_view_api',
kwargs={"username": username,
"id_string": id_string})
context.delete_data_url = reverse('delete_data',
kwargs={"username": username,
"id_string": id_string})
context.mapbox_layer = MetaData.mapbox_layer_upload(xform)
audit = {
"xform": xform.id_string
}
audit_log(Actions.FORM_MAP_VIEWED, request.user, owner,
_("Requested map on '%(id_string)s'.")
% {'id_string': xform.id_string}, audit, request)
return render_to_response(template, context_instance=context)
def map_embed_view(request, username, id_string):
return map_view(request, username, id_string, template='map_embed.html')
def add_submission_with(request, username, id_string):
import uuid
import requests
from django.template import loader, Context
from dpath import util as dpath_util
from dict2xml import dict2xml
def geopoint_xpaths(username, id_string):
d = DataDictionary.objects.get(
user__username=username, id_string=id_string)
return [e.get_abbreviated_xpath()
for e in d.get_survey_elements()
if e.bind.get(u'type') == u'geopoint']
value = request.GET.get('coordinates')
xpaths = geopoint_xpaths(username, id_string)
xml_dict = {}
for path in xpaths:
dpath_util.new(xml_dict, path, value)
context = {'username': username,
'id_string': id_string,
'xml_content': dict2xml(xml_dict)}
instance_xml = loader.get_template("instance_add.xml")\
.render(Context(context))
url = settings.ENKETO_API_INSTANCE_IFRAME_URL
return_url = reverse('thank_you_submission',
kwargs={"username": username, "id_string": id_string})
if settings.DEBUG:
openrosa_url = "https://dev.formhub.org/{}".format(username)
else:
openrosa_url = request.build_absolute_uri("/{}".format(username))
payload = {'return_url': return_url,
'form_id': id_string,
'server_url': openrosa_url,
'instance': instance_xml,
'instance_id': uuid.uuid4().hex}
r = requests.post(url, data=payload,
auth=(settings.ENKETO_API_TOKEN, ''), verify=False)
return HttpResponse(r.text, mimetype='application/json')
def thank_you_submission(request, username, id_string):
return HttpResponse("Thank You")
def data_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
query = request.GET.get("query")
extension = export_type
# check if we should force xlsx
force_xlsx = request.GET.get('xls') != 'true'
if export_type == Export.XLS_EXPORT and force_xlsx:
extension = 'xlsx'
elif export_type in [Export.CSV_ZIP_EXPORT, Export.SAV_ZIP_EXPORT]:
extension = 'zip'
audit = {
"xform": xform.id_string,
"export_type": export_type
}
# check if we need to re-generate,
# we always re-generate if a filter is specified
if should_create_new_export(xform, export_type) or query or\
'start' in request.GET or 'end' in request.GET:
format_date_for_mongo = lambda x, datetime: datetime.strptime(
x, '%y_%m_%d_%H_%M_%S').strftime('%Y-%m-%dT%H:%M:%S')
# check for start and end params
if 'start' in request.GET or 'end' in request.GET:
if not query:
query = '{}'
query = json.loads(query)
query[SUBMISSION_TIME] = {}
try:
if request.GET.get('start'):
query[SUBMISSION_TIME]['$gte'] = format_date_for_mongo(
request.GET['start'], datetime)
if request.GET.get('end'):
query[SUBMISSION_TIME]['$lte'] = format_date_for_mongo(
request.GET['end'], datetime)
except ValueError:
return HttpResponseBadRequest(
_("Dates must be in the format YY_MM_DD_hh_mm_ss"))
else:
query = json.dumps(query)
try:
export = generate_export(
export_type, extension, username, id_string, None, query)
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created %(export_type)s export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
'export_type': export_type.upper()
}, audit, request)
except NoRecordsFoundError:
return HttpResponseNotFound(_("No records found to export"))
else:
export = newset_export_for(xform, export_type)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded %(export_type)s export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
'export_type': export_type.upper()
}, audit, request)
if not export.filename:
# tends to happen when using newset_export_for.
return HttpResponseNotFound("File does not exist!")
# get extension from file_path, exporter could modify to
# xlsx if it exceeds limits
path, ext = os.path.splitext(export.filename)
ext = ext[1:]
if request.GET.get('raw'):
id_string = None
response = response_with_mimetype_and_name(
Export.EXPORT_MIMES[ext], id_string, extension=ext,
file_path=export.filepath)
return response
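# Illustrative query built by the start/end handling above for
# ?start=14_03_01_00_00_00&end=14_03_31_23_59_59. SUBMISSION_TIME is the mongo
# field name imported from common_tags; "_submission_time" is an assumption
# about its value:
#
#   {"_submission_time": {"$gte": "2014-03-01T00:00:00",
#                         "$lte": "2014-03-31T23:59:59"}}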
@require_POST
def create_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
query = request.POST.get("query")
force_xlsx = request.POST.get('xls') != 'true'
# export options
group_delimiter = request.POST.get("options[group_delimiter]", '/')
if group_delimiter not in ['.', '/']:
return HttpResponseBadRequest(
_("%s is not a valid delimiter" % group_delimiter))
# default is True; when options[dont_split_select_multiples] is "yes",
# split_select_multiples becomes False
split_select_multiples = request.POST.get(
"options[dont_split_select_multiples]", "no") == "no"
binary_select_multiples = getattr(settings, 'BINARY_SELECT_MULTIPLES',
False)
options = {
'group_delimiter': group_delimiter,
'split_select_multiples': split_select_multiples,
'binary_select_multiples': binary_select_multiples
}
try:
create_async_export(xform, export_type, query, force_xlsx, options)
except Export.ExportTypeError:
return HttpResponseBadRequest(
_("%s is not a valid export type" % export_type))
else:
audit = {
"xform": xform.id_string,
"export_type": export_type
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created %(export_type)s export on '%(id_string)s'.") %
{
'export_type': export_type.upper(),
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(reverse(
export_list,
kwargs={
"username": username,
"id_string": id_string,
"export_type": export_type
})
)
def _get_google_token(request, redirect_to_url):
token = None
if request.user.is_authenticated():
try:
ts = TokenStorageModel.objects.get(id=request.user)
except TokenStorageModel.DoesNotExist:
pass
else:
token = ts.token
elif request.session.get('access_token'):
token = request.session.get('access_token')
if token is None:
request.session["google_redirect_url"] = redirect_to_url
return HttpResponseRedirect(redirect_uri)
return token
def export_list(request, username, id_string, export_type):
if export_type == Export.GDOC_EXPORT:
redirect_url = reverse(
export_list,
kwargs={
'username': username, 'id_string': id_string,
'export_type': export_type})
token = _get_google_token(request, redirect_url)
if isinstance(token, HttpResponse):
return token
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
if should_create_new_export(xform, export_type):
try:
create_async_export(
xform, export_type, query=None, force_xlsx=True)
except Export.ExportTypeError:
return HttpResponseBadRequest(
_("%s is not a valid export type" % export_type))
context = RequestContext(request)
context.username = owner.username
context.xform = xform
# TODO: better output e.g. Excel instead of XLS
context.export_type = export_type
context.export_type_name = Export.EXPORT_TYPE_DICT[export_type]
exports = Export.objects.filter(xform=xform, export_type=export_type)\
.order_by('-created_on')
context.exports = exports
return render_to_response('export_list.html', context_instance=context)
def export_progress(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
# find the export entry in the db
export_ids = request.GET.getlist('export_ids')
exports = Export.objects.filter(xform=xform, id__in=export_ids)
statuses = []
for export in exports:
status = {
'complete': False,
'url': None,
'filename': None,
'export_id': export.id
}
if export.status == Export.SUCCESSFUL:
status['url'] = reverse(export_download, kwargs={
'username': owner.username,
'id_string': xform.id_string,
'export_type': export.export_type,
'filename': export.filename
})
status['filename'] = export.filename
if export.export_type == Export.GDOC_EXPORT and \
export.export_url is None:
redirect_url = reverse(
export_progress,
kwargs={
'username': username, 'id_string': id_string,
'export_type': export_type})
token = _get_google_token(request, redirect_url)
if isinstance(token, HttpResponse):
return token
status['url'] = None
try:
url = google_export_xls(
export.full_filepath, xform.title, token, blob=True)
except Exception, e:
status['error'] = True
status['message'] = e.message
else:
export.export_url = url
export.save()
status['url'] = url
# mark as complete if it either failed or succeeded but NOT pending
if export.status == Export.SUCCESSFUL \
or export.status == Export.FAILED:
status['complete'] = True
statuses.append(status)
return HttpResponse(
json.dumps(statuses), mimetype='application/json')
def export_download(request, username, id_string, export_type, filename):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
# find the export entry in the db
export = get_object_or_404(Export, xform=xform, filename=filename)
if export_type == Export.GDOC_EXPORT and export.export_url is not None:
return HttpResponseRedirect(export.export_url)
ext, mime_type = export_def_from_filename(export.filename)
audit = {
"xform": xform.id_string,
"export_type": export.export_type
}
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded %(export_type)s export '%(filename)s' "
"on '%(id_string)s'.") %
{
'export_type': export.export_type.upper(),
'filename': export.filename,
'id_string': xform.id_string,
}, audit, request)
if request.GET.get('raw'):
id_string = None
default_storage = get_storage_class()()
if not isinstance(default_storage, FileSystemStorage):
return HttpResponseRedirect(default_storage.url(export.filepath))
basename = os.path.splitext(export.filename)[0]
response = response_with_mimetype_and_name(
mime_type, name=basename, extension=ext,
file_path=export.filepath, show_date=False)
return response
@require_POST
def delete_export(request, username, id_string, export_type):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
export_id = request.POST.get('export_id')
# find the export entry in the db
export = get_object_or_404(Export, id=export_id)
export.delete()
audit = {
"xform": xform.id_string,
"export_type": export.export_type
}
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Deleted %(export_type)s export '%(filename)s'"
" on '%(id_string)s'.") %
{
'export_type': export.export_type.upper(),
'filename': export.filename,
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(reverse(
export_list,
kwargs={
"username": username,
"id_string": id_string,
"export_type": export_type
}))
def zip_export(request, username, id_string):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
if request.GET.get('raw'):
id_string = None
attachments = Attachment.objects.filter(instance__xform=xform)
zip_file = create_attachments_zipfile(attachments)
audit = {
"xform": xform.id_string,
"export_type": Export.ZIP_EXPORT
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created ZIP export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded ZIP export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
if request.GET.get('raw'):
id_string = None
response = response_with_mimetype_and_name('zip', id_string,
file_path=zip_file,
use_local_filesystem=True)
return response
def kml_export(request, username, id_string):
# read the locations from the database
context = RequestContext(request)
context.message = "HELLO!!"
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
helper_auth_helper(request)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context.data = kml_export_data(id_string, user=owner)
response = \
render_to_response("survey.kml", context_instance=context,
mimetype="application/vnd.google-earth.kml+xml")
response['Content-Disposition'] = \
disposition_ext_and_date(id_string, 'kml')
audit = {
"xform": xform.id_string,
"export_type": Export.KML_EXPORT
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created KML export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
# log download as well
audit_log(
Actions.EXPORT_DOWNLOADED, request.user, owner,
_("Downloaded KML export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return response
def google_xls_export(request, username, id_string):
token = None
if request.user.is_authenticated():
try:
ts = TokenStorageModel.objects.get(id=request.user)
except TokenStorageModel.DoesNotExist:
pass
else:
token = ts.token
elif request.session.get('access_token'):
token = request.session.get('access_token')
if token is None:
request.session["google_redirect_url"] = reverse(
google_xls_export,
kwargs={'username': username, 'id_string': id_string})
return HttpResponseRedirect(redirect_uri)
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
valid, dd = dd_for_params(id_string, owner, request)
if not valid:
return dd
ddw = XlsWriter()
tmp = NamedTemporaryFile(delete=False)
ddw.set_file(tmp)
ddw.set_data_dictionary(dd)
temp_file = ddw.save_workbook_to_file()
temp_file.close()
url = google_export_xls(tmp.name, xform.title, token, blob=True)
os.unlink(tmp.name)
audit = {
"xform": xform.id_string,
"export_type": "google"
}
audit_log(
Actions.EXPORT_CREATED, request.user, owner,
_("Created Google Docs export on '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return HttpResponseRedirect(url)
def data_view(request, username, id_string):
owner = get_object_or_404(User, username=username)
xform = get_object_or_404(XForm, id_string=id_string, user=owner)
if not has_permission(xform, owner, request):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
context.owner = owner
context.xform = xform
audit = {
"xform": xform.id_string,
}
audit_log(
Actions.FORM_DATA_VIEWED, request.user, owner,
_("Requested data view for '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return render_to_response("data_view.html", context_instance=context)
def attachment_url(request, size='medium'):
media_file = request.GET.get('media_file')
# TODO: how to make sure we have the right media file,
# this assumes duplicates are the same file
result = Attachment.objects.filter(media_file=media_file)[0:1]
if result.count() == 0:
return HttpResponseNotFound(_(u'Attachment not found'))
attachment = result[0]
if not attachment.mimetype.startswith('image'):
return redirect(attachment.media_file.url)
try:
media_url = image_url(attachment, size)
except:
# TODO: log this somewhere
# image not found, 404, S3ResponseError timeouts
pass
else:
if media_url:
return redirect(media_url)
return HttpResponseNotFound(_(u'Error: Attachment not found'))
def instance(request, username, id_string):
xform, is_owner, can_edit, can_view = get_xform_and_perms(
username, id_string, request)
# no access
if not (xform.shared_data or can_view or
request.session.get('public_link') == xform.uuid):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
audit = {
"xform": xform.id_string,
}
audit_log(
Actions.FORM_DATA_VIEWED, request.user, xform.user,
_("Requested instance view for '%(id_string)s'.") %
{
'id_string': xform.id_string,
}, audit, request)
return render_to_response('instance.html', {
'username': username,
'id_string': id_string,
'xform': xform,
'can_edit': can_edit
}, context_instance=context)
def charts(request, username, id_string):
xform, is_owner, can_edit, can_view = get_xform_and_perms(
username, id_string, request)
# no access
if not (xform.shared_data or can_view or
request.session.get('public_link') == xform.uuid):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
try:
lang_index = int(request.GET.get('lang', 0))
except ValueError:
lang_index = 0
try:
page = int(request.GET.get('page', 0))
except ValueError:
page = 0
else:
page = max(page - 1, 0)
summaries = build_chart_data(xform, lang_index, page)
if request.is_ajax():
template = 'charts_snippet.html'
else:
template = 'charts.html'
return render_to_response(template, {
'xform': xform,
'summaries': summaries,
'page': page + 1
}, context_instance=context)
def stats_tables(request, username, id_string):
xform, is_owner, can_edit, can_view = get_xform_and_perms(
username, id_string, request)
# no access
if not (xform.shared_data or can_view or
request.session.get('public_link') == xform.uuid):
return HttpResponseForbidden(_(u'Not shared.'))
context = RequestContext(request)
return render_to_response('stats_tables.html', {
'xform': xform
}, context_instance=context)
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import ctypes
import heapq
import threading
import time
import queue
import atexit
from . import lib_openal as al
from . import lib_alc as alc
from pyglet.media import MediaException, MediaEvent, AbstractAudioPlayer, \
AbstractAudioDriver, AbstractListener, MediaThread
import pyglet
_debug = pyglet.options['debug_media']
_debug_buffers = pyglet.options.get('debug_media_buffers', False)
class OpenALException(MediaException):
pass
# TODO move functions into context/driver?
def _split_nul_strings(s):
# NUL-separated list of strings, double-NUL-terminated.
nul = False
i = 0
while True:
if s[i] == '\0':
if nul:
break
else:
nul = True
else:
nul = False
i += 1
s = s[:i - 1]
return [_f for _f in [ss.strip() for ss in s.split('\0')] if _f]
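# Illustrative example (hypothetical device names): a raw value such as
# 'OpenAL Soft\x00Generic Software\x00\x00' is truncated at the double NUL
# and split into ['OpenAL Soft', 'Generic Software'].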
format_map = {
(1, 8): al.AL_FORMAT_MONO8,
(1, 16): al.AL_FORMAT_MONO16,
(2, 8): al.AL_FORMAT_STEREO8,
(2, 16): al.AL_FORMAT_STEREO16,
}
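# For example, 16-bit stereo audio resolves to AL_FORMAT_STEREO16 via
# format_map[(2, 16)]; unsupported layouts raise KeyError, which
# OpenALAudioPlayer.__init__ converts into an OpenALException below.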
class OpenALWorker(MediaThread):
# Minimum size to bother refilling (bytes)
_min_write_size = 512
# Time to wait if there are players, but they're all full.
_nap_time = 0.05
# Time to wait if there are no players.
_sleep_time = None
def __init__(self):
super(OpenALWorker, self).__init__()
self.players = set()
def run(self):
while True:
# This is a big lock, but ensures a player is not deleted while
# we're processing it -- this saves on extra checks in the
# player's methods that would otherwise have to check that it's
# still alive.
self.condition.acquire()
if self.stopped:
self.condition.release()
break
sleep_time = -1
# Refill player with least write_size
if self.players:
player = None
write_size = 0
for p in self.players:
s = p.get_write_size()
if s > write_size:
player = p
write_size = s
if write_size > self._min_write_size:
player.refill(write_size)
else:
sleep_time = self._nap_time
else:
sleep_time = self._sleep_time
self.condition.release()
if sleep_time != -1:
self.sleep(sleep_time)
else:
# We MUST sleep, or we will starve pyglet's main loop. It
# also looks like if we don't sleep enough, we'll starve out
# various updates that stop us from properly removing players
# that should be removed.
time.sleep(self._nap_time)
def add(self, player):
self.condition.acquire()
self.players.add(player)
self.condition.notify()
self.condition.release()
def remove(self, player):
self.condition.acquire()
if player in self.players:
self.players.remove(player)
self.condition.notify()
self.condition.release()
class OpenALBufferPool(object):
"""At least Mac OS X doesn't free buffers when a source is deleted; it just
detaches them from the source. So keep our own recycled queue.
"""
def __init__(self):
self._buffers = [] # list of free buffer names
self._sources = {} # { sourceId : [ buffer names used ] }
def getBuffer(self, alSource):
"""Convenience for returning one buffer name"""
return self.getBuffers(alSource, 1)[0]
def getBuffers(self, alSource, i):
"""Returns an array containing i buffer names. The returned list must
not be modified in any way, and may get changed by subsequent calls to
getBuffers.
"""
assert context.lock.locked()
buffs = []
try:
while i > 0:
b = self._buffers.pop()
if not al.alIsBuffer(b):
# Protect against implementations that DO free buffers
# when they delete a source - carry on.
if _debug_buffers:
print("Found a bad buffer")
continue
buffs.append(b)
i -= 1
except IndexError:
while i > 0:
buffer_name = al.ALuint()
al.alGenBuffers(1, buffer_name)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("GEN BUFFERS: " + str(error)))
buffs.append(buffer_name)
i -= 1
alSourceVal = alSource.value
if alSourceVal not in self._sources:
self._sources[alSourceVal] = buffs
else:
self._sources[alSourceVal].extend(buffs)
return buffs
def deleteSource(self, alSource):
"""Delete a source pointer (self._al_source) and free its buffers"""
assert context.lock.locked()
if alSource.value in self._sources:
for buffer in self._sources.pop(alSource.value):
self._buffers.append(buffer)
def dequeueBuffer(self, alSource, buffer):
"""A buffer has finished playing, free it."""
assert context.lock.locked()
sourceBuffs = self._sources[alSource.value]
for i, b in enumerate(sourceBuffs):
if buffer == b.value:
self._buffers.append(sourceBuffs.pop(i))
break
else:
            # No matching buffer was found; this should not happen.
if _debug_buffers:
print(("Bad buffer: " + str(buffer)))
def delete(self):
"""Delete all sources and free all buffers"""
assert context.lock.locked()
for source, buffers in list(self._sources.items()):
al.alDeleteSources(1, ctypes.byref(ctypes.c_uint(source)))
for b in buffers:
if not al.alIsBuffer(b):
# Protect against implementations that DO free buffers
# when they delete a source - carry on.
if _debug_buffers:
print("Found a bad buffer")
continue
al.alDeleteBuffers(1, ctypes.byref(b))
for b in self._buffers:
al.alDeleteBuffers(1, ctypes.byref(b))
self._buffers = []
self._sources = {}
bufferPool = OpenALBufferPool()
class OpenALAudioPlayer(AbstractAudioPlayer):
#: Minimum size of an OpenAL buffer worth bothering with, in bytes
_min_buffer_size = 512
#: Aggregate (desired) buffer size, in bytes
_ideal_buffer_size = 44800
def __init__(self, source_group, player):
super(OpenALAudioPlayer, self).__init__(source_group, player)
audio_format = source_group.audio_format
try:
self._al_format = format_map[(audio_format.channels,
audio_format.sample_size)]
except KeyError:
raise OpenALException('Unsupported audio format.')
self._al_source = al.ALuint()
al.alGenSources(1, self._al_source)
# Lock policy: lock all instance vars (except constants). (AL calls
# are locked on context).
self._lock = threading.RLock()
# Cursor positions, like DSound and Pulse drivers, refer to a
# hypothetical infinite-length buffer. Cursor units are in bytes.
# Cursor position of current (head) AL buffer
self._buffer_cursor = 0
# Estimated playback cursor position (last seen)
self._play_cursor = 0
# Cursor position of end of queued AL buffer.
self._write_cursor = 0
# List of currently queued buffer sizes (in bytes)
self._buffer_sizes = []
# List of currently queued buffer timestamps
self._buffer_timestamps = []
# Timestamp at end of last written buffer (timestamp to return in case
# of underrun)
self._underrun_timestamp = None
# List of (cursor, MediaEvent)
self._events = []
# Desired play state (True even if stopped due to underrun)
self._playing = False
# Has source group EOS been seen (and hence, event added to queue)?
self._eos = False
# OpenAL 1.0 timestamp interpolation: system time of current buffer
# playback (best guess)
if not context.have_1_1:
self._buffer_system_time = time.time()
self.refill(self._ideal_buffer_size)
def __del__(self):
try:
self.delete()
except:
pass
def delete(self):
if _debug:
print('OpenALAudioPlayer.delete()')
if not self._al_source:
return
context.worker.remove(self)
with self._lock:
with context.lock:
al.alDeleteSources(1, self._al_source)
bufferPool.deleteSource(self._al_source)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("DELETE ERROR: " + str(error)))
self._al_source = None
def play(self):
if self._playing:
return
if _debug:
print('OpenALAudioPlayer.play()')
self._playing = True
self._al_play()
if not context.have_1_1:
self._buffer_system_time = time.time()
context.worker.add(self)
def _al_play(self):
if _debug:
print('OpenALAudioPlayer._al_play()')
with context.lock:
state = al.ALint()
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
al.alSourcePlay(self._al_source)
def stop(self):
if not self._playing:
return
if _debug:
print('OpenALAudioPlayer.stop()')
self._pause_timestamp = self.get_time()
with context.lock:
al.alSourcePause(self._al_source)
self._playing = False
context.worker.remove(self)
def clear(self):
if _debug:
print('OpenALAudioPlayer.clear()')
with self._lock:
with context.lock:
al.alSourceStop(self._al_source)
self._playing = False
del self._events[:]
self._underrun_timestamp = None
self._buffer_timestamps = [None for _ in self._buffer_timestamps]
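                # Timestamps are blanked rather than dropped so that
                # _update_play_cursor() can distinguish a cleared source from
                # a genuine underrun (see the None check there).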
def _update_play_cursor(self):
if not self._al_source:
return
with self._lock:
with context.lock:
# Release spent buffers
processed = al.ALint()
al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
processed = processed.value
if _debug_buffers:
print(("Processed buffer count:", processed))
if processed:
buffers = (al.ALuint * processed)()
al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
error = al.alGetError()
if error != 0:
if _debug_buffers:
print(("Source unqueue error: " + str(error)))
else:
for b in buffers:
bufferPool.dequeueBuffer(self._al_source, b)
if processed:
if (len(self._buffer_timestamps) == processed
and self._buffer_timestamps[-1] is not None):
# Underrun, take note of timestamp.
# We check that the timestamp is not None, because otherwise
# our source could have been cleared.
self._underrun_timestamp = \
self._buffer_timestamps[-1] + \
self._buffer_sizes[-1] / \
float(self.source_group.audio_format.bytes_per_second)
self._buffer_cursor += sum(self._buffer_sizes[:processed])
del self._buffer_sizes[:processed]
del self._buffer_timestamps[:processed]
if not context.have_1_1:
self._buffer_system_time = time.time()
# Update play cursor using buffer cursor + estimate into current
# buffer
if context.have_1_1:
byte_offset = al.ALint()
with context.lock:
al.alGetSourcei(self._al_source, al.AL_BYTE_OFFSET, byte_offset)
if _debug:
print('Current offset in bytes:', byte_offset.value)
self._play_cursor = self._buffer_cursor + byte_offset.value
else:
# Interpolate system time past buffer timestamp
self._play_cursor = \
self._buffer_cursor + int(
(time.time() - self._buffer_system_time) * \
self.source_group.audio_format.bytes_per_second)
# Process events
while self._events and self._events[0][0] < self._play_cursor:
_, event = self._events.pop(0)
event._sync_dispatch_to_player(self.player)
def get_write_size(self):
with self._lock:
self._update_play_cursor()
write_size = self._ideal_buffer_size - \
(self._write_cursor - self._play_cursor)
if self._eos:
write_size = 0
if _debug:
print(("Write size {} bytes".format(write_size)))
return write_size
def refill(self, write_size):
if _debug:
print('refill', write_size)
with self._lock:
while write_size > self._min_buffer_size:
audio_data = self.source_group.get_audio_data(write_size)
if not audio_data:
self._eos = True
self._events.append(
(self._write_cursor, MediaEvent(0, 'on_eos')))
self._events.append(
(self._write_cursor, MediaEvent(0, 'on_source_group_eos')))
break
for event in audio_data.events:
cursor = self._write_cursor + event.timestamp * \
self.source_group.audio_format.bytes_per_second
self._events.append((cursor, event))
with context.lock:
buffer_name = bufferPool.getBuffer(self._al_source)
al.alBufferData(buffer_name,
self._al_format,
audio_data.data,
audio_data.length,
self.source_group.audio_format.sample_rate)
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("BUFFER DATA ERROR: " + str(error)))
al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buffer_name))
if _debug_buffers:
error = al.alGetError()
if error != 0:
print(("QUEUE BUFFER ERROR: " + str(error)))
self._write_cursor += audio_data.length
self._buffer_sizes.append(audio_data.length)
self._buffer_timestamps.append(audio_data.timestamp)
write_size -= audio_data.length
# Check for underrun stopping playback
if self._playing:
state = al.ALint()
with context.lock:
al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
if state.value != al.AL_PLAYING:
if _debug:
print('underrun')
al.alSourcePlay(self._al_source)
def get_time(self):
try:
buffer_timestamp = self._buffer_timestamps[0]
except IndexError:
return self._underrun_timestamp
if buffer_timestamp is None:
return None
return buffer_timestamp + \
(self._play_cursor - self._buffer_cursor) / \
float(self.source_group.audio_format.bytes_per_second)
def set_volume(self, volume):
volume = float(volume)
with context.lock:
al.alSourcef(self._al_source, al.AL_GAIN, max(0., volume))
def set_position(self, position):
x, y, z = list(map(float, position))
with context.lock:
al.alSource3f(self._al_source, al.AL_POSITION, x, y, z)
def set_min_distance(self, min_distance):
min_distance = float(min_distance)
with context.lock:
al.alSourcef(self._al_source, al.AL_REFERENCE_DISTANCE, min_distance)
def set_max_distance(self, max_distance):
max_distance = float(max_distance)
with context.lock:
al.alSourcef(self._al_source, al.AL_MAX_DISTANCE, max_distance)
def set_pitch(self, pitch):
pitch = float(pitch)
with context.lock:
al.alSourcef(self._al_source, al.AL_PITCH, max(0., pitch))
def set_cone_orientation(self, cone_orientation):
x, y, z = list(map(float, cone_orientation))
with context.lock:
al.alSource3f(self._al_source, al.AL_DIRECTION, x, y, z)
def set_cone_inner_angle(self, cone_inner_angle):
cone_inner_angle = float(cone_inner_angle)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_INNER_ANGLE, cone_inner_angle)
def set_cone_outer_angle(self, cone_outer_angle):
cone_outer_angle = float(cone_outer_angle)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_OUTER_ANGLE, cone_outer_angle)
def set_cone_outer_gain(self, cone_outer_gain):
cone_outer_gain = float(cone_outer_gain)
with context.lock:
al.alSourcef(self._al_source, al.AL_CONE_OUTER_GAIN, cone_outer_gain)
class OpenALDriver(AbstractAudioDriver):
_forward_orientation = (0, 0, -1)
_up_orientation = (0, 1, 0)
def __init__(self, device_name=None):
super(OpenALDriver, self).__init__()
# TODO devices must be enumerated on Windows, otherwise 1.0 context is
# returned.
self._device = alc.alcOpenDevice(device_name)
if not self._device:
raise Exception('No OpenAL device.')
self._context = alc.alcCreateContext(self._device, None)
alc.alcMakeContextCurrent(self._context)
self.have_1_1 = self.have_version(1, 1) and False
self.lock = threading.Lock()
self._listener = OpenALListener(self)
# Start worker thread
self.worker = OpenALWorker()
self.worker.start()
def create_audio_player(self, source_group, player):
assert self._device is not None, "Device was closed"
return OpenALAudioPlayer(source_group, player)
def delete(self):
self.worker.stop()
with self.lock:
alc.alcMakeContextCurrent(None)
alc.alcDestroyContext(self._context)
alc.alcCloseDevice(self._device)
self._device = None
def have_version(self, major, minor):
return (major, minor) <= self.get_version()
def get_version(self):
major = alc.ALCint()
minor = alc.ALCint()
alc.alcGetIntegerv(self._device, alc.ALC_MAJOR_VERSION,
ctypes.sizeof(major), major)
alc.alcGetIntegerv(self._device, alc.ALC_MINOR_VERSION,
ctypes.sizeof(minor), minor)
return major.value, minor.value
def get_extensions(self):
extensions = alc.alcGetString(self._device, alc.ALC_EXTENSIONS)
if pyglet.compat_platform == 'darwin' or pyglet.compat_platform.startswith('linux'):
return ctypes.cast(extensions, ctypes.c_char_p).value.split(b' ')
else:
return _split_nul_strings(extensions)
def have_extension(self, extension):
return extension in self.get_extensions()
def get_listener(self):
return self._listener
class OpenALListener(AbstractListener):
def __init__(self, driver):
self._driver = driver
def _set_volume(self, volume):
volume = float(volume)
with self._driver.lock:
al.alListenerf(al.AL_GAIN, volume)
self._volume = volume
def _set_position(self, position):
x, y, z = list(map(float, position))
with self._driver.lock:
al.alListener3f(al.AL_POSITION, x, y, z)
self._position = position
def _set_forward_orientation(self, orientation):
val = (al.ALfloat * 6)(*list(map(float, (orientation + self._up_orientation))))
with self._driver.lock:
al.alListenerfv(al.AL_ORIENTATION, val)
self._forward_orientation = orientation
def _set_up_orientation(self, orientation):
val = (al.ALfloat * 6)(*list(map(float, (self._forward_orientation + orientation))))
with self._driver.lock:
al.alListenerfv(al.AL_ORIENTATION, val)
self._up_orientation = orientation
context = None
def create_audio_driver(device_name=None):
global context
context = OpenALDriver(device_name)
if _debug:
print('OpenAL', context.get_version())
return context
def cleanup_audio_driver():
global context
if _debug:
print("Cleaning up audio driver")
if context:
with context.lock:
bufferPool.delete()
context.delete()
context = None
if _debug:
print("Cleaning done")
atexit.register(cleanup_audio_driver)
|
|
""":mod:`sqlalchemy_imageattach.entity` --- Image entities
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a short way to attach resizable images
to other object-relationally mapped entity classes.
For example, imagine a fictional entity named
:class:`User` that has :attr:`~User.picture` and
:attr:`~User.front_cover` attributes. There should then be two
image entities that subclass the :class:`Image` mixin::
class UserPicture(Base, Image):
'''User's profile picture.'''
user_id = Column(Integer, ForeignKey('User.id'), primary_key=True)
user = relationship('User')
__tablename__ = 'user_picture'
The image entity also has to inherit your own :func:`declarative_base()
<sqlalchemy.ext.declarative.declarative_base>` class (``Base`` in the example).
Assume there's also a :class:`UserFrontCover` defined in the same way.
Note that the class can override the :attr:`~Image.object_id` property.
Backend storages use it to identify images, e.g. as a filename or S3 key.
If the primary key of the image entity is an integer, :attr:`~Image.object_id`
automatically uses the primary key value by default, but it can be
overridden if needed, and must be implemented if the primary key is not an
integer or is a composite key.
There's also an :attr:`~Image.object_type` property. :class:`Image` provides
a default value for it as well: the table name with underscores
replaced by hyphens. You can override it too.
These :class:`Image` subclasses can be related to their
'parent' entity using the :func:`image_attachment()` function.
It's a specialized version of SQLAlchemy's built-in
:func:`~sqlalchemy.orm.relationship()` function, so you can pass
the same options as :func:`~sqlalchemy.orm.relationship()` takes::
class User(Base):
'''Users have their profile picture and front cover.'''
id = Column(Integer, primary_key=True)
picture = image_attachment('UserPicture')
front_cover = image_attachment('UserFrontCover')
__tablename__ = 'user'
That's it; you can now store the actual image files using the
:meth:`ImageSet.from_file()` or :meth:`ImageSet.from_blob()`
methods::
with store_context(store):
user = User()
with open('picture.jpg', 'rb') as f:
user.picture.from_blob(f.read())
with open('front_cover.jpg', 'rb') as f:
user.front_cover.from_file(f)
with session.begin():
session.add(user)
Or you can resize the image to make thumbnails using
:meth:`ImageSet.generate_thumbnail()` method::
with store_context(store):
user.picture.generate_thumbnail(ratio=0.5)
user.picture.generate_thumbnail(height=100)
user.front_cover.generate_thumbnail(width=500)
"""
from __future__ import division
import cgi
import io
import numbers
import shutil
from sqlalchemy import Column
from sqlalchemy.event import listen
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import relationship
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.instrumentation import instance_state
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.expression import exists, tuple_
from sqlalchemy.sql.functions import now
from sqlalchemy.types import Boolean, DateTime, Integer, String
from wand.image import Image as WandImage
from .context import current_store, get_current_store, store_context
from .file import ReusableFileProxy
from .store import Store
from .util import append_docstring_attributes
__all__ = 'VECTOR_TYPES', 'Image', 'ImageSet', 'image_attachment'
#: (:class:`collections.Set`) The set of vector image types.
VECTOR_TYPES = frozenset(['image/svg+xml', 'application/pdf'])
def image_attachment(*args, **kwargs):
"""The helper function, decorates raw
:func:`~sqlalchemy.orm.relationship()` function, sepcialized for
relationships between :class:`Image` subtypes.
It takes the same parameters as :func:`~sqlalchemy.orm.relationship()`.
:param \*args: the same arguments as
:func:`~sqlalchemy.orm.relationship()`
:param \*\*kwargs: the same keyword arguments as
:func:`~sqlalchemy.orm.relationship()`
:returns: the relationship property
:rtype: :class:`sqlalchemy.orm.properties.RelationshipProperty`
.. todo::
It currently doesn't support population (eager loading) on
:func:`image_attachment()` relationships yet.
       We seem to need to do some work on the attribute instrumentation
       implementation.
"""
kwargs.setdefault('lazy', 'dynamic')
kwargs.setdefault('query_class', ImageSet)
kwargs.setdefault('uselist', True)
kwargs.setdefault('cascade', 'all, delete-orphan')
return relationship(*args, **kwargs)
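# With the defaults above, a bare ``image_attachment('UserPicture')`` behaves
# like ``relationship('UserPicture', lazy='dynamic', query_class=ImageSet,
# uselist=True, cascade='all, delete-orphan')`` (``UserPicture`` being the
# hypothetical class from the module docstring).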
class Image(object):
"""The image of the particular size.
Note that it implements :meth:`__html__()` method, a de facto
standard special method for HTML templating. So you can simply use
it in HTML templates like:
.. sourcecode:: jinja
{{ user.profile.find_thumbnail(120) }}
The above template is equivalent to:
.. sourcecode:: html+jinja
{% with thumbnail = user.profile.find_thumbnail(120) %}
<img src="{{ thumbnail.locate() }}"
width="{{ thumbnail.width }}"
height="{{ thumbnail.height }}">
{% endwith %}
"""
@declared_attr
def object_type(cls):
"""(:class:`basestring`) The identifier string of the image type.
It uses :attr:`__tablename__` (which replaces underscores with
hyphens) by default, but can be overridden.
"""
try:
name = cls.__tablename__
except AttributeError:
raise NotImplementedError('object_type property has to be '
'implemented')
return name.replace('_', '-')
@property
def object_id(self):
"""(:class:`numbers.Integral`) The identifier number of the image.
        It uses the primary key if it's an integer, but can be overridden,
        and must be implemented when the primary key is not an integer or
        is a composite key.
"""
key_columns = inspect(type(self)).primary_key
pk = [c.name for c in key_columns if c.name not in ('width', 'height')]
if len(pk) == 1:
pk_value = getattr(self, pk[0])
if isinstance(pk_value, numbers.Integral):
return pk_value
raise NotImplementedError('object_id property has to be implemented')
#: (:class:`numbers.Integral`) The image's width.
width = Column('width', Integer, primary_key=True)
    #: (:class:`numbers.Integral`) The image's height.
height = Column('height', Integer, primary_key=True)
#: (:class:`basestring`) The mimetype of the image
#: e.g. ``'image/jpeg'``, ``'image/png'``.
mimetype = Column('mimetype', String(255), nullable=False)
#: (:class:`bool`) Whether it is original or resized.
original = Column('original', Boolean, nullable=False, default=False)
#: (:class:`datetime.datetime`) The created time.
created_at = Column('created_at',
DateTime(timezone=True), nullable=False, default=now())
@hybrid_property
def size(self):
"""(:class:`tuple`) The same to the pair of (:attr:`width`,
:attr:`height`).
"""
return self.width, self.height
@size.expression
def size(cls):
return tuple_(cls.width, cls.height)
@size.setter
def size(self, size):
self.width, self.height = size
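    # In practice this means ``image.size = (640, 480)`` assigns both columns
    # at once, while ``Image.size`` at the class level compares as a SQL tuple
    # (the numbers here are only an example).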
def make_blob(self, store=current_store):
"""Gets the byte string of the image from the ``store``.
:param store: the storage which contains the image.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the binary data of the image
:rtype: :class:`str`
"""
with self.open_file(store) as f:
return f.read()
def open_file(self, store=current_store, use_seek=False):
"""Opens the file-like object which is a context manager
(that means it can used for :keyword:`with` statement).
If ``use_seek`` is ``True`` (though ``False`` by default)
it guarentees the returned file-like object is also seekable
(provides :meth:`~file.seek()` method).
:param store: the storage which contains image files.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the file-like object of the image, which is a context
manager (plus, also seekable only if ``use_seek``
is ``True``)
:rtype: :class:`file`,
:class:`~sqlalchemy_imageattach.file.FileProxy`,
file-like object
"""
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
return store.open(self, use_seek)
def locate(self, store=current_store):
"""Gets the URL of the image from the ``store``.
:param store: the storage which contains the image.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the url of the image
:rtype: :class:`basestring`
"""
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
return store.locate(self)
def __html__(self):
u = cgi.escape(self.locate())
return '<img src="{0}" width="{1}" height="{2}">'.format(u, *self.size)
__doc__ = append_docstring_attributes(
__doc__,
dict(
(k, v) for k, v in locals().items() if isinstance(v, declared_attr)
)
)
class NoopContext(object):
"""Null context manager that does nothing."""
__slots__ = 'object_',
def __init__(self, object_):
self.object_ = object_
def __enter__(self, *args, **kwargs):
return self.object_
def __exit__(self, *args, **kwargs):
pass
class ImageSet(Query):
"""The subtype of :class:`~sqlalchemy.orm.query.Query` specialized
for :class:`Image`. It provides more methods and properties over
:class:`~sqlalchemy.orm.query.Query`.
Note that it implements :meth:`__html__()` method, a de facto
standard special method for HTML templating. So you can simply use
it in Jinja2 like:
.. sourcecode:: jinja
{{ user.profile }}
instead of:
.. sourcecode:: html+jinja
<img src="{{ user.profile|permalink }}"
width="{{ user.profile.original.width }}"
height="{{ user.profile.original.height }}">
"""
    #: (:class:`collections.MutableSet`) The set of instances whose image
    #: files have been stored but whose transaction hasn't been committed yet.
    #: If the transaction fails and rolls back, the image files in the set
    #: are deleted from the storage again.
_stored_images = set()
    #: (:class:`collections.MutableSet`) The set of instances marked
    #: as deleted. If the ongoing transaction is committed successfully,
    #: the actual files in the storages will be deleted as well.
    #: If the transaction fails and rolls back, the image files won't
    #: be deleted and the set will simply be cleared.
_deleted_images = set()
@classmethod
def _mark_image_file_stored(cls, mapper, connection, target):
"""When the session flushes, stores actual image files into
the storage. Note that these files could be deleted back
if the ongoing transaction has done rollback. See also
:meth:`_delete_image_file()`.
"""
try:
file_ = target.file
except AttributeError:
raise TypeError('sqlalchemy_imageattach.entity.Image which is '
'to be inserted must have file to store')
try:
try:
store = target.store
except AttributeError:
raise TypeError('sqlalchemy_imageattach.entity.Image which is '
'to be inserted must have store for the file')
store.store(target, file_)
cls._stored_images.add((target, store))
del target.file, target.store
finally:
file_.close()
@classmethod
def _mark_image_file_deleted(cls, mapper, connection, target):
"""When the session flushes, marks images as deleted.
The files of this marked images will be actually deleted
in the image storage when the ongoing transaction succeeds.
If it fails the :attr:`_deleted_images` queue will be just
empty.
"""
cls._deleted_images.add((target, get_current_store()))
@classmethod
def _images_failed(cls, session, previous_transaction):
"""Deletes the files of :attr:`_stored_images` back and clears
the :attr:`_stored_images` and :attr:`_deleted_images` set
when the ongoing transaction has done rollback.
"""
for image, store in cls._stored_images:
store.delete(image)
cls._stored_images.clear()
cls._deleted_images.clear()
@classmethod
def _images_succeeded(cls, session):
"""Clears the :attr:`_stored_images` set and deletes actual
files that are marked as deleted in the storage
if the ongoing transaction has committed.
"""
for image, store in cls._deleted_images:
for stored_image, _ in cls._stored_images:
if stored_image.object_type == image.object_type and \
stored_image.object_id == image.object_id and \
stored_image.width == image.width and \
stored_image.height == image.height and \
stored_image.mimetype == image.mimetype:
break
else:
store.delete(image)
cls._stored_images.clear()
cls._deleted_images.clear()
def from_raw_file(self, raw_file, store=current_store, size=None,
mimetype=None, original=True):
"""Similar to :meth:`from_file()` except it's lower than that.
It assumes that ``raw_file`` is readable and seekable while
:meth:`from_file()` only assumes the file is readable.
Also it doesn't make any in-memory buffer while
:meth:`from_file()` always makes an in-memory buffer and copy
the file into the buffer.
If ``size`` and ``mimetype`` are passed, it won't try to read
image and will use these values instead.
It's used for implementing :meth:`from_file()` and
:meth:`from_blob()` methods that are higher than it.
:param raw_file: the seekable and readable file of the image
:type raw_file: file-like object, :class:`file`
:param store: the storage to store the file.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:param size: an optional size of the image.
automatically detected if it's omitted
:type size: :class:`tuple`
:param mimetype: an optional mimetype of the image.
automatically detected if it's omitted
:type mimetype: :class:`basestring`
:param original: an optional flag which represents whether
it is an original image or not.
                         default is ``True`` (meaning original)
:type original: :class:`bool`
:returns: the created image instance
:rtype: :class:`Image`
"""
cls = self.column_descriptions[0]['type']
if not (isinstance(cls, type) and issubclass(cls, Image)):
raise TypeError('the first entity must be a subtype of '
'sqlalchemy_imageattach.entity.Image')
if original and self.session:
if store is current_store:
for existing in self:
self.remove(existing)
self.session.flush()
else:
with store_context(store):
for existing in self:
self.remove(existing)
self.session.flush()
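        # When ``original`` is true, the loops above purge every previously
        # stored image (including generated thumbnails), so stale sizes don't
        # survive alongside the new image appended below.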
if size is None or mimetype is None:
with WandImage(file=raw_file) as wand:
size = size or wand.size
mimetype = mimetype or wand.mimetype
if mimetype.startswith('image/x-'):
mimetype = 'image/' + mimetype[8:]
image = cls(size=size, mimetype=mimetype, original=original)
raw_file.seek(0)
image.file = raw_file
image.store = store
self.append(image)
return image
def from_blob(self, blob, store=current_store):
"""Stores the ``blob`` (byte string) for the image
into the ``store``.
:param blob: the byte string for the image
:type blob: :class:`str`
:param store: the storage to store the image data.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the created image instance
:rtype: :class:`Image`
"""
data = io.BytesIO(blob)
return self.from_raw_file(data, store, original=True)
def from_file(self, file, store=current_store):
"""Stores the ``file`` for the image into the ``store``.
:param file: the readable file of the image
:type file: file-like object, :class:`file`
:param store: the storage to store the file.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the created image instance
:rtype: :class:`Image`
"""
data = io.BytesIO()
shutil.copyfileobj(file, data)
data.seek(0)
return self.from_raw_file(data, store, original=True)
@property
def original(self):
"""(:class:`Image`) The original image. It could be ``None``
if there are no stored images yet.
"""
if Session.object_session(self.instance) is None:
for image, store in self._stored_images:
if image.original:
return image
state = instance_state(self.instance)
try:
added = state.committed_state[self.attr.key].added_items
except KeyError:
pass
else:
for image in added:
if image.original:
return image
if self.session:
for image in self.session.new:
if image.original:
return image
return
query = self.filter_by(original=True)
try:
return query.one()
except NoResultFound:
pass
def require_original(self):
"""Returns the :attr:`original` image or just raise
:exc:`~exceptions.IOError` (instead of returning ``None``).
That means it guarantees the return value is never ``None``
but always :class:`Image`.
:returns: the :attr:`original` image
:rtype: :class:`Image`
:raises exceptions.IOError: when there's no :attr:`original`
image yet
"""
original = self.original
if original is None:
raise IOError('there is no original image yet')
return original
def find_thumbnail(self, width=None, height=None):
"""Finds the thumbnail of the image with the given ``width``
and/or ``height``.
:param width: the thumbnail width
:type width: :class:`numbers.Integral`
:param height: the thumbnail height
:type height: :class:`numbers.Integral`
:returns: the thumbnail image
:rtype: :class:`Image`
:raises sqlalchemy.orm.exc.NoResultFound:
when there's no image of such size
"""
if width is None and height is None:
raise TypeError('required width and/or height')
q = self
if width is not None:
q = q.filter_by(width=width)
if height is not None:
q = q.filter_by(height=height)
try:
return q.one()
except NoResultFound:
if width is not None and height is not None:
msg = 'size: ' + repr((width, height))
elif width is not None:
msg = 'width: ' + repr(width)
else:
msg = 'height: ' + repr(height)
raise NoResultFound('no thumbnail image of such ' + msg)
def generate_thumbnail(self, ratio=None, width=None, height=None,
filter='undefined', store=current_store,
_preprocess_image=None, _postprocess_image=None):
"""Resizes the :attr:`original` (scales up or down) and
then store the resized thumbnail into the ``store``.
:param ratio: resize by its ratio. if it's greater than 1
it scales up, and if it's less than 1 it scales
down. exclusive for ``width`` and ``height``
parameters
:type ratio: :class:`numbers.Real`
:param width: resize by its width. exclusive for ``ratio``
and ``height`` parameters
:type width: :class:`numbers.Integral`
:param height: resize by its height. exclusive for ``ratio``
and ``width`` parameters
:type height: :class:`numbers.Integral`
:param filter: a filter type to use for resizing. choose one in
:const:`wand.image.FILTER_TYPES`. default is
``'undefined'`` which means ImageMagick will try
                       to guess the best one to use
:type filter: :class:`basestring`, :class:`numbers.Integral`
:param store: the storage to store the resized image file.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:param _preprocess_image: internal-use only option for preprocessing
original image before resizing.
it has to be callable which takes
a :class:`wand.image.Image` object
and returns a new :class:`wand.image.Image`
object
:type _preprocess_image: :class:`collections.Callable`
        :param _postprocess_image: internal-use only option for postprocessing
                                   the resized image before storing it.
it has to be callable which takes
a :class:`wand.image.Image` object
and returns a new :class:`wand.image.Image`
object
:type _postprocess_image: :class:`collections.Callable`
:returns: the resized thumbnail image. it might be an already
existing image if the same size already exists
:rtype: :class:`Image`
:raises exceptions.IOError: when there's no :attr:`original`
image yet
"""
params = ratio, width, height
param_count = sum(p is not None for p in params)
if not param_count:
raise TypeError('pass an argument ratio, width, or height')
elif param_count > 1:
raise TypeError('pass only one argument in ratio, width, or '
'height; these parameters are exclusive for '
'each other')
transient = Session.object_session(self.instance) is None
state = instance_state(self.instance)
try:
added = state.committed_state[self.attr.key].added_items
except KeyError:
added = []
if width is not None:
if not isinstance(width, numbers.Integral):
raise TypeError('width must be integer, not ' + repr(width))
elif width < 1:
raise ValueError('width must be natural number, not ' +
repr(width))
# find the same-but-already-generated thumbnail
for image in added:
if image.width == width:
return image
if not transient:
query = self.filter_by(width=width)
try:
return query.one()
except NoResultFound:
pass
def height(sz):
return sz[1] * (width / sz[0])
elif height is not None:
if not isinstance(height, numbers.Integral):
raise TypeError('height must be integer, not ' + repr(height))
elif height < 1:
raise ValueError('height must be natural number, not ' +
repr(height))
# find the same-but-already-generated thumbnail
for image in added:
if image.height == height:
return image
if not transient:
query = self.filter_by(height=height)
try:
return query.one()
except NoResultFound:
pass
def width(sz):
return sz[0] * (height / sz[1])
elif ratio is not None:
if not isinstance(ratio, numbers.Real):
raise TypeError('ratio must be an instance of numbers.Real, '
'not ' + repr(ratio))
def width(sz):
return sz[0] * ratio
def height(sz):
return sz[1] * ratio
data = io.BytesIO()
with self.open_file(store=store) as f:
if _preprocess_image is None:
img = WandImage(file=f)
else:
with WandImage(file=f) as img:
img = _preprocess_image(img)
with img:
if img.mimetype in VECTOR_TYPES:
img.format = 'png'
original_size = img.size
if callable(width):
width = width(original_size)
if callable(height):
height = height(original_size)
width = int(width)
height = int(height)
# find the same-but-already-generated thumbnail
for image in added:
if image.width == width and image.height == height:
return image
if not transient:
query = self.filter_by(width=width, height=height)
try:
return query.one()
except NoResultFound:
pass
if len(img.sequence) > 1:
img_ctx = img.sequence[0].clone()
img_ctx.resize(width, height, filter=filter)
else:
img_ctx = NoopContext(img)
with img_ctx as single_img:
single_img.resize(width, height, filter=filter)
if _postprocess_image is None:
mimetype = img.mimetype
single_img.save(file=data)
else:
with _postprocess_image(img) as img:
mimetype = img.mimetype
single_img.save(file=data)
return self.from_raw_file(data, store,
size=(width, height),
mimetype=mimetype,
original=False)
def open_file(self, store=current_store, use_seek=False):
"""The shorthand of :meth:`~Image.open_file()` for
the :attr:`original`.
:param store: the storage which contains the image files
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
        :param use_seek: whether the file should be seekable.
                         if ``True`` it may be buffered in memory.
                         default is ``False``
:type use_seek: :class:`bool`
:returns: the file-like object of the image, which is a context
manager (plus, also seekable only if ``use_seek``
is ``True``)
:rtype: :class:`file`,
:class:`~sqlalchemy_imageattach.file.FileProxy`,
file-like object
"""
original = self.require_original()
if Session.object_session(self.instance) is None:
try:
file = original.file
except AttributeError:
raise IOError('no stored original image file')
return ReusableFileProxy(file)
return original.open_file(store, use_seek)
def make_blob(self, store=current_store):
"""The shorthand of :meth:`~Image.make_blob()` for
the :attr:`original`.
:param store: the storage which contains the image files.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the byte string of the :attr:`original` image
:rtype: :class:`str`
"""
return self.require_original().make_blob(store)
def locate(self, store=current_store):
"""The shorthand of :meth:`~Image.locate()` for
the :attr:`original`.
:param store: the storage which contains the image files.
:data:`~sqlalchemy_imageattach.context.current_store`
by default
:type store: :class:`~sqlalchemy_imageattach.store.Store`
:returns: the url of the :attr:`original` image
:rtype: :class:`basestring`
"""
return self.require_original().locate(store)
def __nonzero__(self):
session = Session.object_session(self.instance)
if session is None:
return bool(self.count())
for v, in session.query(exists(self.as_scalar())):
return bool(v)
return False
def __html__(self):
if not self:
return ''
url = cgi.escape(self.locate())
size = self.require_original().size
return '<img src="{0}" width="{1}" height="{2}">'.format(url, *size)
listen(Session, 'after_soft_rollback', ImageSet._images_failed)
listen(Session, 'after_commit', ImageSet._images_succeeded)
listen(Image, 'after_insert', ImageSet._mark_image_file_stored, propagate=True)
listen(Image, 'after_delete', ImageSet._mark_image_file_deleted,
propagate=True)
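# The four listeners above tie ImageSet's bookkeeping to the session
# lifecycle: files are written on flush (after_insert), marked on delete
# (after_delete), and finally confirmed or undone on after_commit /
# after_soft_rollback.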
|
|
import os
import sys
import time
from copy import deepcopy
from typing import Any, Dict, List, Tuple, Union
from urllib.parse import urljoin
from django.template.loaders import app_directories
import zerver.lib.logging_util
from scripts.lib.zulip_tools import get_tornado_ports
from zerver.lib.db import TimeTrackingConnection
from .config import (
DEPLOY_ROOT,
DEVELOPMENT,
PRODUCTION,
config_file,
get_config,
get_from_file_if_exists,
get_secret,
)
from .configured_settings import (
ADMINS,
ALLOWED_HOSTS,
AUTH_LDAP_BIND_DN,
AUTH_LDAP_CONNECTION_OPTIONS,
AUTH_LDAP_SERVER_URI,
AUTHENTICATION_BACKENDS,
CAMO_URI,
DEBUG,
DEBUG_ERROR_REPORTING,
EMAIL_BACKEND,
EMAIL_HOST,
ERROR_REPORTING,
EXTERNAL_HOST,
EXTERNAL_HOST_WITHOUT_PORT,
EXTERNAL_URI_SCHEME,
EXTRA_INSTALLED_APPS,
GOOGLE_OAUTH2_CLIENT_ID,
IS_DEV_DROPLET,
LOCAL_UPLOADS_DIR,
MEMCACHED_LOCATION,
MEMCACHED_USERNAME,
REALM_HOSTS,
REGISTER_LINK_DISABLED,
REMOTE_POSTGRES_HOST,
REMOTE_POSTGRES_PORT,
REMOTE_POSTGRES_SSLMODE,
SENDFILE_BACKEND,
SENTRY_DSN,
SOCIAL_AUTH_APPLE_APP_ID,
SOCIAL_AUTH_APPLE_SERVICES_ID,
SOCIAL_AUTH_GITHUB_KEY,
SOCIAL_AUTH_GITHUB_ORG_NAME,
SOCIAL_AUTH_GITHUB_TEAM_ID,
SOCIAL_AUTH_GOOGLE_KEY,
SOCIAL_AUTH_SAML_ENABLED_IDPS,
SOCIAL_AUTH_SAML_SECURITY_CONFIG,
STATSD_HOST,
TORNADO_PORTS,
USING_PGROONGA,
ZULIP_ADMINISTRATOR,
)
########################################################################
# INITIAL SETTINGS
########################################################################
# Make this unique, and don't share it with anybody.
SECRET_KEY = get_secret("secret_key")
# A shared secret, used to authenticate different parts of the app to each other.
SHARED_SECRET = get_secret("shared_secret")
# We use this salt to hash a user's email into a filename for their user-uploaded
# avatar. If this salt is discovered, attackers will only be able to determine
# that the owner of an email account has uploaded an avatar to Zulip, which isn't
# the end of the world. Don't use the salt where there is more security exposure.
AVATAR_SALT = get_secret("avatar_salt")
# SERVER_GENERATION is used to track whether the server has been
# restarted for triggering browser clients to reload.
SERVER_GENERATION = int(time.time())
# Key to authenticate this server to zulip.org for push notifications, etc.
ZULIP_ORG_KEY = get_secret("zulip_org_key")
ZULIP_ORG_ID = get_secret("zulip_org_id")
if DEBUG:
INTERNAL_IPS = ('127.0.0.1',)
# Detect whether we're running as a queue worker; this impacts the logging configuration.
if len(sys.argv) > 2 and sys.argv[0].endswith('manage.py') and sys.argv[1] == 'process_queue':
IS_WORKER = True
else:
IS_WORKER = False
# This is overridden in test_settings.py for the test suites
TEST_SUITE = False
# The new user tutorial is enabled by default, but disabled for client tests.
TUTORIAL_ENABLED = True
# This is overridden in test_settings.py for the test suites
PUPPETEER_TESTS = False
# This is overridden in test_settings.py for the test suites
RUNNING_OPENAPI_CURL_TEST = False
# This is overridden in test_settings.py for the test suites
GENERATE_STRIPE_FIXTURES = False
# This is overridden in test_settings.py for the test suites
BAN_CONSOLE_OUTPUT = False
# These are the settings that we will check that the user has filled in for
# production deployments before starting the app. It consists of a series
# of pairs of (setting name, default value that it must be changed from)
REQUIRED_SETTINGS = [("EXTERNAL_HOST", "zulip.example.com"),
("ZULIP_ADMINISTRATOR", "[email protected]"),
# SECRET_KEY doesn't really need to be here, in
# that we set it automatically, but just in
# case, it seems worth having in this list
("SECRET_KEY", ""),
("AUTHENTICATION_BACKENDS", ()),
]
MANAGERS = ADMINS
########################################################################
# STANDARD DJANGO SETTINGS
########################################################################
# Local time zone for this installation. Choices can be found here:
# https://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# this directory will be used to store logs for development environment
DEVELOPMENT_LOG_DIRECTORY = os.path.join(DEPLOY_ROOT, 'var', 'log')
# Make redirects work properly behind a reverse proxy
USE_X_FORWARDED_HOST = True
# Extend ALLOWED_HOSTS with localhost (needed to RPC to Tornado),
ALLOWED_HOSTS += ['127.0.0.1', 'localhost']
# ... with hosts corresponding to EXTERNAL_HOST,
ALLOWED_HOSTS += [EXTERNAL_HOST_WITHOUT_PORT, "." + EXTERNAL_HOST_WITHOUT_PORT]
# ... and with the hosts in REALM_HOSTS.
ALLOWED_HOSTS += REALM_HOSTS.values()
class TwoFactorLoader(app_directories.Loader):
def get_dirs(self) -> List[str]:
dirs = super().get_dirs()
return [d for d in dirs if 'two_factor' in d]
MIDDLEWARE = (
    # With the exception of its dependencies,
# our logging middleware should be the top middleware item.
'zerver.middleware.TagRequests',
'zerver.middleware.SetRemoteAddrFromForwardedFor',
'zerver.middleware.RequestContext',
'zerver.middleware.LogRequests',
'zerver.middleware.JsonErrorHandler',
'zerver.middleware.RateLimitMiddleware',
'zerver.middleware.FlushDisplayRecipientCache',
'zerver.middleware.ZulipCommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'zerver.middleware.LocaleMiddleware',
'zerver.middleware.HostDomainMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
# Make sure 2FA middlewares come after authentication middleware.
'django_otp.middleware.OTPMiddleware', # Required by two factor auth.
'two_factor.middleware.threadlocals.ThreadLocals', # Required by Twilio
# Needs to be after CommonMiddleware, which sets Content-Length
'zerver.middleware.FinalizeOpenGraphDescription',
)
ANONYMOUS_USER_ID = None
AUTH_USER_MODEL = "zerver.UserProfile"
TEST_RUNNER = 'zerver.lib.test_runner.Runner'
ROOT_URLCONF = 'zproject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'zproject.wsgi.application'
# A site can include additional installed apps via the
# EXTRA_INSTALLED_APPS setting
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'confirmation',
'webpack_loader',
'zerver',
'social_django',
# 2FA related apps.
'django_otp',
'django_otp.plugins.otp_static',
'django_otp.plugins.otp_totp',
'two_factor',
]
if USING_PGROONGA:
INSTALLED_APPS += ['pgroonga']
INSTALLED_APPS += EXTRA_INSTALLED_APPS
ZILENCER_ENABLED = 'zilencer' in INSTALLED_APPS
CORPORATE_ENABLED = 'corporate' in INSTALLED_APPS
if not TORNADO_PORTS:
TORNADO_PORTS = get_tornado_ports(config_file)
TORNADO_PROCESSES = len(TORNADO_PORTS)
RUNNING_INSIDE_TORNADO = False
AUTORELOAD = DEBUG
SILENCED_SYSTEM_CHECKS = [
# auth.W004 checks that the UserProfile field named by USERNAME_FIELD has
# `unique=True`. For us this is `email`, and it's unique only per-realm.
# Per Django docs, this is perfectly fine so long as our authentication
# backends support the username not being unique; and they do.
# See: https://docs.djangoproject.com/en/2.2/topics/auth/customizing/#django.contrib.auth.models.CustomUser.USERNAME_FIELD
"auth.W004",
]
########################################################################
# DATABASE CONFIGURATION
########################################################################
# Zulip's Django configuration supports 4 different ways to do
# PostgreSQL authentication:
#
# * The development environment uses the `local_database_password`
# secret from `zulip-secrets.conf` to authenticate with a local
# database. The password is automatically generated and managed by
#   `generate_secrets.py` during provisioning.
#
# The remaining 3 options are for production use:
#
# * Using PostgreSQL's "peer" authentication to authenticate to a
# database on the local system using one's user ID (processes
# running as user `zulip` on the system are automatically
# authenticated as database user `zulip`). This is the default in
# production. We don't use this in the development environment,
# because it requires the developer's user to be called `zulip`.
#
# * Using password authentication with a remote PostgreSQL server using
# the `REMOTE_POSTGRES_HOST` setting and the password from the
# `postgres_password` secret.
#
# * Using passwordless authentication with a remote PostgreSQL server
# using the `REMOTE_POSTGRES_HOST` setting and a client certificate
# under `/home/zulip/.postgresql/`.
#
# We implement these options with a default DATABASES configuration
# supporting peer authentication, with logic to override it as
# appropriate if DEVELOPMENT or REMOTE_POSTGRES_HOST is set.
DATABASES: Dict[str, Dict[str, Any]] = {"default": {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'zulip',
'USER': 'zulip',
# Password = '' => peer/certificate authentication (no password)
'PASSWORD': '',
# Host = '' => connect to localhost by default
'HOST': '',
'SCHEMA': 'zulip',
'CONN_MAX_AGE': 600,
'OPTIONS': {
'connection_factory': TimeTrackingConnection,
},
}}
if DEVELOPMENT:
LOCAL_DATABASE_PASSWORD = get_secret("local_database_password")
DATABASES["default"].update(
PASSWORD=LOCAL_DATABASE_PASSWORD,
HOST='localhost',
)
elif REMOTE_POSTGRES_HOST != '':
DATABASES['default'].update(
HOST=REMOTE_POSTGRES_HOST,
PORT=REMOTE_POSTGRES_PORT,
)
if get_secret("postgres_password") is not None:
DATABASES['default'].update(
PASSWORD=get_secret("postgres_password"),
)
if REMOTE_POSTGRES_SSLMODE != '':
DATABASES['default']['OPTIONS']['sslmode'] = REMOTE_POSTGRES_SSLMODE
else:
DATABASES['default']['OPTIONS']['sslmode'] = 'verify-full'
POSTGRESQL_MISSING_DICTIONARIES = bool(get_config('postgresql', 'missing_dictionaries', None))
########################################################################
# RABBITMQ CONFIGURATION
########################################################################
USING_RABBITMQ = True
RABBITMQ_PASSWORD = get_secret("rabbitmq_password")
########################################################################
# CACHING CONFIGURATION
########################################################################
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
MEMCACHED_PASSWORD = get_secret("memcached_password")
CACHES = {
'default': {
'BACKEND': 'django_bmemcached.memcached.BMemcached',
'LOCATION': MEMCACHED_LOCATION,
'OPTIONS': {
'socket_timeout': 3600,
'username': MEMCACHED_USERNAME,
'password': MEMCACHED_PASSWORD,
'pickle_protocol': 4,
},
},
'database': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'third_party_api_results',
# This cache shouldn't timeout; we're really just using the
# cache API to store the results of requests to third-party
# APIs like the Twitter API permanently.
'TIMEOUT': None,
'OPTIONS': {
'MAX_ENTRIES': 100000000,
'CULL_FREQUENCY': 10,
},
},
'in-memory': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
########################################################################
# REDIS-BASED RATE LIMITING CONFIGURATION
########################################################################
RATE_LIMITING_RULES = {
'api_by_user': [
(60, 200), # 200 requests max every minute
],
'authenticate_by_username': [
(1800, 5), # 5 login attempts within 30 minutes
],
'password_reset_form_by_email': [
(3600, 2), # 2 reset emails per hour
(86400, 5), # 5 per day
],
}
# List of domains that, when applied to a request in a Tornado process,
# will be handled with the separate in-memory rate limiting backend for Tornado,
# which has its own buckets separate from the default backend.
# In principle, it should be impossible to make requests to tornado that fall into
# other domains, but we use this list as an extra precaution.
RATE_LIMITING_DOMAINS_FOR_TORNADO = ['api_by_user']
RATE_LIMITING_MIRROR_REALM_RULES = [
(60, 50), # 50 emails per minute
(300, 120), # 120 emails per 5 minutes
(3600, 600), # 600 emails per hour
]
DEBUG_RATE_LIMITING = DEBUG
REDIS_PASSWORD = get_secret('redis_password')
########################################################################
# SECURITY SETTINGS
########################################################################
# Tell the browser to never send our cookies without encryption, e.g.
# when executing the initial http -> https redirect.
#
# Turn it off for local testing because we don't have SSL.
if PRODUCTION:
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-05#section-4.1.3.2
SESSION_COOKIE_NAME = "__Host-sessionid"
CSRF_COOKIE_NAME = "__Host-csrftoken"
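    # The __Host- prefix is only honored by browsers for cookies set with
    # Secure, from a secure origin, with Path=/ and no Domain attribute,
    # which matches the settings above.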
# Prevent JavaScript from reading the CSRF token from cookies. Our code gets
# the token from the DOM, which means malicious code could too. But hiding the
# cookie will slow down some attackers.
CSRF_COOKIE_HTTPONLY = True
CSRF_FAILURE_VIEW = 'zerver.middleware.csrf_failure'
if DEVELOPMENT:
# Use fast password hashing for creating testing users when not
# PRODUCTION. Saves a bunch of time.
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
)
# Also we auto-generate passwords for the default users which you
# can query using ./manage.py print_initial_password
INITIAL_PASSWORD_SALT = get_secret("initial_password_salt")
else:
# For production, use the best password hashing algorithm: Argon2
# Zulip was originally on PBKDF2 so we need it for compatibility
PASSWORD_HASHERS = ('django.contrib.auth.hashers.Argon2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher')
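    # Django uses the first hasher for new passwords and the rest only for
    # verifying (and then upgrading) existing hashes, so keeping PBKDF2 second
    # preserves logins created before the Argon2 switch.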
########################################################################
# API/BOT SETTINGS
########################################################################
ROOT_DOMAIN_URI = EXTERNAL_URI_SCHEME + EXTERNAL_HOST
S3_KEY = get_secret("s3_key")
S3_SECRET_KEY = get_secret("s3_secret_key")
if LOCAL_UPLOADS_DIR is not None:
if SENDFILE_BACKEND is None:
SENDFILE_BACKEND = 'django_sendfile.backends.nginx'
SENDFILE_ROOT = os.path.join(LOCAL_UPLOADS_DIR, "files")
SENDFILE_URL = '/serve_uploads'
# GCM tokens are IP-whitelisted; if we deploy to additional
# servers you will need to explicitly add their IPs here:
# https://cloud.google.com/console/project/apps~zulip-android/apiui/credential
ANDROID_GCM_API_KEY = get_secret("android_gcm_api_key")
DROPBOX_APP_KEY = get_secret("dropbox_app_key")
BIG_BLUE_BUTTON_SECRET = get_secret('big_blue_button_secret')
MAILCHIMP_API_KEY = get_secret("mailchimp_api_key")
# Twitter API credentials
# Secrecy not required because it's only used for read-only requests.
# Please don't make us go over our rate limit.
TWITTER_CONSUMER_KEY = get_secret("twitter_consumer_key")
TWITTER_CONSUMER_SECRET = get_secret("twitter_consumer_secret")
TWITTER_ACCESS_TOKEN_KEY = get_secret("twitter_access_token_key")
TWITTER_ACCESS_TOKEN_SECRET = get_secret("twitter_access_token_secret")
# These are the bots that Zulip sends automated messages as.
INTERNAL_BOTS = [{'var_name': 'NOTIFICATION_BOT',
'email_template': 'notification-bot@%s',
'name': 'Notification Bot',
},
{'var_name': 'EMAIL_GATEWAY_BOT',
'email_template': 'emailgateway@%s',
'name': 'Email Gateway',
},
{'var_name': 'NAGIOS_SEND_BOT',
'email_template': 'nagios-send-bot@%s',
'name': 'Nagios Send Bot',
},
{'var_name': 'NAGIOS_RECEIVE_BOT',
'email_template': 'nagios-receive-bot@%s',
'name': 'Nagios Receive Bot',
},
{'var_name': 'WELCOME_BOT',
'email_template': 'welcome-bot@%s',
'name': 'Welcome Bot',
}]
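# The '%s' in each email_template above is filled in elsewhere in the codebase
# with INTERNAL_BOT_DOMAIN (set further below), e.g., illustratively:
#
#     'notification-bot@%s' % (INTERNAL_BOT_DOMAIN,)  # -> '[email protected]'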
# Bots that are created for each realm, like the reminder-bot, go here.
REALM_INTERNAL_BOTS: List[Dict[str, str]] = []
# These are realm-internal bots that may exist in some organizations, so we
# configure the settings for them here, but they should not be auto-created at this time.
DISABLED_REALM_INTERNAL_BOTS = [
{'var_name': 'REMINDER_BOT',
'email_template': 'reminder-bot@%s',
'name': 'Reminder Bot',
},
]
if PRODUCTION:
INTERNAL_BOTS += [
{'var_name': 'NAGIOS_STAGING_SEND_BOT',
'email_template': 'nagios-staging-send-bot@%s',
'name': 'Nagios Staging Send Bot',
},
{'var_name': 'NAGIOS_STAGING_RECEIVE_BOT',
'email_template': 'nagios-staging-receive-bot@%s',
'name': 'Nagios Staging Receive Bot',
},
]
INTERNAL_BOT_DOMAIN = "zulip.com"
########################################################################
# STATSD CONFIGURATION
########################################################################
# Statsd is not super well supported; if you want to use it you'll need
# to set STATSD_HOST and STATSD_PREFIX.
if STATSD_HOST != '':
INSTALLED_APPS += ['django_statsd']
STATSD_PORT = 8125
STATSD_CLIENT = 'django_statsd.clients.normal'
########################################################################
# CAMO HTTPS CACHE CONFIGURATION
########################################################################
if CAMO_URI != '':
# This needs to be synced with the Camo installation
CAMO_KEY = get_secret("camo_key")
########################################################################
# STATIC CONTENT AND MINIFICATION SETTINGS
########################################################################
if PRODUCTION or IS_DEV_DROPLET or os.getenv('EXTERNAL_HOST') is not None:
STATIC_URL = urljoin(ROOT_DOMAIN_URI, '/static/')
else:
STATIC_URL = 'http://localhost:9991/static/'
# ZulipStorage is a modified version of ManifestStaticFilesStorage,
# and, like that class, it inserts a file hash into filenames
# to prevent the browser from using stale files from cache.
#
# Unlike PipelineStorage, it requires the files to exist in
# STATIC_ROOT even for dev servers. So we only use
# ZulipStorage when not DEBUG.
if not DEBUG:
STATICFILES_STORAGE = 'zerver.lib.storage.ZulipStorage'
if PRODUCTION:
STATIC_ROOT = '/home/zulip/prod-static'
else:
STATIC_ROOT = os.path.abspath(os.path.join(DEPLOY_ROOT, 'prod-static/serve'))
# If changing this, you also need to update the hack modifications to this in
# our compilemessages management command.
LOCALE_PATHS = (os.path.join(DEPLOY_ROOT, 'locale'),)
# We want all temporary uploaded files to be stored on disk.
FILE_UPLOAD_MAX_MEMORY_SIZE = 0
STATICFILES_DIRS = ['static/']
if DEBUG:
WEBPACK_STATS_FILE = os.path.join('var', 'webpack-stats-dev.json')
else:
WEBPACK_STATS_FILE = 'webpack-stats-production.json'
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': '../webpack/' if DEBUG else 'webpack-bundles/',
'STATS_FILE': os.path.join(DEPLOY_ROOT, WEBPACK_STATS_FILE),
},
}
########################################################################
# TEMPLATES SETTINGS
########################################################################
# List of callables that know how to import templates from various sources.
LOADERS: List[Union[str, Tuple[object, ...]]] = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
if PRODUCTION:
# Template caching is a significant performance win in production.
LOADERS = [('django.template.loaders.cached.Loader', LOADERS)]
base_template_engine_settings: Dict[str, Any] = {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'OPTIONS': {
'environment': 'zproject.jinja2.environment',
'extensions': [
'jinja2.ext.i18n',
'jinja2.ext.autoescape',
'webpack_loader.contrib.jinja2ext.WebpackExtension',
],
'context_processors': [
'zerver.context_processors.zulip_default_context',
'django.template.context_processors.i18n',
],
},
}
default_template_engine_settings = deepcopy(base_template_engine_settings)
default_template_engine_settings.update(
NAME='Jinja2',
DIRS=[
# The main templates directory
os.path.join(DEPLOY_ROOT, 'templates'),
# The webhook integration templates
os.path.join(DEPLOY_ROOT, 'zerver', 'webhooks'),
# The python-zulip-api:zulip_bots package templates
os.path.join('static' if DEBUG else STATIC_ROOT, 'generated', 'bots'),
],
APP_DIRS=True,
)
non_html_template_engine_settings = deepcopy(base_template_engine_settings)
non_html_template_engine_settings.update(
NAME='Jinja2_plaintext',
DIRS=[os.path.join(DEPLOY_ROOT, 'templates')],
APP_DIRS=False,
)
non_html_template_engine_settings['OPTIONS'].update(
autoescape=False,
trim_blocks=True,
lstrip_blocks=True,
)
# django-two-factor uses the default Django template engine (not Jinja2), so we
# need to add config for it here.
two_factor_template_options = deepcopy(default_template_engine_settings['OPTIONS'])
del two_factor_template_options['environment']
del two_factor_template_options['extensions']
two_factor_template_options['loaders'] = ['zproject.settings.TwoFactorLoader']
two_factor_template_engine_settings = {
'NAME': 'Two_Factor',
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': False,
'OPTIONS': two_factor_template_options,
}
# The order here is important; get_template and related/parent functions try
# the template engines in order until one succeeds.
TEMPLATES = [
default_template_engine_settings,
non_html_template_engine_settings,
two_factor_template_engine_settings,
]
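# Illustration only (hypothetical template name): a lookup such as the one
# below is tried against 'Jinja2', then 'Jinja2_plaintext', then 'Two_Factor',
# in the order listed above; the first engine that can load the template wins.
#
#     from django.template.loader import get_template
#     get_template('example.html')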
########################################################################
# LOGGING SETTINGS
########################################################################
def zulip_path(path: str) -> str:
if DEVELOPMENT:
# if DEVELOPMENT, store these files in the Zulip checkout
if path.startswith("/var/log"):
path = os.path.join(DEVELOPMENT_LOG_DIRECTORY, os.path.basename(path))
else:
path = os.path.join(os.path.join(DEPLOY_ROOT, 'var'), os.path.basename(path))
return path
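# Illustrative behavior of zulip_path (shown as comments only): in development,
# "/var/log/zulip/server.log" resolves to a file under DEVELOPMENT_LOG_DIRECTORY,
# while in production the path is returned unchanged.
#
#     assert zulip_path("/var/log/zulip/server.log").endswith("server.log")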
SERVER_LOG_PATH = zulip_path("/var/log/zulip/server.log")
ERROR_FILE_LOG_PATH = zulip_path("/var/log/zulip/errors.log")
MANAGEMENT_LOG_PATH = zulip_path("/var/log/zulip/manage.log")
WORKER_LOG_PATH = zulip_path("/var/log/zulip/workers.log")
SLOW_QUERIES_LOG_PATH = zulip_path("/var/log/zulip/slow_queries.log")
JSON_PERSISTENT_QUEUE_FILENAME_PATTERN = zulip_path("/home/zulip/tornado/event_queues%s.json")
EMAIL_LOG_PATH = zulip_path("/var/log/zulip/send_email.log")
EMAIL_MIRROR_LOG_PATH = zulip_path("/var/log/zulip/email_mirror.log")
EMAIL_DELIVERER_LOG_PATH = zulip_path("/var/log/zulip/email_deliverer.log")
EMAIL_CONTENT_LOG_PATH = zulip_path("/var/log/zulip/email_content.log")
LDAP_LOG_PATH = zulip_path("/var/log/zulip/ldap.log")
LDAP_SYNC_LOG_PATH = zulip_path("/var/log/zulip/sync_ldap_user_data.log")
QUEUE_ERROR_DIR = zulip_path("/var/log/zulip/queue_error")
QUEUE_STATS_DIR = zulip_path("/var/log/zulip/queue_stats")
DIGEST_LOG_PATH = zulip_path("/var/log/zulip/digest.log")
ANALYTICS_LOG_PATH = zulip_path("/var/log/zulip/analytics.log")
ANALYTICS_LOCK_DIR = zulip_path("/home/zulip/deployments/analytics-lock-dir")
WEBHOOK_LOG_PATH = zulip_path("/var/log/zulip/webhooks_errors.log")
WEBHOOK_UNSUPPORTED_EVENTS_LOG_PATH = zulip_path("/var/log/zulip/webhooks_unsupported_events.log")
SOFT_DEACTIVATION_LOG_PATH = zulip_path("/var/log/zulip/soft_deactivation.log")
TRACEMALLOC_DUMP_DIR = zulip_path("/var/log/zulip/tracemalloc")
SCHEDULED_MESSAGE_DELIVERER_LOG_PATH = zulip_path("/var/log/zulip/scheduled_message_deliverer.log")
RETENTION_LOG_PATH = zulip_path("/var/log/zulip/message_retention.log")
AUTH_LOG_PATH = zulip_path("/var/log/zulip/auth.log")
ZULIP_WORKER_TEST_FILE = '/tmp/zulip-worker-test-file'
if IS_WORKER:
FILE_LOG_PATH = WORKER_LOG_PATH
else:
FILE_LOG_PATH = SERVER_LOG_PATH
# This is disabled in a few tests.
LOGGING_ENABLED = True
DEFAULT_ZULIP_HANDLERS = [
*(['zulip_admins'] if ERROR_REPORTING else []),
'console', 'file', 'errors_file',
]
LOGGING: Dict[str, Any] = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'()': 'zerver.lib.logging_util.ZulipFormatter',
},
'webhook_request_data': {
'()': 'zerver.lib.logging_util.ZulipWebhookFormatter',
},
},
'filters': {
'ZulipLimiter': {
'()': 'zerver.lib.logging_util.ZulipLimiter',
},
'EmailLimiter': {
'()': 'zerver.lib.logging_util.EmailLimiter',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'nop': {
'()': 'zerver.lib.logging_util.ReturnTrue',
},
'require_logging_enabled': {
'()': 'zerver.lib.logging_util.ReturnEnabled',
},
'require_really_deployed': {
'()': 'zerver.lib.logging_util.RequireReallyDeployed',
},
'skip_200_and_304': {
'()': 'django.utils.log.CallbackFilter',
'callback': zerver.lib.logging_util.skip_200_and_304,
},
'skip_site_packages_logs': {
'()': 'django.utils.log.CallbackFilter',
'callback': zerver.lib.logging_util.skip_site_packages_logs,
},
},
'handlers': {
'zulip_admins': {
'level': 'ERROR',
'class': 'zerver.logging_handlers.AdminNotifyHandler',
'filters': (['ZulipLimiter', 'require_debug_false', 'require_really_deployed']
if not DEBUG_ERROR_REPORTING else []),
'formatter': 'default',
},
'auth_file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': AUTH_LOG_PATH,
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'default',
},
'file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': FILE_LOG_PATH,
},
'errors_file': {
'level': 'WARNING',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': ERROR_FILE_LOG_PATH,
},
'ldap_file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': LDAP_LOG_PATH,
},
'slow_queries_file': {
'level': 'INFO',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'default',
'filename': SLOW_QUERIES_LOG_PATH,
},
'webhook_file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'webhook_request_data',
'filename': WEBHOOK_LOG_PATH,
},
'webhook_unsupported_file': {
'level': 'DEBUG',
'class': 'logging.handlers.WatchedFileHandler',
'formatter': 'webhook_request_data',
'filename': WEBHOOK_UNSUPPORTED_EVENTS_LOG_PATH,
},
},
'loggers': {
# The Python logging module uses a hierarchy of logger names for config:
# "foo.bar" has parent "foo" has parent "", the root. But the semantics
# are subtle: it walks this hierarchy once to find the log level to
# decide whether to log the record at all, then a separate time to find
# handlers to emit the record.
#
# For `level`, the most specific ancestor that sets a `level` counts.
# For `handlers`, the most specific ancestor that sets `handlers`
# counts (assuming we set `propagate=False`, which we always do).
# These are independent -- they might come at the same layer, or
# either one could come before the other.
#
# For `filters`, no ancestors count at all -- only the exact logger name
# the record was logged at.
#
# Upstream docs: https://docs.python.org/3/library/logging
#
# Style rules:
# * Always set `propagate=False` if setting `handlers`.
# * Setting `level` equal to the parent is redundant; don't.
# * Setting `handlers` equal to the parent is redundant; don't.
# * Always write in order: level, filters, handlers, propagate.
# root logger
'': {
'level': 'INFO',
'filters': ['require_logging_enabled'],
'handlers': DEFAULT_ZULIP_HANDLERS,
},
# Django, alphabetized
'django': {
# Django's default logging config has already set some
# things on this logger. Just mentioning it here causes
# `logging.config` to reset it to defaults, as if never
# configured, which is what we want for it.
},
'django.request': {
# We set this to ERROR to prevent Django's default
# low-value logs with lines like "Not Found: /robots.txt"
# from being logged for every HTTP 4xx error at WARNING
# level, which would otherwise end up spamming our
# errors.log. We'll still get logs in errors.log
# including tracebacks for 5xx errors (i.e. Python
# exceptions).
'level': 'ERROR',
},
'django.security.DisallowedHost': {
'handlers': ['file'],
'propagate': False,
},
'django.server': {
'filters': ['skip_200_and_304'],
'handlers': ['console', 'file'],
'propagate': False,
},
'django.utils.autoreload': {
# We don't want logging spam from the autoreloaders in development.
'level': 'WARNING',
},
'django.template': {
'level': 'DEBUG',
'filters': ['require_debug_true', 'skip_site_packages_logs'],
'handlers': ['console'],
'propagate': False,
},
## Uncomment the following to get all database queries logged to the console
# 'django.db': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# 'propagate': False,
# },
# other libraries, alphabetized
'django_auth_ldap': {
'level': 'DEBUG',
'handlers': ['console', 'ldap_file', 'errors_file'],
'propagate': False,
},
'pika': {
# pika is super chatty on INFO.
'level': 'WARNING',
# pika spews a lot of ERROR logs when a connection fails.
# We reconnect automatically, so those should be treated as WARNING --
# write to the log for use in debugging, but no error emails/Zulips.
'handlers': ['console', 'file', 'errors_file'],
'propagate': False,
},
'requests': {
'level': 'WARNING',
},
# our own loggers, alphabetized
'zerver.lib.digest': {
'level': 'DEBUG',
},
'zerver.management.commands.deliver_email': {
'level': 'DEBUG',
},
'zerver.management.commands.enqueue_digest_emails': {
'level': 'DEBUG',
},
'zerver.management.commands.deliver_scheduled_messages': {
'level': 'DEBUG',
},
'zulip.auth': {
'level': 'DEBUG',
'handlers': [*DEFAULT_ZULIP_HANDLERS, 'auth_file'],
'propagate': False,
},
'zulip.ldap': {
'level': 'DEBUG',
'handlers': ['console', 'ldap_file', 'errors_file'],
'propagate': False,
},
'zulip.management': {
'handlers': ['file', 'errors_file'],
'propagate': False,
},
'zulip.queue': {
'level': 'WARNING',
},
'zulip.retention': {
'handlers': ['file', 'errors_file'],
'propagate': False,
},
'zulip.slow_queries': {
'level': 'INFO',
'handlers': ['slow_queries_file'],
'propagate': False,
},
'zulip.soft_deactivation': {
'handlers': ['file', 'errors_file'],
'propagate': False,
},
# This logger is used only for automated tests validating the
# error-handling behavior of the zulip_admins handler.
'zulip.test_zulip_admins_handler': {
'handlers': ['zulip_admins'],
'propagate': False,
},
'zulip.zerver.webhooks': {
'level': 'DEBUG',
'handlers': ['file', 'errors_file', 'webhook_file'],
'propagate': False,
},
'zulip.zerver.webhooks.unsupported': {
'level': 'DEBUG',
'handlers': ['webhook_unsupported_file'],
'propagate': False,
},
},
}
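# Application code picks up this configuration simply by requesting a logger
# by name; for example (illustration only), the webhook logger configured
# above writes DEBUG output to webhook_file among its handlers:
#
#     import logging
#     logging.getLogger("zulip.zerver.webhooks").debug("handled payload")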
if DEVELOPMENT:
CONTRIBUTOR_DATA_FILE_PATH = os.path.join(DEPLOY_ROOT, 'var/github-contributors.json')
else:
CONTRIBUTOR_DATA_FILE_PATH = '/var/lib/zulip/github-contributors.json'
LOGIN_REDIRECT_URL = '/'
# Client-side polling timeout for get_events, in milliseconds.
# We configure this here so that the client test suite can override it.
# We already kill the connection server-side with heartbeat events,
# but it's good to have a safety net. This value should be greater than
# (HEARTBEAT_MIN_FREQ_SECS + 10)
POLL_TIMEOUT = 90 * 1000
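# Worked out: 90 * 1000 ms is 90 seconds. Assuming HEARTBEAT_MIN_FREQ_SECS is
# 45 seconds (its value elsewhere in the Zulip codebase; treat that number as
# an assumption here), the required margin is 45 + 10 = 55 seconds, which 90
# seconds comfortably exceeds. Illustrative check (comment only):
#
#     assert POLL_TIMEOUT / 1000 > 45 + 10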
########################################################################
# SSO AND LDAP SETTINGS
########################################################################
USING_LDAP = "zproject.backends.ZulipLDAPAuthBackend" in AUTHENTICATION_BACKENDS
ONLY_LDAP = AUTHENTICATION_BACKENDS == ("zproject.backends.ZulipLDAPAuthBackend",)
USING_APACHE_SSO = ('zproject.backends.ZulipRemoteUserBackend' in AUTHENTICATION_BACKENDS)
ONLY_SSO = AUTHENTICATION_BACKENDS == ("zproject.backends.ZulipRemoteUserBackend",)
if ONLY_SSO:
HOME_NOT_LOGGED_IN = "/accounts/login/sso/"
else:
HOME_NOT_LOGGED_IN = '/login/'
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipDummyBackend',)
# Redirect to /devlogin/ by default in dev mode
if DEVELOPMENT:
HOME_NOT_LOGGED_IN = '/devlogin/'
LOGIN_URL = '/devlogin/'
POPULATE_PROFILE_VIA_LDAP = bool(AUTH_LDAP_SERVER_URI)
if POPULATE_PROFILE_VIA_LDAP and not USING_LDAP:
AUTHENTICATION_BACKENDS += ('zproject.backends.ZulipLDAPUserPopulator',)
else:
POPULATE_PROFILE_VIA_LDAP = USING_LDAP or POPULATE_PROFILE_VIA_LDAP
if POPULATE_PROFILE_VIA_LDAP:
import ldap
if (AUTH_LDAP_BIND_DN and ldap.OPT_REFERRALS not in AUTH_LDAP_CONNECTION_OPTIONS):
# The default behavior of python-ldap (without setting option
# `ldap.OPT_REFERRALS`) is to follow referrals, but anonymously.
# If our original query was non-anonymous, that's unlikely to
# work; skip the referral.
#
# The common case of this is that the server is Active Directory,
# it's already given us the answer we need, and the referral is
# just speculation about someplace else that has data our query
# could in principle match.
AUTH_LDAP_CONNECTION_OPTIONS[ldap.OPT_REFERRALS] = 0
if REGISTER_LINK_DISABLED is None:
# The default for REGISTER_LINK_DISABLED is a bit more
# complicated: we want it to be disabled by default for people
# using the LDAP backend that auto-creates users on login.
REGISTER_LINK_DISABLED = ONLY_LDAP
########################################################################
# SOCIAL AUTHENTICATION SETTINGS
########################################################################
SOCIAL_AUTH_FIELDS_STORED_IN_SESSION = ['subdomain', 'is_signup', 'mobile_flow_otp', 'desktop_flow_otp',
'multiuse_object_key']
SOCIAL_AUTH_LOGIN_ERROR_URL = '/login/'
# CLIENT is required by PSA's internal implementation. We name it
# SERVICES_ID to make things more readable in the configuration
# and our own custom backend code.
SOCIAL_AUTH_APPLE_CLIENT = SOCIAL_AUTH_APPLE_SERVICES_ID
SOCIAL_AUTH_APPLE_AUDIENCE = [id for id in [SOCIAL_AUTH_APPLE_CLIENT, SOCIAL_AUTH_APPLE_APP_ID] if id is not None]
if PRODUCTION:
SOCIAL_AUTH_APPLE_SECRET = get_from_file_if_exists("/etc/zulip/apple-auth-key.p8")
else:
SOCIAL_AUTH_APPLE_SECRET = get_from_file_if_exists("zproject/dev_apple.key")
SOCIAL_AUTH_GITHUB_SECRET = get_secret('social_auth_github_secret')
SOCIAL_AUTH_GITLAB_SECRET = get_secret('social_auth_gitlab_secret')
SOCIAL_AUTH_GITHUB_SCOPE = ['user:email']
if SOCIAL_AUTH_GITHUB_ORG_NAME or SOCIAL_AUTH_GITHUB_TEAM_ID:
SOCIAL_AUTH_GITHUB_SCOPE.append("read:org")
SOCIAL_AUTH_GITHUB_ORG_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_ORG_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GITHUB_TEAM_KEY = SOCIAL_AUTH_GITHUB_KEY
SOCIAL_AUTH_GITHUB_TEAM_SECRET = SOCIAL_AUTH_GITHUB_SECRET
SOCIAL_AUTH_GOOGLE_SECRET = get_secret('social_auth_google_secret')
# Fallback to google-oauth settings in case social auth settings for
# Google are missing; this is for backwards-compatibility with older
# Zulip versions where /etc/zulip/settings.py has not been migrated yet.
GOOGLE_OAUTH2_CLIENT_SECRET = get_secret('google_oauth2_client_secret')
SOCIAL_AUTH_GOOGLE_KEY = SOCIAL_AUTH_GOOGLE_KEY or GOOGLE_OAUTH2_CLIENT_ID
SOCIAL_AUTH_GOOGLE_SECRET = SOCIAL_AUTH_GOOGLE_SECRET or GOOGLE_OAUTH2_CLIENT_SECRET
if PRODUCTION:
SOCIAL_AUTH_SAML_SP_PUBLIC_CERT = get_from_file_if_exists("/etc/zulip/saml/zulip-cert.crt")
SOCIAL_AUTH_SAML_SP_PRIVATE_KEY = get_from_file_if_exists("/etc/zulip/saml/zulip-private-key.key")
if "signatureAlgorithm" not in SOCIAL_AUTH_SAML_SECURITY_CONFIG:
# If the configuration doesn't explicitly specify the algorithm,
# we set RSA with SHA-256 to override the python3-saml default, which uses
# insecure SHA1.
default_signature_alg = "http://www.w3.org/2001/04/xmldsig-more#rsa-sha256"
SOCIAL_AUTH_SAML_SECURITY_CONFIG["signatureAlgorithm"] = default_signature_alg
for idp_name, idp_dict in SOCIAL_AUTH_SAML_ENABLED_IDPS.items():
if DEVELOPMENT:
idp_dict['entity_id'] = get_secret('saml_entity_id', '')
idp_dict['url'] = get_secret('saml_url', '')
idp_dict['x509cert_path'] = 'zproject/dev_saml.cert'
# Set `x509cert` if not specified already; also support an override path.
if 'x509cert' in idp_dict:
continue
if 'x509cert_path' in idp_dict:
path = idp_dict['x509cert_path']
else:
path = f"/etc/zulip/saml/idps/{idp_name}.crt"
idp_dict['x509cert'] = get_from_file_if_exists(path)
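# For reference, the loop above expects SOCIAL_AUTH_SAML_ENABLED_IDPS entries
# shaped roughly as follows (hypothetical IdP name and values; only the keys
# actually referenced above are shown):
#
#     SOCIAL_AUTH_SAML_ENABLED_IDPS = {
#         'example_idp': {
#             'entity_id': 'https://idp.example.com/saml/metadata',
#             'url': 'https://idp.example.com/saml/sso',
#             # Provide 'x509cert' inline, or 'x509cert_path', or omit both and
#             # the certificate is read from /etc/zulip/saml/idps/example_idp.crt.
#         },
#     }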
SOCIAL_AUTH_PIPELINE = [
'social_core.pipeline.social_auth.social_details',
'zproject.backends.social_auth_associate_user',
'zproject.backends.social_auth_finish',
]
########################################################################
# EMAIL SETTINGS
########################################################################
# Django setting. Not used in the Zulip codebase.
DEFAULT_FROM_EMAIL = ZULIP_ADMINISTRATOR
if EMAIL_BACKEND is not None:
# If the server admin specified a custom email backend, use that.
pass
elif DEVELOPMENT:
# In the dev environment, emails are printed to the run-dev.py console.
EMAIL_BACKEND = 'zproject.email_backends.EmailLogBackEnd'
elif not EMAIL_HOST:
# If an email host is not specified, fail gracefully
WARN_NO_EMAIL = True
EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
else:
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_TIMEOUT = 15
if DEVELOPMENT:
EMAIL_HOST = get_secret('email_host', '')
EMAIL_PORT = int(get_secret('email_port', '25'))
EMAIL_HOST_USER = get_secret('email_host_user', '')
EMAIL_HOST_PASSWORD = get_secret('email_password')
EMAIL_GATEWAY_PASSWORD = get_secret('email_gateway_password')
AUTH_LDAP_BIND_PASSWORD = get_secret('auth_ldap_bind_password', '')
########################################################################
# MISC SETTINGS
########################################################################
if PRODUCTION:
# Filter out user data
DEFAULT_EXCEPTION_REPORTER_FILTER = 'zerver.filters.ZulipExceptionReporterFilter'
# This is a debugging option only
PROFILE_ALL_REQUESTS = False
CROSS_REALM_BOT_EMAILS = {
'[email protected]',
'[email protected]',
'[email protected]',
}
THUMBOR_KEY = get_secret('thumbor_key')
TWO_FACTOR_PATCH_ADMIN = False
# Allow the environment to override the default DSN
SENTRY_DSN = os.environ.get("SENTRY_DSN", SENTRY_DSN)
if SENTRY_DSN:
from .sentry import setup_sentry
setup_sentry(SENTRY_DSN)